"""Unit test package for dpybrew."""
|
__author__ = 'mehdibenchoufi'
IMAGE_SIZE_x = 512
IMAGE_SIZE_z = 512
NUM_IMG_DATA = 128
NUM_SAMPLES = 1672
OPENCV_RELATIVE_ANGLE = 512
CENTER_POINT_x = 400
CENTER_POINT_z = 100
SCAN_CONVERTER_SCALE = 200
|
import time
import nltk
import numpy as np
import pandas as pd
from textblob.classifiers import DecisionTreeClassifier
from textblob.classifiers import NaiveBayesClassifier
nltk.download('stopwords')
from nltk.corpus import stopwords
stopset = set(stopwords.words('english'))
ignoreDT = True
def remove_prefix(theList, prefix):
return [text[len(prefix):] if text.startswith(prefix) else text for text in theList]
def remove_stopwords(theList):
return [' '.join([word for word in text.split() if word not in stopset]) for text in theList]
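# Illustrative behaviour of the two helpers above (hypothetical inputs):
#   remove_prefix(['UCM Alpha', 'Beta'], 'UCM ')  ->  ['Alpha', 'Beta']
#   remove_stopwords(['the red car'])             ->  ['red car']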
if __name__ == '__main__':
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
train_data = remove_stopwords(train.Title)
train_target = remove_prefix(train.Team, 'UCM ')
test_data = remove_stopwords(test.Title)
test_id = test.Id
test_target = remove_prefix(test.Team, 'UCM ')
train = list(zip(train_data, train_target))
test = list(zip(test_data, test_target))
start_time = time.time()
cl = NaiveBayesClassifier(train)
# Compute accuracy
print("NaiveBayes Accuracy: {0}".format(cl.accuracy(test)))
# Show 10 most informative features
cl.show_informative_features(10)
print(cl.informative_features(10))
elapsed_time = time.time() - start_time
print(elapsed_time)
if (not ignoreDT):
start_time = time.time()
cl = DecisionTreeClassifier(train)
print("DecisionTree Accuracy: {0}".format(cl.accuracy(test)))
print(cl.pseudocode())
elapsed_time = time.time() - start_time
print(elapsed_time)
start_time = time.time()
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
class StemmedCountVectorizer(CountVectorizer):
def build_analyzer(self):
analyzer = super(StemmedCountVectorizer, self).build_analyzer()
return lambda doc: ([stemmer.stem(w) for w in analyzer(doc)])
from nltk.stem.snowball import SnowballStemmer
stemmer = SnowballStemmer("english", ignore_stopwords=True)
stemmed_count_vect = StemmedCountVectorizer(stop_words='english')
text_clf = Pipeline([('vect', stemmed_count_vect),
('tfidf', TfidfTransformer()),
('clf', MultinomialNB()),
])
text_clf.fit(train_data, train_target)
predicted = text_clf.predict(test_data)
print("MultinomialNB Accuracy: {0}".format(np.mean(predicted == test_target)))
df = pd.DataFrame(list(zip(test_data, predicted, test_target)))
df.to_csv('MB_list.csv', index=False)
elapsed_time = time.time() - start_time
print(elapsed_time)
start_time = time.time()
from sklearn.linear_model import SGDClassifier
text_clf = Pipeline([('vect', stemmed_count_vect),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier(loss='hinge', penalty='l2',
alpha=1e-3, random_state=42)),
])
text_clf.fit(train_data, train_target)
predicted = text_clf.predict(test_data)
print("SGD Accuracy: {0}".format(np.mean(predicted == test_target)))
df = pd.DataFrame(list(zip(test_id, test_data, predicted, test_target)))
df.to_csv('SGD_list.csv', index=False)
elapsed_time = time.time() - start_time
print(elapsed_time)
from sklearn import metrics
print(metrics.classification_report(test_target, predicted))
|
from .wgan import WGAN
|
# interpret.py
"""Parses tokenized statements into nodes"""
from syntax.tokenizer import tokenize
from syntax.token import TokenType
from syntax.node import Node
def parse(tokens: list) -> object:
"""
Reads tokens to create a simple abstract syntax tree
"""
nodes = []
if not tokens:
return nodes
while tokens:
# iterate action nodes
token, *tokens = tokens
if token.type is not TokenType.NAME:
raise Exception(f"{token} is not an NAME token")
node = Node(action=token)
while tokens:
token, *tokens = tokens
if token.type is TokenType.END:
break
node.data.append(token)
nodes.append(node)
return nodes
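# Illustrative behaviour (hypothetical token stream): for an input such as
# "move 1 2;" the tokenizer would emit a NAME token followed by data tokens and
# an END token, and parse() would return a single Node whose action is the NAME
# token and whose data list holds the remaining tokens. The concrete token
# grammar lives in syntax.tokenizer / syntax.node and is not shown here.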
def handle_input(user_input):
"""
function hook to pass into repl loop
"""
output = []
tokens = tokenize(user_input)
interpreted = parse(tokens)
if interpreted:
output.append(str(interpreted))
return output
if __name__ == "__main__":
from util.repl import user_input
from util.reader import from_file
from util.output_handler import handle_output
from_file(handle_input, handle_output, 'instructions.txt')
user_input(handle_input, handle_output)
|
from pymongo import MongoClient
import sys, argparse, random
from numpy import array
from sklearn import model_selection, neural_network, svm
from sklearn.preprocessing import StandardScaler
from progressbar import *
import logging, pickle
from statistics import mean
from sklearn.metrics import confusion_matrix
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
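# Note: despite its name, NeuralNetwork() below trains an SVM (svm.SVC with an
# RBF kernel), evaluates it with 10-fold cross-validation via accuracyFromTest(),
# and pickles the trained model to `filename`.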
def NeuralNetwork(filename):
client = MongoClient('mongodb://localhost:27017/')
db_ = client['ClearData']
collection_ = db_['TimeFrame5S']
cursor = collection_.find({}, {'_id': False})
toDrop = ['timeStampStart', 'timeStampEnd', 'timeWindow', 'classificationIn']
data = []
dataNormalActivity = []
dataUnusualActivity = []
for doc in cursor:
tmp = [v for (k,v) in sorted(doc.items()) if k not in toDrop]
tmp.append(doc['classificationIn'])
dataNormalActivity.append(tmp)
db_ = client['ClearDataAnomalies']
collection_ = db_['TimeFrame5S']
cursor = collection_.find({}, {'_id': False})
for doc in cursor:
tmp = [v for (k, v) in sorted(doc.items()) if k not in toDrop]
tmp.append(doc['classificationIn'])
dataUnusualActivity.append(tmp)
data.extend(dataNormalActivity)
data.extend(dataUnusualActivity)
random.shuffle(data) # randomize data order
label = [x[-1] for x in data] # extract output
data = [[float(y) for y in x[:-1]] for x in data] # extract input
normalizer = StandardScaler()
print(len(data[0]))
normalizedData = normalizer.fit_transform(data) # normalize features
dataNumPy = array(normalizedData)
labelNumPy = array(label)
XTrain = dataNumPy[:int(len(dataNumPy) * 0.8)]
yTrain = labelNumPy[:int(len(dataNumPy) * 0.8)]
XTest = dataNumPy[int(len(dataNumPy) * 0.8):]
yTest = labelNumPy[int(len(dataNumPy) * 0.8):]
finalAcc_SVCRArr = []
trueNegatives, falsePositives, falseNegatives, truePositives = [], [], [], []
iterations = 300
bar = ProgressBar(widgets=['Progress: ', Percentage(), ' ', Bar(marker='#', left='[', right=']'),
' ', ETA(), ' ', FileTransferSpeed()], maxval=iterations)
bar.start()
for i in range(0, iterations):
bar.update(i)
modelSVCR = svm.SVC(kernel="rbf", C=1) # instantiate SVM
accuracyTestSVCR, accuracyTrainSVCR = accuracyFromTest(XTrain, yTrain, modelSVCR) # train/cross-validation
finalAcc_SVCR = (modelSVCR.score(XTest, yTest) * 100) # final test
yPred = modelSVCR.predict(XTest)
matrix = confusion_matrix(yTest, yPred).ravel()
trueNegatives.append(matrix[0])
falsePositives.append(matrix[1])
falseNegatives.append(matrix[2])
truePositives.append(matrix[3])
pickle.dump(modelSVCR, open(filename, 'wb'))
finalAcc_SVCRArr.append(finalAcc_SVCR)
bar.finish()
SVCR = mean(finalAcc_SVCRArr)
meanFalsePositives = mean(falsePositives)
meanFalseNegatives = mean(falseNegatives)
print('finalAcc_SVCR = ', SVCR)
print('Mean False Positives = ', meanFalsePositives)
print('Mean False Negatives = ', meanFalseNegatives)
def test_neuralNetwork_btm(filename):
print('########################')
print('# 1 - Normal data #')
print('# 2 - Anomaly data #')
print('# 3 - Both data #')
print('########################')
method = input('Choose a method: ')
if (int(method) not in [1, 2, 3]):
print('Invalid input')
exit()
client = MongoClient('mongodb://localhost:27017/')
if int(method) == 2:
db_ = client['ClearAnomalies']
else:
db_ = client['ClearUserProfiling']
collection_ = db_['TimeFrame2M']
cursor = collection_.find({}, {'_id': False})
toDrop = ['timeStampStart', 'timeStampEnd', 'timeWindow', 'classificationIn', 'skewBytesIn', 'kurtosisBytesIn',
'firstQBytesIn', 'thirdQBytesIn',
'skewBytesOut', 'kurtosisBytesOut', 'firstQBytesOut', 'thirdQBytesOut', 'skewDeltaIn', 'kurtosisDeltaIn',
'firstQDeltaIn', 'thirdQDeltaIn',
'skewDeltaOut', 'kurtosisDeltaOut', 'firstQDeltaOut', 'thirdQDeltaOut', 'skewDistance',
'kurtosisDistance', 'firstQDistance', 'thirdQDistance',
'skewAngle', 'kurtosisAngle', 'firstQAngle', 'thirdQAngle', 'skewConnectionToSameIP',
'kurtosisConnectionToSameIP',
'firstQConnectionToSameIP', 'thirdQConnectionToSameIP']
data = []
dataNormalActivity = []
dataUnusualActivity = []
for doc in cursor:
tmp = [v for (k, v) in sorted(doc.items()) if k not in toDrop]
tmp.append(doc['classificationIn'])
dataNormalActivity.append(tmp)
if int(method) == 3:
db_ = client['ClearAnomalies']
collection_ = db_['TimeFrame2M']
cursor = collection_.find({}, {'_id': False})
for doc in cursor:
tmp = [v for (k, v) in sorted(doc.items()) if k not in toDrop]
tmp.append(doc['classificationIn'])
dataUnusualActivity.append(tmp)
data.extend(dataNormalActivity)
if int(method) == 3:
data.extend(dataUnusualActivity)
random.shuffle(data) # randomize data order
label = [x[-1] for x in data] # extract output
data = [[float(y) for y in x[:-1]] for x in data] # extract input
normalizer = StandardScaler()
normalizedData = normalizer.fit_transform(data) # normalize features
dataNumPy = array(normalizedData)
labelNumPy = array(label)
XTest = dataNumPy[int(len(dataNumPy) * 0.6):]
yTest = labelNumPy[int(len(dataNumPy) * 0.6):]
finalAcc_SVCRArr = []
trueNegatives, falsePositives, falseNegatives, truePositives = [], [], [], []
with (open(filename, 'rb')) as file:
while True:
try:
modelSVCR = pickle.load(file)
except EOFError:
break
finalAcc_SVCR = (modelSVCR.score(XTest, yTest) * 100) # final test
yPred = modelSVCR.predict(XTest)
matrix = confusion_matrix(yTest, yPred).ravel()
trueNegatives.append(matrix[0])
falsePositives.append(matrix[1])
falseNegatives.append(matrix[2])
truePositives.append(matrix[3])
finalAcc_SVCRArr.append(finalAcc_SVCR)
file.close()
SVCR = mean(finalAcc_SVCRArr)
meanFalsePositives = mean(falsePositives)
meanFalseNegatives = mean(falseNegatives)
print('Final accuracy = ', SVCR)
print('Mean False Positives = ', meanFalsePositives)
print('Mean False Negatives = ', meanFalseNegatives)
def test_neuralNetwork_stm(filename):
print('########################')
print('# 1 - Normal data #')
print('# 2 - Anomaly data #')
print('# 3 - Both data #')
print('########################')
method = input('Choose a method: ')
if (int(method) not in [1, 2, 3]):
print('Invalid input')
exit()
client = MongoClient('mongodb://localhost:27017/')
if int(method) == 2:
db_ = client['ClearAnomalies']
else:
db_ = client['ClearUserProfiling']
collection_ = db_['TimeFrame5S']
cursor = collection_.find({}, {'_id': False})
toDrop = ['timeStampStart', 'timeStampEnd', 'timeWindow', 'classificationIn']
data = []
dataNormalActivity = []
dataUnusualActivity = []
for doc in cursor:
tmp = [v for (k, v) in sorted(doc.items()) if k not in toDrop]
tmp.append(doc['classificationIn'])
dataNormalActivity.append(tmp)
if int(method) == 3:
db_ = client['ClearAnomalies']
collection_ = db_['TimeFrame5S']
cursor = collection_.find({}, {'_id': False})
for doc in cursor:
tmp = [v for (k, v) in sorted(doc.items()) if k not in toDrop]
tmp.append(doc['classificationIn'])
dataUnusualActivity.append(tmp)
data.extend(dataNormalActivity)
if int(method) == 3:
data.extend(dataUnusualActivity)
random.shuffle(data) # randomize data order
label = [x[-1] for x in data] # extract output
data = [[float(y) for y in x[:-1]] for x in data] # extract input
normalizer = StandardScaler()
normalizedData = normalizer.fit_transform(data) # normalize features
dataNumPy = array(normalizedData)
labelNumPy = array(label)
XTest = dataNumPy[int(len(dataNumPy) * 0.6):]
yTest = labelNumPy[int(len(dataNumPy) * 0.6):]
finalAcc_SVCRArr = []
trueNegatives, falsePositives, falseNegatives, truePositives = [], [], [], []
with (open(filename, 'rb')) as file:
while True:
try:
modelSVCR = pickle.load(file)
except EOFError:
break
finalAcc_SVCR = (modelSVCR.score(XTest, yTest) * 100) # final test
yPred = modelSVCR.predict(XTest)
matrix = confusion_matrix(yTest, yPred).ravel()
trueNegatives.append(matrix[0])
falsePositives.append(matrix[1])
falseNegatives.append(matrix[2])
truePositives.append(matrix[3])
finalAcc_SVCRArr.append(finalAcc_SVCR)
file.close()
SVCR = mean(finalAcc_SVCRArr)
meanFalsePositives = mean(falsePositives)
meanFalseNegatives = mean(falseNegatives)
print('Final accuracy = ', SVCR)
print('Mean False Positives = ', meanFalsePositives)
print('Mean False Negatives = ', meanFalseNegatives)
def goTrain_Test(model, X_train, X_test, y_train, y_test): #training function
model.fit(X_train, y_train)
return model.score(X_train, y_train) * 100, model.score(X_test, y_test) * 100
def accuracyFromTest(XTrain, yTrain, model):
kf = model_selection.KFold(n_splits=10,shuffle=True) #k-fold
accuracyTrain = []
accuracyTest = []
for train_index, test_index in kf.split(XTrain):
X_train, X_test = XTrain[train_index], XTrain[test_index]
y_train, y_test = yTrain[train_index], yTrain[test_index]
accuracy_train, accuracy_test = goTrain_Test(model, X_train, X_test, y_train, y_test)
accuracyTrain.append(accuracy_train)
accuracyTest.append(accuracy_test)
accuracyTrain = sum(accuracyTrain) / len(accuracyTrain)
accuracyTest = sum(accuracyTest) / len(accuracyTest)
return accuracyTest, accuracyTrain
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='MachineLearning is a script to train, save and load the agent using Support Vector Machine. '
'After the load, it tests and presents the results.')
parser.add_argument('-d', action="store_true", dest='dump',
help='Dump Ml objects to file')
parser.add_argument('-f', action="store", dest='file',
help='File to save or load the serialized object')
parser.add_argument('-tb', action="store_true", dest='test_big',
help='Test big time window.')
parser.add_argument('-ts', action="store_true", dest='test_small',
help='Test small time window.')
args = parser.parse_args()
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = vars(args)
if args['file']:
if args['dump']:
NeuralNetwork(args['file'])
elif args['test_big']:
test_neuralNetwork_btm(args['file'])
elif args['test_small']:
test_neuralNetwork_stm(args['file'])
else:
parser.print_help()
else:
parser.print_help()
|
#!/usr/bin/env python3
import configparser
import logging
import os
import shlex
import socket
import subprocess
import threading
from datetime import timedelta
from glob import glob
from sys import argv
from time import sleep
import psutil
print("pineapple.py v1 - the scff daemon")
fpid = os.fork()
if fpid != 0:
# parent process: exit here and let the forked child keep running as the daemon (PID fpid)
exit(0)
class Thread(threading.Thread):
def __init__(self, shellcmd):
threading.Thread.__init__(self)
self.shellcmd = shellcmd
def run(self):
logging.info("Start Thread: " + str(self.shellcmd))
for outp in run_cmd(self.shellcmd):
echolog(">>" + str(outp))
print("OP", outp)
logging.info("Exiting Thread: " + str(self.shellcmd))
logging.basicConfig(
filename="scff-daemon.log", \
format='%(asctime)s %(levelname)s: %(message)s', \
datefmt="%H:%M:%S", \
level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler())
logFormatter = logging.Formatter(
"%(asctime)s [%(levelname)s] %(message)s", "%H:%M:%S")
rootLogger = logging.getLogger()
"""
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
"""
def list2str(alist):
astr = ""
for word in alist:
astr += word + " "
return astr
def getRunningFuzzers():
proc_list = []
for proc in psutil.process_iter():
if any(fuzzer in s for s in proc.cmdline()):
proc_list.append(proc)
if any("crash-watch" in s for s in proc.cmdline()):
proc_list.append(proc)
if any(ROVING_CLIENT in s for s in proc.cmdline()):
proc_list.append(proc)
if any(ROVING_SERVER in s for s in proc.cmdline()):
proc_list.append(proc)
if any(FUZZDIR + "/target" in s for s in proc.cmdline()):
proc_list.append(proc)
if any(targ in s for s in proc.cmdline()):
if proc not in proc_list:
proc_list.append(proc)
# hard coded, in the future someone might use another fuzzer!
if any("afl-fuzz" in s for s in proc.cmdline()):
if proc not in proc_list:
proc_list.append(proc)
proc_list = set(proc_list) # easy way to filter duplicates ;)
proc_list = list(proc_list)
return proc_list
def get_uptime():
with open('/proc/uptime', 'r') as file:
uptime = str(timedelta(seconds=float(file.readline().split()[0])))
return uptime[:-7]
def shcmd(shellcmd):
Thread(shellcmd).start()
def run_cmd(command_line):
command_line_args = shlex.split(command_line)
logging.info('Subprocess: "' + command_line + '"')
try:
with subprocess.Popen(
command_line_args,
stdout=subprocess.PIPE,
bufsize = 1,
universal_newlines=True) as command_line_process:
for line in command_line_process.stdout:
echolog(" [" + command_line_args[0] + "] " + str(line))
except Exception as exception:
logging.warning("Exception occurred: " + str(exception))
logging.warning("Subprocess failed")
return False
else:
# no exception was raised
logging.info("Subprocess " + command_line + " finished")
return True
def echolog(msg, level="info"):
if level == "info":
logging.info(msg)
elif level == "warn":
logging.warning(msg)
elif level == "error":
logging.error(msg)
return msg + "\n"
def is_distributed():
for proc in psutil.process_iter():
if any(ROVING_SERVER in s for s in proc.cmdline()):
return "True"
return "False"
def print_status():
ret = ""
if os.path.isfile(".scff/distributed"):
mode = "Distributed"
else:
mode = str(CPU_CORES) + " * " + fuzzer
ret = ("\nMode: " + mode + " \tUptime: " + get_uptime() + " \tLoad: " \
+ str(os.getloadavg()[0])[:4] + "\tCPU:" \
+ str(int(psutil.cpu_percent(interval=0.2))) \
+ "%")
if len(getRunningFuzzers()) >= 1:
ret += ("\nS CMDLINE PID CPU% MEM%")
for proc in getRunningFuzzers():
if proc.status() == "sleeping":
status = "S"
elif proc.status() == "running":
status = "R"
elif proc.status() == "stopped":
status = "T"
else:
status = "D"
cmdline = list2str(proc.cmdline())
ret += ( \
"\n{} {:.42} {} {} {}".format( \
status, \
cmdline, \
" " * (45 - min(len(cmdline), 42)) + str(proc.pid), \
proc.cpu_percent(interval=0.1), \
str(round(proc.memory_percent(), 2))) \
)
else:
ret += ("\n\t\t*** No running fuzzers found! ***")
return ret
def start():
ret = ""
ret += echolog("Killing remaining fuzzers first (if any)")
ret += stop()
if os.path.isfile(".scff/distributed"):
if os.path.isfile(".scff/master"):
if os.path.isfile(ROVING_SERVER):
ret += echolog("Starting Roving server ...")
shcmd(ROVING_SERVER + " " + FUZZDIR)
shcmd("./crash-watch.py")
sleep(3)
shcmd(ROVING_CLIENT + " 127.0.0.1:8000 " + args)
else:
ret += echolog("./roving/server not found!", "error")
else:
sleep(4) # wait for the server
# get master ip from filename - TODO: VERY BAD HACK!
mip = glob(".scff/172*")[0][6:]
for i in range(CPU_CORES):
ret += echolog("Starting client #" + str(i+1))
shcmd(ROVING_CLIENT + " " + mip + ":8000 " + args)
# single mode
else:
if CPU_CORES == 1:
try:
shcmd("fuzzers/" + fuzzer + " " + str(-1))
ret += echolog("Started fuzzers/" + fuzzer + " (single core mode)")
except:
logging.error("Failed to start " + fuzzer)
else:
for i in range(CPU_CORES):
try:
shcmd("fuzzers/" + fuzzer + " " + str(i))
ret += echolog("Started fuzzers/" + fuzzer \
+ " (PAR_ID: " + str(i) + ")")
except:
logging.error("Failed to start " + fuzzer)
shcmd("./crash-watch.py")
return ret
def kill():
ret = ""
for proc in getRunningFuzzers():
ret += echolog("killing " + list2str(proc.cmdline()))
proc.kill()
return ret
def stop():
ret = ""
try:
for proc in getRunningFuzzers():
ret += echolog("terminating " + list2str(proc.cmdline()))
proc.terminate()
except:
pass
sleep(1)
for proc in getRunningFuzzers():
ret += echolog("Process "+ list2str(proc.cmdline()) \
+ " is still alive!", "warn")
return ret
def pause():
ret = ""
for proc in getRunningFuzzers():
ret += echolog("pausing " + list2str(proc.cmdline()))
proc.suspend()
return ret
def resume():
ret = ""
for proc in getRunningFuzzers():
ret += echolog("resuming " + list2str(proc.cmdline()))
proc.resume()
return ret
def count():
return str((len(getRunningFuzzers())))
def checkcmd(data, clientsock):
echolog("recieved: '" + data +"'")
if data == "status":
ret = print_status()
elif data == "start":
ret = start()
elif data == "stop":
ret = stop()
elif data == "count":
ret = count()
elif data == "kill":
ret = kill()
elif data == "resume":
ret = resume()
elif data == "is-distributed":
ret = is_distributed()
elif data == "ping":
ret = "pong"
elif data == "quit":
clientsock.close()
exit()
else:
ret = echolog("INVALID CMD: " + data, "warn")
ret += "\n"
clientsock.send(bytes(ret.encode('UTF-8')))
def handler(clientsock, addr):
while 1:
data = clientsock.recv(BUFSIZ)
if not data:
break
data = data.decode('UTF-8')
data = data.replace('\n', '')
if data not in CMDS:
clientsock.send(bytes(("not in" + str(CMDS) + "!\n") \
.encode('UTF-8')))
else:
checkcmd(data, clientsock)
echolog("closing socket")
clientsock.close()
CMDS = ("start", "pause", "resume", "stop", "report", "status", "kill", \
"count", "is-distributed", "ping", "quit")
CONFIG_FILE = os.path.expanduser("~") + "/.scff/scff.proj"
ROVING_CLIENT = os.path.expanduser("~") + "/roving/client"
ROVING_SERVER = os.path.expanduser("~") + "/roving/server"
CPU_CORES = psutil.cpu_count()
FUZZDIR = os.path.realpath(os.path.expanduser("~") + "/.scff/fuzzdir")
targ = FUZZDIR + "/target"
fuzzer = ""
args = ""
if not os.path.isfile(CONFIG_FILE):
echolog(CONFIG_FILE + " NOT found!", "error")
exit(3)
cfg = configparser.ConfigParser()
cfg.read(CONFIG_FILE)
try:
args = cfg['FUZZING']['args']
fuzzer = cfg['FUZZING']['fuzzer']
except:
echolog("Configfile seems to be corrupt!", "error")
exit(3)
if __name__ == "__main__":
HOST = "localhost"
if len(argv) == 2:
PORT = int(argv[1])
else:
PORT = 5555
BUFSIZ = 1024
ADDR = (HOST, PORT)
serversock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
serversock.bind(ADDR)
serversock.listen(2)
except OSError:
echolog("Port in use?!")
exit(1)
while 1:
echolog("waiting for connection...")
clientsock, addr = serversock.accept()
echolog("...connected from:" + str(addr))
handler(clientsock, addr)
echolog("Pineapple is shutting down. Goodbye :)")
|
# -*- coding=utf-8 -*-
#
# NER data sets are stored in two formats (entity-style and tag-style).
# The default tagging scheme is BIOES.
import json
from typing import Dict, Any, Tuple, Optional, List
from jionlp import logging
__all__ = ['entity2tag', 'tag2entity']
def entity2tag(token_list: List[str], entities: List[Dict[str, Any]],
formater='BIOES'):
''' Convert entity-style annotations into tag-style (BIOES) annotations. If
annotations overlap, the later entity is automatically ignored and dropped.
Args:
token_list(List[str]): the tokenized text.
entities(List[Dict[str, Any]]): the entities, each with 'text', 'offsets' and 'type' keys.
formater(str): the tagging scheme to use.
Return:
List[str]: the tag sequence, aligned with token_list.
Examples:
>>> token_list = list('胡静静在水利局工作。')
>>> entities = [
{'text': '胡静静', 'offsets': [0, 3], 'type': 'Person'},
{'text': '水利局', 'offsets': [4, 7], 'type': 'Orgnization'}]
>>> print(entity2tag(token_list, entities))
['B-Person', 'I-Person', 'E-Person', 'O', 'B-Orgnization',
'I-Orgnization', 'E-Orgnization', 'O', 'O', 'O']
'''
tags = ['O' for i in range(len(token_list))]
flag = 0 # end offset of the previous entity, used to detect overlapping annotations
for idx, entity in enumerate(entities):
if entity['offsets'][1] < flag: # overlapping annotation, so drop it
if 1 < idx + 1 < len(entities):
logging.warning(
'The entity {} is overlapped with {}.'.format(
json.dumps(entity, ensure_ascii=False),
json.dumps(entities[idx - 1], ensure_ascii=False)))
else:
if entity['offsets'][1] - entity['offsets'][0] == 1:
tags[entity['offsets'][0]] = 'S-' + entity['type']
else:
tags[entity['offsets'][0]] = 'B-' + entity['type']
if entity['offsets'][1] - entity['offsets'][0] > 2:
for j in range(entity['offsets'][0] + 1,
entity['offsets'][1] - 1):
tags[j] = 'I-' + entity['type']
tags[entity['offsets'][1] - 1] = 'E-' + entity['type']
flag = entity['offsets'][1]
return tags
def tag2entity():
''' Convert tag-style annotations back into entity-style annotations. '''
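# tag2entity above is left unimplemented. Below is a minimal, hypothetical sketch
# of the inverse conversion for BIOES tags, for illustration only; the helper name
# and the 'text'/'offsets'/'type' keys simply mirror entity2tag and are not part
# of the original module.
def _tag2entity_sketch(token_list, tags):
    entities = []
    start = None
    for idx, tag in enumerate(tags):
        if tag.startswith('S-'):
            # single-token entity
            entities.append({'text': token_list[idx],
                             'offsets': [idx, idx + 1],
                             'type': tag[2:]})
        elif tag.startswith('B-'):
            start = idx
        elif tag.startswith('E-') and start is not None:
            # close the entity opened by the matching B- tag
            entities.append({'text': ''.join(token_list[start:idx + 1]),
                             'offsets': [start, idx + 1],
                             'type': tag[2:]})
            start = None
    return entities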
|
#factorial.py
def fact(n):
if n == 0:
return 1
else:
return n * fact(n-1)
print(fact(4))
print(fact(10))
|
import sys
from setuptools import setup, find_packages
requires = [
"pyramid==1.10.8",
"SQLAlchemy==1.4.3",
"transaction",
"pyramid_mako",
"pyramid_tm",
"pyramid_debugtoolbar",
"zope.sqlalchemy",
"wtforms",
"wtdojo",
"nose",
"mako",
"python-dateutil",
]
if sys.version_info[:3] < (2, 5, 0):
requires.append("pysqlite")
setup(
name="PyCK",
version="0.10.7",
description="Python Code Karigar - Web Framework",
long_description="""PyCK is an "opinionated" web framework based on Pyramid that makes choices of
ORM (SQLAlchemy), Templates (Mako) etc. and aims at building more reusable components on top of these choices.""",
classifiers=[
"Programming Language :: Python",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author="Kashif Iftikhar",
author_email="kashif@compulife.com.pk",
url="http://pyck.compulife.com.pk",
keywords="web wsgi bfg pylons pyramid",
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite="combined_apps",
install_requires=requires,
entry_points="""\
[pyramid.scaffold]
pyck=pyck.scaffolds:PyCKTemplate
""",
)
|
from aws_data_tools.client import APIClient # noqa: F401
class TestAPIClient:
"""Test the APIClient class"""
def test_api(self):
"""Test API calls with the client"""
assert "pass" == "pass"
def test_init_with_client(self):
"""Test initializing an APIClient with a custom botocore client being passed"""
assert "pass" == "pass"
def test_init_with_client_kwargs(self):
"""Test APIClient init with kwargs for the botocore client"""
assert "pass" == "pass"
def test_init_with_session(self):
"""Test initializing an APIClient with a custom botocore session being passed"""
assert "pass" == "pass"
def test_init_with_session_kwargs(self):
"""Test APIClient init with kwargs for the botocore session"""
assert "pass" == "pass"
|
import os
import sys
from fabric import task
from invoke import run as fab_local
import django
# ----------------------------------------------------
# Add this directory to the python system path
# ----------------------------------------------------
FAB_BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(FAB_BASE_DIR)
# ----------------------------------------------------
# Set the DJANGO_SETTINGS_MODULE, if it's not already
# ----------------------------------------------------
KEY_DJANGO_SETTINGS_MODULE = 'DJANGO_SETTINGS_MODULE'
if KEY_DJANGO_SETTINGS_MODULE not in os.environ:
if FAB_BASE_DIR == '/var/webapps/PSI':
os.environ.setdefault(KEY_DJANGO_SETTINGS_MODULE,
'psiproject.settings.production')
else:
os.environ.setdefault(KEY_DJANGO_SETTINGS_MODULE,
'psiproject.settings.local')
# ----------------------------------------------------
# Django setup
# ----------------------------------------------------
try:
django.setup()
except Exception as e:
print("WARNING: Can't configure Django. %s" % e)
def run_local_cmd(cmd, description=None):
"""Run a command on the host"""
print('-' * 40)
if description:
print(description)
print(cmd)
print('-' * 40)
fab_local(cmd)
@task
def run_rook(context):
"""Run the rook server via the command line"""
cmd = 'cd rook; Rscript rook_nonstop.R'
run_local_cmd(cmd, run_rook.__doc__)
# @task
# def create_django_superuser():
# """(Test only) Create superuser with username: dev_admin. Password is printed to the console."""
# User.objects.create_superuser('admin', '', 'admin')
@task
def collect_static(context):
"""Run the Django collectstatic command"""
fab_local('python manage.py collectstatic --noinput')
@task
def init_db(context):
"""Initialize the django database--if needed"""
cmd = ('python manage.py check;'
'python manage.py migrate')
print("about to run init_db")
run_local_cmd(cmd, init_db.__doc__)
@task
def run_web(context):
"""Run the django web app"""
init_db(context)
print("Run web server")
cmd = ('python manage.py runserver 8080')
run_local_cmd(cmd, run_web.__doc__)
@task
def create_django_superuser(context):
"""(Test only) Create superuser with username: dev_admin. Password is printed to the console."""
from psi_apps.psi_auth.models import User
import random
dev_admin_username = 'dev_admin'
#User.objects.filter(username=dev_admin_username).delete()
if User.objects.filter(username=dev_admin_username).count() > 0:
print('A "%s" superuser already exists' % dev_admin_username)
return
admin_pw = 'admin'
#''.join(random.choice(string.ascii_lowercase + string.digits)
# for _ in range(7))
new_user = User(username=dev_admin_username,
first_name='Dev',
last_name='Administrator',
is_staff=True,
is_active=True,
is_superuser=True)
new_user.set_password(admin_pw)
new_user.save()
print('superuser created: "%s"' % dev_admin_username)
print('password: "%s"' % admin_pw)
|
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
import re
from django.core.cache import cache
from django.db import models
class ParameterManager(models.Manager):
def get_choices(self):
cache_key = 'metrics.parameters.{0}'.format(
self.model.__name__.lower())
choices = cache.get(cache_key)
if choices is None:
choices = []
processed = {}
unprocessed = list(self.all())
pending = len(unprocessed)
i = 0
while pending and i < 10:
j = 0
while j < pending:
obj = unprocessed[j]
parent_key = obj.parent_id
if parent_key is None:
level = choices
elif parent_key in processed:
level = processed[parent_key][3]
else:
j += 1
continue
choice = (obj.pk, obj.token.lower(), obj.regex, [])
level.append(choice)
processed[obj.pk] = choice
del unprocessed[j]
pending -= 1
continue
i += 1
cache.set(cache_key, choices)
return choices
def detect(self, user_agent):
return self._detect(user_agent.lower(), self.get_choices())
def _detect(self, ua, choices):
for pk, token, regex, children in choices:
if regex:
if re.search(token, ua) is not None:
child_pk = self._detect(ua, children)
return child_pk if child_pk is not None else pk
else:
if token in ua:
child_pk = self._detect(ua, children)
return child_pk if child_pk is not None else pk
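# Descriptive note: get_choices() returns a cached tree of
# (pk, lower-cased token, regex flag, children) tuples, nesting each parameter
# under its parent_id; objects whose parent has not been processed yet are retried
# on a later pass (at most 10 passes). _detect() then walks that tree against a
# lower-cased user agent, returning the deepest matching child's pk and falling
# back to the parent's pk when no child matches.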
|
import cv2
import matplotlib.pyplot as plt
import cvlib as cv
from cvlib.object_detection import draw_bbox
import os
import tensorflow
from datetime import datetime
import time
os.environ["CUDA_VISIBLE_DEVICES"]="-1"
total = 0
print(" ")
print("Hold down Shift T to stop the video feed and process.")
print("There are a few open feeds in the code. Uncomment the one you wish to view. (Belgium, Houston, Spain)")
print("yolo v4 is more accurate than yolo-tiny tensors, but a bit slower... You can uncomment code to see for yourself.")
now = datetime.now()
start = now.strftime("%H:%M:%S")
#cap = cv2.VideoCapture('http://166.248.188.1/mjpg/video.mjpg') #houston
#cap = cv2.VideoCapture('http://46.151.102.171:8082/?action=stream') #Ghent Belgium
cap = cv2.VideoCapture('http://86.127.235.130:81/?action=stream') #spain
if (cap.isOpened() == False):
print("Error opening Video Stream at " + str(start))
while(cap.isOpened()):
fps = cap.get(cv2.CAP_PROP_FPS)
print("Frame Rate=" + str(fps))
while fps > 0 :
ret, frame = cap.read()
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
cv2.imshow("Capturing", frame)
# There are multiple accurate pretrained models available, here are 4. Simply choose one, and run the code to see.
# bbox, label, conf = cv.detect_common_objects(frame, confidence=0.25, model='yolov3', enable_gpu=False)
# bbox, label, conf = cv.detect_common_objects(frame, confidence=0.25, model='yolov3-tiny', enable_gpu=False)
bbox, label, conf = cv.detect_common_objects(frame, confidence=0.25, model='yolov4', enable_gpu=False)
# bbox, label, conf = cv.detect_common_objects(frame, confidence=0.25, model='yolov4-tiny', enable_gpu=False)
output_image = draw_bbox(frame , bbox , label , conf)
vehicles = int(label.count('car')) + int(label.count('bus')) + int(label.count('motorcycle')) + int(label.count('truck'))
print('Counted vehicles = ' + str(vehicles) + ' at:' + current_time)
total += vehicles
time.sleep(4)
#Press Q to exit
if cv2.waitKey(1) & 0xff == ord('T'):
break
if cv2.waitKey(1) & 0xff == ord('T'):
break
end = datetime.now()
end = end.strftime("%H:%M:%S")
print("Vehicles Counted: " + str(total) + " between " + str(start) + " and " + str(end))
cap.release()
cv2.destroyAllWindows()
|
import time
from utils import epoch_time_to_digital
DEFAULT_EXPIRY_S = 60 * 60 * 4
class WouldPlay:
def __init__(self, player, game, for_time=None, expires_at=None):
self.player = player
self.game = game
self.recorded_at = time.time()
self.for_time = for_time
if self.for_time:
self.expires_at = self.for_time + DEFAULT_EXPIRY_S
else:
self.expires_at = expires_at or (self.recorded_at + DEFAULT_EXPIRY_S)
def __str__(self):
return '<%s would play %s (for time: %s, recorded at: %s, expires at: %s)>' % (self.user, self.game, epoch_time_to_digital(self.for_time), epoch_time_to_digital(self.recorded_at), epoch_time_to_digital(self.expires_at))
def __repr__(self):
return str(self)
@property
def user(self):
return self.player.name
@property
def expired(self):
return self.expires_at <= time.time()
@property
def second_recorded_at(self):
return int(self.recorded_at)
def remove_for_time(self):
self.for_time = None
def __eq__(self, other):
if type(other) is type(self):
return (self.player.id == other.player.id and self.game.name == other.game.name)
else:
return False
def __hash__(self):
return hash((self.player.id, self.game.name))
class DB:
def __init__(self):
self._store = set()
def record_would_play(self, player, game, for_time):
if not game.name:
raise RuntimeError('Cannot record for a game with no name')
self._prune_expired()
self._update_for_times()
wp = WouldPlay(player=player, game=game, for_time=for_time)
if wp in self._store:
self._store.remove(wp)
self._store.add(wp)
self._print_db()
return wp
def cancel_would_plays(self, player):
self._store = set([wp for wp in self._store if wp.player != player])
def get_players_for_game(self, game):
return [wp.player for wp in self.get_would_plays_for_game(game)]
def get_would_plays(self):
self._prune_expired()
self._update_for_times()
return sorted(self._store, key=lambda x: x.recorded_at)
def get_would_plays_for_game(self, game):
return [wp for wp in self.get_would_plays() if wp.game.name == game.name]
def get_last_would_play(self, game):
if game:
sorted_wps = self.get_would_plays_for_game(game)
else:
sorted_wps = self.get_would_plays()
return sorted_wps[-1] if sorted_wps else []
def get_last_would_plays_at_same_time(self):
sorted_wps = self.get_would_plays()
if sorted_wps:
most_recent = sorted_wps[-1]
return [s for s in sorted_wps if s.second_recorded_at == most_recent.second_recorded_at]
return []
def get_ready_would_plays_for_game(self, game):
return [wp for wp in self.get_would_plays() if ((wp.game.name == game.name) and (wp.for_time is None))]
def get_ready_players_for_game(self, game):
return [wp.player for wp in self.get_ready_would_plays_for_game(game)]
def get_unready_would_plays_for_game(self, game):
return [wp for wp in self.get_would_plays() if ((wp.game.name == game.name) and (wp.for_time is not None) and (wp.for_time > time.time()))]
def get_unready_players_for_game(self, game):
return [wp.player for wp in self.get_unready_would_plays_for_game(game)]
def get_would_plays_ready_at_time(self, game, ftime):
would_plays = self.get_would_plays_for_game(game)
return [wp for wp in would_plays if ((wp.expires_at > ftime) and ((wp.for_time is None) or (wp.for_time <= ftime)))]
def _prune_expired(self):
# why can't I do self.prune(wp -> wp.expired)
self._store = set([wp for wp in self._store if not wp.expired])
def _update_for_times(self):
for wp in self._store:
if wp.for_time is not None and wp.for_time < time.time():
wp.remove_for_time()
def _print_db(self):
for x in self._store:
print(x)
def clear_game(self, game):
self._store = set([wp for wp in self._store if wp.game.name != game.name])
db = DB()
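# Illustrative usage (hypothetical player/game objects exposing .id and .name):
#   db.record_would_play(player, game, for_time=None)               # ready right now
#   db.get_ready_players_for_game(game)                             # -> [player]
#   db.record_would_play(player, game, for_time=time.time() + 3600)
#   db.get_unready_players_for_game(game)                           # -> [player]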
|
from torch import nn
from torchvision.models import googlenet, vgg16 , vgg19, resnet152, resnet50
model_dict ={
'googlenet': googlenet,
'vgg16': vgg16 ,
'vgg19':vgg19,
'resnet152':resnet152, # TODO Generate Perturbations
'resnet50':resnet50 # TODO Generate Perturbations
}
# Fixed Architecture: Weights will be updated by Backprop.
class AdveraryGenerator(nn.Module):
def __init__(self,nz,e_lim):
super(AdveraryGenerator, self).__init__()
self.e_lim = e_lim
self.nz = nz
self.main = nn.Sequential(
nn.ConvTranspose2d( in_channels=nz,out_channels= 1024, kernel_size=4, stride=1, padding=0, bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(True),
# state size. (ngf*8) x 4 x 4
nn.ConvTranspose2d(1024, 512, 4, 2, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(True),
# state size. (ngf*4) x 8 x 8
nn.ConvTranspose2d( 512, 256, 4, 2, 1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(True),
# state size. (ngf*2) x 16 x 16
nn.ConvTranspose2d(256, 128, 4, 2, 2, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(True),
# state size. (ngf) x 32 x 32
nn.ConvTranspose2d( 128, 64, 4, 2, 2, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(True),
# state size. (nc) x 64 x 64
nn.ConvTranspose2d( 64, 3, 4, 4,4, bias=False),
nn.BatchNorm2d(3),
nn.ReLU(True),
nn.Tanh()
)
def forward(self, x):
return self.e_lim * self.main(x) # Scaling of ε
# move Generator to GPU if available
# adversarygen=AdveraryGenerator(e_lim).to(device)
|
from django.conf.urls import re_path
from .views import FilterQueryView
from . import settings
urlpatterns = [
re_path(r'^(?P<app_label>\w+)/(?P<model>\w+)/{}$'.format(settings.URL_PATH), FilterQueryView.as_view()),
re_path(r'^(?P<app_label>\w+)/(?P<model>\w+)/{}(?P<pk>\d+)/$'.format(settings.URL_PATH), FilterQueryView.as_view()),
]
|
from azsc.handlers.az.Generic import *
from azsc.handlers.az.EventGrid import *
from azsc.handlers.az.EventHubs import *
from azsc.handlers.az.ResourceGroup import *
from azsc.handlers.az.Storage import *
from azsc.handlers.az.CosmosDB import *
from azsc.handlers.az.AppService import *
from azsc.handlers.az.FunctionApp import *
from azsc.handlers.az.Extension import *
from azsc.handlers.az.IoT import *
|
import logging
from injector import inject
import app_config
from microsoft_graph import MicrosoftGraph
class MicrosoftGraphImages:
@inject
def __init__(self, graph: MicrosoftGraph):
self.graph = graph
def query_images(self):
images = self.graph.query(app_config.MSG_ENDPOINT_IMAGES).json()
return images
|
#!/usr/bin/env python
from eth_tester.exceptions import TransactionFailed
from pytest import raises, mark
from utils import longTo32Bytes, longToHexString, fix, AssertLog, BuyWithCash, nullAddress
from constants import BID, ASK, YES, NO
def test_cancelBid(contractsFixture, cash, market, universe):
createOrder = contractsFixture.contracts['CreateOrder']
cancelOrder = contractsFixture.contracts['CancelOrder']
orders = contractsFixture.contracts['Orders']
orderType = BID
amount = fix(1)
fxpPrice = 60
outcomeID = YES
tradeGroupID = longTo32Bytes(42)
yesShareToken = contractsFixture.applySignature('ShareToken', market.getShareToken(YES))
noShareToken = contractsFixture.applySignature('ShareToken', market.getShareToken(NO))
creatorInitialShares = yesShareToken.balanceOf(contractsFixture.accounts[1])
marketInitialCash = cash.balanceOf(market.address)
marketInitialYesShares = yesShareToken.totalSupply()
marketInitialNoShares = noShareToken.totalSupply()
with BuyWithCash(cash, fix(fxpPrice), contractsFixture.accounts[1], "The sender didn't get cost deducted for create order"):
orderID = createOrder.publicCreateOrder(orderType, amount, fxpPrice, market.address, outcomeID, longTo32Bytes(0), longTo32Bytes(0), tradeGroupID, nullAddress, sender=contractsFixture.accounts[1])
assert orderID, "Order ID should be non-zero"
assert orders.getOrderCreator(orderID), "Order should have an owner"
orderEventLog = {
"universe": universe.address,
"market": market.address,
"eventType": 1,
"addressData": [nullAddress, contractsFixture.accounts[1], nullAddress],
"uint256Data": [0, 0, 0, fix('1', '60'), 0, 0, 0, contractsFixture.contracts['Time'].getTimestamp(), 0, 0],
}
with AssertLog(contractsFixture, 'OrderEvent', orderEventLog):
assert(cancelOrder.cancelOrder(orderID, sender=contractsFixture.accounts[1]) == 1), "cancelOrder should succeed"
assert orders.getAmount(orderID) == 0
assert orders.getPrice(orderID) == 0
assert orders.getOrderCreator(orderID) == longToHexString(0)
assert orders.getOrderMoneyEscrowed(orderID) == 0
assert orders.getOrderSharesEscrowed(orderID) == 0
assert orders.getBetterOrderId(orderID) == longTo32Bytes(0)
assert orders.getWorseOrderId(orderID) == longTo32Bytes(0)
assert(cash.balanceOf(contractsFixture.accounts[1]) == fix('60')), "Maker's cash balance should be order size"
assert(marketInitialCash == cash.balanceOf(market.address)), "Market's cash balance should be the same as before the order was placed"
assert(creatorInitialShares == yesShareToken.balanceOf(contractsFixture.accounts[1])), "Maker's shares should be unchanged"
assert(marketInitialYesShares == yesShareToken.totalSupply()), "Market's yes shares should be unchanged"
assert marketInitialNoShares == noShareToken.totalSupply(), "Market's no shares should be unchanged"
def test_cancelAsk(contractsFixture, cash, market):
createOrder = contractsFixture.contracts['CreateOrder']
cancelOrder = contractsFixture.contracts['CancelOrder']
orders = contractsFixture.contracts['Orders']
orderType = ASK
amount = fix(1)
fxpPrice = 60
outcomeID = 1
tradeGroupID = longTo32Bytes(42)
yesShareToken = contractsFixture.applySignature('ShareToken', market.getShareToken(YES))
noShareToken = contractsFixture.applySignature('ShareToken', market.getShareToken(NO))
creatorInitialShares = yesShareToken.balanceOf(contractsFixture.accounts[1])
marketInitialCash = cash.balanceOf(market.address)
marketInitialYesShares = yesShareToken.totalSupply()
marketInitialNoShares = noShareToken.totalSupply()
with BuyWithCash(cash, fix(100 - fxpPrice), contractsFixture.accounts[1], "create order"):
orderID = createOrder.publicCreateOrder(orderType, amount, fxpPrice, market.address, outcomeID, longTo32Bytes(0), longTo32Bytes(0), tradeGroupID, nullAddress, sender=contractsFixture.accounts[1])
assert(orderID != bytearray(32)), "Order ID should be non-zero"
assert orders.getOrderCreator(orderID), "Order should have an owner"
assert(cancelOrder.cancelOrder(orderID, sender=contractsFixture.accounts[1]) == 1), "cancelOrder should succeed"
assert orders.getAmount(orderID) == 0
assert orders.getPrice(orderID) == 0
assert orders.getOrderCreator(orderID) == longToHexString(0)
assert orders.getOrderMoneyEscrowed(orderID) == 0
assert orders.getOrderSharesEscrowed(orderID) == 0
assert orders.getBetterOrderId(orderID) == longTo32Bytes(0)
assert orders.getWorseOrderId(orderID) == longTo32Bytes(0)
assert(marketInitialCash == cash.balanceOf(market.address)), "Market's cash balance should be the same as before the order was placed"
assert(creatorInitialShares == yesShareToken.balanceOf(contractsFixture.accounts[1])), "Maker's shares should be unchanged"
assert(marketInitialYesShares == yesShareToken.totalSupply()), "Market's yes shares should be unchanged"
assert marketInitialNoShares == noShareToken.totalSupply(), "Market's no shares should be unchanged"
def test_cancelWithSharesInEscrow(contractsFixture, cash, market, universe):
completeSets = contractsFixture.contracts['CompleteSets']
createOrder = contractsFixture.contracts['CreateOrder']
cancelOrder = contractsFixture.contracts['CancelOrder']
orders = contractsFixture.contracts['Orders']
yesShareToken = contractsFixture.applySignature('ShareToken', market.getShareToken(YES))
noShareToken = contractsFixture.applySignature('ShareToken', market.getShareToken(NO))
totalProceeds = fix('12', market.getNumTicks())
marketCreatorFee = totalProceeds / market.getMarketCreatorSettlementFeeDivisor()
reporterFee = totalProceeds / universe.getOrCacheReportingFeeDivisor()
completeSetFees = marketCreatorFee + reporterFee
# buy complete sets
with BuyWithCash(cash, fix('12', market.getNumTicks()), contractsFixture.accounts[1], "buy complete set"):
assert completeSets.publicBuyCompleteSets(market.address, fix(12), sender = contractsFixture.accounts[1])
assert cash.balanceOf(contractsFixture.accounts[1]) == fix('0')
assert yesShareToken.balanceOf(contractsFixture.accounts[1]) == fix(12)
assert noShareToken.balanceOf(contractsFixture.accounts[1]) == fix(12)
creatorInitialShares = yesShareToken.balanceOf(contractsFixture.accounts[1])
marketInitialCash = cash.balanceOf(market.address)
marketInitialYesShares = yesShareToken.totalSupply()
marketInitialNoShares = noShareToken.totalSupply()
# create BID order for YES with NO shares escrowed
assert noShareToken.approve(createOrder.address, fix(12), sender = contractsFixture.accounts[1])
orderID = createOrder.publicCreateOrder(BID, fix(12), 60, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), longTo32Bytes(42), nullAddress, sender = contractsFixture.accounts[1])
assert orderID
assert cash.balanceOf(contractsFixture.accounts[1]) == fix('0')
assert yesShareToken.balanceOf(contractsFixture.accounts[1]) == fix(12)
assert noShareToken.balanceOf(contractsFixture.accounts[1]) == 0
# now cancel the order
assert(cancelOrder.cancelOrder(orderID, sender=contractsFixture.accounts[1]) == 1), "cancelOrder should succeed"
assert orders.getAmount(orderID) == 0
assert orders.getPrice(orderID) == 0
assert orders.getOrderCreator(orderID) == longToHexString(0)
assert orders.getOrderMoneyEscrowed(orderID) == 0
assert orders.getOrderSharesEscrowed(orderID) == 0
assert orders.getBetterOrderId(orderID) == longTo32Bytes(0)
assert orders.getWorseOrderId(orderID) == longTo32Bytes(0)
assert(marketInitialCash == cash.balanceOf(market.address)), "Market's cash balance should be the same as before the order was placed"
assert(creatorInitialShares == yesShareToken.balanceOf(contractsFixture.accounts[1])), "Maker's shares should be unchanged"
assert(marketInitialYesShares == yesShareToken.totalSupply()), "Market's yes shares should be unchanged"
assert marketInitialNoShares == noShareToken.totalSupply(), "Market's no shares should be unchanged"
def test_cancelWithSharesInEscrowAsk(contractsFixture, cash, market, universe):
completeSets = contractsFixture.contracts['CompleteSets']
createOrder = contractsFixture.contracts['CreateOrder']
cancelOrder = contractsFixture.contracts['CancelOrder']
orders = contractsFixture.contracts['Orders']
yesShareToken = contractsFixture.applySignature('ShareToken', market.getShareToken(YES))
noShareToken = contractsFixture.applySignature('ShareToken', market.getShareToken(NO))
totalProceeds = fix('12', market.getNumTicks())
marketCreatorFee = totalProceeds / market.getMarketCreatorSettlementFeeDivisor()
reporterFee = totalProceeds / universe.getOrCacheReportingFeeDivisor()
completeSetFees = marketCreatorFee + reporterFee
# buy complete sets
with BuyWithCash(cash, fix('12', market.getNumTicks()), contractsFixture.accounts[1], "buy complete set"):
assert completeSets.publicBuyCompleteSets(market.address, fix(12), sender = contractsFixture.accounts[1])
assert cash.balanceOf(contractsFixture.accounts[1]) == fix('0')
assert yesShareToken.balanceOf(contractsFixture.accounts[1]) == fix(12)
assert noShareToken.balanceOf(contractsFixture.accounts[1]) == fix(12)
creatorInitialShares = yesShareToken.balanceOf(contractsFixture.accounts[1])
marketInitialCash = cash.balanceOf(market.address)
marketInitialYesShares = yesShareToken.totalSupply()
marketInitialNoShares = noShareToken.totalSupply()
# create ASK order for YES with YES shares escrowed
assert noShareToken.approve(createOrder.address, fix(12), sender = contractsFixture.accounts[1])
orderID = createOrder.publicCreateOrder(ASK, fix(12), 60, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), longTo32Bytes(42), nullAddress, sender = contractsFixture.accounts[1])
assert orderID
assert cash.balanceOf(contractsFixture.accounts[1]) == fix('0')
assert yesShareToken.balanceOf(contractsFixture.accounts[1]) == 0
assert noShareToken.balanceOf(contractsFixture.accounts[1]) == fix(12)
# now cancel the order
assert(cancelOrder.cancelOrder(orderID, sender=contractsFixture.accounts[1]) == 1), "cancelOrder should succeed"
assert orders.getAmount(orderID) == 0
assert orders.getPrice(orderID) == 0
assert orders.getOrderCreator(orderID) == longToHexString(0)
assert orders.getOrderMoneyEscrowed(orderID) == 0
assert orders.getOrderSharesEscrowed(orderID) == 0
assert orders.getBetterOrderId(orderID) == longTo32Bytes(0)
assert orders.getWorseOrderId(orderID) == longTo32Bytes(0)
assert(marketInitialCash == cash.balanceOf(market.address)), "Market's cash balance should be the same as before the order was placed"
assert(creatorInitialShares == yesShareToken.balanceOf(contractsFixture.accounts[1])), "Maker's shares should be unchanged"
assert(marketInitialYesShares == yesShareToken.totalSupply()), "Market's yes shares should be unchanged"
assert marketInitialNoShares == noShareToken.totalSupply(), "Market's no shares should be unchanged"
def test_exceptions(contractsFixture, cash, market):
createOrder = contractsFixture.contracts['CreateOrder']
cancelOrder = contractsFixture.contracts['CancelOrder']
orderType = BID
amount = fix(1)
fxpPrice = 60
outcomeID = YES
tradeGroupID = longTo32Bytes(42)
with BuyWithCash(cash, fix(fxpPrice), contractsFixture.accounts[1], "create order"):
orderID = createOrder.publicCreateOrder(orderType, amount, fxpPrice, market.address, outcomeID, longTo32Bytes(0), longTo32Bytes(0), tradeGroupID, nullAddress, sender=contractsFixture.accounts[1])
assert(orderID != bytearray(32)), "Order ID should be non-zero"
# cancelOrder exceptions
with raises(TransactionFailed):
cancelOrder.cancelOrder(longTo32Bytes(0), sender=contractsFixture.accounts[1])
with raises(TransactionFailed):
cancelOrder.cancelOrder(longTo32Bytes(1), sender=contractsFixture.accounts[1])
with raises(TransactionFailed):
cancelOrder.cancelOrder(orderID, sender=contractsFixture.accounts[2])
assert(cancelOrder.cancelOrder(orderID, sender=contractsFixture.accounts[1]) == 1), "cancelOrder should succeed"
with raises(TransactionFailed):
cancelOrder.cancelOrder(orderID, sender=contractsFixture.accounts[1])
def test_cancelOrders(contractsFixture, cash, market, universe):
createOrder = contractsFixture.contracts['CreateOrder']
cancelOrder = contractsFixture.contracts['CancelOrder']
orders = contractsFixture.contracts['Orders']
orderType = BID
amount = fix(1)
fxpPrice = 60
outcomeID = YES
tradeGroupID = longTo32Bytes(42)
orderIDs = []
for i in range(10):
with BuyWithCash(cash, fix(fxpPrice + i), contractsFixture.accounts[0], "create order"):
orderIDs.append(createOrder.publicCreateOrder(orderType, amount, fxpPrice + i, market.address, outcomeID, longTo32Bytes(0), longTo32Bytes(0), tradeGroupID, nullAddress))
for i in range(10):
assert orders.getAmount(orderIDs[i]) == amount
assert cancelOrder.cancelOrders(orderIDs)
for i in range(10):
assert orders.getAmount(orderIDs[i]) == 0
|
"""The kwb component."""
|
"""Unit test for analysis.py module"""
import datetime
import numpy as np
from matplotlib.dates import date2num
from floodsystem.analysis import polyfit, forecast
def test_polyfit():
dates = [datetime.datetime(2016, 12, 30), datetime.datetime(2016, 12, 31), datetime.datetime(2017, 1, 1),
datetime.datetime(2017, 1, 2), datetime.datetime(2017, 1, 3), datetime.datetime(2017, 1, 4),
datetime.datetime(2017, 1, 5)]
t = date2num(dates)
f = np.poly1d([1, -2, 10, 4])
# create simple polynomial and see if function gives the same polynomial
y = [f(n-t[0]) for n in t]
f, x0 = polyfit(dates, y, 3)
assert round(f.coefficients[0]) == 1
assert round(f.coefficients[1]) == -2
assert round(f.coefficients[2]) == 10
assert round(f.coefficients[3]) == 4
assert x0 == t[0]
def test_forecast():
f = np.poly1d([1, -2, 10, 4])
now = date2num(datetime.datetime.now())
change = forecast(f, now)
# gradient at x=0 should be 10, so change over 0.5 days will be 5
assert round(change) == 5
|
import collections
import logging
import os
import pprint
from .simple_pwatcher_bridge import (PypeTask, Dist)
from . import io
LOG = logging.getLogger(__name__)
def task_generic_bash_script(self):
"""Generic script task.
The script template should be in
self.bash_template
The template will be substituted by
the content of "self" and of "self.parameters".
(That is a little messy, but good enough for now.)
"""
self_dict = dict()
self_dict.update(self.__dict__)
self_dict.update(self.parameters)
script_unsub = self.bash_template
script = script_unsub.format(**self_dict)
script_fn = 'script.sh'
with open(script_fn, 'w') as ofs:
ofs.write(script)
self.generated_script_fn = script_fn
def gen_task(script, inputs, outputs, parameters=None, dist=None):
"""
dist is used in two ways:
1) in the pwatcher, to control job-distribution
2) as additional parameters:
- params.pypeflow_nproc
- params.pypeflow_mb
"""
if parameters is None:
parameters = dict()
if dist is None:
dist = Dist()
LOG.debug('gen_task({}\n\tinputs={!r},\n\toutputs={!r})'.format(
script, inputs, outputs))
parameters = dict(parameters) # copy
parameters['pypeflow_nproc'] = dist.pypeflow_nproc
parameters['pypeflow_mb'] = dist.pypeflow_mb
LOG.debug(' parameters={}'.format(
pprint.pformat(parameters)))
LOG.debug(' dist.job_dict={}'.format(
pprint.pformat(dist.job_dict)))
def validate_dict(mydict):
"Python identifiers are illegal as keys."
try:
collections.namedtuple('validate', list(mydict.keys()))
except ValueError as exc:
LOG.exception('Bad key name in task definition dict {!r}'.format(mydict))
raise
validate_dict(inputs)
validate_dict(outputs)
validate_dict(parameters)
make_task = PypeTask(
inputs={k: v for k,v in inputs.items()},
outputs={k: v for k,v in outputs.items()},
parameters=parameters,
bash_template=script,
dist=dist,
)
return make_task(task_generic_bash_script)
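# Illustrative call (hypothetical paths; Dist() defaults as above). The bash
# template is substituted with the task's attributes and parameters, so
# placeholders such as {pypeflow_nproc} and {pypeflow_mb} are always available:
#
#   task = gen_task(
#       script='echo "running with {pypeflow_nproc} procs"\n',
#       inputs={'reads_fofn': 'run/reads.fofn'},
#       outputs={'merged_txt': 'run/merged.txt'},
#       dist=Dist(),
#   )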
|
# copied from python-2.7.3's traceback.py
# CHANGES:
# - some_str is replaced, trying to create unicode strings
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import types
from six import text_type
def format_exception_only(etype, value):
"""Format the exception part of a traceback.
The arguments are the exception type and value such as given by
sys.last_type and sys.last_value. The return value is a list of
strings, each ending in a newline.
Normally, the list contains a single string; however, for
SyntaxError exceptions, it contains several lines that (when
printed) display detailed information about where the syntax
error occurred.
The message indicating which exception occurred is always the last
string in the list.
"""
# An instance should not have a meaningful value parameter, but
# sometimes does, particularly for string exceptions, such as
# >>> raise string1, string2 # deprecated
#
# Clear these out first because issubtype(string1, SyntaxError)
# would throw another exception and mask the original problem.
if (
isinstance(etype, BaseException)
or isinstance(etype, types.InstanceType)
or etype is None
or type(etype) is str
):
return [_format_final_exc_line(etype, value)]
stype = etype.__name__
if not issubclass(etype, SyntaxError):
return [_format_final_exc_line(stype, value)]
# It was a syntax error; show exactly where the problem was found.
lines = []
try:
msg, (filename, lineno, offset, badline) = value.args
except Exception:
pass
else:
filename = filename or "<string>"
lines.append(' File "{}", line {}\n'.format(filename, lineno))
if badline is not None:
if isinstance(badline, bytes): # python 2 only
badline = badline.decode("utf-8", "replace")
lines.append(" {}\n".format(badline.strip()))
if offset is not None:
caretspace = badline.rstrip("\n")[:offset].lstrip()
# non-space whitespace (likes tabs) must be kept for alignment
caretspace = ((c.isspace() and c or " ") for c in caretspace)
# only three spaces to account for offset1 == pos 0
lines.append(" {}^\n".format("".join(caretspace)))
value = msg
lines.append(_format_final_exc_line(stype, value))
return lines
def _format_final_exc_line(etype, value):
"""Return a list of a single line -- normal case for format_exception_only"""
valuestr = _some_str(value)
if value is None or not valuestr:
line = "{}\n".format(etype)
else:
line = "{}: {}\n".format(etype, valuestr)
return line
def _some_str(value):
try:
return text_type(value)
except Exception:
try:
return bytes(value).decode("UTF-8", "replace")
except Exception:
pass
return "<unprintable {} object>".format(type(value).__name__)
|
import torch
import torch.nn as nn
from math import sqrt
from IPython import embed
import sys
sys.path.append('./')
from .recognizer.tps_spatial_transformer import TPSSpatialTransformer
from .recognizer.stn_head import STNHead
import torch.nn.functional as F
class Conv_ReLU_Block(nn.Module):
def __init__(self):
super(Conv_ReLU_Block, self).__init__()
self.conv = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
return self.relu(self.conv(x)) + x
class Conv_ReLU_Block_TL(nn.Module):
def __init__(self, out_text_channels=32):
super(Conv_ReLU_Block_TL, self).__init__()
self.conv = nn.Conv2d(in_channels=64 + out_text_channels, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False)
self.relu = nn.ReLU(inplace=True)
def forward(self, x, text_emb):
# x, text_emb = x_in
# print("x.shape", type(x_in))
############ Fusing with TL ############
cat_feature = torch.cat([x, text_emb], 1)
# residual = self.concat_conv(cat_feature)
########################################
return self.relu(self.conv(cat_feature)) + x
class VDSR(nn.Module):
def __init__(self, scale_factor=2, in_planes=3, width=32, height=128, STN=False):
super(VDSR, self).__init__()
self.upscale_factor = scale_factor
self.residual_layer = self.make_layer(Conv_ReLU_Block, 6)
self.input = nn.Conv2d(in_channels=in_planes, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False)
self.output = nn.Conv2d(in_channels=64, out_channels=in_planes, kernel_size=3, stride=1, padding=1, bias=False)
self.relu = nn.ReLU(inplace=True)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, sqrt(2. / n))
self.tps_inputsize = [height // scale_factor, width // scale_factor]
tps_outputsize = [height, width]
num_control_points = 20
tps_margins = [0.05, 0.05]
        self.stn = STN
if self.stn:
self.tps = TPSSpatialTransformer(
output_image_size=tuple(tps_outputsize),
num_control_points=num_control_points,
margins=tuple(tps_margins))
self.stn_head = STNHead(
in_planes=3,
num_ctrlpoints=num_control_points,
activation='none')
def make_layer(self, block, num_of_layer):
layers = []
for _ in range(num_of_layer):
layers.append(block())
return nn.Sequential(*layers)
def forward(self, x):
if self.stn:
_, ctrl_points_x = self.stn_head(x)
x, _ = self.tps(x, ctrl_points_x)
else:
x = torch.nn.functional.interpolate(x, scale_factor=self.upscale_factor)
residual = x
out = self.relu(self.input(x))
# out = self.residual_layer(out)
for block in self.residual_layer:
out = block(out)
out = self.output(out)
out = torch.add(out, residual)
return out
class InfoGen(nn.Module):
def __init__(
self,
t_emb,
output_size
):
super(InfoGen, self).__init__()
self.tconv1 = nn.ConvTranspose2d(t_emb, 512, 3, 2, bias=False)
self.bn1 = nn.BatchNorm2d(512)
self.tconv2 = nn.ConvTranspose2d(512, 128, 3, 2, bias=False)
self.bn2 = nn.BatchNorm2d(128)
self.tconv3 = nn.ConvTranspose2d(128, 64, 3, 2, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(64)
self.tconv4 = nn.ConvTranspose2d(64, output_size, 3, (2, 1), padding=1, bias=False)
self.bn4 = nn.BatchNorm2d(output_size)
def forward(self, t_embedding):
# t_embedding += noise.to(t_embedding.device)
x = F.relu(self.bn1(self.tconv1(t_embedding)))
x = F.relu(self.bn2(self.tconv2(x)))
x = F.relu(self.bn3(self.tconv3(x)))
x = F.relu(self.bn4(self.tconv4(x)))
return x
class VDSR_TL(nn.Module):
def __init__(self, scale_factor=2,
in_planes=4,
width=32,
height=128,
STN=False,
                 text_emb=37, # 26+10+1
out_text_channels=32):
# print("in_planes:", in_planes)
super(VDSR_TL, self).__init__()
self.upscale_factor = scale_factor
self.out_text_channels = out_text_channels
# self.residual_layer = self.make_layer(Conv_ReLU_Block_TL, 8)
self.input = nn.Conv2d(in_channels=in_planes, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False)
self.output = nn.Conv2d(in_channels=64, out_channels=in_planes, kernel_size=3, stride=1, padding=1, bias=False)
self.relu = nn.ReLU(inplace=True)
# From [1, 1] -> [16, 16]
self.infoGen = InfoGen(text_emb, out_text_channels)
block = Conv_ReLU_Block_TL
self.block1 = block(self.out_text_channels)
self.block2 = block(self.out_text_channels)
self.block3 = block(self.out_text_channels)
self.block4 = block(self.out_text_channels)
self.block5 = block(self.out_text_channels)
self.block6 = block(self.out_text_channels)
#self.block7 = block(self.out_text_channels)
#self.block8 = block(self.out_text_channels)
#self.block9 = block(self.out_text_channels)
#self.block10 = block(self.out_text_channels)
#self.block11 = block(self.out_text_channels)
#self.block12 = block(self.out_text_channels)
#self.block13 = block(self.out_text_channels)
#self.block14 = block(self.out_text_channels)
#self.block15 = block(self.out_text_channels)
#self.block16 = block(self.out_text_channels)
#self.block17 = block(self.out_text_channels)
#self.block18 = block(self.out_text_channels)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, sqrt(2. / n))
self.tps_inputsize = [height // scale_factor, width // scale_factor]
self.tps_outputsize = [height, width]
num_control_points = 20
tps_margins = [0.05, 0.05]
        self.stn = STN
if self.stn:
self.tps = TPSSpatialTransformer(
                output_image_size=tuple(self.tps_outputsize),
num_control_points=num_control_points,
margins=tuple(tps_margins))
self.stn_head = STNHead(
in_planes=in_planes,
num_ctrlpoints=num_control_points,
activation='none')
def make_layer(self, block, num_of_layer):
layers = []
for _ in range(num_of_layer):
layers.append(block(self.out_text_channels))
# self.internal_layers = nn.Sequential(*layers)
return nn.Sequential(*layers)
def forward(self, x, text_emb=None):
if self.stn:
_, ctrl_points_x = self.stn_head(x)
x, _ = self.tps(x, ctrl_points_x)
else:
x = torch.nn.functional.interpolate(x, scale_factor=self.upscale_factor)
spatial_t_emb = self.infoGen(text_emb)
spatial_t_emb = F.interpolate(spatial_t_emb, self.tps_outputsize, mode='bilinear', align_corners=True)
residual = x
out = self.relu(self.input(x))
# for block in self.blocks:
# out = self.block1(out, spatial_t_emb)
out = self.block1(out, spatial_t_emb)
out = self.block2(out, spatial_t_emb)
out = self.block3(out, spatial_t_emb)
out = self.block4(out, spatial_t_emb)
out = self.block5(out, spatial_t_emb)
out = self.block6(out, spatial_t_emb)
#out = self.block7(out, spatial_t_emb)
#out = self.block8(out, spatial_t_emb)
#out = self.block9(out, spatial_t_emb)
#out = self.block10(out, spatial_t_emb)
# out, spatial_t_emb = out
# out = self.block1(out, spatial_t_emb)
out = self.output(out)
out = torch.add(out, residual)
return out
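# A hedged smoke-test sketch (not part of the original model code). The tensor
# shapes below are illustrative assumptions: a batch of two 4-channel low-res
# crops and a [batch, text_emb, 1, T] text-probability map; the relative
# `.recognizer` imports above must still resolve for this module to import.
def _vdsr_tl_smoke_test():
    model = VDSR_TL(scale_factor=2, in_planes=4, width=32, height=128,
                    text_emb=37, out_text_channels=32)
    lr_images = torch.randn(2, 4, 64, 16)    # [batch, in_planes, height/2, width/2]
    text_probs = torch.randn(2, 37, 1, 26)   # assumed per-position class scores
    sr_images = model(lr_images, text_probs)
    print(sr_images.shape)                   # expected: torch.Size([2, 4, 128, 32])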
if __name__=='__main__':
embed()
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from pathlib import Path
import yaml
import click
from cli.utils import (
explode_key_values,
iterate_configurations,
run_sacred_script,
)
from tsbench.constants import DEFAULT_DATA_PATH, DEFAULT_EVALUATIONS_PATH
from ._main import analysis
@analysis.command(short_help="Evaluate the performance of ensembles.")
@click.option(
"--experiment",
required=True,
help="The name of the experiment under which the individual training runs are grouped.",
)
@click.option(
"--config_path",
required=True,
help="The local path to the configuration file defining script options.",
)
@click.option(
"--data_path",
type=click.Path(exists=True),
default=DEFAULT_DATA_PATH,
show_default=True,
help="The path where datasets are stored.",
)
@click.option(
"--evaluations_path",
type=click.Path(exists=True),
default=DEFAULT_EVALUATIONS_PATH,
show_default=True,
help="The path where offline evaluations are stored.",
)
@click.option(
"--nskip",
default=0,
show_default=True,
help="The number of configurations to skip. Useful if some set of experiments failed.",
)
def ensemble(
experiment: str,
config_path: str,
data_path: str,
evaluations_path: str,
nskip: int,
):
"""
    Evaluates the performance of an ensemble of best-performing models. It also allows
    considering only models of a particular type (thus building hyper-ensembles).
This call runs the Sacred script for each provided configuration sequentially and returns only
once all runs have completed.
"""
with Path(config_path).open("r", encoding="utf-8") as f:
content = yaml.safe_load(f)
configs = explode_key_values("__", content)
for configuration in iterate_configurations(configs, nskip):
run_sacred_script(
"ensemble.py",
experiment=experiment,
data_path=data_path,
evaluations_path=evaluations_path,
**{k: v for k, v in configuration.items() if k != "__"},
)
|
import argparse
import os
from guacamol.assess_distribution_learning import assess_distribution_learning
from guacamol.utils.helpers import setup_default_logger
from .generator import RandomSmilesSampler
if __name__ == "__main__":
setup_default_logger()
parser = argparse.ArgumentParser(
description="Molecule distribution learning benchmark for random smiles sampler",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("--dist_file", default="data/guacamol_v1_all.smiles")
parser.add_argument("--output_dir", default=None, help="Output directory")
parser.add_argument("--suite", default="v2")
args = parser.parse_args()
if args.output_dir is None:
args.output_dir = os.path.dirname(os.path.realpath(__file__))
with open(args.dist_file, "r") as smiles_file:
smiles_list = [line.strip() for line in smiles_file.readlines()]
generator = RandomSmilesSampler(molecules=smiles_list)
json_file_path = os.path.join(args.output_dir, "distribution_learning_results.json")
assess_distribution_learning(
generator,
chembl_training_file=args.dist_file,
json_output_file=json_file_path,
benchmark_version=args.suite,
)
|
# -*- coding: utf-8 -*-
"""The main h application."""
from __future__ import unicode_literals
from h._compat import urlparse
import logging
import transaction
from pyramid.settings import asbool
from pyramid.tweens import EXCVIEW
from h.config import configure
from h.views.client import DEFAULT_CLIENT_URL
log = logging.getLogger(__name__)
def configure_jinja2_assets(config):
jinja2_env = config.get_jinja2_environment()
jinja2_env.globals["asset_url"] = config.registry["assets_env"].url
jinja2_env.globals["asset_urls"] = config.registry["assets_env"].urls
def in_debug_mode(request):
return asbool(request.registry.settings.get("pyramid.debug_all"))
def create_app(global_config, **settings):
"""
Create the h WSGI application.
This function serves as a paste app factory.
"""
config = configure(settings=settings)
config.include(__name__)
return config.make_wsgi_app()
def includeme(config):
settings = config.registry.settings
config.set_root_factory("h.traversal:Root")
config.add_subscriber(
"h.subscribers.add_renderer_globals", "pyramid.events.BeforeRender"
)
config.add_subscriber(
"h.subscribers.publish_annotation_event", "h.events.AnnotationEvent"
)
config.add_subscriber(
"h.subscribers.send_reply_notifications", "h.events.AnnotationEvent"
)
config.add_tween("h.tweens.conditional_http_tween_factory", under=EXCVIEW)
config.add_tween("h.tweens.redirect_tween_factory")
config.add_tween("h.tweens.invalid_path_tween_factory")
config.add_tween("h.tweens.security_header_tween_factory")
config.add_tween("h.tweens.cache_header_tween_factory")
config.add_tween("h.tweens.encode_headers_tween_factory")
config.add_request_method(in_debug_mode, "debug", reify=True)
config.include("pyramid_jinja2")
config.add_jinja2_extension("h.jinja_extensions.Filters")
config.add_jinja2_extension("h.jinja_extensions.SvgIcon")
# Register a deferred action to setup the assets environment
# when the configuration is committed.
config.action(None, configure_jinja2_assets, args=(config,))
# Pyramid layouts: provides support for reusable components ('panels')
# that are used across multiple pages
config.include("pyramid_layout")
config.registry.settings.setdefault(
"mail.default_sender", '"Annotation Daemon" <no-reply@localhost>'
)
if asbool(config.registry.settings.get("h.debug")):
config.include("pyramid_mailer.debug")
else:
config.include("pyramid_mailer")
# Pyramid service layer: provides infrastructure for registering and
# retrieving services bound to the request.
config.include("pyramid_services")
# Configure the transaction manager to support retrying retryable
# exceptions, and generate a new transaction manager for each request.
config.add_settings(
{
"tm.attempts": 3,
"tm.manager_hook": lambda request: transaction.TransactionManager(),
"tm.annotate_user": False,
}
)
config.include("pyramid_tm")
# Define the global default Content Security Policy
client_url = settings.get("h.client_url", DEFAULT_CLIENT_URL)
client_host = urlparse.urlparse(client_url).netloc
settings["csp"] = {
"font-src": ["'self'", "fonts.gstatic.com", client_host],
"script-src": ["'self'", client_host, "www.google-analytics.com"],
# Allow inline styles until https://github.com/hypothesis/client/issues/293
# is resolved as otherwise our own tool would break on the site,
# including on /docs/help.
"style-src": ["'self'", "fonts.googleapis.com", client_host, "'unsafe-inline'"],
}
if "csp.report_uri" in settings:
settings["csp"]["report-uri"] = [settings["csp.report_uri"]]
# Core site modules
config.include("h.assets")
config.include("h.auth")
config.include("h.authz")
config.include("h.db")
config.include("h.eventqueue")
config.include("h.form")
config.include("h.indexer")
config.include("h.panels")
config.include("h.realtime")
config.include("h.renderers")
config.include("h.routes")
config.include("h.search")
config.include("h.sentry")
config.include("h.services")
config.include("h.session")
config.include("h.stats")
config.include("h.viewderivers")
config.include("h.viewpredicates")
config.include("h.views")
# Site modules
config.include("h.accounts")
config.include("h.links")
config.include("h.nipsa")
config.include("h.notification")
# Debugging assistance
if asbool(config.registry.settings.get("h.debug")):
config.include("pyramid_debugtoolbar")
|
#! /usr/bin/env python3
# Copyright 2020 Desh Raj
# Apache 2.0.
"""This script takes an RTTM file and removes same-speaker segments
which may be present at the same time across streams. This is meant
to be used as a post-processing step after performing clustering-based
diarization on top of separated streams of audio. The idea is to
eliminate false alarms caused by leakage, since the separation
method may not be perfect."""
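# An illustrative example (field layout matches the RTTM template used for the
# output below). Suppose two separated streams of the same recording are both
# assigned speaker 3:
#   SPEAKER reco1_1 1   0.000   2.000 <NA> <NA> 3 <NA> <NA>
#   SPEAKER reco1_2 1   1.000   2.000 <NA> <NA> 3 <NA> <NA>
# The overlapping region 1.0-2.0 is split at its midpoint, so stream 1 keeps
# 0.0-1.5 and stream 2 keeps 1.5-3.0 in the output RTTM.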
import argparse, os
import itertools
from collections import defaultdict
def get_args():
parser = argparse.ArgumentParser(
description="""This script takes an RTTM file and removes same-speaker segments
which may be present at the same time across streams. This is meant
to be used as a post-processing step after performing clustering-based
diarization on top of separated streams of audio. The idea is to
eliminate false alarms caused by leakage, since the separation
method may not be perfect.""",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("input_rttm", type=str,
help="path of input rttm file")
parser.add_argument("output_rttm", type=str,
help="path of output rttm file")
args = parser.parse_args()
return args
class Segment:
def __init__(self, parts):
self.reco_id = '_'.join(parts[1].split('_')[:-1])
self.stream = int(parts[1].split('_')[-1])
self.start_time = float(parts[3])
self.duration = float(parts[4])
self.end_time = self.start_time + self.duration
self.label = int(parts[7])
def main():
args = get_args()
# First we read all segments and store as a list of objects
segments = []
with open(args.input_rttm,'r') as f:
for line in f.readlines():
parts = line.strip().split()
segments.append(Segment(parts))
    groupfn = lambda x: (x.reco_id, x.label)
    segments.sort(key=groupfn)
# We group the segment list into a dictionary indexed by (reco_id, spk_id)
reco_and_spk_to_segs = defaultdict(list,
{uid : list(g) for uid, g in itertools.groupby(segments, groupfn)})
reco_and_spk_to_final_segs = {}
for uid in reco_and_spk_to_segs.keys():
reco_id, spk_id = uid
segs = reco_and_spk_to_segs[uid]
tokens = []
for seg in segs:
tokens.append(('BEG',seg.start_time,seg.stream))
tokens.append(('END',seg.end_time,seg.stream))
tokens.sort(key=lambda x:x[1])
# Remove segments which lie completely inside another segment
running_segs = {}
new_segs = [] # (start_time, end_time, stream)
for token in tokens:
if token[0] == 'BEG':
running_segs[token[2]] = token[1]
else:
seg_start = running_segs[token[2]]
seg_end = token[1]
seg_stream = token[2]
new_seg = (seg_start, seg_end, seg_stream)
del running_segs[token[2]]
# if this segment was the only running segment, then append
if len(running_segs) == 0:
new_segs.append(new_seg)
continue
# if any running segment started before this one, it means, this
# segment is totally enclosed within the other, so we don't add it
if not any(i < new_seg[0] for i in running_segs.values()):
new_segs.append(new_seg)
new_segs.sort(key=lambda x: x[0])
num_segs = len(new_segs)
# Now we have partially overlapping segments. We divide the overlapping
# portion equally.
final_segs = [] # (start_time, end_time, stream)
for i in range(num_segs):
seg = new_segs[i]
            # If it is the last segment in the recording or the last contiguous segment, add it to final_segs
if (i == num_segs-1 or seg[1] <= new_segs[i+1][0]):
final_segs.append(seg)
# Otherwise split overlapping interval between current and next segment
else:
avg = (new_segs[i+1][0] + seg[1]) / 2
final_segs.append((seg[0], avg, seg[2]))
if not (avg < new_segs[i+1][1]):
print (reco_id, spk_id, seg, new_segs[i+1])
new_segs[i+1] = (avg, new_segs[i+1][1], new_segs[i+1][2])
                # Keep the remaining segments sorted by start time after
                # moving the next segment's start point.
                new_segs[i+1:] = sorted(new_segs[i+1:], key=lambda x: x[0])
reco_and_spk_to_final_segs[(reco_id, spk_id)] = final_segs
rttm_str = "SPEAKER {0} 1 {1:7.3f} {2:7.3f} <NA> <NA> {3} <NA> <NA>\n"
with open(args.output_rttm, 'w') as f:
for (reco_id, spk_id) in sorted(reco_and_spk_to_final_segs):
segs = reco_and_spk_to_final_segs[(reco_id, spk_id)]
for seg in segs:
utt_id = "{}_{}".format(reco_id, seg[2])
dur = seg[1] - seg[0]
if dur > 0.025:
f.write(rttm_str.format(utt_id, seg[0], dur, spk_id))
if __name__ == '__main__':
main()
|
nome = str(input('Enter your name: ')).strip()
print(f'Does {nome} have Silva in their name?', 'SILVA' in nome.upper())
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 6 21:38:13 2022
@author: DanielT17
"""
# %% Imports
from math import ceil
# %% Functions
def Preprocessing(packets,crc_width):
    '''
    Description:
        This function preprocesses the data by splitting the packets list into
        packet and crc combinations using the crc_width parameter; the new
        sub-lists are made of byte arrays.
    Inputs:
        packets - list - a list of packets
        crc_width - int - length or degree of the crc.
    Outputs:
        first_step_packets - a list of lists of packets of equal length
        second_step_packets - a list of lists of packets of unequal length
    '''
new_packets = []
for packet in packets:
cur_packet = packet[:-crc_width//8]; cur_crc = packet[-crc_width//8:]
packet_crc_pair = [cur_packet,cur_crc]
new_packets.append(packet_crc_pair)
num_packets = len(new_packets)
first_step_packets_num = ceil(num_packets/1.5)
if first_step_packets_num % 2 == 1:
first_step_packets_num = first_step_packets_num - 1
first_step_packets = new_packets[:first_step_packets_num]
    second_step_packets = new_packets[first_step_packets_num:]
return first_step_packets,second_step_packets
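# %% Usage sketch
# A minimal, hedged example (the packet bytes are made up): four 6-byte packets
# whose last two bytes are treated as a CRC-16 are split into the two work sets.
if __name__ == '__main__':
    example_packets = [bytearray(b'\x01\x02\x03\x04\xAA\xBB'),
                       bytearray(b'\x05\x06\x07\x08\xCC\xDD'),
                       bytearray(b'\x09\x0A\x0B\x0C\xEE\xFF'),
                       bytearray(b'\x0D\x0E\x0F\x10\x11\x22')]
    first_step, second_step = Preprocessing(example_packets, crc_width=16)
    print(first_step)
    print(second_step)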
|
ies = []
ies.append({ "iei" : "", "value" : "Payload container type", "type" : "Payload container type", "reference" : "9.10.3.36", "presence" : "M", "format" : "V", "length" : "1/2"})
ies.append({ "iei" : "", "value" : "Payload container", "type" : "Payload container", "reference" : "9.10.3.35", "presence" : "M", "format" : "LV-E", "length" : "3-65537"})
ies.append({ "iei" : "70", "value" : "PDU session ID", "type" : "PDU session identity 2", "reference" : "9.10.3.37", "presence" : "C", "format" : "TV", "length" : "2"})
ies.append({ "iei" : "24", "value" : "Additional information", "type" : "Additional information", "reference" : "9.10.2.1", "presence" : "O", "format" : "TLV", "length" : "3-n"})
ies.append({ "iei" : "58", "value" : "5GMM cause", "type" : "5GMM cause", "reference" : "9.10.3.2", "presence" : "O", "format" : "TV", "length" : "2"})
ies.append({ "iei" : "37", "value" : "Back-off timer value", "type" : "GPRS timer 3", "reference" : "9.10.2.5", "presence" : "O", "format" : "TLV", "length" : "3"})
msg_list[key]["ies"] = ies
|
#!/usr/bin/env python3
import sys
import time
import numpy as np
import tensorflow as tf
import cv2
# from object_detector_detection_api import ObjectDetectorDetectionAPI, PATH_TO_LABELS, NUM_CLASSES
class FROZEN_GRAPH_INFERENCE:
def __init__(self, model_person):
"""Tensorflow detector
"""
self.inference_list = list()
PATH_TO_CKPT = model_person
self.count = 0
# NUM_CLASSES = num_classes
# PATH_TO_LABELS = config.PATH_TO_LABELS
# label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
# categories = label_map_util.convert_label_map_to_categories(label_map,
# max_num_classes=NUM_CLASSES, use_display_name=True)
# self.category_index = label_map_util.create_category_index(categories)
self.detection_graph = tf.Graph()
with self.detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
with self.detection_graph.as_default():
config = tf.ConfigProto()
# Allow growth: (more flexible)
#config.gpu_options.allow_growth = False
config.gpu_options.allow_growth = True
#Allocate fixed memory
#config.gpu_options.per_process_gpu_memory_fraction = 0.3
self.sess = tf.Session(graph=self.detection_graph, config=config)
self.windowNotSet = True
def draw_bounding_box(self, image, scores, boxes, classes, im_width, im_height):
boxes = np.squeeze(boxes)
scores = np.squeeze(scores)
classes = np.squeeze(classes).astype(np.int32)
persons = list()
bboxs = list()
idx = 0
for score, box, name in zip(scores, boxes, classes):
if name == 1 and score > 0.6:
# ymin, xmin, ymax, xmax = box
left = int(box[1]*im_width)
top = int(box[0]*im_height)
right = int(box[3]*im_width)
bottom = int(box[2]*im_height)
bboxs.append([left, top, right, bottom])
cv2.rectangle(image, (left, top), (right, bottom), (0, 0, 255), 2, 8)
width = right - left
height = bottom - top
bottom_mid = (left + int(width / 2), top + height)
confidence = score
label = name
mydict = {
"width": width,
"height": height,
"left": left,
"right": right,
"top": top,
"bottom": bottom,
"confidence": confidence,
"label": None,
"bottom_mid": bottom_mid,
"model_type": 'FROZEN_GRAPH',
"bboxs": bboxs
}
persons.append(mydict)
idx += 1
return image, persons
def run_model(self, image, im_width, im_height):
"""image: bgr image
return (boxes, scores, classes, num_detections)
"""
image_np = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
# Actual detection.
start_time = time.time()
(boxes, scores, classes, num_detections) = self.sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
elapsed_time = time.time() - start_time
self.inference_list.append(elapsed_time)
self.count = self.count + 1
average_inference = sum(self.inference_list)/self.count
# print('Average inference time: {}'.format(average_inference))
# Draw bounding boxes on the image
frame, persons = self.draw_bounding_box(image, scores, boxes, classes, im_width, im_height)
return frame, persons
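# A hedged usage sketch (the model path and image path are command-line
# placeholders, not from the original code); it assumes a TF1-style frozen
# person-detection graph and an image readable by OpenCV.
if __name__ == '__main__':
    detector = FROZEN_GRAPH_INFERENCE(sys.argv[1])   # path to frozen .pb graph
    frame = cv2.imread(sys.argv[2])                  # path to an input image
    height, width = frame.shape[:2]
    annotated, persons = detector.run_model(frame, width, height)
    for person in persons:
        print(person['confidence'], person['left'], person['top'],
              person['right'], person['bottom'])
    cv2.imwrite('annotated.jpg', annotated)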
|
while True:
temp = []
try:
a, k = [int(x) for x in input().split()]
except EOFError:
break
for _ in range(a):
temp.append(input())
print(temp[-k])
|
from __future__ import print_function, division, absolute_import
import numpy as np
def spin(x, dancers):
"""
Remove x characters from the end of the string
and place them, order unchanged, on the front.
Parameters
----------
x : int
Number of characters to be moved from end of dancers
to the front of dancers.
    dancers : str
        A sequence of characters; a new string is returned.
"""
end = dancers[-x:]
return end + dancers[:len(dancers)-x]
def exchange(a, b, dancers):
"""
Swap places of dancers a and b.
Parameters
----------
a : int
Index of the first character to be swapped
b : int
Index of the second character to be swapped
    dancers : str
        A sequence of characters; a new string with the swap applied is returned.
"""
dancer_list = [char for char in dancers]
dancer_list[b], dancer_list[a] = dancer_list[a], dancer_list[b]
return ''.join(dancer_list)
def partner(a, b, dancers):
"""
Swap places of dancers named a and b.
Parameters
----------
a : str
Name of the first character to be swapped
b : str
Name of the second character to be swapped
    dancers : str
        A sequence of characters; a new string with the swap applied is returned.
"""
a_idx = dancers.index(a)
b_idx = dancers.index(b)
dancer_list = [char for char in dancers]
dancer_list[b_idx], dancer_list[a_idx] = dancer_list[a_idx], dancer_list[b_idx]
return ''.join(dancer_list)
def solve(dancers, instructions, charset='abcdefghijklmnop', num_times=1, find_cycle=True):
# print(''.join(dancers))
#
# print(len('instructions'))
cycle_length = -1
for j in range(num_times):
for i, step in enumerate(instructions):
move = step[0]
if move == 's':
x = int(step[1:])
dancers = spin(x, dancers)
elif move == 'x':
a, b = step[1:].split('/')
dancers = exchange(int(a), int(b), dancers)
elif move == 'p':
a, b = step[1:].split('/')
dancers = partner(a, b, dancers)
if find_cycle and dancers == 'abcdefghijklmnop':
return None, None, j+1
permutation = [charset.index(char) for char in dancers]
return dancers, permutation, cycle_length
def test_solution(dancers, charset='abcdefghijklmnop'):
flag = False
#print('testing:', dancers)
for char in charset:
if char not in dancers:
flag = True
print('{0} not in dancers!'.format(char))
if flag:
print(''.join(dancers))
raise ValueError('error')
def permute(n, seq, permutation):
"""
This is the code to permute the solution without having to perform the 10000
instruction sequence.
"""
for i in range(n):
seq = seq[permutation]
return seq
def decode_permutation(chars, permutation):
return ''.join([chars[i] for i in permutation])
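# A hedged usage note (not exercised by the original __main__ below): the
# per-pass permutation returned by solve() can be replayed with numpy fancy
# indexing instead of re-running all instructions, e.g.
#
#     seq = np.array(list('abcdefghijklmnop'))
#     final = ''.join(permute(1000000000 % cycle_length, seq, np.array(permutation_1x)))
#
# Caveat: this positional shortcut is exact only when the instruction list
# contains no 'partner' moves, because those act on dancer names rather than
# positions and therefore do not compose as a fixed positional permutation.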
if __name__ == '__main__':
dancers = 'abcde'
instructions = ['s1', 'x3/4', 'pe/b']
    sol, permutation, _ = solve(dancers, instructions, charset='abcde')
assert(sol == 'baedc')
dancers = 'abcdefghijklmnop'
with open('input.txt', 'r') as f:
instructions = f.read().split(',')
sol, permutation_1x, _ = solve(dancers, instructions)
print('Solution', sol) # iabmedjhclofgknp
sol, _, _ = solve('abcdefghijklmnop', instructions, num_times=75, find_cycle=False)
sol, _, cycle_length = solve(dancers, instructions,
num_times=1000, find_cycle=True)
print('Cycle Length:', cycle_length)
sol, _, _ = solve('abcdefghijklmnop', instructions, num_times=1000000000 % cycle_length)
print('Solution after 1E9 iterations:', sol) # oildcmfeajhbpngk
|
# import gym
# import random
# from config import *
# from cartpole import *
# from NEAT import Node, Edge, innov_maker
# glb_innov = {}
# glb_node_index = {}
# def init_nodes(num_input,num_output):
# #Is_input_node, Index, value, children, edges, move
# input_nodes = [Node(True, i, None, None, None) for i in range(num_input)]
# hidden_nodes = []
# #Output node gets every input node as children
# output_nodes = [Node(False, i, None, input_nodes, None) for i in range(num_input, num_input+num_output)]
# #Setting available moves to the output_nodes
# for i in range(len(output_nodes)):
# output_nodes[i].move = moves[i]
# init_edges(input_nodes, output_nodes)
# return input_nodes,hidden_nodes,output_nodes
# def init_edges(input_node, output_node):
# #Exclusive number for every edge
# for parent in output_node:
# parent.edges = []
# for child in parent.children:
# #Puts every edge into the global node innov dictionary
# innov = innov_maker(glb_innov,child,parent)
# #Puts every node_index into the global node index dictionary
# innov_maker(glb_node_index, parent, child)
# weight = random.uniform(0,1)
# parent.edges.append(Edge(weight, innov,True))
# return None
# def init_genomes(num_agents, num_input, num_output):
# all_agents = []
# for _ in range(num_agents):
# input_nodes,hidden_nodes,output_nodes = init_nodes(num_input, num_output)
# agent = Genome(input_nodes,hidden_nodes,output_nodes,0)
# all_agents.append(agent)
# return all_agents
|
from data_miner import plot_tally
import pytest
d_country = ["Vietnam", "Singapore", "US", "Malaysia"]
d_date = "03-05-2020"
d_timespan = 40
d_scale = ["linear", "log", "log", "linear"]
d_plot_type = "cdra"
def test_A():
assert plot_tally(d_country, d_date, d_timespan, scale=d_scale, plot_type=d_plot_type) == 0
def test_B():
assert plot_tally(d_country, d_date, d_timespan, scale=d_scale, plot_type=d_plot_type, transpose=True) == 0
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class DeploymentScriptsOperations(object):
"""DeploymentScriptsOperations operations.
    You should not instantiate this class directly, but create a Client instance that will create it for you and attach it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client Api version. Constant value: "2020-10-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2020-10-01"
self.config = config
def _create_initial(
self, resource_group_name, script_name, deployment_script, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'scriptName': self._serialize.url("script_name", script_name, 'str', max_length=90, min_length=1)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(deployment_script, 'DeploymentScript')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201]:
raise models.DeploymentScriptsErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DeploymentScript', response)
if response.status_code == 201:
deserialized = self._deserialize('DeploymentScript', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create(
self, resource_group_name, script_name, deployment_script, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates a deployment script.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param script_name: Name of the deployment script.
:type script_name: str
:param deployment_script: Deployment script supplied to the operation.
:type deployment_script:
~azure.mgmt.resource.deploymentscripts.v2020_10_01.models.DeploymentScript
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns DeploymentScript or
ClientRawResponse<DeploymentScript> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.resource.deploymentscripts.v2020_10_01.models.DeploymentScript]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.resource.deploymentscripts.v2020_10_01.models.DeploymentScript]]
:raises:
:class:`DeploymentScriptsErrorException<azure.mgmt.resource.deploymentscripts.v2020_10_01.models.DeploymentScriptsErrorException>`
"""
raw_result = self._create_initial(
resource_group_name=resource_group_name,
script_name=script_name,
deployment_script=deployment_script,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('DeploymentScript', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deploymentScripts/{scriptName}'}
def update(
self, resource_group_name, script_name, tags=None, custom_headers=None, raw=False, **operation_config):
"""Updates deployment script tags with specified values.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param script_name: Name of the deployment script.
:type script_name: str
:param tags: Resource tags to be updated.
:type tags: dict[str, str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DeploymentScript or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.resource.deploymentscripts.v2020_10_01.models.DeploymentScript
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`DeploymentScriptsErrorException<azure.mgmt.resource.deploymentscripts.v2020_10_01.models.DeploymentScriptsErrorException>`
"""
deployment_script = None
if tags is not None:
deployment_script = models.DeploymentScriptUpdateParameter(tags=tags)
# Construct URL
url = self.update.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'scriptName': self._serialize.url("script_name", script_name, 'str', max_length=90, min_length=1)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
if deployment_script is not None:
body_content = self._serialize.body(deployment_script, 'DeploymentScriptUpdateParameter')
else:
body_content = None
# Construct and send request
request = self._client.patch(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DeploymentScriptsErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DeploymentScript', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deploymentScripts/{scriptName}'}
def get(
self, resource_group_name, script_name, custom_headers=None, raw=False, **operation_config):
"""Gets a deployment script with a given name.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param script_name: Name of the deployment script.
:type script_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DeploymentScript or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.resource.deploymentscripts.v2020_10_01.models.DeploymentScript
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`DeploymentScriptsErrorException<azure.mgmt.resource.deploymentscripts.v2020_10_01.models.DeploymentScriptsErrorException>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'scriptName': self._serialize.url("script_name", script_name, 'str', max_length=90, min_length=1)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DeploymentScriptsErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DeploymentScript', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deploymentScripts/{scriptName}'}
def delete(
self, resource_group_name, script_name, custom_headers=None, raw=False, **operation_config):
"""Deletes a deployment script. When operation completes, status code 200
returned without content.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param script_name: Name of the deployment script.
:type script_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`DeploymentScriptsErrorException<azure.mgmt.resource.deploymentscripts.v2020_10_01.models.DeploymentScriptsErrorException>`
"""
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'scriptName': self._serialize.url("script_name", script_name, 'str', max_length=90, min_length=1)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise models.DeploymentScriptsErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deploymentScripts/{scriptName}'}
def list_by_subscription(
self, custom_headers=None, raw=False, **operation_config):
"""Lists all deployment scripts for a given subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DeploymentScript
:rtype:
~azure.mgmt.resource.deploymentscripts.v2020_10_01.models.DeploymentScriptPaged[~azure.mgmt.resource.deploymentscripts.v2020_10_01.models.DeploymentScript]
:raises:
:class:`DeploymentScriptsErrorException<azure.mgmt.resource.deploymentscripts.v2020_10_01.models.DeploymentScriptsErrorException>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_by_subscription.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DeploymentScriptsErrorException(self._deserialize, response)
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.DeploymentScriptPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deploymentScripts'}
def get_logs(
self, resource_group_name, script_name, custom_headers=None, raw=False, **operation_config):
"""Gets deployment script logs for a given deployment script name.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param script_name: Name of the deployment script.
:type script_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ScriptLogsList or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.resource.deploymentscripts.v2020_10_01.models.ScriptLogsList
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`DeploymentScriptsErrorException<azure.mgmt.resource.deploymentscripts.v2020_10_01.models.DeploymentScriptsErrorException>`
"""
# Construct URL
url = self.get_logs.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'scriptName': self._serialize.url("script_name", script_name, 'str', max_length=90, min_length=1)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DeploymentScriptsErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ScriptLogsList', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_logs.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deploymentScripts/{scriptName}/logs'}
def get_logs_default(
self, resource_group_name, script_name, tail=None, custom_headers=None, raw=False, **operation_config):
"""Gets deployment script logs for a given deployment script name.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param script_name: Name of the deployment script.
:type script_name: str
:param tail: The number of lines to show from the tail of the
deployment script log. Valid value is a positive number up to 1000. If
'tail' is not provided, all available logs are shown up to container
instance log capacity of 4mb.
:type tail: int
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ScriptLog or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.resource.deploymentscripts.v2020_10_01.models.ScriptLog or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DeploymentScriptsErrorException<azure.mgmt.resource.deploymentscripts.v2020_10_01.models.DeploymentScriptsErrorException>`
"""
# Construct URL
url = self.get_logs_default.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'scriptName': self._serialize.url("script_name", script_name, 'str', max_length=90, min_length=1)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if tail is not None:
query_parameters['tail'] = self._serialize.query("tail", tail, 'int')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DeploymentScriptsErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ScriptLog', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_logs_default.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deploymentScripts/{scriptName}/logs/default'}
def list_by_resource_group(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Lists deployments scripts.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DeploymentScript
:rtype:
~azure.mgmt.resource.deploymentscripts.v2020_10_01.models.DeploymentScriptPaged[~azure.mgmt.resource.deploymentscripts.v2020_10_01.models.DeploymentScript]
:raises:
:class:`DeploymentScriptsErrorException<azure.mgmt.resource.deploymentscripts.v2020_10_01.models.DeploymentScriptsErrorException>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DeploymentScriptsErrorException(self._deserialize, response)
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.DeploymentScriptPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deploymentScripts'}
|
"""empty message
Revision ID: ad15ef3317a6
Revises:
Create Date: 2019-09-09 07:24:22.448481
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'ad15ef3317a6'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('network_state')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('network_state',
sa.Column('id', sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column('time', postgresql.TIMESTAMP(), autoincrement=False, nullable=True),
sa.Column('most_common_height', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('most_common_count', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('height_distribution', sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column('timeout_count', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('nodes_tested', sa.INTEGER(), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id', name='network_state_pkey')
)
# ### end Alembic commands ###
|
"""Classes to represent and query categorical systems."""
import datetime
import importlib
import importlib.resources
import pathlib
import pickle
import typing
import natsort
import networkx as nx
import pandas
import strictyaml as sy
from black import Mode, format_str
from ruamel.yaml import YAML
from . import data
from ._conversions import Conversion, ConversionSpec
class Category:
"""A single category."""
_strictyaml_schema = sy.Map(
{
"title": sy.Str(),
sy.Optional("comment"): sy.Str(),
sy.Optional("alternative_codes"): sy.Seq(sy.Str()),
sy.Optional("info"): sy.MapPattern(sy.Str(), sy.Any()),
}
)
def __init__(
self,
codes: typing.Tuple[str],
categorization: "Categorization",
title: str,
comment: typing.Optional[str] = None,
info: typing.Optional[dict] = None,
):
self.codes = codes
self.title = title
self.comment = comment
self.categorization = categorization
if info is None:
self.info = {}
else:
self.info = info
self._hash = None
@classmethod
def from_spec(cls, code: str, spec: typing.Dict, categorization: "Categorization"):
codes = [code]
if "alternative_codes" in spec:
codes += spec["alternative_codes"]
del spec["alternative_codes"]
return cls(
codes=tuple(codes),
categorization=categorization,
title=spec["title"],
comment=spec.get("comment", None),
info=spec.get("info", None),
)
def to_spec(self) -> (str, typing.Dict[str, typing.Union[str, dict, list]]):
"""Turn this category into a specification ready to be written to a yaml file.
Returns
-------
(code: str, spec: dict)
Primary code and specification dict
"""
code = self.codes[0]
spec = {"title": self.title}
if self.comment is not None:
spec["comment"] = self.comment
if len(self.codes) > 1:
spec["alternative_codes"] = list(self.codes[1:])
if self.info:
spec["info"] = self.info
return code, spec
def __str__(self) -> str:
return f"{self.codes[0]} {self.title}"
def __eq__(self, other: "Category"):
if not isinstance(other, Category):
return False
return any((x in other.codes for x in self.codes)) and (
self.categorization is other.categorization
or self.categorization.name.startswith(f"{other.categorization.name}_")
or other.categorization.name.startswith(f"{self.categorization.name}_")
or self.categorization.name == other.categorization.name
)
def __repr__(self) -> str:
return f"<{self.categorization.name}: {self.codes[0]!r}>"
def __hash__(self):
if self._hash is None:
self._hash = hash(self.categorization.name + self.codes[0])
return self._hash
def __lt__(self, other):
s = natsort.natsorted((self.codes[0], other.codes[0]))
return s[0] == self.codes[0] and self != other
class HierarchicalCategory(Category):
"""A single category from a HierarchicalCategorization."""
_strictyaml_schema = sy.Map(
{
"title": sy.Str(),
sy.Optional("comment"): sy.Str(),
sy.Optional("alternative_codes"): sy.Seq(sy.Str()),
sy.Optional("info"): sy.MapPattern(sy.Str(), sy.Any()),
sy.Optional("children"): sy.Seq(sy.Seq(sy.Str())),
}
)
def __init__(
self,
codes: typing.Tuple[str],
categorization: "HierarchicalCategorization",
title: str,
comment: typing.Optional[str] = None,
info: typing.Optional[dict] = None,
):
Category.__init__(self, codes, categorization, title, comment, info)
self.categorization = categorization
def to_spec(self) -> (str, typing.Dict[str, typing.Union[str, dict, list]]):
"""Turn this category into a specification ready to be written to a yaml file.
Returns
-------
(code: str, spec: dict)
Primary code and specification dict
"""
code, spec = Category.to_spec(self)
children = []
for child_set in self.children:
children.append(list(sorted((c.codes[0] for c in child_set))))
if children:
spec["children"] = children
return code, spec
@property
def children(self) -> typing.List[typing.Set["HierarchicalCategory"]]:
"""The sets of subcategories comprising this category.
The first set is canonical, the other sets are alternative.
Only the canonical sets are used to calculate the level of a category."""
return self.categorization.children(self)
@property
def parents(self) -> typing.Set["HierarchicalCategory"]:
"""The super-categories where this category is a member of any set of children.
Note that all possible parents are returned, not "canonical" parents.
"""
return self.categorization.parents(self)
@property
def ancestors(self) -> typing.Set["HierarchicalCategory"]:
"""The super-categories where this category or any of its parents is a member
of any set of children, transitively.
Note that all possible ancestors are returned, not only "canonical" ones.
"""
return self.categorization.ancestors(self)
@property
def descendants(self) -> typing.Set["HierarchicalCategory"]:
"""The sets of subcategories comprising this category directly or indirectly.
Note that all possible descendants are returned, not only "canonical" ones."""
return self.categorization.descendants(self)
@property
def level(self) -> int:
"""The level of the category.
The canonical top-level category has level 1 and its children have level 2 etc.
To calculate the level, only the first ("canonical") set of children is
considered for intermediate categories.
"""
return self.categorization.level(self)
class Categorization:
"""A single categorization system.
A categorization system comprises a set of categories, and their relationships as
well as metadata describing the categorization system itself.
Use the categorization object like a dictionary, where codes can be translated
to their meaning using ``cat[code]`` and all codes are available using
``cat.keys()``. Metadata about the categorization is provided in attributes.
    If `pandas` is available, you can access a `pandas.DataFrame` with all
    category codes and their meanings at ``cat.df``.
Attributes
----------
name : str
The unique name/code
references : str
Citable reference(s)
title : str
A short, descriptive title for humans
comment : str
Notes and explanations for humans
institution : str
Where the categorization originates
last_update : datetime.date
The date of the last change
version : str, optional
The version of the Categorization, if there are multiple versions
hierarchical : bool
True if descendants and ancestors are defined
"""
hierarchical: bool = False
_strictyaml_schema = sy.Map(
{
"name": sy.Str(),
"title": sy.Str(),
"comment": sy.Str(),
"references": sy.Str(),
"institution": sy.Str(),
"last_update": sy.Str(),
"hierarchical": sy.Bool(),
sy.Optional("version"): sy.Str(),
"categories": sy.MapPattern(sy.Str(), Category._strictyaml_schema),
}
)
def _add_categories(self, categories: typing.Dict[str, typing.Dict]):
for code, spec in categories.items():
cat = Category.from_spec(code=code, spec=spec, categorization=self)
self._primary_code_map[code] = cat
for icode in cat.codes:
self._all_codes_map[icode] = cat
def __init__(
self,
*,
categories: typing.Dict[str, typing.Dict],
name: str,
title: str,
comment: str,
references: str,
institution: str,
last_update: datetime.date,
version: typing.Optional[str] = None,
):
self._primary_code_map = {}
self._all_codes_map = {}
self.name = name
self.references = references
self.title = title
self.comment = comment
self.institution = institution
self.last_update = last_update
self.version = version
self._add_categories(categories)
# is filled in __init__.py to contain all categorizations
self._cats: typing.Dict[str, "Categorization"] = {}
def __hash__(self):
return hash(self.name)
@classmethod
def from_yaml(
cls, filepath: typing.Union[str, pathlib.Path, typing.IO[bytes]]
) -> "Categorization":
"""Read Categorization from a StrictYaml file."""
try:
yaml = sy.load(filepath.read(), schema=cls._strictyaml_schema)
except AttributeError:
with open(filepath) as fd:
yaml = sy.load(fd.read(), schema=cls._strictyaml_schema)
return cls.from_spec(yaml.data)
@classmethod
def from_spec(cls, spec: typing.Dict[str, typing.Any]) -> "Categorization":
"""Create Categorization from a Dictionary specification."""
if spec["hierarchical"] != cls.hierarchical:
raise ValueError(
"Specification is for a hierarchical categorization, use"
"HierarchicalCategorization.from_spec."
)
last_update = datetime.date.fromisoformat(spec["last_update"])
return cls(
categories=spec["categories"],
name=spec["name"],
title=spec["title"],
comment=spec["comment"],
references=spec["references"],
institution=spec["institution"],
last_update=last_update,
version=spec.get("version", None),
)
@staticmethod
def from_pickle(
filepath: typing.Union[str, pathlib.Path, typing.IO[bytes]]
) -> "Categorization":
"""De-serialize Categorization from a file written by to_pickle.
Note that this uses the pickle module, which executes arbitrary code in the
provided file. Only load from pickle files that you trust."""
return from_pickle(filepath)
@staticmethod
def from_python(
filepath: typing.Union[str, pathlib.Path, typing.IO[bytes]]
) -> "Categorization":
"""De-serialize Categorization from a file written by to_python.
Note that this executes the python cache file. Only load from python cache files
you trust."""
return from_python(filepath)
def to_spec(self) -> typing.Dict[str, typing.Any]:
"""Turn this categorization into a specification dictionary ready to be written
to a yaml file.
Returns
-------
spec: dict
Specification dictionary understood by `from_spec`.
"""
spec = {
"name": self.name,
"title": self.title,
"comment": self.comment,
"references": self.references,
"institution": self.institution,
"hierarchical": self.hierarchical,
"last_update": self.last_update.isoformat(),
}
if self.version is not None:
spec["version"] = self.version
spec["categories"] = {}
for cat in self.values():
code, cat_spec = cat.to_spec()
spec["categories"][code] = cat_spec
return spec
def to_yaml(self, filepath: typing.Union[str, pathlib.Path]) -> None:
"""Write to a YAML file."""
spec = self.to_spec()
yaml = YAML()
yaml.default_flow_style = False
with open(filepath, "w") as fd:
yaml.dump(spec, fd)
def to_python(self, filepath: typing.Union[str, pathlib.Path]) -> None:
"""Write spec to a Python file."""
spec = self.to_spec()
comment = (
"# Do not edit this file. It was auto-generated from the\n"
"# corresponding YAML file.\n"
)
with open(filepath, "w") as f:
f.write(comment)
f.write(f"spec = {format_str(repr(spec), mode=Mode())}")
def to_pickle(self, filepath: typing.Union[str, pathlib.Path]) -> None:
"""Serialize to a file using python's pickle."""
spec = self.to_spec()
with open(filepath, "wb") as fd:
pickle.dump(spec, fd, protocol=4)
def keys(self) -> typing.KeysView[str]:
"""Iterate over the codes for all categories."""
return self._primary_code_map.keys()
def values(self) -> typing.ValuesView[Category]:
"""Iterate over the categories."""
return self._primary_code_map.values()
def items(self) -> typing.ItemsView[str, Category]:
"""Iterate over (primary code, category) pairs."""
return self._primary_code_map.items()
def all_keys(self) -> typing.KeysView[str]:
"""Iterate over all codes for all categories."""
return self._all_codes_map.keys()
def __iter__(self) -> typing.Iterable[str]:
return iter(self._primary_code_map)
def __getitem__(self, code: str) -> Category:
"""Get the category for a code."""
return self._all_codes_map[code]
def __contains__(self, code: str) -> bool:
"""Can the code be mapped to a category?"""
return code in self._all_codes_map
def __len__(self) -> int:
return len(self._primary_code_map)
def __repr__(self) -> str:
return (
f"<Categorization {self.name} {self.title!r} with {len(self)} categories>"
)
def __str__(self) -> str:
return self.name
@property
def df(self) -> "pandas.DataFrame":
"""All category codes as a pandas dataframe."""
titles = []
comments = []
alternative_codes = []
for cat in self.values():
titles.append(cat.title)
comments.append(cat.comment)
alternative_codes.append(cat.codes[1:])
return pandas.DataFrame(
index=self.keys(),
data={
"title": titles,
"comment": comments,
"alternative_codes": alternative_codes,
},
)
def _extend_prepare(
self,
*,
categories: typing.Optional[typing.Dict[str, typing.Dict]] = None,
alternative_codes: typing.Optional[typing.Dict[str, str]] = None,
name: str,
title: typing.Optional[str] = None,
comment: typing.Optional[str] = None,
last_update: typing.Optional[datetime.date] = None,
) -> typing.Dict[str, typing.Any]:
spec = self.to_spec()
spec["name"] = f"{self.name}_{name}"
spec["references"] = ""
spec["institution"] = ""
if title is None:
spec["title"] = f"{self.title} + {name}"
else:
spec["title"] = self.title + title
if comment is None:
spec["comment"] = f"{self.comment} extended by {name}"
else:
spec["comment"] = self.comment + comment
if last_update is None:
spec["last_update"] = datetime.date.today().isoformat()
else:
spec["last_update"] = last_update.isoformat()
if categories is not None:
spec["categories"].update(categories)
if alternative_codes is not None:
for alias, primary in alternative_codes.items():
if "alternative_codes" not in spec["categories"][primary]:
spec["categories"][primary]["alternative_codes"] = []
spec["categories"][primary]["alternative_codes"].append(alias)
return spec
def extend(
self,
*,
categories: typing.Optional[typing.Dict[str, typing.Dict]] = None,
alternative_codes: typing.Optional[typing.Dict[str, str]] = None,
name: str,
title: typing.Optional[str] = None,
comment: typing.Optional[str] = None,
last_update: typing.Optional[datetime.date] = None,
) -> "Categorization":
"""Extend the categorization with additional categories, yielding a new
categorization.
Metadata: the ``name``, ``title``, ``comment``, and ``last_update`` are updated
automatically (see below), the ``institution`` and ``references`` are deleted
and the values for ``version`` and ``hierarchical`` are kept.
You can set more accurate metadata (for example, your institution) on the
returned object if needed.
Parameters
----------
categories: dict, optional
Map of new category codes to their specification. The specification is a
dictionary with the keys "title", optionally "comment", and optionally
"alternative_codes".
alternative_codes: dict, optional
Map of new alternative codes. A dictionary with the new alternative code
as key and existing code as value.
name : str
The name of your extension. The returned Categorization will have a name
of "{old_name}_{name}", indicating that it is an extension of the underlying
Categorization.
title : str, optional
A string to add to the original title. If not provided, " + {name}" will be
used.
comment : str, optional
A string to add to the original comment. If not provided,
" extended by {name}" will be used.
last_update : datetime.date, optional
The date of the last update to this extension. Today will be used if not
provided.
Returns
-------
Extended categorization : Categorization
"""
spec = self._extend_prepare(
name=name,
categories=categories,
title=title,
comment=comment,
last_update=last_update,
alternative_codes=alternative_codes,
)
return Categorization.from_spec(spec)
def __eq__(self, other):
if not isinstance(other, Categorization):
return False
if self.name != other.name:
return False
return self._primary_code_map == other._primary_code_map
def conversion_to(self, other: typing.Union["Categorization", str]) -> Conversion:
"""Get conversion to other categorization.
If conversion rules for this conversion are not included, raises
NotImplementedError."""
if isinstance(other, str):
other_name = other
else:
other_name = other.name
forward_csv_name = f"conversion.{self.name}.{other_name}.csv"
if importlib.resources.is_resource(data, forward_csv_name):
fd = importlib.resources.open_text(data, forward_csv_name)
return ConversionSpec.from_csv(fd).hydrate(cats=self._cats)
reversed_csv_name = f"conversion.{other_name}.{self.name}.csv"
if importlib.resources.is_resource(data, reversed_csv_name):
fd = importlib.resources.open_text(data, reversed_csv_name)
return ConversionSpec.from_csv(fd).hydrate(cats=self._cats).reversed()
raise NotImplementedError(
f"Conversion between {self.name} and {other_name} not yet included."
)
class HierarchicalCategorization(Categorization):
"""In a hierarchical categorization, descendants and ancestors (parents and
children) are defined for each category.
Attributes
----------
total_sum : bool
If the sum of the values of children equals the value of the parent for
extensive quantities. For example, a Categorization containing the Countries in
the EU and the EU could set `total_sum = True`, because the emissions of all
parts of the EU must equal the emissions of the EU. On the contrary, a
categorization of Industries with categories `Power:Fossil Fuels` and
`Power:Gas` which are both children of `Power` must set `total_sum = False`
to avoid double counting of fossil gas.
canonical_top_level_category : HierarchicalCategory
The level of a category is calculated with respect to the canonical top level
category. Commonly, this will be the world total or a similar category. If the
canonical top level category is not set (i.e. is ``None``), levels are not
defined for categories.
"""
hierarchical = True
_strictyaml_schema = sy.Map(
{
"name": sy.Str(),
"title": sy.Str(),
"comment": sy.Str(),
"references": sy.Str(),
"institution": sy.Str(),
"last_update": sy.Str(),
"hierarchical": sy.Bool(),
sy.Optional("version"): sy.Str(),
"total_sum": sy.Bool(),
sy.Optional("canonical_top_level_category"): sy.Str(),
"categories": sy.MapPattern(
sy.Str(), HierarchicalCategory._strictyaml_schema
),
}
)
def _add_categories(self, categories: typing.Dict[str, typing.Dict]):
for code, spec in categories.items():
cat = HierarchicalCategory.from_spec(
code=code, spec=spec, categorization=self
)
self._primary_code_map[code] = cat
self._graph.add_node(cat)
for icode in cat.codes:
self._all_codes_map[icode] = cat
for code, spec in categories.items():
if "children" in spec:
parent = self._all_codes_map[code]
for i, child_set in enumerate(spec["children"]):
for child_code in child_set:
self._graph.add_edge(
parent, self._all_codes_map[child_code], set=i
)
def __init__(
self,
*,
categories: typing.Dict[str, typing.Dict],
name: str,
title: str,
comment: str,
references: str,
institution: str,
last_update: datetime.date,
version: typing.Optional[str] = None,
total_sum: bool,
canonical_top_level_category: typing.Optional[str] = None,
):
self._graph = nx.MultiDiGraph()
Categorization.__init__(
self,
categories=categories,
name=name,
title=title,
comment=comment,
references=references,
institution=institution,
last_update=last_update,
version=version,
)
self.total_sum = total_sum
if canonical_top_level_category is None:
self.canonical_top_level_category: typing.Optional[
HierarchicalCategory
] = None
else:
self.canonical_top_level_category = self._all_codes_map[
canonical_top_level_category
]
def __getitem__(self, code: str) -> HierarchicalCategory:
"""Get the category for a code."""
return self._all_codes_map[code]
def values(self) -> typing.ValuesView[HierarchicalCategory]:
"""Iterate over the categories."""
return self._primary_code_map.values()
def items(self) -> typing.ItemsView[str, HierarchicalCategory]:
"""Iterate over (primary code, category) pairs."""
return self._primary_code_map.items()
@classmethod
def from_spec(
cls, spec: typing.Dict[str, typing.Any]
) -> "HierarchicalCategorization":
"""Create Categorization from a Dictionary specification."""
if spec["hierarchical"] != cls.hierarchical:
raise ValueError(
"Specification is for a non-hierarchical categorization, use"
"Categorization.from_spec."
)
last_update = datetime.date.fromisoformat(spec["last_update"])
return cls(
categories=spec["categories"],
name=spec["name"],
title=spec["title"],
comment=spec["comment"],
references=spec["references"],
institution=spec["institution"],
last_update=last_update,
version=spec.get("version", None),
total_sum=spec["total_sum"],
canonical_top_level_category=spec.get("canonical_top_level_category", None),
)
def to_spec(self) -> typing.Dict[str, typing.Any]:
"""Turn this categorization into a specification dictionary ready to be written
to a yaml file.
Returns
-------
spec: dict
Specification dictionary understood by `from_spec`.
"""
# we can't call Categorization.to_spec here because we need to control ordering
# in the returned dict so that we get nicely ordered yaml files.
spec = {
"name": self.name,
"title": self.title,
"comment": self.comment,
"references": self.references,
"institution": self.institution,
"hierarchical": self.hierarchical,
"last_update": self.last_update.isoformat(),
}
if self.version is not None:
spec["version"] = self.version
spec["total_sum"] = self.total_sum
if self.canonical_top_level_category is not None:
spec[
"canonical_top_level_category"
] = self.canonical_top_level_category.codes[0]
spec["categories"] = {}
for cat in self.values():
code, cat_spec = cat.to_spec()
spec["categories"][code] = cat_spec
return spec
@property
def _canonical_subgraph(self) -> nx.DiGraph:
# TODO: from python 3.8 on, there is functools.cached_property to
# automatically cache this - as soon as we drop python 3.7 support, we can
# easily add it.
return nx.DiGraph(
self._graph.edge_subgraph(
((u, v, 0) for (u, v, s) in self._graph.edges(data="set") if s == 0)
)
)
def _show_subtree_children(
self,
children: typing.Iterable[HierarchicalCategory],
format_func: typing.Callable,
prefix: str,
maxdepth: typing.Optional[int],
) -> str:
children_sorted = natsort.natsorted(children, key=format_func)
r = "".join(
self._show_subtree(
node=child,
prefix=prefix + "│",
format_func=format_func,
maxdepth=maxdepth,
)
for child in children_sorted[:-1]
)
# Last child needs to be called slightly differently
r += self._show_subtree(
node=children_sorted[-1],
prefix=prefix + " ",
last=True,
format_func=format_func,
maxdepth=maxdepth,
)
return r
@staticmethod
def _render_node(
node: HierarchicalCategory,
last: bool,
prefix: str,
format_func: typing.Callable[[HierarchicalCategory], str],
):
formatted = format_func(node)
if prefix:
if last:
return f"{prefix[:-1]}╰{formatted}\n"
else:
return f"{prefix[:-1]}├{formatted}\n"
else:
return f"{formatted}\n"
def _show_subtree(
self,
*,
node: HierarchicalCategory,
prefix="",
last=False,
format_func: typing.Callable[[HierarchicalCategory], str] = str,
maxdepth: typing.Optional[int],
) -> str:
"""Recursively-called function to show a subtree starting at the given node."""
r = self._render_node(node, last=last, prefix=prefix, format_func=format_func)
if maxdepth is not None:
maxdepth -= 1
if maxdepth == 0: # maxdepth reached, nothing more to do
return r
child_sets = node.children
if len(child_sets) == 1:
children = child_sets[0]
if children:
r += self._show_subtree_children(
children=children,
format_func=format_func,
maxdepth=maxdepth,
prefix=prefix,
)
elif len(child_sets) > 1:
prefix += "║"
i = 1
for children in child_sets:
if children:
if i == 1:
r += (
f"{prefix[:-1]}╠╤══ ('{format_func(node)}'s children,"
f" option 1)\n"
)
else:
r += (
f"{prefix[:-1]}╠╕ ('{format_func(node)}'s children,"
f" option {i})\n"
)
r += self._show_subtree_children(
children=children,
format_func=format_func,
maxdepth=maxdepth,
prefix=prefix,
)
i += 1
r += f"{prefix[:-1]}╚═══\n"
return r
def show_as_tree(
self,
*,
format_func: typing.Callable[[HierarchicalCategory], str] = str,
maxdepth: typing.Optional[int] = None,
root: typing.Optional[typing.Union[HierarchicalCategory, str]] = None,
) -> str:
"""Format the hierarchy as a tree.
Starting from the given root, or - if no root is given - the top-level
categories (i.e. categories without parents), the tree of categories that are
        transitive children of the root is shown, with children connected to their
parents using lines. If a parent category has one set of children, the children
are connected to each other and the parent with a simple line. If a parent
        category has multiple sets of children, the sets are connected to the parent with
double lines and the children in a set are connected to each other with simple
lines.
Parameters
----------
format_func: callable, optional
Function to call to format categories for display. Each category is
formatted for display using format_func(category), so format_func should
return a string without line breaks, otherwise the tree will look weird.
By default, str() is used, so that the first code and the title of the
category are used.
maxdepth: int, optional
Maximum depth to show in the tree. By default, goes to arbitrary depth.
root: HierarchicalCategory or str, optional
HierarchicalCategory object or code to use as the top-most category.
If not given, the whole tree is shown, starting from all categories without
parents.
Returns
-------
tree_str: str
Representation of the hierarchy as formatted string. print() it for optimal
viewing.
"""
if root is None:
top_level_nodes = (node for node in self.values() if not node.parents)
else:
if not isinstance(root, HierarchicalCategory):
root = self[root]
top_level_nodes = [root]
return "\n".join(
(
self._show_subtree(
node=top_level_node, format_func=format_func, maxdepth=maxdepth
)
)
for top_level_node in top_level_nodes
)
def extend(
self,
*,
categories: typing.Optional[typing.Dict[str, typing.Dict]] = None,
alternative_codes: typing.Optional[typing.Dict[str, str]] = None,
children: typing.Optional[typing.List[tuple]] = None,
name: str,
title: typing.Optional[str] = None,
comment: typing.Optional[str] = None,
last_update: typing.Optional[datetime.date] = None,
) -> "HierarchicalCategorization":
"""Extend the categorization with additional categories and relationships,
yielding a new categorization.
Metadata: the ``name``, ``title``, ``comment``, and ``last_update`` are updated
automatically (see below), the ``institution`` and ``references`` are deleted
and the values for ``version``, ``hierarchical``, ``total_sum``, and
``canonical_top_level_category`` are kept.
You can set more accurate metadata (for example, your institution) on the
returned object if needed.
Parameters
----------
categories: dict, optional
Map of new category codes to their specification. The specification is a
dictionary with the keys "title", optionally "comment", and optionally
"alternative_codes".
alternative_codes: dict, optional
Map of new alternative codes. A dictionary with the new alternative code
as key and existing code as value.
children: list, optional
List of ``(parent, (child1, child2, …))`` pairs. The given relationships will
be inserted in the extended categorization.
name : str
The name of your extension. The returned Categorization will have a name
of "{old_name}_{name}", indicating that it is an extension of the underlying
Categorization.
title : str, optional
A string to add to the original title. If not provided, " + {name}" will be
used.
comment : str, optional
A string to add to the original comment. If not provided,
" extended by {name}" will be used.
last_update : datetime.date, optional
The date of the last update to this extension. Today will be used if not
provided.
Returns
-------
Extended categorization : HierarchicalCategorization
"""
spec = self._extend_prepare(
name=name,
categories=categories,
title=title,
comment=comment,
last_update=last_update,
alternative_codes=alternative_codes,
)
if children is not None:
for parent, child_set in children:
if "children" not in spec["categories"][parent]:
spec["categories"][parent]["children"] = []
spec["categories"][parent]["children"].append(child_set)
return HierarchicalCategorization.from_spec(spec)
@property
def df(self) -> "pandas.DataFrame":
"""All category codes as a pandas dataframe."""
titles = []
comments = []
alternative_codes = []
children = []
for cat in self.values():
titles.append(cat.title)
comments.append(cat.comment)
alternative_codes.append(cat.codes[1:])
children.append(
tuple(tuple(sorted(c.codes[0] for c in cs)) for cs in cat.children)
)
return pandas.DataFrame(
index=self.keys(),
data={
"title": titles,
"comment": comments,
"alternative_codes": alternative_codes,
"children": children,
},
)
def level(self, cat: typing.Union[str, HierarchicalCategory]) -> int:
"""The level of the given category.
The canonical top-level category has level 1 and its children have level 2 etc.
        To calculate the level, only the first ("canonical") set of children is
        considered initially. Only if no path from the canonical top-level category
        to the given category can be found are all other sets of children considered
        when calculating the level.
"""
if not isinstance(cat, HierarchicalCategory):
return self.level(self[cat])
if not isinstance(self.canonical_top_level_category, HierarchicalCategory):
raise ValueError(
"Can not calculate the level without a canonical_top_level_category."
)
# first use the canonical subgraph for shortest paths
csg = self._canonical_subgraph
try:
sp = nx.shortest_path_length(csg, self.canonical_top_level_category, cat)
except (nx.NetworkXNoPath, nx.NodeNotFound):
try:
sp = nx.shortest_path_length(
self._graph, self.canonical_top_level_category, cat
)
except (nx.NetworkXNoPath, nx.NodeNotFound):
raise ValueError(
f"{cat.codes[0]!r} is not a transitive child of the "
f"canonical top level "
f"{self.canonical_top_level_category.codes[0]!r}."
)
return sp + 1
def parents(
self, cat: typing.Union[str, HierarchicalCategory]
) -> typing.Set[HierarchicalCategory]:
"""The direct parents of the given category."""
if not isinstance(cat, HierarchicalCategory):
return self.parents(self._all_codes_map[cat])
return set(self._graph.predecessors(cat))
def ancestors(
self, cat: typing.Union[str, HierarchicalCategory]
) -> typing.Set[HierarchicalCategory]:
"""All ancestors of the given category, i.e. the direct parents and their
parents, etc."""
if not isinstance(cat, HierarchicalCategory):
return self.ancestors(self._all_codes_map[cat])
return set(nx.ancestors(self._graph, cat))
def children(
self, cat: typing.Union[str, HierarchicalCategory]
) -> typing.List[typing.Set[HierarchicalCategory]]:
"""The list of sets of direct children of the given category."""
if not isinstance(cat, HierarchicalCategory):
return self.children(self._all_codes_map[cat])
children_dict = {}
for (_, child, setno) in self._graph.edges(cat, "set"):
if setno not in children_dict:
children_dict[setno] = []
children_dict[setno].append(child)
return [set(children_dict[x]) for x in sorted(children_dict.keys())]
def descendants(self, cat: typing.Union[str, HierarchicalCategory]):
"""All descendants of the given category, i.e. the direct children and their
children, etc."""
if not isinstance(cat, HierarchicalCategory):
return self.descendants(self._all_codes_map[cat])
return set(nx.descendants(self._graph, cat))
def from_pickle(
filepath: typing.Union[str, pathlib.Path, typing.IO[bytes]]
) -> typing.Union[Categorization, HierarchicalCategorization]:
"""De-serialize Categorization or HierarchicalCategorization from a file written by
to_pickle.
Note that this uses the pickle module, which executes arbitrary code in the
provided file. Only load from pickle files that you trust."""
try:
spec = pickle.load(filepath)
except TypeError:
with open(filepath, "rb") as fd:
spec = pickle.load(fd)
return from_spec(spec)
def from_python(
filepath: typing.Union[str, pathlib.Path, typing.IO[bytes]]
) -> typing.Union[Categorization, HierarchicalCategorization]:
"""Read Categorization or HierarchicalCategorization from a python cache file.
Note that this executes the python cache file. Only load from python cache files
you trust."""
try:
python_code = filepath.read()
filepath.seek(0)
except AttributeError:
with open(filepath) as fd:
python_code = fd.read()
variables = {}
exec(python_code, variables)
spec = variables["spec"]
if spec["hierarchical"]:
cls = HierarchicalCategorization
else:
cls = Categorization
return cls.from_spec(spec)
def from_spec(
spec: typing.Dict[str, typing.Any]
) -> typing.Union[Categorization, HierarchicalCategorization]:
"""Create Categorization or HierarchicalCategorization from a dict specification."""
if spec["hierarchical"]:
return HierarchicalCategorization.from_spec(spec)
else:
return Categorization.from_spec(spec)
def from_yaml(
filepath: typing.Union[str, pathlib.Path, typing.IO[bytes]]
) -> typing.Union[Categorization, HierarchicalCategorization]:
"""Read Categorization or HierarchicalCategorization from a StrictYaml file."""
try:
yaml = sy.load(filepath.read())
filepath.seek(0)
except AttributeError:
with open(filepath) as fd:
yaml = sy.load(fd.read())
hier = yaml.data["hierarchical"]
if hier in ("yes", "true", "True"):
cls = HierarchicalCategorization
elif hier in ("no", "false", "False"):
cls = Categorization
else:
raise ValueError(
f"'hierarchical' must be 'yes', 'true', 'True', 'no', 'false' or 'False',"
f" not {hier!r}."
)
return cls.from_yaml(filepath)
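# --- Usage sketch (illustrative, not part of the library) --------------------
# A minimal, hedged example of how a Categorization defined above might be
# built from an in-memory spec and queried. The spec keys mirror the
# _strictyaml_schema; the codes and titles used here are made up.
if __name__ == "__main__":
    example_spec = {
        "name": "EXAMPLE",
        "title": "Example categorization",
        "comment": "Toy categorization for demonstration purposes",
        "references": "",
        "institution": "",
        "hierarchical": False,
        "last_update": "2021-01-01",
        "categories": {
            "1": {"title": "Energy"},
            "1.A": {"title": "Fuel combustion", "alternative_codes": ["1A"]},
        },
    }
    cats = from_spec(example_spec)
    print(cats["1A"])          # alternative codes resolve to the same Category
    print(list(cats.keys()))   # primary codes only: ['1', '1.A']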
|
from typing import TYPE_CHECKING, Iterable, Optional
from ..discount import DiscountInfo
from ..plugins.manager import get_plugins_manager
if TYPE_CHECKING:
from prices import TaxedMoney
from .models import Checkout, CheckoutLine
def checkout_shipping_price(
*,
checkout: "Checkout",
lines: Iterable["CheckoutLine"],
discounts: Optional[Iterable[DiscountInfo]] = None,
) -> "TaxedMoney":
"""Return checkout shipping price.
    It takes into account all plugins.
"""
return get_plugins_manager().calculate_checkout_shipping(
checkout, lines, discounts or []
)
def checkout_subtotal(
*,
checkout: "Checkout",
lines: Iterable["CheckoutLine"],
discounts: Optional[Iterable[DiscountInfo]] = None,
) -> "TaxedMoney":
"""Return the total cost of all the checkout lines, taxes included.
    It takes into account all plugins.
"""
return get_plugins_manager().calculate_checkout_subtotal(
checkout, lines, discounts or []
)
def checkout_total(
*,
checkout: "Checkout",
lines: Iterable["CheckoutLine"],
discounts: Optional[Iterable[DiscountInfo]] = None,
) -> "TaxedMoney":
"""Return the total cost of the checkout.
    The total is the cost of all lines and shipping fees, minus checkout discounts,
    taxes included.
    It takes into account all plugins.
"""
return get_plugins_manager().calculate_checkout_total(
checkout, lines, discounts or []
)
def checkout_line_total(
*, line: "CheckoutLine", discounts: Optional[Iterable[DiscountInfo]] = None
) -> "TaxedMoney":
"""Return the total price of provided line, taxes included.
It takes in account all plugins.
"""
return get_plugins_manager().calculate_checkout_line_total(line, discounts or [])
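# --- Usage sketch (illustrative only) ----------------------------------------
# A hedged example of how a caller (for example a view or resolver) might
# combine the helpers above. The `checkout`, `lines` and `discounts` objects
# are assumed to be provided by the caller; only the functions defined in this
# module are used.
def example_checkout_summary(checkout, lines, discounts=None):
    """Collect subtotal, shipping and total for a checkout in one dict."""
    return {
        "subtotal": checkout_subtotal(checkout=checkout, lines=lines, discounts=discounts),
        "shipping": checkout_shipping_price(checkout=checkout, lines=lines, discounts=discounts),
        "total": checkout_total(checkout=checkout, lines=lines, discounts=discounts),
    }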
|
# @Author: Manuel Rodriguez <valle>
# @Date: 16-Aug-2017
# @Email: valle.mrv@gmail.com
# @Filename: controllers.py
# @Last modified by: valle
# @Last modified time: 06-Sep-2017
# @License: Apache license vesion 2.0
import os
from valleorm.models import *
class HelperBase(object):
    def __init__(self, JSONQuery, JSONResult, alter_table=False):
        self.JSONResult = JSONResult
        self.JSONQuery = JSONQuery
        self.db = JSONQuery.get("db")
        # alter_table was referenced but never passed in; accept it as an
        # optional flag (default False is an assumption) so it is always defined
        self.alter_table = alter_table
for tb, val in JSONQuery.items():
if tb == "db":
pass
elif type(val) is list:
rows = val
for row in rows:
keys = row.keys()
if len(keys) == 1:
self.action(row, keys[0])
else:
self.action(row, tb)
else:
self.action(val, tb)
def decode_qson(self, qson, tb):
query = []
decoder = {"condition":{}, 'tb':tb, 'childs':{"decoders":[]}}
for col, val in qson.items():
            isWordReserver = col in (
                'columns', 'limit', 'offset', 'query', 'order', 'joins', 'group'
            )
            if isWordReserver:
                # keep each reserved word under its own key; the original code
                # overwrote the whole dict using the undefined name `key`
                decoder["condition"][col] = val
            elif type(val) is dict:
child_decoder = self.decode_qson(val, col)
decoder['childs']["decoders"].append(child_decoder)
else:
packQuery = self.getPackQuery(col, val)
query.append(packQuery)
if 'query' in decoder["condition"] and len(query) > 0:
decoder["condition"]['query'] += " AND "+" AND ".join(query)
elif len(query) > 0:
decoder["condition"]['query'] = " AND ".join(query)
return decoder
def getPackQuery(self, col, val):
if type(val) is unicode:
return col + " LIKE '"+val+"'"
elif type(val) is float:
return col + "="+str(val)
elif type(val) is int:
return col + "="+str(val)
else:
return col + " LIKE '"+val+"'"
class AddHelper(HelperBase):
def action(self, qson, tb):
if not 'add' in self.JSONResult:
self.JSONResult["add"] = {tb:[]}
decoder = self.decode_qson(qson, tb)
row = self.modify_row(decoder)
if row:
row.save()
row_send = row.toDICT()
self.JSONResult["add"][tb].append(row_send)
for dchild in decoder["childs"]["decoders"]:
fieldName = decoder["childs"]['fieldName']
if not fieldName in row_send:
row_send[fieldName] = []
child = self.modify_row(dchild)
if child:
getattr(row, fieldName).add(child)
row_send[fieldName].append(child.toDICT())
def modify_row(self, decoder):
row = Model(decoder['tb'], self.db, decoder['model'])
if 'ID' in decoder["fields"]:
row.loadByPk(decoder['fields']['ID'])
if row.ID == -1: return None
for key, v in decoder["fields"].items():
setattr(row, key, v)
return row
def decode_qson(self, qson, tb):
exists_tabla = False
hasChange = False
if Model.exitsTable(self.db, tb):
model = Model.getModel(self.db, tb)
exists_tabla = True
else:
model = {"fields":[], "relationship": []}
decoder = {'model':model, 'tb':tb, 'fields': {},
'childs': {'fieldName':'', 'decoders':[]}}
for key, v in qson.items():
if not type(v) is list and not type(v) is dict:
if key == "ID":
decoder['fields'][key] = v
else:
default, tipo = self.get_tipo(v)
decoder['fields'][key] = v
field = {
'fieldName': key,
'fieldDato': default,
'fieldTipo': tipo,
}
if exists_tabla:
hasChange = self.compare_and_repare_field(tb, model, field)
else:
model["fields"].append(field)
else:
fieldName = key
relationName = key
tipo = "MANY"
childs = []
if type(v) is dict:
child = v
childs = [v]
elif type(v) is list:
child = v[0]
childs = v
                # only the first item decides whether this is a MANYTOMANY relation
                for kr, vr in child.items():
                    if type(vr) is dict:
                        tipo = "MANYTOMANY"
                        relationName = kr
                    break
rship = {
'fieldName': fieldName,
'relationName': relationName,
'relationTipo': tipo,
}
for child in childs:
if tipo is "MANYTOMANY":
tb = rship['relationName']
child = child[tb]
subdecoder = self.decode_qson(child, rship["relationName"])
decoder["childs"]["fieldName"] = rship["fieldName"]
decoder["childs"]["decoders"].append(subdecoder)
if exists_tabla:
hasChange = self.compare_and_repare_ship(model, rship)
else:
model["relationship"].append(rship)
if hasChange:
Model.alter_model(self.db, tb, model, alter_table=self.alter_table)
return decoder
def compare_and_repare_field(self, tb, model, field):
hasChange = False
key = field['fieldName']
        search = [f for f in model['fields'] if f['fieldName'] == key]
if len(search) <= 0:
hasChange = True
model['fields'].append(field)
Model.alter(dbName=self.db, tableName=tb, field=field,
alter_table=self.alter_table)
return hasChange
def compare_and_repare_ship(self, model, qrelation):
hasChange = False
key = qrelation['fieldName']
        search = [f for f in model['relationship'] if f['fieldName'] == key]
if len(search) <= 0:
hasChange = True
model['relationship'].append(qrelation)
return hasChange
def get_tipo(self, val):
val = self.can_convert(val, op='int')
val = self.can_convert(val, op='float')
if type(val) is unicode:
return ("None", "TEXT")
elif type(val) is float:
return (None, "REAL")
elif type(val) is int:
return (None, "INTEGER")
else:
return ("None", "TEXT")
    def can_convert(self, value, op='int'):
        try:
            if type(value) is unicode:
                if op == 'int':
                    value = int(value)
                if op == 'float' and value.find(".") > 0:
                    value = float(value)
        except ValueError:
            pass
        # always return the (possibly converted) value; the original fell
        # through and returned None for non-unicode input
        return value
class GetHelper(HelperBase):
def action(self, qson, tb):
if not 'get' in self.JSONResult:
self.JSONResult["get"] = {tb:[]}
if not Model.exitsTable(self.db, tb):
return ''
decoder = self.decode_qson(qson, tb)
row = Model(tb, self.db)
rows = row.getAll(decoder['condition'])
for r in rows:
row_send = r.toDICT()
self.JSONResult['get'][tb].append(row_send)
for dchild in decoder["childs"]["decoders"]:
childs = getattr(r, dchild['tb']).get(dchild["condition"])
row_send[dchild['tb']] = Model.toArrayDict(childs)
class RmHelper(HelperBase):
def action(self, qson, tb):
if not 'rm' in self.JSONResult:
self.JSONResult["rm"] = {tb:[]}
hasChild = False
if not Model.exitsTable(self.db, tb):
return ''
decoder = self.decode_qson(qson, tb)
row = Model(tb, self.db)
rows = row.getAll(decoder['condition'])
for r in rows:
row_send = r.toDICT()
for dchild in decoder["childs"]["decoders"]:
childs = getattr(r, dchild['tb']).get(dchild["condition"])
hasChild = True
row_send[dchild['tb']] = Model.removeRows(childs)
if hasChild:
row_send = {"ID": r.ID, "remove": "True"}
r.remove()
hasChild = False
            self.JSONResult['rm'][tb].append(row_send)
class QSonHelper(object):
    def __init__(self, default_db, alter_table=False):
        self.JSONResult = {}
        self.default_db = default_db
        # forwarded to AddHelper below; the False default is an assumption,
        # since the original referenced an undefined `alter_table` name
        self.alter_table = alter_table
def decode_qson(self, qson):
for name in qson.keys():
if "add" == name:
QSONRequire = qson.get("add")
QSONRequire["db"] = self.default_db
                AddHelper(JSONQuery=QSONRequire,
                          JSONResult=self.JSONResult, alter_table=self.alter_table)
if "get" == name:
QSONRequire = qson.get("get")
QSONRequire["db"] = self.default_db
GetHelper(JSONQuery=QSONRequire,
JSONResult=self.JSONResult)
if "rm" == name:
QSONRequire = qson.get("rm")
QSONRequire["db"] = self.default_db
RmHelper(JSONQuery=QSONRequire,
JSONResult=self.JSONResult)
return self.JSONResult
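# --- Usage sketch (illustrative only) ----------------------------------------
# The request shape below is inferred from the decoding logic above (top-level
# "add"/"get"/"rm" keys, each mapping table names to column/value pairs); the
# database name and table used here are made up, so this is a hedged sketch
# rather than the project's documented API.
#
#   helper = QSonHelper(default_db="example.db")
#   result = helper.decode_qson({
#       "add": {"clientes": {"nombre": "valle", "edad": 30}},
#       "get": {"clientes": {"nombre": "valle"}},
#   })
#   # `result` collects the rows touched by each helper, e.g.
#   # {"add": {"clientes": [...]}, "get": {"clientes": [...]}}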
|
#! /usr/bin/python
# -*-coding:utf-8-*-
import mmap
from ctypes import *
# This module only uses the mmap library provided by Python and does not call C
# directly, but mmap may itself be implemented in C, so the ctypes library is imported.
# It sets up the memory mapping and provides memory read/write interfaces.
class dev_mem:
DEVNAME = '/dev/mem'
def __init__(self,BASE,LEN):
self.len = LEN
self.base = BASE
self.fd = open(dev_mem.DEVNAME,'r+b',buffering=0)
self.mmap = mmap.mmap(self.fd.fileno(),LEN, offset=BASE)
def ioread(self,addr):
buf = self.memread(addr,1)
return buf[0]
def iowrite(self,addr,d):
buf = (c_uint*1)()
buf[0] = d
self.memwrite(addr,buf)
def memread(self,addr,len):
buf = (c_uint*len)()
self.mmap.seek(addr)
        # seek() sets the mmap's current position.
memmove(buf,self.mmap.read(4*len),4*len)
return buf
def bufread(self,addr,len):
self.mmap.seek(addr)
buf=self.mmap.read(len)
return buf
def SetOffset(self,offset):
self.mmap.seek(offset)
def memwrite(self,addr,buf):
self.mmap.seek(addr)
self.mmap.write(buf)
def deinit(self):
self.mmap.close()
def main():
uut = dev_mem(0xfffc0000,0x10000)
    print(uut.mmap[:5])  # dump the first five mapped bytes as a quick smoke test
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
"""
lantz.drivers.labjack._internal
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Python drivers provided by LabJack.
:copyright: 2015 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
|
"""Logging module for sentinel"""
import logging
import sys
def set_logging(level=None):
    """Set up a logger that writes to STDOUT."""
    logger = logging.getLogger('STDOUT')
    out_hdlr = logging.StreamHandler(sys.stdout)
    out_hdlr.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    logger.addHandler(out_hdlr)
    # getLevelName(None) returns the string "Level None", which setLevel rejects;
    # fall back to INFO when no level is given
    logger.setLevel(logging.getLevelName(level) if level else logging.INFO)
return logger
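# --- Usage sketch ------------------------------------------------------------
# Minimal example of wiring the logger up; the "INFO" level is an arbitrary
# choice for illustration.
if __name__ == "__main__":
    log = set_logging("INFO")
    log.info("sentinel logger configured")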
|
from manti_by.apps.core.services import (
convert_to_mp3_preview,
convert_to_ogg_preview,
convert_to_ogg_release,
)
from manti_by.apps.core.utils import get_rq_queue
queue = get_rq_queue()
def generate_preview_for_post(post):
if not post.release:
return
if not post.mp3_preview_ready:
queue.enqueue(convert_to_mp3_preview, post.id)
if not post.ogg_preview_ready:
queue.enqueue(convert_to_ogg_preview, post.id)
if not post.ogg_release_ready:
queue.enqueue(convert_to_ogg_release, post.id)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2017 Graham Pugh
# Copyright 2019 Matthew Warren / haircut
# Copyright 2020 Everette Allen
#
# Based on the 'Slacker' PostProcessor by Graham R Pugh
# https://grahamrpugh.com/2017/12/22/slack-for-autopkg-jssimporter.html
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
import requests
from autopkglib import Processor, ProcessorError
# Set the webhook_url to the one provided by Hangouts Chat
# See https://developers.google.com/hangouts/chat/how-tos/webhooks
__all__ = ["HangoutsChatJPUNotifier"]
class HangoutsChatJPUNotifier(Processor):
description = ("Posts a Card notification to a Hangouts Chat room"
"via webhook based on output of a JamfPackageUploader run.")
input_variables = {
"JSS_URL": {
"required": False,
"description": ("JSS_URL.")
},
"category": {
"required": False,
"description": ("Package Category.")
},
"version": {
"required": False,
"description": ("Package Version.")
},
"pkg_name": {
"required": False,
"description": ("Title (NAME)")
},
"pkg_path": {
"required": False,
"description": ("The created package.")
},
"jamfpackageuploader_summary_result": {
"required": False,
"description": ("Description of interesting results.")
},
"hangoutschatjpu_webhook_url": {
"required": False,
"description": ("Hangouts Chat webhook url.")
},
"hangoutschatjpu_should_report": {
"required": False,
"description": ("Hangouts Chat Notifier should always report or not.")
}
}
output_variables = {
}
__doc__ = description
def main(self):
JSS_URL = self.env.get("JSS_URL")
webhook_url = self.env.get("hangoutschatjpu_webhook_url")
        # env.get() does not raise on a missing key, so default directly
        should_report = self.env.get("hangoutschatjpu_should_report", False)
# JPU Summary
try:
jamfpackageuploader_summary_result = self.env.get("jamfpackageuploader_summary_result")
version = jamfpackageuploader_summary_result["data"]["version"]
category = jamfpackageuploader_summary_result["data"]["category"]
pkg_name = jamfpackageuploader_summary_result["data"]["pkg_name"]
pkg_path = jamfpackageuploader_summary_result["data"]["pkg_path"]
pkg_status = jamfpackageuploader_summary_result["data"]["pkg_status"]
pkg_date = jamfpackageuploader_summary_result["data"]["pkg_date"]
JPUTitle = "New Item Upload Attempt to JSS"
JPUIcon = "STAR"
except Exception as e:
print(e)
category = "Unknown"
version = "Unknown"
pkg_name = "Unknown"
pkg_path = "Unknown"
pkg_status = "Unknown"
pkg_date = "unknown"
JPUTitle = "Upload Status Unknown"
JPUIcon = "DESCRIPTION"
# VirusTotal data
# set VIRUSTOTAL_ALWAYS_REPORT to true to report even if no new package
try:
virus_total_analyzer_summary_result = self.env.get("virus_total_analyzer_summary_result")
vtname = virus_total_analyzer_summary_result["data"]["name"]
ratio = virus_total_analyzer_summary_result["data"]["ratio"]
permalink = virus_total_analyzer_summary_result["data"]["permalink"]
        except Exception:
ratio = "Not Checked"
print("****HangoutsChatJPU Information Summary: ")
print("JSS address: %s" % JSS_URL)
print("Package: %s" % pkg_name)
print("Path: %s" % pkg_path)
print("Version: %s" % version)
print("Category: %s" % category)
print("Status: %s" % pkg_status)
print("TimeStamp: %s" % pkg_date)
hangoutschat_data = {
"cards": [
{
"header": {
"title": JPUTitle,
"subtitle": JSS_URL
},
"sections": [
{
"widgets": [
{
"keyValue": {
"topLabel": "Title",
"content": pkg_name,
"icon": JPUIcon
}
},
{
"keyValue": {
"topLabel": "Version",
"content": version
}
},
{
"keyValue": {
"topLabel": "Category",
"content": category
}
},
{
"keyValue": {
"topLabel": "Status",
"content": pkg_status
}
},
{
"keyValue": {
"topLabel": "Virus Total Result",
"content": ratio
}
},
{
"keyValue": {
"topLabel": "TimeStamp",
"content": pkg_date
}
}
]
}
]
}
]
}
if not ("Unchanged" in pkg_status) or should_report:
response = requests.post(webhook_url, json=hangoutschat_data)
if response.status_code != 200:
raise ValueError(
'Request to Hangouts Chat returned an error %s, the response is:\n%s'
% (response.status_code, response.text)
)
if __name__ == "__main__":
processor = HangoutsChatJPUNotifier()
processor.execute_shell()
|
import matplotlib.pyplot as plt
from pyhdx.fileIO import csv_to_protein
from pyhdx.alignment import align_dataframes
from matplotlib.colors import to_hex
from functions.align import alignments
from functions.base import *
from functions.formatting import *
from functions.logging import write_log
from functions.plotting import plot_aligned
import pandas as pd
write_log(__file__)
output = 'save'
output_dir = current_dir / 'figures' / 'Supplement'
output_dir.mkdir(parents=True, exist_ok=True)
fname = 'Fig_SI_7_alignments_metric'
fit_kwargs = settings_dict['ecsecb_mtsecb']
output_folder = 'ecSecB_mtSecB'
states = ['ecSecB', 'mtSecB']
labels = [r'$\it{ec}$SecB', r'$\it{mt}$SecB']
# List of fit results: secondary structure
"""
List of input dataframes by fitting method
Batch fit (secondary structure alignment)
Batch fit (clustal)
Single fit
Batch fit (alignment by residue number)
"""
fit_result_dict = {}
df = csv_to_protein(fitresults_dir / 'batch_fits' / output_folder / 'secondary_structure' / f'fit_result.csv')
df.rename(columns={'SecB WT apo': 'ecSecB', 'SecB_apo': 'mtSecB'}, inplace=True)
fit_result_dict['Secondary Structure'] = df
df = csv_to_protein(fitresults_dir / 'batch_fits' / output_folder / 'clustal' / f'fit_result.csv')
df.rename(columns={'SecB WT apo': 'ecSecB', 'SecB_apo': 'mtSecB'}, inplace=True)
fit_result_dict['Clustal Alignment'] = df
proteins = {state: csv_to_protein(fitresults_dir / 'ecSecB_mtSecB_single' / state / 'fit_result.csv').df for state in states}
df = pd.concat(proteins.values(), keys=proteins.keys(), axis=1)
fit_result_dict['Single fits'] = df
df = csv_to_protein(fitresults_dir / 'batch_fits' / output_folder / 'unaligned' / f'fit_result.csv')
df.rename(columns={'SecB WT apo': 'ecSecB', 'SecB_apo': 'mtSecB'}, inplace=True)
fit_result_dict['Unaligned'] = df
names = ['Single fits', 'Secondary Structure', 'Clustal Alignment', 'Unaligned']
fig, axes = pplt.subplots(ncols=1, nrows=4, aspect=7, axwidth=160/25.4, sharey=1)
ax_iter = iter(axes)
colors = ['pdark_blue', 'pdark_green']
format_kwargs = [
    {'color': color_dict['pdark_blue'], 'label': r'$\it{ec}$SecB'},
    {'edgecolors': color_dict['pdark_green'], 'facecolors': 'none', 'label': r'$\it{mt}$SecB'}
]
#indices = [0, 1, 3, 5]
#for idx, name in zip(indices, names):
for name in names:
df = fit_result_dict[name]
df_dict = {state: df[state] for state in states}
aligned_dataframe = align_dataframes(df_dict, alignments['secondary_structure'])
diffs = (aligned_dataframe[states[0]]['deltaG'] - aligned_dataframe[states[1]]['deltaG']).abs()
title = f'{name}, Mean absolute difference: {diffs.mean()*1e-3:.2f} kJ/mol'
ax = next(ax_iter)
#ax = axes[idx]
for kw, state in zip(format_kwargs, states):
protein = aligned_dataframe[state]
ax.axvspan(101, 110, color='#e8d261', alpha=0.5, lw=0, zorder=-10)
ax.axvspan(173, 187, color='#e8d261', alpha=0.5, lw=0, zorder=-10)
yvals = protein['deltaG']*1e-3
ax.errorbar(protein.index, yvals, yerr=protein['covariance'] * 1e-3, **errorbar_kwargs, zorder=-1)
ax.scatter(protein.index, yvals, **scatter_kwargs, **kw)
ax.format(title=title)
axes.format(ylim=(45, 0), ylabel=dG_ylabel, xlabel='Alignment index')
hs = axes[0].get_legend_handles_labels()
axes[0].legend(*hs, loc='ul')
if output == 'show':
plt.show()
elif output == 'save':
plt.savefig(output_dir / f'{fname}.png')
plt.savefig(output_dir / f'{fname}.pdf')
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
# Register your models here.
from .models import Board
admin.site.register(Board)
|
import torch
from torch import Tensor
class BPMLLLoss(torch.nn.Module):
def __init__(self, bias=(1, 1)):
super(BPMLLLoss, self).__init__()
self.bias = bias
assert len(self.bias) == 2 and all(map(lambda x: isinstance(x, int) and x > 0, bias)), \
"bias must be positive integers"
def forward(self, c: Tensor, y: Tensor) -> Tensor:
r"""
compute the loss, which has the form:
        L = \sum_{i=1}^{m} \frac{1}{|Y_i| \cdot |\bar{Y}_i|} \sum_{(k, l) \in Y_i \times \bar{Y}_i} \exp(-c^i_k + c^i_l)
:param c: prediction tensor, size: batch_size * n_labels
:param y: target tensor, size: batch_size * n_labels
:return: size: scalar tensor
"""
y = y.float()
y_bar = -y + 1
y_norm = torch.pow(y.sum(dim=(1,)), self.bias[0])
y_bar_norm = torch.pow(y_bar.sum(dim=(1,)), self.bias[1])
        # both norms must be non-zero for every instance, otherwise the division below yields inf
        assert torch.all(y_norm != 0) and torch.all(y_bar_norm != 0), "an instance cannot have none or all the labels"
return torch.mean(1 / torch.mul(y_norm, y_bar_norm) * self.pairwise_sub_exp(y, y_bar, c))
def pairwise_sub_exp(self, y: Tensor, y_bar: Tensor, c: Tensor) -> Tensor:
r"""
        compute \sum_{(k, l) \in Y_i \times \bar{Y}_i} \exp(-c^i_k + c^i_l)
"""
truth_matrix = y.unsqueeze(2).float() @ y_bar.unsqueeze(1).float()
exp_matrix = torch.exp(c.unsqueeze(1) - c.unsqueeze(2))
return (torch.mul(truth_matrix, exp_matrix)).sum(dim=(1, 2))
def hamming_loss(c: Tensor, y: Tensor, threshold=0.8) -> Tensor:
"""
compute the hamming loss (refer to the origin paper)
:param c: size: batch_size * n_labels, output of NN
:param y: size: batch_size * n_labels, target
:return: Scalar
"""
assert 0 <= threshold <= 1, "threshold should be between 0 and 1"
p, q = c.size()
return 1.0 / (p * q) * (((c > threshold).int() - y) != 0).float().sum()
def one_errors(c: Tensor, y: Tensor) -> Tensor:
    """
    compute the one-error: the fraction of instances whose top-ranked label is
    not among the true labels
    """
    p, _ = c.size()
    # index each row at its own top-ranked label (the original indexed row 0 only)
    top = torch.argmax(c, dim=1)
    return (y[torch.arange(p), top] != 1).float().sum() / p
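# --- Usage sketch (illustrative only) ----------------------------------------
# A small, hedged example exercising the loss and metrics above on random
# multi-label data. The shapes, seed and threshold are arbitrary choices.
if __name__ == "__main__":
    torch.manual_seed(0)
    batch_size, n_labels = 4, 6
    scores = torch.randn(batch_size, n_labels)             # raw network outputs
    targets = torch.randint(0, 2, (batch_size, n_labels))  # 0/1 label matrix
    # ensure no instance has none or all of the labels, as BPMLLLoss requires
    targets[:, 0] = 1
    targets[:, -1] = 0
    criterion = BPMLLLoss()
    print("BP-MLL loss:", criterion(scores, targets).item())
    print("Hamming loss:", hamming_loss(scores, targets, threshold=0.5).item())
    print("One-error:", one_errors(scores, targets).item())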
|
import json
import logging
import os
import pysftp
import click
def get_login_details(login_file):
try:
login_details = json.load(login_file)
username = login_details["username"]
password = login_details["password"]
except Exception as e:
logging.error("Failed to process login file.")
raise
return (username, password)
def recursive_list(sftp_conn, dir):
curr_list = []
def appendToList(item):
curr_list.append(item)
sftp_conn.walktree(dir, appendToList, appendToList, appendToList)
return curr_list
def setup_local_dir(local_data_dir):
'''Check that the specified data directory exists, and generate csv
and pdf directories within it if they do not already exist.'''
if not os.path.exists(local_data_dir):
logging.error("Error: Specified local data directory does not exist: " +
local_data_dir)
# FIXME: Is this the right approach (raising an IOError)?
raise IOError("Local data directory does not exist: " + local_data_dir)
# Make local csv and pdf directories if they do not exist:
csv_dir = os.path.join(local_data_dir, "csv")
pdf_dir = os.path.join(local_data_dir, "pdf")
if not os.path.exists(csv_dir):
os.mkdir(csv_dir)
if not os.path.exists(pdf_dir):
os.mkdir(pdf_dir)
return (csv_dir, pdf_dir)
def download_files(conn, remote_files, local_dir):
'''Download the specified remote files to the specified local directory
using the given sftp connection.'''
logging.info("Downloading {} files to {}".format(len(remote_files), local_dir))
for curr_file in remote_files:
remote_file_name = os.path.split(curr_file)[1]
try:
logging.info("{}".format(curr_file))
conn.get(curr_file, localpath=os.path.join(local_dir, remote_file_name))
except Exception:
logging.error("ERROR: File failed to download: {}".format(curr_file))
raise
@click.command()
@click.option('--referrals-login', type=str, default=os.path.expanduser("~/.refslogin"),
help="Location of file with login credentials for the referrals server.")
@click.option('--local-data-dir', type=str, required=True,
default='')
@click.option('--remote-data-dir', type=str, required=True)
@click.pass_context
def fetch(ctx, referrals_login, local_data_dir, remote_data_dir):
'''Copy new referrals from the specified folder on the remote sftp server to the local data directory.
Recursively identify all csv and pdf files in the remote data folder and if any are new then copy them
to respective csv and pdf folders within the local data folder. Generates the csv and pdf folders if
they do not already exist.
'''
logging.info("Running fetch: Fetching from {} to {}".format(remote_data_dir, local_data_dir))
with open(referrals_login) as f:
(username, password) = get_login_details(f)
# Identify all relevant remote files:
conn = pysftp.Connection('kundftp.biobank.ki.se', username=username, password=password)
remote_listing = recursive_list(conn, remote_data_dir)
remote_csv = list(filter(lambda curr_path: curr_path.endswith('.csv'), remote_listing))
remote_pdf = list(filter(lambda curr_path: curr_path.endswith('.pdf'), remote_listing))
# Check and setup specified local directory:
(csv_dir, pdf_dir) = setup_local_dir(local_data_dir)
# Identify all relevant local files:
local_csv = set([filename for dir_path, dir_name, filenames in
os.walk(csv_dir) for filename in filenames])
local_pdf = set([filename for dir_path, dir_name, filenames in
os.walk(pdf_dir) for filename in filenames])
# Obtain a list of csv and pdf files present remotely but not locally:
csv_to_download = list(filter(lambda remote_file: not(os.path.split(remote_file)[1] in local_csv), remote_csv))
pdf_to_download = list(filter(lambda remote_file: not(os.path.split(remote_file)[1] in local_pdf), remote_pdf))
# Download the resulting files to the respective local csv and pdf folders:
download_files(conn, csv_to_download, csv_dir)
download_files(conn, pdf_to_download, pdf_dir)
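# --- Usage sketch (illustrative only) ----------------------------------------
# The command above is normally registered on a click group elsewhere in the
# project (hence @click.pass_context). Invoked on its own it would look roughly
# like the following; the script name and paths are placeholders, and the
# login file must contain {"username": ..., "password": ...}.
#
#   python fetch_referrals.py --referrals-login ~/.refslogin \
#       --local-data-dir /data/referrals --remote-data-dir /outgoing/referrals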
|
"""
Copyright (c) 2016-2017 Dell Inc. or its subsidiaries. All Rights Reserved.
"""
import fit_path # NOQA: unused import
import fit_common
import flogging
from common import api_utils
from nosedep import depends
from config.api2_0_config import config
from obm_settings import obmSettings
from nose.plugins.attrib import attr
logs = flogging.get_loggers()
@attr(regression=False, smoke=True, obm_api2_tests=True)
class OBMTests(fit_common.unittest.TestCase):
def setUp(self):
self.__client = config.api_client
# OBM ipmi-obm-service currently applies to compute nodes
def test_get_compute_nodes(self):
nodes = api_utils.api_node_select(self.__client, node_type='compute')
logs.info(" List of compute nodes: %s", nodes)
self.assertNotEqual(0, len(nodes), msg='ipmi-obm-service - Node list was empty!')
# @test(groups=['obm_api2.tests', 'set-ipmi-obm_api2'], depends_on_groups=['nodes_api2.tests'])
@depends(after='test_get_compute_nodes')
def test_setup_ipmi_obm_api2(self):
# """ Setup IPMI OBM settings with PATCH:/nodes """
self.assertEqual(len(obmSettings().setup_nodes(service_type='ipmi-obm-service')), 0)
# @test(groups=['obm_api2.tests', 'check-obm_api2'], depends_on_groups=['set-ipmi-obm_api2'])
@depends(after='test_setup_ipmi_obm_api2')
def test_check_ipmi_obm_api2_settings(self):
# """ Checking IPMI OBM settings GET:/nodes """
self.assertEqual(len(obmSettings().check_nodes(service_type='ipmi-obm-service')), 0,
msg='There are missing IPMI OBM settings!')
# OBM ipmi-snmp-service currently applies to switch and pdu nodes
def test_get_switch_nodes(self):
nodes = api_utils.api_node_select(self.__client, node_type='switch')
logs.info(" List of switch nodes: %s", nodes)
pdu_nodes = api_utils.api_node_select(self.__client, node_type='pdu')
logs.info(" List of pdu nodes: %s", pdu_nodes)
        nodes.extend(pdu_nodes)
self.assertNotEqual(0, len(nodes), msg='snmp-obm-service - Node list was empty!')
# @test(groups=['obm_api2.tests', 'set-snmp-obm_api2'], depends_on_groups=['nodes_api2.tests'])
@depends(after='test_get_switch_nodes')
def test_setup_snmp_obm_api2(self):
# """ Setup SNMP OBM settings with PATCH:/nodes """
self.assertEqual(len(obmSettings().setup_nodes(service_type='snmp-obm-service')), 0)
# @test(groups=['obm_api2.tests', 'check-obm_api2'], depends_on_groups=['set-snmp-obm_api2'])
@depends(after='test_setup_snmp_obm_api2')
def test_check_snmp_obm_settings(self):
# """ Checking SNMP OBM settings GET:/nodes """
self.assertEqual(len(obmSettings().check_nodes(service_type='snmp-obm-service')), 0,
msg='There are missing SNMP OBM settings!')
|
#!/usr/bin/env python3
# Copyright (c) 2019 Arm Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Entry point for save-api-key action."""
from mbl.cli.utils.store import Store
from mbl.cli.utils.cloudapi import valid_api_key
def execute(args):
"""Execute the save-api-key action."""
store_handle = Store("user")
if not valid_api_key(args.key):
raise ValueError("API key not recognised by Pelion Device Management.")
store_handle.api_key = args.key
store_handle.save()
|
"""
Basic unit tests for the awbw module on select sample replays.
To run:
python -m unittest -v
"""
import unittest
from .game import DefaultDict
# pylint: disable=no-self-use
class TestDefaultDict(unittest.TestCase):
"""Tests for the DefaultDict class"""
class ColorDict(DefaultDict):
"""Example DefaultDict"""
ALLOWED_DATA = {
"r": 0,
"g": 0,
"b": 0,
}
def test_color_dict(self):
"""Test that we can create and use ColorDict as a dict"""
test_dict = TestDefaultDict.ColorDict(r=100)
assert test_dict["r"] == 100
assert test_dict["b"] == 0
assert test_dict["g"] == 0
test_dict = test_dict | { "r": 255, "b": 255 }
assert test_dict["r"] == 255
assert test_dict["b"] == 255
assert test_dict["g"] == 0
test_dict = TestDefaultDict.ColorDict(r=123, b=45, g=67)
assert test_dict["r"] == 123
assert test_dict["b"] == 45
assert test_dict["g"] == 67
test_dict = TestDefaultDict.ColorDict({"r": 15, "b": 22, "g": 81})
assert test_dict["r"] == 15
assert test_dict["b"] == 22
assert test_dict["g"] == 81
zero_dict = TestDefaultDict.ColorDict()
assert zero_dict["r"] == 0
assert zero_dict["b"] == 0
assert zero_dict["g"] == 0
def test_bad_keys(self):
"""Test that an exception is raised when an invalid key is used"""
with self.assertRaises(KeyError):
DefaultDict(r=100)
with self.assertRaises(KeyError):
DefaultDict({"r":100})
with self.assertRaises(KeyError):
TestDefaultDict.ColorDict(pi=3.1415)
with self.assertRaises(KeyError):
TestDefaultDict.ColorDict({"pi":3.1415})
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
from secretpy import Atbash
from secretpy import alphabets
import unittest
class TestAtbash(unittest.TestCase):
alphabet = (
alphabets.ENGLISH,
alphabets.RUSSIAN,
alphabets.GERMAN,
alphabets.SPANISH,
alphabets.JAPANESE_HIRAGANA
)
key = 0
plaintext = (
u"text",
u"текст",
u"textnachtricht",
u"unmensajedetexto",
u"だやぎへぐゆぢ")
ciphertext = (
u"gvcg",
u"мъфнм",
u"kzgkqßöwkmvöwk",
u"fnñvnhzqvwvgvcgl",
u"ばさりづらごほ")
def test_encrypt(self):
for i, alphabet in enumerate(self.alphabet):
enc = Atbash().encrypt(self.plaintext[i], self.key, alphabet)
self.assertEqual(enc, self.ciphertext[i])
def test_decrypt(self):
for i, alphabet in enumerate(self.alphabet):
dec = Atbash().decrypt(self.ciphertext[i], self.key, alphabet)
self.assertEqual(dec, self.plaintext[i])
if __name__ == '__main__':
unittest.main()
|
from dataclasses import dataclass
from typing import Union
import math
import numpy as np
import statistics
@dataclass
class Stat:
mean: Union[np.ndarray, float]
std: Union[np.ndarray, float]
n: int
def __repr__(self) -> str:
return f"Stat(mean: {self.mean}, std: {self.std}, n: {self.n})"
class StatTracker:
def __init__(self):
self.sum = 0
self.sqsum = 0
self.n = 0
def add(self, v: float):
if isinstance(v, np.ndarray):
v = v.astype(np.float32)
self.sum = self.sum + v
self.sqsum = self.sqsum + v**2
self.n += 1
def get(self) -> Stat:
assert self.n > 0
mean = self.sum / self.n
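        # Unbiased sample variance from the running sums: E[x^2] - E[x]^2 is the population
        # variance; the n/(n-1) factor applies Bessel's correction (variance is 0 for n == 1).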
var = (self.sqsum / self.n - mean ** 2) * self.n/(self.n-1) if self.n>1 else 0
return Stat(mean, np.sqrt(np.maximum(var,0)), self.n)
def __repr__(self) -> str:
return self.get().__repr__()
def __add__(self, other):
res = StatTracker()
res.sum = other.sum + self.sum
res.sqsum = other.sqsum + self.sqsum
res.n = other.n + self.n
return res
class MedianTracker:
def __init__(self):
self.elems = []
def add(self, v: float):
if isinstance(v, np.ndarray):
v = v.astype(np.float32)
self.elems.append(v)
def get(self) -> float:
assert len(self.elems) > 0
return statistics.median(self.elems)
def __repr__(self) -> str:
return f"Median({self.get()})"
def __add__(self, other):
res = MedianTracker()
res.elems = self.elems + other.elems
return res
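# Minimal usage sketch (added; not part of the original module): feed a few values through
# StatTracker and MedianTracker and print the aggregated results. The values are arbitrary.
if __name__ == "__main__":
    tracker = StatTracker()
    medians = MedianTracker()
    for value in [1.0, 2.0, 4.0, 8.0]:
        tracker.add(value)
        medians.add(value)
    print(tracker)            # Stat(mean: 3.75, std: ~3.1, n: 4)
    print(medians)            # Median(3.0)
    print(tracker + tracker)  # trackers merge by combining their accumulated sums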
|
import websocket
import threading
import traceback
from time import sleep
import json
import urllib.parse
import math
import logging
from qbrobot.util.api_key import generate_nonce, generate_signature
from qbrobot.util import log
logger = logging.getLogger()
# Naive implementation of connecting to BitMEX websocket for streaming realtime data.
# The Marketmaker still interacts with this as if it were a REST Endpoint, but now it can get
# much more realtime data without polling the hell out of the API.
#
# The Websocket offers a bunch of data as raw properties right on the object.
# On connect, it synchronously asks for a push of all this data then returns.
# Right after, the MM can start using its data. It will be updated in realtime, so the MM can
# poll really often if it wants.
class BitMEXWebsocket:
# Don't grow a table larger than this amount. Helps cap memory usage.
MAX_TABLE_LEN = 200
def __init__(self, data_q, name , exchange, endpoint, symbol, api_key=None, api_secret=None, channels = None):
'''Connect to the websocket and initialize data stores.'''
#self.logger = logging.getLogger(__name__)
self.logger = logger
self.logger.debug("Initializing WebSocket.%s"%name)
self.q = data_q
self.name = name
self.exchange = exchange
self.endpoint = endpoint
self.symbol = symbol
self.channels = channels
        # Track whether each subscribed channel has been confirmed
self.subscribed = dict()
if api_key is not None and api_secret is None:
raise ValueError('api_secret is required if api_key is provided')
if api_key is None and api_secret is not None:
raise ValueError('api_key is required if api_secret is provided')
        self.api_key = api_key
        self.api_secret = api_secret
        # Auth-only channels can only be subscribed when credentials were supplied
        self.auth = bool(api_key and api_secret)
self.data = {}
self.keys = {}
self.exited = False
self.isReady = False
# We can subscribe right in the connection querystring, so let's build that.
# Subscribe to all pertinent endpoints
wsURL = self.__get_url()
self.logger.info("Connecting to %s" % wsURL)
ret = self.__connect(wsURL, symbol)
if ret :
            # Start subscribing to the data channels
timeout = 5
while not self.isReady and timeout :
self.logger.debug( self.subscribed )
self.__subscribe_chans_symbs(self.channels, self.symbol)
sleep(1)
if self.__check_subscribed_chan_symbol():
self.isReady = True
timeout -= 1
self.logger.info('%s have connected to %s.'%(self.name, wsURL))
"""
        # Connected. Wait for partials. The data subscription has already completed, so there is no need to wait any longer.
if ret :
self.__wait_for_symbol(symbol)
if api_key:
self.__wait_for_account()
"""
self.isReady = True
def exit(self):
'''Call this to exit - will close websocket.'''
timeout = 5
while self.isReady and timeout :
self.__unsubscribe_chans_symbs(self.channels, self.symbol)
sleep(1)
if not any( self.subscribed.values() ) :
                # Keep retrying until every channel has been unsubscribed
self.isReady = False
timeout -= 1
self.exited = True
self.ws.close()
def get_instrument(self):
'''Get the raw instrument data for this symbol.'''
# Turn the 'tickSize' into 'tickLog' for use in rounding
instrument = self.data['instrument'][0]
instrument['tickLog'] = int(math.fabs(math.log10(instrument['tickSize'])))
return instrument
def get_ticker(self):
'''Return a ticker object. Generated from quote and trade.'''
lastQuote = self.data['quote'][-1]
lastTrade = self.data['trade'][-1]
ticker = {
"last": lastTrade['price'],
"buy": lastQuote['bidPrice'],
"sell": lastQuote['askPrice'],
"mid": (float(lastQuote['bidPrice'] or 0) + float(lastQuote['askPrice'] or 0)) / 2
}
# The instrument has a tickSize. Use it to round values.
instrument = self.data['instrument'][0]
return {k: round(float(v or 0), instrument['tickLog']) for k, v in ticker.items()}
def funds(self):
'''Get your margin details.'''
return self.data['margin'][0]
def market_depth(self):
'''Get market depth (orderbook). Returns all levels.'''
return self.data['orderBookL2']
def open_orders(self, clOrdIDPrefix):
'''Get all your open orders.'''
orders = self.data['order']
# Filter to only open orders (leavesQty > 0) and those that we actually placed
return [o for o in orders if str(o['clOrdID']).startswith(clOrdIDPrefix) and o['leavesQty'] > 0]
def recent_trades(self):
'''Get recent trades.'''
return self.data['trade']
#
# End Public Methods
#
def __connect(self, wsURL, symbol):
'''Connect to the websocket in a thread.'''
        # Per BitMEX's official guidance, the 'ping' command over the WebSocket API is no longer supported.
        # If you are worried about the connection being silently terminated, the recommended flow is:
        # After receiving each message, set a 5-second timer.
        # If any new message arrives before the timer fires, reset the timer.
        # If the timer fires (no new message within 5 seconds), send a raw ping frame (if supported) or the literal string 'ping'.
        # Expect a raw pong frame or the literal string 'pong' in response; if none arrives within 5 seconds, raise an error or reconnect.
self.ws = websocket.WebSocketApp(wsURL,
on_message=self.__on_message,
on_close=self.__on_close,
on_open=self.__on_open,
on_error=self.__on_error,
on_pong=self.__on_pong,
header=self.__get_auth())
self.wst = threading.Thread(target=lambda: self.ws.run_forever(ping_interval=20, ping_timeout=5,))
self.wst.daemon = True
self.wst.start()
self.logger.debug(("Started thread websocket connect %s"%wsURL))
# Wait for connect before continuing
conn_timeout = 5
while ( not self.ws.sock or not self.ws.sock.connected ) and conn_timeout:
logger.debug("%s wait for timeout: %d"%(self.name, conn_timeout ) )
sleep(1)
conn_timeout -= 1
if not conn_timeout:
self.logger.error("%s couldn't connect to WS! Exiting."%(self.name))
self.exited = True
self.exit()
#raise websocket.WebSocketTimeoutException('Couldn\'t connect to WS! Exiting.')
return False
return True
def __get_auth(self):
'''Return auth headers. Will use API Keys if present in settings.'''
if self.api_key:
self.logger.info("Authenticating with API Key.")
# To auth to the WS using an API key, we generate a signature of a nonce and
# the WS API endpoint.
nonce = generate_nonce()
return [
"api-nonce: " + str(nonce),
"api-signature: " + generate_signature(self.api_secret, 'GET', '/realtime', nonce, ''),
"api-key:" + self.api_key
]
else:
self.logger.info("Not authenticating.")
return []
def __get_url(self):
'''
Generate a connection URL. only endpoint.
'''
urlParts = list(urllib.parse.urlparse(self.endpoint))
urlParts[0] = urlParts[0].replace('http', 'ws')
urlParts[2] = "/realtime"
return urllib.parse.urlunparse(urlParts)
def __wait_for_account(self):
'''On subscribe, this data will come down. Wait for it.'''
# Wait for the keys to show up from the ws
while not {'margin', 'position', 'order'} <= set(self.data):
sleep(0.1)
def __wait_for_symbol(self, symbol):
'''On subscribe, this data will come down. Wait for it.'''
while not {'instrument', 'trade', 'quote'} <= set(self.data):
sleep(0.1)
#
# subscribe interactive Methods added by Huangkj
#
def __subscribe_chans_symbs(self, channels, symbols ):
"""
        Issue subscriptions: loop over channels and symbols, subscribing one at a time, and record whether each channel's subscription succeeded.
"""
if not (channels and symbols ):
return False
for ch in channels :
if channels[ch]['auth'] :
if not self.auth:
continue
chan = channels[ch]['altername']
for sy in symbols:
chanid = chan +':'+ sy
if not chanid in self.subscribed or not self.subscribed[chanid]:
self.__subscribe_chan_symbol( chan, sy )
def __unsubscribe_chans_symbs(self, channels, symbols ):
"""
        Issue unsubscriptions: loop over channels and symbols, unsubscribing one at a time, and record whether each channel was unsubscribed.
"""
if not (channels and symbols ):
return False
for ch in channels :
if channels[ch]['auth'] :
if not self.auth:
continue
chan = channels[ch]['altername']
for sy in symbols:
chanid = chan +':'+ sy
if chanid in self.subscribed and self.subscribed[chanid]:
self.__unsubscribe_chan_symbol( chan, sy )
def __subscribe_chan_symbol(self, channel, symbol):
"""
        Issue a single subscription. To make monitoring easier, only one channel/symbol pair
        is subscribed at a time, and self.subscribed[chanid] is pre-set to False.
        Parameters:
            channel - data channel; for BitMEX this corresponds to the subscribe topic
            symbol - instrument symbol
        Returns:
            None
        Raises:
            None
"""
chanid = channel + ':' + symbol
self.__register_subscribed_chan_symbol( chanid=chanid, stat=False)
self.__send_command(command='subscribe', args=[chanid])
def __unsubscribe_chan_symbol(self, channel, symbol):
"""
        Issue a single unsubscription. To make monitoring easier, only one channel/symbol pair
        is unsubscribed at a time.
        Parameters:
            channel - data channel; for BitMEX this corresponds to the subscribe topic
            symbol - instrument symbol
        Returns:
            None
        Raises:
            None
"""
chanid = channel + ':' + symbol
self.__send_command(command='unsubscribe', args=[chanid])
def __register_subscribed_chan_symbol(self, chanid = None, stat=False ):
"""
设置self.subscribed为False
Parameters:
chanid - 数据频道,chanid = channel +':' + symbol
Returns:
None
Raises:
None
"""
if chanid :
self.subscribed[chanid]=True
def __check_subscribed_chan_symbol(self):
"""
        Check whether every channel in self.subscribed is True; channels that have not yet succeeded will be subscribed again.
Parameters:
Returns:
ret - True / False
Raises:
None
"""
if all( self.subscribed.values() ):
logger.info("Subscribed channels %s success.", self.subscribed.keys())
return True
else:
return False
def __send_command(self, command, args=None):
'''Send a raw command.'''
if args is None:
args = []
self.ws.send(json.dumps({"op": command, "args": args}))
def __pass_to_robot(self, channel, data ):
"""
        Pass data to the backend. No channel conversion is needed here; the channel has already been mapped back to the generic base channel.
"""
logger.debug( "%s %s"%(channel, data ) )
return self.q.put(( channel, data ))
def __on_message(self, ws, message):
'''Handler for parsing WS messages.'''
message = json.loads(message)
self.logger.debug("recv message: %s"%json.dumps(message))
table = message['table'] if 'table' in message else None
action = message['action'] if 'action' in message else None
try:
if 'subscribe' in message:
chanid = message['subscribe']
                # Inspect the message to determine whether the subscription succeeded; on success set self.subscribed[chanid] = True
if 'success' in message:
self.logger.debug("Subscribed success to %s." % chanid )
self.__register_subscribed_chan_symbol( chanid=chanid, stat=True )
elif 'unsubscribe' in message:
chanid = message['unsubscribe']
if 'success' in message:
self.logger.debug("Unsubscribe success to %s." % chanid )
self.__register_subscribed_chan_symbol( chanid=chanid, stat=False )
elif action:
if table not in self.data:
self.data[table] = []
# There are four possible actions from the WS:
# 'partial' - full table image
# 'insert' - new row
# 'update' - update row
# 'delete' - delete row
if action == 'partial':
#self.logger.debug("%s: partial" % table)
self.data[table] += message['data']
# Keys are communicated on partials to let you know how to uniquely identify
# an item. We use it for updates.
self.keys[table] = message['keys']
elif action == 'insert':
#self.logger.debug('%s: inserting %s' % (table, message['data']))
self.data[table] += message['data']
# Limit the max length of the table to avoid excessive memory usage.
# Don't trim orders because we'll lose valuable state if we do.
if table not in ['order', 'orderBookL2', 'orderBookL2_25'] and len(self.data[table]) > BitMEXWebsocket.MAX_TABLE_LEN:
self.data[table] = self.data[table][int(BitMEXWebsocket.MAX_TABLE_LEN / 2):]
elif action == 'update':
#self.logger.debug('%s: updating %s' % (table, message['data']))
# Locate the item in the collection and update it.
for updateData in message['data']:
item = findItemByKeys(self.keys[table], self.data[table], updateData)
if not item:
return # No item found to update. Could happen before push
item.update(updateData)
# Remove cancelled / filled orders
if table == 'order' and item['leavesQty'] <= 0:
self.data[table].remove(item)
elif action == 'delete':
#self.logger.debug('%s: deleting %s' % (table, message['data']))
# Locate the item in the collection and remove it.
for deleteData in message['data']:
item = findItemByKeys(self.keys[table], self.data[table], deleteData)
self.data[table].remove(item)
else:
raise Exception("Unknown action: %s" % action)
# added by huangkj
            # Once there is data for this table, pass it downstream
if table :
ch = findChanByAltername(channels = self.channels, altername=table)
datas = self.__parse_channel_data(ch, self.data[table])
for symbol in datas :
channel = ( self.exchange, ch, symbol )
self.__pass_to_robot( channel, datas[symbol] )
except:
self.logger.error(traceback.format_exc())
def __parse_channel_data(self, channel, data ):
"""
        Data received from BitMEX must be parsed per table to extract the symbol, then returned keyed by symbol.
        All channels are handled with the same layout, dict[symbol: [data1, data2, data3]], so there is no longer a per-channel dispatch such as:
if channel == 'ticker':
ret = __handler_ticker_data(data)
elif channel == 'book':
ret = __handler_orderbook_data(data)
elif channel == 'trade':
ret = __handler_trade_data(data)
elif channel == 'instrument':
ret = __handler_instrumen_data(data)
"""
ret = dict()
if channel == 'book':
            ret = self.__handler_orderbook_data(data)
else:
for item in data :
symbol = item['symbol']
if symbol in ret:
ret[symbol].append( item )
else:
ret[symbol] = list()
ret[symbol].append(item)
return ret
def __handler_instrumen_data(self, data ):
"""
return ret = {
'ETHUSD':[{'volume24h': 11974029, 'impactBidPrice': 85.75, 'underlying': 'ETH', 'inverseLeg': '', 'fairMethod': 'FundingRate', 'vwap': 83.01, 'turnover24h': 99393388060,
'optionStrikePrice': None, 'listing': '2018-07-18T12:00:00.000Z', 'prevTotalTurnover': 9831235185370, 'deleverage': True, 'markMethod': 'FairPrice', 'lastPrice': 85.85,
'timestamp': '2018-12-16T03:54:00.000Z', 'bankruptLimitUpPrice': None, 'fundingRate': 0.0001, 'positionCurrency': '', 'limitUpPrice': None, 'front': '2018-07-18T12:00:00.000Z',
'indicativeFundingRate': 0.0001, 'underlyingToSettleMultiplier': None, 'relistInterval': None, 'maxPrice': 1000000, 'rootSymbol': 'ETH', 'takerFee': 0.00075, 'fairPrice': 85.85,
'prevPrice24h': 83.7, 'taxed': True, 'calcInterval': None, 'reference': 'BMEX', 'publishTime': None, 'limitDownPrice': None, 'maxOrderQty': 10000000, 'riskLimit': 5000000000,
'totalTurnover': 9831895712035, 'referenceSymbol': '.BETH', 'sellLeg': '', 'quoteToSettleMultiplier': 30951, 'state': 'Open', 'settle': None, 'lotSize': 1, 'impactMidPrice': 85.825,
'openInterest': 5663679, 'prevTotalVolume': 475303713, 'askPrice': 85.85, 'expiry': None, 'multiplier': 100, 'fundingPremiumSymbol': '.ETHUSDPI8H', 'lastTickDirection': 'PlusTick',
'fundingTimestamp': '2018-12-16T04:00:00.000Z', 'sessionInterval': '2000-01-01T01:00:00.000Z', 'quoteCurrency': 'USD', 'optionUnderlyingPrice': None, 'fundingBaseSymbol': '.ETHBON8H',
'bidPrice': 85.75, 'capped': False, 'settlCurrency': 'XBt', 'highPrice': 86.5, 'publishInterval': None, 'optionStrikeRound': None, 'volume': 76619, 'openValue': 48622684215, 'hasLiquidity': True,
'fairBasis': 0, 'underlyingToPositionMultiplier': None, 'lowPrice': 81.05, 'limit': None, 'markPrice': 85.85, 'rebalanceTimestamp': None, 'optionMultiplier': None, 'totalVolume': 475380332,
'riskStep': 5000000000, 'insuranceFee': 0, 'isQuanto': True, 'foreignNotional24h': 3163930.509693124, 'isInverse': False, 'indicativeSettlePrice': 85.85, 'prevClosePrice': 82.71, 'closingTimestamp': '2018-12-16T04:00:00.000Z',
'settledPrice': None, 'bankruptLimitDownPrice': None, 'midPrice': 85.8, 'fundingInterval': '2000-01-01T08:00:00.000Z', 'lastPriceProtected': 85.85, 'impactAskPrice': 85.9, 'fairBasisRate': 0.1095,
'homeNotional24h': 38114.25537049419, 'fundingQuoteSymbol': '.USDBON8H', 'maintMargin': 0.01, 'settlementFee': 0, 'underlyingSymbol': 'ETH=', 'symbol': 'ETHUSD', 'optionStrikePcnt': None, 'initMargin': 0.02, 'typ': 'FFWCSX',
'makerFee': -0.00025, 'buyLeg': '', 'indicativeTaxRate': 0, 'rebalanceInterval': None, 'openingTimestamp': '2018-12-16T03:00:00.000Z', 'tickSize': 0.05, 'lastChangePcnt': 0.0257, 'turnover': 660526665}],
'XBTUSD':[{'volume24h': 20258479, 'impactBidPrice': 3162.5553, 'underlying': 'XBT', 'inverseLeg': '', 'fairMethod': 'FundingRate', 'vwap': 3126.7588, 'turnover24h': 647913907426,
'optionStrikePrice': None, 'listing': '2016-05-04T12:00:00.000Z', 'prevTotalTurnover': 1475330078895908, 'deleverage': True, 'markMethod': 'FairPrice', 'lastPrice': 3165,
'timestamp': '2018-12-16T03:54:05.000Z', 'bankruptLimitUpPrice': None, 'fundingRate': -0.00375, 'positionCurrency': 'USD', 'limitUpPrice': None, 'front': '2016-05-04T12:00:00.000Z',
'indicativeFundingRate': -0.00375, 'underlyingToSettleMultiplier': -100000000, 'relistInterval': None, 'maxPrice': 1000000, 'rootSymbol': 'XBT', 'takerFee': 0.00075, 'fairPrice': 3230.72,
'prevPrice24h': 3120, 'taxed': True, 'calcInterval': None, 'reference': 'BMEX', 'publishTime': None, 'limitDownPrice': None, 'maxOrderQty': 10000000, 'riskLimit': 20000000000, 'totalTurnover': 1475341288249414,
'referenceSymbol': '.BXBT', 'sellLeg': '', 'quoteToSettleMultiplier': None, 'state': 'Open', 'settle': None, 'lotSize': 1, 'impactMidPrice': 3163.75, 'openInterest': 98770714, 'prevTotalVolume': 101498190355, 'askPrice': 3164, 'expiry': None,
'multiplier': -100000000, 'fundingPremiumSymbol': '.XBTUSDPI8H', 'lastTickDirection': 'PlusTick', 'fundingTimestamp': '2018-12-16T04:00:00.000Z', 'sessionInterval': '2000-01-01T01:00:00.000Z', 'quoteCurrency': 'USD', 'optionUnderlyingPrice': None,
'fundingBaseSymbol': '.XBTBON8H', 'bidPrice': 3163.5, 'capped': False, 'settlCurrency': 'XBt', 'highPrice': 3185, 'publishInterval': None, 'optionStrikeRound': None, 'volume': 354201, 'openValue': 3057249910442, 'hasLiquidity': True, 'fairBasis': -0.15,
'underlyingToPositionMultiplier': None, 'lowPrice': 3050, 'limit': None, 'markPrice': 3230.72, 'rebalanceTimestamp': None, 'optionMultiplier': None, 'totalVolume': 101498544556, 'riskStep': 10000000000, 'insuranceFee': 0, 'isQuanto': False,
'foreignNotional24h': 20258479, 'isInverse': True, 'indicativeSettlePrice': 3231.53, 'prevClosePrice': 3163.73, 'closingTimestamp': '2018-12-16T04:00:00.000Z', 'settledPrice': None, 'bankruptLimitDownPrice': None, 'midPrice': 3163.75,
'fundingInterval': '2000-01-01T08:00:00.000Z', 'lastPriceProtected': 3163.5, 'impactAskPrice': 3164.9576, 'fairBasisRate': -4.10625, 'homeNotional24h': 6479.1390742599915, 'fundingQuoteSymbol': '.USDBON8H', 'maintMargin': 0.005, 'settlementFee': 0,
'underlyingSymbol': 'XBT=', 'symbol': 'XBTUSD', 'optionStrikePcnt': None, 'initMargin': 0.01, 'typ': 'FFWCSX', 'makerFee': -0.00025, 'buyLeg': '', 'indicativeTaxRate': 0, 'rebalanceInterval': None, 'openingTimestamp': '2018-12-16T03:00:00.000Z',
'tickSize': 0.5, 'lastChangePcnt': 0.0144, 'turnover': 11209353506}],
}
"""
ret = dict()
if data:
for item in data :
symbol = item['symbol']
if symbol in ret:
ret[symbol].append( item )
else:
ret[symbol] = list()
ret[symbol].append(item)
return ret
def __handler_trade_data(self, data ):
"""
return ret = {
'ETHUSD':[{'price': 85.85, 'homeNotional': 0.01937608990505716, 'foreignNotional': 1.663437318349157, 'grossValue': 51510, 'trdMatchID': '4125a744-bfaa-8449-eb9f-de8d556a8639',
'timestamp': '2018-12-16T03:52:34.227Z', 'symbol': 'ETHUSD', 'tickDirection': 'PlusTick', 'side': 'Buy', 'size': 6}],
'XBTUSD':[{'price': 3163.5, 'homeNotional': 0.031611, 'foreignNotional': 100, 'grossValue': 3161100, 'trdMatchID': '261841bc-2efd-e80b-01b9-5accd95ba177',
'timestamp': '2018-12-16T03:53:38.501Z', 'symbol': 'XBTUSD', 'tickDirection': 'ZeroMinusTick', 'side': 'Sell', 'size': 100},],
}
"""
ret = dict()
if data:
for item in data :
symbol = item['symbol']
if symbol in ret:
ret[symbol].append( item )
else:
ret[symbol] = list()
ret[symbol].append(item)
return ret
    def __handler_orderbook_data(self, data ):
        """
        return ret = {
                        'ETHUSD':[[{'symbol': 'ETHUSD', 'id': 38699998224, 'side': 'Sell', 'size': 72, 'price': 88.8},
                                {'symbol': 'ETHUSD', 'id': 38699998227, 'side': 'Sell', 'size': 22500, 'price': 88.65},],
                        'XBTUSD':[{'symbol': 'XBTUSD', 'id': 15599682250, 'side': 'Sell', 'size': 12737, 'price': 3177.5},
                                {'symbol': 'XBTUSD', 'id': 15599682300, 'side': 'Sell', 'size': 4000, 'price': 3177},],
                    }
        """
        ret = dict()
        if data:
            for item in data :
                symbol = item['symbol']
                if symbol in ret:
                    ret[symbol].append( item )
                else:
                    ret[symbol] = list()
                    ret[symbol].append(item)
        return ret
def __handler_ticker_data(self, data ):
"""
return ret = {
'ETHUSD':[{'timestamp': '2018-12-16T03:53:50.045Z', 'symbol': 'ETHUSD', 'bidSize': 174784, 'bidPrice': 85.75, 'askPrice': 85.85, 'askSize': 41}, ],
'XBTUSD':[{'timestamp': '2018-12-16T03:53:53.225Z', 'symbol': 'XBTUSD', 'bidSize': 6275, 'bidPrice': 3163.5, 'askPrice': 3164, 'askSize': 7},
{'timestamp': '2018-12-16T03:54:04.596Z', 'symbol': 'XBTUSD', 'bidSize': 6275, 'bidPrice': 3163.5, 'askPrice': 3165, 'askSize': 37049}, ],
}
"""
ret = dict()
if data:
for item in data :
symbol = item['symbol']
if symbol in ret:
ret[symbol].append( item )
else:
ret[symbol] = list()
ret[symbol].append(item)
return ret
def __on_error(self, ws, error):
'''Called on fatal websocket errors. We exit on these.'''
if not self.exited:
self.logger.error("%s Error : %s" % (self.name, error) )
#raise websocket.WebSocketException(error)
self.isReady = False
def __on_pong(self, ws, message ):
logger.debug("%s ---on pong---- "% self.name)
self.isReady = True
def __on_open(self, ws):
'''Called when the WS opens.'''
self.logger.debug("%s Websocket Opened."%self.name)
def __on_close(self, ws):
'''Called on websocket close.'''
self.exited = True
self.isReady = False
self.logger.debug( ( '%s Websocket Closed')%(self.name) )
# Utility method for finding an item in the store.
# When an update comes through on the websocket, we need to figure out which item in the array it is
# in order to match that item.
#
def findChanByAltername( channels, altername ):
base = ''
for ch in channels:
if altername == channels[ch]['altername']:
base = ch
break
return base
# Helpfully, on a data push (or on an HTTP hit to /api/v1/schema), we have a "keys" array. These are the
# fields we can use to uniquely identify an item. Sometimes there is more than one, so we iterate through all
# provided keys.
def findItemByKeys(keys, table, matchData):
for item in table:
matched = True
for key in keys:
if item[key] != matchData[key]:
matched = False
if matched:
return item
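# Illustrative sketch (added; not part of the original module) of the module-level helpers.
# The sample rows and channel config below are made up purely for demonstration.
if __name__ == "__main__":
    sample_table = [
        {"symbol": "XBTUSD", "id": 1, "side": "Buy", "size": 100},
        {"symbol": "XBTUSD", "id": 2, "side": "Sell", "size": 50},
    ]
    # The keys list identifies a row uniquely; 'symbol' + 'id' select the second row here.
    print(findItemByKeys(["symbol", "id"], sample_table, {"symbol": "XBTUSD", "id": 2}))
    # Channel configs map a generic channel name to BitMEX's table name via 'altername'.
    sample_channels = {"book": {"altername": "orderBookL2", "auth": False}}
    print(findChanByAltername(sample_channels, "orderBookL2"))  # -> 'book'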
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.10
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (3,0,0):
new_instancemethod = lambda func, inst, cls: _RWStl.SWIG_PyInstanceMethod_New(func)
else:
from new import instancemethod as new_instancemethod
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_RWStl', [dirname(__file__)])
except ImportError:
import _RWStl
return _RWStl
if fp is not None:
try:
_mod = imp.load_module('_RWStl', fp, pathname, description)
finally:
fp.close()
return _mod
_RWStl = swig_import_helper()
del swig_import_helper
else:
import _RWStl
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self,name,value):
if (name == "thisown"): return self.this.own(value)
if hasattr(self,name) or (name == "this"):
set(self,name,value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
class SwigPyIterator(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _RWStl.delete_SwigPyIterator
def __iter__(self): return self
SwigPyIterator.value = new_instancemethod(_RWStl.SwigPyIterator_value,None,SwigPyIterator)
SwigPyIterator.incr = new_instancemethod(_RWStl.SwigPyIterator_incr,None,SwigPyIterator)
SwigPyIterator.decr = new_instancemethod(_RWStl.SwigPyIterator_decr,None,SwigPyIterator)
SwigPyIterator.distance = new_instancemethod(_RWStl.SwigPyIterator_distance,None,SwigPyIterator)
SwigPyIterator.equal = new_instancemethod(_RWStl.SwigPyIterator_equal,None,SwigPyIterator)
SwigPyIterator.copy = new_instancemethod(_RWStl.SwigPyIterator_copy,None,SwigPyIterator)
SwigPyIterator.next = new_instancemethod(_RWStl.SwigPyIterator_next,None,SwigPyIterator)
SwigPyIterator.__next__ = new_instancemethod(_RWStl.SwigPyIterator___next__,None,SwigPyIterator)
SwigPyIterator.previous = new_instancemethod(_RWStl.SwigPyIterator_previous,None,SwigPyIterator)
SwigPyIterator.advance = new_instancemethod(_RWStl.SwigPyIterator_advance,None,SwigPyIterator)
SwigPyIterator.__eq__ = new_instancemethod(_RWStl.SwigPyIterator___eq__,None,SwigPyIterator)
SwigPyIterator.__ne__ = new_instancemethod(_RWStl.SwigPyIterator___ne__,None,SwigPyIterator)
SwigPyIterator.__iadd__ = new_instancemethod(_RWStl.SwigPyIterator___iadd__,None,SwigPyIterator)
SwigPyIterator.__isub__ = new_instancemethod(_RWStl.SwigPyIterator___isub__,None,SwigPyIterator)
SwigPyIterator.__add__ = new_instancemethod(_RWStl.SwigPyIterator___add__,None,SwigPyIterator)
SwigPyIterator.__sub__ = new_instancemethod(_RWStl.SwigPyIterator___sub__,None,SwigPyIterator)
SwigPyIterator_swigregister = _RWStl.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
import OCC.StlMesh
import OCC.MMgt
import OCC.Standard
import OCC.gp
import OCC.TColgp
import OCC.TCollection
import OCC.OSD
import OCC.Message
import OCC.TColStd
def register_handle(handle, base_object):
"""
Inserts the handle into the base object to
prevent memory corruption in certain cases
"""
try:
if base_object.IsKind("Standard_Transient"):
base_object.thisHandle = handle
base_object.thisown = False
except:
pass
class rwstl(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def WriteBinary(*args):
"""
        * write the meshing in a file following the binary format of an STL file. Returns false if the file cannot be opened;
:param aMesh:
:type aMesh: Handle_StlMesh_Mesh &
:param aPath:
:type aPath: OSD_Path &
:param aProgInd: default value is NULL
:type aProgInd: Handle_Message_ProgressIndicator &
:rtype: bool
"""
return _RWStl.rwstl_WriteBinary(*args)
WriteBinary = staticmethod(WriteBinary)
def WriteAscii(*args):
"""
        * write the meshing in a file following the Ascii format of an STL file. Returns false if the file cannot be opened;
:param aMesh:
:type aMesh: Handle_StlMesh_Mesh &
:param aPath:
:type aPath: OSD_Path &
:param aProgInd: default value is NULL
:type aProgInd: Handle_Message_ProgressIndicator &
:rtype: bool
"""
return _RWStl.rwstl_WriteAscii(*args)
WriteAscii = staticmethod(WriteAscii)
def ReadFile(*args):
"""
        * This method will check if the file is a binary file or an ascii file by testing the first 5 characters of the file, which are 'solid' in an ascii file. If we do not find that word we assume that it is a binary file.
:param aPath:
:type aPath: OSD_Path &
:param aProgInd: default value is NULL
:type aProgInd: Handle_Message_ProgressIndicator &
:rtype: Handle_StlMesh_Mesh
"""
return _RWStl.rwstl_ReadFile(*args)
ReadFile = staticmethod(ReadFile)
def ReadBinary(*args):
"""
        * Read a meshing from a binary file. Raises NoMoreObject from Standard if a statement does not contain the right number of tokens. Raises TypeMisMatch if a token does not have the right type (often real)
:param aPath:
:type aPath: OSD_Path &
:param aProgInd: default value is NULL
:type aProgInd: Handle_Message_ProgressIndicator &
:rtype: Handle_StlMesh_Mesh
"""
return _RWStl.rwstl_ReadBinary(*args)
ReadBinary = staticmethod(ReadBinary)
def ReadAscii(*args):
"""
        * Read a meshing from an ascii file. Raises NoMoreObject from Standard if a statement does not contain the right number of tokens. Raises TypeMisMatch if a token does not have the right type (often real). Raises NoMoreObject if the file ends before the word endsolid is found;
:param aPath:
:type aPath: OSD_Path &
:param aProgInd: default value is NULL
:type aProgInd: Handle_Message_ProgressIndicator &
:rtype: Handle_StlMesh_Mesh
"""
return _RWStl.rwstl_ReadAscii(*args)
ReadAscii = staticmethod(ReadAscii)
def __init__(self):
_RWStl.rwstl_swiginit(self,_RWStl.new_rwstl())
__swig_destroy__ = _RWStl.delete_rwstl
rwstl_swigregister = _RWStl.rwstl_swigregister
rwstl_swigregister(rwstl)
def rwstl_WriteBinary(*args):
"""
    * write the meshing in a file following the binary format of an STL file. Returns false if the file cannot be opened;
:param aMesh:
:type aMesh: Handle_StlMesh_Mesh &
:param aPath:
:type aPath: OSD_Path &
:param aProgInd: default value is NULL
:type aProgInd: Handle_Message_ProgressIndicator &
:rtype: bool
"""
return _RWStl.rwstl_WriteBinary(*args)
def rwstl_WriteAscii(*args):
"""
    * write the meshing in a file following the Ascii format of an STL file. Returns false if the file cannot be opened;
:param aMesh:
:type aMesh: Handle_StlMesh_Mesh &
:param aPath:
:type aPath: OSD_Path &
:param aProgInd: default value is NULL
:type aProgInd: Handle_Message_ProgressIndicator &
:rtype: bool
"""
return _RWStl.rwstl_WriteAscii(*args)
def rwstl_ReadFile(*args):
"""
    * This method will check if the file is a binary file or an ascii file by testing the first 5 characters of the file, which are 'solid' in an ascii file. If we do not find that word we assume that it is a binary file.
:param aPath:
:type aPath: OSD_Path &
:param aProgInd: default value is NULL
:type aProgInd: Handle_Message_ProgressIndicator &
:rtype: Handle_StlMesh_Mesh
"""
return _RWStl.rwstl_ReadFile(*args)
def rwstl_ReadBinary(*args):
"""
    * Read a meshing from a binary file. Raises NoMoreObject from Standard if a statement does not contain the right number of tokens. Raises TypeMisMatch if a token does not have the right type (often real)
:param aPath:
:type aPath: OSD_Path &
:param aProgInd: default value is NULL
:type aProgInd: Handle_Message_ProgressIndicator &
:rtype: Handle_StlMesh_Mesh
"""
return _RWStl.rwstl_ReadBinary(*args)
def rwstl_ReadAscii(*args):
"""
    * Read a meshing from an ascii file. Raises NoMoreObject from Standard if a statement does not contain the right number of tokens. Raises TypeMisMatch if a token does not have the right type (often real). Raises NoMoreObject if the file ends before the word endsolid is found;
:param aPath:
:type aPath: OSD_Path &
:param aProgInd: default value is NULL
:type aProgInd: Handle_Message_ProgressIndicator &
:rtype: Handle_StlMesh_Mesh
"""
return _RWStl.rwstl_ReadAscii(*args)
|
import importlib
import re
from functools import lru_cache
from inspect import getsource
from pathlib import Path
from typing import Iterator, Optional, List
from pycaro.api.constants import BUILTIN_OBJECTS
from pycaro.api.files import find_project_root, get_path_from_root, get_absolute_path
from pycaro.api.logger import get_logger
from pycaro.api.pycaro_types import (
UnstableVar,
UnstableModuleObject,
UnstableModule,
FuncCoVarsAttr,
)
_logger = get_logger()
def _are_co_var_attributes(
func,
) -> Iterator[FuncCoVarsAttr]:
co_var_names = func.__code__.co_varnames
source = getsource(func)
for co_var_name in co_var_names:
yield FuncCoVarsAttr(
co_var_name=co_var_name, is_attribute=f".{co_var_name}" in source
)
class VarNotFoundInMethodException(Exception):
""" """
def __init__(self, method_name: str, var_name: str, *args):
self.method_name = method_name
self.var_name = var_name
super(
f"Could not find the var `{self.var_name}` in `{self.method_name}`", *args
)
class ModuleChecker:
def __init__(
self,
file_path: Path,
):
self.file_path = file_path
self.root = find_project_root(())
self.absolute_path = get_absolute_path(
path=self.file_path,
root=self.root,
)
self.file_path_normalized = get_path_from_root(
path=self.absolute_path,
root=self.root,
)
self.importable_module_path = self.file_path_normalized.replace(
".py", ""
).replace("/", ".")
_logger.debug(self.importable_module_path)
self.visited = importlib.import_module(
self.importable_module_path,
)
visited_all_objects = vars(self.visited)
self.valid_names = list(visited_all_objects.keys()) + list(
visited_all_objects["__builtins__"].keys()
)
self.module_imported_objects = {
obj_name
for obj_name in visited_all_objects.keys()
if not hasattr(visited_all_objects[obj_name], "__code__")
}
self.module_objects = {
obj_name
for obj_name in set(visited_all_objects.keys())
.difference(set(BUILTIN_OBJECTS))
.difference(self.module_imported_objects)
}
@property
@lru_cache(None)
def is_stable(self) -> bool:
"""
        Whether the module is considered stable, i.e. there are no module objects whose variables need checking.
        :return: True if the module is stable, False otherwise.
"""
return (
len(self.module_objects) == 0
or len(list(map(self.get_method_var_names, self.module_objects))) == 0
)
def get_method_var_names(self, module_object: str) -> List[str]:
"""
For a given method, return the names of local vars
:param module_object: Name of the method to explore
:return:
"""
obj = vars(self.visited)[module_object]
# locals
used_var = vars(self.visited)[module_object].__code__.co_names
# Remove builtins
local_vars = set(used_var).difference(vars(self.visited)["__builtins__"].keys())
# Remove imported
local_vars = local_vars.difference(self.module_imported_objects)
local_vars = local_vars.difference(
{
func_co_var_attr.co_var_name
for func_co_var_attr in _are_co_var_attributes(obj)
if func_co_var_attr.is_attribute
}
)
return list(local_vars)
def get_var_name_first_usage(
self, module_object: str, var_name: str
) -> UnstableVar:
# Patterns definition
def_pattern = re.compile(rf"^\s*def\s{re.escape(module_object)}\(.*$")
var_pattern = re.compile(rf"^.*[^\w]{re.escape(var_name)}[^\w|$]")
with open(self.absolute_path.as_posix(), "r") as f:
lines = f.readlines()
# Find the definition starting line
for i, line in enumerate(lines):
if def_pattern.match(line):
break
# Once the definition starting line is found, find the
# var name first occurrence and return
for j, method_line in enumerate(lines[i:]):
if var_pattern.match(method_line):
return UnstableVar(
var_name=var_name,
first_oc_line_no=i + 1 + j,
line_preview=method_line.rstrip("\n"),
)
# We should not end up here. Right now this helps find uncharted patterns
raise VarNotFoundInMethodException(
method_name=module_object,
var_name=var_name,
)
def check(self, module_object: str):
"""
Returns an index of all var names with their stability evaluation.
:param module_object: The name of the method on which we check variables
"""
return {
var_name: var_name in self.valid_names
for var_name in self.get_method_var_names(module_object=module_object)
}
def check_all(self):
return [
{"method_name": module_object, "check": self.check(module_object)}
for module_object in self.module_objects
]
@property
def unstable_module_objects(self) -> Iterator[UnstableModuleObject]:
"""
Generator property of all unstable module objects in the given module
"""
for module_object in self.module_objects:
# Check all vars of the given method {method_name}
vars_checked = self.check(
module_object=module_object,
)
if all(vars_checked.values()):
# No need to go any further if all vars
# are bounded for {method_name}
continue
# Build the list of unstable variables
unstable_vars = [
self.get_var_name_first_usage(
module_object=module_object,
var_name=var_name,
)
for var_name, stable in vars_checked.items()
if not stable
]
yield UnstableModuleObject(
module_object=module_object,
unstable_vars=unstable_vars,
)
@property
@lru_cache(None)
def as_unstable_module(self) -> Optional[UnstableModule]:
        if self.is_stable or next(self.unstable_module_objects, None) is None:
return None
return UnstableModule(
module_path=self.file_path.as_posix(),
unstable_module_objects=self.unstable_module_objects,
)
class ModuleCollectionCheck:
def __init__(self, paths: List[Path]):
self.paths = paths
def __iter__(self):
pass
def __next__(self):
pass
def get_module_checker_generator(
paths: List[Path],
) -> Iterator[UnstableModule]:
"""
    Yield an UnstableModule for each scanned module that contains unstable methods.
:param paths: paths to scan
:return:
"""
for path in paths:
checker = ModuleChecker(path)
unstable = checker.as_unstable_module
if unstable:
yield unstable
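# Illustrative sketch (added; not part of the original module): _are_co_var_attributes
# inspects a function's local variable names and reports whether each one also appears as
# an attribute access (".name") in the source. The sample function is purely hypothetical.
if __name__ == "__main__":
    def _sample(obj):
        label = obj.label
        return label
    for co_var in _are_co_var_attributes(_sample):
        # expect roughly: co_var_name='obj' -> is_attribute False, co_var_name='label' -> True
        print(co_var)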
|
import sys
import pdb
import pickle
import argparse
sys.path.append("../src")
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument("estNumber", help="Estimation number", type=int)
args = parser.parse_args()
estNumber = args.estNumber
modelSaveFilename = "results/{:08d}_estimatedModel.pickle".format(estNumber)
with open(modelSaveFilename, "rb") as f: savedResults = pickle.load(f)
model = savedResults["model"]
lowerBound = model.eval()
print("Achieved lower bound {:f}".format(lowerBound))
pdb.set_trace()
if __name__=="__main__":
main(sys.argv)
|
from django.contrib.auth.decorators import login_required
from django.http import Http404
from django.shortcuts import render, get_object_or_404, redirect
from garbageday.models import GarbageDay
from houses.models import House
@login_required(login_url="account_login")
def garbageday_manage(request, house):
housemodel = get_object_or_404(House, pk=house)
if request.user != housemodel.user:
raise Http404
garbageday = GarbageDay.objects.filter(house=housemodel)
if garbageday.count() == 0:
return redirect('garbageday_create', house=house)
else:
return redirect('garbageday_edit', house=house)
@login_required(login_url="account_login")
def garbageday_create(request, house):
housemodel = get_object_or_404(House, pk=house)
if request.user != housemodel.user:
raise Http404
if housemodel.garbageday_set.count() > 0:
return redirect(housemodel.get_absolute_url())
if request.method == 'POST':
garbageday = GarbageDay()
garbageday.user = request.user
garbageday.house = housemodel
garbageday.last_garbage_day = request.POST['LastGarbageDay']
garbageday.next_garbage_day = request.POST['NextGarbageDay']
garbageday.save()
return redirect(housemodel.get_absolute_url())
else:
return render(request, 'garbageday/garbageday_create.html', {'house': housemodel})
@login_required(login_url="account_login")
def garbageday_edit(request, house):
housemodel = get_object_or_404(House, pk=house)
garbageday = housemodel.garbageday_set.first()
if request.user != housemodel.user:
raise Http404
if request.method == 'POST':
garbageday.last_garbage_day = request.POST['LastGarbageDay']
garbageday.next_garbage_day = request.POST['NextGarbageDay']
garbageday.save()
return redirect(housemodel.get_absolute_url())
else:
return render(request, 'garbageday/garbageday_edit.html', {'house': housemodel, 'garbageday': garbageday})
|
#coding:utf-8
from basic_config import *
PCAS_PATH = 'D:\\datasets\\APS\\PCAS.txt'
PCAS_CODES = []
def select_topic(topic_list):
reserved_pcases = []
for pcas in topic_list:
if len(pcas) != 2:
continue
if pcas[0] not in [
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'
] or pcas[1] not in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:
continue
if pcas.endswith("0"):
continue
reserved_pcases.append(pcas)
if len(reserved_pcases) == 0:
return None, 0
pcas_counter = Counter(reserved_pcases)
return sorted(pcas_counter.keys(),
key=lambda x: pcas_counter[x],
reverse=True)[0], len(set(reserved_pcases))
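## Worked example (added): select_topic keeps only two-digit PCAS codes that do not end in '0'
## and returns the most frequent code plus the number of distinct codes. For instance,
## select_topic(['01', '02', '01', '30', 'x9']) returns ('01', 2): '30' is dropped for ending
## in '0', 'x9' for not being two digits, leaving Counter({'01': 2, '02': 1}).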
## How the number of topics changes over time
def topic_nums():
    ## Publication year of each paper
paper_year = json.loads(open('data/paper_year.json').read())
    logging.info(f'{len(paper_year.keys())} papers have year labels ... ')
    ## Number of papers per topic per year
    pcas_year_papers = defaultdict(lambda: defaultdict(list))
    ## Number of papers per topic
pcas_nums = defaultdict(list)
num_topic_list = []
pid_topic = {}
    ## PCAS file
for line in open(PCAS_PATH, encoding='utf-8'):
line = line.strip()
if line.startswith('DOI'):
continue
try:
doi, pcas1, pcas2, pcas3, pcas4, pcas5 = line.split(',')
except:
continue
pcas1, pcas2, pcas3, pcas4, pcas5 = pcas1.strip(
).split('.')[0], pcas2.strip().split('.')[0], pcas3.strip().split(
'.')[0], pcas4.strip().split('.')[0], pcas5.strip().split('.')[0]
pcas_list = [pcas1, pcas2, pcas3, pcas4, pcas5]
year = paper_year.get(doi, -1)
pcas, tn = select_topic(pcas_list)
if pcas is None:
continue
if tn > 0:
num_topic_list.append(tn)
pcas_nums[pcas].append(doi)
pid_topic[doi] = pcas
if year != -1:
pcas_year_papers[pcas][year].append(doi)
open('data/pid_topic.json', 'w').write(json.dumps(pid_topic))
    logging.info('pid topic saved to data/pid_topic.json.')
num_topic_counter = Counter(num_topic_list)
plt.figure(figsize=(3.5, 2.8))
xs = []
ys = []
for num in sorted(num_topic_counter.keys()):
tn = num_topic_counter[num]
xs.append(num)
ys.append(tn)
plt.bar(range(len(xs)), ys)
plt.xlabel('number of topics')
plt.ylabel('number of papers')
plt.xticks(range(len(xs)), xs)
plt.yscale('log')
plt.tight_layout()
plt.savefig('fig/topic_num_dis.png', dpi=800)
open('data/topic_year_dois.json', 'w').write(json.dumps(pcas_year_papers))
logging.info('data saved to data/topic_year_dois.json')
    ### Plot each topic ordered by paper count
ys = []
xs = []
topic_nums = {}
tn_list = []
for pcas in sorted(pcas_nums.keys(),
key=lambda x: len(pcas_nums[x]),
reverse=True):
tn = len(pcas_nums[pcas])
if pcas == '' or tn < 100:
continue
tn_list.append(tn)
xs.append(pcas)
ys.append(tn)
topic_nums[pcas] = pcas_nums[pcas]
open('data/topic_papers.json', 'w').write(json.dumps(topic_nums))
logging.info('data saved to data/topic_papers.json')
open('data/selected_topics.txt', 'w').write('\n'.join(xs))
logging.info('data saved to data/selected_topics.txt')
    ### Report how many PCAS codes were kept
    logging.info(f'Num of PCAS:{len(xs)}')
    ## Draw the bar chart
plt.figure(figsize=(10, 2.8))
plt.bar(range(len(xs)), ys)
plt.xticks(range(len(xs)), xs, rotation=90)
plt.ylim(1, 100000)
plt.yscale("log")
plt.xlabel('topic rank')
plt.ylabel('number of papers')
plt.tight_layout()
plt.savefig('fig/topic_nums.png', dpi=800)
logging.info('topic nums saved to fig/topic_nums.png')
    ### Fit the curve
plt.figure(figsize=(3.5, 2.8))
expfunc = lambda t, a, b: a * np.exp(b * t)
index_xs = np.arange(len(xs)) + 1
fit_ys = np.array(ys) / float(np.sum(ys))
popt, pcov = scipy.optimize.curve_fit(expfunc,
index_xs,
fit_ys,
p0=(0.2, -2))
plt.plot(np.array(index_xs), fit_ys)
plt.plot(index_xs, [expfunc(x, *popt) for x in index_xs],
'--',
label=u'Fitted Curve: $p(n)=%.2f*e^{%.2fn}$' % (popt[0], popt[1]),
c='r')
plt.xlabel('field rank')
plt.ylabel('probability')
plt.ylim(0.0001, 0.1)
plt.yscale('log')
plt.legend(prop={'family': 'SimHei', 'size': 8})
plt.tight_layout()
fitted_xs = range(1, 101)
fitted_ys = [expfunc(x, *popt) for x in fitted_xs]
fitted_ys = list(np.array(fitted_ys) / np.sum(fitted_ys))
topic_dis = {}
topic_dis['x'] = list(fitted_xs)
topic_dis['y'] = list(fitted_ys)
open('data/topic_dis.json', 'w').write(json.dumps(topic_dis))
logging.info('topic dis saved to data/topic_dis.json.')
plt.savefig('fig/topic_nums_fit.png', dpi=800)
    logging.info('fig saved to fig/topic_nums_fit.png')
selected_topics = set(xs)
new_pid_topic = {}
for pid in pid_topic.keys():
topic = pid_topic[pid]
if topic in selected_topics:
new_pid_topic[pid] = topic
logging.info(f'{len(new_pid_topic.keys())} papers reserved.')
open('data/new_pid_topic.json', 'w').write(json.dumps(new_pid_topic))
logging.info('new pid topic saved.')
## Topic relevance
def topic_relevance():
pid_refs = json.loads(open('data/pid_all_refs.json').read())
pid_topic = json.loads(open('data/new_pid_topic.json').read())
all_num = len(set(pid_refs.keys()) & set(pid_topic.keys()))
topic_nums = json.loads(open('data/topic_nums.json').read())
logging.info(f'{all_num} papers loaded')
topics = sorted(topic_nums.keys(),
key=lambda x: len(topic_nums[x]),
reverse=True)[:15]
all_topics = [line.strip() for line in open('data/selected_topics.txt')]
t1_t2_num = defaultdict(lambda: defaultdict(int))
t1_refnum = defaultdict(int)
progress = 0
for pid in pid_refs.keys():
progress += 1
if progress % 10000 == 0:
logging.info(f'progress {progress} ...')
topic = pid_topic.get(pid, '-1')
if topic == '-1':
continue
refs = pid_refs[pid]
for ref in refs:
ref_topic = pid_topic.get(ref, '-1')
if ref_topic == '-1':
ref_topic = topic
t1_t2_num[topic][ref_topic] += 1
t1_refnum[topic] += 1
print('number of topics:', len(all_topics))
t1_t2_rel = defaultdict(dict)
for t1 in all_topics:
        ## Total number of references made by this topic
        refnum = t1_refnum[t1]
        row = []
        for t2 in all_topics:
            num = t1_t2_num[t1].get(t2, 0)
            ## Relevance of topic 2 to topic 1
rel_2_1 = num / float(refnum)
t1_t2_rel[t1][t2] = rel_2_1
open('data/topic_rel_matrix.json', 'w').write(json.dumps(t1_t2_rel))
logging.info('topic relevance matrix saved to data/topic_rel_matrix.json.')
rels = ['t1,t2,rel']
for t1 in topics:
        ## Total number of references made by this topic
        refnum = t1_refnum[t1]
        row = []
        ## Number of times topic 1 cites topic 2
        for t2 in topics:
            num = t1_t2_num[t1].get(t2, 0)
            ## Relevance of topic 2 to topic 1
rel_2_1 = num / float(refnum)
rels.append('{:},{:},{:}'.format(t1, t2, rel_2_1))
open('data/topic_relevance.csv', 'w').write('\n'.join(rels))
logging.info('topic relevance saved to data/topic_relevance.csv')
    ## Plot the heatmap
    plot_heatmap('data/topic_relevance.csv', 'correlation matrix', 'field',
                 'field', 'fig/topic_rel_matrix.png')
    ## Plot ranked relevance for the top 15 topics
plt.figure(figsize=(5, 4))
all_topics = t1_t2_num.keys()
all_num_list = []
# all_rels =
for t1 in all_topics:
t2_num = t1_t2_num[t1]
refnum = t1_refnum[t1]
num_list = []
for t2 in all_topics:
num = t1_t2_num[t1].get(t2, 0)
num_list.append(num / float(refnum))
if t1 in topics:
plt.plot(range(1,
len(all_topics) + 1),
sorted(num_list, reverse=True),
alpha=0.6)
all_num_list.append(sorted(num_list, reverse=True))
all_avg = [np.mean([i for i in a if i > 0]) for a in zip(*all_num_list)]
plt.plot(range(1,
len(all_topics) + 1),
all_avg,
'--',
linewidth=2,
c='r',
label=u'mean')
# xs = []
# ys = []
# for num_list in all_num_list:
# for i, num in enumerate(sorted(num_list, reverse=True)):
# if num > 0:
# xs.append(i + 1)
# ys.append(num)
# plaw = lambda t, a, b: a * t**b
# # expfunc = lambda t,a,b:a*np.exp(b*t)
# popt, pcov = scipy.optimize.curve_fit(plaw, xs, ys, p0=(0.2, -1))
# plt.plot(np.linspace(1, np.max(xs), 10),
# [plaw(x + 1, *popt) for x in np.linspace(1, np.max(xs), 10)],
# '-^',
# label=u'$f(i)=%.2f \\times i^{%.2f} $' % (popt[0], popt[1]),
# c='b')
plt.xlabel('rank')
plt.ylabel('correlation')
plt.yscale('log')
# plt.xscale('log')
plt.legend(prop={'family': 'SimHei', 'size': 8})
plt.tight_layout()
plt.savefig('fig/topic_rel_dis.png', dpi=800)
    logging.info('fig saved to fig/topic_rel_dis.png')
if __name__ == '__main__':
# topic_nums()
topic_relevance()
|
from src.utils.dataset_loader import load_olid_data_taska
import torch
from transformers import Trainer, BertForSequenceClassification, TrainingArguments, BertTokenizer
[train, test, dev] = load_olid_data_taska()
model = BertForSequenceClassification.from_pretrained('olid_clean/checkpoint-2500')
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
train_sents = [i[0] for i in train]
train_labels = torch.tensor([i[1] for i in train])
train_encoded = tokenizer(train_sents, padding=True, truncation=True, return_tensors='pt')
train_dataset = torch.utils.data.TensorDataset(train_encoded['input_ids'], train_encoded['attention_mask'], train_labels)
test_sents = [i[0] for i in test]
test_labels = torch.tensor([i[1] for i in test])
test_encoded = tokenizer(test_sents, padding=True, truncation=True, return_tensors='pt')
test_dataset = torch.utils.data.TensorDataset(test_encoded['input_ids'], test_encoded['attention_mask'], test_labels)
def dummy_data_collector(features):
batch = {}
batch['input_ids'] = torch.stack([f[0] for f in features])
batch['attention_mask'] = torch.stack([f[1] for f in features])
batch['labels'] = torch.stack([f[2] for f in features])
return batch
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
def compute_metrics(pred):
labels = pred.label_ids
preds = pred.predictions.argmax(-1)
precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average='binary')
acc = accuracy_score(labels, preds)
return {
'accuracy': acc,
'f1': f1,
'precision': precision,
'recall': recall
}
training_args = TrainingArguments(
output_dir='./olid_clean', # output directory
num_train_epochs=5, # total # of training epochs
per_device_train_batch_size=16, # batch size per device during training
per_device_eval_batch_size=64, # batch size for evaluation
warmup_steps=500, # number of warmup steps for learning rate scheduler
weight_decay=0.01, # strength of weight decay
logging_dir='./logs', # directory for storing logs
)
trainer = Trainer(
model=model, # the instantiated 🤗 Transformers model to be trained
args=training_args, # training arguments, defined above
train_dataset=train_dataset, # training dataset
eval_dataset=test_dataset, # evaluation dataset
data_collator=dummy_data_collector,
compute_metrics=compute_metrics
)
#trainer.train()
print(trainer.evaluate())
|
from datetime import datetime
from sqlalchemy import Column, Integer, SmallInteger, String, Text, DateTime, Boolean
from sqlalchemy import TypeDecorator, ForeignKey, inspect
from proj.config import CONF
from proj.extensions import sql_db
# Timestamp factory used for the created_at / updated_at column defaults below
now = datetime.utcnow
class ModelMixin(object):
def save(self):
sql_db.session.add(self)
sql_db.session.commit()
class TimestampMixin(object):
created_at = Column(DateTime, default=now, nullable=False)
updated_at = Column(DateTime, default=now, onupdate=now, nullable=False)
class MySqlModel(sql_db.Model, ModelMixin, TimestampMixin):
__tablename__ = 'my_model'
id = Column(Integer, primary_key=True)
def to_dict(self):
return {
'id': self.id
}
|
'''
Given a binary tree where node values are digits from 1
to 9. A path in the binary tree is said to be
pseudo-palindromic if at least one permutation of the
node values in the path is a palindrome.
Return the number of pseudo-palindromic paths going from
the root node to leaf nodes.
Example:
Input: root = [2,3,1,3,1,null,1]
Output: 2
Explanation: The figure above represents the given binary
                 tree. There are three paths going from the
root node to leaf nodes: the red path [2,3,3],
the green path [2,1,1], and the path [2,3,1].
Among these paths only red path and green
path are pseudo-palindromic paths since the
red path [2,3,3] can be rearranged in [3,2,3]
(palindrome) and the green path [2,1,1] can
be rearranged in [1,2,1] (palindrome).
Example:
Input: root = [2,1,1,1,3,null,null,null,null,null,1]
Output: 1
Explanation: The figure above represents the given binary
tree. There are three paths going from the
root node to leaf nodes: the green path
[2,1,1], the path [2,1,3,1], and the path
[2,1]. Among these paths only the green path
is pseudo-palindromic since [2,1,1] can be
rearranged in [1,2,1] (palindrome).
Example:
Input: root = [9]
Output: 1
Constraints:
- The given binary tree will have between 1 and
10^5 nodes.
- Node values are digits from 1 to 9.
'''
#Difficulty: Medium
#53 / 53 test cases passed.
#Runtime: 540 ms
#Memory Usage: 49.1 MB
#Runtime: 540 ms, faster than 33.16% of Python3 online submissions for Pseudo-Palindromic Paths in a Binary Tree.
#Memory Usage: 49.1 MB, less than 96.05% of Python3 online submissions for Pseudo-Palindromic Paths in a Binary Tree.
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
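# Concrete TreeNode matching the commented-out LeetCode stub above, added as an
# assumption so the file can run outside the LeetCode judge.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right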
class Solution:
def pseudoPalindromicPaths (self, root: TreeNode) -> int:
self.result = 0
self.dfs(root, [])
return self.result
def dfs(self, root, path):
if root:
path.append(root.val)
if not root.left and not root.right:
if self.pathCheck(path):
self.result += 1
if root.left:
self.dfs(root.left, path)
if root.right:
self.dfs(root.right, path)
path.pop()
def pathCheck(self, path):
count = {}
odd = 0
for val in path:
if val not in count:
count[val] = 0
count[val] += 1
for value in count.values():
if value % 2:
odd += 1
if odd > 1:
return False
return True
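# Minimal usage sketch (illustrative assumption, not part of the original
# submission): builds the first example tree [2,3,1,3,1,null,1]; expected output is 2.
if __name__ == "__main__":
    root = TreeNode(2,
                    TreeNode(3, TreeNode(3), TreeNode(1)),
                    TreeNode(1, None, TreeNode(1)))
    print(Solution().pseudoPalindromicPaths(root))  # 2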
|
from conans import ConanFile, CMake, tools
class MichaQtUtilisLibConan(ConanFile):
name = "MichaQtUtilisLib"
version = "0.2"
license = "MIT - https://github.com/jackdaimond/MichaQtUtilisLib/blob/master/LICENSE"
author = "Michael Kloske"
url = "https://github.com/jackdaimond/MichaQtUtilisLib.git"
description = "Contains several functionalities often used in other Qt applications."
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False], "fPIC": [True, False]}
default_options = {"shared": False, "fPIC": True}
generators = "qmake"
scm = {
"type": "git",
"subfolder": "",
"url": "https://github.com/jackdaimond/MichaQtUtilisLib.git",
"revision": "auto"
}
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
#def source(self):
# self.run("git clone https://github.com/conan-io/hello.git")
# # This small hack might be useful to guarantee proper /MT /MD linkage
# # in MSVC if the packaged project doesn't have variables to set it
# # properly
def build(self):
if(self.settings.build_type == "Debug"):
self.run("qmake MichaQtUtilisLib.pro CONFIG+=debug")
else:
self.run("qmake MichaQtUtilisLib.pro CONFIG+=release")
self.run("nmake")
# Explicit way:
# self.run('cmake %s/hello %s'
# % (self.source_folder, cmake.command_line))
# self.run("cmake --build . %s" % cmake.build_config)
def package(self):
self.copy("*.h*", dst="include/MichaQtUtilisLib", src="src")
self.copy("*.lib", dst="lib", keep_path=False)
self.copy("*.dll", dst="bin", keep_path=False)
self.copy("*.so", dst="lib", keep_path=False)
self.copy("*.dylib", dst="lib", keep_path=False)
self.copy("*.a", dst="lib", keep_path=False)
def package_info(self):
self.cpp_info.libs = ["MichaQtUtilisLib"]
|
from unittest import TestCase
import astropy.units as u
import numpy as np
from esbo_etc.classes.Config import Configuration
from esbo_etc.classes.target.FileTarget import FileTarget
from esbo_etc.classes.target.BlackBodyTarget import BlackBodyTarget
from esbo_etc.classes.optical_component.StrayLight import StrayLight
from esbo_etc.classes.sensor.Imager import Imager
class TestImager(TestCase):
def setUp(self):
self.config = Configuration("tests/data/esbo-etc_defaults.xml").conf
self.imager_args = dict(quantum_efficiency=0.9 * u.electron / u.photon,
pixel_geometry=np.array([1024, 1024]) << u.pix,
pixel_size=6.5 * u.um, sigma_read_out=1.4 * u.electron ** 0.5 / u.pix,
dark_current=0.6 * u.electron / u.pix / u.second, well_capacity=30000 * u.electron,
f_number=13, common_conf=self.config.common, center_offset=np.array([0, 0]) << u.pix,
shape="circle", contained_energy="FWHM", aperture_size=None)
self.target = FileTarget("tests/data/target/target_demo_1.csv", np.arange(200, 210) << u.nm)
self.zodiac = StrayLight(self.target, "tests/data/straylight/zodiacal_emission_1.csv")
self.imager = Imager(self.zodiac, **self.imager_args)
def test_getSNR(self):
snr = self.imager.getSNR(0.1 * u.s)
self.assertAlmostEqual(snr.value, 7.112939048582188)
def test_getExpTime(self):
exp_time = 0.1 * u.s
snr = self.imager.getSNR(exp_time)
exp_time_ = self.imager.getExpTime(snr)
self.assertAlmostEqual(exp_time.value, exp_time_.value)
def test_getSensitivity(self):
exp_time = 100 * u.s
target = BlackBodyTarget(np.arange(200, 210) << u.nm, mag=20 * u.mag)
zodiac = StrayLight(target, "tests/data/straylight/zodiacal_emission_1.csv")
imager = Imager(zodiac, **self.imager_args)
snr = imager.getSNR(exp_time)
target = BlackBodyTarget(np.arange(200, 210) << u.nm, mag=10 * u.mag)
zodiac = StrayLight(target, "tests/data/straylight/zodiacal_emission_1.csv")
imager = Imager(zodiac, **self.imager_args)
sensitivity = imager.getSensitivity(exp_time, snr, 10 * u.mag)
self.assertAlmostEqual(sensitivity.value, 20)
|
#!/usr/bin/env python
import os
import pygame
from pygame.locals import *
#define some colors
#color R G B
white = (255, 255, 255)
red = (255, 0, 0)
green = ( 0, 255, 0)
blue = ( 0, 0, 255)
black = ( 0, 0, 0)
cyan = ( 0, 255, 255)
btnCycle_col = white
btnPrev_col = white
btnNext_col = white
btnF1_col = cyan
btnF2_col = blue
btnF3_col = red
btnF4_col = green
btnF5_col = cyan
# directory structure
core_data = 'data'
image_dir = 'images'
video_dir = 'videos'
initial_image = 'HAL900_320x240.png'
"""
Screen layout:
|------------- 320 -------------|
C y c l e
+-------------------------------+ ---
20 |### ####################### ###| 20 |
|### ###| |
|### ###| |
P |### ###| N |
R |### ###| E 240
E |### ###| X |
V |### ###| T |
|### ###| |
|### ###| |
|### ### ### ### ### ### ### ###| |
+-------------------------------+ ---
40 F1 F2 F3 F4 F5 F6 40
"""
#screen size
width = 320
height = 240
size = (width, height)
# button definitions
# pressed button 21, 219
number_of_x_buttons = 5
number_of_y_buttons = 2
btn_width = 40
btn_height = 40
safetyMargin = 2
# evenly distribute function buttons
btnDistance_x = ((width - 2 * btn_width) - (number_of_x_buttons * btn_width)) / (number_of_x_buttons + 1)
btnDistance_y = ((height - btn_height) - (number_of_y_buttons * btn_height)) / (number_of_y_buttons + 1)
# these are the two big area to "scroll" left and right
btnPrev_x = 0
btnPrev_y = safetyMargin
btnPrev_width = btn_width
btnPrev_height = height - safetyMargin
btnNext_x = width - btn_width
btnNext_y = safetyMargin
btnNext_width = btn_width
btnNext_height = height - safetyMargin
btnCycle_x = 0 + (btn_width + safetyMargin)
btnCycle_y = 0
btnCycle_width = width - (2 * btn_width + 2 * safetyMargin)
btnCycle_height = btn_height
btnF1_x = 0 + (btn_width + safetyMargin)
btnF1_y = height - btn_height
btnF1_width = btn_width
btnF1_height = btn_height
btnF2_x = btnF1_x + btnDistance_x
btnF2_y = height - btn_height
btnF2_width = btn_width
btnF2_height = btn_height / 2
btnF3_x = btnF2_x + btnDistance_x
btnF3_y = height - btn_height
btnF3_width = btn_width
btnF3_height = btn_height / 2
btnF4_x = btnF3_x + btnDistance_x
btnF4_y = height - btn_height
btnF4_width = btn_width
btnF4_height = btn_height / 2
btnF5_x = btnF4_x + btnDistance_x
btnF5_y = height - btn_height
btnF5_width = btn_width
btnF5_height = btn_height / 2
# initialize pyGame and the screen
pygame.init()
screen = pygame.display.set_mode(size)
screen.fill((black))
touch_buttons = {
'btnPrev.png':(btnPrev_x, btnPrev_y, btnPrev_width, btnPrev_height) # Previous image button
,'btnNext.png':(btnNext_x,btnNext_y,btnNext_width, btnNext_height) # Next image button
,'btnCycle.png':(btnCycle_x,btnCycle_y,btnCycle_width, btnCycle_height) # Cycle screen button
,'btnF1.png':(btnF1_x,btnF1_y,btnF1_width, btnF1_height) # function 1 button
    ,'btnF2.png':(btnF2_x,btnF2_y,btnF2_width, btnF2_height) # function 2 button
    ,'btnF3.png':(btnF3_x,btnF3_y,btnF3_width, btnF3_height) # function 3 button
    ,'btnF4.png':(btnF4_x,btnF4_y,btnF4_width, btnF4_height) # function 4 button
,'btnF5.png':(btnF5_x,btnF5_y,btnF5_width, btnF5_height) # function 5 button
}
# functions
def prev_picture():
print 'prev picture called'
def next_picture():
print 'next picture called'
def cycle_function():
print 'cycle function called'
def display_image(directory, filename):
try:
# load from subfolder 'data'
img = pygame.image.load(os.path.join(directory,filename))
except:
raise UserWarning, "Unable to find the images in the folder 'data' :-( "
screen.blit(img,(0,0))
# This function takes the name of an image to load.
# It also optionally takes an argument it can use to set a colorkey for the image.
# A colorkey is used in graphics to represent a color of the image that is transparent.
# we also use this function to initialize filenav.py -- see modules
def load_image(name, colorkey=None):
fullname = os.path.join('data', name)
try:
image = pygame.image.load(fullname)
except pygame.error, message:
print 'Cannot load image:', name
raise SystemExit, message
image = image.convert()
if colorkey is not None:
if colorkey is -1:
colorkey = image.get_at((0,0))
image.set_colorkey(colorkey, RLEACCEL)
return image, image.get_rect()
def show_controls():
# Draw a rectangle outline
pygame.draw.rect(screen, btnPrev_col, [btnPrev_x, btnPrev_y, btnPrev_width, btnPrev_height], 2)
#pygame.blit(source, dest, area=None, special_flags = 0) -> Rect
pygame.draw.rect(screen, btnNext_col, [btnNext_x, btnNext_y, btnNext_width, btnNext_height], 2)
pygame.draw.rect(screen, btnCycle_col, [btnCycle_x, btnCycle_y, btnCycle_width, btnCycle_height], 2)
#pygame.draw.rect(screen, btnF1_col, [btnF1_x, btnF1_y, btnF1_width, btnF1_height], 2)
#pygame.draw.rect(screen, btnF2_col, [btnF2_x, btnF2_y, btnF2_width, btnF2_height], 2)
#pygame.draw.rect(screen, btnF3_col, [btnF3_x, btnF3_y, btnF3_width, btnF3_height], 2)
#pygame.draw.rect(screen, btnF4_col, [btnF4_x, btnF4_y, btnF4_width, btnF4_height], 2)
#pygame.draw.rect(screen, btnF5_col, [btnF5_x, btnF5_y, btnF5_width, btnF5_height], 2)
"""
for i,v in touch_buttons.items():
btn_image = pygame.image.load(os.path.join('data', i))
# X Y W H
rect = btn_image.set_rect(v[0], v[1], v[2], v[3])
screen.blit(btn_image, rect)
"""
# Go ahead and update the screen with what we've drawn.
# This MUST happen after all the other drawing commands.
pygame.display.flip()
def hide_controls():
display_image(core_data, current_image)
# Go ahead and update the screen with what we've drawn.
# This MUST happen after all the other drawing commands.
pygame.display.flip()
def get_display():
disp_no = os.getenv('DISPLAY')
if disp_no:
print "I'm running under X display = {0}".format(disp_no)
pygame.mouse.set_visible(True)
else:
drivers = ['directfb', 'fbcon', 'svgalib']
found = False
for driver in drivers:
if not os.getenv('SDL_VIDEODRIVER'):
os.putenv('SDL_VIDEODRIVER', driver)
try:
pygame.display.init()
except pygame.error:
print 'Driver: {0} failed.'.format(driver)
continue
found = True
print "I'm running on the framebuffer using driver " + str(driver)
pygame.mouse.set_visible(False)
break
if not found:
raise Exception('No suitable video driver found!')
os.environ["SDL_FBDEV"] = "/dev/fb1"
os.environ["SDL_MOUSEDEV"] = "/dev/input/touchscreen"
os.environ["SDL_MOUSEDRV"] = "TSLIB"
def run(done, toggle_controls):
display_image(core_data, current_image)
show_controls()
while not done:
# This limits the while loop to a max of 10 times per second.
# Leave this out and we will use all CPU we can.
clock.tick(10)
# Scan touchscreen events
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
if(event.type is MOUSEBUTTONDOWN):
# get list of images in picture folder
pos = pygame.mouse.get_pos()
# Find which quarter of the screen we're in
x,y = pos
print 'pos is ' + str(pos)
# check which button was pressed
if btnPrev_x <= x <= btnPrev_x + btnPrev_width and btnPrev_y <= y <= btnPrev_y + btnPrev_height:
prev_picture()
elif btnNext_x <= x <= btnNext_x + btnNext_width and btnNext_y <= y <= btnNext_y + btnNext_height:
next_picture()
elif btnCycle_x <= x <= btnCycle_x + btnCycle_width and btnCycle_y <= y <= btnCycle_y + btnCycle_height:
cycle_function()
else:
print 'event outside of control buttons'
if (toggle_controls == True):
toggle_controls = False
print 'showing controls'
show_controls()
else:
toggle_controls = True
print 'hiding controls'
# Go ahead and update the screen with what we've drawn.
# This MUST happen after all the other drawing commands.
pygame.display.flip()
elif(event.type is MOUSEBUTTONUP):
pos = pygame.mouse.get_pos()
if __name__ == "__main__":
done = False
toggle_controls = True
get_display()
#define font
font = pygame.font.Font(None, 20)
font_big = pygame.font.Font(None, 50)
pygame.display.update()
current_image = initial_image
clock = pygame.time.Clock()
#background = pygame.Surface(screen.get_size())
#background = background.convert()
#background.fill((black))
run(done, toggle_controls)
# Be IDLE friendly
pygame.quit()
|
data = open('file.in','r').read().strip()
def reduce_data(text):
data_old = None
while data_old != text:
data_old = text
for i in range(26):
text = text.replace(chr(ord('a')+i)+chr(ord('A')+i),'')
text = text.replace(chr(ord('A')+i)+chr(ord('a')+i),'')
return(len(text))
print(reduce_data(data))
best = None
for i in range(26):
data_replaced = data
data_replaced = data_replaced.replace(chr(ord('a')+i),'')
data_replaced = data_replaced.replace(chr(ord('A')+i),'')
val = reduce_data(data_replaced)
if(best is None or val < best):
best = val
print(best)
|
from __future__ import annotations
import hashlib
import json
import logging
from typing import Optional
import secrets
import aiohttp
from aioredis import Redis
import discord
from itsdangerous import Signer, BadSignature
from sanic import Sanic, Blueprint, response, exceptions
from sanic.request import Request
from sanic.response import HTTPResponse
from ...config import config
from ..oauth2 import OAuth2API, OAuth2Context
from ... import author
SESSION_COOKIE_ID = "session"
REDIS_KEY_PREFIX = "auth:sessions:"
DISCORD_API_URL = "https://discordapp.com/api/v9"
REQUIRED_SCOPES = "identify"
cookie_signer = Signer(
bytes.fromhex(config.cookie_signer_key), digest_method=hashlib.sha256
)
auth_api = Blueprint("auth_api", url_prefix="/auth")
app = Sanic.get_app("basil")
oauth2_api = OAuth2API(
DISCORD_API_URL + "/oauth2/authorize",
DISCORD_API_URL + "/oauth2/token",
DISCORD_API_URL + "/oauth2/token/revoke",
config.oauth2_redirect_uri,
config.client_id,
config.client_secret,
"discord",
)
class DiscordUserInfo(object):
def __init__(
self,
user_id: int,
ctx: OAuth2Context,
):
self.id: int = int(user_id)
self.ctx: OAuth2Context = ctx
@property
def as_author(self) -> author.Author:
return author.Author.get_by_id(self.id)
async def save(self):
redis: Redis = app.ctx.redis
async with redis.pipeline(transaction=True) as tr:
tr.set("sessions:users:" + self.ctx.session_id, self.id)
tr.expireat(
"sessions:users:" + self.ctx.session_id, int(self.ctx.expire_time)
)
await tr.execute()
@classmethod
async def load(cls, req: Request) -> Optional[DiscordUserInfo]:
discord_ctx: OAuth2Context = await oauth2_api.load_request_context(req)
redis: Redis = app.ctx.redis
http_sess: aiohttp.ClientSession = app.ctx.http_session
auth = await discord_ctx.auth_header()
if auth is None:
# not logged in
await redis.delete("sessions:users:" + discord_ctx.session_id)
return None
cached_user_id = await redis.get("sessions:users:" + discord_ctx.session_id)
if cached_user_id is not None:
return cls(cached_user_id, discord_ctx)
async with http_sess.get(DISCORD_API_URL + "/users/@me", headers=auth) as resp:
if resp.status >= 400:
resp_text = await resp.text()
logging.error(
"Could not get user info for session {}: {}".format(
discord_ctx.session_id, resp_text
)
)
raise exceptions.ServerError("Could not get Discord user info")
user_data = await resp.json()
ret = cls(user_data["id"], discord_ctx)
await ret.save()
return ret
@app.middleware("request")
async def load_session_id(request: Request):
origin_ip = request.remote_addr
if origin_ip is None or len(origin_ip) == 0:
origin_ip = request.ip
sess_id = None
try:
cookie_data = request.cookies["session"]
sess_id = cookie_signer.unsign(cookie_data).decode("utf-8")
except (BadSignature, UnicodeDecodeError):
logging.warning("IP {} presented invalid session cookie".format(origin_ip))
except KeyError:
pass
if sess_id is None:
sess_id = secrets.token_urlsafe(16)
request.ctx.add_sess_cookie = True
else:
request.ctx.add_sess_cookie = False
request.ctx.session = sess_id
@app.middleware("response")
async def save_session_id(request: Request, response: HTTPResponse):
if request.ctx.session is not None and request.ctx.add_sess_cookie:
signed = cookie_signer.sign(request.ctx.session).decode("utf-8")
response.cookies["session"] = signed
response.cookies["session"]["secure"] = True
response.cookies["session"]["max-age"] = 86400 * 7
@auth_api.get("/me")
async def get_login_data(request: Request):
discord_user = await DiscordUserInfo.load(request)
data = {
"session_id": request.ctx.session,
"dev_mode": config.dev_mode,
}
if discord_user is not None:
data["user_data"] = discord_user.as_author.as_dict
else:
data["user_data"] = None
return response.json(data)
@auth_api.get("/logout")
async def logout(request: Request):
discord_ctx: OAuth2Context = await oauth2_api.load_request_context(request)
await discord_ctx.reset()
return response.redirect(config.login_redirect_target, status=303)
@auth_api.get("/login")
async def start_oauth2(request: Request):
discord_ctx: OAuth2Context = await oauth2_api.load_request_context(request)
return await discord_ctx.start(
REQUIRED_SCOPES, config.login_redirect_target, prompt="none"
)
@auth_api.get("/authorized")
async def oauth2_complete(request: Request):
try:
state: str = request.args["state"][0]
except (KeyError, IndexError):
raise exceptions.InvalidUsage("Missing required parameter 'state'")
try:
code: str = request.args["code"][0]
except (KeyError, IndexError):
raise exceptions.InvalidUsage("Missing required parameter 'code'")
discord_ctx: OAuth2Context = await oauth2_api.load_request_context(request)
return await discord_ctx.redirect(code, state)
|
import pytest
import mock
from plenum.common.constants import DOMAIN_LEDGER_ID
from plenum.common.exceptions import UnknownIdentifier, InvalidSignatureFormat, InsufficientCorrectSignatures, \
CouldNotAuthenticate
from plenum.server.client_authn import CoreAuthNr
from sovtoken.test.wallet import TokenWallet
from sovtoken.client_authnr import TokenAuthNr, AddressSigVerifier
from sovtoken.constants import INPUTS, OUTPUTS, EXTRA
from plenum.common.types import f, OPERATION
from sovtoken.test.helper import xfer_request
# -------------------------Class fixtures-------------------------------------------------------------------------------
@pytest.fixture
def node(txnPoolNodeSet):
a, b, c, d = txnPoolNodeSet
nodes = [a, b, c, d]
return nodes
@pytest.fixture(scope="function")
def user1_token_wallet():
return TokenWallet('user1')
@pytest.fixture(scope="function")
def user2_token_wallet():
return TokenWallet('user2')
@pytest.fixture(scope="function")
def SF_token_wallet():
return TokenWallet('SF_MASTER')
@pytest.fixture(scope="function")
def user1_address(user1_token_wallet):
seed = 'user1000000000000000000000000000'.encode()
user1_token_wallet.add_new_address(seed=seed)
return next(iter(user1_token_wallet.addresses.keys()))
@pytest.fixture(scope="function")
def user2_address(user2_token_wallet):
seed = 'user2000000000000000000000000000'.encode()
user2_token_wallet.add_new_address(seed=seed)
return next(iter(user2_token_wallet.addresses.keys()))
@pytest.fixture(scope="function")
def SF_address(SF_token_wallet):
seed = 'sf000000000000000000000000000000'.encode()
SF_token_wallet.add_new_address(seed=seed)
return next(iter(SF_token_wallet.addresses.keys()))
@pytest.fixture()
def addresses(helpers):
return helpers.wallet.create_new_addresses(2)
# -------------------------VALID TEST CONSTANTS-------------------------------------------------------------------------
# this valid identifier represents a DID that submitted the transaction
VALID_IDENTIFIER = "6ouriXMZkLeHsuXrN1X1fd"
# Unique ID number of the request with transaction
VALID_REQID = 1524769177045798
VALID_XFER_REQID = 1525070057872273
# The version of client-to-node protocol.
PROTOCOL_VERSION = 1
# -------------------------Test AddressSigVerifier.verify method--------------------------------------------------------
# This test verifies that the sig param is the signature of the message given the verKey
# The hardcoded values come from running test_authenticate_xfer_success() in debug mode
def test_verify_success(node, user2_token_wallet, user2_address, user1_address):
addressSigVerifier_obj = AddressSigVerifier('AU8FgXMZtGMhua4g1e6J8c1sZx7Mp1yk6B69M3yCUsUw')
sig = b'w\xab\xc9\xaf\xbe\xba\xbaQ\xcac\x06:M\xc3)\xae\xd8\xa4\xf8\xb2;\xc9<C\xff\xc6\xc5i\xf1o\xf2\x87\xd4\xae' \
b'\xce\x95\xf4$\xfa\x92\xd6\xa7\xf6\x0b\x19@Q\xdd\xe7\x1a\xccArJ\xcaC\xb9]\xa0\x12\xa9\xb1y\x0b'
msg = b'identifier:24xHHVDRq97Hss5BxiTciEDsve7nYNx1pxAMi9RAvcWMouviSY|operation:extra:|inputs:24xHHVDRq97Hss5Bx' \
b'iTciEDsve7nYNx1pxAMi9RAvcWMouviSY,1|outputs:2jS4PHWQJKcawRxdW6GVsjnZBa1ecGdCssn7KhWYJZGTXgL7Es,10,24xHH' \
b'VDRq97Hss5BxiTciEDsve7nYNx1pxAMi9RAvcWMouviSY,10|type:10001|reqId:1525258251652534'
assert True == addressSigVerifier_obj.verify(sig, msg)
# This test verifies that the verKey cannot verify the signature.
# The hardcoded values come from running test_authenticate_xfer_insufficient_correct_signatures() in debug mode
def test_verify_fail():
addressSigVerifier_obj = AddressSigVerifier('AU8FgXMZtGMhua4g1e6J8c1sZx7Mp1yk6B69M3yCUsUw')
sig = b"J\x97v\x10\tp\x0c9R\xcc\xfd\xc6\xfa\x9a\xca\xef\xf0\xfe'\xb2Gfg\xe0w\xa6\x1e\xc5*\x83\xea\x130\\\xa3T\n" \
b"\xb3\x12`\xf9)^[\x9d\x887\xa6\x87A,\x19\xdc\x1b\xdc\xb5S#9^\x12Yk\x0e"
ser_data = b'identifier:24xHHVDRq97Hss5BxiTciEDsve7nYNx1pxAMi9RAvcWMouviSY|operation:extra:|inputs:24xHHVDRq97H' \
b'ss5BxiTciEDsve7nYNx1pxAMi9RAvcWMouviSY,2|outputs:2jS4PHWQJKcawRxdW6GVsjnZBa1ecGdCssn7KhWYJZGTXgL7E' \
b's,10,24xHHVDRq97Hss5BxiTciEDsve7nYNx1pxAMi9RAvcWMouviSY,10|type:10001|reqId:1525258344537237'
assert False == addressSigVerifier_obj.verify(sig, ser_data)
# -------------------------Test authenticate method---------------------------------------------------------------------
# This test is used to check that malformed signatures raise an InvalidSignatureFormat exception
def test_authenticate_invalid_signatures_format(helpers, node, addresses):
[SF_address, user1_address] = addresses
token_authnr = TokenAuthNr(node[0].states[DOMAIN_LEDGER_ID])
outputs = [{"address": SF_address, "amount": 30}, {"address": user1_address, "amount": 30}]
request = helpers.request.mint(outputs)
req_data = request.as_dict
req_data[f.SIGS.nm] = {
'M9BJDuS24bqbJNvBRsoGg3': 'INVALID_SIG1',
'B8fV7naUqLATYocqu7yZ8W': 'INVALID_SIG2',
'E7QRhdcnhAwA6E46k9EtZo': 'INVALID_SIG3',
'CA4bVFDU4GLbX8xZju811o': 'INVALID_SIG3'}
with pytest.raises(InvalidSignatureFormat):
token_authnr.authenticate(req_data)
# This test validates that properly formed but invalid signatures raise an InsufficientCorrectSignatures exception
def test_authenticate_insufficient_valid_signatures_data(helpers, node, addresses):
[SF_address, user1_address] = addresses
token_authnr = TokenAuthNr(node[0].states[DOMAIN_LEDGER_ID])
outputs = [{"address": SF_address, "amount": 30}, {"address": user1_address, "amount": 30}]
request = helpers.request.mint(outputs)
req_data = request.as_dict
req_data[f.SIGS.nm]['E7QRhdcnhAwA6E46k9EtZo'] = \
'2EBZxZ3E2r2ZjCCBwgD6ipnHbskZb4Y4Yqm6haYEsr7hdM1m36yqLFrmNSB7JPqjAsMx6qjw6dWV5sRou1DgiKrM'
with pytest.raises(InsufficientCorrectSignatures):
token_authnr.authenticate(req_data)
# This test is checking to make sure a threshold of correct signatures is met
def test_authenticate_success_3_sigs(helpers, node, addresses):
[SF_address, user1_address] = addresses
token_authnr = TokenAuthNr(node[0].states[DOMAIN_LEDGER_ID])
outputs = [{"address": SF_address, "amount": 30}, {"address": user1_address, "amount": 30}]
request = helpers.request.mint(outputs)
req_data = request.as_dict
correct_sigs = token_authnr.authenticate(req_data)
assert len(correct_sigs) == 3
# This test verifies that authenticate_xfer is called when a request of XFER_PUBLIC type is given
def test_authenticate_calls_authenticate_xfer(helpers, node, addresses):
[SF_address, user1_address] = addresses
token_authnr = TokenAuthNr(node[0].states[DOMAIN_LEDGER_ID])
inputs = [{"address": SF_address, "seqNo": 1}]
outputs = [{"address": user1_address, "amount": 10}, {"address": SF_address, "amount": 10}]
request = helpers.request.transfer(inputs, outputs)
req_data = request.as_dict
token_authnr.authenticate_xfer = mock.Mock()
token_authnr.authenticate(req_data)
token_authnr.authenticate_xfer.assert_called()
# -------------------------Test authenticate_xfer method----------------------------------------------------------------
# This test verifies that authenticate_xfer verifies the signatures and returns data to represent this
def test_authenticate_xfer_success(node, user2_token_wallet, user2_address, user1_address):
token_authnr = TokenAuthNr(node[0].states[DOMAIN_LEDGER_ID])
inputs = [[user2_token_wallet, user2_address, 1]]
outputs = [{"address": user1_address, "amount": 10}, {"address": user2_address, "amount": 10}]
request = xfer_request(inputs, outputs)
req_data = request.as_dict
correct_sigs = token_authnr.authenticate_xfer(req_data, AddressSigVerifier)
assert len(correct_sigs) == 1
# This test verifies that authenticate_xfer raises an error when an invalid formatted signature is submitted
def test_authenticate_xfer_invalid_signature_format(node, user2_token_wallet, user2_address, user1_address):
token_authnr = TokenAuthNr(node[0].states[DOMAIN_LEDGER_ID])
inputs = [[user2_token_wallet, user2_address, 1]]
outputs = [[user1_address, 10], [user2_address, 10]]
request = xfer_request(inputs, outputs)
req_data = request.as_dict
req_data[OPERATION]["signatures"][0] = 'INVALID_SIGNATURE'
with pytest.raises(InvalidSignatureFormat):
token_authnr.authenticate_xfer(req_data, AddressSigVerifier)
# This test is intended to determine that authenticate_xfer raises an error if not all signatures are valid
def test_authenticate_xfer_insufficient_correct_signatures(node, user2_token_wallet, user2_address, user1_address,
SF_address, SF_token_wallet):
token_authnr = TokenAuthNr(node[0].states[DOMAIN_LEDGER_ID])
inputs = [[user2_token_wallet, user2_address, 1], [SF_token_wallet, SF_address, 2]]
outputs = [[user1_address, 10], [user2_address, 10]]
request = xfer_request(inputs, outputs)
req_data = request.as_dict
# creating invalid signature in index 0
req_data[OPERATION]["signatures"][0] = req_data[OPERATION]["signatures"][1]
with pytest.raises(InsufficientCorrectSignatures):
token_authnr.authenticate_xfer(req_data, AddressSigVerifier)
# -------------------------Test serializeForSig method------------------------------------------------------------------
# This tests that the serializeForSig method is called when a XFER_PUBLIC request is submitted
@mock.patch.object(CoreAuthNr, 'serializeForSig', return_value=True)
def test_serializeForSig_XFER_PUBLIC_path(node, user2_token_wallet, user2_address,
SF_token_wallet, SF_address, user1_address):
token_authnr = TokenAuthNr(node[0].states[DOMAIN_LEDGER_ID])
inputs = [[user2_token_wallet, user2_address, 1], [SF_token_wallet, SF_address, 2]]
outputs = [[user1_address, 10], [user1_address, 10]]
request = xfer_request(inputs, outputs)
msg = request.as_dict
serialize_for_sig_called = token_authnr.serializeForSig(msg, VALID_IDENTIFIER, None)
assert serialize_for_sig_called == True
# This tests that the serializeForSig method is called when a MINT_PUBLIC request is submitted
@mock.patch.object(CoreAuthNr, 'serializeForSig')
def test_serializeForSig_MINT_PUBLIC_path(helpers, node, addresses):
[SF_address, user1_address] = addresses
token_authnr = TokenAuthNr(node[0].states[DOMAIN_LEDGER_ID])
outputs = [[SF_address, 30], [user1_address, 30]]
request = helpers.request.mint(outputs)
msg = request.as_dict
serialize_for_sig_called = token_authnr.serializeForSig(msg, VALID_IDENTIFIER, None)
token_authnr.serializeForSig.assert_called()
# -------------------------Test getVerkey method------------------------------------------------------------------------
# This tests that a valid verkey of a DID is returned
def test_getVerkey_success(node):
token_authnr = TokenAuthNr(node[0].states[DOMAIN_LEDGER_ID])
ver_key = token_authnr.getVerkey(VALID_IDENTIFIER)
assert len(ver_key) == 23
assert ver_key[0] == '~'
# This tests that if the identifier is a payment address with a checksum, then a payment verkey is returned
def test_getVerkey_pay_address_success(node):
token_authnr = TokenAuthNr(node[0].states[DOMAIN_LEDGER_ID])
# TODO change these to indicate they are addresses
identifier_43 = 'sjw1ceG7wtym3VcnyaYtf1xo37gCUQHDR5VWcKWNPLRZ1X8eC'
ver_key = token_authnr.getVerkey(identifier_43)
assert ver_key == '8kjqqnF3m6agp9auU7k4TWAhuGygFAgPzbNH3shp4HFL'
# This tests that an exception is raised if an unknown identifier is submitted
def test_getVerkey_invalid_identifier(node):
token_authnr = TokenAuthNr(node[0].states[DOMAIN_LEDGER_ID])
identifier_invalid = 'INVALID_IDENTIFIER'
with pytest.raises(UnknownIdentifier):
token_authnr.getVerkey(identifier_invalid)
# -------------------------Test get_xfer_ser_data method----------------------------------------------------------------
# This test verifies that a properly formatted request returns xfer ser data
@pytest.mark.skip
def test_get_xfer_ser_data_success(node, user2_token_wallet, user2_address,
SF_token_wallet, SF_address, user1_address):
token_authnr = TokenAuthNr(node[0].states[DOMAIN_LEDGER_ID])
inputs = [[user2_token_wallet, user2_address, 1], [SF_token_wallet, SF_address, 2]]
outputs = [[user1_address, 10], [user1_address, 10]]
request = xfer_request(inputs, outputs)
msg = request.as_dict
# This test is marked as skip because the next line does not compile. _get_xfer_ser_data was removed from
# TokenAuthNr or its base class without explanation or update to this test. See TOK-317
ser_data = token_authnr._get_xfer_ser_data(msg, VALID_IDENTIFIER)
assert ser_data[OPERATION][INPUTS] == []
assert ser_data[OPERATION][OUTPUTS] == msg[OPERATION][OUTPUTS]
assert ser_data[OPERATION][EXTRA] == msg[OPERATION][EXTRA]
assert ser_data[f.REQ_ID.nm] == msg[f.REQ_ID.nm]
|
import urllib.request
import bs4
lienImage = "https://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/significant_month.atom"
page = urllib.request.urlopen(lienImage)
xml = bs4.BeautifulSoup(page,'lxml')
e = xml.find_all('title')
for eq in e:
print(eq)
|
# The same teacher from challenge 019 wants to draw the presentation order of the students' assignments.
# Write a program that reads the names of the four students and shows the drawn order.
import random
a1 = str(input('Informe o nome do(a) primeiro(a) aluno(a): '))
a2 = str(input('Informe o nome do(a) segundo(a) aluno(a): '))
a3 = str(input('Informe o nome do(a) terceiro(a) aluno(a): '))
a4 = str(input('Informe o nome do(a) quarto(a) aluno(a): '))
lista_alunos = [a1, a2, a3, a4]
random.shuffle(lista_alunos)  # shuffle works in place and returns None
print('A ordem de apresentação será: {}'.format(lista_alunos))
|
import json
from galaxy_test.base.api_asserts import (
assert_has_keys,
assert_status_code_is,
)
from galaxy_test.base.populators import (
DatasetPopulator,
)
from ._framework import ApiTestCase
class RolesApiTestCase(ApiTestCase):
def setUp(self):
super().setUp()
self.dataset_populator = DatasetPopulator(self.galaxy_interactor)
def test_list_and_show(self):
def check_roles_response(response):
assert response.status_code == 200
as_list = response.json()
assert isinstance(as_list, list)
assert len(as_list) > 0
for role in as_list:
RolesApiTestCase.check_role_dict(role)
user_role_id = self.dataset_populator.user_private_role_id()
with self._different_user():
different_user_role_id = self.dataset_populator.user_private_role_id()
admin_roles_response = self._get("roles", admin=True)
user_roles_response = self._get("roles")
check_roles_response(admin_roles_response)
check_roles_response(user_roles_response)
admin_roles_response_ids = [r["id"] for r in admin_roles_response.json()]
user_roles_response_ids = [r["id"] for r in user_roles_response.json()]
        # User can see their own private role but not the other user's; admin can see both.
assert user_role_id in user_roles_response_ids
assert different_user_role_id not in user_roles_response_ids
assert user_role_id in admin_roles_response_ids
assert different_user_role_id in admin_roles_response_ids
        # Check showing a valid role.
role_response = self._get("roles/%s" % user_role_id)
assert role_response.status_code == 200
role = role_response.json()
RolesApiTestCase.check_role_dict(role, assert_id=user_role_id)
def test_create_valid(self):
name = self.dataset_populator.get_random_name()
description = "A test role."
payload = {
"name": name,
"description": description,
"user_ids": json.dumps([self.dataset_populator.user_id()]),
}
response = self._post("roles", payload, admin=True)
assert_status_code_is(response, 200)
# TODO: Why does this return a singleton list - that is bad - should be deprecated
# and return a single role.
role = response.json()[0]
RolesApiTestCase.check_role_dict(role)
assert role["name"] == name
assert role["description"] == description
user_roles_response = self._get("roles")
with self._different_user():
different_user_roles_response = self._get("roles")
user_roles_response_ids = [r["id"] for r in user_roles_response.json()]
different_user_roles_response_ids = [r["id"] for r in different_user_roles_response.json()]
# This new role is public, all users see it.
assert role["id"] in user_roles_response_ids
assert role["id"] in different_user_roles_response_ids
def test_show_error_codes(self):
# Bad role ids are 400.
response = self._get("roles/badroleid")
assert_status_code_is(response, 400)
        # Trying to access another user's role is an error - should probably be 403 not 400 though?
with self._different_user():
different_user_role_id = self.dataset_populator.user_private_role_id()
response = self._get("roles/%s" % different_user_role_id)
assert_status_code_is(response, 400)
def test_create_only_admin(self):
response = self._post("roles")
assert_status_code_is(response, 403)
@staticmethod
def check_role_dict(role_dict, assert_id=None):
assert_has_keys(role_dict, "id", "name", "model_class", "url")
assert role_dict["model_class"] == "Role"
if assert_id is not None:
assert role_dict["id"] == assert_id
|
import logging
import pytest
from ECAgent.Core import *
# Unit testing for src framework
class TestEnvironment:
def test__init__(self):
model = Model()
assert len(model.environment.components) == 0
assert len(model.environment.agents) == 0
assert model.environment.model is model
assert model.environment.id == "ENVIRONMENT"
def test_addAgent(self):
model = Model()
agent = Agent("a1", model)
model.environment.addAgent(agent)
assert len(model.environment.agents) == 1
assert model.environment.getAgent(agent.id) == agent
with pytest.raises(Exception):
model.environment.addAgent(agent)
def test_removeAgent(self):
model = Model()
agent = Agent("a1", model)
model.environment.addAgent(agent)
model.environment.removeAgent(agent.id)
assert len(model.environment.agents) == 0
with pytest.raises(Exception):
model.environment.removeAgent(agent.id)
def test_getAgent(self):
model = Model()
agent = Agent("a1", model)
assert model.environment.getAgent(agent.id) is None
model.environment.addAgent(agent)
assert model.environment.getAgent(agent.id) == agent
def test_getRandomAgent(self):
env = Environment(None)
assert env.getRandomAgent() is None
model = Model()
agent1 = Agent("a1", model)
agent2 = Agent("a2", model)
assert model.environment.getRandomAgent() is None
model.environment.addAgent(agent1)
assert model.environment.getRandomAgent() is agent1
model.environment.addAgent(agent2)
random_agent = model.environment.getRandomAgent()
assert random_agent is agent1 or random_agent is agent2
# Test Component filter
class CustomComponent(Component):
def __init__(self, a, m):
super().__init__(a, m)
# Test for case in which no agents meet filter requirements
assert model.environment.getRandomAgent(CustomComponent) is None
# Test case where agent does meet requirement
agent1.addComponent(CustomComponent(agent1, model))
assert model.environment.getRandomAgent(CustomComponent) is agent1
def test_getAgents(self):
model = Model()
        # Test empty list is returned when no agents occupy the environment
assert model.environment.getAgents() == []
# Test list when no filter is supplied but agents do occupy the environment
agent1 = Agent("a1", model)
agent2 = Agent("a2", model)
model.environment.addAgent(agent1)
model.environment.addAgent(agent2)
assert model.environment.getAgents() == [agent1, agent2]
# Test component filter when no agents meet the filter
assert model.environment.getAgents(Component) == []
# Test component filter when some agents meet the filter
agent1.addComponent(Component(agent1, model))
assert model.environment.getAgents(Component) == [agent1]
def test_setModel(self):
model = Model()
env = Environment(None)
assert env.model is not model
env.setModel(model)
assert env.model is model
def test__len__(self):
env = Environment(None)
# Test Empty case
assert len(env) == 0
# Test once agent has been added
env.addAgent(Agent("a1", None))
assert len(env) == 1
class TestModel:
def test__init__(self):
model = Model()
assert model.environment is not None
assert model.systemManager is not None
assert model.random is not None
assert model.logger is not None
assert model.logger.level == logging.INFO
logger = logging.getLogger('TEST')
logger.setLevel(logging.DEBUG)
model = Model(seed=30, logger=logger)
assert model.environment is not None
assert model.systemManager is not None
assert model.random.randint(25, 50) == 42
assert model.logger is logger
assert model.logger.level == logging.DEBUG
class TestComponent:
def test__init__(self):
model = Model()
agent = Agent("a1", model)
component = Component(agent, model)
assert component.model == model
assert component.agent == agent
class TestSystem:
def test__init__(self):
model = Model()
system = System("s1",model)
assert system.model == model
assert system.id == "s1"
assert system.start == 0
assert system.end == maxsize
assert system.frequency == 1
assert system.priority == 0
class TestSystemManager:
def test__init__(self):
model = Model()
sys_man = SystemManager(model)
assert sys_man.model == model
assert sys_man.timestep == 0
assert len(sys_man.systems) == 0
assert len(sys_man.executionQueue) == 0
assert len(sys_man.componentPools) == 0
def test_executeSystems(self):
model = Model()
s1 = System("s1", model)
model.systemManager.addSystem(s1)
s1 = System("s2", model)
model.systemManager.addSystem(s1)
model.systemManager.executeSystems()
assert model.systemManager.timestep == 1
def test_registerComponent(self):
model = Model()
s1 = System("s1", model)
model.systemManager.addSystem(s1)
assert Component not in model.systemManager.componentPools.keys()
agent1 = Agent("a1", model)
component1 = Component(agent1, model)
agent1.addComponent(component1)
assert len(model.systemManager.componentPools[Component]) == 1
assert model.systemManager.componentPools[Component][0] == component1
agent2 = Agent("a2", model)
component2 = Component(agent2, model)
agent2.addComponent(component2)
assert len(model.systemManager.componentPools[Component]) == 2
assert model.systemManager.componentPools[Component][0] == component1
assert model.systemManager.componentPools[Component][1] == component2
with pytest.raises(Exception):
model.systemManager.registerComponent(component1)
def test_deregisterComponent(self):
model = Model()
s1 = System("s1", model)
model.systemManager.addSystem(s1)
assert Component not in model.systemManager.componentPools.keys()
agent1 = Agent("a1", model)
component1 = Component(agent1, model)
agent1.addComponent(component1)
agent2 = Agent("a2", model)
component2 = Component(agent2, model)
agent2.addComponent(component2)
# deregister component 2 for basic remove check
model.systemManager.deregisterComponent(component2)
assert len(model.systemManager.componentPools[Component]) == 1
assert component2 not in model.systemManager.componentPools[Component]
# deregister a component that doesn't exist in the pool
with pytest.raises(Exception):
model.systemManager.deregisterComponent(component2)
        # Empty the component pool. This deletes the pool
model.systemManager.deregisterComponent(component1)
assert Component not in model.systemManager.componentPools.keys()
        # Try to delete from a pool that doesn't exist
with pytest.raises(Exception):
model.systemManager.deregisterComponent(component1)
def test_getComponents(self):
model = Model()
s1 = System("s1", model)
model.systemManager.addSystem(s1)
assert model.systemManager.getComponents(Component) is None
agent1 = Agent("a1", model)
component1 = Component(agent1, model)
agent1.addComponent(component1)
agent2 = Agent("a2", model)
component2 = Component(agent2, model)
agent2.addComponent(component2)
components = model.systemManager.getComponents(Component)
assert len(components) == 2
assert components[0] == component1
assert components[1] == component2
class TestAgent:
def test__init__(self):
model = Model()
agent = Agent("a1", model)
assert agent.model == model
assert agent.id == "a1"
assert len(agent.components) == 0
def test_addComponent(self):
model = Model()
agent = Agent("a1", model)
s1 = System("s1", model)
model.systemManager.addSystem(s1)
component = Component(agent, model)
agent.addComponent(component)
assert len(agent.components) == 1
with pytest.raises(Exception):
agent.addComponent(component)
def test_removeComponent(self):
model = Model()
agent = Agent("a1", model)
s1 = System("s1", model)
model.systemManager.addSystem(s1)
component = Component(agent, model)
agent.addComponent(component)
agent.removeComponent(Component)
assert len(agent.components) == 0
with pytest.raises(Exception):
agent.removeComponent(Component)
def test_getComponent(self):
model = Model()
agent = Agent("a1", model)
# Checks to see if getting a component that doesn't exist returns None
assert agent.getComponent(Component) is None
component = Component(agent, model)
agent.addComponent(component)
# Check to see if getting a component that does exist returns the component
assert agent.getComponent(Component) is component
def test__getitem__(self):
model = Model()
agent = Agent("a1", model)
# Checks to see if getting a component that doesn't exist returns None
assert agent[Component] is None
component = Component(agent, model)
agent.addComponent(component)
# Check to see if getting a component that does exist returns the component
assert agent[Component] is component
def test__len__(self):
model = Model()
agent = Agent("a1", model)
# Test empty case
assert len(agent) == 0
# Test case when component is added
agent.addComponent(Component(agent, model))
assert len(agent) == 1
def test_hasComponent(self):
model = Model()
agent = Agent("a1", model)
# False check
assert not agent.hasComponent(Component)
component = Component(agent, model)
agent.addComponent(component)
# True check
assert agent.hasComponent(Component)
# Check for multiple components
class CustomComponent(Component):
def __init__(self, a, m):
super().__init__(a, m)
# Test should fail on multiple components
        assert not agent.hasComponent(Component, CustomComponent)
# Test should pass on multiple components
agent.addComponent(CustomComponent(agent,model))
assert agent.hasComponent(Component, CustomComponent)
def test__contains__(self):
model = Model()
agent = Agent("a1", model)
# False check
assert Component not in agent
component = Component(agent, model)
agent.addComponent(component)
# True check
assert Component in agent
|
from dataclasses import dataclass
from typing import List, Optional
## Exceptions
@dataclass
class NameNotFound(Exception):
name: str
local: Optional[bool] = None
@dataclass
class NameNotAssigned(Exception):
name: str
@dataclass
class CannotRebindConstant(Exception):
name: str
## Classes
@dataclass
class Binding:
name: str
type: '_Type'
assigned: bool = False
const: bool = False
def rebind(self, type, assignment=True, const=False):
from .types import typecheck
typecheck(type, self.type)
if self.assigned and assignment and self.const:
raise CannotRebindConstant(self.name)
elif self.const and not const:
raise CannotRebindConstant(self.name)
else:
self.assigned |= assignment
self.const = const
@dataclass
class Scope:
def __init__(self, *bindings, parent=None):
self.bindings = list(bindings)
self.parent = parent
def __getitem__(self, item):
binding = self.bindings[item]
return binding
def index(self, name, local=None):
if local != False:
for index, binding in enumerate(self.bindings):
if binding.name == name:
return index, 0
if local != True:
if self.parent:
try:
index, scope = self.parent.index(name)
except NameNotFound as e:
e.local = local
raise e
if scope != -1:
scope += 1
return index, scope
raise NameNotFound(name, local)
def get(self, index, scope=0):
if scope == -1:
return builtins[index]
elif scope == 0:
return self[index]
elif self.parent:
return self.parent.get(index, scope-1)
else:
raise ValueError
def getname(self, name, local=None):
return self.get(*self.index(name, local))
def bind(self, name, type, assignment=True, const=False):
for index, binding in enumerate(self.bindings):
if binding.name == name:
binding.rebind(type, assignment, const)
return index
else:
index = len(self.bindings)
binding = Binding(name, type, assignment, const)
self.bindings.append(binding)
return index
def child(self, *bindings):
return Scope(*bindings, parent=self)
builtins = Scope(
)
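# Minimal usage sketch (an illustrative assumption; `int` stands in for whatever
# '_Type' instances the surrounding type system actually supplies):
if __name__ == '__main__':
    globals_ = Scope()
    globals_.bind('x', int)            # first binding of 'x' in the outer scope
    locals_ = globals_.child()
    binding = locals_.getname('x')     # resolved through the parent scope
    assert binding.name == 'x' and binding.assigned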
|
import string
class Solution:
def isPalindrome(self, s: str) -> bool:
l = []
for char in s:
if char in string.ascii_letters or char in string.digits:
l.append(char.lower())
news = '_'.join(l)
return self.check(news)
def check(self, news):
# news = '_'.join(s)
length = len(news)
if length == 0:
return True
mid = length // 2
for i in range(mid+1):
x = mid + i
y = mid - i
if news[x] != news[y]:
return False
return True
s = '0P'
so = Solution()
r = so.isPalindrome(s)
print(r)
|
from django.test import TestCase
from demo import models
class CategoryTests(TestCase):
def setUp(self):
for i in range(10):
category = models.Category()
category.title = 'cat{}'.format(i)
category.save()
def test_count(self):
count = models.Category.objects.count()
self.assertEqual(count, 10)
class TagTests(TestCase):
def setUp(self):
for i in range(10):
tag = models.Tag()
tag.title = 'tag{}'.format(i)
tag.save()
def test_count(self):
count = models.Tag.objects.count()
self.assertEqual(count, 10)
class PostTests(TestCase):
def setUp(self):
for i in range(10):
post = models.Post()
post.title = 'post{}'.format(i)
post.save()
def test_count(self):
count = models.Post.objects.count()
self.assertEqual(count, 10)
|
class dotRebarSplice_t(object):
# no doc
BarPositions = None
Clearance = None
LapLength = None
ModelObject = None
Offset = None
Reinforcement1 = None
Reinforcement2 = None
Type = None
|
# -*- coding: utf-8 -*-
from slackbot.bot import respond_to
import re
import json
@respond_to('hello', re.IGNORECASE)
def greet(message):
attachments = [
{
'fallback': 'Fallback text',
'author_name': 'Hello from Keptn',
'author_link': 'https://keptn.sh',
'text': 'An opinionated open-source framework for event-based, automated continuous operations in cloud-native environments. Learn more https://keptn.sh',
'color': '#59afe1'
}]
message.send_webapi('', json.dumps(attachments))
|
from contractpy.main.contract_validator import ContractValidator
import pytest
from contractpy.main.types import Types
from contractpy.main.exceptions.invalid_format import InvalidFormat
from contractpy.main.exceptions.invalid_value import InvalidValue
def test_validate_contract_returns_none_for_valid_formats():
assert ContractValidator._validate_format({"a": "b"}) is None
assert ContractValidator._validate_format([{"a": "b"}]) is None
def test_validate_contract_raise_exception_for_unexpected_format():
with pytest.raises(InvalidFormat) as e:
ContractValidator._validate_format(('a', 'b'))
assert str(e.value) == "Invalid contract format. Valid formats are dict,list."
def test_validate_contract_returns_none_for_valid_contract():
valid_contracts = [
{
"first": Types.STRING,
"second": Types.INTEGER,
"third": Types.FLOAT
},
[{
"first": Types.STRING,
"second": Types.INTEGER,
"third": Types.FLOAT
}],
{
"first": Types.STRING,
"second": Types.INTEGER,
"third": Types.FLOAT,
"fourth": [{"fifth": Types.INTEGER}]
},
{
"first": Types.STRING,
"second": Types.INTEGER,
"third": Types.FLOAT,
"fourth": {
"fifth": [{
"sixth": Types.INTEGER
}]
}
},
[Types.STRING],
{
"first": Types.STRING,
"second": [{
"third": Types.INTEGER
}],
"third": [Types.FLOAT]
}
]
for contract in valid_contracts:
assert ContractValidator.validate(contract) is None
def test_validate_contract_raise_exception_for_unexpected_value():
with pytest.raises(InvalidValue) as e:
ContractValidator.validate({"first": 'Double'})
assert str(e.value) == 'Invalid value for the field first. Valid values are FLOAT, INTEGER, STRING.'
|
from collections import namedtuple
from linkedlist import Node
NODEDATA = namedtuple("Node", ["left", "item"])
class SinglyLinkedTree:
def __init__(self, items=None):
"""
Initialize the binary search tree and insert the given items.
"""
self.root = None
self.size = 0
if items is not None:
for item in items:
self.insert(item)
def is_empty(self):
"""
Is empty function for singly linked tree
"""
return self.root is None
def height(self):
pass
def contains(self, item):
pass
def search(self, item):
pass
def insert(self, item):
if not self.root:
data = NODEDATA(None, item)
self.root = Node(data)
def delete(self, item):
pass
def _find_node_recursive(self, item, node):
pass
def _find_parent_node_recursive(self, item, node, parent=None):
pass
def items_in_order(self):
pass
|
from david.components.nlu.simple import SimpleNLU
|
from .context import tweet_sentiment
from sklearn.linear_model import SGDClassifier
def test_model_load():
model = tweet_sentiment.load()
pred = model.predict(['süper bir haber bu'])
assert pred[0] == 0
|
#!/usr/bin/env python
import argparse
from logging import StreamHandler, basicConfig, getLogger
import argcomplete
import pkg_resources
import atcoder_helper.command.gen
logger = getLogger(__name__)
logger.setLevel("DEBUG")
version = pkg_resources.get_distribution("atcoder-helper").version
def get_sub_commands():
return [v for v in vars(atcoder_helper.command) if not v.startswith("_")]
def get_sub_command_module(sub_command):
return getattr(atcoder_helper.command, sub_command)
def main() -> None:
handler = StreamHandler()
handler.setLevel("INFO")
basicConfig(handlers=[handler])
parser = argparse.ArgumentParser()
parser.add_argument("--version", action="version", version=f"%(prog)s {version}")
subparsers = parser.add_subparsers(dest="command")
for sub_command in get_sub_commands():
sub_command_module = get_sub_command_module(sub_command)
sub_command_module.add_arguments(subparsers.add_parser(sub_command))
argcomplete.autocomplete(parser, exclude=["-h", "--help", "--version"])
args = parser.parse_args()
sub_command_module = get_sub_command_module(args.command)
sub_command_module.main(args)
if __name__ == "__main__":
main()
|
from itertools import chain
import numpy as np
from circuit_recognizer.connect.line import DirectedLine, detect_intersection_rect
class TestLine:
def test_detect_intersection(self):
image = np.array(
[
[False, False, True, False, False, False, False],
[False, False, True, True, True, True, True],
[False, False, True, False, False, False, False],
[False, False, True, False, True, False, False],
[False, False, True, False, False, False, False],
[False, False, True, False, False, False, False],
[False, False, True, False, False, False, False],
]
)
line = DirectedLine.btwn(np.array([6, 2]), np.array([0, 2]))
intersection = line.detect_intersection(image)
assert intersection
(x, y, width, height) = intersection.bounding_box()
assert width == 1
assert height == 1
assert line.remove_overlap_into(intersection.bounding_box())
def test_detect_intersection_big(self):
image = np.zeros((20, 20), dtype=bool)
image[:, 12] = True
image[5, :] = True
line = DirectedLine.btwn(np.array([10, 12]), np.array([0, 12]))
intersection = line.detect_intersection(image)
assert intersection
(x, y, width, height) = intersection.bounding_box()
assert x == 12
assert y == 5
assert width == 1
assert height == 1
assert line.remove_overlap_into(intersection.bounding_box())
def test_detect_intersection_rect_T(self):
image = np.array(
[
[False, False, False, False],
[True, True, True, True],
[True, True, True, True],
[True, True, True, True],
[False, True, True, False],
]
)
i_regions = [
DirectedLine.btwn(*np.array([(3, 1), (1, 1)])),
DirectedLine.btwn(*np.array([(3, 2), (1, 2)])),
DirectedLine.btwn(*np.array([(3, 1), (3, 2)])),
DirectedLine.btwn(*np.array([(2, 1), (2, 2)])),
DirectedLine.btwn(*np.array([(1, 1), (1, 2)])),
]
for line in i_regions:
(top_left, bottom_right) = detect_intersection_rect(image, line)
assert (
top_left == 1
).all(), f"{line.direction}: Top left of junction should be [1 1] but was {top_left}"
assert (
bottom_right == np.array([3, 2])
).all(), f"{line.direction}: Intersection BR should be [3 2] but was {bottom_right}"
line = line.reverse()
(top_left, bottom_right) = detect_intersection_rect(image, line)
assert (
top_left == 1
).all(), f"{line.direction}: Top left of junction should be [1 1] but was {top_left}"
assert (
bottom_right == np.array([3, 2])
).all(), f"{line.direction}: Intersection BR should be [3 2] but was {bottom_right}"
def test_detect_intersection_rect_L(self):
image = np.array(
[
[False, True, True, False],
[False, True, True, True],
[False, True, True, True],
[False, False, False, False],
]
)
i_regions = [
DirectedLine.btwn(*np.array([(1, 1), (1, 2)])),
DirectedLine.btwn(*np.array([(1, 2), (1, 1)])),
DirectedLine.btwn(*np.array([(1, 2), (2, 2)])),
DirectedLine.btwn(*np.array([(2, 2), (1, 2)])),
]
for line in i_regions:
(top_left, bottom_right) = detect_intersection_rect(image, line)
assert (
top_left == 1
).all(), f"Expected top left to be [1 1]. Found {top_left}"
assert (
bottom_right == 2
).all(), f"Expected bottom right to be [2 2]. Found {bottom_right}"
def test_detect_intersection_rect_L_270(self):
image = np.array(
[
[False, True, True, False],
[True, True, True, False],
[True, True, True, False],
[False, False, False, False],
]
)
i_regions = [
DirectedLine.btwn(*np.array([(1, 1), (1, 2)])),
DirectedLine.btwn(*np.array([(1, 2), (1, 1)])),
DirectedLine.btwn(*np.array([(1, 1), (2, 1)])),
DirectedLine.btwn(*np.array([(2, 1), (1, 1)])),
]
for line in i_regions:
(top_left, bottom_right) = detect_intersection_rect(image, line)
assert (top_left == 1).all()
assert (bottom_right == 2).all()
def test_detect_intersection_L_1px(self):
image = np.array(
[
[False, True, False, False],
[False, True, False, False],
[False, True, True, True],
[False, False, False, False],
]
)
i_regions = [
DirectedLine.btwn(*np.array([(1, 1), (1, 2)])),
DirectedLine.btwn(*np.array([(1, 2), (1, 1)])),
DirectedLine.btwn(*np.array([(1, 2), (2, 2)])),
DirectedLine.btwn(*np.array([(2, 2), (1, 2)])),
]
for line in i_regions:
(top_left, bottom_right) = detect_intersection_rect(image, line)
assert (
top_left == 1
).all(), f"Expected top left to be [1 1]. Found {top_left}"
assert (
bottom_right == 2
).all(), f"Expected bottom right to be [2 2]. Found {bottom_right}"
|
nr = int(input("Digite um nr para ver a sua tabuada: "))
star = '-'
print(f'{star*12:^15}\n {nr} x 1 = {nr*1}\n {nr} x 2 = {nr*2}\n {nr} x 3 = {nr*3}\n {nr} x 4 = {nr*4}\n {nr} x 5 = {nr*5}\n {nr} x 6 = {nr*6}\n {nr} x 7 = {nr*7}\n {nr} x 8 = {nr*8}\n {nr} x 9 = {nr*9}\n {nr} x 10 = {nr*10}\n{star*12:^15}')
|
# Generated by Django 2.0.3 on 2018-05-19 11:02
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('tests', '0007_add_bandmember_favourite_restaurant'),
]
operations = [
migrations.CreateModel(
name='House',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('address', models.CharField(max_length=255)),
],
options={
'ordering': ['id'],
},
),
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('houses', modelcluster.fields.ParentalManyToManyField(related_name='occupants', to='tests.House')),
],
options={
'ordering': ['id'],
},
),
migrations.CreateModel(
name='Room',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
],
options={
'ordering': ['id'],
},
),
migrations.AddField(
model_name='house',
name='main_room',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='main_room_of', to='tests.Room'),
),
migrations.AddField(
model_name='house',
name='owner',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='tests.Person'),
),
]
|
# public interface
from Regex import Regex
def match(pattern, string):
"""Try to apply the pattern at the start of the string, returning true
or None if no match was found."""
return Regex(pattern).match(string)
def fullmatch(pattern, string):
"""Try to apply the pattern to all of the string, returning true
or None if no match was found."""
return Regex(pattern).fullmatch(string)
def search(pattern, string):
"""Scan through string looking for a match to the pattern, returning true
or None if no match was found."""
return Regex(pattern).search(string)
def compile(pattern):
"Compile a regular expression pattern, returning a pattern object."
return Regex(pattern)
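# A minimal usage sketch, assuming the underlying Regex class supports plain
# literals and the star quantifier; the exact value returned on a match depends
# on the Regex implementation, which is not shown here.
if __name__ == "__main__":
    print(match("ab*c", "abbbc and more"))  # anchored at the start of the string
    print(fullmatch("ab*c", "abbbc"))       # must consume the whole string
    print(search("b*c", "xxabbbc"))         # may match anywhere in the string
    compiled = compile("abc")
    print(compiled.match("abcdef"))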
|
import datetime
import matplotlib.pyplot as plt
import pynmea2
import simple_geo
def parse_nmea_messages(path: str):
nmea_messages = []
gps = open(path, 'r')
lines = gps.readlines()
for line in lines:
try:
# remove first character as csv has extra comma
nmea_message = pynmea2.parse(line.strip()[1:])
nmea_messages.append(nmea_message)
except pynmea2.ParseError as e:
print(f'Parse error {e}')
continue
return nmea_messages
def get_gga_messages(nmea_messages) -> [pynmea2.GGA]:
messages = []
for nmea_message in nmea_messages:
if isinstance(nmea_message, pynmea2.GGA):
messages.append(nmea_message)
return messages
def get_rmc_messages(nmea_messages) -> [pynmea2.RMC]:
messages = []
for nmea_message in nmea_messages:
if isinstance(nmea_message, pynmea2.RMC):
print(nmea_message)
messages.append(nmea_message)
return messages
def parse_gga_heading(gga_list: [pynmea2.GGA]):
last_position = None
headings = []
date_times = []
for gga in gga_list:
if last_position is None:
last_position = simple_geo.GeoPostion(gga.latitude, gga.longitude)
else:
current_position = simple_geo.GeoPostion(gga.latitude, gga.longitude)
heading = simple_geo.calculate_delta(current_position, last_position)[1]
headings.append(heading)
# todo not hardcode this part
date_prefix = datetime.date(2021, 6, 15)
datetime_gga = datetime.datetime.combine(date_prefix, gga.timestamp)
date_times.append(datetime_gga)
last_position = simple_geo.GeoPostion(gga.latitude, gga.longitude)
return date_times, headings
def parse_rmc_heading(rmc_list: [pynmea2.RMC]):
headings = []
date_times = []
for rmc in rmc_list:
headings.append(rmc.true_course)
date_times.append(rmc.datetime)
return date_times, headings
if __name__ == "__main__":
nmea_messages = parse_nmea_messages('data/june-16-moving-test/gps-10-raw-5m.csv')
nmea_messages2 = parse_nmea_messages('data/june-16-moving-test/gps-11-raw-5m.csv')
nmea_messages3 = parse_nmea_messages('data/june-16-moving-test/gps-12-raw-5m.csv')
rmc = get_rmc_messages(nmea_messages)
rmc2 = get_rmc_messages(nmea_messages2)
rmc3 = get_rmc_messages(nmea_messages3)
# gga = get_gga_messages(nmea_messages)
# gga2 = get_gga_messages(nmea_messages2)
# gga3 = get_gga_messages(nmea_messages3)
rmc_date_times, rmc_headings = parse_rmc_heading(rmc)
rmc_date_times2, rmc_headings2 = parse_rmc_heading(rmc2)
rmc_date_times3, rmc_headings3 = parse_rmc_heading(rmc3)
# gga_date_times, gga_headings = parse_gga_heading(gga)
# gga_date_times2, gga_headings2 = parse_gga_heading(gga2)
# gga_date_times3, gga_headings3 = parse_gga_heading(gga3)
plt.plot(rmc_date_times, rmc_headings, '.')
plt.plot(rmc_date_times2, rmc_headings2, '.')
plt.plot(rmc_date_times3, rmc_headings3, '.')
plt.show()
# plt.plot(gga_date_times, gga_headings, '.')
# plt.plot(gga_date_times2, gga_headings2, '.')
# plt.plot(gga_date_times3, gga_headings3, '.')
# plt.show()
|
import datetime
class Solution:
# @return a list of lists of length 3, [[val1,val2,val3]]
def threeSum(self, nums):
#starttime = datetime.datetime.now()
target = 0
nums.sort()
l = len(nums)
m = 0
r = {}
for x in range(l):
if nums[x] >0 and m == 0:
m = x
last = l
lastv = None
ind = 0
for x in range(l):
if x >0 and nums[x] == nums[x-1]:
continue
for y in range(x+1, l):
if y > x+1 and nums[y] == nums[y-1]:
continue
cur = nums[x]+nums[y]
if cur > target:
break
zs = y+1
ze = last
if lastv != None and cur < lastv:
zs = last
ze = l
if cur < 0 and zs < m:
zs = m
#find = False
for z in range(zs, ze):
if z > zs and nums[z] == nums[z-1]:
continue
n = cur + nums[z]
if n == target:
#find = True
r[ind] = ([nums[x],nums[y],nums[z]])
ind+=1
last = z
lastv = cur
break
elif cur > target:
break
#if find:
# break
#endtime = datetime.datetime.now()
#print (endtime - starttime)
        return list(r.values())
s = Solution()
#print s.threeSum([-1,0,1,2,-1,-4])
#print s.threeSum([-9,14,-7,-8,9,1,-10,-8,13,12,6,9,3,-3,-15,-15,1,8,-7,-4,-6,8,2,-10,8,11,-15,3,0,-11,-1,-1,10,0,6,5,-14,3,12,-15,-7,-5,9,11,-1,1,3,-15,-5,11,-12,-4,-4,-2,-6,-10,-6,-6,0,2,-9,14,-14,-14,-9,-1,-2,-7,-12,-13,-15,-4,-3,1,14,3,-12,3,3,-10,-9,-1,-7,3,12,-6,0,13,4,-15,0,2,6,1,3,13,8,-13,13,11,11,13,14,-6])
#print s.threeSum([-10,5,-11,-15,7,-7,-10,-8,-3,13,9,-14,4,3,5,-7,13,1,-4,-11,5,9,-11,-4,14,0,3,-10,-3,-7,10,-5,13,14,-5,6,14,0,5,-12,-10,-1,-11,9,9,1,-13,0,-13,-1,4,0,-7,8,3,14,-15,-9,-10,-3,0,-15,-1,-2,6,9,11,6,-14,1,1,-9,-14,6,7,10,14,2,-13,-13,8,6,-6,8,-9,12,7,-9,-11,4,-4,-4,4,10,1,-12,-3,-2,1,-10,6,-13,-3,-1,0,11,-5,0,-2,-11,-6,-9,11,3,14,-13,0,7,-14,-4,-4,-11,-1,8,6,8,3])
#print s.threeSum([2,5,5,8,-7,-9,5,-1,-4,2,8,4,-6,-2,-2,9,-2,13,1,0,9,9,4,-13,13,3,-14,11,-5,-13,3,4,7,-15,-11,7,13,1,13,-14,11,-1,5,-10,12,11,14,-13,1,-8,3,-4,-14,14,-10,-15,-6,-9,3,-4,-7,-8,-15,8,-8,12,-8,13,-2,-9,14,-6,5,-3,-9,-6,-7,-10,-3,9,-2,7,-10,-9,-2,-5,13,7,-6,2,-12,-6,1,10,9,0,7,-13,-2,-9,-7,-2,-8,5,10,-1,6,-12,4,10,12,9,2,10,8,-15,12,6,-1,-9,-7,2])
starttime = datetime.datetime.now()
print(s.threeSum(
[7,-1,14,-12,-8,7,2,-15,8,8,-8,-14,-4,-5,7,9,11,-4,-15,-6,1,-14,4,3,10,-5,2,1,6,11,2,-2,-5,-7,-6,2,-15,11,-6,8,-4,2,1,-1,4,-6,-15,1,5,-15,10,14,9,-8,-6,4,-6,11,12,-15,7,-1,-9,9,-1,0,-4,-1,-12,-2,14,-9,7,0,-3,-4,1,-2,12,14,-10,0,5,14,-1,14,3,8,10,-8,8,-5,-2,6,-11,12,13,-7,-12,8,6,-13,14,-2,-5,-11,1,3,-6]
))
endtime = datetime.datetime.now()
print (endtime - starttime)
#print s.threeSum([0,0,0])
#print [[-4,-2,6],[-4,0,4],[-4,1,3],[-4,2,2],[-2,-2,4],[-2,0,2]]
#print s.threeSum([-4,-2,-2,-2,0,1,2,2,2,3,3,4,4,6,6])
|
from __future__ import absolute_import
from . import plexapp
from . import util
class Captions(object):
def __init__(self):
self.deviceInfo = util.INTERFACE.getGlobal("deviceInfo")
self.textSize = util.AttributeDict({
'extrasmall': 15,
'small': 20,
'medium': 30,
'large': 45,
'extralarge': 65,
})
self.burnedSize = util.AttributeDict({
'extrasmall': "60",
'small': "80",
'medium': "100",
'large': "135",
'extralarge': "200"
})
self.colors = util.AttributeDict({
'white': 0xffffffff,
'black': 0x000000ff,
'red': 0xff0000ff,
'green': 0x008000ff,
'blue': 0x0000ffff,
'yellow': 0xffff00ff,
'magenta': 0xff00ffff,
'cyan': 0x00ffffff,
})
self.defaults = util.AttributeDict({
'textSize': self.textSize.medium,
'textColor': self.colors.white,
'textOpacity': 80,
'backgroundColor': self.colors.black,
'backgroundOpacity': 70,
'burnedSize': None
})
def getTextSize(self):
value = self.getOption("Text/Size")
return self.textSize.get(value) or self.defaults.textSize
def getTextColor(self):
value = self.getOption("Text/Color")
return self.colors.get(value) or self.defaults.textColor
def getTextOpacity(self):
value = self.getOption("Text/Opacity")
if value is None or value == "default":
return self.defaults.textOpacity
else:
return int(value)
def getBackgroundColor(self):
value = self.getOption("Background/Color")
return self.colors.get(value) or self.defaults.backgroundColor
def getBackgroundOpacity(self):
value = self.getOption("Background/Opacity")
if value is None or value == "default":
return self.defaults.backgroundOpacity
else:
return int(value)
def getBurnedSize(self):
value = self.getOption("Text/Size")
return self.burnedSize.get(value) or self.defaults.burnedSize
def getOption(self, key):
opt = self.deviceInfo.getCaptionsOption(key)
return opt is not None and opt.lower().replace(' ', '') or None
CAPTIONS = Captions()
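# A minimal usage sketch (assumption): callers read the resolved caption styling
# through the shared CAPTIONS instance; each getter falls back to the defaults
# above when the device reports no explicit option. Left commented out because it
# requires a live util.INTERFACE with device info.
# subtitle_style = {
#     'size': CAPTIONS.getTextSize(),
#     'color': CAPTIONS.getTextColor(),
#     'opacity': CAPTIONS.getTextOpacity(),
#     'background': CAPTIONS.getBackgroundColor(),
#     'backgroundOpacity': CAPTIONS.getBackgroundOpacity(),
# }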
|
class A():
def method(self):
print(1)
class B(A):
def method(self):
print(2)
a = A()
a.meth<caret>od()
|
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
import uuid
from tests import utils
from tests.modules.annotations.resources import utils as annot_utils
from tests.modules.asset_groups.resources import utils as sub_utils
from tests.modules.encounters.resources import utils as enc_utils
import pytest
from tests.utils import module_unavailable
@pytest.mark.skipif(
module_unavailable('asset_groups'), reason='AssetGroups module disabled'
)
def test_patch_annotation(
flask_app_client,
admin_user,
researcher_1,
test_clone_asset_group_data,
request,
test_root,
):
# pylint: disable=invalid-name
from app.modules.annotations.models import Annotation
from app.modules.encounters.models import Encounter
clone = sub_utils.clone_asset_group(
flask_app_client,
researcher_1,
test_clone_asset_group_data['asset_group_uuid'],
)
uuids = enc_utils.create_encounter(flask_app_client, researcher_1, request, test_root)
first_enc_guid = uuids['encounters'][0]
response = annot_utils.create_annotation(
flask_app_client,
researcher_1,
test_clone_asset_group_data['asset_uuids'][0],
first_enc_guid,
)
annotation_guid = response.json['guid']
read_annotation = Annotation.query.get(annotation_guid)
assert read_annotation.asset_guid == uuid.UUID(
test_clone_asset_group_data['asset_uuids'][0]
)
first_encounter = Encounter.query.get(first_enc_guid)
assert len(first_encounter.annotations) == 1
assert first_encounter.annotations[0].guid == uuid.UUID(annotation_guid)
uuids = enc_utils.create_encounter(flask_app_client, researcher_1, request, test_root)
second_enc_guid = uuids['encounters'][0]
move_to_second_enc = [
utils.patch_replace_op('encounter_guid', '%s' % second_enc_guid),
]
# Try changing encounter
annot_utils.patch_annotation(
flask_app_client, annotation_guid, researcher_1, move_to_second_enc
)
second_encounter = Encounter.query.get(second_enc_guid)
assert len(first_encounter.annotations) == 0
assert len(second_encounter.annotations) == 1
assert second_encounter.annotations[0].guid == uuid.UUID(annotation_guid)
# change ia_class via patch
assert read_annotation.ia_class == 'test'
new_ia_class = 'test2'
patch_arg = [
utils.patch_replace_op('ia_class', new_ia_class),
]
annot_utils.patch_annotation(
flask_app_client, annotation_guid, researcher_1, patch_arg
)
read_annotation = Annotation.query.get(annotation_guid)
assert read_annotation.ia_class == new_ia_class
# fail setting ia_class null
patch_arg = [
utils.patch_replace_op('ia_class', None),
]
annot_utils.patch_annotation(
flask_app_client,
annotation_guid,
researcher_1,
patch_arg,
expected_status_code=422,
)
read_annotation = Annotation.query.get(annotation_guid)
assert read_annotation.ia_class == new_ia_class # unchanged from before
# change bounds via patch
new_bounds = {'rect': [100, 200, 300, 400]}
patch_arg = [
utils.patch_replace_op('bounds', new_bounds),
]
annot_utils.patch_annotation(
flask_app_client, annotation_guid, researcher_1, patch_arg
)
read_annotation = Annotation.query.get(annotation_guid)
assert read_annotation.bounds == new_bounds
# change bounds via patch, but invalid bounds value
new_bounds = {'rect': [100, 200]}
patch_arg = [
utils.patch_replace_op('bounds', new_bounds),
]
response = annot_utils.patch_annotation(
flask_app_client,
annotation_guid,
researcher_1,
patch_arg,
expected_status_code=422,
)
assert response.json['message'] == 'bounds value is invalid'
read_annotation = Annotation.query.get(annotation_guid)
assert read_annotation.bounds != new_bounds
# And deleting it
annot_utils.delete_annotation(flask_app_client, researcher_1, annotation_guid)
read_annotation = Annotation.query.get(annotation_guid)
assert read_annotation is None
clone.cleanup()
|
import numpy as np
import os
from urllib.request import urlretrieve
from tensorflow import keras
MNIST_PATH = "./mnist.npz"
def load_mnist(path="./mnist.npz", norm=True):
if not os.path.isfile(path):
print("not mnist data is found, try downloading...")
urlretrieve("https://s3.amazonaws.com/img-datasets/mnist.npz", path)
with np.load(path, allow_pickle=True) as f:
x_train = f['x_train'].astype(np.float32)[:, :, :, None]
y_train = f['y_train'].astype(np.float32)[:, None]
x_test = f['x_test'].astype(np.float32)[:, :, :, None]
y_test = f['y_test'].astype(np.float32)[:, None]
if norm:
x_train /= 255
x_test /= 255
return (x_train, y_train), (x_test, y_test)
def save_model_structure(model: keras.Model, path=None):
if path is None:
path = "visual/{}/{}_structure.png".format(model.name, model.name)
os.makedirs(os.path.dirname(path), exist_ok=True)
try:
keras.utils.plot_model(model, show_shapes=True, expand_nested=True, dpi=150, to_file=path)
except Exception as e:
print(e)
def save_model_weights(model: keras.Model, path=None):
if path is None:
path = "visual/{}/model/net".format(model.name)
os.makedirs(os.path.dirname(path), exist_ok=True)
model.save_weights(path)
def load_model_weights(model: keras.Model, path=None):
if path is None:
path = "visual/{}/model/net".format(model.name)
model.load_weights(path)
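# A minimal usage sketch (assumption): a training script might combine these
# helpers as below; `build_model` is a hypothetical keras.Model factory that is
# not defined in this module, so the example is left commented out.
# (x_train, y_train), (x_test, y_test) = load_mnist(MNIST_PATH)
# model = build_model()  # hypothetical constructor returning a keras.Model
# save_model_structure(model)
# model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=1)
# save_model_weights(model)
# load_model_weights(model)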
|
import os
import re
class LanguageModelContent:
def __init__(self, words,count):
self.words = words
self.count = count
def __str__(self):
return self.words + '\t' +self.count
if __name__ == "__main__":
dir_list = sorted(os.listdir('/Users/geekye/Documents/Dataset/LM/UniBiGram'))
# because ngrams-[00030 - 00036]-of-00394 have no invalid data
    filtered_list = [dir for dir in dir_list if dir >= 'ngrams-00001-of-00394' and dir <= 'ngrams-00029-of-00394']
for file_name in filtered_list:
grams_2 = []
with open('/Users/geekye/Documents/Dataset/LM/UniBiGram/'+ file_name) as file:
for line in file:
if re.match('^[\u4e00-\u9fa5]{1,8}[\s\t]{1,}[\u4e00-\u9fa5]{1,8}[\s\t]{1,}\d{1,}', line):
segments = line.split('\t')
words = segments[0]
count = segments[1]
model = LanguageModelContent(words, count)
grams_2.append(model)
if len(grams_2) == 0:
continue
with open('/Users/geekye/Documents/Dataset/LM/gram2'+ file_name, 'a') as file:
            print(file_name + ' has been started!')
for model in grams_2:
file.write(str(model) + '\n')
        print(file_name + ' has been processed!')
|
from django.apps import AppConfig
class AnalysisAndTrainingConfig(AppConfig):
name = "analysis_and_training"
|
print("hello world")
#what's up
|
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
# Turn on and off a LED from your Adafruit IO Dashboard.
# adafruit_circuitpython_adafruitio with an esp32spi_socket
import time
import board
import busio
from digitalio import DigitalInOut, Direction, Pull
import adafruit_esp32spi.adafruit_esp32spi_socket as socket
from adafruit_esp32spi import adafruit_esp32spi
import adafruit_requests as requests
from adafruit_io.adafruit_io import IO_HTTP, AdafruitIO_RequestError
import displayio
import terminalio
from adafruit_display_text import label
import adafruit_displayio_ssd1306
# pylint: disable=no-name-in-module,wrong-import-order
try:
from secrets import secrets
except ImportError:
print("WiFi secrets are kept in secrets.py, please add them there!")
raise
def ring(relay, snooze, stop):
    start = time.monotonic()
while not stop:
if snooze:
time.sleep(0.1)
if not stop:
time.sleep(300)
        if time.monotonic() - start >= 2:
relay.value = not relay.value
displayio.release_displays()
esp32_cs = DigitalInOut(board.D13)
esp32_ready = DigitalInOut(board.D11)
esp32_reset = DigitalInOut(board.D12)
spi = busio.SPI(board.SCK, board.MOSI, board.MISO)
esp = adafruit_esp32spi.ESP_SPIcontrol(spi, esp32_cs, esp32_ready, esp32_reset)
print(f"Connecting to AP {secrets['ssid']}...")
while not esp.is_connected:
try:
esp.connect_AP(secrets["ssid"], secrets["password"])
except RuntimeError as e:
print("could not connect to AP, retrying: ", e)
continue
print("Connected to", str(esp.ssid, "utf-8"), "\tRSSI:", esp.rssi)
socket.set_interface(esp)
requests.set_socket(socket, esp)
aio_username = secrets["aio_username"]
aio_key = secrets["aio_key"]
# Initialize an Adafruit IO HTTP API object
io = IO_HTTP(aio_username, aio_key, requests)
# Get the 'enge-1216.digital' feed from Adafruit IO
digital_feed = io.get_feed("enge-1216.digital")
# Get the 'enge-1216.alarm' feed from Adafruit IO
alarm_feed = io.get_feed("enge-1216.alarm")
# Get the 'enge-1216.alarm-default-days' feed from Adafruit IO
days_feed = io.get_feed("enge-1216.alarm-default-days")
# Get the 'enge-1216.alarm-time' feed from Adafruit IO
time_feed = io.get_feed("enge-1216.alarm-time")
# Get the 'enge-1216.skip-next-alarm' feed from Adafruit IO
skip_feed = io.get_feed("enge-1216.skip-next-alarm")
i2c = board.I2C()
display_bus = displayio.I2CDisplay(i2c, device_address=0x3C)
WIDTH = 128
HEIGHT = 32
BORDER = 2
display = adafruit_displayio_ssd1306.SSD1306(display_bus, width=WIDTH, height=HEIGHT)
splash = displayio.Group(max_size=10)
display.show(splash)
digital_label = label.Label(terminalio.FONT, text="digital: ", color=0xFFFFFF, x=4, y=4)
splash.append(digital_label)
alarm_label = label.Label(terminalio.FONT, text="Next: ", color=0xFFFFFF, x=4, y=14)
splash.append(alarm_label)
time_label = label.Label(terminalio.FONT, text=" ", color=0xFFFFFF, x=4, y=24)
splash.append(time_label)
display.refresh()
# Set up Relay
RELAY = DigitalInOut(board.D10)
RELAY.direction = Direction.OUTPUT
# Set up buttons
BTN_A = DigitalInOut(board.D9)
BTN_A.direction = Direction.INPUT
BTN_A.pull = Pull.UP
BTN_B = DigitalInOut(board.D6)
BTN_B.direction = Direction.INPUT
BTN_C = DigitalInOut(board.D5)
BTN_C.direction = Direction.INPUT
weekdays = {0: "Su", 1: "Mo", 2: "Tu", 3: "We", 4: "Th", 5: "Fr", 6: "Sa"}
last = 0
last_1 = 0
while True:
if time.monotonic() - last >= 5:
print("getting data from IO...")
if time.monotonic() - last_1 >= 1800:
# get data from alarm days feed
days = io.receive_data(days_feed["key"])["value"].split(",")
print(days)
# get data from alarm time feed
alarm_time = io.receive_data(time_feed["key"])["value"].split(":")
print(alarm_time)
# get data from skip alarm feed
skip = io.receive_data(skip_feed["key"])["value"]
print(skip)
last_1 = time.monotonic()
# Get data from digital feed
digital_feed_data = io.receive_data(digital_feed["key"])
digital = digital_feed_data["value"]
# Get the datetime
dt = io.receive_time()
print(dt)
# Format the datetime to iso8601
iso_8601 = (
f"{dt[0]:04d}-{dt[1]:02d}-{dt[2]:02d}T{dt[3]:02d}:{dt[4]:02d}:{dt[5]:02d}Z"
)
_time = (int(dt[3]), int(dt[4]), int(dt[5]))
print(iso_8601)
# Check if data is ON or OFF
if digital == "ON":
print("received <= ON\n")
RELAY.value = 1
elif digital == "OFF":
print("received <= OFF\n")
RELAY.value = 0
last = time.monotonic()
    if int(alarm_time[0]) == _time[0] and int(alarm_time[1]) == _time[1]:
if weekdays[dt[6]] in days:
if skip == "OFF":
ring(RELAY, BTN_A, (BTN_A, BTN_B, BTN_C))
if skip == "SKIP":
io.send_data(skip_feed["key"], "OFF")
time.sleep(60)
print(weekdays[dt[6]], alarm_time[0], alarm_time[1])
next_alarm = "{} @ {:02d}:{:02d}".format(
        str(weekdays[(dt[6] + 1) % 7]), int(alarm_time[0]), int(alarm_time[1])
)
splash[0].text = "digital: " + digital
splash[1].text = "Next:" + next_alarm
splash[2].text = iso_8601
time.sleep(0.5)
display.refresh()
if any((not BTN_A.value, not BTN_B.value, not BTN_C.value)):
print(f"A: {not BTN_A.value}, B: {not BTN_B.value}, C: {not BTN_C.value}")
|
from socket import *
serverName = 'localhost'
serverPort = 12000
clientSocket = socket(AF_INET, SOCK_STREAM)
clientSocket.connect((serverName,serverPort))
sentence = input('Input lowercase sentence:')
clientSocket.send(sentence.encode('utf-8'))
modifiedSentence = clientSocket.recv(1024)
print(modifiedSentence.decode('utf-8'))
clientSocket.close()
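# A minimal companion sketch (assumption): the client above expects a TCP server
# on localhost:12000 that echoes the sentence back uppercased. Left commented out
# because it belongs in a separate process or file.
# serverSocket = socket(AF_INET, SOCK_STREAM)
# serverSocket.bind(('', 12000))
# serverSocket.listen(1)
# connectionSocket, addr = serverSocket.accept()
# sentence = connectionSocket.recv(1024).decode('utf-8')
# connectionSocket.send(sentence.upper().encode('utf-8'))
# connectionSocket.close()
# serverSocket.close()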
|