# %%
# Imports
import pickle
import pandas as pd
# %%
# Define functions
def clean_strings(row):
"""
    Accepts a string of comma separated topic names and converts that into a
list. Also removes the extraneous space
"""
topic_names = row['Topic name']
topic_names = topic_names.replace(', ', ',') # Remove spaces
topic_names = topic_names.split(',')
return topic_names
def get_list_event_dicts(topic):
    """
    Input: accepts a topic label
    Returns: a list of dictionaries. Each dictionary has key event name and
    value event description.
    """
    # Minimal completion of this (originally empty) function: filter the events
    # table loaded below for rows tagged with the given topic.
    rows = events_descriptions[events_descriptions['Topic name'].apply(lambda topics: topic in topics)]
    return [{row['Event name']: row['Event description']} for _, row in rows.iterrows()]
# %%
events_descriptions = pd.read_csv('./data/eventname_description_table_final.csv',
skiprows=0)
events_descriptions['Topic name'] = events_descriptions.apply(lambda row:
clean_strings(row), axis=1)
# %%
# We resort to manually constructing the dictionary from which to choose events
# Given more time, I would use a doc2vec, or better still, BERT embeddings to
# find more suitable events.
with open('./data/topic_labels_dict.data', 'rb') as filehandle:
topics_dict = pickle.load(filehandle)
unique_topic = set()
for market_index in topics_dict.keys():
labels = topics_dict[market_index].values()
unique_topic = unique_topic.union(labels)
# %%
# Building the dictionary
# The idea is to first make a dictionary grouped by topic label. Each topic
# label maps to a list of dictionaries with event name as key and event
# description as value.
event_suggester_dict = {}
num_rows = events_descriptions.shape[0]
for topic in unique_topic:
event_suggester_dict[topic] = []
for row in range(num_rows):
topics_list = events_descriptions['Topic name'].iloc[row]
if topic in topics_list:
temp_dict = {}
event_name = events_descriptions['Event name'].iloc[row]
temp_dict[event_name] = events_descriptions['Event description'].iloc[row]
event_suggester_dict[topic] += [temp_dict]
with open('./data/event_suggester_dict.data', 'wb') as filehandle:
pickle.dump(event_suggester_dict, filehandle)
# %%
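# Minimal usage sketch (assumes the pickle above was written successfully):
# load the suggester dictionary back and list the candidate events for one topic.
with open('./data/event_suggester_dict.data', 'rb') as filehandle:
    suggester = pickle.load(filehandle)
example_topic = next(iter(suggester))  # any topic label present in the dict
for event in suggester[example_topic]:
    for event_name, event_description in event.items():
        print(event_name, '->', event_description)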
|
import torch
import torch.nn as nn
from torchvision.models.alexnet import AlexNet as VisionAlexNet
from torchvision.models.alexnet import model_urls
import torch.utils.model_zoo as model_zoo
__all__ = ['AlexNet', 'alexnet']
class AlexNet(VisionAlexNet):
    def __init__(self, in_feat=3, num_classes=1000):
        # Forward num_classes to the torchvision base class instead of silently ignoring it
        super(AlexNet, self).__init__(num_classes=num_classes)
        self.features[0] = nn.Conv2d(in_feat, 64, kernel_size=11, stride=4, padding=2)
def alexnet(pretrained=True, in_feat=3, num_classes=1000, **kwargs):
if pretrained and in_feat != 3:
raise ValueError(f'when using pretrained=True, in_feat value is expected to be 3 but got {str(in_feat)}')
net = AlexNet(in_feat=in_feat, **kwargs)
if pretrained:
net.load_state_dict(model_zoo.load_url(model_urls['alexnet']))
if num_classes != 1000:
net.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(256 * 6 * 6, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, num_classes),
)
return net
if __name__ == '__main__':
x = torch.rand(1,1,64,64)
al = alexnet(pretrained=False, in_feat=1, num_classes=10)
print(al(x))
|
from stackformation import (Infra, BotoSession, Context)
from stackformation.aws.stacks import (vpc, ec2, s3)
import mock
import pytest
@mock.patch("stackformation.boto3.session.Session")
def test_boto_session(mock_sess):
mock_sess.return_value=True
session = BotoSession()
assert session.get_conf("region_name") == 'us-west-2'
session = BotoSession(
region_name='test-region',
profile_name='test-profile',
aws_secret_access_key='test-secret',
aws_access_key_id='test-access',
aws_session_token='test-token',
botocore_session='test-session'
)
assert session.get_conf('region_name') == 'test-region'
assert session.get_conf('profile_name') == 'test-profile'
assert session.get_conf('aws_secret_access_key') == 'test-secret'
assert session.get_conf('aws_access_key_id') == 'test-access'
assert session.get_conf('aws_session_token') == 'test-token'
assert session.get_conf('botocore_session') == 'test-session'
with pytest.raises(Exception) as e:
session.get_conf('nothere')
assert 'Conf Error: nothere' in str(e)
def test_context():
ctx = Context()
ctx.add_vars({
'test': 'test',
'test1': 'test1'
})
assert ctx.check_var('test') is not None
assert ctx.check_var('nothere') is None
assert ctx.get_var('test') == 'test'
assert ctx.get_var('nothere') is False
@mock.patch("stackformation.boto3.session.Session")
def test_infra(sess_mock):
sess_mock.return_value = True
session = BotoSession()
infra = Infra('Test', session)
vpc_stack = infra.add_stack(vpc.VPCStack())
s3_one = infra.add_stack(s3.S3Stack('one'))
s3_two = infra.add_stack(s3.S3Stack('two'))
# test find stack
vpc_find = infra.find_stack(vpc.VPCStack)
assert isinstance(vpc_find, (vpc.VPCStack))
assert infra.find_stack(s3.S3Stack, 'one').stack_name == 'one'
assert infra.find_stack(s3.S3Stack, 'two').stack_name == 'two'
# test list_stacks
assert len(infra.list_stacks()) == 3
# test sub
sub = infra.create_sub_infra('sub')
sub_sub = sub.create_sub_infra('sub')
assert sub_sub.prefix == ['sub', 'sub']
|
from django.http import HttpResponse
import json
from pokemon import utilities
# Create your views here.
def searchPokemonInfoByName(request):
try:
name = request.GET['name']
if len(name) < 4:
response = "The minimum character allowed is 4"
else:
utilities_pokemon = utilities.UtilitiesPokemon()
pokemon_list = utilities_pokemon.getPokemonEvolutionByName(name)
if len(pokemon_list) > 0:
response = pokemon_list
else:
response = "Not pokemon found"
except Exception as ex:
response = str(ex)
    return HttpResponse(json.dumps(response), content_type='application/json')
def getAndSavePokemonChainID(request):
try:
chain_id = request.GET['chainID']
utilities_pokemon = utilities.UtilitiesPokemon()
response_utility = utilities_pokemon.searchChainByID(chain_id)
status = response_utility[0]
if status:
response = {'status': "Register created....",
'data': json.loads(response_utility[1])}
else:
response = "Register already exists"
except Exception as ex:
response = str(ex)
    return HttpResponse(json.dumps(response), content_type='application/json')
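# A minimal URL wiring sketch for these views (module path and routes below are
# assumptions for illustration, not taken from the project):
#
#     # pokemon/urls.py
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [
#         path('pokemon/search/', views.searchPokemonInfoByName),
#         path('pokemon/chain/', views.getAndSavePokemonChainID),
#     ]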
|
import argparse
import os
from common.parse import parse_problem
def get_problem_name(problem):
sp = problem.url.split('/')
if 'problemset' in problem.url:
contest = sp[-2]
else:
contest = sp[-3]
index = sp[-1]
problemname = problem.name.split(' ', 1)[1]
return "CF_{}{}_{}".format(contest, index, problemname.replace(' ', '_'))
def main():
parser = argparse.ArgumentParser(description="Download a CF problem for copsy")
parser.add_argument('url', help='URL of Problem')
parser.add_argument('--destination', '-d', default=os.getcwd(), help='Folder to create problem folder in')
args = parser.parse_args()
problem = parse_problem(args.url, use_cookie=False)
destination = os.path.join(args.destination, get_problem_name(problem))
os.makedirs(destination)
with open(os.path.join(destination, 'samples.txt'), 'w') as f:
f.write("\n===\n".join(map(str, problem.samples)) + "\n")
with open(os.path.join(destination, 'desc.md'), 'w') as f:
print("#", problem.name, file=f)
print(file=f)
print(problem.url, file=f)
print(file=f)
print(problem.description, file=f)
print(file=f)
print("## ", end='', file=f)
print(problem.input, file=f)
print(file=f)
print("## ", end='', file=f)
print(problem.output, file=f)
with open(os.path.join(destination, 'problem.csy'), 'w') as f:
print("data: (int, int)", file=f)
print("init():", file=f)
print(" pre: true", file=f)
print(" post: () -> true", file=f)
print("solve(int, int) -> int:", file=f)
print(" pre: true", file=f)
print(" post: ((a, b), result, old_data, new_data) -> new_data == old_data && result == a + b", file=f)
if __name__ == '__main__':
main()
|
import tensorflow as tf
import tensorflow.keras.backend as K
def nll_gaussian(y_true, y_pred):
"""
    Negative log-likelihood for the prediction of a Gaussian distribution (mean, sigma)
"""
mean = y_pred[:,0]
    sigma = y_pred[:,1] + 1e-6 # adding 1e-6 for numerical stability reasons
first = 0.5 * K.log(K.square(sigma))
second = K.square(y_true[:,0] - mean) / (2 * K.square(sigma))
summed = first + second
loss = K.mean(summed, axis=-1)
return loss
def nll_skewed_gaussian(y_true, y_pred):
"""
    Negative log-likelihood for the prediction of a skewed Gaussian distribution (mean, sigma, alpha)
"""
mean = y_pred[:,0]
    sigma = y_pred[:,1] + 1e-6 # adding 1e-6 for numerical stability reasons
alpha = y_pred[:,2]
first = 0.5 * K.log(K.square(sigma))
second = K.square(y_true[:,0] - mean) / (2 * K.square(sigma))
x = (y_true[:,0] - mean) / sigma
    arg_erf = (1 + tf.math.erf(alpha * 0.5**0.5 * x)) + 1e-8 # adding 1e-8 for numerical stability reasons
third = - K.log(arg_erf)
summed = first + second + third
loss = K.mean(summed, axis=-1)
return loss
def tilted_loss(q, y, f):
"""
Tilted loss for quantile regression
"""
e = (y-f)
return K.mean(K.maximum(q*e, (q-1)*e), axis=-1)
def tilted_loss_multi(quant, y, f):
"""
Tilted loss for quantile regression
"""
q = K.constant(quant)
e = (y-f)
return K.sum(K.maximum(q*e, (q-1)*e), axis=-1)
#losses = []
# for i in range(len(q)):
# e = y-f[:,i]
# loss = K.mean(K.maximum(q[i]*e, (q[i]-1)*e), axis=-1)
## losses.append(loss)
#return K.reduce_mean(losses, axis=-1)
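# Usage sketch (assumed setup, not from the original project): any Keras model
# whose final Dense layer outputs [mean, sigma] can be compiled with nll_gaussian.
#
#     from tensorflow.keras import layers, models
#     model = models.Sequential([
#         layers.Dense(64, activation='relu', input_shape=(10,)),
#         layers.Dense(2),  # predicts mean and sigma
#     ])
#     model.compile(optimizer='adam', loss=nll_gaussian)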
|
import platform
import psutil
import writer
#constant for multi platforming
uname = platform.uname()
def main():
configure_arr = writer.configure()
write_content = checks(configure_arr)
writer.write_info(write_content)
#checks the ticked boxes
def checks(check_arr):
content = []
if check_arr[0]:
cpu_content = cpu_tests()
content.append(cpu_content)
if check_arr[1]:
memory_content = memory_tests()
content.append(memory_content)
if check_arr[2]:
disk_content = disk_tests()
content.append(disk_content)
if check_arr[3]:
othersys_content = othersys_tests()
content.append(othersys_content)
if check_arr[4]:
process_content = process_tests()
content.append(process_content)
return content
def cpu_tests():
content = ["CPU Info: "]
try:
content.append("Cores:", psutil.cpu_count(logical=False))
content.append("Cores:", psutil.cpu_count(logical=False))
content.append("Logical Cores:", psutil.cpu_count(logical=True))
content.append(f"Max Frequency: {psutil.cpu_freq().current:.1f}Mhz")
content.append(f"CPU Usage: {psutil.cpu_percent()}%")
content.append("CPU Usage/Core:")
for i, perc in enumerate(psutil.cpu_percent(percpu=True, interval=1)):
content.append(f"Core {i}: {perc}%")
except Exception:
pass
return content
def memory_tests():
content = ["Memory Info: "]
return content
def disk_tests():
content = ["Disk Info: "]
for partition in psutil.disk_partitions():
content.append((f"Device: {partition.device} ", f"\tMountpoint: {partition.mountpoint} ",
f"\tFile system type: {partition.fstype} "))
try:
partition_usage = psutil.disk_usage(partition.mountpoint)
except PermissionError:
continue
content.append((f"Total Size: {partition_usage.total} ", f"Used: {partition_usage.used} ",
f"Percentage: {partition_usage.percent} "))
return content
def othersys_tests():
content = ["Other System Info: "]
return content
def process_tests():
content = ["Process Info: "]
return content
if __name__ == "__main__":
main()
|
import numpy as np
from math import sqrt
def manhattan(x,y):
x = np.array(x)
y = np.array(y)
diff = abs(x-y)
return diff.sum()
def get_intersect(A, B, C, D):
cx=0
cy=0
solved=False
if( ((C[1] < A[1] and D[1] > B[1]) or (C[1] > A[1] and D[1] < B[1]) ) and ((C[0] > A[0] and D[0] < B[0]) or (C[0] < A[0] and D[0] > B[0])) ):
if(C[0] == D[0]):
cx = C[0]
cy = A[1]
elif(C[1] == D[1]):
cx = B[0]
cy = C[1]
solved=True
return solved, cx, cy
def closest_intersection(wires):
intersections=[]
paths=[]
for wire in wires:
aux = []
position=[0,0]
for instruction in wire:
code = instruction[0]
steps = int(instruction[1:])
if(code=='R'):
move = [steps,0]
newposition = [position[0]+move[0], position[1]+move[1]]
aux.append([position,newposition])
position = newposition
elif(code=='L'):
move = [-steps,0]
newposition = [position[0]+move[0], position[1]+move[1]]
aux.append([position,newposition])
position = newposition
elif(code=='D'):
move = [0,-steps]
newposition = [position[0]+move[0], position[1]+move[1]]
aux.append([position,newposition])
position = newposition
elif(code=='U'):
move = [0,steps]
newposition = [position[0]+move[0], position[1]+move[1]]
aux.append([position,newposition])
position = newposition
paths.append(aux)
wire1 = paths[0]
wire2 = paths[1]
for path1 in wire1:
A,B = path1
for path2 in wire2:
C,D = path2
contained,x,y = get_intersect(A,B,C,D)
if(contained and [x,y]!=[0,0]):
intersections.append([[x,y], [A,B], [C,D]])
return intersections,wire1,wire2
def steps_to_intersection(wire1,wire2,intersections):
def distance(A, B):
return sqrt((A[0]-B[0])**2 + (A[1]-B[1])**2)
beststeps = float('inf')
for intersection,path1,path2 in intersections:
steps1=0
for path in wire1:
A,B = path
if(path==path1):
steps1+=distance(A,intersection)
break
else:
steps1+=distance(A,B)
steps2=0
for path in wire2:
A,B = path
if(path==path2):
steps2+=distance(A,intersection)
break
else:
steps2+=distance(A,B)
r = steps1+steps2
if(r<beststeps):
beststeps=r
return int(beststeps)
def read_input(path):
return [x.split(',') for x in open(path).readlines()]
if __name__ == "__main__":
#Read the input
wires = read_input("input.txt")
#Solve p1
intersections,wire1path,wire2path = closest_intersection(wires)
r = min([manhattan(x[0],[0,0]) for x in intersections])
print(f"--- Part One --- \n{r}")
#Solve p2
r = steps_to_intersection(wire1path,wire2path, intersections)
print(f"--- Part Two --- \n{r}")
|
import os
import shutil
import json
from datetime import datetime
src = r"C:\Users\jagan\Desktop\BackupSchedler\SRC"
dst = r"C:\Users\jagan\Desktop\BackupSchedler\DES"
def BackUp(src, dst, file):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, False, None)
print("Copying from " + s + " to " + d)
file.write("Copying from " + s + " to " + d + "\n")
else:
shutil.copy2(s, d)
print("Copying from " + s + " to " + d)
file.write("Copying from " + s + " to " + d + "\n")
return True
def ensure_dir(file_path):
if not os.path.exists(file_path):
os.makedirs(file_path)
def main():
global dst
print("BackupFiles from " + src + " to " + dst)
now = datetime.now()
#print("now =", now)
dt_string = now.strftime("\BackUp_%d_%m_%Y_%H_%M_%S")
#print("date and time =", dt_string)
dst += dt_string
ensure_dir(dst)
completeName = os.path.join(dst, "Log" +".txt")
logFile = open(completeName,"w+")
logFile.write("date and time = " + dt_string)
logFile.write("\n===========================\n")
#dst = dst + dt_string
BackUp(src, dst, logFile)
logFile.close()
if __name__== "__main__":
main()
|
from sketchresponse import sketchresponse
from sketchresponse.grader_lib import GradeableFunction
from sketchresponse.grader_lib import Asymptote
problemconfig = sketchresponse.config({
'width': 750,
'height': 420,
'xrange': [-3.5, 3.5],
'yrange': [-4.5, 4.5],
'xscale': 'linear',
'yscale': 'linear',
'coordinates': 'cartesian',
'debug': False,
'plugins': [
{'name': 'axes'},
{'name': 'freeform', 'id': 'f', 'label': 'Function f(x)', 'color':'blue'},
{'name': 'vertical-line', 'id': 'va', 'label': 'Vertical asymptote', 'color': 'gray', 'dashStyle': 'dashdotted'},
{'name': 'horizontal-line', 'id': 'ha', 'label': 'Horizontal asymptote', 'color': 'gray', 'dashStyle': 'dashdotted'},
{'name': 'point', 'id': 'cp', 'label': 'Extremum', 'color': 'black', 'size': 15},
{'name': 'point', 'id': 'ip', 'label': 'Inflection point', 'color':'orange','size': 15}
]
})
@sketchresponse.grader
def grader(f,cp,ip,va,ha):
f = GradeableFunction.GradeableFunction(f)
cp = GradeableFunction.GradeableFunction(cp)
va = Asymptote.VerticalAsymptotes(va)
ha = Asymptote.HorizontalAsymptotes(ha)
ip = GradeableFunction.GradeableFunction(ip)
msg=''
if cp.get_number_of_points() != 1:
if cp.get_number_of_points() == 3:
msg += '<font color="blue">Are you sure about the number of extrema? (note that you should not label the endpoints of your function)</font><br />'
else:
msg += '<font color="blue">Are you sure about the number of extrema?</font><br />'
if ip.get_number_of_points() != 0:
msg += '<font color="blue">Are you sure about the number of extrema?</font><br />'
if va.get_number_of_asyms() != 2:
msg += '<font color="blue"> Are you sure about the number of vertical asymptotes?</font><br />'
if ha.get_number_of_asyms() != 1:
msg += '<font color="blue"> Are you sure about the number of horizontal asymptotes?</font><br />'
if msg != '':
return False, msg
else:
if not cp.has_point_at(x=0):
msg += '<font color="blue"> Check the x value of your critical point</font><br />'
if not va.has_asym_at_value(-1) or not va.has_asym_at_value(1):
v1 = va.closest_asym_to_value(-1)
v2 = va.closest_asym_to_value(1)
msg += '<font color="blue"> Check the locations of your vertical asymptotes. </font><br />'
if not ha.has_asym_at_value(2):
ha1 = ha.closest_asym_to_value(2)
msg += '<font color="blue"> Check the locations of your horizontal asymptotes. </font><br />'
maxpt = cp.get_point_at(x=0)
if not f.has_value_y_at_x(maxpt.y, maxpt.x):
msg += '<font color="blue"> Make sure your critical points lie on your function!</font><br />'
increasing_ok = f.is_increasing_between(-4, -1) and f.is_increasing_between(-1, 0)
decreasing_ok = f.is_decreasing_between(0, 1) and f.is_decreasing_between(1, 4)
curvature_up_ok = f.has_positive_curvature_between(-4, -1) and f.has_positive_curvature_between(1, 4)
curvature_down_ok= f.has_negative_curvature_between(-1,1)
if not (increasing_ok and decreasing_ok):
msg += '<font color="blue"> Where should the graph be increasing and decreasing?</font><br />'
if not f.is_greater_than_y_between(2,-4,-1):
msg += '<font color="blue"> Your function seems to be in the wrong region on the interval (-4,-1)</font><br />'
if not f.is_greater_than_y_between(2,1,4):
msg += '<font color="blue"> Your function seems to be in the wrong region on the interval (1,4)</font><br />'
if not f.is_less_than_y_between(0,-1,1):
msg += '<font color="blue"> Your function seems to be in the wrong region on the interval (-1,1)</font><br />'
if not (curvature_up_ok and curvature_down_ok):
msg += '<font color="blue"> Where is the function convave up and concave down?</font><br />'
if msg == '':
return True,'Good Job'
else:
return False, msg
|
# Custom rate is calculated for unique events in the simulation
# Author = Thomas Davis, email = txd283@bham.ac.uk / University of Birmingham
# This rate calculator has a full implementation of the pair interaction method
# for up to second nearest neighbours
# Cu and Vacancies clustering works
# Only first neighbour hops are included in the processes.py, and thus no code
# here has been incorporated for second neighbour hops
from KMCLib import *
from math import floor
import numpy
import math
# values required for vacancy diffusion, energy in eV, T in K, v is the jump (attempt) frequency in s^-1
E_m_Fe = 0.722
E_m_Cu = 0.50
k = 0.862e-4
T = 563
v_Fe = 9.79e12
v_Cu = 7.16e12
kT = k*T
# pair interaction values
e_FeFe1 = -0.778
e_FeFe2 = -0.389
e_VaFe1 = -0.191
e_VaFe2 = -0.096
e_VaVa1 = 0.225
e_VaVa2 = -0.047
e_VaCu1 = -0.247
e_VaCu2 = -0.206
e_FeCu1 = -0.585
e_FeCu2 = -0.326
e_CuCu1 = -0.627
e_CuCu2 = -0.314
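# The rate computed below follows the standard transition-state (Arrhenius) form
#     rate = v * exp(-(E_m + E_b) / (k*T))
# where E_m is the migration barrier of the jumping species (Fe or Cu), v its
# attempt frequency, and E_b the change in pair-interaction (binding) energy
# obtained from the first- and second-neighbour bond counts before and after the jump.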
# The first nearest neighbours for all atoms in the lattice in types_before and types_after.
# Used to find local configurations for energy calculations
NN1 = [[ 1, 2, 3, 4, 5, 6, 7, 8,],
[ 0, 9, 10, 11, 15, 16, 19, 51,],
[ 0, 9, 10, 12, 15, 17, 20, 52,],
[ 0, 9, 11, 13, 16, 18, 21, 53,],
[ 0, 9, 12, 13, 17, 18, 22, 54,],
[ 0, 10, 11, 14, 19, 23, 24, 55,],
[ 0, 10, 12, 14, 20, 23, 25, 56,],
[ 0, 11, 13, 14, 21, 24, 26, 57,],
[ 0, 12, 13, 14, 22, 25, 26, 58,],
[ 1, 2, 3, 4, 27, 28, 29, 30,],
[ 1, 2, 5, 6, 31, 32, 39, 40,],
[ 1, 3, 5, 7, 33, 35, 41, 43,],
[ 2, 4, 6, 8, 34, 36, 42, 44,],
[ 3, 4, 7, 8, 37, 38, 45, 46,],
[ 5, 6, 7, 8, 47, 48, 49, 50,]]
# The second nearest neighbours for all atoms in the lattice in types_before and types_after.
# Used to find local configurations for energy calculations
NN2 = [[ 9, 10, 11, 12, 13, 14,],
[ 2, 3, 5, 27, 31, 33,],
[ 1, 4, 6, 28, 32, 34,],
[ 1, 4, 7, 29, 35, 37,],
[ 2, 3, 8, 30, 36, 38,],
[ 1, 6, 7, 39, 41, 47,],
[ 2, 5, 8, 40, 42, 48,],
[ 3, 5, 8, 43, 45, 49,],
[ 4, 6, 7, 44, 46, 50,],
[ 0, 15, 16, 17, 18, 59,],
[ 0, 15, 19, 20, 23, 60,],
[ 0, 16, 19, 21, 24, 61,],
[ 0, 17, 20, 22, 25, 62,],
[ 0, 18, 21, 22, 26, 63,],
[ 0, 23, 24, 25, 26, 64,]]
class CustomRateCalculator(KMCRateCalculatorPlugin):
    # uncomment if you want a counter for the number of times the function rate() is called -- for diagnostics.
# def initialize(self):
    # used for calculating how many times the rate function is called.
#self._times_called = 0
def rate(self, geometry, types_before, types_after, rate_constant, process_number, coordinate):
        # see above -- diagnostics
#self._times_called += 1
# find the new position of the moved atom
for i in range(1,len(types_before)):
if (float(types_before[i])-float(types_after[i])) != 0.0:
new_position = i
break
        # define variables for the pair interaction. N_FeFe1_b is the number of Fe-Fe bonds before the move, and 'a' stands for after the move.
N_FeFe1_b = 0.0
N_FeFe1_a = 0.0
N_FeFe2_b = 0.0
N_FeFe2_a = 0.0
N_CuCu1_b = 0.0
N_CuCu1_a = 0.0
N_CuCu2_b = 0.0
N_CuCu2_a = 0.0
N_VaVa1_b = 0.0
N_VaVa1_a = 0.0
N_VaVa2_b = 0.0
N_VaVa2_a = 0.0
N_VaFe1_b = 0.0
N_VaFe1_a = 0.0
N_VaFe2_b = 0.0
N_VaFe2_a = 0.0
N_FeVa1_b = 0.0
N_FeVa1_a = 0.0
N_FeVa2_b = 0.0
N_FeVa2_a = 0.0
N_VaCu1_b = 0.0
N_VaCu1_a = 0.0
N_VaCu2_b = 0.0
N_VaCu2_a = 0.0
N_CuVa1_b = 0.0
N_CuVa1_a = 0.0
N_CuVa2_b = 0.0
N_CuVa2_a = 0.0
N_FeCu1_b = 0.0
N_FeCu1_a = 0.0
N_FeCu2_b = 0.0
N_FeCu2_a = 0.0
N_CuFe1_b = 0.0
N_CuFe1_a = 0.0
N_CuFe2_b = 0.0
N_CuFe2_a = 0.0
# find first neighbours of Va before move
count = 0.0
# count the number of bonds at position 0 at all the possible nearest neighbours. Uses NN1 array.
for i in range(8):
count += float(types_before[int(NN1[0][i])])
# floor will reveal the number of 1st nearest neighbour Va-Fe bonds
N_VaFe1_b = floor(count)
# will reveal the number of 1st nearest neighbour Va-Cu bonds
N_VaCu1_b = (count - floor(count))*10.0
# remaining values will be 1st nearest neighbour Va-Va bonds
N_VaVa1_b = abs(8.0 - N_VaFe1_b - N_VaCu1_b)
# same method above, but now for 2nd nearest neighbours. uses NN2 array.
count = 0.0
for i in range(6):
count += float(types_before[int(NN2[0][i])])
N_VaFe2_b = floor(count)
N_VaCu2_b = (count - floor(count))*10.0
N_VaVa2_b = abs(6.0 - N_VaFe2_b - N_VaCu2_b)
# find first neighbours of Va after move
count = 0.0
for i in range(8):
count += float(types_after[int(NN1[new_position][i])])
N_VaFe1_a = floor(count)
N_VaCu1_a = (count - floor(count))*10.0
N_VaVa1_a = abs(8.0 - N_VaFe1_a - N_VaCu1_a)
# find second neighbours of Va after move
count = 0.0
for i in range(6):
count += float(types_after[int(NN2[new_position][i])])
N_VaFe2_a = floor(count)
N_VaCu2_a = (count - floor(count))*10.0
N_VaVa2_a = abs(6.0 - N_VaFe2_a - N_VaCu2_a)
# Find what atom the Va is swapping with - either a Fe (1) or Cu(0.1)
if types_after[0] == "1":
# find first neighbours of Fe before move
count = 0.0
for i in range(8):
count += float(types_before[int(NN1[new_position][i])])
N_FeFe1_b = floor(count)
N_FeCu1_b = (count - floor(count))*10.0
N_FeVa1_b = abs(8.0 - N_FeFe1_b - N_FeCu1_b)
# find second neighbours of Fe before move
count = 0.0
for i in range(6):
count += float(types_before[int(NN2[new_position][i])])
N_FeFe2_b = floor(count)
N_FeCu2_b = (count - floor(count))*10.0
N_FeVa2_b = abs(6.0 - N_FeFe2_b - N_FeCu2_b)
# find first neighbours of Fe after move
count = 0.0
for i in range(8):
count += float(types_after[int(NN1[0][i])])
N_FeFe1_a = floor(count)
N_FeCu1_a = (count - floor(count))*10.0
N_FeVa1_a = abs(8.0 - N_FeFe1_a - N_FeCu1_a)
# find second neighbours of Fe after move
count = 0.0
for i in range(6):
count += float(types_after[int(NN2[0][i])])
N_FeFe2_a = floor(count)
N_FeCu2_a = (count - floor(count))*10.0
N_FeVa2_a = abs(6.0 - N_FeFe2_a - N_FeCu2_a)
else:
# find first neighbours of Cu before move
count = 0.0
for i in range(8):
count += float(types_before[int(NN1[new_position][i])])
N_CuFe1_b = floor(count)
N_CuCu1_b = (count - floor(count))*10.0
N_CuVa1_b = abs(8.0 - N_CuFe1_b - N_CuCu1_b)
# find second neighbours of Cu before move
count = 0.0
for i in range(6):
count += float(types_before[int(NN2[new_position][i])])
N_CuFe2_b = floor(count)
N_CuCu2_b = (count - floor(count))*10.0
N_CuVa2_b = abs(6.0 - N_CuFe2_b - N_CuCu2_b)
# find first neighbours of Cu after move
count = 0.0
for i in range(8):
count += float(types_after[int(NN1[0][i])])
N_CuFe1_a = floor(count)
N_CuCu1_a = (count - floor(count))*10.0
N_CuVa1_a = abs(8.0 - N_CuFe1_a - N_CuCu1_a)
# find second neighbours of Cu after move
count = 0.0
for i in range(6):
count += float(types_after[int(NN2[0][i])])
N_CuFe2_a = floor(count)
N_CuCu2_a = (count - floor(count))*10.0
N_CuVa2_a = abs(6.0 - N_CuFe2_a - N_CuCu2_a)
# find the difference before and after the jump bonds.
D_N_FeFe1 = N_FeFe1_a - N_FeFe1_b
D_N_FeFe2 = N_FeFe2_a - N_FeFe2_b
D_N_CuCu1 = N_CuCu1_a - N_CuCu1_b
D_N_CuCu2 = N_CuCu2_a - N_CuCu2_b
D_N_VaVa1 = N_VaVa1_a - N_VaVa1_b
D_N_VaVa2 = N_VaVa2_a - N_VaVa2_b
D_N_VaFe1 = N_VaFe1_a + N_FeVa1_a - N_VaFe1_b - N_FeVa1_b
D_N_VaFe2 = N_VaFe2_a + N_FeVa2_a - N_VaFe2_b - N_FeVa2_b
D_N_VaCu1 = N_VaCu1_a + N_CuVa1_a - N_VaCu1_b - N_CuVa1_b
D_N_VaCu2 = N_VaCu2_a + N_CuVa2_a - N_VaCu2_b - N_CuVa2_b
D_N_FeCu1 = N_FeCu1_a + N_CuFe1_a - N_FeCu1_b - N_CuFe1_b
D_N_FeCu2 = N_FeCu2_a + N_CuFe2_a - N_FeCu2_b - N_CuFe2_b
# binding energy calculation
E_b = (D_N_VaVa1*e_VaVa1 +
D_N_VaFe1*e_VaFe1 +
D_N_FeFe1*e_FeFe1 +
D_N_VaVa2*e_VaVa2 +
D_N_VaFe2*e_VaFe2 +
D_N_FeFe2*e_FeFe2 +
D_N_VaCu1*e_VaCu1 +
D_N_VaCu2*e_VaCu2 +
D_N_FeCu1*e_FeCu1 +
D_N_FeCu2*e_FeCu2 +
D_N_CuCu1*e_CuCu1 +
D_N_CuCu2*e_CuCu2)/2
        # check whether the atom is either Fe (1) or Cu (0.1) and calculate the rate
if types_after[0] == '1':
E = E_m_Fe + E_b
rate = v_Fe*math.exp(-E/kT)
else:
E = E_m_Cu + E_b
rate = v_Cu*math.exp(-E/kT)
# commented out this code -- it is the implementation of second nearest neighbours hops. need to define new E_m and v_Fe/Cu values, as they are different for first and second neighbour hops.
"""
distance_sq = (geometry[new_position][0] - geometry[0][0] )**2 + (geometry[new_position][1] - geometry[0][1] )**2 + (geometry[new_position][2] - geometry[0][2] )**2
if types_after[0] == '1' and distance_sq == 0.75:
E = E_m_Fe_1 + E_b
rate = v_Fe_1*math.exp(-E/kT)
print("types_after[0] == '1' and distance_sq == 0.75:")
elif types_after[0] == '1' and distance_sq == 1.0:
E = E_m_Fe_2 + E_b
rate = v_Fe_2*math.exp(-E/kT)
print("types_after[0] == '1' and distance_sq == 1.0:")
elif types_after[0] == '0.1' and distance_sq == 0.75:
E = E_m_Cu_1 + E_b
rate = v_Cu_1*math.exp(-E/kT)
print("types_after[0] == '0.1' and distance_sq == 0.75:")
elif types_after[0] == '0.1' and distance_sq == 1.0:
E = E_m_Cu_2 + E_b
rate = v_Cu_2*math.exp(-E/kT)
print("types_after[0] == '0.1' and distance_sq == 1.0:")
# print out useful variables for diagnostics
print("----------------------- CHECK -----------------------")
print("Iteration = %i"%(self._times_called))
print("Vacancy moved to %i\n"%(new_position))
print("D_N_FeFe1 = %.0f"%(D_N_FeFe1))
print("D_N_VaFe1 = %.0f"%(D_N_VaFe1))
print("D_N_VaVa1 = %.0f"%(D_N_VaVa1))
print("D_N_VaCu1 = %.0f"%(D_N_VaCu1))
print("D_N_FeCu1 = %.0f"%(D_N_FeCu1))
print("D_N_CuCu1 = %.0f\n"%(D_N_CuCu1))
print("D_N_FeFe2 = %.0f"%(D_N_FeFe2))
print("D_N_VaFe2 = %.0f"%(D_N_VaFe2))
print("D_N_VaVa2 = %.0f"%(D_N_VaVa2))
print("D_N_VaCu2 = %.0f"%(D_N_VaCu2))
print("D_N_FeCu2 = %.0f"%(D_N_FeCu2))
print("D_N_CuCu2 = %.0f\n"%(D_N_CuCu2))
print("E_b = %.2f eV"%(E_b))
print("E = %.4f eV"%(E))
print ("Rate = %f\n"%(rate))
"""
# return the new rate value
return rate
def cutoff(self):
# cutoff value for types_before and types_after lattice points. 2.0 = two supercells.
return 2.0
|
import h5py
import os.path as osp
import numpy as np
from tempfile import TemporaryDirectory
from testpath import assert_isfile
from karabo_data import RunDirectory, H5File
def test_write_selected(mock_fxe_raw_run):
with TemporaryDirectory() as td:
new_file = osp.join(td, 'test.h5')
with RunDirectory(mock_fxe_raw_run) as run:
run.select('SPB_XTD9_XGM/*').write(new_file)
assert_isfile(new_file)
with H5File(new_file) as f:
assert f.control_sources == {'SPB_XTD9_XGM/DOOCS/MAIN'}
assert f.instrument_sources == {'SPB_XTD9_XGM/DOOCS/MAIN:output'}
s = f.get_series('SPB_XTD9_XGM/DOOCS/MAIN', 'beamPosition.ixPos.value')
# This should have concatenated the two sequence files (400 + 80)
assert len(s) == 480
a = f.get_array('SPB_XTD9_XGM/DOOCS/MAIN:output', 'data.intensityTD')
assert a.shape == (480, 1000)
def test_write_virtual(mock_fxe_raw_run):
with TemporaryDirectory() as td:
new_file = osp.join(td, 'test.h5')
with RunDirectory(mock_fxe_raw_run) as run:
run.write_virtual(new_file)
assert_isfile(new_file)
with h5py.File(new_file) as f:
ds = f['CONTROL/SPB_XTD9_XGM/DOOCS/MAIN/beamPosition/ixPos/value']
assert ds.is_virtual
with H5File(new_file) as f:
np.testing.assert_array_equal(f.train_ids,
np.arange(10000, 10480, dtype=np.uint64))
assert 'SPB_XTD9_XGM/DOOCS/MAIN' in f.control_sources
assert 'SPB_XTD9_XGM/DOOCS/MAIN:output' in f.instrument_sources
s = f.get_series('SPB_XTD9_XGM/DOOCS/MAIN', 'beamPosition.ixPos.value')
# This should have concatenated the two sequence files (400 + 80)
assert len(s) == 480
a = f.get_array('SPB_XTD9_XGM/DOOCS/MAIN:output', 'data.intensityTD')
assert a.shape == (480, 1000)
|
from functools import reduce  # reduce is not a builtin in Python 3
from math import log
import numpy as np
import dolfin as df
import operator
def estimate_from_convergence(y, x):
'''Half step estimate'''
assert len(x) == len(y)
if len(y) >= 2:
return -log(y[-1]/float(y[-2]))/log(x[-1]/x[-2])
else:
return np.nan
def least_square_estimate(y, x):
    '''Least-squares fit of log(y) = p*log(x) + c; returns -p'''
assert len(x) == len(y)
if len(y) >= 2:
A = np.vstack([np.log(x), np.ones(len(x))]).T
p, _ = np.linalg.lstsq(A, np.log(y))[0]
return -p
else:
return np.nan
def collides(box, mesh):
    '''True if any cell of the mesh overlaps the axis-aligned box (lo, hi)'''
    lo, hi = box
    for cell in df.cells(mesh):
        x = np.array(cell.get_vertex_coordinates()).reshape((-1, 3))
        if np.all(x.max(axis=0) >= lo) and np.all(x.min(axis=0) <= hi):
            return True
    return False
def split(box):
'''Split the box into 8 children'''
x, y = box
m = (x+y)/2
return [(np.array([x[0], x[1], x[2]]), np.array([m[0], m[1], m[2]])),
(np.array([x[0], m[1], x[2]]), np.array([m[0], y[1], m[2]])),
(np.array([m[0], x[1], x[2]]), np.array([y[0], m[1], m[2]])),
(np.array([m[0], m[1], x[2]]), np.array([y[0], y[1], m[2]])),
(np.array([x[0], x[1], m[2]]), np.array([m[0], m[1], y[2]])),
(np.array([x[0], m[1], m[2]]), np.array([m[0], y[1], y[2]])),
(np.array([m[0], x[1], m[2]]), np.array([y[0], m[1], y[2]])),
(np.array([m[0], m[1], m[2]]), np.array([y[0], y[1], y[2]]))]
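# `bbox` is used below but was never defined in this script; a minimal sketch is
# added here (an assumption, not the author's original helper) so the box-counting
# loop can run. It accepts either a dolfin Mesh or a single Cell.
def bbox(obj):
    '''(min corner, max corner) of the vertex coordinates of a Mesh or a Cell'''
    if hasattr(obj, 'coordinates'):  # dolfin Mesh
        coords = obj.coordinates()
    else:                            # dolfin Cell
        coords = np.array(obj.get_vertex_coordinates()).reshape((-1, 3))
    return np.min(coords, axis=0), np.max(coords, axis=0)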
def fractal_dim(mesh):
    '''
    Produce estimates of fractal dimension of the mesh by box counting
    '''
    assert mesh.topology().dim() == 1 and mesh.geometry().dim() == 3
    root = bbox(mesh)
    sizes = root[1] - root[0]
    # Characteristic box size: geometric mean of the bounding-box edge lengths
    size = reduce(operator.mul, sizes)**(1./3)
    leafs = [root]
    while True:
        count = 0
        new_leafs = []
        for box in leafs:
            if collides(box, mesh):
                count += 1
                new_leafs.extend(split(box))
        yield count, size
        leafs = new_leafs
        size /= 2.
# -------------------------------------------------------------------
#if __name__ == '__main__':
mesh = df.Mesh('vasc_mesh.xml.gz')
N_history, eps_history = [], []
print(bbox(mesh))
print(bbox(next(df.cells(mesh))))
# i = 0
# for N, eps in fractal_dim(mesh):
# N_history.append(N)
# eps_history.append(eps)
# print i
# print N, eps
# print '\t current', -log(N)/log(eps)
# print '\t cvrg', estimate_from_convergence(N_history, eps_history)
# print '\t lstsq', least_square_estimate(N_history, eps_history)
# print
|
import os
import scrapy.settings
from ..items import Media
from ..services.storage_util import StorageUtil
class DirectDownloadSpider(scrapy.Spider):
name = "datacollector_direct_download"
start_urls = ['https://www.youtube.com']
custom_settings = {
"CONCURRENT_ITEMS": "1",
"MEDIA_ALLOW_REDIRECTS": "True",
"ROBOTSTXT_OBEY": "False",
"ITEM_PIPELINES": '{"data_acquisition_framework.pipelines.audio_pipeline.AudioPipeline": 1}'
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
StorageUtil().set_gcs_creds(str(kwargs["my_setting"]).replace("'", ""))
self.language = "Bodo"
self.language_code = "brx"
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
spider = cls(
*args,
my_setting=crawler.settings.get("GCS_CREDS")
if "scrapinghub" in os.path.abspath("~")
else open("./credentials.json").read(),
**kwargs
)
spider._set_crawler(crawler)
return spider
def parse(self, response, **kwargs):
urls_path = os.path.dirname(os.path.realpath(__file__)) + "/../download_urls.txt"
with open(urls_path, "r") as f:
urls = f.read().splitlines()
for url in urls:
source_domain = self.extract_source_domain(url)
url_parts = url.split("/")
yield Media(
title=url_parts[-1],
file_urls=[url],
source=source_domain,
license_urls=[],
language=self.language,
source_url=url
)
def extract_source_domain(self, base_url):
source_domain = base_url[base_url.index("//") + 2:].split("/")[0]
if source_domain.startswith("www."):
source_domain = source_domain.replace("www.", "")
return source_domain
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.testutil.mock_logger import MockLogger as MockLogger # noqa
from pants_test.deprecated_testinfra import deprecated_testinfra_module
deprecated_testinfra_module('pants.testutil.mock_logger')
|
# -*- coding: utf-8 -*-
# @Time : 2019/1/9 12:46 PM
# @Author : Zhixin Piao
# @Email : piaozhx@shanghaitech.edu.cn
import sys
sys.path.append('./')
import smtplib
import time
from email.mime.text import MIMEText
from email.header import Header
from config import mail_host, mail_user, mail_pass
def send_email(chs_name, receiver, grade):
sender = 'piaozhx@shanghaitech.edu.cn'
receivers = [receiver]
mail_content = '''
Hi, %s:
你的CS172课程最终成绩: %f, 如对自己的成绩有疑问, 请在三天内来核查.
''' % (chs_name, grade)
message = MIMEText(mail_content, 'plain', 'utf-8')
message['From'] = Header("CS172TA", 'utf-8')
message['To'] = Header("CS172上课同学", 'utf-8')
message['Subject'] = Header('CS172课程成绩', 'utf-8')
try:
smtpObj = smtplib.SMTP()
        smtpObj.connect(mail_host, 587)  # 587 is the SMTP submission port (STARTTLS)
smtpObj.ehlo()
smtpObj.starttls()
smtpObj.ehlo()
smtpObj.login(mail_user, mail_pass)
smtpObj.sendmail(sender, receivers, message.as_string())
print("%s 邮件发送成功" % receiver)
return True
except smtplib.SMTPException:
print("Error: %s 无法发送邮件" % receiver)
return False
def main():
with open('cs172_grade.txt', 'r') as f:
student_info_list = f.readlines()
student_info_list = map(lambda x: x.split(), student_info_list)
for sid, sname, email, grade in student_info_list:
grade = float(grade)
print(sid, sname, email, grade)
success = send_email(sname, email, grade)
while not success:
# count = 0
time.sleep(10)
success = send_email(sname, email, grade)
# count += 1
#
# if count == 5:
# count = 0
# time.sleep(10)
if __name__ == '__main__':
main()
|
import copy
import pytest
from tempocli.cli import cli
from tempocli.cli import ENVVAR_PREFIX
from tests.helpers import write_yaml
def test_tempocli(cli_runner):
result = cli_runner.invoke(cli)
assert result.exit_code == 0
assert 'Usage:' in result.output
@pytest.mark.freeze_time('2018-08-05')
class TestTempoCliCreate(object):
data = {
'author_account_id': 'foo',
'issues': [
{
'issue': 'INT-8',
'time_spent': '30m',
'start_time': '09:30:00',
},
],
}
@pytest.fixture
def template_data(self):
return copy.deepcopy(self.data)
@pytest.fixture
def template(self, tmpdir):
return tmpdir.join('template.yml')
@pytest.fixture
def template_invoke(self, cli_invoke, config, template):
_args = [
'-vvv',
'--config',
config.strpath,
'create',
'--template',
template.strpath,
]
def func(args=None, **kwargs):
_args.extend(args or [])
return cli_invoke(cli, _args, **kwargs)
return func
def test_create_single(self, template, template_data, template_invoke, tempo_request):
write_yaml(template, template_data)
request = tempo_request.post('/worklogs')
result = template_invoke()
assert result.exit_code == 0
assert request.called_once
assert request.last_request.json() == {
'authorAccountId': 'foo',
'issueKey': self.data['issues'][0]['issue'],
'timeSpentSeconds': 1800,
'startDate': '2018-08-05',
'startTime': self.data['issues'][0]['start_time'],
'description': 'Working on issue {}'.format(self.data['issues'][0]['issue']),
}
def test_create_multiple(self, template, template_data, template_invoke, tempo_request):
template_data['issues'].append({
'issue': 'INT-10',
'time_spent': '30m',
'start_time': '09:30:00',
})
write_yaml(template, template_data)
request = tempo_request.post('/worklogs')
result = template_invoke()
assert result.exit_code == 0
assert request.call_count == 2
def test_create_author_override(self, template, template_data, template_invoke, tempo_request):
template_data['issues'][0]['author_account_id'] = 'bar'
write_yaml(template, template_data)
request = tempo_request.post('/worklogs')
result = template_invoke()
assert result.exit_code == 0
assert request.called_once
assert request.last_request.json()['authorAccountId'] == template_data['issues'][0]['author_account_id']
def test_create_extras_override(self, template, template_data, template_invoke, tempo_request):
template_data['issues'][0]['extras'] = {
'authorAccountId': 'bar',
}
write_yaml(template, template_data)
request = tempo_request.post('/worklogs')
result = template_invoke()
assert result.exit_code == 0
assert request.called_once
assert request.last_request.json()['authorAccountId'] == template_data['issues'][0]['extras']['authorAccountId']
def test_create_token_from_env(self, template, template_data, template_invoke, tempo_request):
token = 'fromenv' # noqa: S105
write_yaml(template, template_data)
request = tempo_request.post('/worklogs')
result = template_invoke(
env={
'{}_TOKEN'.format(ENVVAR_PREFIX): token,
},
)
assert result.exit_code == 0
assert request.called_once
assert request.last_request.headers['Authorization'] == 'Bearer {}'.format(token)
def test_create_future_date(self, template, template_data, template_invoke, tempo_request):
template_data['issues'][0]['start_time'] = 'Monday at 11am'
write_yaml(template, template_data)
request = tempo_request.post('/worklogs')
result = template_invoke()
assert result.exit_code == 0
assert request.called_once
assert request.last_request.json()['startDate'] == '2018-08-06'
assert request.last_request.json()['startTime'] == '11:00:00'
def test_create_http_error(self, template, template_data, template_invoke, tempo_request):
write_yaml(template, template_data)
request = tempo_request.post('/worklogs', status_code=500)
result = template_invoke()
assert "Could not create ('foo', 'INT-8'," in result.output
assert 'Traceback' in result.output
assert result.exit_code == 1
assert request.called_once
|
# Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from model_analyzer.device.gpu_device import GPUDevice
import unittest
from .mocks.mock_server_local import MockServerLocalMethods
from .mocks.mock_perf_analyzer import MockPerfAnalyzerMethods
from .mocks.mock_client import MockTritonClientMethods
from .mocks.mock_psutil import MockPSUtil
from model_analyzer.triton.server.server_config import TritonServerConfig
from model_analyzer.triton.server.server_factory import TritonServerFactory
from model_analyzer.triton.client.client_factory import TritonClientFactory
from model_analyzer.perf_analyzer.perf_analyzer import PerfAnalyzer
from model_analyzer.perf_analyzer.perf_config import PerfAnalyzerConfig
from model_analyzer.model_analyzer_exceptions \
import TritonModelAnalyzerException
from model_analyzer.record.types.perf_throughput import PerfThroughput
from model_analyzer.record.types.perf_latency_avg import PerfLatencyAvg
from model_analyzer.record.types.perf_latency_p90 import PerfLatencyP90
from model_analyzer.record.types.perf_latency_p95 import PerfLatencyP95
from model_analyzer.record.types.perf_latency_p99 import PerfLatencyP99
from model_analyzer.record.types.perf_latency import PerfLatency
from model_analyzer.record.types.perf_client_response_wait \
import PerfClientResponseWait
from model_analyzer.record.types.perf_client_send_recv \
import PerfClientSendRecv
from model_analyzer.record.types.perf_server_queue \
import PerfServerQueue
from model_analyzer.record.types.perf_server_compute_input \
import PerfServerComputeInput
from model_analyzer.record.types.perf_server_compute_infer \
import PerfServerComputeInfer
from model_analyzer.record.types.perf_server_compute_output \
import PerfServerComputeOutput
from .common import test_result_collector as trc
# Test Parameters
MODEL_LOCAL_PATH = '/model_analyzer/models'
MODEL_REPOSITORY_PATH = '/model_analyzer/models'
PERF_BIN_PATH = 'perf_analyzer'
TRITON_LOCAL_BIN_PATH = 'test_path'
TEST_MODEL_NAME = 'test_model'
TEST_CONCURRENCY_RANGE = '1:16:2'
CONFIG_TEST_ARG = 'sync'
TEST_GRPC_URL = 'test_hostname:test_port'
class TestPerfAnalyzerMethods(trc.TestResultCollector):
def setUp(self):
# Mocks
self.server_local_mock = MockServerLocalMethods()
self.perf_mock = MockPerfAnalyzerMethods()
self.client_mock = MockTritonClientMethods()
self.mock_psutil = MockPSUtil()
self.mock_psutil.start()
self.server_local_mock.start()
self.perf_mock.start()
self.client_mock.start()
# PerfAnalyzer config for all tests
self.config = PerfAnalyzerConfig()
self.config['model-name'] = TEST_MODEL_NAME
self.config['measurement-interval'] = 1000
self.config['measurement-request-count'] = 50
self.gpus = [
GPUDevice('TEST_DEVICE_NAME', 0, "TEST_PCI_BUS_ID", "TEST_UUID")
]
# Triton Server
self.server = None
self.client = None
def test_perf_analyzer_config(self):
# Check config initializations
self.assertIsNone(self.config[CONFIG_TEST_ARG],
msg="Server config had unexpected initial"
f" value for {CONFIG_TEST_ARG}")
# Set value
self.config[CONFIG_TEST_ARG] = True
# Test get again
self.assertTrue(self.config[CONFIG_TEST_ARG],
msg=f"{CONFIG_TEST_ARG} was not set")
# Try to set an unsupported config argument, expect failure
with self.assertRaises(TritonModelAnalyzerException,
msg="Expected exception on trying to set"
"unsupported argument in perf_analyzer"
"config"):
self.config['dummy'] = 1
# set and get value for each subtype of arguments
self.config['model-name'] = TEST_MODEL_NAME
self.assertEqual(self.config['model-name'], TEST_MODEL_NAME)
self.config['concurrency-range'] = TEST_CONCURRENCY_RANGE
self.assertEqual(self.config['concurrency-range'],
TEST_CONCURRENCY_RANGE)
self.config['extra-verbose'] = True
self.assertTrue(self.config['extra-verbose'])
def test_perf_analyzer_additive_args(self):
shape = ['name1:1,2,3', 'name2:4,5,6']
expected_cli_str = '-m test_model --measurement-interval=1000 --shape=name1:1,2,3 --shape=name2:4,5,6 --measurement-request-count=50'
self.config['shape'] = shape[:]
self.assertEqual(self.config['shape'], shape)
self.assertEqual(self.config.to_cli_string(), expected_cli_str)
def test_run(self):
server_config = TritonServerConfig()
server_config['model-repository'] = MODEL_REPOSITORY_PATH
# Create server, client, PerfAnalyzer, and wait for server ready
self.server = TritonServerFactory.create_server_local(
path=TRITON_LOCAL_BIN_PATH, config=server_config, gpus=self.gpus)
perf_analyzer = PerfAnalyzer(path=PERF_BIN_PATH,
config=self.config,
max_retries=10,
timeout=100,
max_cpu_util=50)
self.client = TritonClientFactory.create_grpc_client(
server_url=TEST_GRPC_URL)
self.server.start()
self.client.wait_for_server_ready(num_retries=1)
# Run perf analyzer with dummy metrics to check command parsing
perf_metrics = [id]
test_latency_output = "Client:\n p99 latency: 5000 us\n\n\n\n"
self.perf_mock.set_perf_analyzer_result_string(test_latency_output)
with self.assertRaises(TritonModelAnalyzerException):
perf_analyzer.run(perf_metrics)
# Test avg latency parsing
test_latency_output = "Client:\n Avg latency: 5000 us\n\n\n\n"
self.perf_mock.set_perf_analyzer_result_string(test_latency_output)
perf_metrics = [PerfLatencyAvg]
perf_analyzer.run(perf_metrics)
records = perf_analyzer.get_records()
self.assertEqual(len(records), 1)
self.assertEqual(records[0].value(), 5)
# Test p90 latency parsing
test_latency_output = "Client:\n p90 latency: 5000 us\n\n\n\n"
self.perf_mock.set_perf_analyzer_result_string(test_latency_output)
perf_metrics = [PerfLatencyP90]
perf_analyzer.run(perf_metrics)
records = perf_analyzer.get_records()
self.assertEqual(len(records), 1)
self.assertEqual(records[0].value(), 5)
# Test p95 latency parsing
test_latency_output = "Client:\n p95 latency: 5000 us\n\n\n\n"
self.perf_mock.set_perf_analyzer_result_string(test_latency_output)
perf_metrics = [PerfLatencyP95]
perf_analyzer.run(perf_metrics)
records = perf_analyzer.get_records()
self.assertEqual(len(records), 1)
self.assertEqual(records[0].value(), 5)
# Test p99 latency parsing
test_latency_output = "Client:\n p99 latency: 5000 us\n\n\n\n"
self.perf_mock.set_perf_analyzer_result_string(test_latency_output)
perf_metrics = [PerfLatencyP99]
perf_analyzer.run(perf_metrics)
records = perf_analyzer.get_records()
self.assertEqual(len(records), 1)
self.assertEqual(records[0].value(), 5)
# Test latency parsing
test_latency_output = "Client:\n p99 latency: 5000 us\n\n\n\n"
self.perf_mock.set_perf_analyzer_result_string(test_latency_output)
perf_metrics = [PerfLatency]
perf_analyzer.run(perf_metrics)
records = perf_analyzer.get_records()
self.assertEqual(len(records), 1)
self.assertEqual(records[0].value(), 5)
# Test throughput parsing
test_throughput_output = "Client:\n Throughput: 46.8 infer/sec\n\n\n\n"
self.perf_mock.set_perf_analyzer_result_string(test_throughput_output)
perf_metrics = [PerfThroughput]
perf_analyzer.run(perf_metrics)
records = perf_analyzer.get_records()
self.assertEqual(len(records), 1)
self.assertEqual(records[0].value(), 46.8)
# Test parsing for both
test_both_output = "Client:\n Throughput: 0.001 infer/sec\nAvg latency: 3600 us\np90 latency: 3700 us\np95 latency: 3800 us\np99 latency: 3900 us\n\n\n\n"
self.perf_mock.set_perf_analyzer_result_string(test_both_output)
perf_metrics = [
PerfThroughput, PerfLatencyAvg, PerfLatencyP90, PerfLatencyP95,
PerfLatencyP99, PerfLatency
]
perf_analyzer.run(perf_metrics)
records = perf_analyzer.get_records()
self.assertEqual(len(records), 6)
self.assertEqual(records[0].value(), 0.001)
self.assertEqual(records[1].value(), 3.6)
self.assertEqual(records[2].value(), 3.7)
self.assertEqual(records[3].value(), 3.8)
self.assertEqual(records[4].value(), 3.9)
self.assertEqual(records[5].value(), 3.9)
# Test no exceptions are raised when nothing can be parsed
test_graceful_return = "?"
self.perf_mock.set_perf_analyzer_result_string(test_graceful_return)
perf_metrics = [
PerfThroughput, PerfLatency, PerfClientSendRecv,
PerfClientResponseWait, PerfServerQueue, PerfServerComputeInfer,
PerfServerComputeInput, PerfServerComputeOutput
]
perf_analyzer.run(perf_metrics)
# Test exception handling
self.perf_mock.set_perf_analyzer_return_code(1)
        self.assertEqual(perf_analyzer.run(perf_metrics), 1)
self.server.stop()
# TODO: test perf_analyzer over utilization of resources.
def test_measurement_interval_increase(self):
server_config = TritonServerConfig()
server_config['model-repository'] = MODEL_REPOSITORY_PATH
# Create server, client, PerfAnalyzer, and wait for server ready
self.server = TritonServerFactory.create_server_local(
path=TRITON_LOCAL_BIN_PATH, config=server_config, gpus=self.gpus)
perf_analyzer_config = PerfAnalyzerConfig()
perf_analyzer_config['model-name'] = TEST_MODEL_NAME
perf_analyzer_config['concurrency-range'] = TEST_CONCURRENCY_RANGE
perf_analyzer_config['measurement-mode'] = 'time_windows'
perf_analyzer = PerfAnalyzer(path=PERF_BIN_PATH,
                                     config=perf_analyzer_config,
max_retries=10,
timeout=100,
max_cpu_util=50)
self.client = TritonClientFactory.create_grpc_client(
server_url=TEST_GRPC_URL)
self.server.start()
# Test failure to stabilize for measurement windows
self.client.wait_for_server_ready(num_retries=1)
test_stabilize_output = "Please use a larger time window"
self.perf_mock.set_perf_analyzer_result_string(test_stabilize_output)
self.perf_mock.set_perf_analyzer_return_code(1)
perf_metrics = [PerfThroughput, PerfLatencyP99]
perf_analyzer.run(perf_metrics)
self.assertEqual(
self.perf_mock.get_perf_analyzer_popen_read_call_count(), 10)
def test_measurement_request_count_increase(self):
server_config = TritonServerConfig()
server_config['model-repository'] = MODEL_REPOSITORY_PATH
# Create server, client, PerfAnalyzer, and wait for server ready
self.server = TritonServerFactory.create_server_local(
path=TRITON_LOCAL_BIN_PATH, config=server_config, gpus=self.gpus)
perf_analyzer = PerfAnalyzer(path=PERF_BIN_PATH,
config=self.config,
max_retries=10,
timeout=100,
max_cpu_util=50)
self.client = TritonClientFactory.create_grpc_client(
server_url=TEST_GRPC_URL)
self.server.start()
# Test the timeout for count mode
self.client.wait_for_server_ready(num_retries=1)
test_both_output = "Please use a larger time window"
self.perf_mock.set_perf_analyzer_result_string(test_both_output)
self.perf_mock.set_perf_analyzer_return_code(1)
perf_metrics = [PerfThroughput, PerfLatencyP99]
perf_analyzer.run(perf_metrics)
self.assertEqual(
self.perf_mock.get_perf_analyzer_popen_read_call_count(), 10)
def tearDown(self):
# In case test raises exception
if self.server is not None:
self.server.stop()
# Stop mocking
self.server_local_mock.stop()
self.perf_mock.stop()
self.client_mock.stop()
self.mock_psutil.stop()
if __name__ == '__main__':
unittest.main()
|
import turtle
# TODO: Explain superclass and subclass.
class DoubleTimeTurtle(turtle.Turtle):
def forward(self, distance):
turtle.Turtle.forward(self, 2 * distance)
def backward(self, distance):
turtle.Turtle.backward(self, 2 * distance)
class OppositeTurtle(turtle.Turtle):
def forward(self, distance):
turtle.Turtle.backward(self, distance)
def backward(self, distance):
turtle.Turtle.forward(self, distance)
def left(self, angle):
turtle.Turtle.right(self, angle)
def right(self, angle):
turtle.Turtle.left(self, angle)
regular_turtle = turtle.Turtle()
regular_turtle.color('red')
regular_turtle.left(90)
regular_turtle.forward(64)
double_time_turtle = DoubleTimeTurtle()
double_time_turtle.color('green') # TODO: Explain how `color` is inherited from the superclass.
double_time_turtle.left(90)
double_time_turtle.forward(64)
# TODO: Show methods of `double_time_turtle`.
for m in dir(double_time_turtle):
    print(m)
opposite_turtle = OppositeTurtle()
opposite_turtle.color('blue')
opposite_turtle.left(90)
opposite_turtle.forward(64)
|
#! python3
# pdfParanoia.py - Add password in command line
# to every PDF in folder and subfolders.
import PyPDF2, os, sys
password = sys.argv[1]
for foldername, subfolders, filenames in os.walk(os.getcwd()):
# Find each PDF after walking through given directory.
for filename in filenames:
if (filename.endswith('.pdf')):
# Rewrite PDF to become encrypted.
pdfPath = os.path.join(foldername, filename)
pdfFile = open(pdfPath, 'rb')
pdfReader = PyPDF2.PdfFileReader(pdfFile)
pdfWriter = PyPDF2.PdfFileWriter()
for pageNum in range(pdfReader.numPages):
pdfWriter.addPage(pdfReader.getPage(pageNum))
resultFilename = filename[:-4] + '_encrypted.pdf'
resultPath = os.path.join(foldername, resultFilename)
resultPdf = open(resultPath, 'wb')
pdfWriter.encrypt(password)
pdfWriter.write(resultPdf)
# Close original and result PDFs.
pdfFile.close()
resultPdf.close()
# Verify encryption.
verifyReader = PyPDF2.PdfFileReader(open(resultPath, 'rb'))
verifyReader.decrypt(password)
if verifyReader.getPage(0):
print('%s encrypted as %s. Deleting %s.' %
(filename, resultFilename, filename))
# Delete original.
os.unlink(pdfPath)
else:
print('Encryption failed.')
|
from __future__ import absolute_import, division, print_function
from odo.append import append
def test_append_list():
L = [1, 2, 3]
append(L, [4, 5, 6])
assert L == [1, 2, 3, 4, 5, 6]
def test_append_list_to_set():
s = set([1, 2, 3])
append(s, [4, 5, 6])
assert s == set([1, 2, 3, 4, 5, 6])
def test_append_set_to_list():
s = set([3, 4, 5])
lst = [1, 2, 3]
append(lst, s)
assert sorted(lst) == [1, 2, 3, 3, 4, 5]
def test_append_tuple_to_set():
s = set([1, 2, 3])
append(s, (3, 4, 5))
assert s == set([1, 2, 3, 4, 5])
|
# -*- coding: utf-8 -*-
from functools import wraps
def remove_empty_params_from_request(exclude=None):
"""
ENG: Remove empty query params from request
RUS: Удаляет пустые параметры из запроса
"""
if exclude is None:
exclude = []
def remove_empty_params_from_request_decorator(func):
"""
RUS: Удаляет пустые параметры из запроса в результате применения декоратора
"""
@wraps(func)
def func_wrapper(self, request, *args, **kwargs):
"""
Функция-обертка
"""
query_params = request.GET.copy()
for k, v in list(query_params.items()):
if v == '' and k not in exclude:
del query_params[k]
request.GET = query_params
return func(self, request, *args, **kwargs)
return func_wrapper
return remove_empty_params_from_request_decorator
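# Usage sketch (assumed DRF-style view; class and parameter names are illustrative only):
#
#     class ProductViewSet(CustomSerializerViewSetMixin, viewsets.ModelViewSet):
#         @remove_empty_params_from_request(exclude=['page'])
#         def list(self, request, *args, **kwargs):
#             return super().list(request, *args, **kwargs)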
class CustomSerializerViewSetMixin(object):
"""
Сериалайзер для запросов
"""
def get_serializer_class(self):
"""
        ENG: Return the class to use for the serializer w.r.t. the request method.
RUS: Возвращает класс для использования сериалайзера к методу запроса
"""
try:
return self.custom_serializer_classes[self.action]
except (KeyError, AttributeError):
return super(CustomSerializerViewSetMixin, self).get_serializer_class()
|
import ast
import requests
from bs4 import BeautifulSoup
from .objMusic import Music
class Album:
"""This object represents a RadioJvan album.
Objects of this class are comparable in terms of equality. Two objects of this class are
considered equal, if their :attr:`url` is equal.
.. versionadded:: 1.0.0
Args:
url (:obj:`str`): Album url.
quality (:obj:`str`, optional): Album quality ('256' or '320').
Attributes:
artist (:obj:`str`): Album artist.
name (:obj:`str`): Album name.
url (:obj:`str`): Album url.
cover (:obj:`str`): Album cover url.
quality (:obj:`str`): Album quality.
date_added (:obj:`str`): Date that album was added on RadioJavan.
length (:obj:`int`): Album length.
Raises:
:class:`ValueError`
:class:`ConnectionError`
"""
def __init__(self, url: str, quality: str = "320"):
try:
response = requests.get(url, allow_redirects=True)
url = response.url
content = response.content
if not url.startswith("https://www.radiojavan.com/mp3s/album/"):
raise ValueError("Invalid url!")
if '?' in url:
url = url.split('?')[0]
self.url = url
if quality not in ["256", "320"]:
raise ValueError("This quality isn't available!")
self.quality = quality
data = BeautifulSoup(content, "html.parser").findAll("meta", href=False, attrs={"property": "og:title"})
self.artist, self.name = data[0].attrs["content"].split(" - ")
data = BeautifulSoup(content, "html.parser").findAll("img", href=False)
self.cover = data[-1]["src"]
data = BeautifulSoup(content, "html.parser").findAll("script", href=False)
self.__tracks = ast.literal_eval(str(data[-3]).split("\n")[11][17:-1])
self.length = len(self.__tracks)
data = BeautifulSoup(content, "html.parser").findAll("div", href=False, attrs={"class": "dateAdded"})
self.date_added = data[0].text.split(':')[1].strip()
except requests.exceptions.SSLError:
raise ValueError("Invalid url!") from None
except requests.exceptions.ConnectionError:
raise ConnectionError("Check your connection!") from None
def __eq__(self, other):
if not isinstance(other, Album):
return False
return self.url == other.url
def track(self, index: int) -> Music:
"""
Args:
index (:obj:`int`): Index of desired album track.
Returns:
:class:`rjdl.Music`
Raises:
:class:`IndexError`
:class:`ConnectionError`
"""
return Music("https://www.radiojavan.com/mp3s/mp3/" + self.__tracks[index]["mp3"], self.quality)
|
#
# file: final_program_distance.py
#
# RTK, 28-Jan-2021
# Last update: 28-Jan-2021
#
################################################################
import numpy as np
import editdistance
prg = [i[:-1] for i in open("runs_10.txt")]
dist = []
for i in range(len(prg)):
for j in range(len(prg)):
if (i == j):
continue
dist.append(editdistance.eval(prg[i],prg[j]))
dist = np.array(dist) / len(prg[0])
print()
print("10-instructions: %0.4f +/- %0.4f" % (dist.mean(), dist.std(ddof=1)/np.sqrt(len(dist))))
prg = [i[:-1] for i in open("runs_60.txt")]
dist = []
for i in range(len(prg)):
for j in range(len(prg)):
if (i == j):
continue
dist.append(editdistance.eval(prg[i],prg[j]))
dist = np.array(dist) / len(prg[0])
print("60-instructions: %0.4f +/- %0.4f" % (dist.mean(), dist.std(ddof=1)/np.sqrt(len(dist))))
print()
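# Quick illustrative check of the normalization used above (not part of the original
# analysis): edit distance divided by program length gives a per-symbol dissimilarity.
a, b = "ADDSUBMULDIV", "ADDSUBDIVDIV"
print(editdistance.eval(a, b) / len(a))  # 0.25 -- the strings differ in 3 of 12 symbols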
|
from kivymd.uix.dialog import MDDialog
from kivymd.uix.button import MDFlatButton
from kivy.uix.boxlayout import BoxLayout
from kivy.clock import Clock
from kivymd.uix.list import ThreeLineListItem
from kivy_garden.graph import Graph, MeshLinePlot
from kivymd.uix.label import MDLabel
class InfoDialog(MDDialog):
"""Basic dialog window"""
dialog = None
def __init__(self, text):
self.text = text
def dialog_(self, *args):
if not self.dialog:
self.dialog = MDDialog(
text=self.text,
radius=[20, 7, 20, 7],
)
self.dialog.open()
""" section graph dialog window """
class Content(BoxLayout):
"""
cls_content - Dialog window binded with ThreeLineAvatarItems - Cost in Project and Categories
cost and time comes from app.open_graph_dialog """
def __init__(self, cost, time, **kwargs):
super().__init__(**kwargs)
        if len(cost) > 1 and len(time) > 1:
self.cost = cost
self.time = time
self.max = max(self.cost)
Clock.schedule_once(self.graph, 0)
else:
self.cost = []
self.time = []
self.l = MDLabel(text='Nie masz jeszcze kosztów do wyświetlenia.')
self.ids.graph.add_widget(self.l)
def graph(self, *args):
"""
https://github.com/kivy-garden/graph/blob/27c93e044cdae041c3fd1c98548bce7494f61e9e/kivy_garden/graph/__init__.py#L159
y_ticks_major:
if max(cost) in range(0,1)
y_t_m = max(cost)
else:
y_t_m = max(cost) / 10
"""
        m = max(self.cost)
        # y_ticks_major: use max(cost) directly when it is below 1, otherwise a tenth of it
        def f(x): return x if 0 <= x < 1 else x / 10
self.graph = Graph(xlabel='czas', ylabel='koszt',
y_ticks_major=f(m),
x_ticks_major=1,
border_color=[0.349, 0.349, 0.349, 1],
tick_color=[0.349, 0.349, 0.349, 1],
label_options={'color': [1, 0.647, 0], 'bold': False},
draw_border=True,
y_grid_label=True, x_grid_label=True,
xmin=0, xmax=len(self.time),
ymin=0, ymax=self.max)
self.plot = MeshLinePlot(color=[1, 0.647, 0])
self.plot.points = list(zip(range(len(self.time)), self.cost))
self.graph.add_plot(self.plot)
self.ids.graph.add_widget(self.graph)
" scroll view with date and costs under graph "
l = list(zip(self.time, self.cost))
        for index, (date, cost) in enumerate(l):
            self.ids.list.add_widget(ThreeLineListItem(text=f'{index}',
                                                       secondary_text=f'{cost} zł',
                                                       tertiary_text=f'{date}'))
class GraphDialog(MDDialog):
dialog = None
def __init__(self, cost, time, title, **kwargs):
super().__init__(**kwargs)
if cost and time:
self.cost = cost
self.time = time
self.title = title
else:
self.cost = []
self.time = []
def show_graph(self):
if not self.dialog:
self.dialog = MDDialog(
title=f'{self.title}: ',
type="custom",
content_cls=Content(self.cost, self.time),
buttons=[
MDFlatButton(
text="ZAMKNIJ",
theme_text_color="Custom",
text_color=self.theme_cls.primary_color,
on_release=lambda _: self.dialog.dismiss()
),
],
)
self.dialog.open()
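# Illustrative usage sketch (the data below is made up, not from the original project):
#   GraphDialog(cost=[10.0, 25.5, 7.0],
#               time=["2021-01-01", "2021-01-08", "2021-01-15"],
#               title="Projekt").show_graph()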
|
import tweepy
import random
from config import create_api, logger
RESPONSES = ["Iewl", "Huuuuu", "Brrrrr", "Vies hè", "Gedverderrie", "Blèh"]
RANDOM_INTERVAL = 5
class HutsbotStreamListener(tweepy.StreamListener):
"""
Listens to incoming tweets containing the word 'hutspot' and
quotes them
"""
def __init__(self, api):
self.api = api
self.random_counter = 0
def on_status(self, tweet: tweepy.models.Status):
"""
Take action when a new tweet comes in
"""
if not hasattr(tweet, "retweeted_status") and tweet.user != self.api.me():
logger.info(f"@{tweet.user.screen_name}: {tweet.text} ({tweet.id})")
quote_url = (
f"https://twitter.com/{tweet.user.screen_name}/status/{tweet.id}"
)
# Tweet something else than an emoji after 5 times
if self.random_counter < RANDOM_INTERVAL:
self.api.update_status("😖", attachment_url=quote_url)
self.random_counter += 1
else:
                response = random.choice(RESPONSES)
                self.api.update_status(response, attachment_url=quote_url)
self.random_counter = 0
def on_error(self, status: int):
"""
        Called when the stream returns an error status code
"""
if status == 420:
logger.error(f"{status}: Rate limit exceeded")
else:
logger.error(f"{status}: Other API error")
def main():
"""
Driver method
"""
api = create_api()
tweet_listener = HutsbotStreamListener(api)
stream = tweepy.Stream(api.auth, tweet_listener)
stream.filter(track=["hutspot"])
if __name__ == "__main__":
main()
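# ``create_api`` and ``logger`` come from the config module, which is not shown here.
# A minimal sketch of such a helper, assuming credentials live in environment variables
# (the variable names are assumptions, not from the original project):
def _example_create_api():
    import os
    auth = tweepy.OAuthHandler(os.environ["CONSUMER_KEY"], os.environ["CONSUMER_SECRET"])
    auth.set_access_token(os.environ["ACCESS_TOKEN"], os.environ["ACCESS_TOKEN_SECRET"])
    return tweepy.API(auth, wait_on_rate_limit=True)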
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
"""The NCSNv2 model."""
import torch
import torch.nn as nn
import functools
from .utils import get_sigmas, register_model, diffusion_domain
from .layers import (CondRefineBlock, RefineBlock, ResidualBlock, ncsn_conv3x3,
ConditionalResidualBlock, get_act)
from .normalization import get_normalization
CondResidualBlock = ConditionalResidualBlock
conv3x3 = ncsn_conv3x3
def get_network(config):
if config.data.image_size < 96:
return functools.partial(NCSNv2, config=config)
elif 96 <= config.data.image_size <= 128:
return functools.partial(NCSNv2_128, config=config)
elif 128 < config.data.image_size <= 256:
return functools.partial(NCSNv2_256, config=config)
else:
raise NotImplementedError(
f'No network suitable for {config.data.image_size}px implemented yet.')
@register_model(name='ncsnv2_64')
class NCSNv2(nn.Module):
def __init__(self, config):
super().__init__()
self.norm = get_normalization(config)
self.nf = nf = config.model.nf
self.act = act = get_act(config)
self.register_buffer('sigmas', torch.tensor(get_sigmas(config)))
self.config = config
channels, image_size, self.centered = diffusion_domain(config)
self.begin_conv = nn.Conv2d(channels, nf, 3, stride=1, padding=1)
self.normalizer = self.norm(nf, config.model.num_scales)
self.end_conv = nn.Conv2d(nf, channels, 3, stride=1, padding=1)
self.res1 = nn.ModuleList([
ResidualBlock(self.nf, self.nf, resample=None, act=act,
normalization=self.norm),
ResidualBlock(self.nf, self.nf, resample=None, act=act,
normalization=self.norm)]
)
self.res2 = nn.ModuleList([
ResidualBlock(self.nf, 2 * self.nf, resample='down', act=act,
normalization=self.norm),
ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
normalization=self.norm)]
)
self.res3 = nn.ModuleList([
ResidualBlock(2 * self.nf, 2 * self.nf, resample='down', act=act,
normalization=self.norm, dilation=2),
ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
normalization=self.norm, dilation=2)]
)
if image_size == 28:
self.res4 = nn.ModuleList([
ResidualBlock(2 * self.nf, 2 * self.nf, resample='down', act=act,
normalization=self.norm, adjust_padding=True, dilation=4),
ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
normalization=self.norm, dilation=4)]
)
else:
self.res4 = nn.ModuleList([
ResidualBlock(2 * self.nf, 2 * self.nf, resample='down', act=act,
normalization=self.norm, adjust_padding=False, dilation=4),
ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
normalization=self.norm, dilation=4)]
)
self.refine1 = RefineBlock([2 * self.nf], 2 * self.nf, act=act, start=True)
self.refine2 = RefineBlock([2 * self.nf, 2 * self.nf], 2 * self.nf, act=act)
self.refine3 = RefineBlock([2 * self.nf, 2 * self.nf], self.nf, act=act)
self.refine4 = RefineBlock([self.nf, self.nf], self.nf, act=act, end=True)
def _compute_cond_module(self, module, x):
for m in module:
x = m(x)
return x
def forward(self, x, y):
if not self.centered:
h = 2 * x - 1.
else:
h = x
output = self.begin_conv(h)
layer1 = self._compute_cond_module(self.res1, output)
layer2 = self._compute_cond_module(self.res2, layer1)
layer3 = self._compute_cond_module(self.res3, layer2)
layer4 = self._compute_cond_module(self.res4, layer3)
ref1 = self.refine1([layer4], layer4.shape[2:])
ref2 = self.refine2([layer3, ref1], layer3.shape[2:])
ref3 = self.refine3([layer2, ref2], layer2.shape[2:])
output = self.refine4([layer1, ref3], layer1.shape[2:])
output = self.normalizer(output)
output = self.act(output)
output = self.end_conv(output)
used_sigmas = self.sigmas[y].view(x.shape[0], *([1] * len(x.shape[1:])))
output = output / used_sigmas
return output
@register_model(name='ncsn')
class NCSN(nn.Module):
def __init__(self, config):
super().__init__()
self.centered = config.data.centered
self.norm = get_normalization(config)
self.nf = nf = config.model.nf
self.act = act = get_act(config)
self.config = config
self.begin_conv = nn.Conv2d(config.data.channels, nf, 3, stride=1, padding=1)
self.normalizer = self.norm(nf, config.model.num_scales)
self.end_conv = nn.Conv2d(nf, config.data.channels, 3, stride=1, padding=1)
self.res1 = nn.ModuleList([
ConditionalResidualBlock(self.nf, self.nf, config.model.num_scales, resample=None, act=act,
normalization=self.norm),
ConditionalResidualBlock(self.nf, self.nf, config.model.num_scales, resample=None, act=act,
normalization=self.norm)]
)
self.res2 = nn.ModuleList([
ConditionalResidualBlock(self.nf, 2 * self.nf, config.model.num_scales, resample='down', act=act,
normalization=self.norm),
ConditionalResidualBlock(2 * self.nf, 2 * self.nf, config.model.num_scales, resample=None, act=act,
normalization=self.norm)]
)
self.res3 = nn.ModuleList([
ConditionalResidualBlock(2 * self.nf, 2 * self.nf, config.model.num_scales, resample='down', act=act,
normalization=self.norm, dilation=2),
ConditionalResidualBlock(2 * self.nf, 2 * self.nf, config.model.num_scales, resample=None, act=act,
normalization=self.norm, dilation=2)]
)
if config.data.image_size == 28:
self.res4 = nn.ModuleList([
ConditionalResidualBlock(2 * self.nf, 2 * self.nf, config.model.num_scales, resample='down', act=act,
normalization=self.norm, adjust_padding=True, dilation=4),
ConditionalResidualBlock(2 * self.nf, 2 * self.nf, config.model.num_scales, resample=None, act=act,
normalization=self.norm, dilation=4)]
)
else:
self.res4 = nn.ModuleList([
ConditionalResidualBlock(2 * self.nf, 2 * self.nf, config.model.num_scales, resample='down', act=act,
normalization=self.norm, adjust_padding=False, dilation=4),
ConditionalResidualBlock(2 * self.nf, 2 * self.nf, config.model.num_scales, resample=None, act=act,
normalization=self.norm, dilation=4)]
)
self.refine1 = CondRefineBlock([2 * self.nf], 2 * self.nf, config.model.num_scales, self.norm, act=act, start=True)
self.refine2 = CondRefineBlock([2 * self.nf, 2 * self.nf], 2 * self.nf, config.model.num_scales, self.norm, act=act)
self.refine3 = CondRefineBlock([2 * self.nf, 2 * self.nf], self.nf, config.model.num_scales, self.norm, act=act)
self.refine4 = CondRefineBlock([self.nf, self.nf], self.nf, config.model.num_scales, self.norm, act=act, end=True)
def _compute_cond_module(self, module, x, y):
for m in module:
x = m(x, y)
return x
def forward(self, x, y):
if not self.centered:
h = 2 * x - 1.
else:
h = x
output = self.begin_conv(h)
layer1 = self._compute_cond_module(self.res1, output, y)
layer2 = self._compute_cond_module(self.res2, layer1, y)
layer3 = self._compute_cond_module(self.res3, layer2, y)
layer4 = self._compute_cond_module(self.res4, layer3, y)
ref1 = self.refine1([layer4], y, layer4.shape[2:])
ref2 = self.refine2([layer3, ref1], y, layer3.shape[2:])
ref3 = self.refine3([layer2, ref2], y, layer2.shape[2:])
output = self.refine4([layer1, ref3], y, layer1.shape[2:])
output = self.normalizer(output, y)
output = self.act(output)
output = self.end_conv(output)
return output
@register_model(name='ncsnv2_128')
class NCSNv2_128(nn.Module):
"""NCSNv2 model architecture for 128px images."""
def __init__(self, config):
super().__init__()
self.centered = config.data.centered
self.norm = get_normalization(config)
self.nf = nf = config.model.nf
self.act = act = get_act(config)
self.register_buffer('sigmas', torch.tensor(get_sigmas(config)))
self.config = config
self.begin_conv = nn.Conv2d(config.data.channels, nf, 3, stride=1, padding=1)
self.normalizer = self.norm(nf, config.model.num_scales)
self.end_conv = nn.Conv2d(nf, config.data.channels, 3, stride=1, padding=1)
self.res1 = nn.ModuleList([
ResidualBlock(self.nf, self.nf, resample=None, act=act,
normalization=self.norm),
ResidualBlock(self.nf, self.nf, resample=None, act=act,
normalization=self.norm)]
)
self.res2 = nn.ModuleList([
ResidualBlock(self.nf, 2 * self.nf, resample='down', act=act,
normalization=self.norm),
ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
normalization=self.norm)]
)
self.res3 = nn.ModuleList([
ResidualBlock(2 * self.nf, 2 * self.nf, resample='down', act=act,
normalization=self.norm),
ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
normalization=self.norm)]
)
self.res4 = nn.ModuleList([
ResidualBlock(2 * self.nf, 4 * self.nf, resample='down', act=act,
normalization=self.norm, dilation=2),
ResidualBlock(4 * self.nf, 4 * self.nf, resample=None, act=act,
normalization=self.norm, dilation=2)]
)
self.res5 = nn.ModuleList([
ResidualBlock(4 * self.nf, 4 * self.nf, resample='down', act=act,
normalization=self.norm, dilation=4),
ResidualBlock(4 * self.nf, 4 * self.nf, resample=None, act=act,
normalization=self.norm, dilation=4)]
)
self.refine1 = RefineBlock([4 * self.nf], 4 * self.nf, act=act, start=True)
self.refine2 = RefineBlock([4 * self.nf, 4 * self.nf], 2 * self.nf, act=act)
self.refine3 = RefineBlock([2 * self.nf, 2 * self.nf], 2 * self.nf, act=act)
self.refine4 = RefineBlock([2 * self.nf, 2 * self.nf], self.nf, act=act)
self.refine5 = RefineBlock([self.nf, self.nf], self.nf, act=act, end=True)
def _compute_cond_module(self, module, x):
for m in module:
x = m(x)
return x
def forward(self, x, y):
if not self.centered:
h = 2 * x - 1.
else:
h = x
output = self.begin_conv(h)
layer1 = self._compute_cond_module(self.res1, output)
layer2 = self._compute_cond_module(self.res2, layer1)
layer3 = self._compute_cond_module(self.res3, layer2)
layer4 = self._compute_cond_module(self.res4, layer3)
layer5 = self._compute_cond_module(self.res5, layer4)
ref1 = self.refine1([layer5], layer5.shape[2:])
ref2 = self.refine2([layer4, ref1], layer4.shape[2:])
ref3 = self.refine3([layer3, ref2], layer3.shape[2:])
ref4 = self.refine4([layer2, ref3], layer2.shape[2:])
output = self.refine5([layer1, ref4], layer1.shape[2:])
output = self.normalizer(output)
output = self.act(output)
output = self.end_conv(output)
used_sigmas = self.sigmas[y].view(x.shape[0], *([1] * len(x.shape[1:])))
output = output / used_sigmas
return output
@register_model(name='ncsnv2_256')
class NCSNv2_256(nn.Module):
"""NCSNv2 model architecture for 256px images."""
def __init__(self, config):
super().__init__()
self.centered = config.data.centered
self.norm = get_normalization(config)
self.nf = nf = config.model.nf
self.act = act = get_act(config)
self.register_buffer('sigmas', torch.tensor(get_sigmas(config)))
self.config = config
self.begin_conv = nn.Conv2d(config.data.channels, nf, 3, stride=1, padding=1)
self.normalizer = self.norm(nf, config.model.num_scales)
self.end_conv = nn.Conv2d(nf, config.data.channels, 3, stride=1, padding=1)
self.res1 = nn.ModuleList([
ResidualBlock(self.nf, self.nf, resample=None, act=act,
normalization=self.norm),
ResidualBlock(self.nf, self.nf, resample=None, act=act,
normalization=self.norm)]
)
self.res2 = nn.ModuleList([
ResidualBlock(self.nf, 2 * self.nf, resample='down', act=act,
normalization=self.norm),
ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
normalization=self.norm)]
)
self.res3 = nn.ModuleList([
ResidualBlock(2 * self.nf, 2 * self.nf, resample='down', act=act,
normalization=self.norm),
ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
normalization=self.norm)]
)
self.res31 = nn.ModuleList([
ResidualBlock(2 * self.nf, 2 * self.nf, resample='down', act=act,
normalization=self.norm),
ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
normalization=self.norm)]
)
self.res4 = nn.ModuleList([
ResidualBlock(2 * self.nf, 4 * self.nf, resample='down', act=act,
normalization=self.norm, dilation=2),
ResidualBlock(4 * self.nf, 4 * self.nf, resample=None, act=act,
normalization=self.norm, dilation=2)]
)
self.res5 = nn.ModuleList([
ResidualBlock(4 * self.nf, 4 * self.nf, resample='down', act=act,
normalization=self.norm, dilation=4),
ResidualBlock(4 * self.nf, 4 * self.nf, resample=None, act=act,
normalization=self.norm, dilation=4)]
)
self.refine1 = RefineBlock([4 * self.nf], 4 * self.nf, act=act, start=True)
self.refine2 = RefineBlock([4 * self.nf, 4 * self.nf], 2 * self.nf, act=act)
self.refine3 = RefineBlock([2 * self.nf, 2 * self.nf], 2 * self.nf, act=act)
self.refine31 = RefineBlock([2 * self.nf, 2 * self.nf], 2 * self.nf, act=act)
self.refine4 = RefineBlock([2 * self.nf, 2 * self.nf], self.nf, act=act)
self.refine5 = RefineBlock([self.nf, self.nf], self.nf, act=act, end=True)
def _compute_cond_module(self, module, x):
for m in module:
x = m(x)
return x
def forward(self, x, y):
if not self.centered:
h = 2 * x - 1.
else:
h = x
output = self.begin_conv(h)
layer1 = self._compute_cond_module(self.res1, output)
layer2 = self._compute_cond_module(self.res2, layer1)
layer3 = self._compute_cond_module(self.res3, layer2)
layer31 = self._compute_cond_module(self.res31, layer3)
layer4 = self._compute_cond_module(self.res4, layer31)
layer5 = self._compute_cond_module(self.res5, layer4)
ref1 = self.refine1([layer5], layer5.shape[2:])
ref2 = self.refine2([layer4, ref1], layer4.shape[2:])
ref31 = self.refine31([layer31, ref2], layer31.shape[2:])
ref3 = self.refine3([layer3, ref31], layer3.shape[2:])
ref4 = self.refine4([layer2, ref3], layer2.shape[2:])
output = self.refine5([layer1, ref4], layer1.shape[2:])
output = self.normalizer(output)
output = self.act(output)
output = self.end_conv(output)
used_sigmas = self.sigmas[y].view(x.shape[0], *([1] * len(x.shape[1:])))
output = output / used_sigmas
return output
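# Illustrative sketch (not part of the original model code): the noise-conditioning step
# at the end of each forward pass divides the score by the sigma selected for each sample.
# The broadcasting trick is reproduced below on dummy tensors.
if __name__ == "__main__":
    sigmas = torch.linspace(1.0, 0.01, steps=10)  # stand-in for get_sigmas(config)
    x = torch.randn(4, 3, 64, 64)                 # dummy batch
    y = torch.tensor([0, 3, 5, 9])                # per-sample noise-level indices
    used_sigmas = sigmas[y].view(x.shape[0], *([1] * len(x.shape[1:])))
    print(used_sigmas.shape)                      # torch.Size([4, 1, 1, 1])
    scaled = x / used_sigmas                      # broadcasts over channels and spatial dims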
|
# coding: utf-8
import ctypes
from objc_util import c, ObjCInstance
import matrix4
import matrix3
import matrix2
import vector3
import vector4
def CFAllocatorGetDefault():
func = c.CFAllocatorGetDefault
func.argtypes = None
func.restype = ctypes.c_void_p
return ObjCInstance(func())
def GLKMatrixStackCreate(alloc):
func = c.GLKMatrixStackCreate
func.argtypes = [ctypes.c_void_p]
func.restype = ctypes.c_void_p
return ObjCInstance(func(ObjCInstance(alloc).ptr))
def GLKMatrixStackGetTypeID(stack_ptr):
func = c.GLKMatrixStackGetTypeID
func.argtypes = None
func.restype = ctypes.c_void_p
return ObjCInstance(func())
def GLKMatrixStackPush(stack):
func = c.GLKMatrixStackPush
func.argtypes = [ctypes.c_void_p]
func.restype = None
return func(ObjCInstance(stack).ptr)
def GLKMatrixStackPop(stack):
func = c.GLKMatrixStackPop
func.argtypes = [ctypes.c_void_p]
func.restype = None
return func(ObjCInstance(stack).ptr)
def GLKMatrixStackSize(stack):
func = c.GLKMatrixStackSize
func.argtypes = [ctypes.c_void_p]
func.restype = ctypes.c_int
return func(ObjCInstance(stack).ptr)
def GLKMatrixStackLoadMatrix4(stack, matrix):
func = c.GLKMatrixStackLoadMatrix4
func.argtypes = [ctypes.c_void_p, matrix4.GLKMatrix4]
func.restype = None
return func(ObjCInstance(stack).ptr, matrix)
def GLKMatrixStackGetMatrix4(stack):
func = c.GLKMatrixStackGetMatrix4
func.argtypes = [ctypes.c_void_p]
func.restype = matrix4.GLKMatrix4
return func(ObjCInstance(stack).ptr)
def GLKMatrixStackGetMatrix3(stack):
func = c.GLKMatrixStackGetMatrix3
func.argtypes = [ctypes.c_void_p]
func.restype = matrix3.GLKMatrix3
return func(ObjCInstance(stack).ptr)
def GLKMatrixStackGetMatrix2(stack):
func = c.GLKMatrixStackGetMatrix2
func.argtypes = [ctypes.c_void_p]
func.restype = matrix2.GLKMatrix2
return func(ObjCInstance(stack).ptr)
def GLKMatrixStackGetMatrix4Inverse(stack):
func = c.GLKMatrixStackGetMatrix4Inverse
func.argtypes = [ctypes.c_void_p]
func.restype = matrix4.GLKMatrix4
return func(ObjCInstance(stack).ptr)
def GLKMatrixStackGetMatrix4InverseTranspose(stack):
func = c.GLKMatrixStackGetMatrix4InverseTranspose
func.argtypes = [ctypes.c_void_p]
func.restype = matrix4.GLKMatrix4
return func(ObjCInstance(stack).ptr)
def GLKMatrixStackGetMatrix3Inverse(stack):
func = c.GLKMatrixStackGetMatrix3Inverse
func.argtypes = [ctypes.c_void_p]
func.restype = matrix3.GLKMatrix3
return func(ObjCInstance(stack).ptr)
def GLKMatrixStackGetMatrix3InverseTranspose(stack):
func = c.GLKMatrixStackGetMatrix3InverseTranspose
func.argtypes = [ctypes.c_void_p]
func.restype = matrix3.GLKMatrix3
return func(ObjCInstance(stack).ptr)
def GLKMatrixStackMultiplyMatrix4(stack, matrix):
func = c.GLKMatrixStackMultiplyMatrix4
func.argtypes = [ctypes.c_void_p, matrix4.GLKMatrix4]
func.restype = None
return func(ObjCInstance(stack).ptr, matrix)
def GLKMatrixStackMultiplyMatrixStack(stackLeft, stackRight):
func = c.GLKMatrixStackMultiplyMatrixStack
func.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
func.restype = None
return func(ObjCInstance(stackLeft).ptr, ObjCInstance(stackRight).ptr)
def GLKMatrixStackTranslate(stack, tx, ty, tz):
func = c.GLKMatrixStackTranslate
func.argtypes = [
ctypes.c_void_p, ctypes.c_float, ctypes.c_float, ctypes.c_float
]
func.restype = None
    return func(ObjCInstance(stack).ptr, tx, ty, tz)
def GLKMatrixStackTranslateWithVector3(stack, translationVector):
func = c.GLKMatrixStackTranslateWithVector3
func.argtypes = [ctypes.c_void_p, vector3.GLKVector3]
func.restype = None
return func(ObjCInstance(stack).ptr, translationVector)
def GLKMatrixStackTranslateWithVector4(stack, translationVector):
func = c.GLKMatrixStackTranslateWithVector4
func.argtypes = [ctypes.c_void_p, vector4.GLKVector4]
func.restype = None
return func(ObjCInstance(stack).ptr, translationVector)
def GLKMatrixStackScale(stack, sx, sy, sz):
func = c.GLKMatrixStackScale
func.argtypes = [
ctypes.c_void_p, ctypes.c_float, ctypes.c_float, ctypes.c_float
]
func.restype = None
    return func(ObjCInstance(stack).ptr, sx, sy, sz)
def GLKMatrixStackScaleWithVector3(stack, scaleVector):
func = c.GLKMatrixStackScaleWithVector3
func.argtypes = [ctypes.c_void_p, vector3.GLKVector3]
func.restype = None
return func(ObjCInstance(stack).ptr, scaleVector)
def GLKMatrixStackScaleWithVector4(stack, scaleVector):
func = c.GLKMatrixStackScaleWithVector4
func.argtypes = [ctypes.c_void_p, vector4.GLKVector4]
func.restype = None
return func(ObjCInstance(stack).ptr, scaleVector)
def GLKMatrixStackRotate(stack, radians, x, y, z):
func = c.GLKMatrixStackRotate
func.argtypes = [
ctypes.c_void_p, ctypes.c_float, ctypes.c_float, ctypes.c_float,
ctypes.c_float
]
func.restype = None
    return func(ObjCInstance(stack).ptr, radians, x, y, z)
def GLKMatrixStackRotateWithVector3(stack, radians, axisVector):
func = c.GLKMatrixStackRotateWithVector3
func.argtypes = [ctypes.c_void_p, ctypes.c_float, vector3.GLKVector3]
func.restype = None
return func(ObjCInstance(stack).ptr, radians, axisVector)
def GLKMatrixStackRotateWithVector4(stack, radians, axisVector):
func = c.GLKMatrixStackRotateWithVector4
func.argtypes = [ctypes.c_void_p, ctypes.c_float, vector4.GLKVector4]
func.restype = None
return func(ObjCInstance(stack).ptr, radians, axisVector)
def GLKMatrixStackRotateX(stack, radians):
func = c.GLKMatrixStackRotateX
func.argtypes = [ctypes.c_void_p, ctypes.c_float]
func.restype = None
return func(ObjCInstance(stack).ptr, radians)
def GLKMatrixStackRotateY(stack, radians):
func = c.GLKMatrixStackRotateY
func.argtypes = [ctypes.c_void_p, ctypes.c_float]
func.restype = None
return func(ObjCInstance(stack).ptr, radians)
def GLKMatrixStackRotateZ(stack, radians):
func = c.GLKMatrixStackRotateZ
func.argtypes = [ctypes.c_void_p, ctypes.c_float]
func.restype = None
return func(ObjCInstance(stack).ptr, radians)
__all__ = [
'CFAllocatorGetDefault', 'GLKMatrixStackCreate', 'GLKMatrixStackGetTypeID',
'GLKMatrixStackPush', 'GLKMatrixStackPop', 'GLKMatrixStackSize',
'GLKMatrixStackLoadMatrix4', 'GLKMatrixStackGetMatrix4',
'GLKMatrixStackGetMatrix3', 'GLKMatrixStackGetMatrix2',
'GLKMatrixStackGetMatrix4Inverse',
'GLKMatrixStackGetMatrix4InverseTranspose',
'GLKMatrixStackGetMatrix3Inverse',
'GLKMatrixStackGetMatrix3InverseTranspose', 'GLKMatrixStackMultiplyMatrix4',
'GLKMatrixStackMultiplyMatrixStack', 'GLKMatrixStackTranslate',
'GLKMatrixStackTranslateWithVector3', 'GLKMatrixStackTranslateWithVector4',
'GLKMatrixStackScale', 'GLKMatrixStackScaleWithVector3',
'GLKMatrixStackScaleWithVector4', 'GLKMatrixStackRotate',
'GLKMatrixStackRotateWithVector3', 'GLKMatrixStackRotateWithVector4',
'GLKMatrixStackRotateX', 'GLKMatrixStackRotateY', 'GLKMatrixStackRotateZ'
]
if __name__ == '__main__':
stack = GLKMatrixStackCreate(CFAllocatorGetDefault())
GLKMatrixStackPush(stack)
GLKMatrixStackPush(stack)
GLKMatrixStackPop(stack)
GLKMatrixStackPop(stack)
GLKMatrixStackPop(stack)
GLKMatrixStackPop(stack)
print(stack)
print(GLKMatrixStackSize(stack))
GLKMatrixStackLoadMatrix4(stack, matrix4.GLKMatrix4MakeTranslation(10, 0, 0))
print(GLKMatrixStackGetMatrix4(stack))
GLKMatrixStackTranslate(stack, 10, 4, 8)
|
import csv
import os
import rpy2.robjects as robjects # R integration
from rpy2.robjects.packages import importr # import the importr package from R
from orm.glmcoefficients import * # to store the glm coefficients (provides GlmCoefficients)
from db import * # postgresql db information (provides Session)
import json # coefficients are serialized to JSON before storage
import math
from commit_guru.caslogging import logging
class LinearRegressionModel:
"""
builds the generalized linear regression model (GLM).
all coefficients stored in the database under the glm_coefficients table
probability: intercept + sum([metric_coefficient] * metric)
"""
def __init__(self, metrics, repo_id, testingCommits):
"""
@metrics - this is the list of metrics from the TRAINING data set.
@repo_id - the repository repo_id
@testingCommits - this is commits from the TESTING data set
"""
self.metrics = metrics
self.repo_id = repo_id
self.stats = importr('stats', robject_translations={'format_perc': '_format_perc'})
self.base = importr('base')
self.readcsv = robjects.r['read.csv']
self.sig_threshold = 0.05
self.data = None
self.commits = testingCommits
def buildModel(self):
"""
Builds the GLM model, stores the coefficients, and calculates the probability based on model that a commit
will introduce a bug.
"""
self._buildDataSet()
self._buildModelIncrementally()
def _buildDataSet(self):
"""
builds the data set to be used for getting the linear regression model.
saves datasets in the datasets folder as csv files to easily be imported
or used by R.
"""
# to write dataset file in this directory (git ignored!)
current_dir = os.path.dirname(__file__)
dir_of_datasets = current_dir + "/datasets/"
num_buggy = getattr(self.metrics, "num_buggy")
num_nonbuggy = getattr(self.metrics, "num_nonbuggy")
with open(dir_of_datasets + self.repo_id + ".csv", "w") as file:
csv_writer = csv.writer(file, dialect="excel")
# write the columns
csv_writer.writerow(["ns","nd","nf","entrophy","la","ld","lt","ndev","age","nuc","exp","rexp","sexp","is_buggy"])
# write the relevant data - start w/ the buggy data first
for buggy_index in range(0,num_buggy):
ns = self.metrics.ns_buggy[buggy_index]
nd = self.metrics.nd_buggy[buggy_index]
nf = self.metrics.nf_buggy[buggy_index]
entrophy = self.metrics.entrophy_buggy[buggy_index]
la = self.metrics.la_buggy[buggy_index]
ld = self.metrics.ld_buggy[buggy_index]
lt = self.metrics.lt_buggy[buggy_index]
ndev = self.metrics.ndev_buggy[buggy_index]
age = self.metrics.age_buggy[buggy_index]
nuc = self.metrics.nuc_buggy[buggy_index]
exp = self.metrics.exp_buggy[buggy_index]
rexp = self.metrics.rexp_buggy[buggy_index]
sexp = self.metrics.sexp_buggy[buggy_index]
csv_writer.writerow([ns,nd,nf,entrophy,la,ld,lt,ndev,age,nuc,exp,rexp,sexp,True])
# end buggy data
# write the non buggy data
for nonbuggy_index in range(0,num_nonbuggy):
ns = self.metrics.ns_nonbuggy[nonbuggy_index]
nd = self.metrics.nd_nonbuggy[nonbuggy_index]
nf = self.metrics.nf_nonbuggy[nonbuggy_index]
entrophy = self.metrics.entrophy_nonbuggy[nonbuggy_index]
la = self.metrics.la_nonbuggy[nonbuggy_index]
ld = self.metrics.ld_nonbuggy[nonbuggy_index]
lt = self.metrics.lt_nonbuggy[nonbuggy_index]
ndev = self.metrics.ndev_nonbuggy[nonbuggy_index]
age = self.metrics.age_nonbuggy[nonbuggy_index]
nuc = self.metrics.nuc_nonbuggy[nonbuggy_index]
exp = self.metrics.exp_nonbuggy[nonbuggy_index]
rexp = self.metrics.rexp_nonbuggy[nonbuggy_index]
sexp = self.metrics.sexp_nonbuggy[nonbuggy_index]
csv_writer.writerow([ns,nd,nf,entrophy,la,ld,lt,ndev,age,nuc,exp,rexp,sexp,False])
# end non buggy data
# end file
def _isMetricSignificant(self, formula_metrics, metric):
"""
Checks if adding a metric to the already significant metrics in formula_metrics in a GLM model is significant. If significant,
and doesn't cause any previous metric in formula_metrics to become non significant, we return true. Otherwise, false.
Note: The p-value is always given in the 4th column of the summary matrix!
"""
sig_column = 4
# Case 1: no existing metrics in the formula
if len(formula_metrics) == 0:
formula = "is_buggy~" + metric
fit = self.stats.glm(formula, data=self.data, family="binomial")
summary = self.base.summary(fit)
# Note - first row is the intercept information so we start at second row!
try:
metric_sig = summary.rx2('coefficients').rx(2,sig_column)[0] # Second row, 4th column of the summary matrix.
if metric_sig <= self.sig_threshold:
return True
else:
return False
except:
# If we have two metrics that are perfectly collinear it will not build the model with the metrics
# and we will get an exception when trying to find the significance of *all values*. Indeed, do not add
# this value to the model!
return False
# Case 2: existing metrics in the formula
else:
num_metrics = len(formula_metrics)+2 # plus one for the new metric we are adding and one for intercept
formula = "is_buggy~" + "+".join(formula_metrics) + "+" + metric
fit = self.stats.glm(formula, data=self.data, family="binomial")
summary = self.base.summary(fit)
            # If any metric is now not significant, then we should not have added this metric to the formula
# There are (intercept) + num_metrics rows in the matrix to check - starts at second row skipping intercept
try:
for row in range(2,num_metrics+1):
metric_sig = summary.rx2('coefficients').rx(row,sig_column)[0]
if metric_sig > self.sig_threshold:
return False
return True # old metrics added to model ARE significant still as well as the new one being tested
except:
# If we have two metrics that are perfectly collinear it will not build the model with the metrics
# and we will get an exception when trying to find the significance of *all values*. Indeed, do not add
# this value to the model!
return False
def _buildModelIncrementally(self):
"""
        Builds the linear regression model incrementally. It adds one metric at a time to the formula and keeps it
        if it is significant. However, if adding it to the model causes any other metric already in the formula
        to become non-significant, we do not add it to the GLM formula.
"""
metrics_list = ["la","ld","lt","ns","nd","nf","ndev","age","nuc","exp","rexp","sexp","entrophy"]
formula_metrics = []
current_dir = os.path.dirname(__file__)
dir_of_datasets = current_dir + "/datasets/"
self.data = self.readcsv(dir_of_datasets + self.repo_id + ".csv", header=True, sep = ",")
for metric in metrics_list:
if self._isMetricSignificant(formula_metrics, metric):
formula_metrics.append(metric)
# Store coefficients of our model w/ formula containing only the sig coefficients
self._storeCoefficients(formula_metrics)
# Calculate all probability for each commit to introduce a bug
self.calculateCommitRiskyness(self.commits, formula_metrics)
def _getCoefficients(self, formula_coefs):
"""
        Builds a GLM model with a formula based on the passed-in coefficients and returns a dictionary containing each
        coefficient with its value.
"""
coef_dict = {} # a dict containing glm coefficients {name -> value}
formula = "is_buggy~" + "+".join(formula_coefs)
fit = self.stats.glm(formula, data=self.data, family="binomial")
for coef in formula_coefs:
coef_dict[coef] = fit.rx2('coefficients').rx2(coef)[0]
return coef_dict
def _getInterceptValue(self, coefs):
"""
Return the Intercept value of a GLM model and the p-value
Assumes that model can be built!
"""
formula = "is_buggy~" + "+".join(coefs)
fit = self.stats.glm(formula, data=self.data, family="binomial")
summary = self.base.summary(fit)
return summary.rx2('coefficients').rx(1)[0], summary.rx2('coefficients').rx(1,4)[0]
def _getCoefficientObject(self, coef_name, coef_value):
"""
returns a JSON object representation of coefficient given the name and value. if coefficient significance, true or false
is given depending on if it meets the significance threshold
"""
coef_object = ""
coef_object += '"' + str(coef_name) + '":"' + str(coef_value)
return coef_object + '",'
def _storeCoefficients(self, coefficient_names):
"""
stores the glm coefficients in the database
"""
# We are making this into JSON to simply store it in the database.
coefs = ""
coefs += '"repo":"' + str(self.repo_id) + '",'
        # 2 Cases: where there are NO significant coefficients and the reverse case.
if len(coefficient_names) == 0:
coefficient_dict = {}
else:
coefficient_dict = self._getCoefficients(coefficient_names)
# get the constant (aka intercept value)
intercept_value, intercept_pvalue = self._getInterceptValue(coefficient_names)
if intercept_pvalue <= self.sig_threshold:
intercept_sig = 1
else:
intercept_sig = 0
coefs += self._getCoefficientObject("intercept", intercept_value)
coefs += self._getCoefficientObject("intercept_sig", intercept_sig)
# Keep track of all and the subset of all that are significant as we need to record everything to the db
sig_coefs = []
all_coefs = ["ns", "nd", "nf", "entrophy", "la", "ld", "lt", "ndev", "age", "nuc", "exp", "rexp", "sexp"]
        # iterate through all the values in the dict containing coefficients
for coef_name, coef_value in coefficient_dict.items():
coefs += self._getCoefficientObject(coef_name, coef_value)
coefs += self._getCoefficientObject(coef_name + "_sig", 1) # keep track more easily which are statistically significant in db
sig_coefs.append(coef_name)
        # append the non-significant coefficients as -1 and mark them as not significant
for c in all_coefs:
if c not in sig_coefs:
coefs += self._getCoefficientObject(c, -1)
coefs += self._getCoefficientObject(c + "_sig", 0)
# remove the trailing comma
coefs = coefs[:-1]
# Insert into the coefficient table
coefSession = Session()
allCoef = GlmCoefficients(json.loads('{' + coefs + '}'))
# Copy to db
coefSession.merge(allCoef)
# Write
coefSession.commit()
coefSession.close()
def calculateCommitRiskyness(self, commits, coefficient_names):
"""
calcualte the probability of commits to be buggy or not
using the linear regression model
estimated probability = 1/[1 + exp(-a - BX)]
"""
        # 2 cases: the model cannot be built if no significant coefficients are available;
        # in this case, we just insert -1 for the probability to indicate no GLM prediction is possible
if len(coefficient_names) == 0:
coefficient_dict = {}
model_available = False
else:
coefficient_dict = self._getCoefficients(coefficient_names)
model_available = True
intercept_value, intercept_pvalue = self._getInterceptValue(coefficient_names)
for commit in commits:
            if not model_available:
commit.glm_probability = -1
else:
coefs_sum = 0
for coef_name, coef_value in coefficient_dict.items():
coefs_sum += (coef_value * getattr(commit, coef_name))
try:
riskyness = 1/(1+ math.exp(-intercept_value-coefs_sum))
except OverflowError:
logging.error("Overflow error for repo " + self.repo_id)
logging.error("Calculating riskyness for " + commit.commit_hash)
logging.error("Sum of coefficients: " + str(coefs_sum))
logging.error("Coeffiecents: " + str(coefficient_dict))
riskyness = 0.01
commit.glm_probability = riskyness
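# Illustrative sketch (the numbers are made up, not from any real model): how the riskyness
# probability above is obtained from an intercept and the significant coefficients.
if __name__ == "__main__":
    intercept = -2.0
    coefficients = {"la": 0.004, "ld": 0.002, "exp": -0.01}  # hypothetical GLM coefficients
    commit_metrics = {"la": 120, "ld": 30, "exp": 50}        # hypothetical commit metrics
    coefs_sum = sum(value * commit_metrics[name] for name, value in coefficients.items())
    print(1 / (1 + math.exp(-intercept - coefs_sum)))        # estimated probability of being buggy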
|
import pytest
pytest.importorskip('olm') # noqa
from matrix_client.crypto.encrypt_attachments import (encrypt_attachment,
decrypt_attachment)
def test_encrypt_decrypt():
message = b'test'
ciphertext, info = encrypt_attachment(message)
assert decrypt_attachment(ciphertext, info) == message
ciphertext += b'\x00'
with pytest.raises(RuntimeError):
decrypt_attachment(ciphertext, info)
|
# -*- coding: utf-8 -*-
"""
hss_app.milenage
~~~~~~~~~~~~~~~~
This module implements the Milenage algo set as per
3GPP TS 35.206 V9.0.0 (2009-12).
"""
import hmac
import random
from collections import namedtuple
from Crypto.Cipher import AES
AMF_DEFAULT_VALUE = bytes.fromhex("8000")
INITIALIZATION_VECTOR = 16 * bytes.fromhex("00")
#: Five 128-bit constants c1, c2, c3, c4, c5 are defined as per
#: ETSI TS 135 206 V9.0.0 (2010-02) in Section 4.1.
C1 = 16 * bytes.fromhex("00") # 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00
C2 = 15 * bytes.fromhex("00") + bytes.fromhex("01") # 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x01
C3 = 15 * bytes.fromhex("00") + bytes.fromhex("02") # 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x02
C4 = 15 * bytes.fromhex("00") + bytes.fromhex("04") # 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x04
C5 = 15 * bytes.fromhex("00") + bytes.fromhex("08") # 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x08
R1 = 8 # rotate by 8 * 8 = 64 bits
R2 = 0 # rotate by 0 * 8 = 0 bits
R3 = 4 # rotate by 4 * 8 = 32 bits
R4 = 8 # rotate by 8 * 8 = 64 bits
R5 = 12 # rotate by 12 * 8 = 96 bits
#: Constants defined as per ETSI TS 133 401 V15.7.0 (2019-05) in Annex A.2
#: KASME derivation function
FC = bytes.fromhex("10") # 0x10
L0 = bytes.fromhex("0003") # 0x00 0x03
L1 = bytes.fromhex("0006") # 0x00 0x06
Vector = namedtuple("Vector", ["rand", "xres", "autn", "kasme"])
def xor(bytes1: bytes, bytes2: bytes) -> bytes:
"""Support function to perform Exclusive-OR operation on two bytes.
:param bytes1: set of bytes 1
:param bytes2: set of bytes 2
:returns: XORed data
"""
if len(bytes1) == len(bytes2):
return bytes(a ^ b for a, b in zip(bytes1, bytes2))
raise ValueError("Input values must have same length")
def rot(_input: bytes, _bytes: int) -> bytes:
"""Support function to rotate a byte stream by a given byte value.
:param _input: bytes stream
:param _bytes: bytes to be rotated
:returns: rotated data
"""
return bytes(_input[(i + _bytes) % len(_input)] for i in range(len(_input)))
def kdf(key: bytes, data: bytes) -> bytes:
"""Implementation of Generic Key Derivation Function in Annex B.2 of
ETSI TS 133 220 V11.4.0 (2012-10).
:param key: denoted key
:param data: data to be hashed
:returns: derived key, the hashed data
"""
return hmac.new(key, data, "sha256").digest()
def cipher(key: bytes, data: bytes, IV: bytes = INITIALIZATION_VECTOR) -> bytes:
"""Implementation of Rijndael (AES-128) encryption function used by
Milenage algo.
:param key: 128-bit subscriber key
    :param data: 128-bit data to be encrypted
:param IV: 128-bit initialization vector
:returns: encrypted data
"""
aes_cipher = AES.new(key, AES.MODE_CBC, IV)
return aes_cipher.encrypt(data)
def calculate_autn(sqn: bytes, ak: bytes, mac_a: bytes, amf: bytes = AMF_DEFAULT_VALUE) -> bytes:
"""Implementation of network authentication token calculation in
Section 5.1.1.1 of 3GPP TS 33.105 V13.0.0 (2016-01).
:param sqn: 48-bit sequence number
:param ak: 48-bit anonymity key
:param mac_a: 64-bit network authentication code
:param amf: 16-bit authentication management field
:returns: authentication token of 128 bits (AUTN)
"""
return xor(sqn, ak) + amf + mac_a
def calculate_kasme(ck: bytes, ik: bytes, plmn: bytes, sqn: bytes, ak: bytes) -> bytes:
"""Implementation of Kasme derivation function in Annex A.2 of
ETSI TS 133 401 V15.7.0 (2019-05).
    :param ck: 128-bit confidentiality key
    :param ik: 128-bit integrity key
:param plmn: 24-bit network identifier
:param sqn: 48-bit sequence number
:param ak: 48-bit anonymity key
:returns: 128-bit network base key
"""
return kdf(ck + ik, (FC + plmn + L0 + xor(sqn, ak) + L1))
def generate_opc(key: bytes, op: bytes) -> bytes:
"""Implementation of OPc computation in Section 8.2 of
3GPP TS 35.205 V5.0.0 (2002-06).
:param key: 128-bit subscriber key
:param op: 128-bit Operator Variant Algorithm Configuration Field
:returns: 128-bit value derived from OP & K used within the computation of the functions
"""
return xor(cipher(key, op), op)
def generate_rand() -> bytes:
"""Function that generates a 128-bit random challenge (RAND) for Milenage
algo.
:returns: 128-bit random challenge (RAND)
"""
return bytes(bytearray.fromhex("{:032x}".format(random.getrandbits(128))))
def calculate_output(key: bytes, rand: bytes, opc: bytes, r: int, c: bytes, sqn: bytes = None, amf: bytes = None) -> bytes:
"""Support function which represent the common operations along the set of
3GPP authentication and key generation functions f1, f1*, f2, f3, f4, f5 and
f5*.
:param key: 128-bit subscriber key
:param rand: 128-bit random challenge
:param opc: 128-bit value derived from OP & K
:param r: integers in the range 0–127 inclusive, which define amounts by which intermediate variables are cyclically rotated
:param c: 128-bit constants, which are XORed onto intermediate variables
:param sqn: 48-bit sequence number
:param amf: 16-bit authentication management field
:returns: output corresponding to 3GPP authentication function triggered
"""
if sqn is None and amf is None:
temp = xor(cipher(key, xor(rand, opc)), opc)
return xor(cipher(key, xor(rot(temp, r), c)), opc)
temp = cipher(key, xor(rand, opc))
in1 = (sqn[0:6] + amf[0:2]) * 2
return xor(opc, cipher(key, xor(temp, rot(xor(in1, opc), R1)), C1))
def get_mac_a(output: bytes) -> bytes:
"""Support function to get the 64-bit network authentication code (MAC-A)
from OUT1, the output of 3GPP f1 function.
:param output: OUT1
:returns: OUT1[0] .. OUT1[63]
"""
edge = 8 # = ceil(63/8)
return output[:edge]
def get_mac_s(output: bytes) -> bytes:
"""Support function to get the 64-bit resynchronisation authentication code
(MAC-S) from OUT1, the output of 3GPP f1* function.
:param output: OUT1
:returns: OUT1[64] .. OUT1[127]
"""
edge = 8 # = ceil(63/8)
return output[edge:]
def get_res(output: bytes) -> bytes:
"""Support function to get the 64-bit signed response (RES) from OUT2, the
output of 3GPP f2 function.
:param output: OUT2
:returns: OUT2[64] .. OUT2[127]
"""
lower_edge = 8 # = ceil(64/8)
upper_edge = 16 # = ceil(127/8)
return output[lower_edge:upper_edge]
def get_ak(output: bytes) -> bytes:
"""Support function to get the 48-bit anonimity key (AK) from OUT2, the
output of 3GPP f5 function.
:param output: OUT2
:returns: OUT2[0] .. OUT2[47]
"""
edge = 6 # = ceil(47/8)
return output[:edge]
def f1_and_f1_s(key: bytes, rand: bytes, opc: bytes, sqn: bytes, amf: bytes) -> bytes:
"""Implementation of key generation function f1 & f1* in Section 4.1 of
3GPP TS 35.206 V9.0.0 (2009-12), which calculates the authentication code
(MAC-A) and resynchronization authentication code (MAC-S) respectively.
:param key: 128-bit subscriber key
:param rand: 128-bit random challenge
:param opc: 128-bit value derived from OP & K
:param sqn: 48-bit sequence number
:param amf: 16-bit authentication management field
:returns:
- mac_a - 64-bit network authentication code (MAC-A)
- mac_s - 64-bit resynchronisation authentication code (MAC-S)
"""
output = calculate_output(key, rand, opc, R1, C1, sqn, amf)
return get_mac_a(output), get_mac_s(output)
def f2_and_f5(key: bytes, rand: bytes, opc: bytes) -> bytes:
"""Implementation of key generation functions f2 & f5 in Section 4.1 of
3GPP TS 35.206 V9.0.0 (2009-12), which calculates the result (RES) and
anonymity key (AK) respectively.
:param key: 128-bit subscriber key
:param rand: 128-bit random challenge
:param opc: 128-bit value derived from OP & K
:returns:
- res - 64-bit signed response (RES)
- ak - 48-bit anonymity key (AK)
"""
output = calculate_output(key, rand, opc, R2, C2)
return get_res(output), get_ak(output)
def f3(key: bytes, rand: bytes, opc: bytes) -> bytes:
"""Implementation of key generation function f3 in Section 4.1 of
3GPP TS 35.206 V9.0.0 (2009-12), which calculates the confidentiality key
(CK).
:param key: 128-bit subscriber key
:param rand: 128-bit random challenge
:param opc: 128-bit value derived from OP & K
:returns: 128-bit confidentiality key (CK)
"""
return calculate_output(key, rand, opc, R3, C3)
def f4(key: bytes, rand: bytes, opc: bytes) -> bytes:
"""Implementation of key generation function f4 in Section 4.1 of
3GPP TS 35.206 V9.0.0 (2009-12), which calculates the integrity key (IK).
:param key: 128-bit subscriber key
:param rand: 128-bit random challenge
:param opc: 128-bit value derived from OP & K
:returns: 128-bit integrity key (IK)
"""
return calculate_output(key, rand, opc, R4, C4)
def get_f5_s(output: bytes) -> bytes:
"""Support function to get the 48-bit anonimity key (AK) from OUT5, the
output of 3GPP f5* function.
:param output: OUT5
:returns: OUT5[0] .. OUT5[47]
"""
edge = 6 # = ceil(47/8)
return output[:edge]
def f5_s(key: bytes, rand: bytes, opc: bytes) -> bytes:
"""Implementation of key generation function f5* in Section 4.1 of
3GPP TS 35.206 V9.0.0 (2009-12), which calculates the anonymity key (AK).
:param key: 128-bit subscriber key
:param rand: 128-bit random challenge
:param opc: 128-bit value derived from OP & K
    :returns: 48-bit anonymity key (AK)
"""
output = calculate_output(key, rand, opc, R5, C5)
return get_f5_s(output)
def calculate_eutran_vector(opc: bytes, key: bytes, amf: bytes, sqn: bytes, plmn: bytes, rand: bytes = None) -> Vector:
"""Implementation of E-UTRAN vector calculation based on Milenage algo set.
:param opc: 128-bit value derived from OP & K
:param key: 128-bit subscriber key
:param amf: 16-bit authentication management field
:param sqn: 48-bit sequence number
:param plmn: 24-bit network identifier
:param rand: 128-bit random challenge
:returns: Vector namedtuple
"""
if rand is None:
rand = generate_rand()
mac_a, _ = f1_and_f1_s(key, rand, opc, sqn, amf)
xres, ak = f2_and_f5(key, rand, opc)
ck = f3(key, rand, opc)
ik = f4(key, rand, opc)
autn = calculate_autn(sqn, ak, mac_a, amf)
kasme = calculate_kasme(ck, ik, plmn, sqn, ak)
return Vector(bytes(rand), bytes(xres), bytes(autn), bytes(kasme))
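# Illustrative usage sketch with dummy test values; a real deployment uses per-subscriber
# K and operator OP values, which are not part of this module.
if __name__ == "__main__":
    k = bytes.fromhex("00112233445566778899aabbccddeeff")   # dummy 128-bit subscriber key
    op = bytes.fromhex("00000000000000000000000000000000")  # dummy 128-bit operator constant
    opc = generate_opc(k, op)
    sqn = bytes.fromhex("000000000001")                      # 48-bit sequence number
    plmn = bytes.fromhex("02f810")                           # example 24-bit PLMN identifier
    vector = calculate_eutran_vector(opc, k, AMF_DEFAULT_VALUE, sqn, plmn)
    print(vector.rand.hex(), vector.xres.hex(), vector.autn.hex(), vector.kasme.hex())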
|
"""Implementation of the workflow for demultiplexing sequencing directories."""
import collections
import csv
import glob
import gzip
import itertools
import json
import logging
import os
import shutil
import subprocess
import sys
from threading import Thread, Lock
import tempfile
import xml.etree.ElementTree as ET
from snakemake.exceptions import WorkflowError
from digestiflow_demux import __version__
from .bases_mask import split_bases_mask, return_bases_mask, BaseMaskConfigException
from .api_client import ApiClient, ApiException
from .exceptions import ApiProblemException, MissingOutputFile
#: Path to the Snakefile.
PATH_SNAKEFILE = os.path.abspath(os.path.join(os.path.dirname(__file__), "Snakefile"))
#: Template for the success message.
TPL_MSG_SUCCESS = r"""
The demultiplexing succeeded for flow cell {flowcell[vendor_id]}.
See the attached files for quality reports.
The following attachments were not present (this is OK for HTML reports that are not generated
by Picard):
{missing_log_files}
--
This message was auto-created by digestiflow-demux v{version}.
"""
#: Template for the failure message.
TPL_MSG_FAILURE = r"""
The attempted demultiplexing for flow cell {flowcell[vendor_id]} has failed.
To try again, clean up any output files and mark as "ready" for demultiplexing again.
--
This message was auto-created by digestiflow-demux v{version}.
"""
def write_sample_sheet_v1(writer, flowcell, libraries):
"""Write V1 sample sheet"""
header = [
"FCID",
"Lane",
"SampleID",
"SampleRef",
"Index",
"Description",
"Control",
"Recipe",
"Operator",
"SampleProject",
]
writer.writerow(header)
demux_reads = flowcell.get("demux_reads") or flowcell["planned_reads"]
demux_reads = split_bases_mask(demux_reads)
lens = [count for base, count in demux_reads if base == "B"]
recipe = "PE_indexing" if demux_reads.count("T") > 1 else "SE_indexing"
for lib in libraries:
if lib["barcode2"]:
barcode = "".join((lib["barcode"][: lens[0]], "-", lib["barcode2"][: lens[1]]))
else:
barcode = lib["barcode"][: lens[0]]
for lane in sorted(lib["lanes"]):
data = [
flowcell["vendor_id"],
lane,
lib["name"],
lib["reference"],
barcode,
"",
"N",
recipe,
flowcell["operator"],
"Project",
]
writer.writerow(list(map(str, data)))
def write_sample_sheets_v2(flowcell, libraries, output_dir):
"""Write V2 sample sheets. Write one sample sheet for each bases_mask in the config."""
# re-shuffle dict from lib - lane - bases_mask to bases_mask - lib
d = collections.defaultdict(dict)
for key, lib in enumerate(libraries):
d[lib.get("demux_reads_override", flowcell["demux_reads"])][key] = lib
for bases_mask, libraries in d.items():
os.makedirs(
os.path.join(output_dir, "illumina_basesmask/{}".format(bases_mask)), exist_ok=True
)
with open(
os.path.join(output_dir, "illumina_basesmask/{}/SampleSheet.csv".format(bases_mask)),
"w",
) as f:
writer = csv.writer(f, delimiter=",")
write_sample_sheet_v2(writer, flowcell, libraries.values())
def write_sample_sheet_v2(writer, flowcell, libraries):
"""Write V2 sample sheet"""
# Write [Data] Section
writer.writerow(["[Data]"])
dual_indexing = any(library["barcode2"] for library in libraries)
if dual_indexing:
writer.writerow(["lane", "sample_id", "index", "index2", "sample_project"])
else:
writer.writerow(["lane", "sample_id", "index", "sample_project"])
rows = []
for lib in libraries:
for lane in sorted(lib["lanes"]):
barcodes = lib["barcode"].split(",")
for barcode in barcodes:
row = [lane, lib["name"], barcode]
if dual_indexing:
row.append(lib["barcode2"])
row.append("Project")
rows.append(row)
for row in sorted(rows):
writer.writerow(list(map(str, row)))
def write_sample_sheet_picard(flowcell, libraries, output_dir):
"""Write picard sample sheets, one per lane."""
dual_indexing = any(library["barcode2"] for library in libraries)
if not dual_indexing:
head_barcodes = ["barcode_sequence_1", "barcode_name", "library_name"]
head_samplesheet = ["OUTPUT_PREFIX", "BARCODE_1"]
else:
head_barcodes = ["barcode_sequence_1", "barcode_sequence_2", "barcode_name", "library_name"]
head_samplesheet = ["OUTPUT_PREFIX", "BARCODE_1", "BARCODE_2"]
# re-shuffle dict from lib - lane - barcode to lane - lib - barcode because picard works on lanes
d = collections.defaultdict(dict)
for lib in libraries:
for lane in sorted(lib["lanes"]):
d[lane][lib["name"]] = lib
# add Undetermined to samplesheet as picard crashes otherwise
for lane in d:
d[lane]["Undetermined"] = {"name": "Undetermined", "barcode": "N", "barcode2": ""}
if dual_indexing:
d[lane]["Undetermined"]["barcode2"] = "N"
for lane, libraries in d.items():
barcode_rows = []
samples_rows = []
for lib in libraries.values():
output_prefix = "{lane}/{name}".format(
name=lib["name"], flowcell=flowcell["vendor_id"], lane=lane
)
if dual_indexing:
# we do not pass the barcodes names, so we use the sample name.
barcode_row = [lib["barcode"], lib["barcode2"], lib["name"], lib["name"]]
samples_row = [output_prefix, lib["barcode"], lib["barcode2"]]
else:
barcode_row = [lib["barcode"], lib["name"], lib["name"]]
samples_row = [output_prefix, lib["barcode"]]
# barcode file should not contain dummy for unbarcoded reads, but samplesheet must.
if not lib["name"] == "Undetermined":
barcode_rows.append(barcode_row)
samples_rows.append(samples_row)
os.makedirs(os.path.join(output_dir, "picard_barcodes/{}".format(lane)), exist_ok=True)
with open(
os.path.join(output_dir, "picard_barcodes/{}/barcodes.txt".format(lane)), "w"
) as bf, open(
os.path.join(output_dir, "picard_barcodes/{}/samplesheet.txt".format(lane)), "w"
) as sf:
barcodewriter = csv.writer(bf, delimiter="\t")
sampleswriter = csv.writer(sf, delimiter="\t")
barcodewriter.writerow(head_barcodes)
sampleswriter.writerow(head_samplesheet)
for row in sorted(barcode_rows):
barcodewriter.writerow(list(map(str, row)))
for row in sorted(samples_rows):
sampleswriter.writerow(list(map(str, row)))
def reverse_complement(seq):
"""Return reverse-complemented version of ``seq``."""
mapping = {"A": "T", "a": "t", "C": "G", "c": "g", "G": "C", "g": "c", "T": "A", "t": "a"}
return "".join(reversed([mapping.get(i, i) for i in seq]))
def load_run_info(path_run_info_xml):
"""Load information from ``RunInfo.xml`` file."""
with open(path_run_info_xml, "rt") as xmlf:
xmls = xmlf.read()
root = ET.fromstring(xmls)
tag_run = root.find("Run")
return {
"run_id": tag_run.attrib["Id"],
"instrument": tag_run.find("Instrument").text,
"run_no": tag_run.attrib["Number"],
"flowcell": tag_run.find("Flowcell").text,
}
def load_run_parameters(path_run_parameters_xml):
"""Load information from ``runParameters.xml`` file."""
with open(path_run_parameters_xml, "rt") as xmlf:
xmls = xmlf.read()
root = ET.fromstring(xmls.lower())
version_string = next(root.iter("rtaversion")).text
if version_string.startswith("v"):
version_string = version_string[1:]
rta_version = tuple(map(int, version_string.split(".")))
return {"rta_version": rta_version}
def remove_old_samplesheets(output_dir):
"""Remove old sample sheets so that snakemake does not get confused."""
fls = ["SampleSheet.csv", "picard_barcodes", "illumina_basesmask"]
fls = [os.path.join(output_dir, f) for f in fls]
for f in fls:
if os.path.isdir(f):
shutil.rmtree(f)
elif os.path.exists(f):
os.remove(f)
def create_sample_sheet(config, input_dir, output_dir): # noqa: C901
"""Query the Digestiflow API for the necessary information for building the sample sheet."""
logging.info("Perform API queries and create sample sheet")
client = ApiClient(
api_url=config.api_url, api_token=config.api_token, project_uuid=config.project_uuid
)
logging.debug("Parsing RunInfo.xml file")
run_info = load_run_info(os.path.join(input_dir, "RunInfo.xml"))
path_run_info = glob.glob(os.path.join(input_dir, "?un?arameters.xml"))[0]
run_parameters = load_run_parameters(path_run_info)
logging.debug("RTA version is: %s", run_parameters["rta_version"])
logging.debug("Querying API for flow cell")
try:
flowcell = client.flowcell_resolve(
instrument_id=run_info["instrument"],
run_no=run_info["run_no"],
flowcell_id=run_info["flowcell"],
)
except ApiException as e:
raise ApiProblemException("Problem querying API for flow cell") from e
if flowcell is None:
logging.warning("Could not resolve flow cell via API. Not proceeding.")
return None
if flowcell["status_conversion"] != "ready" and not config.force_demultiplexing:
logging.warning('Status is not "ready", will skip flow cell.')
return None
if not flowcell["libraries"]:
logging.warning("There are no libraries in flow cell. I'm refusing to continue.")
return None
if not config.api_read_only:
try:
client.flowcell_update(flowcell["sodar_uuid"], status_conversion="in_progress")
except ApiException as e:
raise ApiProblemException('Could not update conversion status to "in_progress"') from e
logging.debug("Querying API for sequencing machine information")
try:
sequencer = client.sequencer_retrieve(sequencer=run_info["instrument"])
except ApiException as e:
raise ApiProblemException("Problem querying API for sequencer") from e
logging.debug("Querying for barcode information")
libraries = []
demux_reads_override = set()
for library in flowcell["libraries"]:
if not library["lane_numbers"]:
continue # do not consider library any further
if library.get("barcode_seq"):
barcode_seq = library.get("barcode_seq")
elif library.get("barcode"):
try:
barcode = client.barcodesetentry_retrieve(barcodesetentry=library.get("barcode"))
except ApiException as e:
raise ApiProblemException("Problem querying API for barcode #1") from e
barcode_seq = barcode["sequence"]
else:
barcode_seq = ""
if library.get("barcode_seq2"):
barcode_seq2 = library.get("barcode_seq2")
elif library.get("barcode2"):
try:
barcode2 = client.barcodesetentry_retrieve(barcodesetentry=library.get("barcode2"))
except ApiException as e:
raise ApiProblemException("Problem querying API for barcode #2") from e
barcode_seq2 = barcode2["sequence"]
else:
barcode_seq2 = ""
if sequencer["dual_index_workflow"] == "B":
barcode_seq2 = reverse_complement(barcode_seq2)
if library["demux_reads"]:
demux_reads = library["demux_reads"]
else:
demux_reads = flowcell["demux_reads"] or flowcell["planned_reads"]
try:
demux_reads = return_bases_mask(flowcell["planned_reads"], demux_reads, "picard")
demux_reads_override.add(demux_reads)
except BaseMaskConfigException as e:
logging.warning("There is a problem with the bases mask. %s", e)
logging.exception(e, exc_info=True)
libraries.append(
{
"name": library["name"],
"reference": library["reference"],
"barcode": barcode_seq,
"barcode2": barcode_seq2,
"lanes": library["lane_numbers"],
"demux_reads_override": demux_reads,
}
)
# Get delivery type from flowcell information.
delivery_type = flowcell["delivery_type"].split("_")
# Normalize bases masks, decide if paired-end, find all custom bases_masks
planned_reads = flowcell["planned_reads"]
demux_reads = flowcell.get("demux_reads") or planned_reads
demux_reads = return_bases_mask(planned_reads, demux_reads, "picard")
flowcell["demux_reads"] = demux_reads # not used by bcl2fastq2
flowcell["demux_reads_override"] = list(sorted(demux_reads_override))
rta_version = run_parameters["rta_version"]
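    # Choose the demultiplexing tool: bases masks containing "M" (molecular
    # index/UMI cycles) are routed to the Picard-based workflow; otherwise, when
    # bcl2fastq is requested, bcl2fastq2 is used for RTA >= 1.18.54 and
    # bcl2fastq1 for older RTA versions; any other choice falls back to Picard.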
if "M" in flowcell["demux_reads"]: # TODO: refine condition
demux_tool = "picard"
elif config.demux_tool == "bcl2fastq" and rta_version >= (1, 18, 54):
demux_tool = "bcl2fastq2"
elif config.demux_tool == "bcl2fastq":
demux_tool = "bcl2fastq1"
else:
demux_tool = "picard"
logging.info("Using demux tool %s", demux_tool)
bcl2fastq2_params = {
"with_failed_reads": config.with_failed_reads,
"create_fastq_for_index_reads": flowcell["create_fastq_for_index_reads"],
"minimum_trimmed_read_length": flowcell["minimum_trimmed_read_length"],
"mask_short_adapter_reads": flowcell["mask_short_adapter_reads"],
}
logging.debug("Writing out demultiplexing configuration")
# Get barcode mismatch count or default.
if flowcell["barcode_mismatches"] is None:
if flowcell["rta_version"] == 1:
barcode_mismatches = 0
else:
barcode_mismatches = 1
else:
barcode_mismatches = flowcell["barcode_mismatches"]
with open(os.path.join(output_dir, "demux_config.json"), "wt") as jsonf:
config_json = {
"barcode_mismatches": barcode_mismatches,
"bcl2fastq2_params": bcl2fastq2_params,
"cores": config.cores,
"delivery_type": delivery_type,
"demux_tool": demux_tool,
"flowcell": {**flowcell, "libraries": libraries},
"input_dir": input_dir,
"lanes": config.lanes,
"output_dir": output_dir,
"rta_version": flowcell["rta_version"],
"tiles": config.tiles,
}
json.dump(config_json, jsonf)
logging.debug("Writing out sample sheet information")
remove_old_samplesheets(output_dir)
if demux_tool == "bcl2fastq1":
with open(os.path.join(output_dir, "SampleSheet.csv"), "wt") as csvf:
write_sample_sheet_v1(csv.writer(csvf), flowcell, libraries)
elif demux_tool == "picard":
write_sample_sheet_picard(flowcell, libraries, output_dir)
else:
write_sample_sheets_v2(flowcell, libraries, output_dir)
return flowcell # Everything is fine
def send_flowcell_success_message(client, flowcell, output_dir, *log_files):
if "seq" in flowcell["delivery_type"]:
# Remove log files that do not exist.
existing_log_files = [p for p in log_files if os.path.exists(p)]
missing_log_files = [p for p in log_files if not os.path.exists(p)]
        # Create renamed (and potentially compressed) copies of the MultiQC files.
path_in = os.path.join(output_dir, "multiqc/multiqc_%s")
with tempfile.TemporaryDirectory() as tempdir:
path_out = os.path.join(tempdir, "MultiQC_%%s_%s.%%s" % flowcell["vendor_id"])
with open(path_in % "report.html", "rb") as f_in:
with gzip.open(path_out % ("Report", "html.gz"), "wb") as f_out:
shutil.copyfileobj(f_in, f_out)
shutil.copyfile(path_in % "data.zip", path_out % ("Data", "zip"))
# Post with renamed files.
return client.message_send(
flowcell_uuid=flowcell["sodar_uuid"],
subject="Demultiplexing succeeded for flow cell %s" % flowcell["vendor_id"],
body=TPL_MSG_SUCCESS.format(
flowcell=flowcell,
version=__version__,
missing_log_files="\n".join(missing_log_files) or "none; all found",
),
attachments=list(
itertools.chain(
[path_out % ("Report", "html.gz"), path_out % ("Data", "zip")],
existing_log_files,
)
),
)
else:
# No sequences generated, no MultiQC created.
return client.message_send(
flowcell_uuid=flowcell["sodar_uuid"],
subject="Demultiplexing succeeded for flow cell %s" % flowcell["vendor_id"],
body=TPL_MSG_SUCCESS.format(flowcell=flowcell, version=__version__),
attachments=list(log_files),
)
def send_flowcell_failure_message(client, flowcell, *log_files):
return client.message_send(
flowcell_uuid=flowcell["sodar_uuid"],
subject="Demultiplexing FAILED for flow cell %s" % flowcell["vendor_id"],
body=TPL_MSG_FAILURE.format(flowcell=flowcell, version=__version__),
attachments=log_files,
)
def async_tee_pipe(process, input_file, out_file, out_file2, mutex):
    """Start a thread that tee-pipes from input_file into out_file and out_file2, serialized by the mutex, and return it."""
logging_thread = Thread(target=tee_pipe, args=(process, input_file, out_file, out_file2, mutex))
logging_thread.start()
return logging_thread
def tee_pipe(process, input_file, out_file, out_stream, mutex):
    """Tee-pipe lines from input_file into out_file (raw bytes) and out_stream (decoded text), serialized by the mutex."""
while 1:
line = input_file.readline()
if not line and process.poll() is not None:
break
else:
with mutex:
out_stream.write(line.decode("utf-8"))
out_file.write(line)
def launch_snakemake(config, flowcell, output_dir, work_dir):
"""Launch Snakemake and execute the demultiplexing"""
logging.info("Temporary directory is %s", work_dir)
logging.info("Start Snakemake workflow for demultiplexing")
client = ApiClient(
api_url=config.api_url, api_token=config.api_token, project_uuid=config.project_uuid
)
output_log_dir = os.path.join(output_dir, "log")
output_qc_dir = os.path.join(output_dir, "multiqc")
drmaa_log_dirs = [
os.path.join(output_log_dir, "digestiflow-demux-snakemake.log.gz"),
os.path.join(output_log_dir, "digestiflow-demux.log"),
]
if "seq" in flowcell["delivery_type"]:
drmaa_log_dirs += [
os.path.join(output_qc_dir, "multiqc_data.zip"),
os.path.join(output_qc_dir, "multiqc_report.html"),
]
if config.only_post_message:
for path in drmaa_log_dirs:
if not os.path.exists(path):
raise MissingOutputFile("Cannot post message with %s missing" % path)
if config.only_post_message:
logging.info("Only posting message, not running demultiplexing itself.")
failure = False
else:
argv = [
"--snakefile",
PATH_SNAKEFILE,
"--directory",
work_dir,
"--configfile",
os.path.join(output_dir, "demux_config.json"),
"--cores",
config.cores,
"--drmaa-log-dir",
output_log_dir,
"--max-jobs-per-second",
config.max_jobs_per_second,
"--use-conda",
"--config",
]
if config.jobscript:
argv += ["--jobscript", config.jobscript]
if config.verbose:
argv += ["--verbose", "--printshellcmds"]
if config.drmaa:
argv += ["--drmaa", config.drmaa]
if config.cluster_config:
argv += ["--cluster-config", config.cluster_config]
try:
subprocess.check_output(["which", "mamba"], stderr=subprocess.PIPE)
argv += ["--conda-frontend", "mamba"]
except subprocess.CalledProcessError:
pass
argv = list(map(str, argv))
logging.info("Executing: snakemake %s", " ".join(argv))
try:
# Launch Snakemake
proc = subprocess.Popen(
["snakemake"] + argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
# Write output to temporary log file, to be attached later.
log_file_path = os.path.join(config.log_path, "digestiflow-demux-snakemake.log.gz")
with gzip.open(log_file_path, "wb") as log_file:
mutex = Lock()
logger_stderr = async_tee_pipe(proc, proc.stderr, log_file, sys.stderr, mutex)
logger_stdout = async_tee_pipe(proc, proc.stdout, log_file, sys.stdout, mutex)
logger_stderr.join()
logger_stdout.join()
# Copy out log file to log directory.
os.makedirs(output_log_dir, exist_ok=True)
shutil.copy(log_file_path, output_log_dir)
failure = proc.returncode != 0
except WorkflowError as e:
logging.warning("Running demultiplexing failed: %s", e)
failure = True
# Paths to tarballs with Illumina HTML reports.
paths_html_reports = [
os.path.join(output_dir, "html_report_%s.tar.gz" % bases_mask)
for bases_mask in flowcell["demux_reads_override"]
]
if not failure and not config.api_read_only:
message = send_flowcell_success_message(
client, flowcell, output_dir, log_file_path, *paths_html_reports
)
logging.info("Marking flowcell as complete...")
try:
client.flowcell_update(flowcell["sodar_uuid"], status_conversion="complete")
except ApiException as e:
logging.warning("Could not update conversion state to complete via API: %s", e)
logging.info("Done running Snakemake.")
elif flowcell and not config.api_read_only:
message = send_flowcell_failure_message(client, flowcell, log_file_path)
logging.info("Marking flowcell as failed...")
try:
client.flowcell_update(flowcell["sodar_uuid"], status_conversion="failed")
except ApiException as e:
logging.warning("Could not update conversion state to failed via API: %s", e)
else:
message = None
return (not failure, message, flowcell, client)
def perform_demultiplexing(config, input_dir, output_dir):
"""Prepare and execute the demultiplexing with the Snakemake workflow."""
logging.info("Starting to process input directory %s", input_dir)
logging.info("Output will go to %s", output_dir)
logging.debug("Creating output directory %s", output_dir)
os.makedirs(output_dir, exist_ok=True)
flowcell = create_sample_sheet(config, input_dir, output_dir)
if not flowcell:
return False, None, None, None
if config.work_dir:
logging.info("Using work directory %s", config.work_dir)
return launch_snakemake(config, flowcell, output_dir, config.work_dir)
elif config.keep_work_dir:
logging.info("Setup non-temporary work directory")
return launch_snakemake(config, flowcell, output_dir, tempfile.mkdtemp("-cubi-demux"))
else:
logging.info("Setup temporary work directory")
with tempfile.TemporaryDirectory("-cubi-demux") as work_dir:
return launch_snakemake(config, flowcell, output_dir, work_dir)
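# Illustrative sketch (not part of the original tool): ``config`` can be any
# object exposing the attributes read above. All values are placeholders, and a
# real run additionally needs a reachable Digestiflow API, DRMAA/conda setup,
# and a sequencer output folder.
if __name__ == "__main__":
    import types

    example_config = types.SimpleNamespace(
        api_url="https://digestiflow.example.org/api",   # placeholder URL
        api_token="0000-token-0000",                     # placeholder token
        project_uuid="00000000-0000-0000-0000-000000000000",
        force_demultiplexing=False,
        api_read_only=True,
        demux_tool="bcl2fastq",
        with_failed_reads=False,
        cores=8,
        lanes=None,
        tiles=None,
        only_post_message=False,
        max_jobs_per_second=1,
        jobscript=None,
        verbose=False,
        drmaa=None,
        cluster_config=None,
        log_path="/tmp",
        work_dir=None,
        keep_work_dir=False,
    )
    perform_demultiplexing(example_config, "/path/to/run_folder", "/path/to/output_folder")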
|
# -*- coding: utf-8 -*-
"""
:Author: Jaekyoung Kim
:Date: 2017. 11. 21.
"""
import matplotlib.pyplot as plt
from statistics.statistic_calculator import get_stats_results
from clustering.k_means import get_highest_volatility_group
from data.data_reader import get_market_capitalization_sum
def show_window_k_scatter(window_from, window_to, k_from, k_to):
"""
    Show the confidence intervals as a scatter plot over (k, window).
"""
    # Get the confidence interval by window and by the number of groups.
stats_results = get_stats_results(window_from, window_to, k_from, k_to)
    # Extract the columns to plot.
x = stats_results['k']
y = stats_results['window']
z = stats_results['interval']
    # Plot a scatter colored by the interval value.
scatter = plt.scatter(x, y, c=z)
plt.colorbar(scatter)
plt.xlabel('k')
plt.ylabel('window')
plt.show()
def show_market_capitalization_line_graph(window, k):
"""
    Show line plots of the highest-volatility group's market-cap portion and selected market-cap sum.
"""
highest_volatility_group = get_highest_volatility_group(window, k)
highest_volatility_market_capitalization_sum = get_market_capitalization_sum(highest_volatility_group)
highest_volatility_market_capitalization_sum['portion'].plot()
plt.ylim([0, 1])
plt.title('portion')
plt.show()
highest_volatility_market_capitalization_sum['selected_sum'].plot()
plt.ylim([0, highest_volatility_market_capitalization_sum['selected_sum'].max() * 2])
plt.title('market capitalization')
plt.show()
def show_highest_volatility_market_capitalization_portion_graph(window, k):
"""
Show the portion of market capitalization of a highest volatility group.
"""
highest_volatility_group = get_highest_volatility_group(window, k)
highest_volatility_market_capitalization_sum = get_market_capitalization_sum(highest_volatility_group)
df_x = highest_volatility_market_capitalization_sum.index
df_y = highest_volatility_market_capitalization_sum['portion']
plt.ylim([0, 1])
plt.fill_between(df_x, 0, df_y, facecolor='red', interpolate=True, alpha=0.3)
plt.fill_between(df_x, df_y, 1, facecolor='blue', interpolate=True, alpha=0.1)
plt.xlabel("Datetime")
plt.ylabel("Ratio against the market cap")
plt.plot(df_x, df_y, color='black', lw=1)
plt.grid(True)
plt.show()
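# Illustrative usage sketch (not part of the original module). The window/k
# ranges below are hypothetical placeholders; running this requires the
# project's market data and the imported statistics/clustering packages.
if __name__ == '__main__':
    show_window_k_scatter(window_from=5, window_to=60, k_from=2, k_to=10)
    show_market_capitalization_line_graph(window=20, k=5)
    show_highest_volatility_market_capitalization_portion_graph(window=20, k=5)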
|
# -*- coding: utf-8 -*-
from django.conf.urls import include, patterns, url  # imports required by the patterns below (Django < 1.10)
from filebrowser.sites import site as filebrowser_site

# Prepend the filebrowser admin URLs to the project's existing ``urlpatterns``.
urlpatterns = patterns('',
    url(r'^admin/filebrowser/', include(filebrowser_site.urls)),
) + urlpatterns
|
# Generated by Django 2.0.1 on 2018-03-15 08:19
from django.db import migrations, models
import posts.models
class Migration(migrations.Migration):
dependencies = [
('posts', '0004_auto_20180315_1343'),
]
operations = [
migrations.AlterField(
model_name='post',
name='image',
field=models.ImageField(default='s', upload_to=posts.models.upload_location),
preserve_default=False,
),
]
|
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 8 20:12:07 2020
@author: DS
"""
"""
Example of web scraping using Python: download every video in a YouTube playlist.
"""
from pytube import YouTube
import bs4
import requests
playlist=[]
url=input("Enter the Youtube Playlist URL : ") #Takes the Playlist Link
try:
data = requests.get(url)
except requests.exceptions.RequestException:
    print("An exception occurred while fetching the playlist: unable to fetch data from the URL, or the link is not valid.")
    exit()
soup=bs4.BeautifulSoup(data.text,'html.parser')
for links in soup.find_all('a'):
    link = links.get('href')
    if link and link[0:6] == "/watch":
link="https://www.youtube.com"+link
link=str(link)
playlist.append(link)
del playlist[0:2]
count = 1
playlist = sorted(set(playlist), key = playlist.index)
vquality=input("Enter the video quality (1080,720,480,360,240,144):")
vquality=vquality+"p"
for link in playlist:
try:
yt = YouTube(link)
videos= yt.streams.filter(mime_type="video/mp4",res=vquality)
video = videos[0]
    except Exception:
        print("Exception occurred: either the video is not available in the quality you set, or it cannot be downloaded. Skipping video {number}".format(number=count))
count += 1
continue
video.download("Downloads")
print(yt.title+" - has been downloaded !!!")
count += 1
|
import time
import os
from multiprocessing import Process
from gtmcore.configuration import Configuration
from gtmcore.files.lock import FileWriteLock
from gtmcore.fixtures import mock_config_file
def write_function(filename: str, delay: int, value: str, lock: FileWriteLock) -> None:
"""
A test function that appends to a file after a delay
"""
time.sleep(delay)
with lock.lock():
with open(filename, 'at') as f:
f.write(value)
class TestFileWriteLock(object):
def test_multiple_acquires(self, mock_config_file):
"""Test trying to lock around multiple writes"""
conf_instance, working_dir = mock_config_file
config = Configuration()
filename = os.path.join(working_dir, "testfile1.dat")
lock = FileWriteLock(filename, config)
proc1 = Process(target=write_function, args=(filename, 1, "1", lock))
proc1.start()
proc2 = Process(target=write_function, args=(filename, 0, "2", lock))
proc2.start()
proc3 = Process(target=write_function, args=(filename, .5, "3", lock))
proc3.start()
time.sleep(7)
proc1.join()
proc1.terminate()
proc2.join()
proc2.terminate()
proc3.join()
proc3.terminate()
with open(filename, 'rt') as f:
data = f.read()
assert data == "231"
def test_lock_independence(self, mock_config_file):
"""Test to verify different files have different locks automatically"""
conf_instance, working_dir = mock_config_file
config = Configuration()
filename1 = os.path.join(working_dir, "testfile1.dat")
lock1 = FileWriteLock(filename1, config)
filename2 = os.path.join(working_dir, "testfile2.dat")
lock2 = FileWriteLock(filename2, config)
proc1 = Process(target=write_function, args=(filename1, 1, "1", lock1))
proc1.start()
proc2 = Process(target=write_function, args=(filename1, 6, "2", lock1))
proc2.start()
proc3 = Process(target=write_function, args=(filename2, 0, "1", lock2))
proc3.start()
proc4 = Process(target=write_function, args=(filename2, 1, "2", lock2))
proc4.start()
time.sleep(3)
with open(filename1, 'rt') as f:
assert f.read() == '1'
with open(filename2, 'rt') as f:
assert f.read() == '12'
proc1.join()
proc1.terminate()
proc2.join()
proc2.terminate()
proc3.join()
proc3.terminate()
proc4.join()
proc4.terminate()
|
import requests
class Mailgun:
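    """Thin wrapper around the Mailgun HTTP messages API.

    Illustrative usage (not part of the original module; the domain, key, and
    addresses below are placeholders and sending requires valid credentials)::

        Mailgun.MAILGUN_DOMAIN = "example.mailgun.org"
        Mailgun.MAILGUN_API_KEY = "key-placeholder"
        Mailgun.FROM_NAME = "Example App"
        Mailgun.FROM_EMAIL = "noreply@example.com"
        Mailgun.send(["recipient@example.com"], "Test subject", "Hello from Mailgun!")
    """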
MAILGUN_DOMAIN = ''
MAILGUN_API_KEY = ''
FROM_NAME = ''
FROM_EMAIL = ''
@classmethod
def send(cls, to_emails, subject, content):
requests.post("https://api.mailgun.net/v3/{}/messages".format(cls.MAILGUN_DOMAIN),
auth=("api", cls.MAILGUN_API_KEY),
data={"from": "{} <{}>".format(cls.FROM_NAME, cls.FROM_EMAIL),
"to": to_emails,
"subject": subject,
"text": content})
|
from source.db_models.bets_models import *
from source.db_models.nhl_models import *
from datetime import date, datetime, timedelta
from sqlalchemy import func, and_, or_, not_, asc, desc
import pandas as pd
import sqlalchemy
from sqlalchemy import select
from sqlalchemy.orm import aliased
from tqdm import tqdm
from source.nhl_handler import *
import csv
def generate_data_for(player_id, nhl_session, games_to_go_back, season):
PlayerTeamStats = aliased(TeamStats)
OppTeamStats = aliased(TeamStats)
query = (
select(SkaterStats, Game, PlayerTeamStats, OppTeamStats)
.where(SkaterStats.playerId == player_id)
.join(Game, SkaterStats.gamePk == Game.gamePk)
.join(PlayerTeamStats, and_(SkaterStats.gamePk == PlayerTeamStats.gamePk, PlayerTeamStats.teamId == SkaterStats.team))
.join(OppTeamStats, and_(SkaterStats.gamePk == OppTeamStats.gamePk, OppTeamStats.teamId != SkaterStats.team))
.order_by(asc(Game.gameDate))
)
playerStatsForGames = pd.read_sql(query, nhl_session.bind)
playerStatsForGames.columns = [u + "_Skater" for u in SkaterStats.__table__.columns.keys()]\
+ [u + "_Game" for u in Game.__table__.columns.keys()] \
+ [u + "_PlayerTeam" for u in PlayerTeamStats.__table__.columns.keys()] \
+ [u + "_OppTeam" for u in OppTeamStats.__table__.columns.keys()]
playerStatsForGames["ans_O_1.5"] = (playerStatsForGames["shots_Skater"] > 1.5).astype(int)
playerStatsForGames["ans_O_2.5"] = (playerStatsForGames["shots_Skater"] > 2.5).astype(int)
playerStatsForGames["ans_O_3.5"] = (playerStatsForGames["shots_Skater"] > 3.5).astype(int)
playerStatsForGames["ans_O_4.5"] = (playerStatsForGames["shots_Skater"] > 4.5).astype(int)
df = clean_data(playerStatsForGames)
df = generate_prediction_data(df, nhl_session)
# One hot encode the categorical variables
df = pd.get_dummies(df, columns=one_hot_cols)
return df
def replace_team_data(df, nhl_session):
df.drop(forbidden_stats_PlayerTeam, axis=1, inplace=True)
df.drop(forbidden_stats_OppTeam, axis=1, inplace=True)
# Create empty df to fill
final_df = pd.DataFrame()
    # Loop through each game
for i, row in df.iterrows():
playerTeamId = row['teamId_PlayerTeam']
oppTeamId = row['teamId_OppTeam']
gamePk = row['gamePk']
season = row['season']
# Get the team stats for the player and the opponent
playerTeamStats = get_team_stats(playerTeamId, season, nhl_session, "PlayerTeam")
oppTeamStats = get_team_stats(oppTeamId, season, nhl_session, "OppTeam")
# Get the current games team stats
current_game_org_stats = df[df.gamePk == gamePk].reset_index()
current_game_team_stats = playerTeamStats[playerTeamStats.gamePk == gamePk].reset_index()
current_game_opp_stats = oppTeamStats[oppTeamStats.gamePk == gamePk].reset_index()
# Construct the new row
result = pd.concat([current_game_org_stats, current_game_team_stats], axis=1, join='inner')
result = pd.concat([result, current_game_opp_stats], axis=1, join='inner')
# Remove all duplicate columns in result
result = result.loc[:,~result.columns.duplicated()]
# Save to final df
final_df = pd.concat([final_df, result], axis=0, ignore_index=True)
return final_df
def get_team_stats(teamId, season, nhl_session, suffix):
# Get the team stats for this game
query = (
select(Game, TeamStats)
.where(and_(TeamStats.teamId == teamId, Game.season == season))
.join(Game, TeamStats.gamePk == Game.gamePk)
.order_by(asc(Game.gameDate))
)
teamStats = pd.read_sql(query, nhl_session.bind)
teamStats.columns = [u + "_Game" for u in Game.__table__.columns.keys()] \
+ [u + "_" + suffix for u in TeamStats.__table__.columns.keys()]
# Remove unwanted columns
teamStats = clean_data(teamStats, 'ignore')
forbidden_stats = forbidden_stats_PlayerTeam if suffix == "PlayerTeam" else forbidden_stats_OppTeam
# Create new columns for the team stats
for stat in forbidden_stats:
teamStats[f'{stat}_ema_1_game_back'] = teamStats[stat].ewm(span=1, min_periods=1).mean().shift(1).copy()
teamStats[f'{stat}_ema_3_game_back'] = teamStats[stat].ewm(span=3, min_periods=1).mean().shift(1).copy()
teamStats[f'{stat}_ema_10_game_back'] = teamStats[stat].ewm(span=10, min_periods=1).mean().shift(1).copy()
teamStats[f'{stat}_ema_1_season_back'] = teamStats[stat].ewm(span=10000, min_periods=1).mean().shift(1).copy()
teamStats = teamStats.drop(forbidden_stats, axis=1)
return teamStats
def generate_prediction_data(df, nhl_session):
# Remove unwanted team data and replace it with reasonable values
df = replace_team_data(df, nhl_session)
# Group the data by season
df_grouped = df.groupby(['season'])
# Create empty df to fill
final_df = pd.DataFrame()
# loop through the seasons
for season, season_df in df_grouped:
for stat in forbidden_stats_Skater:
# Calculate the EMA for each season
season_df[f'{stat}_ema_1_games_back'] = season_df[stat].ewm(span=1, min_periods=1).mean().shift(1).copy()
season_df[f'{stat}_ema_3_season_back'] = season_df[stat].ewm(span=3, min_periods=1).mean().shift(1).copy()
season_df[f'{stat}_ema_10_season_back'] = season_df[stat].ewm(span=10, min_periods=1).mean().shift(1).copy()
season_df[f'{stat}_ema_1_season_back'] = season_df[stat].ewm(span=10000, min_periods=1).mean().shift(1).copy()
# Save data to the final df
final_df = pd.concat([final_df, season_df])
# Remove forbidden_stats from df
final_df = final_df.drop(forbidden_stats_Skater, axis=1)
return final_df
def clean_data(df, errors='raise'):
# Drop the unnecessary columns
df.drop(remove_cols, axis=1, inplace=True, errors=errors)
# Rename columns
df.rename(columns=rename_cols, inplace=True, errors=errors)
# In each column of "fill_with_zeros" where there is not a number, put a zero
for col in fill_with_zeros:
if col in df.columns:
df[col].replace(r'^\s*$', 0, regex=True, inplace=True)
# Convert timestamps to datetime objects
for col in time_to_sec:
if col in df.columns:
df[col] = pd.to_timedelta(df[col].astype(str)).dt.total_seconds().astype(int)
return df
remove_cols = [
"added_Skater",
"team_Skater",
"updated_Skater",
"gamePk_Skater",
"abstractGameState_Game",
"detailedState_Game",
"statusCode_Game",
"startTimeTBD_Game",
"homeTeamId_Game",
"awayTeamId_Game",
"added_Game",
"updated_Game",
"gamePk_PlayerTeam",
"leagueRecordType_PlayerTeam",
"added_PlayerTeam",
"updated_PlayerTeam",
"gamePk_OppTeam",
"leagueRecordType_OppTeam",
"added_OppTeam",
"updated_OppTeam",
"isHome_OppTeam"
]
rename_cols = {
"playerId_Skater": "playerId",
"gamePk_Game": "gamePk",
"gameDate_Game": "date",
"codedGameState_Game": "gameState",
"isHome_PlayerTeam": "isHome",
"season_Game": "season"
}
one_hot_cols = [
"position_Skater",
"gameType_Game",
"season"
]
fill_with_zeros = [
"ot_OppTeam",
"ot_PlayerTeam"
]
time_to_sec = [
"timeOnIce_Skater",
"evenTimeOnIce_Skater",
"powerPlayTimeOnIce_Skater",
"shortHandedTimeOnIce_Skater"
]
forbidden_stats_Skater = [
"timeOnIce_Skater",
"assists_Skater",
"goals_Skater",
"shots_Skater",
"hits_Skater",
"powerPlayGoals_Skater",
"powerPlayAssists_Skater",
"penaltyMinutes_Skater",
"faceOffWins_Skater",
"faceoffTaken_Skater",
"takeaways_Skater",
"giveaways_Skater",
"shortHandedGoals_Skater",
"shortHandedAssists_Skater",
"blocked_Skater",
"plusMinus_Skater",
"evenTimeOnIce_Skater",
"powerPlayTimeOnIce_Skater",
"shortHandedTimeOnIce_Skater"
]
forbidden_stats_PlayerTeam = [
"goals_PlayerTeam",
"pim_PlayerTeam",
"shots_PlayerTeam",
"powerPlayPercentage_PlayerTeam",
"powerPlayGoals_PlayerTeam",
"powerPlayOpportunities_PlayerTeam",
"faceOffWinPercentage_PlayerTeam",
"blocked_PlayerTeam",
"takeaways_PlayerTeam",
"giveaways_PlayerTeam",
"hits_PlayerTeam",
"goalsAgainst_PlayerTeam",
"pimAgainst_PlayerTeam",
"shotsAgainst_PlayerTeam",
"powerPlayPercentageAgainst_PlayerTeam",
"powerPlayGoalsAgainst_PlayerTeam",
"powerPlayOpportunitiesAgainst_PlayerTeam",
"faceOffWinPercentageAgainst_PlayerTeam",
"blockedAgainst_PlayerTeam",
"takeawaysAgainst_PlayerTeam",
"giveawaysAgainst_PlayerTeam",
"hitsAgainst_PlayerTeam",
"wins_PlayerTeam",
"losses_PlayerTeam",
"ot_PlayerTeam",
"score_PlayerTeam"
]
forbidden_stats_OppTeam = [
"goals_OppTeam",
"pim_OppTeam",
"shots_OppTeam",
"powerPlayPercentage_OppTeam",
"powerPlayGoals_OppTeam",
"powerPlayOpportunities_OppTeam",
"faceOffWinPercentage_OppTeam",
"blocked_OppTeam",
"takeaways_OppTeam",
"giveaways_OppTeam",
"hits_OppTeam",
"goalsAgainst_OppTeam",
"pimAgainst_OppTeam",
"shotsAgainst_OppTeam",
"powerPlayPercentageAgainst_OppTeam",
"powerPlayGoalsAgainst_OppTeam",
"powerPlayOpportunitiesAgainst_OppTeam",
"faceOffWinPercentageAgainst_OppTeam",
"blockedAgainst_OppTeam",
"takeawaysAgainst_OppTeam",
"giveawaysAgainst_OppTeam",
"hitsAgainst_OppTeam",
"wins_OppTeam",
"losses_OppTeam",
"ot_OppTeam",
"score_OppTeam"
]
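# Illustrative sketch (not part of the original pipeline): the feature pattern
# used above, ewm(span=N).mean().shift(1), averages only rows *before* the
# current game, so a game's own outcome never leaks into its features.
if __name__ == "__main__":
    shots = pd.Series([2, 4, 1, 3, 5], name="shots")
    ema_before_game = shots.ewm(span=3, min_periods=1).mean().shift(1)
    print(pd.DataFrame({"shots": shots, "shots_ema_3_back": ema_before_game}))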
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "mengdj@outlook.com"
import copy
import struct
import sys
from io import BytesIO
"""Compatibility handling for Python 2 and 3."""
try:
from itertools import izip
compat_izip = izip
except ImportError:
compat_izip = zip
if sys.version_info < (3,):
def iteritems(d, **kw):
return d.iteritems(**kw)
else:
def iteritems(d, **kw):
return iter(d.items(**kw))
class _Meta(type):
    """Metaclass for struct-backed classes; customizes __new__."""
def __new__(cls, clsname, clsbases, clsdict):
t = type.__new__(cls, clsname, clsbases, clsdict)
st = getattr(t, '__hdr__', None)
if st is not None:
            # Restrict exported attributes to the header fields plus 'data' via __slots__
clsdict['__slots__'] = [x[0] for x in st] + ['data']
t = type.__new__(cls, clsname, clsbases, clsdict)
            # Field names
t.__hdr_fields__ = [x[0] for x in st]
            # struct format string (big-endian by default)
t.__hdr_fmt__ = getattr(t, '__byte_order__', '>') + ''.join([x[1] for x in st])
            # Packed header length in bytes
t.__hdr_len__ = struct.calcsize(t.__hdr_fmt__)
            # Default value for each field
t.__hdr_defaults__ = dict(compat_izip(
t.__hdr_fields__, [x[2] for x in st]))
return t
class _CData(_Meta("Temp", (object,), {})):
def __init__(self, *args, **kwargs):
self.data = b''
if args:
try:
self.unpack(args[0])
except struct.error:
if len(args[0]) < self.__hdr_len__:
                    raise Exception('%s: buffer too short for header (%d < %d bytes)' % (self.__class__.__name__, len(args[0]), self.__hdr_len__))
raise Exception('invalid %s: %r' %
(self.__class__.__name__, args[0]))
else:
            # Assign each header field its default value
for k in self.__hdr_fields__:
setattr(self, k, copy.copy(self.__hdr_defaults__[k]))
            # Apply keyword-argument overrides (2/3-compatible iteration)
for k, v in iteritems(kwargs):
setattr(self, k, v)
def __len__(self):
return self.__hdr_len__ + len(self.data)
def __getitem__(self, k):
try:
return getattr(self, k)
except AttributeError:
raise KeyError
def __repr__(self):
l = []
for field_name, _, _ in getattr(self, '__hdr__', []):
field_value = getattr(self, field_name)
if field_value != self.__hdr_defaults__[field_name]:
if field_name[0] != '_':
l.append('%s=%r' % (field_name, field_value)) # (1)
else:
for prop_name in field_name.split('_'): # (2)
if isinstance(getattr(self.__class__, prop_name, None), property):
l.append('%s=%r' % (prop_name, getattr(self, prop_name)))
l.extend(
['%s=%r' % (attr_name, attr_value)
for attr_name, attr_value in iteritems(self.__dict__)
if attr_name[0] != '_' # exclude _private attributes
and attr_name != self.data.__class__.__name__.lower()]) # exclude fields like ip.udp
if self.data:
l.append('data=%r' % self.data)
return '%s(%s)' % (self.__class__.__name__, ', '.join(l))
def __str__(self):
return str(self.__bytes__())
def __bytes__(self):
return self.pack_hdr() + bytes(self.data)
def pack_hdr(self):
"""Return packed header string."""
try:
return struct.pack(self.__hdr_fmt__,
*[getattr(self, k) for k in self.__hdr_fields__])
except struct.error:
vals = []
for k in self.__hdr_fields__:
v = getattr(self, k)
if isinstance(v, tuple):
vals.extend(v)
else:
vals.append(v)
try:
return struct.pack(self.__hdr_fmt__, *vals)
except struct.error as e:
raise Exception(str(e))
def pack(self):
"""Return packed header + self.data string."""
return bytes(self)
def unpack(self, buf):
for k, v in compat_izip(self.__hdr_fields__,
struct.unpack(self.__hdr_fmt__, buf[:self.__hdr_len__])):
setattr(self, k, v)
self.data = buf[self.__hdr_len__:]
class ProcData(object):
__upper = 0
def __init__(self, upper=None):
self.__upper = upper
@property
    def data(self):
        """Return the upper-layer payload; fragmentation is not handled."""
pass
@property
def upper(self):
return self.__upper
class AppProcData(object):
    """Interface to be implemented by the application layer."""
def __init__(self):
pass
    def find(self, data):
        """Validate the data and finish initialization; return self on success to allow chaining."""
pass
class BytesOrder(object):
    """Utility class for byte-order (endianness) conversions."""
order = "big"
@staticmethod
def bytes2int(data, ord=""):
if ord == "":
ord = BytesOrder.order
return int.from_bytes(data, ord)
class BytesBuffer(BytesIO):
    """Wrapper around BytesIO that adds reset and cached length tracking."""
    # Cached length of the written data
__length = 0
    # Number of write calls
__count = 0
    def __len__(self):
        """Return the buffer length via getbuffer() (no copy) and cache the result."""
if self.__length == 0:
self.__length = len(self.getbuffer())
return self.__length
    def clear(self):
        """Clear the buffer and reset the position; the seek(0) call is required."""
self.truncate(0)
self.seek(0)
self.__length = 0
self.__count = 0
def write(self, *args, **kwargs):
self.__length = 0
self.__count += 1
return super(BytesBuffer, self).write(*args, **kwargs)
def writelines(self, *args, **kwargs):
self.__length = 0
self.__count += 1
return super(BytesBuffer, self).writelines(*args, **kwargs)
def count(self):
return self.__count
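# Illustrative sketch (not part of the original module): a minimal header type
# built on _CData, following the (name, struct format, default) __hdr__ layout
# that _Meta above expects.
if __name__ == "__main__":
    class DemoHeader(_CData):
        __hdr__ = (
            ("version", "B", 1),
            ("length", "H", 0),
        )

    header = DemoHeader(length=4)
    raw = bytes(header) + b"abcd"              # pack_hdr() followed by payload
    parsed = DemoHeader(raw)                   # unpack() splits header and data
    print(parsed.version, parsed.length, parsed.data)
    print(BytesOrder.bytes2int(b"\x01\x00"))   # 256 with the default big-endian order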
|
# coding=utf-8
# Name: eva vanatta
# Date: july 11th 2018
#index - starts with 0 in an index
# slice of list
# print var[0:3]
# all but first
# print var [1:]
# all but the last
# print var [:-1]
# replace
#var[0] = "tree"
# loop
# for var in list:
# print item
# to change
# counter = 0
# for item in var:
# if item == "cat"
# var2[counter] = "dog"
# counter = counter + 1
# var.append(28)
# lst = []
# for letter in list:
# lst.append(letter)
"""
proj04
practice with lists
"""
#Part I
#Take a list, say for example this one:
# user_input = raw_input("Enter a number: ")
# list = []
# a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
#
# for item in a:
# if item < int(user_input):
# list.append(item)
# print list
#Part II
# Take two lists, say for example these two:
# from typing import List
# b = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
# c = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
# d = []
# and write a program that creates and prints a list that contains only the elements
# that are common between the lists (without duplicates).
# Make sure your program works on two lists of different sizes.
# for item in c:
# if item == item in b:
# d.append(item)
# print d
# extension
# import random
# number = random.randint(0,10)
# list = []
# list2 = []
#
# for item in range(0,10):
# number = random.randint(0,10)
# list.append(number)
# for item in range(0,10):
# number = random.randint(0,10)
# list2.append(number)
# print list
#
# print list2
#
# list3 = []
#
# for item in list:
# if item == item in list2:
# list3.append(item)
# print "these are the numbers in common within the 2 lists: "
# print list3
#Part III
# Take a list, say for example this one:
# variables = ["b", "a", "f", "y", "a", "t", "_", "p", "a", "R"]
# # and write a program that replaces all “a” with “*”.
#
# counter = 0
# for item in variables:
# if item == "a":
# variables[counter] = "*"
# counter = counter + 1
# print variables
# Part IV
# Ask the user for a string, and print out whether this string is a palindrome or not.
# list = []
# user_input = raw_input("Enter a word: ")
# user_input = user_input[0:].lower()
#
# for letter in user_input:
# list.append(letter)
#
# for i in range((len(user_input))/2):
# if list[0] == list[-1]:
# list = list[1:-1]
# print user_input, "is a palindrome"
# else:
# print user_input, "is not a palindrome"
import random

# Build a deck of 52 cards (numbered 1-52) and shuffle it by repeatedly
# drawing a random card until the deck is empty.
deck = list(range(1, 53))
shuffledeck = []
while deck:
    card = deck.pop(random.randint(0, len(deck) - 1))
    shuffledeck.append(card)
print(shuffledeck)
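# Illustrative alternative (not part of the original exercise): the standard
# library's random.shuffle performs an in-place shuffle of a copied deck.
deck2 = list(range(1, 53))
random.shuffle(deck2)
print(deck2)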
|
# Copyright 2016-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Unit tests for "openssh" module."""
import errno
import os
import stat
import subprocess
import sys
import unittest
try:
# Python 2.x
from cStringIO import StringIO
except ImportError:
# Python 3.x
from io import StringIO
if sys.hexversion >= 0x3040000:
# contextlib.redirect_stdout was introduced in Python 3.4
import contextlib
else:
# contextlib2 is a backport of contextlib from Python 3.5 and is compatible with Python2/3
import contextlib2 as contextlib
import botocore
import mock
import responses
import moduletests.src.openssh
class TestSSH(unittest.TestCase):
"""SSH tests."""
metadata_url = "http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key"
mock_counter = 0
maxDiff = None
def get_mocked_stat(*args, **kwargs):
TestSSH.mock_counter += 1
return mock.Mock(st_dev=TestSSH.mock_counter, st_ino=TestSSH.mock_counter)
@staticmethod
def return_true(*args, **kwargs):
return True
@staticmethod
def return_false(*args, **kwargs):
return False
def setUp(self):
self.path = moduletests.src.openssh.Path(
path_str="/tmp", e_uid=0, e_gid=0, v_bitmask=(stat.S_IWGRP | stat.S_IWOTH))
self.problem = moduletests.src.openssh.Problem(state="UNCHECKED",
item_type="Config",
item=self.path,
value=None,
value_str="Missing",
info_msg="Found bad lines in configuration file",
check=self.return_true,
check_msg="validity of configuration",
fix_msg=None,
fix=self.return_true)
moduletests.src.openssh.Problem.CONFIG_DICT = {"HOSTKEYS": [],
"AUTH_KEYS": {"absolute": ["/one/two/three/file1",
"/usr/secrets/file2"],
"relative": [".ssh/authorized_keys",
".keyfile1"]},
"CONFIG_PATH": "/etc/ssh/sshd_config",
"CONFIG_DICT": dict(),
"REMEDIATE": False,
"INJECT_KEY": False,
"INJECT_KEY_ONLY": False,
"CREATE_NEW_KEYS": False,
"NEW_KEY": None,
"NOT_AN_INSTANCE": False,
"BACKED_FILES": dict(),
"BACKUP_DIR": "/var/tmp/ec2rl_ssh/backup",
"LOG_DIR": "/var/tmp/ec2rl_ssh",
"PRIV_SEP_DIR": "/var/empty/sshd",
"ALL_SET_BITMASK": 0b111111111111111,
"G_O_WRITE_CHECKING_BITMASK": stat.S_IWGRP | stat.S_IWOTH,
"G_O_ALL_CHECKING_BITMASK": stat.S_IRWXG | stat.S_IRWXO}
self.vertex = moduletests.src.openssh.Vertex("example vertex", [1, 2, 3])
self.dag = moduletests.src.openssh.DirectedAcyclicGraph()
def tearDown(self):
self.path = None
self.problem = None
self.vertex = None
self.dag = None
def test_ssh_vertex_instantiation(self):
"""Test instantiation of a Vertex."""
self.assertEqual(self.vertex.visited, False)
self.assertEqual(self.vertex.continuable, True)
self.assertTrue(isinstance(self.vertex.successors, list))
self.assertEqual(len(self.vertex.successors), 0)
self.assertEqual(self.vertex.in_degree, 0)
self.assertEqual(self.vertex.label, "example vertex")
self.assertEqual(self.vertex.data, [1, 2, 3])
def test_ssh_vertex_add_successor(self):
self.assertTrue(self.vertex.add_successor(1))
self.assertFalse(self.vertex.add_successor(1))
def test_ssh_vertex_remove_successor(self):
self.vertex.add_successor(1)
self.assertTrue(self.vertex.remove_successor(1))
self.assertFalse(self.vertex.remove_successor(1))
def test_ssh_vertex_str(self):
self.assertEqual(self.vertex.__str__(), "example vertex")
def test_ssh_vertex_repr(self):
self.assertEqual(self.vertex.__repr__(), "Vertex(label='example vertex', data=[1, 2, 3])")
def test_ssh_vertex_iter(self):
self.assertTrue(self.vertex.add_successor(1))
self.assertTrue(self.vertex.add_successor(2))
self.assertTrue(iter(self.vertex))
def test_ssh_path_instantiation(self):
self.assertEqual(self.path.path_str, "/tmp")
self.assertEqual(self.path.e_mode, None)
self.assertEqual(self.path.e_uid, 0)
self.assertEqual(self.path.e_gid, 0)
self.assertEqual(self.path.v_bitmask, stat.S_IWGRP | stat.S_IWOTH)
@mock.patch("os.stat")
def test_ssh_path_property_mode(self, os_mock):
os_mock.return_value = mock.Mock(st_mode=666)
self.assertEqual(self.path.mode, 666)
self.assertTrue(os_mock.called)
@mock.patch("os.stat")
def test_ssh_path_property_uid(self, os_mock):
os_mock.return_value = mock.Mock(st_uid=666)
self.assertEqual(self.path.uid, 666)
self.assertTrue(os_mock.called)
@mock.patch("os.stat")
def test_ssh_path_property_gid(self, os_mock):
os_mock.return_value = mock.Mock(st_gid=666)
self.assertEqual(self.path.gid, 666)
self.assertTrue(os_mock.called)
@mock.patch("os.path.isdir", side_effect=[False, True])
def test_ssh_path_property_isdir(self, os_mock):
self.assertFalse(self.path.isdir)
self.assertTrue(self.path.isdir)
self.assertTrue(os_mock.called)
@mock.patch("os.path.isfile", side_effect=[False, True])
def test_ssh_path_property_isfile(self, os_mock):
self.assertFalse(self.path.isfile)
self.assertTrue(self.path.isfile)
self.assertTrue(os_mock.called)
def test_ssh_path_str(self):
self.assertEqual(str(self.path), "/tmp")
def test_ssh_path_repr(self):
self.assertEqual(repr(self.path), "Path(path_str=/tmp, e_mode=None, e_uid=0, e_gid=0 v_bitmask={})".format(
stat.S_IWGRP | stat.S_IWOTH))
def test_ssh_dag_instantiation(self):
self.assertEqual(len(self.dag), 0)
def test_ssh_dag_add_vertex(self):
self.assertTrue(self.dag.add_vertex(self.vertex))
self.assertTrue(self.vertex.label in self.dag.vertices.keys())
self.assertEqual(self.dag.vertices[self.vertex.label], self.vertex)
self.assertEqual(len(self.dag), 1)
# Adding a second time should fail
self.assertFalse(self.dag.add_vertex(self.vertex))
self.assertEqual(len(self.dag), 1)
def test_ssh_dag_add_edge(self):
new_vert = moduletests.src.openssh.Vertex("test", [])
self.dag.add_vertex(new_vert)
# One Vertex is not in the DAG
self.assertFalse(self.dag.add_edge(new_vert.label, self.vertex.label))
self.dag.add_vertex(self.vertex)
# Creates a loop
self.assertFalse(self.dag.add_edge(new_vert.label, new_vert.label))
# Valid
self.assertTrue(self.dag.add_edge(new_vert.label, self.vertex.label))
# Creates a cycle
self.assertFalse(self.dag.add_edge(self.vertex.label, new_vert.label))
def test_ssh_dag_remove_vertex(self):
# Add a Vertex
self.assertTrue(self.dag.add_vertex(self.vertex))
self.assertEqual(len(self.dag), 1)
# Add a second Vertex
new_vert = moduletests.src.openssh.Vertex("test", [])
self.assertTrue(self.dag.add_vertex(new_vert))
self.assertEqual(len(self.dag), 2)
# Remove the original Vertex
self.assertTrue(self.dag.remove_vertex(self.vertex.label))
self.assertEqual(len(self.dag), 1)
# Removing the Vertex when not in the DAG should fail
self.assertFalse(self.dag.remove_vertex(self.vertex.label))
# Re-add the original Vertex and add an edge so the remove successors branch can be tested
self.assertTrue(self.dag.add_vertex(self.vertex))
self.dag.add_edge(new_vert.label, self.vertex.label)
self.assertTrue(self.dag.remove_vertex(self.vertex.label))
self.assertTrue(self.vertex.label not in new_vert.successors)
def test_ssh_dag_remove_edge(self):
self.dag.add_vertex(self.vertex)
new_vert = moduletests.src.openssh.Vertex("test", [])
self.dag.add_vertex(new_vert)
self.dag.add_edge(new_vert.label, self.vertex.label)
self.assertTrue(self.vertex.label in new_vert.successors)
self.assertTrue(self.dag.remove_edge(new_vert.label, self.vertex.label))
self.assertFalse(self.vertex.label in new_vert.successors)
self.assertFalse(self.dag.remove_edge(new_vert.label, new_vert.label))
self.dag.remove_vertex(self.vertex.label)
self.assertFalse(self.dag.remove_edge(new_vert.label, self.vertex.label))
def test_ssh_dag_str(self):
new_vert = moduletests.src.openssh.Vertex("test", [1])
self.dag.add_vertex(new_vert)
self.dag.add_vertex(self.vertex)
self.dag.add_edge(new_vert.label, self.vertex.label)
self.assertEqual(str(self.dag), "example vertex : \ntest : example vertex")
def test_ssh_dag_topo_sort(self):
new_vert_one = moduletests.src.openssh.Vertex("testone", [1])
new_vert_two = moduletests.src.openssh.Vertex("testtwo", [2])
self.dag.add_vertex(new_vert_one)
self.dag.add_vertex(new_vert_two)
self.dag.add_vertex(self.vertex)
self.dag.add_edge(new_vert_one.label, self.vertex.label)
self.dag.add_edge(new_vert_two.label, self.vertex.label)
self.assertEqual(self.dag.topological_sort(), [new_vert_one.label, new_vert_two.label, self.vertex.label])
def test_ssh_dag_topo_solve(self):
problem_npf = moduletests.src.openssh.Problem(state="UNCHECKED",
item_type="Config",
item=self.path,
value=None,
value_str="Missing",
info_msg="NPF problem",
check=self.return_false,
check_msg="NPF problem",
fix_msg="No problem found",
fix=self.return_true)
problem_fix_failed = moduletests.src.openssh.Problem(state="UNCHECKED",
item_type="Config",
item=self.path,
value=None,
value_str="Missing",
info_msg="Fix failed problem",
check=self.return_true,
check_msg="Fix failed problem",
fix_msg="Can not be fixed",
fix=self.return_false)
problem_fixed = moduletests.src.openssh.Problem(state="UNCHECKED",
item_type="Config",
item=self.path,
value=None,
value_str="Missing",
info_msg="Fixed problem",
check=self.return_true,
check_msg="Fixed problem",
fix_msg="Can be fixed",
fix=self.return_true)
new_vert_a = moduletests.src.openssh.Vertex("a", problem_npf)
new_vert_b = moduletests.src.openssh.Vertex("b", problem_fix_failed)
new_vert_c = moduletests.src.openssh.Vertex("c", problem_fixed)
self.dag.add_vertex(new_vert_a)
self.dag.add_vertex(new_vert_b)
self.dag.add_vertex(new_vert_c)
with contextlib.redirect_stdout(StringIO()):
self.dag.add_edge(new_vert_a.label, new_vert_b.label)
self.dag.add_edge(new_vert_b.label, new_vert_c.label)
self.assertEqual(self.dag.topological_solve(remediate=True), [new_vert_a, new_vert_b])
self.assertEqual(self.dag.topological_solve(remediate=False), [new_vert_a, new_vert_b])
self.dag.remove_edge(new_vert_b.label, new_vert_c.label)
self.dag.add_edge(new_vert_a.label, new_vert_c.label)
self.assertEqual(self.dag.topological_solve(remediate=True), [new_vert_a, new_vert_b, new_vert_c])
self.assertEqual(self.dag.topological_solve(remediate=False), [new_vert_a, new_vert_b, new_vert_c])
self.dag.remove_edge(new_vert_a.label, new_vert_c.label)
self.dag.remove_edge(new_vert_a.label, new_vert_b.label)
self.dag.add_edge(new_vert_a.label, new_vert_b.label)
self.dag.add_edge(new_vert_c.label, new_vert_b.label)
self.assertEqual(self.dag.topological_solve(remediate=True), [new_vert_a, new_vert_c, new_vert_b])
self.assertEqual(self.dag.topological_solve(remediate=False), [new_vert_a, new_vert_c])
def test_ssh_dag_search_bfs(self):
new_vert_a = moduletests.src.openssh.Vertex("a", [])
new_vert_b = moduletests.src.openssh.Vertex("b", [])
new_vert_c = moduletests.src.openssh.Vertex("c", [])
new_vert_d = moduletests.src.openssh.Vertex("d", [])
new_vert_e = moduletests.src.openssh.Vertex("e", [])
new_vert_f = moduletests.src.openssh.Vertex("f", [])
self.dag.add_vertex(self.vertex)
self.dag.add_vertex(new_vert_a)
self.dag.add_vertex(new_vert_b)
self.dag.add_vertex(new_vert_c)
self.dag.add_vertex(new_vert_d)
self.dag.add_vertex(new_vert_e)
self.dag.add_vertex(new_vert_f)
self.dag.add_edge(self.vertex.label, new_vert_a.label)
self.dag.add_edge(self.vertex.label, new_vert_b.label)
self.dag.add_edge(new_vert_a.label, new_vert_c.label)
self.dag.add_edge(new_vert_b.label, new_vert_e.label)
self.dag.add_edge(new_vert_b.label, new_vert_d.label)
self.dag.add_edge(new_vert_b.label, new_vert_f.label)
self.dag.add_edge(new_vert_d.label, new_vert_f.label)
# The ordering is predictable since an OrderedDict is used
self.assertEqual(self.dag.search(mode="breadth"), ["example vertex", "a", "b", "c", "e", "d", "f"])
def test_ssh_dag_search_dfs(self):
new_vert_a = moduletests.src.openssh.Vertex("a", [])
new_vert_b = moduletests.src.openssh.Vertex("b", [])
new_vert_c = moduletests.src.openssh.Vertex("c", [])
new_vert_d = moduletests.src.openssh.Vertex("d", [])
new_vert_e = moduletests.src.openssh.Vertex("e", [])
self.dag.add_vertex(self.vertex)
self.dag.add_vertex(new_vert_a)
self.dag.add_vertex(new_vert_b)
self.dag.add_vertex(new_vert_c)
self.dag.add_vertex(new_vert_d)
self.dag.add_vertex(new_vert_e)
self.dag.add_edge(self.vertex.label, new_vert_a.label)
self.dag.add_edge(self.vertex.label, new_vert_b.label)
self.dag.add_edge(new_vert_a.label, new_vert_c.label)
self.dag.add_edge(new_vert_b.label, new_vert_e.label)
self.dag.add_edge(new_vert_b.label, new_vert_d.label)
# The ordering is predictable since an OrderedDict is used
self.assertEqual(self.dag.search(mode="depth"), ["example vertex", "b", "d", "e", "a", "c"])
def test_ssh_dag_search_invalid_mode(self):
self.assertFalse(self.dag.search(mode="invalid"))
def test_ssh_dag__search_from_vert_invalid_start(self):
self.dag.add_vertex(self.vertex)
self.assertFalse(self.dag._search_from_vert(mode="breadth", start="vertex", visited=set()))
def test_ssh_dag__search_from_vert_invalid_visited(self):
self.dag.add_vertex(self.vertex)
self.assertFalse(self.dag._search_from_vert(mode="breadth", start=self.vertex, visited=dict()))
def test_ssh_dag__search_from_vert_invalid_mode(self):
self.dag.add_vertex(self.vertex)
self.assertFalse(self.dag._search_from_vert(mode="invalid", start=self.vertex, visited=set()))
@mock.patch("moduletests.src.openssh.get_config_file_path", side_effect=["test"])
@mock.patch("moduletests.src.openssh.parse_configuration",
side_effect=[{"HostKey": ["/etc/ssh/ssh_host_rsa_key"],
"AuthorizedKeysFile": [".ssh/authorized_keys"]}])
def test_ssh_problem_setup_config_vars_relative_auth_keys(self, parse_mock, get_path_mock):
moduletests.src.openssh.Problem.setup_config_vars()
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["CONFIG_PATH"], "test")
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["CONFIG_DICT"],
{"HostKey": ["/etc/ssh/ssh_host_rsa_key"],
"AuthorizedKeysFile": [".ssh/authorized_keys"]})
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["HOSTKEYS"], ["/etc/ssh/ssh_host_rsa_key"])
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["AUTH_KEYS"],
{"relative": [".ssh/authorized_keys"], "absolute": []})
self.assertTrue(parse_mock.called)
self.assertTrue(get_path_mock.called)
@mock.patch("moduletests.src.openssh.get_config_file_path", side_effect=["test"])
@mock.patch("moduletests.src.openssh.parse_configuration",
side_effect=[{"HostKey": ["/etc/ssh/ssh_host_rsa_key"],
"AuthorizedKeysFile": ["/var/secrets/key"]}])
def test_ssh_problem_setup_config_vars_absolute_auth_keys(self, parse_mock, get_path_mock):
moduletests.src.openssh.Problem.setup_config_vars()
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["CONFIG_PATH"], "test")
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["CONFIG_DICT"],
{"HostKey": ["/etc/ssh/ssh_host_rsa_key"],
"AuthorizedKeysFile": ["/var/secrets/key"]})
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["HOSTKEYS"], ["/etc/ssh/ssh_host_rsa_key"])
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["AUTH_KEYS"],
{"relative": [], "absolute": ["/var/secrets/key"]})
self.assertTrue(parse_mock.called)
self.assertTrue(get_path_mock.called)
@mock.patch("moduletests.src.openssh.get_config_file_path", side_effect=["test"])
@mock.patch("moduletests.src.openssh.parse_configuration", side_effect=[{}])
def test_ssh_problem_setup_config_config_dict_empty(self, parse_mock, get_path_mock):
moduletests.src.openssh.Problem.setup_config_vars()
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["CONFIG_PATH"], "test")
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["CONFIG_DICT"], {})
self.assertTrue(parse_mock.called)
self.assertTrue(get_path_mock.called)
# noinspection PyUnresolvedReferences
@responses.activate
@mock.patch.dict(os.environ, {})
@mock.patch("moduletests.src.openssh.get_config_dict", return_value={"REMEDIATE": True,
"NOT_AN_INSTANCE": False,
"PRIV_SEP_DIR": "/var/empty/sshd"})
@mock.patch("moduletests.src.openssh.get_privilege_separation_dir", side_effect=[False])
def test_ssh_problem_setup_run_vars_unset(self, get_priv_sep_dir_mock, get_config_dict_mock):
responses.add(responses.GET, "http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key",
status=200,
body="test_key")
with contextlib.redirect_stdout(StringIO()):
moduletests.src.openssh.Problem.setup_run_vars(metadata_key_url=self.metadata_url)
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["REMEDIATE"], True)
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["CREATE_NEW_KEYS"], False)
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["INJECT_KEY"], False)
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["INJECT_KEY_ONLY"], False)
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["NEW_KEY"], "test_key")
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["PRIV_SEP_DIR"], "/var/empty/sshd")
self.assertTrue(get_priv_sep_dir_mock.called)
self.assertTrue(get_config_dict_mock.called)
# noinspection PyUnresolvedReferences
@mock.patch.dict(os.environ, {})
@mock.patch("moduletests.src.openssh.get_config_dict", return_value={"REMEDIATE": True,
"NOT_AN_INSTANCE": True,
"PRIV_SEP_DIR": "/var/empty/sshd"})
@mock.patch("moduletests.src.openssh.get_privilege_separation_dir", side_effect=[False])
def test_ssh_problem_setup_run_vars_unset_notaninstance(self, get_priv_sep_dir_mock, get_config_dict_mock):
with contextlib.redirect_stdout(StringIO()):
moduletests.src.openssh.Problem.setup_run_vars(metadata_key_url=self.metadata_url)
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["NEW_KEY"], None)
self.assertTrue(get_priv_sep_dir_mock.called)
self.assertTrue(get_config_dict_mock.called)
# noinspection PyUnresolvedReferences
@mock.patch.dict(os.environ, {"injectkey": "True",
"newsshkey": "test_key"})
@mock.patch("moduletests.src.openssh.get_config_dict", return_value={"REMEDIATE": True,
"NOT_AN_INSTANCE": False,
"PRIV_SEP_DIR": "/var/empty/sshd"})
@mock.patch("moduletests.src.openssh.get_privilege_separation_dir", side_effect=[False])
def test_ssh_problem_setup_run_vars_set_injectkey_new_key_valid(self, get_priv_sep_dir_mock, get_config_dict_mock):
with contextlib.redirect_stdout(StringIO()):
moduletests.src.openssh.Problem.setup_run_vars(metadata_key_url=self.metadata_url)
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["REMEDIATE"], True)
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["NOT_AN_INSTANCE"], False)
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["INJECT_KEY"], True)
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["NEW_KEY"], "test_key")
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["PRIV_SEP_DIR"], "/var/empty/sshd")
self.assertTrue(get_priv_sep_dir_mock.called)
self.assertTrue(get_config_dict_mock.called)
# noinspection PyUnresolvedReferences
@mock.patch.dict(os.environ, {"injectkey": "True",
"createnewkeys": "True"})
@mock.patch("moduletests.src.openssh.get_config_dict", return_value={"REMEDIATE": True,
"NOT_AN_INSTANCE": False,
"PRIV_SEP_DIR": "/var/empty/sshd"})
@mock.patch("moduletests.src.openssh.get_privilege_separation_dir", side_effect=[False])
def test_ssh_problem_setup_run_vars_set_injectkey_create_valid(self, get_priv_sep_dir_mock, get_config_dict_mock):
with contextlib.redirect_stdout(StringIO()):
moduletests.src.openssh.Problem.setup_run_vars(metadata_key_url=self.metadata_url)
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["REMEDIATE"], True)
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["NOT_AN_INSTANCE"], False)
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["INJECT_KEY"], True)
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["CREATE_NEW_KEYS"], True)
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["NEW_KEY"], None)
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["PRIV_SEP_DIR"], "/var/empty/sshd")
self.assertTrue(get_priv_sep_dir_mock.called)
self.assertTrue(get_config_dict_mock.called)
# noinspection PyUnresolvedReferences
@mock.patch.dict(os.environ, {"injectkey": "unexpected",
"newsshkey": "test_key"})
@mock.patch("moduletests.src.openssh.get_config_dict", return_value={"REMEDIATE": True,
"NOT_AN_INSTANCE": False,
"PRIV_SEP_DIR": "/var/empty/sshd"})
@mock.patch("moduletests.src.openssh.get_privilege_separation_dir", side_effect=[False])
def test_ssh_problem_setup_run_vars_set_injectkey_invalid(self, get_priv_sep_dir_mock, get_config_dict_mock):
with contextlib.redirect_stdout(StringIO()):
moduletests.src.openssh.Problem.setup_run_vars(metadata_key_url=self.metadata_url)
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["REMEDIATE"], True)
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["INJECT_KEY"], False)
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["NEW_KEY"], "test_key")
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["PRIV_SEP_DIR"], "/var/empty/sshd")
self.assertTrue(get_priv_sep_dir_mock.called)
self.assertTrue(get_config_dict_mock.called)
# noinspection PyUnresolvedReferences
@mock.patch.dict(os.environ, {"injectkeyonly": "True",
"newsshkey": "test_key"})
@mock.patch("moduletests.src.openssh.get_config_dict", return_value={"REMEDIATE": True,
"NOT_AN_INSTANCE": False,
"PRIV_SEP_DIR": "/var/empty/sshd"})
@mock.patch("moduletests.src.openssh.get_privilege_separation_dir", side_effect=[False])
def test_ssh_problem_setup_run_vars_set_injectkeyonly_valid(self, get_priv_sep_dir_mock, get_config_dict_mock):
with contextlib.redirect_stdout(StringIO()):
moduletests.src.openssh.Problem.setup_run_vars(metadata_key_url=self.metadata_url)
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["REMEDIATE"], True)
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["INJECT_KEY_ONLY"], True)
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["NEW_KEY"], "test_key")
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["PRIV_SEP_DIR"], "/var/empty/sshd")
self.assertTrue(get_priv_sep_dir_mock.called)
self.assertTrue(get_config_dict_mock.called)
# noinspection PyUnresolvedReferences
@responses.activate
@mock.patch.dict(os.environ, {})
@mock.patch("moduletests.src.openssh.get_config_dict", return_value={"REMEDIATE": True,
"NOT_AN_INSTANCE": False,
"PRIV_SEP_DIR": "/var/empty/sshd"})
@mock.patch("moduletests.src.openssh.get_privilege_separation_dir", return_value="test_priv_sep_dir")
def test_ssh_problem_setup_run_vars_unset_set_priv_sep_dir(self, get_priv_sep_dir_mock, get_config_dict_mock):
responses.add(responses.GET, "http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key",
status=200,
body="test_key")
with contextlib.redirect_stdout(StringIO()):
moduletests.src.openssh.Problem.setup_run_vars(metadata_key_url=self.metadata_url)
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["REMEDIATE"], True)
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["NEW_KEY"], "test_key")
self.assertEqual(moduletests.src.openssh.Problem.CONFIG_DICT["PRIV_SEP_DIR"], "test_priv_sep_dir")
self.assertTrue(get_priv_sep_dir_mock.called)
self.assertTrue(get_config_dict_mock.called)
def test_ssh_problem_instantiation(self):
self.assertEqual(self.problem.state, "UNCHECKED")
self.assertEqual(self.problem.item_type, "Config")
self.assertTrue(self.problem.item is self.path)
self.assertEqual(self.problem.value_str, "Missing")
self.assertEqual(self.problem.info_msg, "Found bad lines in configuration file")
self.assertTrue(self.problem.check is self.return_true)
self.assertEqual(self.problem.check_msg, "validity of configuration")
self.assertEqual(self.problem.fix_msg, None)
self.assertTrue(self.problem.fix is self.return_true)
def test_ssh_problem_property_state_invalid(self):
self.assertEqual(self.problem.state, "UNCHECKED")
self.problem.state = "Invalid state"
self.assertEqual(self.problem.state, "UNCHECKED")
def test_ssh_problem_property_item_type_invalid(self):
self.assertEqual(self.problem.item_type, "Config")
self.problem.item_type = "Invalid type"
self.assertEqual(self.problem.item_type, "Config")
def test_ssh_problem_str(self):
self.assertEqual(str(self.problem), "UNCHECKED Config Missing /tmp")
def test_ssh_problem_get_missing_sshd_problem(self):
problem = moduletests.src.openssh.Problem.get_missing_sshd_problem()
self.assertTrue(isinstance(problem, moduletests.src.openssh.Problem))
def test_ssh_problem_get_dupe_keyfile_lines_problem(self):
problem = moduletests.src.openssh.Problem.get_dupe_keyfile_lines_problem()
self.assertTrue(isinstance(problem, moduletests.src.openssh.Problem))
def test_ssh_problem_get_mode_problem(self):
this_path = moduletests.src.openssh.Path(
path_str="/tmp", e_uid=0, e_gid=0, v_bitmask=(stat.S_IWGRP | stat.S_IWOTH))
problem = moduletests.src.openssh.Problem.get_mode_problem(this_path)
self.assertTrue(isinstance(problem, moduletests.src.openssh.Problem))
self.assertEqual(problem.info_msg, "Permission mode includes write for groups and/or other users")
this_path = moduletests.src.openssh.Path(
path_str="/tmp", e_uid=0, e_gid=0, v_bitmask=(stat.S_IRWXG | stat.S_IRWXO))
problem = moduletests.src.openssh.Problem.get_mode_problem(this_path)
self.assertTrue(isinstance(problem, moduletests.src.openssh.Problem))
self.assertEqual(problem.info_msg, "Permission mode includes permissions for groups and/or other users")
this_path = moduletests.src.openssh.Path(path_str="/tmp", e_uid=0, e_gid=0, v_bitmask=stat.S_IWGRP)
problem = moduletests.src.openssh.Problem.get_mode_problem(this_path)
self.assertTrue(isinstance(problem, moduletests.src.openssh.Problem))
self.assertEqual(problem.info_msg, "Permission mode includes write for groups and/or other users")
def test_ssh_problem_get_uid_problem(self):
this_path = moduletests.src.openssh.Path(
path_str="/tmp", e_uid=0, e_gid=0, v_bitmask=(stat.S_IWGRP | stat.S_IWOTH))
problem = moduletests.src.openssh.Problem.get_uid_problem(this_path)
self.assertTrue(isinstance(problem, moduletests.src.openssh.Problem))
def test_ssh_problem_get_missing_config_file_problem(self):
problem = moduletests.src.openssh.Problem.get_missing_config_file_problem()
self.assertTrue(isinstance(problem, moduletests.src.openssh.Problem))
def test_ssh_problem_get_bad_config_options_problem(self):
problem = moduletests.src.openssh.Problem.get_bad_config_options_problem()
self.assertTrue(isinstance(problem, moduletests.src.openssh.Problem))
def test_ssh_problem_get_missing_priv_sep_dir_problem(self):
problem = moduletests.src.openssh.Problem.get_missing_priv_sep_dir_problem()
self.assertTrue(isinstance(problem, moduletests.src.openssh.Problem))
def test_ssh_problem_get_missing_host_keys_problem(self):
problem = moduletests.src.openssh.Problem.get_missing_host_keys_problem()
self.assertTrue(isinstance(problem, moduletests.src.openssh.Problem))
def test_ssh_problem_get_missing_priv_sep_user_problem(self):
problem = moduletests.src.openssh.Problem.get_missing_priv_sep_user_problem()
self.assertTrue(isinstance(problem, moduletests.src.openssh.Problem))
def test_ssh_problem_get_missing_dir_problem(self):
problem = moduletests.src.openssh.Problem.get_missing_dir_problem(self.path)
self.assertTrue(isinstance(problem, moduletests.src.openssh.Problem))
def test_ssh_problem_get_missing_key_problem(self):
problem = moduletests.src.openssh.Problem.get_missing_key_problem(self.path)
self.assertTrue(isinstance(problem, moduletests.src.openssh.Problem))
def test_ssh_problem_fix_unfixable(self):
output = StringIO()
with contextlib.redirect_stdout(output):
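            # The private __fix_unfixable method is reached via its name-mangled form (_Problem__fix_unfixable)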
self.assertFalse(self.problem._Problem__fix_unfixable(self.problem))
self.assertEqual(output.getvalue(), " Unable to automate remediation of this fault.\n")
@mock.patch("moduletests.src.openssh.Problem.setup_config_vars", side_effect=[True])
def test_ssh_problem_check_missing_sshd(self, setup_config_vars_mock):
self.problem.CONFIG_DICT["CONFIG_PATH"] = "/test"
self.assertFalse(self.problem._Problem__check_missing_sshd())
self.assertTrue(setup_config_vars_mock.called)
@mock.patch("moduletests.src.openssh.Problem.setup_config_vars", side_effect=[OSError(errno.ENOENT, "test")])
def test_ssh_problem_check_missing_sshd_enoent(self, setup_config_vars_mock):
self.assertTrue(self.problem._Problem__check_missing_sshd())
self.assertTrue(setup_config_vars_mock.called)
@mock.patch("moduletests.src.openssh.Problem.setup_config_vars", side_effect=[OSError(errno.EEXIST, "test")])
def test_ssh_problem_check_missing_sshd_eexist(self, setup_config_vars_mock):
self.assertFalse(self.problem._Problem__check_missing_sshd())
self.assertTrue(setup_config_vars_mock.called)
@mock.patch("moduletests.src.openssh.Problem.setup_config_vars", side_effect=[subprocess.CalledProcessError(
returncode=0, cmd="test")])
def test_ssh_problem_check_missing_sshd_cpe(self, setup_config_vars_mock):
self.assertFalse(self.problem._Problem__check_missing_sshd())
self.assertTrue(setup_config_vars_mock.called)
def test_ssh_problem_check_dupe_keyfile_lines_found(self):
self.problem.CONFIG_DICT["CONFIG_PATH"] = "/test"
open_mock = mock.mock_open(read_data="AuthorizedKeysFile a\nAuthorizedKeysFile a\n")
        # mock_open does not support iteration, so it must be added manually:
        # call readline() until it returns an empty string (the EOF sentinel)
def iter_func(self):
return iter(self.readline, "")
open_mock.return_value.__iter__ = iter_func
def py3_next_func(self):
return next(iter(self.readline, ""))
if sys.hexversion >= 0x3000000:
open_mock.return_value.__next__ = py3_next_func
with mock.patch("moduletests.src.openssh.open", open_mock):
self.assertTrue(self.problem._Problem__check_dupe_keyfile_lines())
def test_ssh_problem_check_dupe_keyfile_lines_not_found(self):
self.problem.CONFIG_DICT["CONFIG_PATH"] = "/test"
open_mock = mock.mock_open(read_data="Port 22\n# test\nAuthorizedKeysFile\n\n")
        # mock_open does not support iteration, so it must be added manually:
        # call readline() until it returns an empty string (the EOF sentinel)
def iter_func(self):
return iter(self.readline, "")
open_mock.return_value.__iter__ = iter_func
def py3_next_func(self):
return next(iter(self.readline, ""))
if sys.hexversion >= 0x3000000:
open_mock.return_value.__next__ = py3_next_func
with mock.patch("moduletests.src.openssh.open", open_mock):
self.assertFalse(self.problem._Problem__check_dupe_keyfile_lines())
@mock.patch("subprocess.check_output", side_effect=[True])
def test_ssh_problem_check_bad_config_options_not_found(self, check_output_mock):
self.assertFalse(self.problem._Problem__check_bad_config_options())
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output", side_effect=subprocess.CalledProcessError(
1, "test",
"super awesome message\n"
"/etc/ssh/sshd_config: terminating, 2 bad configuration options\n"))
def test_ssh_problem_check_bad_config_options_exception(self, check_output_mock):
        with self.assertRaises(Exception) as ex:
            self.problem._Problem__check_bad_config_options()
        self.assertEqual(ex.exception.args, ("super awesome message\n",))
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output", side_effect=subprocess.CalledProcessError(
1, "test",
"/etc/ssh/sshd_config: line 1: Bad configuration option: badoption\n"
"/etc/ssh/sshd_config line 2: missing integer value.\n"
"/etc/ssh/sshd_config: terminating, 2 bad configuration options\n"))
def test_ssh_problem_check_bad_config_options_found(self, check_output_mock):
self.assertTrue(self.problem._Problem__check_bad_config_options())
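        # Line numbers 1 and 2 correspond to the 'line N:' entries in the mocked sshd output above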
self.assertEqual(self.problem.value, [1, 2])
self.assertEqual(self.problem.value_str, "1,2")
self.assertEqual(self.problem.fix_msg, "Remove/fix lines: 1,2 in /etc/ssh/sshd_config")
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output",
side_effect=subprocess.CalledProcessError(1, "test", "debug2: load_server_config: filename test_path"))
def test_ssh_problem_check_bad_config_options_cpe_not_found(self, check_output_mock):
self.assertFalse(self.problem._Problem__check_bad_config_options())
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output", side_effect=subprocess.CalledProcessError(
1, "test", "Missing privilege separation directory: /var/empty"))
def test_ssh_problem_check_missing_priv_sep_dir_found(self, check_output_mock):
self.assertTrue(self.problem._Problem__check_missing_priv_sep_dir())
self.assertTrue(isinstance(self.problem.item, moduletests.src.openssh.Path))
self.assertEqual(self.problem.fix_msg, "Create privilege separation directory: /var/empty")
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output", side_effect=subprocess.CalledProcessError(
1, "test", "Some error"))
def test_ssh_problem_check_missing_priv_sep_dir_cpe_not_found(self, check_output_mock):
self.assertFalse(self.problem._Problem__check_missing_priv_sep_dir())
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output", side_effect=[True])
def test_ssh_problem_check_missing_priv_sep_dir_not_found(self, check_output_mock):
self.assertFalse(self.problem._Problem__check_missing_priv_sep_dir())
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output", side_effect=subprocess.CalledProcessError(
1, "test", "sshd: no hostkeys available -- exiting."))
def test_ssh_problem_check_missing_host_keys_not_found(self, check_output_mock):
self.assertTrue(self.problem._Problem__check_missing_host_keys())
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output", side_effect=subprocess.CalledProcessError(
1, "test", "Some other error"))
def test_ssh_problem_check_missing_host_keys_cpe_not_found(self, check_output_mock):
self.assertFalse(self.problem._Problem__check_missing_host_keys())
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output", side_effect=[True])
def test_ssh_problem_check_missing_host_keys_found(self, check_output_mock):
self.assertFalse(self.problem._Problem__check_missing_host_keys())
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output", side_effect=[True])
def test_ssh_problem_check_missing_priv_sep_user_not_found(self, check_output_mock):
self.assertFalse(self.problem._Problem__check_missing_priv_sep_user())
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output", side_effect=subprocess.CalledProcessError(
1, "test", "Privilege separation user sshd does not exist"))
def test_ssh_problem_check_missing_priv_sep_user_found(self, check_output_mock):
self.assertTrue(self.problem._Problem__check_missing_priv_sep_user())
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output", side_effect=subprocess.CalledProcessError(
1, "test", "Some other error"))
def test_ssh_problem_check_missing_priv_sep_user_cpe_not_found(self, check_output_mock):
self.assertFalse(self.problem._Problem__check_missing_priv_sep_user())
self.assertTrue(check_output_mock.called)
@mock.patch("os.path.isdir", side_effect=[True, False])
@mock.patch("os.stat")
def test_ssh_problem_check_mode(self, stat_mock, isdir_mock):
stat_mock.return_value = mock.Mock(st_mode=0o777)
# Mode based on file being a directory
self.assertTrue(self.problem._Problem__check_mode())
self.assertEqual(self.problem.item.e_mode, 0o755)
# Mode based on file being a file
self.assertTrue(self.problem._Problem__check_mode())
self.assertEqual(self.problem.item.e_mode, 0o655)
# No problem found
self.problem.item.v_bitmask = 0b0
self.assertFalse(self.problem._Problem__check_mode())
self.assertTrue(stat_mock.called)
self.assertTrue(isdir_mock.called)
@mock.patch("os.stat")
def test_ssh_problem_check_uid(self, os_mock):
os_mock.return_value = mock.Mock(st_uid=666)
self.assertTrue(self.problem._Problem__check_uid())
self.problem.item.e_uid = 666
self.assertFalse(self.problem._Problem__check_uid())
self.assertTrue(os_mock.called)
@mock.patch("os.path.isdir", side_effect=[True, False])
def test_ssh_check_missing_dir(self, os_mock):
self.assertFalse(self.problem._Problem__check_missing_dir())
self.assertTrue(self.problem._Problem__check_missing_dir())
self.assertTrue(os_mock.called)
@mock.patch("os.path.isfile", side_effect=[True, False])
def test_ssh_check_missing_file(self, os_mock):
self.assertFalse(self.problem._Problem__check_missing_file())
self.assertTrue(self.problem._Problem__check_missing_file())
self.assertTrue(os_mock.called)
@mock.patch("subprocess.check_output", side_effect=[True])
def test_ssh_check_missing_config_file(self, subprocess_mock):
self.assertFalse(self.problem._Problem__check_missing_config_file())
self.assertTrue(subprocess_mock.called)
@mock.patch("subprocess.check_output",
side_effect=subprocess.CalledProcessError(1, "test",
"/etc/ssh/sshd_config: No such file or directory"))
def test_ssh_check_missing_config_file_cpe_no_such(self, subprocess_mock):
self.assertTrue(self.problem._Problem__check_missing_config_file())
self.assertTrue(subprocess_mock.called)
@mock.patch("subprocess.check_output",
side_effect=subprocess.CalledProcessError(1, "test",
"Not the problem we are looking for"))
def test_ssh_check_missing_config_file_cpe_other(self, subprocess_mock):
self.assertFalse(self.problem._Problem__check_missing_config_file())
self.assertTrue(subprocess_mock.called)
    def test_ssh_problem_fix_mode_incorrect_item_type(self):
        self.problem.item_type = "Incorrect"
        with self.assertRaises(Exception) as ex:
            self.problem._Problem__fix_mode()
        self.assertIn("Incorrect remediation function for this_problem type", str(ex.exception))
@mock.patch("os.stat")
@mock.patch("os.chmod", side_effect=[True])
@mock.patch("moduletests.src.openssh.Problem._Problem__check_mode", side_effect=[False])
def test_ssh_problem_fix_mode_fixed(self, check_mode_mock, os_chmod_mock, os_stat_mock):
os_stat_mock.return_value = mock.Mock(st_mode=0o777)
self.problem.item_type = "Mode"
with contextlib.redirect_stdout(StringIO()):
self.assertTrue(self.problem._Problem__fix_mode())
self.assertTrue(check_mode_mock.called)
self.assertTrue(os_chmod_mock.called)
self.assertTrue(os_stat_mock.called)
@mock.patch("os.stat")
@mock.patch("os.chmod", side_effect=[True])
@mock.patch("moduletests.src.openssh.Problem._Problem__check_mode", side_effect=[True])
def test_ssh_problem_fix_mode_not_fixed(self, check_mode_mock, os_chmod_mock, os_stat_mock):
os_stat_mock.return_value = mock.Mock(st_mode=0o777)
self.problem.item_type = "Mode"
with contextlib.redirect_stdout(StringIO()):
self.assertFalse(self.problem._Problem__fix_mode())
self.assertTrue(check_mode_mock.called)
self.assertTrue(os_chmod_mock.called)
self.assertTrue(os_stat_mock.called)
    def test_ssh_problem_fix_uid_incorrect_item_type(self):
        self.problem.item_type = "Incorrect"
        with self.assertRaises(Exception) as ex:
            self.problem._Problem__fix_uid()
        self.assertIn("Incorrect remediation function for this_problem type", str(ex.exception))
@mock.patch("os.stat")
@mock.patch("os.chown", side_effect=[True])
@mock.patch("moduletests.src.openssh.Problem._Problem__check_uid", side_effect=[False])
def test_ssh_problem_fix_uid_fixed(self, check_uid_mock, os_chown_mock, os_stat_mock):
os_stat_mock.return_value = mock.Mock(st_uid=1)
self.problem.item_type = "UID"
with contextlib.redirect_stdout(StringIO()):
self.assertTrue(self.problem._Problem__fix_uid())
self.assertTrue(check_uid_mock.called)
self.assertTrue(os_chown_mock.called)
self.assertTrue(os_stat_mock.called)
@mock.patch("os.stat")
@mock.patch("os.chown", side_effect=[True])
@mock.patch("moduletests.src.openssh.Problem._Problem__check_uid", side_effect=[True])
def test_ssh_problem_fix_uid_not_fixed(self, check_uid_mock, os_chown_mock, os_stat_mock):
os_stat_mock.return_value = mock.Mock(st_uid=1)
self.problem.item_type = "UID"
with contextlib.redirect_stdout(StringIO()):
self.assertFalse(self.problem._Problem__fix_uid())
self.assertTrue(check_uid_mock.called)
self.assertTrue(os_chown_mock.called)
self.assertTrue(os_stat_mock.called)
@mock.patch("moduletests.src.openssh.backup", side_effect=[True])
@mock.patch("shutil.copystat", side_effect=[True])
@mock.patch("shutil.copy2", side_effect=[True])
@mock.patch("os.stat")
@mock.patch("os.chown", side_effect=[True])
@mock.patch("moduletests.src.openssh.Problem._Problem__check_dupe_keyfile_lines", side_effect=[False])
@mock.patch("moduletests.src.openssh.Problem.setup_config_vars", side_effect=[True])
def test_ssh_problem_fix_dup_keyfile_lines_fixed(self,
setup_config_vars_mock,
check_dup_keyfile_lines_mock,
os_chown_mock,
os_stat_mock,
copy2_mock,
copystat_mock,
backup_mock):
open_mock = mock.mock_open(read_data="AuthorizedKeysFile a\nAuthorizedKeysFile b\nPort 22\n")
os_stat_mock.return_value = mock.Mock(st_uid=0, st_gid=0)
        # mock_open does not support iteration, so it must be added manually:
        # call readline() until it returns an empty string (the EOF sentinel)
def iter_func(self):
return iter(self.readline, "")
open_mock.return_value.__iter__ = iter_func
def py3_next_func(self):
return next(iter(self.readline, ""))
if sys.hexversion >= 0x3000000:
open_mock.return_value.__next__ = py3_next_func
self.problem.value = dict()
self.problem.value["line_nums"] = [1, 2]
        # The actual function uses a set, but sets are unordered, so a list is used here to ensure there is
        # only one possible valid outcome
self.problem.value["values"] = ["a", "b"]
# noinspection PyUnresolvedReferences
with mock.patch.object(moduletests.src.openssh.tempfile, "NamedTemporaryFile") as temp_file_mock:
with mock.patch("moduletests.src.openssh.open", open_mock):
with contextlib.redirect_stdout(StringIO()):
self.assertTrue(self.problem._Problem__fix_dup_keyfile_lines())
self.assertTrue(temp_file_mock.called)
self.assertEqual(str(temp_file_mock.mock_calls),
"[call(mode='wt'),\n call().__enter__(),\n "
"call().__enter__().write('# AuthorizedKeysFile a # commented out by "
"ec2rl\\n'),\n "
"call().__enter__().write('# AuthorizedKeysFile b # commented out by "
"ec2rl\\n'),\n "
"call().__enter__().write('AuthorizedKeysFile a b\\n'),\n "
"call().__enter__().write('Port 22\\n'),\n "
"call().__enter__().flush(),\n "
"call().__exit__(None, None, None)]")
self.assertTrue(open_mock.called)
self.assertTrue(backup_mock.called)
self.assertTrue(copystat_mock.called)
self.assertTrue(copy2_mock.called)
self.assertTrue(setup_config_vars_mock.called)
self.assertTrue(check_dup_keyfile_lines_mock.called)
self.assertTrue(os_chown_mock.called)
self.assertTrue(os_stat_mock.called)
@mock.patch("moduletests.src.openssh.backup", side_effect=[True])
@mock.patch("shutil.copystat", side_effect=[True])
@mock.patch("shutil.copy2", side_effect=[True])
@mock.patch("os.stat")
@mock.patch("os.chown", side_effect=[True])
@mock.patch("moduletests.src.openssh.Problem._Problem__check_dupe_keyfile_lines", side_effect=[True])
@mock.patch("moduletests.src.openssh.restore", side_effect=[True])
def test_ssh_problem_fix_dup_keyfile_lines_not_fixed(self,
restore_mock,
check_dup_keyfile_lines_mock,
os_chown_mock,
os_stat_mock,
copy2_mock,
copystat_mock,
backup_mock):
open_mock = mock.mock_open(read_data="AuthorizedKeysFile a\nAuthorizedKeysFile b\nPort 22\n")
os_stat_mock.return_value = mock.Mock(st_uid=0, st_gid=0)
        # mock_open does not support iteration, so it must be added manually:
        # call readline() until it returns an empty string (the EOF sentinel)
def iter_func(self):
return iter(self.readline, "")
open_mock.return_value.__iter__ = iter_func
def py3_next_func(self):
return next(iter(self.readline, ""))
if sys.hexversion >= 0x3000000:
open_mock.return_value.__next__ = py3_next_func
self.problem.value = dict()
self.problem.value["line_nums"] = [1, 2]
        # The actual function uses a set, but sets are unordered, so a list is used here to ensure there is
        # only one possible valid outcome
self.problem.value["values"] = ["a", "b"]
# noinspection PyUnresolvedReferences
with mock.patch.object(moduletests.src.openssh.tempfile, "NamedTemporaryFile") as temp_file_mock:
with mock.patch("moduletests.src.openssh.open", open_mock):
with contextlib.redirect_stdout(StringIO()):
self.assertFalse(self.problem._Problem__fix_dup_keyfile_lines())
self.assertTrue(temp_file_mock.called)
self.assertEqual(str(temp_file_mock.mock_calls),
"[call(mode='wt'),\n call().__enter__(),\n "
"call().__enter__().write('# AuthorizedKeysFile a # commented out by "
"ec2rl\\n'),\n "
"call().__enter__().write('# AuthorizedKeysFile b # commented out by "
"ec2rl\\n'),\n "
"call().__enter__().write('AuthorizedKeysFile a b\\n'),\n "
"call().__enter__().write('Port 22\\n'),\n "
"call().__enter__().flush(),\n "
"call().__exit__(None, None, None)]")
self.assertTrue(open_mock.called)
self.assertTrue(backup_mock.called)
self.assertTrue(copystat_mock.called)
self.assertTrue(copy2_mock.called)
self.assertTrue(restore_mock.called)
self.assertTrue(check_dup_keyfile_lines_mock.called)
self.assertTrue(os_chown_mock.called)
self.assertTrue(os_stat_mock.called)
@mock.patch("moduletests.src.openssh.open", new_callable=mock.mock_open())
@mock.patch("os.chmod", side_effect=[True])
@mock.patch("moduletests.src.openssh.Problem._Problem__check_missing_config_file", side_effect=[False])
@mock.patch("moduletests.src.openssh.Problem.setup_config_vars", side_effect=[True])
def test_ssh_problem_fix_write_default_config_fixed(self,
setup_config_vars_mock,
check_missing_config_file_mock,
os_chmod_mock,
open_mock):
with contextlib.redirect_stdout(StringIO()):
self.assertTrue(self.problem._Problem__fix_write_default_config())
self.assertTrue("call().__enter__().writelines(["
"'HostKey /etc/ssh/ssh_host_rsa_key\\n', "
"'HostKey /etc/ssh/ssh_host_ecdsa_key\\n', "
"'HostKey /etc/ssh/ssh_host_ed25519_key\\n', "
"'SyslogFacility AUTHPRIV\\n', "
"'PermitRootLogin no\\n', "
"'AuthorizedKeysFile .ssh/authorized_keys\\n', "
"'PasswordAuthentication no\\n', "
"'ChallengeResponseAuthentication no\\n', "
"'UsePAM yes\\n']),\n " in str(open_mock.mock_calls))
self.assertTrue(setup_config_vars_mock.called)
self.assertTrue(check_missing_config_file_mock.called)
self.assertTrue(os_chmod_mock.called)
@mock.patch("moduletests.src.openssh.open", new_callable=mock.mock_open())
@mock.patch("os.chmod", side_effect=[True])
@mock.patch("moduletests.src.openssh.Problem._Problem__check_missing_config_file", side_effect=[True])
def test_ssh_problem_fix_write_default_config_not_fixed(self,
check_missing_config_file_mock,
os_chmod_mock,
open_mock):
with contextlib.redirect_stdout(StringIO()):
self.assertFalse(self.problem._Problem__fix_write_default_config())
self.assertTrue("call().__enter__().writelines(["
"'HostKey /etc/ssh/ssh_host_rsa_key\\n', "
"'HostKey /etc/ssh/ssh_host_ecdsa_key\\n', "
"'HostKey /etc/ssh/ssh_host_ed25519_key\\n', "
"'SyslogFacility AUTHPRIV\\n', "
"'PermitRootLogin no\\n', "
"'AuthorizedKeysFile .ssh/authorized_keys\\n', "
"'PasswordAuthentication no\\n', "
"'ChallengeResponseAuthentication no\\n', "
"'UsePAM yes\\n']),\n " in str(open_mock.mock_calls))
self.assertTrue(check_missing_config_file_mock.called)
self.assertTrue(os_chmod_mock.called)
@mock.patch("moduletests.src.openssh.open", side_effect=OSError())
def test_ssh_problem_fix_write_default_config_error(self, open_mock):
self.assertFalse(self.problem._Problem__fix_write_default_config())
self.assertTrue(open_mock.called)
@mock.patch("moduletests.src.openssh.backup", side_effect=[True])
@mock.patch("shutil.copystat", side_effect=[True])
@mock.patch("shutil.copy2", side_effect=[True])
@mock.patch("os.stat")
@mock.patch("os.chown", side_effect=[True])
@mock.patch("moduletests.src.openssh.Problem._Problem__check_bad_config_options", side_effect=[False])
@mock.patch("moduletests.src.openssh.Problem.setup_config_vars", side_effect=[True])
def test_ssh_problem_fix_comment_bad_config_lines_fixed(self,
setup_config_vars_mock,
check_back_config_options_mock,
os_chown_mock,
os_stat_mock,
copy2_mock,
copystat_mock,
backup_mock):
open_mock = mock.mock_open(read_data="BadOptionOne a\nGoodOptionOne b\nBadOptionTwo c\n")
os_stat_mock.return_value = mock.Mock(st_uid=0, st_gid=0)
        # mock_open does not support iteration, so it must be added manually:
        # call readline() until it returns an empty string (the EOF sentinel)
def iter_func(self):
return iter(self.readline, "")
open_mock.return_value.__iter__ = iter_func
def py3_next_func(self):
return next(iter(self.readline, ""))
if sys.hexversion >= 0x3000000:
open_mock.return_value.__next__ = py3_next_func
self.problem.value = [1, 3]
# noinspection PyUnresolvedReferences
with mock.patch.object(moduletests.src.openssh.tempfile, "NamedTemporaryFile") as temp_file_mock:
with mock.patch("moduletests.src.openssh.open", open_mock):
with contextlib.redirect_stdout(StringIO()):
self.assertTrue(self.problem._Problem__fix_comment_bad_config_lines())
self.assertTrue(temp_file_mock.called)
self.assertEqual(str(temp_file_mock.mock_calls),
"[call(mode='wt'),\n call().__enter__(),\n "
"call().__enter__().write('# BadOptionOne a # commented out by ec2rl\\n'),\n "
"call().__enter__().write('GoodOptionOne b\\n'),\n "
"call().__enter__().write('# BadOptionTwo c # commented out by ec2rl\\n'),\n "
"call().__enter__().flush(),\n "
"call().__exit__(None, None, None)]")
self.assertTrue(open_mock.called)
self.assertTrue(setup_config_vars_mock.called)
self.assertTrue(check_back_config_options_mock.called)
self.assertTrue(os_chown_mock.called)
self.assertTrue(os_stat_mock.called)
self.assertTrue(copy2_mock.called)
self.assertTrue(copystat_mock.called)
self.assertTrue(backup_mock.called)
@mock.patch("moduletests.src.openssh.backup", side_effect=[True])
@mock.patch("shutil.copystat", side_effect=[True])
@mock.patch("shutil.copy2", side_effect=[True])
@mock.patch("os.stat")
@mock.patch("os.chown", side_effect=[True])
@mock.patch("moduletests.src.openssh.Problem._Problem__check_bad_config_options", side_effect=[True])
@mock.patch("moduletests.src.openssh.restore", side_effect=[True])
def test_ssh_problem_fix_comment_bad_config_lines_not_fixed(self,
restore_mock,
check_back_config_options_mock,
os_chown_mock,
os_stat_mock,
copy2_mock,
copystat_mock,
backup_mock):
open_mock = mock.mock_open(read_data="BadOptionOne a\nGoodOptionOne b\nBadOptionTwo c\n")
os_stat_mock.return_value = mock.Mock(st_uid=0, st_gid=0)
        # mock_open does not support iteration, so it must be added manually:
        # call readline() until it returns an empty string (the EOF sentinel)
def iter_func(self):
return iter(self.readline, "")
open_mock.return_value.__iter__ = iter_func
def py3_next_func(self):
return next(iter(self.readline, ""))
if sys.hexversion >= 0x3000000:
open_mock.return_value.__next__ = py3_next_func
self.problem.value = [1, 3]
# noinspection PyUnresolvedReferences
with mock.patch.object(moduletests.src.openssh.tempfile, "NamedTemporaryFile") as temp_file_mock:
with mock.patch("moduletests.src.openssh.open", open_mock):
with contextlib.redirect_stdout(StringIO()):
self.assertFalse(self.problem._Problem__fix_comment_bad_config_lines())
self.assertTrue(temp_file_mock.called)
self.assertEqual(str(temp_file_mock.mock_calls),
"[call(mode='wt'),\n call().__enter__(),\n "
"call().__enter__().write('# BadOptionOne a # commented out by ec2rl\\n'),\n "
"call().__enter__().write('GoodOptionOne b\\n'),\n "
"call().__enter__().write('# BadOptionTwo c # commented out by ec2rl\\n'),\n "
"call().__enter__().flush(),\n "
"call().__exit__(None, None, None)]")
self.assertTrue(open_mock.called)
self.assertTrue(restore_mock.called)
self.assertTrue(check_back_config_options_mock.called)
self.assertTrue(os_chown_mock.called)
self.assertTrue(os_stat_mock.called)
self.assertTrue(copy2_mock.called)
self.assertTrue(copystat_mock.called)
self.assertTrue(backup_mock.called)
@mock.patch("os.makedirs", side_effect=[True])
@mock.patch("moduletests.src.openssh.Problem._Problem__check_missing_priv_sep_dir", side_effect=[False])
def test_ssh_problem_fix_missing_priv_sep_dir_fixed(self, check_mock, os_makedirs_mock):
with contextlib.redirect_stdout(StringIO()):
self.assertTrue(self.problem._Problem__fix_missing_priv_sep_dir())
self.assertTrue(check_mock.called)
self.assertTrue(os_makedirs_mock.called)
@mock.patch("os.makedirs", side_effect=[True])
@mock.patch("moduletests.src.openssh.Problem._Problem__check_missing_priv_sep_dir", side_effect=[True])
def test_ssh_problem_fix_missing_priv_sep_dir_not_fixed(self, check_mock, os_makedirs_mock):
with contextlib.redirect_stdout(StringIO()):
self.assertFalse(self.problem._Problem__fix_missing_priv_sep_dir())
self.assertTrue(check_mock.called)
self.assertTrue(os_makedirs_mock.called)
@mock.patch("os.makedirs", side_effect=OSError)
def test_ssh_problem_fix_missing_priv_sep_dir_exception(self, os_makedirs_mock):
with contextlib.redirect_stdout(StringIO()):
self.assertFalse(self.problem._Problem__fix_missing_priv_sep_dir())
self.assertTrue(os_makedirs_mock.called)
@mock.patch("os.path.exists", side_effect=[True] * 3 + [False])
@mock.patch("moduletests.src.openssh.backup", side_effect=["/tmp"] * 3)
@mock.patch("os.path.isfile", side_effect=[True] * 2 + [False] * 1)
@mock.patch("os.remove", side_effect=[True] * 2)
@mock.patch("shutil.rmtree", side_effect=[True])
@mock.patch("subprocess.check_call")
@mock.patch("moduletests.src.openssh.Problem._Problem__check_missing_host_keys", side_effect=[False])
def test_ssh_problem_fix_create_hostkeys_fixed(self,
check_mock,
check_call_mock,
shutil_rmtree_mock,
os_remove_mock,
os_path_isfile_mock,
backup_mock,
os_path_exists_mock):
check_call_mock.return_value = 0
with contextlib.redirect_stdout(StringIO()):
self.assertTrue(self.problem._Problem__fix_create_hostkeys())
self.assertTrue(check_call_mock.called)
self.assertTrue(shutil_rmtree_mock.called)
self.assertTrue(os_remove_mock.called)
self.assertTrue(os_path_isfile_mock.called)
self.assertTrue(backup_mock.called)
self.assertTrue(os_path_exists_mock.called)
self.assertTrue(check_mock.called)
@mock.patch("moduletests.src.openssh.Problem._Problem__check_missing_host_keys", side_effect=[True])
@mock.patch("os.path.exists", side_effect=[True] * 3 + [False])
@mock.patch("moduletests.src.openssh.backup", side_effect=["/tmp"] * 3)
@mock.patch("os.path.isfile", side_effect=[True] * 2 + [False] * 1)
@mock.patch("os.remove", side_effect=[True] * 2)
@mock.patch("shutil.rmtree", side_effect=[True])
@mock.patch("subprocess.check_call")
def test_ssh_problem_fix_create_hostkeys_not_fixed(self,
check_call_mock,
shutil_rmtree_mock,
os_remove_mock,
os_path_isfile_mock,
backup_mock,
os_path_exists_mock,
check_mock):
check_call_mock.return_value = 0
with contextlib.redirect_stdout(StringIO()):
self.assertFalse(self.problem._Problem__fix_create_hostkeys())
self.assertTrue(check_call_mock.called)
self.assertTrue(shutil_rmtree_mock.called)
self.assertTrue(os_remove_mock.called)
self.assertTrue(os_path_isfile_mock.called)
self.assertTrue(backup_mock.called)
self.assertTrue(os_path_exists_mock.called)
self.assertTrue(check_mock.called)
@mock.patch("os.path.exists", side_effect=[True])
@mock.patch("moduletests.src.openssh.backup", side_effect=["/tmp"])
@mock.patch("os.path.isfile", side_effect=[True])
@mock.patch("os.remove", side_effect=[True])
@mock.patch("subprocess.check_call", side_effect=subprocess.CalledProcessError(1, "ssh-keygen"))
def test_ssh_problem_fix_create_hostkeys_exception(self,
check_call_mock,
os_remove_mock,
os_path_isfile_mock,
backup_mock,
os_path_exists_mock):
with contextlib.redirect_stdout(StringIO()):
self.assertFalse(self.problem._Problem__fix_create_hostkeys())
self.assertTrue(check_call_mock.called)
self.assertTrue(os_remove_mock.called)
self.assertTrue(os_path_isfile_mock.called)
self.assertTrue(backup_mock.called)
self.assertTrue(os_path_exists_mock.called)
@mock.patch("moduletests.src.openssh.open", new_callable=mock.mock_open())
@mock.patch("subprocess.check_call")
    @mock.patch("os.stat")
    @mock.patch("os.chmod", side_effect=[True])
@mock.patch("moduletests.src.openssh.Problem._Problem__check_missing_priv_sep_user", side_effect=[False])
def test_ssh_problem_fix_missing_priv_sep_user_fixed(self,
check_mock,
os_chmod_mock,
os_stat_mock,
check_call_mock,
open_mock):
os_stat_mock.return_value = mock.Mock(st_mode=0o600)
check_call_mock.return_value = 0
with contextlib.redirect_stdout(StringIO()):
self.assertTrue(self.problem._Problem__fix_missing_priv_sep_user())
self.assertTrue(check_mock.called)
self.assertTrue(open_mock.called)
self.assertTrue(check_call_mock.called)
self.assertTrue(os_chmod_mock.called)
self.assertTrue(os_stat_mock.called)
@mock.patch("moduletests.src.openssh.open", new_callable=mock.mock_open())
@mock.patch("subprocess.check_call")
    @mock.patch("os.stat")
    @mock.patch("os.chmod", side_effect=[True])
@mock.patch("moduletests.src.openssh.Problem._Problem__check_missing_priv_sep_user", side_effect=[True])
def test_ssh_problem_fix_missing_priv_sep_user_not_fixed(self,
check_mock,
os_chmod_mock,
os_stat_mock,
check_call_mock,
open_mock):
os_stat_mock.return_value = mock.Mock(st_mode=0o600)
check_call_mock.return_value = 0
with contextlib.redirect_stdout(StringIO()):
self.assertFalse(self.problem._Problem__fix_missing_priv_sep_user())
self.assertTrue(check_mock.called)
self.assertTrue(open_mock.called)
self.assertTrue(check_call_mock.called)
self.assertTrue(os_chmod_mock.called)
self.assertTrue(os_stat_mock.called)
@mock.patch("moduletests.src.openssh.open", side_effect=OSError())
def test_ssh_problem_fix_missing_priv_sep_user_exception(self, open_mock):
with contextlib.redirect_stdout(StringIO()):
self.assertFalse(self.problem._Problem__fix_missing_priv_sep_user())
self.assertTrue(open_mock.called)
@mock.patch("os.makedirs", side_effect=[True])
@mock.patch("os.chown", side_effect=[True])
@mock.patch("moduletests.src.openssh.Problem._Problem__check_missing_dir", side_effect=[False])
def test_ssh_problem_fix_missing_dir_fixed(self, check_mock, os_chown_mock, os_makedirs_mock):
self.assertTrue(self.problem._Problem__fix_missing_dir())
self.assertTrue(check_mock.called)
self.assertTrue(os_chown_mock.called)
self.assertTrue(os_makedirs_mock.called)
@mock.patch("os.makedirs", side_effect=OSError())
def test_ssh_problem_fix_missing_dir_exception(self, os_makedirs_mock):
self.assertFalse(self.problem._Problem__fix_missing_dir())
self.assertTrue(os_makedirs_mock.called)
@mock.patch("os.makedirs", side_effect=[True])
@mock.patch("os.chown", side_effect=[True])
@mock.patch("moduletests.src.openssh.Problem._Problem__check_missing_dir", side_effect=[True])
def test_ssh_problem_fix_missing_dir_not_fixed(self, check_mock, os_chown_mock, os_makedirs_mock):
self.assertFalse(self.problem._Problem__fix_missing_dir())
self.assertTrue(check_mock.called)
self.assertTrue(os_chown_mock.called)
self.assertTrue(os_makedirs_mock.called)
@mock.patch("os.mknod", side_effect=[True])
@mock.patch("os.chown", side_effect=[True])
@mock.patch("moduletests.src.openssh.inject_key_single", side_effect=[True])
@mock.patch("moduletests.src.openssh.Problem._Problem__check_missing_file", side_effect=[False])
def test_ssh_problem_fix_missing_key_file_fixed(self, check_mock, inject_mock, os_chown_mock, os_mknod_mock):
self.problem.item.e_mode = 0o600
with contextlib.redirect_stdout(StringIO()):
self.assertTrue(self.problem._Problem__fix_missing_key_file())
self.assertTrue(check_mock.called)
self.assertTrue(inject_mock.called)
self.assertTrue(os_chown_mock.called)
self.assertTrue(os_mknod_mock.called)
@mock.patch("os.mknod", side_effect=[True])
@mock.patch("os.chown", side_effect=[True])
@mock.patch("moduletests.src.openssh.inject_key_single", side_effect=[True])
@mock.patch("moduletests.src.openssh.Problem._Problem__check_missing_file", side_effect=[True])
def test_ssh_problem_fix_missing_key_file_not_fixed(self, check_mock, inject_mock, os_chown_mock, os_mknod_mock):
self.problem.item.e_mode = 0o600
with contextlib.redirect_stdout(StringIO()):
self.assertFalse(self.problem._Problem__fix_missing_key_file())
self.assertTrue(check_mock.called)
self.assertTrue(inject_mock.called)
self.assertTrue(os_chown_mock.called)
self.assertTrue(os_mknod_mock.called)
@mock.patch("os.mknod", side_effect=OSError())
def test_ssh_problem_fix_missing_key_file_exception(self, os_mknod_mock):
self.problem.item.e_mode = 0o600
with contextlib.redirect_stdout(StringIO()):
self.assertFalse(self.problem._Problem__fix_missing_key_file())
self.assertTrue(os_mknod_mock.called)
@mock.patch("moduletests.src.openssh.subprocess.check_call", side_effect=[True])
@mock.patch("moduletests.src.openssh.open")
@mock.patch("moduletests.src.openssh.os.remove", side_effect=[True, True])
def test_ssh_generate_rsa_key_pair_success(self, os_remove_mock, open_mock, subprocess_mock):
key_path = "test_path"
pub_open_mock = mock.mock_open(read_data="pub_key_value")
priv_open_mock = mock.mock_open(read_data="priv_key_value")
open_mock.side_effect = [pub_open_mock.return_value, priv_open_mock.return_value]
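        # The first open() call returns the public key file handle and the second the private key file handle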
self.assertEqual(moduletests.src.openssh.generate_rsa_key_pair(key_path), {"public": "pub_key_value",
"private": "priv_key_value"})
self.assertEqual(os_remove_mock.call_count, 2)
self.assertTrue(os_remove_mock.called)
self.assertTrue(open_mock.called)
self.assertTrue(subprocess_mock.called)
@mock.patch("moduletests.src.openssh.subprocess.check_call", side_effect=subprocess.CalledProcessError(2, "cmd"))
@mock.patch("moduletests.src.openssh.os.remove", side_effect=[IOError, IOError])
def test_ssh_generate_rsa_key_pair_remove_error(self, os_remove_mock, subprocess_mock):
key_path = "test_path"
with self.assertRaises(subprocess.CalledProcessError):
moduletests.src.openssh.generate_rsa_key_pair(key_path)
self.assertEqual(os_remove_mock.call_count, 2)
self.assertTrue(os_remove_mock.called)
self.assertTrue(subprocess_mock.called)
def test_ssh_key_injection_driver_failure(self):
sys_config_dict = {"NEW_KEY": None,
"CREATE_NEW_KEYS": False}
with contextlib.redirect_stdout(StringIO()):
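            # With no key supplied and key creation disabled, the driver has nothing to inject and reports failure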
self.assertFalse(moduletests.src.openssh.key_injection_driver(sys_config_dict))
@mock.patch("moduletests.src.openssh.inject_key_all", side_effect=OSError)
def test_ssh_key_injection_driver_exception(self, inject_key_mock):
sys_config_dict = {"NEW_KEY": "test_key",
"AUTH_KEYS": "",
"BACKED_FILES": "",
"BACKUP_DIR": ""}
with contextlib.redirect_stdout(StringIO()):
self.assertFalse(moduletests.src.openssh.key_injection_driver(sys_config_dict))
self.assertTrue(inject_key_mock.called)
@mock.patch("moduletests.src.openssh.inject_key_all", side_effect=[True])
def test_ssh_key_injection_driver_new_key_success(self, inject_key_mock):
sys_config_dict = {"NEW_KEY": "test_key",
"AUTH_KEYS": "",
"BACKED_FILES": "",
"BACKUP_DIR": ""}
with contextlib.redirect_stdout(StringIO()):
self.assertTrue(moduletests.src.openssh.key_injection_driver(sys_config_dict))
self.assertTrue(inject_key_mock.called)
@mock.patch("moduletests.src.openssh.os.path.exists", return_value=True)
@mock.patch("moduletests.src.openssh.generate_rsa_key_pair", return_value={"public": "pub_key",
"private": "priv_key"})
@mock.patch("moduletests.src.openssh.get_instance_id", return_value="i-test_id")
@mock.patch("moduletests.src.openssh.get_instance_region", return_value="us-east-1")
@mock.patch("moduletests.src.openssh.boto3.client")
@mock.patch("moduletests.src.openssh.inject_key_all", side_effect=[True])
def test_ssh_key_injection_driver_create_key_success(self,
inject_key_mock,
client_mock,
get_instance_region_mock,
get_instance_id_mock,
gen_key_pair_mock,
exists_mock):
sys_config_dict = {"NEW_KEY": None,
"CREATE_NEW_KEYS": True,
"AUTH_KEYS": "",
"BACKED_FILES": "",
"BACKUP_DIR": ""}
with contextlib.redirect_stdout(StringIO()):
self.assertTrue(moduletests.src.openssh.key_injection_driver(sys_config_dict))
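        # The generated private key should be stored in SSM Parameter Store as a SecureString parameter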
self.assertEqual(str(client_mock.mock_calls),
"[call('ssm', region_name='us-east-1'),\n "
"call().put_parameter("
"Description='Private key added to instance i-test_id by EC2 Rescue for Linux.', "
"Name='/ec2rl/openssh/i-test_id/key', "
"Overwrite=True, "
"Type='SecureString', "
"Value='priv_key')]")
self.assertTrue(inject_key_mock.called)
self.assertTrue(client_mock.called)
self.assertTrue(get_instance_region_mock.called)
self.assertTrue(get_instance_id_mock.called)
self.assertTrue(gen_key_pair_mock.called)
self.assertTrue(exists_mock.called)
@mock.patch("moduletests.src.openssh.os.path.exists", return_value=False)
@mock.patch("moduletests.src.openssh.os.makedirs", side_effect=[True])
@mock.patch("moduletests.src.openssh.generate_rsa_key_pair", return_value={"public": "pub_key",
"private": "priv_key"})
@mock.patch("moduletests.src.openssh.get_instance_id", return_value="i-test_id")
@mock.patch("moduletests.src.openssh.get_instance_region", return_value="us-east-1")
@mock.patch("moduletests.src.openssh.boto3.client", side_effect=botocore.exceptions.NoCredentialsError())
def test_ssh_key_injection_driver_create_key_missing_creds(self,
client_mock,
get_instance_region_mock,
get_instance_id_mock,
gen_key_pair_mock,
makedirs_mock,
exists_mock):
sys_config_dict = {"NEW_KEY": None,
"CREATE_NEW_KEYS": True,
"AUTH_KEYS": "",
"BACKED_FILES": "",
"BACKUP_DIR": ""}
with contextlib.redirect_stdout(StringIO()):
self.assertFalse(moduletests.src.openssh.key_injection_driver(sys_config_dict))
self.assertTrue(client_mock.called)
self.assertTrue(get_instance_region_mock.called)
self.assertTrue(get_instance_id_mock.called)
self.assertTrue(gen_key_pair_mock.called)
self.assertTrue(makedirs_mock.called)
self.assertTrue(exists_mock.called)
@mock.patch("moduletests.src.openssh.inject_key_single", side_effect=[True, True])
@mock.patch("os.path.basename", side_effect=["test", "test"])
@mock.patch("moduletests.src.openssh.backup", side_effect=["/test_backup_dir/file1",
"/test_backup_dir/file2"])
@mock.patch("os.path.isfile", side_effect=[False, True, True])
@mock.patch("pwd.getpwnam")
@mock.patch("os.path.isdir", side_effect=[True])
@mock.patch("os.path.realpath", side_effect=["/one/two/three/file1", "/var/secrets/file2", "/home/testuser"])
@mock.patch("glob.glob")
def test_ssh_inject_key_all(self,
glob_mock,
os_path_realpath_mock,
os_path_isdir_mock,
pwd_getpwnam_mock,
os_path_isfile_mock,
backup_mock,
os_path_basename_mock,
inject_key_single_mock):
glob_mock.return_value = ["/home/testuser"]
pwd_getpwnam_mock.return_value.pw_uid = 1337
with contextlib.redirect_stdout(StringIO()):
self.assertTrue(moduletests.src.openssh.inject_key_all("new_key",
{"absolute": ["/one/two/three/file1",
"/var/secrets/file2"],
"relative": ["auth_keys"]},
{},
"backup_dir"))
self.assertTrue(glob_mock.called)
self.assertTrue(os_path_realpath_mock.called)
self.assertTrue(os_path_isdir_mock.called)
self.assertTrue(pwd_getpwnam_mock.called)
self.assertTrue(os_path_isfile_mock.called)
self.assertTrue(backup_mock.called)
self.assertTrue(os_path_basename_mock.called)
self.assertTrue(inject_key_single_mock.called)
@mock.patch("moduletests.src.openssh.inject_key_single", side_effect=[True])
@mock.patch("os.path.basename", side_effect=["test", "test"])
@mock.patch("moduletests.src.openssh.backup", side_effect=["test_backup_dir"])
@mock.patch("os.path.isfile", side_effect=[True])
@mock.patch("pwd.getpwnam")
@mock.patch("os.path.isdir", side_effect=[False])
    @mock.patch("os.path.realpath", return_value="/home/testuser")
@mock.patch("glob.glob")
def test_ssh_inject_key_all_not_isdir(self,
glob_mock,
os_path_realpath_mock,
os_path_isdir_mock,
pwd_getpwnam_mock,
os_path_isfile_mock,
backup_mock,
os_path_basename_mock,
inject_key_single_mock):
glob_mock.return_value = ["/home/testuser"]
pwd_getpwnam_mock.return_value.pw_uid = 1337
with contextlib.redirect_stdout(StringIO()):
self.assertTrue(moduletests.src.openssh.inject_key_all("new_key",
{"absolute": [], "relative": ["auth_keys"]},
{},
"backup_dir"))
self.assertTrue(glob_mock.called)
self.assertTrue(os_path_realpath_mock.called)
self.assertTrue(os_path_isdir_mock.called)
self.assertFalse(pwd_getpwnam_mock.called)
self.assertFalse(os_path_isfile_mock.called)
self.assertFalse(backup_mock.called)
self.assertFalse(os_path_basename_mock.called)
self.assertFalse(inject_key_single_mock.called)
@mock.patch("moduletests.src.openssh.inject_key_single", side_effect=[True])
@mock.patch("os.path.basename", side_effect=["test", "test"])
@mock.patch("moduletests.src.openssh.backup", side_effect=["test_backup_dir"])
@mock.patch("os.path.isfile", side_effect=[True])
@mock.patch("pwd.getpwnam", side_effect=KeyError())
@mock.patch("os.path.isdir", side_effect=[True])
    @mock.patch("os.path.realpath", return_value="/home/testuser")
@mock.patch("glob.glob")
def test_ssh_inject_key_all_not_user(self,
glob_mock,
os_path_realpath_mock,
os_path_isdir_mock,
pwd_getpwnam_mock,
os_path_isfile_mock,
backup_mock,
os_path_basename_mock,
inject_key_single_mock):
glob_mock.return_value = ["/home/testuser"]
self.assertTrue(moduletests.src.openssh.inject_key_all("new_key",
{"absolute": [], "relative": ["auth_keys"]},
{},
"backup_dir"))
self.assertTrue(glob_mock.called)
self.assertTrue(os_path_realpath_mock.called)
self.assertTrue(os_path_isdir_mock.called)
self.assertTrue(pwd_getpwnam_mock.called)
self.assertFalse(os_path_isfile_mock.called)
self.assertFalse(backup_mock.called)
self.assertTrue(os_path_basename_mock.called)
self.assertFalse(inject_key_single_mock.called)
@mock.patch("moduletests.src.openssh.inject_key_single", side_effect=[True])
@mock.patch("os.path.basename", side_effect=["test", "test"])
@mock.patch("moduletests.src.openssh.backup", side_effect=["test_backup_dir"])
@mock.patch("os.path.isfile", side_effect=[False])
@mock.patch("pwd.getpwnam")
@mock.patch("os.path.isdir", side_effect=[True])
    @mock.patch("os.path.realpath", return_value="/home/testuser")
@mock.patch("glob.glob")
def test_ssh_inject_key_all_not_file(self,
glob_mock,
os_path_realpath_mock,
os_path_isdir_mock,
pwd_getpwnam_mock,
os_path_isfile_mock,
backup_mock,
os_path_basename_mock,
inject_key_single_mock):
glob_mock.return_value = ["/home/testuser"]
pwd_getpwnam_mock.return_value.pw_uid = 1337
self.assertTrue(moduletests.src.openssh.inject_key_all("new_key",
{"absolute": [], "relative": ["auth_keys"]},
{},
"backup_dir"))
self.assertTrue(glob_mock.called)
self.assertTrue(os_path_realpath_mock.called)
self.assertTrue(os_path_isdir_mock.called)
self.assertTrue(pwd_getpwnam_mock.called)
self.assertTrue(os_path_isfile_mock.called)
self.assertFalse(backup_mock.called)
self.assertTrue(os_path_basename_mock.called)
self.assertFalse(inject_key_single_mock.called)
@mock.patch("glob.glob", side_effect=[Exception("Test")])
def test_ssh_inject_key_all_exception(self, glob_mock):
with contextlib.redirect_stdout(StringIO()):
self.assertFalse(moduletests.src.openssh.inject_key_all("new_key",
{"absolute": [], "relative": ["auth_keys"]},
{},
"backup_dir"))
self.assertTrue(glob_mock.called)
@mock.patch("moduletests.src.openssh.open", mock.mock_open(read_data="test_key1\ntest_key2\n"))
@mock.patch("os.path.isfile", side_effect=[True])
def test_ssh_inject_key_single_key(self, isfile_mock):
with contextlib.redirect_stdout(StringIO()):
self.assertTrue(
moduletests.src.openssh.inject_key_single(new_key="test_key", full_path_auth_keys="test"))
self.assertTrue(isfile_mock.called)
@mock.patch("moduletests.src.openssh.open", side_effect=OSError())
@mock.patch("os.path.isfile", side_effect=[True])
def test_ssh_inject_key_single_key_exception(self, isfile_mock, open_mock):
with contextlib.redirect_stdout(StringIO()):
self.assertFalse(
moduletests.src.openssh.inject_key_single(new_key="test_key", full_path_auth_keys="test"))
self.assertTrue(isfile_mock.called)
self.assertTrue(open_mock.called)
@mock.patch("moduletests.src.openssh.open", mock.mock_open(read_data="test_key\ntest_key2\n"))
@mock.patch("os.path.isfile", side_effect=[True])
def test_ssh_inject_key_single_key_already_present(self, isfile_mock):
with contextlib.redirect_stdout(StringIO()):
self.assertTrue(
moduletests.src.openssh.inject_key_single(new_key="test_key", full_path_auth_keys="test"))
self.assertTrue(isfile_mock.called)
def test_ssh_inject_key_single_invalid_args(self):
with contextlib.redirect_stdout(StringIO()):
self.assertFalse(moduletests.src.openssh.inject_key_single(new_key="", full_path_auth_keys="test"))
self.assertFalse(moduletests.src.openssh.inject_key_single(new_key="test", full_path_auth_keys=""))
@mock.patch("subprocess.check_output", side_effect=["debug2: load_server_config: filename /etc/ssh/sshd_config\n"])
def test_ssh_get_config_file_path_found(self, check_output_mock):
self.assertEqual(moduletests.src.openssh.get_config_file_path(), "/etc/ssh/sshd_config")
self.assertTrue(check_output_mock.called)
    @mock.patch("subprocess.check_output", side_effect=["debug2:"])
    def test_ssh_get_config_file_path_not_found(self, check_output_mock):
        with self.assertRaises(Exception) as ex:
            moduletests.src.openssh.get_config_file_path()
        self.assertEqual(str(ex.exception), "Failed to obtain server configuration file path!")
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output",
side_effect=subprocess.CalledProcessError(1,
"test",
"debug2: load_server_config: filename "
"/etc/ssh/sshd_config\n"))
def test_ssh_get_config_file_path_cpe_found_load_server_config(self, check_output_mock):
self.assertEqual(moduletests.src.openssh.get_config_file_path(), "/etc/ssh/sshd_config")
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output",
side_effect=subprocess.CalledProcessError(1,
"test",
"/etc/ssh/sshd_config: No such file or directory\n"))
def test_ssh_get_config_file_path_cpe_found_no_such(self, check_output_mock):
self.assertEqual(moduletests.src.openssh.get_config_file_path(), "/etc/ssh/sshd_config")
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output",
side_effect=subprocess.CalledProcessError(1,
"test",
"Some other error\n"))
def test_ssh_get_config_file_path_cpe_not_found(self, check_output_mock):
        with self.assertRaises(Exception) as ex:
            moduletests.src.openssh.get_config_file_path()
        self.assertEqual(str(ex.exception), "Failed to obtain server configuration file path!")
self.assertTrue(check_output_mock.called)
def test_ssh_parse_configuration(self):
open_mock = mock.mock_open(read_data="key1 value1\n"
"too many values\n"
"# comment\n"
"# duplicates skipped\n"
"key1 value1\n"
"HostKey file1\n"
"HostKey file2\n"
"HostKey file_a file_b\n"
"AuthorizedKeysFile %h/skipped\n"
"AuthorizedKeysFile %u/skipped\n"
"AuthorizedKeysFile file3\n"
"AuthorizedKeysFile file4\n"
"AuthorizedKeysFile file5 file6\n")
        # mock_open does not support iteration, so it must be added manually:
        # call readline() until it returns an empty string (the EOF sentinel)
def iter_func(self):
return iter(self.readline, "")
open_mock.return_value.__iter__ = iter_func
def py3_next_func(self):
return next(iter(self.readline, ""))
if sys.hexversion >= 0x3000000:
open_mock.return_value.__next__ = py3_next_func
with mock.patch("moduletests.src.openssh.open", open_mock):
with contextlib.redirect_stdout(StringIO()):
ret = moduletests.src.openssh.parse_configuration("path/should/not/matter")
self.assertTrue(isinstance(ret, dict))
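        # Comments, duplicate options, malformed lines, and %h/%u token paths are excluded from the parsed result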
self.assertEqual(ret, {"AuthorizedKeysFile": ["file3", "file4", "file5", "file6"],
"HostKey": ["file1", "file2"],
"key1": ["value1"]})
@mock.patch("moduletests.src.openssh.open", side_effect=IOError())
def test_ssh_parse_configuration_invalid_path(self, open_mock):
with contextlib.redirect_stdout(StringIO()):
ret = moduletests.src.openssh.parse_configuration("tools/moduletests/tests/test_ssh.py")
self.assertTrue(isinstance(ret, dict))
self.assertEqual(ret, {})
self.assertTrue(open_mock.called)
@mock.patch("glob.glob")
@mock.patch("os.path.realpath", side_effect=["/home/testuser",
"/one/two/three/file1",
"/usr/secrets/file2",
"walkroot",
"/etc/ssh/walkfile1",
"/etc/ssh/walkfile2_key",
"/etc/ssh/walkdir",
"/etc/ssh/ssh_host_dsa_key"])
@mock.patch("os.path.isdir", side_effect=[True, True, False])
@mock.patch("pwd.getpwnam")
@mock.patch("os.walk")
@mock.patch("os.stat", get_mocked_stat)
@mock.patch("os.path.isfile", side_effect=[True])
def test_ssh_get_dag(self,
os_path_isfile_mock,
os_walk_mock,
pwd_getpwnam_mock,
os_path_isdir_mock,
os_path_realpath_mock,
glob_mock):
os_walk_mock.return_value = (("walkroot", ("walkdir",), ("walkfile1", "walkfile2_key")),)
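        # os.walk is mocked to yield a single (root, dirs, files) tuple: one subdirectory and two files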
pwd_getpwnam_mock.return_value = mock.Mock(pw_uid=0, pw_gid=0, pw_nam="testuser")
glob_mock.return_value = ["/home/testuser"]
self.problem.CONFIG_DICT["HOSTKEYS"] = ["/etc/ssh/ssh_host_dsa_key"]
with contextlib.redirect_stdout(StringIO()):
test_dag = moduletests.src.openssh.get_dag(self.problem.CONFIG_DICT)
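        # Expected adjacency list: each vertex name is mapped to the names of its child vertices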
vertex_dict = {"missing_sshd": ["missing_config_file",
"bad_mode_/etc/ssh/ssh_host_dsa_key",
"bad_uid_/etc/ssh/ssh_host_dsa_key"],
"bad_mode_/etc/ssh/ssh_host_dsa_key": [],
"bad_uid_/etc/ssh/ssh_host_dsa_key": [],
"missing_config_file": ["bad_config_options"],
"bad_config_options": ["missing_priv_sep_dir",
"duplicate_keyfile_lines"],
"missing_priv_sep_dir":
["missing_host_keys",
"bad_mode_/var/empty/sshd",
"bad_uid_/var/empty/sshd"],
"missing_host_keys": ["missing_priv_sep_user"],
"missing_priv_sep_user": [],
"bad_mode_/var/empty/sshd": [],
"bad_uid_/var/empty/sshd": [],
"missing_dir_/home":
["bad_uid_/home",
"bad_mode_/home",
"missing_dir_/home/testuser"],
"bad_uid_/home": [],
"bad_mode_/home": [],
"duplicate_keyfile_lines":
["missing_dir_/home",
"missing_dir_/one"],
"missing_dir_/home/testuser":
["bad_mode_/home/testuser",
"bad_uid_/home/testuser",
"missing_dir_/home/testuser/.ssh",
"missing_key_/home/testuser/.keyfile1"],
"bad_uid_/home/testuser": [],
"bad_mode_/home/testuser": [],
"missing_dir_/home/testuser/.ssh":
["bad_mode_/home/testuser/.ssh",
"bad_uid_/home/testuser/.ssh",
"missing_key_/home/testuser/.ssh/authorized_keys"],
"bad_uid_/home/testuser/.ssh": [],
"bad_mode_/home/testuser/.ssh": [],
"missing_key_/home/testuser/.ssh/authorized_keys":
["bad_mode_/home/testuser/.ssh/authorized_keys",
"bad_uid_/home/testuser/.ssh/authorized_keys"],
"bad_uid_/home/testuser/.ssh/authorized_keys": [],
"bad_mode_/home/testuser/.ssh/authorized_keys": [],
"missing_key_/home/testuser/.keyfile1":
["bad_mode_/home/testuser/.keyfile1",
"bad_uid_/home/testuser/.keyfile1"],
"bad_mode_/home/testuser/.keyfile1": [],
"bad_uid_/home/testuser/.keyfile1": [],
"missing_dir_/one":
["bad_uid_/one",
"bad_mode_/one",
"missing_dir_/one/two"],
"bad_uid_/one": [],
"bad_mode_/one": [],
"missing_dir_/one/two":
["bad_uid_/one/two",
"bad_mode_/one/two",
"missing_dir_/one/two/three"],
"bad_uid_/one/two": [],
"bad_mode_/one/two": [],
"missing_dir_/one/two/three":
["bad_uid_/one/two/three",
"bad_mode_/one/two/three",
"missing_key_/one/two/three/file1"],
"bad_uid_/one/two/three": [],
"bad_mode_/one/two/three": [],
"missing_key_/one/two/three/file1":
["bad_uid_/one/two/three/file1",
"bad_mode_/one/two/three/file1"],
"bad_uid_/one/two/three/file1": [],
"bad_mode_/one/two/three/file1": [],
"bad_mode_/etc/ssh": [],
"bad_mode_/etc/ssh/walkdir": [],
"bad_mode_/etc/ssh/walkfile1": [],
"bad_mode_/etc/ssh/walkfile2_key": [],
"bad_uid_/etc/ssh": [],
"bad_uid_/etc/ssh/walkdir": [],
"bad_uid_/etc/ssh/walkfile1": [],
"bad_uid_/etc/ssh/walkfile2_key": []}
self.assertEqual(len(test_dag), 46)
self.assertEqual(set(test_dag.vertices.keys()), set(vertex_dict.keys()))
for key in vertex_dict:
self.assertEqual(set(test_dag.vertices[key]), set(vertex_dict[key]))
self.assertTrue(os_walk_mock.called)
self.assertTrue(pwd_getpwnam_mock.called)
self.assertTrue(os_path_isfile_mock.called)
self.assertTrue(os_path_isdir_mock.called)
self.assertTrue(os_path_realpath_mock.called)
self.assertTrue(glob_mock.called)
@mock.patch("glob.glob")
@mock.patch("os.path.realpath", side_effect=["/home/testuser",
"/home/testuser",
"/home/testuser2",
"/home/testuser2",
"/etc/ssh/walkroot",
"/etc/ssh/walkfile1",
"/etc/ssh/walkdir",
"/etc/ssh/ssh_host_dsa_key",
"/etc/ssh/ssh_host_dsa_key"])
@mock.patch("os.path.isdir", side_effect=[False, True])
@mock.patch("pwd.getpwnam", side_effect=KeyError())
@mock.patch("os.walk")
@mock.patch("os.stat")
@mock.patch("os.path.islink", side_effect=[True, True])
@mock.patch("os.path.isfile", side_effect=[False, True])
def test_ssh_get_dag_skips(self,
os_path_isfile_mock,
os_path_islink_mock,
os_stat_mock,
os_walk_mock,
pwd_getpwnam_mock,
os_path_isdir_mock,
os_path_realpath_mock,
glob_mock):
os_stat_mock.return_value = mock.Mock(st_dev=0, st_ino=1)
os_walk_mock.return_value = (("walkroot", ("walkdir",), ("walkfile1",)),)
glob_mock.return_value = ["/home/testuser", "/home/testuser2"]
self.problem.CONFIG_DICT["HOSTKEYS"] = ["/etc/ssh/ssh_host_dsa_key", "/etc/ssh/ssh_host_dsa_key"]
self.problem.CONFIG_DICT["AUTH_KEYS"]["absolute"] = []
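        # Symlinked paths, duplicate host key entries, and home directories with unresolvable owners should be skipped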
with contextlib.redirect_stdout(StringIO()):
test_dag = moduletests.src.openssh.get_dag(self.problem.CONFIG_DICT)
vertex_dict = {"missing_sshd": ["missing_config_file"],
"missing_config_file": ["bad_config_options"],
"bad_config_options": ["missing_priv_sep_dir",
"duplicate_keyfile_lines"],
"duplicate_keyfile_lines": [],
"missing_priv_sep_dir": ["missing_host_keys",
"bad_uid_/var/empty/sshd",
"bad_mode_/var/empty/sshd"],
"missing_host_keys": ["missing_priv_sep_user"],
"missing_priv_sep_user": [],
"bad_mode_/etc/ssh": [],
"bad_uid_/etc/ssh": [],
"bad_uid_/var/empty/sshd": [],
"bad_mode_/var/empty/sshd": []}
self.assertEqual(len(test_dag), 11)
self.assertEqual(set(test_dag.vertices.keys()), set(vertex_dict.keys()))
for key in vertex_dict:
self.assertEqual(set(test_dag.vertices[key]), set(vertex_dict[key]))
self.assertTrue(os_stat_mock.called)
self.assertTrue(os_walk_mock.called)
self.assertTrue(pwd_getpwnam_mock.called)
self.assertTrue(os_path_isfile_mock.called)
self.assertTrue(os_path_islink_mock.called)
self.assertTrue(os_path_isdir_mock.called)
self.assertTrue(os_path_realpath_mock.called)
self.assertTrue(glob_mock.called)
def test_ssh_get_output_status_failure(self):
v1 = moduletests.src.openssh.Vertex("v1", moduletests.src.openssh.Problem(state="UNCHECKED",
item_type="File",
item="v1 item",
value="v1 value",
value_str="v1 value_str",
info_msg="v1 info_msg",
check=self.return_true,
check_msg="v1 check_msg",
fix_msg="v1 fix_msg",
fix=self.return_true))
v2 = moduletests.src.openssh.Vertex("v2", moduletests.src.openssh.Problem(state="FAILURE",
item_type="File",
item="v2 item",
value="v2 value",
value_str="v2 value_str",
info_msg="v2 info_msg",
check=self.return_true,
check_msg="v2 check_msg",
fix_msg="v2 fix_msg",
fix=self.return_true))
self.dag.add_vertex(v1)
self.dag.add_vertex(v2)
self.assertEqual(moduletests.src.openssh.get_output_status("/test/log/dir", self.dag),
"[FAILURE] Improper configuration of one or more OpenSSH components.\n"
"-- SSH may deny access to users when improperly configured.\n"
"-- FAILURE v2 info_msg: v2 item\n"
"-- v2 fix_msg\n"
"-- Unable to check 1 items due to dependent check failures:\n"
" UNCHECKED v1 info_msg: v1 item\n"
"\n"
"Please view /test/log/dir/run/ssh.log for additional details.\n")
def test_ssh_get_output_status_fix_failed(self):
v1 = moduletests.src.openssh.Vertex("v1", moduletests.src.openssh.Problem(state="FIX_FAILED",
item_type="File",
item="v1 item",
value="v1 value",
value_str="v1 value_str",
info_msg="v1 info_msg",
check=self.return_true,
check_msg="v1 check_msg",
fix_msg="v1 fix_msg",
fix=self.return_true))
self.dag.add_vertex(v1)
self.assertEqual(moduletests.src.openssh.get_output_status("/test/log/dir", self.dag),
"[FAILURE] Failed to remediate one or more problems.\n"
"-- SSH may deny access to users when improperly configured.\n"
"-- FIX_FAILED v1 info_msg: v1 item\n"
"-- v1 fix_msg\n"
"\n"
"Please view /test/log/dir/run/ssh.log for additional details.\n")
def test_ssh_get_output_status_warn(self):
v1 = moduletests.src.openssh.Vertex("v1", moduletests.src.openssh.Problem(state="WARN",
item_type="File",
item="v1 item",
value="v1 value",
value_str="v1 value_str",
info_msg="v1 info_msg",
check=self.return_true,
check_msg="v1 check_msg",
fix_msg="v1 fix_msg",
fix=self.return_true))
self.dag.add_vertex(v1)
self.assertEqual(moduletests.src.openssh.get_output_status("/test/log/dir", self.dag),
"[WARN] Unable to fully validate one or more OpenSSH components.\n"
"-- Configuration could not be fully validated.\n"
"-- WARN v1 info_msg: v1 item\n"
"-- v1 fix_msg\n"
"\n"
"Please view /test/log/dir/run/ssh.log for additional details.\n")
def test_ssh_get_output_status_success(self):
v1 = moduletests.src.openssh.Vertex("v1", moduletests.src.openssh.Problem(state="FIXED",
item_type="File",
item="v1 item",
value="v1 value",
value_str="v1 value_str",
info_msg="v1 info_msg",
check=self.return_true,
check_msg="v1 check_msg",
fix_msg="v1 fix_msg",
fix=self.return_true))
self.dag.add_vertex(v1)
self.assertEqual(moduletests.src.openssh.get_output_status("/test/log/dir", self.dag),
"[SUCCESS] All configuration checks passed or all detected problems fixed.\n"
"-- FIXED v1 info_msg: v1 item\n"
"\n"
"Please view /test/log/dir/run/ssh.log for additional details.\n")
def test_ssh_get_output_status_empty_dag(self):
self.assertEqual(moduletests.src.openssh.get_output_status("/test/log/dir", self.dag),
"[WARN] the problem graph was empty!\n-- The configuration was not validated.\n"
"\n"
"Please view /test/log/dir/run/ssh.log for additional details.\n")
@mock.patch("subprocess.check_output", side_effect=[" /var/run/sshd\n"
" chroot(2) directory used by sshd during "
"privilege separation in the pre-authentication phase. The "
"directory should not contain any files and"])
def test_ssh_get_privilege_separation_dir_subprocess_found_plaintext(self, subprocess_mock):
self.assertEqual(moduletests.src.openssh.get_privilege_separation_dir(), "/var/run/sshd")
self.assertTrue(subprocess_mock.called)
@mock.patch("subprocess.check_output", side_effect=[" /var/run/sshd\n"
" chroot(2) directory used by "
"s\x08ss\x08sh\x08hd\x08d during "
"privilege separation in the pre-authentication phase. The "
"directory should not contain any files and"])
def test_ssh_get_privilege_separation_dir_subprocess_found_escaped_chars(self, subprocess_mock):
self.assertEqual(moduletests.src.openssh.get_privilege_separation_dir(), "/var/run/sshd")
self.assertTrue(subprocess_mock.called)
@mock.patch("subprocess.check_output", side_effect=["These are not\nthe lines you are looking for\n"])
def test_ssh_get_privilege_separation_dir_subprocess_not_found(self, subprocess_mock):
        with self.assertRaises(Exception) as ex:
            moduletests.src.openssh.get_privilege_separation_dir()
        self.assertEqual(str(ex.exception), "Failed to obtain privilege separation directory path!")
self.assertTrue(subprocess_mock.called)
@mock.patch("subprocess.check_output", side_effect=OSError(2, "No such file or directory"))
@mock.patch("os.path.exists", side_effect=[False, False, True])
def test_ssh_get_privilege_separation_dir_subprocess_exception_found(self, os_mock, subprocess_mock):
self.assertEqual(moduletests.src.openssh.get_privilege_separation_dir(), "/var/run/sshd")
self.assertTrue(os_mock.called)
self.assertTrue(subprocess_mock.called)
@mock.patch("subprocess.check_output", side_effect=OSError(2, "No such file or directory"))
@mock.patch("os.path.exists", side_effect=[False, False, False])
def test_ssh_get_privilege_separation_dir_subprocess_exception_not_found(self, os_mock, subprocess_mock):
        with self.assertRaises(Exception) as ex:
            moduletests.src.openssh.get_privilege_separation_dir()
        self.assertEqual(str(ex.exception), "Failed to obtain privilege separation directory path!")
self.assertTrue(os_mock.called)
self.assertTrue(subprocess_mock.called)
@mock.patch("moduletests.src.openssh.Problem.setup_config_vars", side_effect=[True])
@mock.patch("moduletests.src.openssh.Problem.setup_run_vars", side_effect=[True])
@mock.patch("moduletests.src.openssh.get_dag", return_value=moduletests.src.openssh.DirectedAcyclicGraph())
@mock.patch("moduletests.src.openssh.DirectedAcyclicGraph.topological_solve", side_effect=[True])
@mock.patch("moduletests.src.openssh.get_output_status", side_effect=[True])
def test_ssh_run(self,
get_output_status_mock,
topological_solve_mock,
get_dag_mock,
setup_run_vars_mock,
setup_config_vars_mock):
with contextlib.redirect_stdout(StringIO()):
moduletests.src.openssh.run()
self.assertTrue(get_output_status_mock.called)
self.assertTrue(topological_solve_mock.called)
self.assertTrue(get_dag_mock.called)
self.assertTrue(setup_run_vars_mock.called)
self.assertTrue(setup_config_vars_mock.called)
@mock.patch("moduletests.src.openssh.Problem.setup_config_vars", side_effect=[OSError])
@mock.patch("moduletests.src.openssh.Problem.setup_run_vars", side_effect=[True])
@mock.patch("moduletests.src.openssh.get_dag", return_value=moduletests.src.openssh.DirectedAcyclicGraph())
@mock.patch("moduletests.src.openssh.DirectedAcyclicGraph.topological_solve", side_effect=[True])
@mock.patch("moduletests.src.openssh.get_output_status", side_effect=[True])
def test_ssh_run_config_vars_oserror(self,
get_output_status_mock,
topological_solve_mock,
get_dag_mock,
setup_run_vars_mock,
setup_config_vars_mock):
with contextlib.redirect_stdout(StringIO()):
self.assertTrue(moduletests.src.openssh.run())
self.assertTrue(get_output_status_mock.called)
self.assertTrue(topological_solve_mock.called)
self.assertTrue(get_dag_mock.called)
self.assertTrue(setup_run_vars_mock.called)
self.assertTrue(setup_config_vars_mock.called)
@mock.patch("moduletests.src.openssh.Problem.setup_config_vars", side_effect=[ValueError])
def test_ssh_unhandled_exception(self, setup_config_vars_mock):
output = StringIO()
with contextlib.redirect_stdout(output):
self.assertFalse(moduletests.src.openssh.run())
self.assertTrue("[WARN] module generated an exception" in output.getvalue())
self.assertTrue(setup_config_vars_mock.called)
@mock.patch("moduletests.src.openssh.Problem.setup_config_vars", side_effect=[True])
@mock.patch("moduletests.src.openssh.Problem.setup_run_vars", side_effect=[True])
@mock.patch("moduletests.src.openssh.key_injection_driver", side_effect=[True])
def test_ssh_run_injectkeyonly_success(self,
key_injection_driver_mock,
setup_run_vars_mock,
setup_config_vars_mock):
self.problem.CONFIG_DICT["REMEDIATE"] = True
self.problem.CONFIG_DICT["INJECT_KEY_ONLY"] = True
with contextlib.redirect_stdout(StringIO()):
self.assertTrue(moduletests.src.openssh.run())
self.assertTrue(key_injection_driver_mock.called)
self.assertTrue(setup_run_vars_mock.called)
self.assertTrue(setup_config_vars_mock.called)
@mock.patch("moduletests.src.openssh.Problem.setup_config_vars", side_effect=[True])
@mock.patch("moduletests.src.openssh.Problem.setup_run_vars", side_effect=[True])
@mock.patch("moduletests.src.openssh.key_injection_driver", side_effect=[False])
def test_ssh_run_injectkeyonly_failure(self,
key_injection_driver_mock,
setup_run_vars_mock,
setup_config_vars_mock):
self.problem.CONFIG_DICT["REMEDIATE"] = True
self.problem.CONFIG_DICT["INJECT_KEY_ONLY"] = True
with contextlib.redirect_stdout(StringIO()):
self.assertFalse(moduletests.src.openssh.run())
self.assertTrue(key_injection_driver_mock.called)
self.assertTrue(setup_run_vars_mock.called)
self.assertTrue(setup_config_vars_mock.called)
@mock.patch("moduletests.src.openssh.Problem.setup_config_vars", side_effect=[True])
@mock.patch("moduletests.src.openssh.Problem.setup_run_vars", side_effect=[True])
@mock.patch("moduletests.src.openssh.key_injection_driver", side_effect=[False])
def test_ssh_run_injectkey_failure(self,
key_injection_driver_mock,
setup_run_vars_mock,
setup_config_vars_mock):
self.problem.CONFIG_DICT["REMEDIATE"] = True
self.problem.CONFIG_DICT["INJECT_KEY"] = True
with contextlib.redirect_stdout(StringIO()):
self.assertFalse(moduletests.src.openssh.run())
self.assertTrue(key_injection_driver_mock.called)
self.assertTrue(setup_run_vars_mock.called)
self.assertTrue(setup_config_vars_mock.called)
@mock.patch("moduletests.src.openssh.Problem.setup_config_vars", side_effect=[True])
@mock.patch("moduletests.src.openssh.Problem.setup_run_vars", side_effect=[True])
@mock.patch("moduletests.src.openssh.get_dag", return_value=moduletests.src.openssh.DirectedAcyclicGraph())
@mock.patch("moduletests.src.openssh.DirectedAcyclicGraph.topological_solve", side_effect=[True])
@mock.patch("moduletests.src.openssh.get_output_status", side_effect=[True])
def test_ssh_run_injectkey_missing_remediate(self,
get_output_status_mock,
topological_solve_mock,
get_dag_mock,
setup_run_vars_mock,
setup_config_vars_mock):
self.problem.CONFIG_DICT["REMEDIATE"] = False
self.problem.CONFIG_DICT["INJECT_KEY"] = True
with contextlib.redirect_stdout(StringIO()):
self.assertTrue(moduletests.src.openssh.run())
self.assertTrue(get_output_status_mock.called)
self.assertTrue(topological_solve_mock.called)
self.assertTrue(get_dag_mock.called)
self.assertTrue(setup_run_vars_mock.called)
self.assertTrue(setup_config_vars_mock.called)
@mock.patch("moduletests.src.openssh.Problem.setup_config_vars", side_effect=[True])
@mock.patch("moduletests.src.openssh.Problem.setup_run_vars", side_effect=[True])
def test_ssh_run_injectkeyonly_missing_remediate(self,
setup_run_vars_mock,
setup_config_vars_mock):
self.problem.CONFIG_DICT["REMEDIATE"] = False
self.problem.CONFIG_DICT["INJECT_KEY_ONLY"] = True
with contextlib.redirect_stdout(StringIO()):
self.assertFalse(moduletests.src.openssh.run())
self.assertTrue(setup_run_vars_mock.called)
self.assertTrue(setup_config_vars_mock.called)
@mock.patch("moduletests.src.openssh.Problem.setup_config_vars", side_effect=[True])
@mock.patch("moduletests.src.openssh.Problem.setup_run_vars", side_effect=[True])
@mock.patch("moduletests.src.openssh.key_injection_driver", side_effect=[True])
@mock.patch("moduletests.src.openssh.get_dag", return_value=moduletests.src.openssh.DirectedAcyclicGraph())
@mock.patch("moduletests.src.openssh.DirectedAcyclicGraph.topological_solve", side_effect=[True])
@mock.patch("moduletests.src.openssh.get_output_status", side_effect=[True])
def test_ssh_run_injectkey_success(self,
get_output_status_mock,
topological_solve_mock,
get_dag_mock,
key_injection_driver_mock,
setup_run_vars_mock,
setup_config_vars_mock):
self.problem.CONFIG_DICT["REMEDIATE"] = True
self.problem.CONFIG_DICT["INJECT_KEY"] = True
with contextlib.redirect_stdout(StringIO()):
self.assertTrue(moduletests.src.openssh.run())
self.assertTrue(get_output_status_mock.called)
self.assertTrue(topological_solve_mock.called)
self.assertTrue(get_dag_mock.called)
self.assertTrue(key_injection_driver_mock.called)
self.assertTrue(setup_run_vars_mock.called)
self.assertTrue(setup_config_vars_mock.called)
|
#!/usr/bin/python3
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from collections.abc import Iterable
from datetime import datetime
from FileReader import FileReader
from GreengrassAwareConnection import *
import MessagePayload
from Observer import *
import TopicGenerator
import argparse
import json
import logging
import time
import sys
# singleton config/state/globals
from Config import state
# Configure logging
logger = logging.getLogger("TelemetryThing.core")
logger.setLevel(logging.INFO)
streamHandler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)
# Read in command-line parameters
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--endpoint", action="store", required=True, dest="host", help="Your AWS IoT custom endpoint")
parser.add_argument("-r", "--rootCA", action="store", required=True, dest="rootCAPath", help="Root CA file path")
parser.add_argument("-c", "--cert", action="store", dest="certificatePath", help="Certificate file path")
parser.add_argument("-k", "--key", action="store", dest="privateKeyPath", help="Private key file path")
parser.add_argument("-n", "--thingName", action="store", dest="thingName", default="Bot", help="Targeted thing name")
args = parser.parse_args()
host = args.host
rootCA = args.rootCAPath
cert = args.certificatePath
key = args.privateKeyPath
thingName = args.thingName
# State variables
def_state = {
'deviceid': thingName,
'file': 's3://connected-vehicle-datasource/100.csv',
'time_col_name': 'Timestamp(ms)',
'time_scale':1000.0
}
for k in set(def_state.keys()) - set(state.keys()):
state[k] = def_state[k]
state_dirty = True
tripSrc = FileReader(local_dir=state.get('local_dir', "."), record_separator=state.get('record_separator', ','), quote_records=state.get('quote_records', False))
class DeltaProcessor(Observer):
def update(self, updateList):
global state_dirty
[ state.update(u) for u in updateList ]
state_dirty = True
try:
deltas = ObservableDeepArray()
iotConnection = GreengrassAwareConnection(host, rootCA, cert, key, thingName, deltas, state)
time.sleep(10)
deltaProcessor = DeltaProcessor()
deltas.addObserver(deltaProcessor)
except Exception as e:
logger.error(f'{str(type(e))} Error')
def getTopicGenerator():
topic_strategy = getattr(TopicGenerator, state.get('topic_strategy', 'SimpleFormattedTopic'))
return topic_strategy(state.get('topic_name', 'dt/cvra/{deviceid}/cardata'))
def makePayload(telemetry):
payload_strategy = getattr(MessagePayload, state.get('payload_strategy', 'SimpleLabelledPayload'))
return payload_strategy(telemetry, {
'preDropKeys':state.get('ignore_columns',[]),
'metricKey': state.get('measure_column'),
'readingKey': state.get('value_column'),
'time_col_name': state.get('time_col_name')
}).message(json.dumps)
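# Note: both helpers above resolve a strategy class by name with getattr.
# Assuming SimpleFormattedTopic simply applies str.format to the template,
# the default state['topic_name'] of 'dt/cvra/{deviceid}/cardata' yields
# topics such as 'dt/cvra/Bot/cardata' for the default thing name.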
def getTimestampMS(telemetry):
time_col_name = state.get('time_col_name', 'Timestamp(ms)')
time_scale = float(state.get('time_scale', 1000.0))
timestamp = telemetry.get(time_col_name, DEFAULT_SAMPLE_DURATION_MS)
time_format = state.get('timestamp_format')
timestamp_offset = state.get('timestamp_offset', 0.0)
# convert to milliseconds
    if time_format is None:
timestamp_ms = (float(timestamp) + timestamp_offset)/time_scale*1000
else:
timestamp_ms = datetime.strptime(timestamp, time_format).timestamp()*1000
return int(timestamp_ms)
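# Worked example: with the defaults above, a raw 'Timestamp(ms)' value of 1500
# and time_scale=1000.0 gives int((1500 + 0.0)/1000.0*1000) = 1500, i.e. the
# value passes through as milliseconds; when timestamp_format is set, the string
# is parsed with strptime and converted to epoch milliseconds instead.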
DEFAULT_SAMPLE_DURATION_MS = 1000
message_count = 0
def do_something():
# send current state to shadow
global state_dirty, message_count
if state_dirty:
tripSrc.useFileURI(state['file'])
iotConnection.updateShadow(state)
state_dirty = False
# assemble telemetry
telemetry = tripSrc.getSample()
# print(json.dumps(telemetry) + "\n")
if len(telemetry) == 0:
if state.get('at_end') == 'stop':
logger.info("end of file reached")
time.sleep(600) # wait 10 min for queued messages to clear
sys.exit()
return 30 # wait 30 seconds between runs
deviceid = state.get('deviceid', thingName)
timestamp_ms = getTimestampMS(telemetry)
payload = makePayload(telemetry)
topic = getTopicGenerator().make_topicname(deviceid=deviceid, timestamp_ms=timestamp_ms)
message_count += 1
logger.info(f"{message_count} - {topic}:{payload}")
sleep = [0, 1]
while not iotConnection.publishMessageOnTopic(payload, topic, qos=1):
logger.info("waiting to clear block")
# fibonacci backoff on wait
sleep.append(sum(sleep))
timeout = sleep.pop(0)
if timeout > 300:
            logger.warning("timeout escalated to 30 sec -- re-connecting")
try:
iotConnection.disconnect()
time.sleep(10)
iotConnection.connect()
except Exception as e:
pass
sleep = [0, 1]
time.sleep(timeout/10.0)
# return the timestamp of the leg
return timestamp_ms/1000.0
timeout = 5
def run():
rate = state.get('message_publish_rate')
last_time = do_something()
    sleep_time = 0.05 if rate is None else 1.0/rate
while True:
time.sleep(sleep_time if sleep_time > 0 else timeout)
cur_time = do_something()
        if rate is None:
            sleep_time = cur_time - last_time if cur_time >= last_time else 0
last_time = cur_time
if __name__ == "__main__":
run()
|
## strobpy\strobopy
'''Function to load dm3s and get their meta_data '''
def load_dm3(filename, get_meta=False, stack=False):
    '''Loads a dm3 file (or a list of dm3 files when stack=True) into a numpy array.
    If get_meta=True, all corresponding metadata is returned as well.
    '''
    import numpy as np
    from pycroscopy.io.translators.df_utils.dm_utils import read_dm3
    if stack or np.array(filename).size != 1:
image_size= read_dm3(filename[0])[0].shape
num_images=len(filename)
stack_size=(image_size[0],image_size[1], len(filename)) # set size of data_stack
data_stack=np.zeros(stack_size) # Preallocate the data_stack
meta_size=[]
'''Determine the size of the meta data'''
for n in range(len(filename)):
meta_size.append(len(list(read_dm3(filename[n])[1].items())))
meta_dim1=np.max(meta_size)
meta_stack=np.zeros((meta_dim1,2,len(filename)),dtype=np.object_)
'''Parse the image intensities from the meta data and load into data_stack and meta_stack'''
for n in range(len(filename)):
data_stack[:,:,n]=read_dm3(filename[n])[0]
        if get_meta:
for n in range(len(filename)):
string_stack=list(read_dm3(filename[n])[1].items())
if len(string_stack)<meta_dim1:
diff=meta_dim1-len(string_stack)
for x in range(diff):
string_stack.append(([],[]))
meta_stack[:,:,n]=string_stack # Use meta_stack[:,:,#] to look at the meta_data for file #
return data_stack, meta_stack
return data_stack
image_size= read_dm3(filename)[0].shape
image=np.zeros(image_size)
meta=np.zeros((len(list(read_dm3(filename)[1].items())),2),dtype=np.object_)
image[:,:]=read_dm3(filename)[0]
meta[:,:]=list(read_dm3(filename)[1].items())
    if get_meta:
return image, meta
return image
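# Usage sketch (the file names are hypothetical placeholders):
#   image = load_dm3('scan_000.dm3')                               # single image
#   data_stack, meta_stack = load_dm3(['scan_000.dm3', 'scan_001.dm3'],
#                                     get_meta=True, stack=True)   # stack + metadata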
|
from typing import Dict, List
import torch
from torch.autograd import Variable
from torch.nn.functional import nll_loss
from torch.nn.functional import softmax
import numpy as np
from allennlp.common import Params
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder
from allennlp.nn import util, InitializerApplicator
@Model.register("ProGlobal")
class ProGlobal(Model):
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
pos_field_embedder: TextFieldEmbedder,
sent_pos_field_embedder: TextFieldEmbedder,
modeling_layer: Seq2SeqEncoder,
span_end_encoder_before: Seq2SeqEncoder,
span_start_encoder_after: Seq2SeqEncoder,
span_end_encoder_after: Seq2SeqEncoder,
dropout: float = 0.2,
mask_lstms: bool = True,
initializer: InitializerApplicator = InitializerApplicator()) -> None:
"""
This ``Model`` takes as input a dataset read by ProGlobalDatasetReader
Input: a list of sentences, a participant
Output: location category for the participant, location span
The basic outline of this model is to
1. get an embedded representation for paragraph tokens,
2. apply bi-LSTM to get contextual embeddings,
3. apply three-category classification and location span prediction to predict the location state
:param vocab: Vocabulary
:param text_field_embedder: ``TextFieldEmbedder`` used to embed the ``sentence tokens``
:param pos_field_embedder: ``TextFieldEmbedder`` used to embed the ``word positions``
:param sent_pos_field_embedder: ``TextFieldEmbedder`` used to embed the sent indicators
:param modeling_layer: ``Seq2SeqEncoder`` to encode the sequence of paragraph
        :param span_end_encoder_before: ``Seq2SeqEncoder`` to encode the sequence for before location end prediction
        :param span_start_encoder_after: ``Seq2SeqEncoder`` to encode the sequence for after location start prediction
        :param span_end_encoder_after: ``Seq2SeqEncoder`` to encode the sequence for after location end prediction
:param dropout:
:param mask_lstms:
:param initializer: ``InitializerApplicator`` We will use this to initialize the parameters in the model
Sample commandline
------------------
python propara/run.py train -s /output_folder experiment_config/ProGlobal.json
"""
super(ProGlobal, self).__init__(vocab)
# embedders for text, word positions, and sentence indicators
self._text_field_embedder = text_field_embedder
self._pos_field_embedder = pos_field_embedder
self._sent_pos_field_embedder = sent_pos_field_embedder
# bi-LSTM: to generate the contextual embedding
self._modeling_layer = modeling_layer
modeling_dim = modeling_layer.get_output_dim()
# three category classifier for before location
self._category_before_predictor = torch.nn.Linear(modeling_dim, 3)
# LSTM encoder for before location end: encode the contextual embedding and before location start scores
self._span_end_encoder_before = span_end_encoder_before
# linear function for before location start
span_start_before_input_dim = modeling_dim
self._span_start_predictor_before = TimeDistributed(torch.nn.Linear(span_start_before_input_dim, 1))
# linear function for before location end
span_end_before_encoding_dim = span_end_encoder_before.get_output_dim()
span_end_before_input_dim = modeling_dim + span_end_before_encoding_dim
self._span_end_predictor_before = TimeDistributed(torch.nn.Linear(span_end_before_input_dim, 1))
# three category classifier for after location
self._category_after_predictor = torch.nn.Linear(modeling_dim+3, 3)
# LSTM encoder for after location start: encode the contextual embedding and
# previous before location start scores
self._span_start_encoder_after = span_start_encoder_after
# linear function for after location start
span_start_after_encoding_dim = span_start_encoder_after.get_output_dim()
span_start_after_input_dim = modeling_dim + span_start_after_encoding_dim
self._span_start_predictor_after = TimeDistributed(torch.nn.Linear(span_start_after_input_dim, 1))
# LSTM encoder for after location end: encode the contextual embedding and
# current before location start scores
self._span_end_encoder_after = span_end_encoder_after
span_end_after_encoding_dim = span_end_encoder_after.get_output_dim()
span_end_after_input_dim = modeling_dim + span_end_after_encoding_dim
# linear function for after location end
self._span_end_predictor_after = TimeDistributed(torch.nn.Linear(span_end_after_input_dim, 1))
self._dropout = torch.nn.Dropout(p=dropout)
self._mask_lstms = mask_lstms
initializer(self)
def forward(self, tokens_list: Dict[str, torch.LongTensor], positions_list: Dict[str, torch.LongTensor],
sent_positions_list: Dict[str, torch.LongTensor],
before_loc_start: torch.IntTensor = None, before_loc_end: torch.IntTensor = None,
after_loc_start_list: torch.IntTensor = None, after_loc_end_list: torch.IntTensor = None,
before_category: torch.IntTensor = None, after_category_list: torch.IntTensor = None,
before_category_mask: torch.IntTensor = None, after_category_mask_list: torch.IntTensor = None
) -> Dict[str, torch.Tensor]:
"""
:param tokens_list: Dict[str, torch.LongTensor], required
The output of ``TextField.as_array()``, which should typically be passed directly to a
``TextFieldEmbedder``. This output is a dictionary mapping keys to ``TokenIndexer``
tensors. At its most basic, using a ``SingleIdTokenIndexer`` this is: ``{"tokens":
Tensor(batch_size, num_tokens)}``. This dictionary will have the same keys as were used
for the ``TokenIndexers`` when you created the ``TextField`` representing your
sequence. The dictionary is designed to be passed directly to a ``TextFieldEmbedder``,
which knows how to combine different word representations into a single vector per
token in your input.
:param positions_list: same as tokens_list
:param sent_positions_list: same as tokens_list
:param before_loc_start: torch.IntTensor = None, required
An integer ``IndexField`` representation of the before location start
:param before_loc_end: torch.IntTensor = None, required
An integer ``IndexField`` representation of the before location end
:param after_loc_start_list: torch.IntTensor = None, required
A list of integers ``ListField (IndexField)`` representation of the list of after location starts
along the sequence of steps
:param after_loc_end_list: torch.IntTensor = None, required
A list of integers ``ListField (IndexField)`` representation of the list of after location ends
along the sequence of steps
:param before_category: torch.IntTensor = None, required
An integer ``IndexField`` representation of the before location category
:param after_category_list: torch.IntTensor = None, required
A list of integers ``ListField (IndexField)`` representation of the list of after location categories
along the sequence of steps
:param before_category_mask: torch.IntTensor = None, required
An integer ``IndexField`` representation of whether the before location is known or not (0/1)
:param after_category_mask_list: torch.IntTensor = None, required
A list of integers ``ListField (IndexField)`` representation of the list of whether after location is
known or not for each step along the sequence of steps
:return:
An output dictionary consisting of:
best_span: torch.FloatTensor
A tensor of shape ``()``
true_span: torch.FloatTensor
loss: torch.FloatTensor
"""
# batchsize * listLength * paragraphSize * embeddingSize
input_embedding_paragraph = self._text_field_embedder(tokens_list)
input_pos_embedding_paragraph = self._pos_field_embedder(positions_list)
input_sent_pos_embedding_paragraph = self._sent_pos_field_embedder(sent_positions_list)
# batchsize * listLength * paragraphSize * (embeddingSize*2)
embedding_paragraph = torch.cat([input_embedding_paragraph, input_pos_embedding_paragraph,
input_sent_pos_embedding_paragraph], dim=-1)
# batchsize * listLength * paragraphSize, this mask is shared with the text fields and sequence label fields
para_mask = util.get_text_field_mask(tokens_list, num_wrapping_dims=1).float()
# batchsize * listLength , this mask is shared with the index fields
para_index_mask, para_index_mask_indices = torch.max(para_mask, 2)
# apply mask to update the index values, padded instances will be 0
after_loc_start_list = (after_loc_start_list.float() * para_index_mask.unsqueeze(2)).long()
after_loc_end_list = (after_loc_end_list.float() * para_index_mask.unsqueeze(2)).long()
after_category_list = (after_category_list.float() * para_index_mask.unsqueeze(2)).long()
after_category_mask_list = (after_category_mask_list.float() * para_index_mask.unsqueeze(2)).long()
batch_size, list_size, paragraph_size, input_dim = embedding_paragraph.size()
# to store the values passed to next step
tmp_category_probability = torch.zeros(batch_size, 3)
tmp_start_probability = torch.zeros(batch_size, paragraph_size)
loss = 0
# store the predict logits for the whole lists
category_predict_logits_after_list = torch.rand(batch_size, list_size, 3)
best_span_after_list = torch.rand(batch_size, list_size, 2)
for index in range(list_size):
# get one slice of step for prediction
embedding_paragraph_slice = embedding_paragraph[:, index, :, :].squeeze(1)
para_mask_slice = para_mask[:, index, :].squeeze(1)
para_lstm_mask_slice = para_mask_slice if self._mask_lstms else None
para_index_mask_slice = para_index_mask[:, index]
after_category_mask_slice = after_category_mask_list[:, index, :].squeeze()
# bi-LSTM: generate the contextual embeddings for the current step
# size: batchsize * paragraph_size * modeling_layer_hidden_size
encoded_paragraph = self._dropout(self._modeling_layer(embedding_paragraph_slice, para_lstm_mask_slice))
# max-pooling output for three category classification
category_input, category_input_indices = torch.max(encoded_paragraph, 1)
modeling_dim = encoded_paragraph.size(-1)
span_start_input = encoded_paragraph
# predict the initial before location state
if index == 0:
# three category classification for initial before location
category_predict_logits_before = self._category_before_predictor(category_input)
tmp_category_probability = category_predict_logits_before
'''Model the before_loc prediction'''
# predict the initial before location start scores
# shape: batchsize * paragraph_size
span_start_logits_before = self._span_start_predictor_before(span_start_input).squeeze(-1)
# shape: batchsize * paragraph_size
span_start_probs_before = util.masked_softmax(span_start_logits_before, para_mask_slice)
tmp_start_probability = span_start_probs_before
# shape: batchsize * hiddensize
span_start_representation_before = util.weighted_sum(encoded_paragraph, span_start_probs_before)
# Shape: (batch_size, passage_length, modeling_dim)
tiled_start_representation_before = span_start_representation_before.unsqueeze(1).expand(batch_size,
paragraph_size,
modeling_dim)
# incorporate the original contextual embeddings and weighted sum vector from location start prediction
# shape: batchsize * paragraph_size * 2hiddensize
span_end_representation_before = torch.cat([encoded_paragraph,
tiled_start_representation_before], dim=-1)
# Shape: (batch_size, passage_length, encoding_dim)
encoded_span_end_before = self._dropout(
self._span_end_encoder_before(span_end_representation_before, para_lstm_mask_slice))
# initial before location end prediction
encoded_span_end_before = torch.cat([encoded_paragraph, encoded_span_end_before], dim=-1)
# Shape: (batch_size, passage_length, encoding_dim * 4 + span_end_encoding_dim)
span_end_logits_before = self._span_end_predictor_before(encoded_span_end_before).squeeze(-1)
span_end_probs_before = util.masked_softmax(span_end_logits_before, para_mask_slice)
# best_span_bef = self._get_best_span(span_start_logits_bef, span_end_logits_bef)
best_span_before, best_span_before_start, best_span_before_end, best_span_before_real = \
self._get_best_span_single_extend(span_start_logits_before, span_end_logits_before,
category_predict_logits_before, before_category_mask)
# compute the loss for initial bef location three-category classification
before_null_pred = softmax(category_predict_logits_before)
before_null_pred_values, before_null_pred_indices = torch.max(before_null_pred, 1)
loss += nll_loss(before_null_pred, before_category.squeeze(-1))
# compute the loss for initial bef location start/end prediction
before_loc_start_pred = util.masked_softmax(span_start_logits_before, para_mask_slice)
logpy_before_start = torch.gather(before_loc_start_pred, 1, before_loc_start).view(-1).float()
before_category_mask = before_category_mask.float()
loss += -(logpy_before_start * before_category_mask).mean()
before_loc_end_pred = util.masked_softmax(span_end_logits_before, para_mask_slice)
logpy_before_end = torch.gather(before_loc_end_pred, 1, before_loc_end).view(-1)
loss += -(logpy_before_end * before_category_mask).mean()
# get the real predicted location spans
# convert category output (Null and Unk) into spans ((-2,-2) or (-1, -1))
before_loc_start_real = self._get_real_spans_extend(before_loc_start, before_category,
before_category_mask)
before_loc_end_real = self._get_real_spans_extend(before_loc_end, before_category,
before_category_mask)
true_span_before = torch.stack([before_loc_start_real, before_loc_end_real], dim=-1)
true_span_before = true_span_before.squeeze(1)
# input for (after location) three category classification
category_input_after = torch.cat((category_input, tmp_category_probability), dim=1)
category_predict_logits_after = self._category_after_predictor(category_input_after)
tmp_category_probability = category_predict_logits_after
# copy the predict logits for the index of the list
category_predict_logits_after_tmp = category_predict_logits_after.unsqueeze(1)
category_predict_logits_after_list[:, index, :] = category_predict_logits_after_tmp.data
''' Model the after_loc prediction '''
# after location start prediction: takes contextual embeddings and weighted sum vector as input
# shape: batchsize * hiddensize
prev_start = util.weighted_sum(category_input, tmp_start_probability)
tiled_prev_start = prev_start.unsqueeze(1).expand(batch_size, paragraph_size, modeling_dim)
span_start_input_after = torch.cat((span_start_input, tiled_prev_start), dim=2)
encoded_start_input_after = self._dropout(
self._span_start_encoder_after(span_start_input_after, para_lstm_mask_slice))
span_start_input_after_cat = torch.cat([encoded_paragraph, encoded_start_input_after], dim=-1)
# predict the after location start
span_start_logits_after = self._span_start_predictor_after(span_start_input_after_cat).squeeze(-1)
# shape: batchsize * paragraph_size
span_start_probs_after = util.masked_softmax(span_start_logits_after, para_mask_slice)
tmp_start_probability = span_start_probs_after
# after location end prediction: takes contextual embeddings and weight sum vector as input
# shape: batchsize * hiddensize
span_start_representation_after = util.weighted_sum(encoded_paragraph, span_start_probs_after)
# Tensor Shape: (batch_size, passage_length, modeling_dim)
tiled_start_representation_after = span_start_representation_after.unsqueeze(1).expand(batch_size,
paragraph_size,
modeling_dim)
# shape: batchsize * paragraph_size * 2hiddensize
span_end_representation_after = torch.cat([encoded_paragraph, tiled_start_representation_after], dim=-1)
# Tensor Shape: (batch_size, passage_length, encoding_dim)
encoded_span_end_after = self._dropout(self._span_end_encoder_after(span_end_representation_after,
para_lstm_mask_slice))
encoded_span_end_after = torch.cat([encoded_paragraph, encoded_span_end_after], dim=-1)
# Shape: (batch_size, passage_length, encoding_dim * 4 + span_end_encoding_dim)
span_end_logits_after = self._span_end_predictor_after(encoded_span_end_after).squeeze(-1)
span_end_probs_after = util.masked_softmax(span_end_logits_after, para_mask_slice)
# get the best span for after location prediction
best_span_after, best_span_after_start, best_span_after_end, best_span_after_real = \
self._get_best_span_single_extend(span_start_logits_after, span_end_logits_after,
category_predict_logits_after, after_category_mask_slice)
# copy current best span to the list for final evaluation
best_span_after_list[:, index, :] = best_span_after.data.view(batch_size, 1, 2)
""" Compute the Loss for this slice """
after_category_mask = after_category_mask_slice.float().squeeze(-1) # batchsize
after_category_slice = after_category_list[:, index, :] # batchsize * 1
after_loc_start_slice = after_loc_start_list[:, index, :]
after_loc_end_slice = after_loc_end_list[:, index, :]
# compute the loss for (after location) three category classification
para_index_mask_slice_tiled = para_index_mask_slice.unsqueeze(1).expand(para_index_mask_slice.size(0), 3)
after_category_pred = util.masked_softmax(category_predict_logits_after, para_index_mask_slice_tiled)
logpy_after_category = torch.gather(after_category_pred, 1, after_category_slice).view(-1)
loss += -(logpy_after_category * para_index_mask_slice).mean()
# compute the loss for location start/end prediction
after_loc_start_pred = util.masked_softmax(span_start_logits_after, para_mask_slice)
logpy_after_start = torch.gather(after_loc_start_pred, 1, after_loc_start_slice).view(-1)
loss += -(logpy_after_start * after_category_mask).mean()
after_loc_end_pred = util.masked_softmax(span_end_logits_after, para_mask_slice)
logpy_after_end = torch.gather(after_loc_end_pred, 1, after_loc_end_slice).view(-1)
loss += -(logpy_after_end * after_category_mask).mean()
# for evaluation (combine the all annotations)
after_loc_start_real = self._get_real_spans_extend_list(after_loc_start_list, after_category_list,
after_category_mask_list)
after_loc_end_real = self._get_real_spans_extend_list(after_loc_end_list, after_category_list,
after_category_mask_list)
true_span_after = torch.stack([after_loc_start_real, after_loc_end_real], dim=-1)
true_span_after = true_span_after.squeeze(2)
best_span_after_list = Variable(best_span_after_list)
true_span_after = true_span_after.view(true_span_after.size(0) * true_span_after.size(1),
true_span_after.size(2)).float()
para_index_mask_tiled = para_index_mask.view(-1, 1)
para_index_mask_tiled = para_index_mask_tiled.expand(para_index_mask_tiled.size(0), 2)
para_index_mask_tiled2 = para_index_mask.unsqueeze(2).expand(para_index_mask.size(0),
para_index_mask.size(1), 2)
after_category_mask_list_tiled = after_category_mask_list.expand(batch_size, list_size, 2)
after_category_mask_list_tiled = after_category_mask_list_tiled*para_index_mask_tiled2.long()
# merge all the best spans predicted for the current batch, filter out the padded instances
merged_sys_span, merged_gold_span = self._get_merged_spans(true_span_before, best_span_before, true_span_after,
best_span_after_list, para_index_mask_tiled)
output_dict = {}
output_dict["best_span"] = merged_sys_span.view(1, merged_sys_span.size(0)*merged_sys_span.size(1))
output_dict["true_span"] = merged_gold_span.view(1, merged_gold_span.size(0)*merged_gold_span.size(1))
output_dict["loss"] = loss
return output_dict
# merge system spans and gold spans for a batchsize of lists, based on mask
def _get_merged_spans(self, gold_span_before: Variable, sys_span_before: Variable, gold_span_after: Variable,
sys_span_after: Variable, mask: Variable):
batchsize, listsize, d = sys_span_after.size()
gold_span_before = gold_span_before.numpy()
gold_span_after = gold_span_after.numpy()
sys_span_before = sys_span_before.data.cpu().numpy()
sys_span_after = sys_span_after.data.cpu().numpy()
mask = mask.data.cpu().numpy()
merged_sys_span = []
merged_gold_span = []
for i in range(batchsize):
merged_sys_span.append(sys_span_before[i])
merged_gold_span.append(gold_span_before[i])
for j in range(listsize):
if mask[i*listsize+j][0]==1:
merged_sys_span.append(sys_span_after[i][j])
merged_gold_span.append(gold_span_after[i*listsize+j])
merged_sys_span_new = np.zeros((len(merged_sys_span), 2), dtype=np.long)
merged_gold_span_new = np.zeros((len(merged_gold_span), 2), dtype=np.long)
for i in range(len(merged_sys_span)):
tmp = merged_sys_span[i]
merged_sys_span_new[i] = tmp
tmp1 = merged_gold_span[i]
merged_gold_span_new[i] = tmp1
merged_sys_span = torch.from_numpy(merged_sys_span_new)
merged_gold_span = torch.from_numpy(merged_gold_span_new)
return merged_sys_span, merged_gold_span
# convert null to -2, unk to -1, return all the location spans
def _get_real_spans_extend(self, loc_anno: Variable, category_anno: Variable, category_mask: Variable):
batch_size, v = loc_anno.size()
real_loc_anno = np.zeros((batch_size, v), dtype=np.long)
loc_anno = loc_anno.data.cpu().numpy()
category_anno = category_anno.data.cpu().numpy()
for b in range(batch_size):
if category_anno[b, 0] == 1:
real_loc_anno[b, 0] = -1
elif category_anno[b, 0] == 2:
real_loc_anno[b, 0] = -2
elif category_anno[b, 0] == 0:
real_loc_anno[b, 0] = loc_anno[b, 0]
real_loc_anno = torch.from_numpy(real_loc_anno)
return real_loc_anno
# convert null to -2, unk to -1, return all the location spans
def _get_real_spans_extend_list(self, loc_anno: Variable, category_anno: Variable, category_mask: Variable):
batch_size, list_size, v = loc_anno.size()
real_loc_anno = np.zeros((batch_size, list_size, v), dtype=np.long)
loc_anno = loc_anno.data.cpu().numpy() # batch_size * list_size * 1
category_anno = category_anno.data.cpu().numpy()
for b in range(batch_size):
for l in range(list_size):
if category_anno[b, l, 0] == 1:
real_loc_anno[b, l, 0] = -1
elif category_anno[b, l, 0] == 2:
real_loc_anno[b, l, 0] = -2
elif category_anno[b, l, 0] == 0:
real_loc_anno[b, l, 0] = loc_anno[b, l, 0]
real_loc_anno = torch.from_numpy(real_loc_anno)
return real_loc_anno
# convert null to -2, unk to -1, return all the location spans
def _get_best_span_single_extend(self, span_start_logits: Variable, span_end_logits: Variable,
category_predict_logits: Variable, category_mask: Variable):
if span_start_logits.dim() != 2 or span_end_logits.dim() != 2:
raise ValueError("Input shapes must be (batch_size, passage_length)")
batch_size, passage_length = span_start_logits.size()
max_span_log_prob = [-1e20] * batch_size
span_start_argmax = [0] * batch_size
best_word_span = Variable(span_start_logits.data.new()
.resize_(batch_size, 2).fill_(0)).long()
best_start_span = Variable(span_start_logits.data.new()
.resize_(batch_size).fill_(0)).float()
best_end_span = Variable(span_start_logits.data.new()
.resize_(batch_size).fill_(0)).float()
span_start_logits = span_start_logits.data.cpu().numpy()
span_end_logits = span_end_logits.data.cpu().numpy()
category_predict_logits = category_predict_logits.data.cpu().numpy()
category_mask = category_mask.data.cpu().numpy()
category_best_pos = np.argmax(category_predict_logits, axis=1)
real_loc_size = 0
for i in range(batch_size):
if category_mask[i]==1:
real_loc_size = real_loc_size+1
real_best_word_span = Variable(torch.rand(real_loc_size, 2).fill_(0)).long()
real_index = 0
for b in range(batch_size): # pylint: disable=invalid-name
for j in range(passage_length):
val1 = span_start_logits[b, span_start_argmax[b]]
if val1 < span_start_logits[b, j]:
span_start_argmax[b] = j
val1 = span_start_logits[b, j]
val2 = span_end_logits[b, j]
span_length = j - span_start_argmax[b]
if val1 + val2 > max_span_log_prob[b] and span_length < 6:
best_word_span[b, 0] = span_start_argmax[b]
best_word_span[b, 1] = j
max_span_log_prob[b] = val1 + val2
if category_best_pos[b] == 1:
best_word_span[b, 0] = -1
best_word_span[b, 1] = -1
elif category_best_pos[b] == 2:
best_word_span[b, 0] = -2
best_word_span[b, 1] = -2
if category_mask[b] == 1:
real_best_word_span[real_index, 0] = best_word_span[b, 0]
real_best_word_span[real_index, 1] = best_word_span[b, 1]
real_index += 1
best_start_span[b] = best_word_span[b, 0]
best_end_span[b] = best_word_span[b, 1]
return best_word_span, best_start_span, best_end_span, real_best_word_span
# to get the best span based on location start and location end scores (maximal answer length is 5)
def _get_best_span(self, span_start_logits: Variable, span_end_logits: Variable) -> Variable:
if span_start_logits.dim() != 2 or span_end_logits.dim() != 2:
raise ValueError("Input shapes must be (batch_size, passage_length)")
batch_size, passage_length = span_start_logits.size()
max_span_log_prob = [-1e20] * batch_size
span_start_argmax = [0] * batch_size
best_word_span = Variable(span_start_logits.data.new()
.resize_(batch_size, 2).fill_(0)).long()
span_start_logits = span_start_logits.data.cpu().numpy()
span_end_logits = span_end_logits.data.cpu().numpy()
for b in range(batch_size): # pylint: disable=invalid-name
for j in range(passage_length):
# get the current max value till j
val1 = span_start_logits[b, span_start_argmax[b]]
if val1 < span_start_logits[b, j]:
span_start_argmax[b] = j
val1 = span_start_logits[b, j]
# end value should start from j
val2 = span_end_logits[b, j]
if val1 + val2 > max_span_log_prob[b]:
best_word_span[b, 0] = span_start_argmax[b]
best_word_span[b, 1] = j
max_span_log_prob[b] = val1 + val2
return best_word_span
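    # Illustration: for start logits [0.1, 2.0, 0.5] and end logits [0.3, 0.1, 1.5]
    # the loop above returns the span (1, 2) with score 2.0 + 1.5 = 3.5. Keeping a
    # running argmax of the start logits while scanning end positions left to right
    # guarantees start <= end and keeps the search O(passage_length) instead of
    # quadratic.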
@classmethod
def from_params(cls, vocab: Vocabulary, params: Params) -> 'ProGlobal':
token_embedder_params = params.pop("text_field_embedder")
pos_embedder_params = params.pop("pos_field_embedder")
sent_pos_embedder_params = params.pop("sent_pos_field_embedder")
text_field_embedder = TextFieldEmbedder.from_params(vocab, token_embedder_params)
pos_field_embedder = TextFieldEmbedder.from_params(vocab, pos_embedder_params)
sent_pos_field_embedder = TextFieldEmbedder.from_params(vocab, sent_pos_embedder_params)
modeling_layer = Seq2SeqEncoder.from_params(params.pop("modeling_layer"))
span_end_encoder_before = Seq2SeqEncoder.from_params(params.pop("span_end_encoder_bef"))
span_start_encoder_after = Seq2SeqEncoder.from_params(params.pop("span_start_encoder_aft"))
span_end_encoder_after = Seq2SeqEncoder.from_params(params.pop("span_end_encoder_aft"))
dropout = params.pop('dropout', 0.2)
init_params = params.pop('initializer', None)
initializer = (InitializerApplicator.from_params(init_params)
if init_params is not None
else InitializerApplicator())
params.assert_empty(cls.__name__)
return cls(vocab=vocab,
text_field_embedder=text_field_embedder,
pos_field_embedder=pos_field_embedder,
sent_pos_field_embedder=sent_pos_field_embedder,
modeling_layer=modeling_layer,
span_start_encoder_after=span_start_encoder_after,
span_end_encoder_before=span_end_encoder_before,
span_end_encoder_after=span_end_encoder_after,
dropout=dropout,
initializer=initializer)
|
from PySide import QtCore, QtGui
# https://qt.gitorious.org/pyside/pyside-examples/source/060dca8e4b82f301dfb33a7182767eaf8ad3d024:examples/richtext/syntaxhighlighter.py
class Highlighter(QtGui.QSyntaxHighlighter):
'''Perform simple syntax highlighting by subclassing the
QSyntaxHighlighter class and describing highlighting
rules using regular expressions.
'''
def __init__(self, parent=None):
super(Highlighter, self).__init__(parent)
keywordFormat = QtGui.QTextCharFormat()
keywordFormat.setForeground(QtCore.Qt.darkBlue)
keywordFormat.setFontWeight(QtGui.QFont.Bold)
keywordPatterns = ["\\bchar\\b", "\\bclass\\b", "\\bconst\\b",
"\\bdouble\\b", "\\benum\\b", "\\bexplicit\\b", "\\bfriend\\b",
"\\binline\\b", "\\bint\\b", "\\blong\\b", "\\bnamespace\\b",
"\\boperator\\b", "\\bprivate\\b", "\\bprotected\\b",
"\\bpublic\\b", "\\bshort\\b", "\\bsignals\\b", "\\bsigned\\b",
"\\bslots\\b", "\\bstatic\\b", "\\bstruct\\b",
"\\btemplate\\b", "\\btypedef\\b", "\\btypename\\b",
"\\bunion\\b", "\\bunsigned\\b", "\\bvirtual\\b", "\\bvoid\\b",
"\\bvolatile\\b", "\\bfinal\\b", "\\bsynthetic\\b", "\\bextends\\b",
"\\bthis\\b", "\\bswitch\\b", "\\bcase\\b", "\\bdefault\\b",
"\\breturn\\b", "\\bsuper\\b", "\\btry\\b", "\\bcatch\\b",
"\\bpackage\\b", "\\bif\\b", "\\bthen\\b", "\\belse\\b",
"\\bnull\\b", "\\bbreak\\b", "\\bimplements\\b"
]
self.highlightingRules = [(QtCore.QRegExp(pattern), keywordFormat)
for pattern in keywordPatterns]
classFormat = QtGui.QTextCharFormat()
classFormat.setFontWeight(QtGui.QFont.Bold)
classFormat.setForeground(QtCore.Qt.darkMagenta)
self.highlightingRules.append((QtCore.QRegExp("\\bQ[A-Za-z]+\\b"),
classFormat))
singleLineCommentFormat = QtGui.QTextCharFormat()
singleLineCommentFormat.setForeground(QtCore.Qt.red)
self.highlightingRules.append((QtCore.QRegExp("//[^\n]*"),
singleLineCommentFormat))
self.multiLineCommentFormat = QtGui.QTextCharFormat()
self.multiLineCommentFormat.setForeground(QtCore.Qt.red)
quotationFormat = QtGui.QTextCharFormat()
quotationFormat.setForeground(QtCore.Qt.darkGreen)
self.highlightingRules.append((QtCore.QRegExp("\".*\""),
quotationFormat))
functionFormat = QtGui.QTextCharFormat()
functionFormat.setFontItalic(True)
functionFormat.setForeground(QtCore.Qt.blue)
self.highlightingRules.append((QtCore.QRegExp("\\b[A-Za-z0-9_]+(?=\\()"),
functionFormat))
self.commentStartExpression = QtCore.QRegExp("/\\*")
self.commentEndExpression = QtCore.QRegExp("\\*/")
def highlightBlock(self, text):
for pattern, format in self.highlightingRules:
expression = QtCore.QRegExp(pattern)
index = expression.indexIn(text)
while index >= 0:
length = expression.matchedLength()
self.setFormat(index, length, format)
index = expression.indexIn(text, index + length)
self.setCurrentBlockState(0)
startIndex = 0
if self.previousBlockState() != 1:
startIndex = self.commentStartExpression.indexIn(text)
while startIndex >= 0:
endIndex = self.commentEndExpression.indexIn(text, startIndex)
if endIndex == -1:
self.setCurrentBlockState(1)
commentLength = len(text) - startIndex
else:
commentLength = endIndex - startIndex + self.commentEndExpression.matchedLength()
self.setFormat(startIndex, commentLength,
self.multiLineCommentFormat)
startIndex = self.commentStartExpression.indexIn(text,
                    startIndex + commentLength)
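# Typical usage sketch (assuming a PySide QTextEdit):
#   editor = QtGui.QTextEdit()
#   highlighter = Highlighter(editor.document())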
|
from django.forms import *
from django.forms.models import BaseModelFormSet
from django.forms.models import BaseInlineFormSet
from django.forms.models import ModelChoiceIterator
from django.forms.models import InlineForeignKeyField
from django.utils.text import capfirst
from .formsets import BaseFormSet
from django.db.models import fields
from dojango.forms.fields import *
from dojango.forms.widgets import DojoWidgetMixin, Textarea, Select, SelectMultiple, HiddenInput
__all__ = (
'ModelForm', 'BaseModelForm', 'model_to_dict', 'fields_for_model',
'ModelChoiceField', 'ModelMultipleChoiceField',
)
class ModelChoiceField(DojoFieldMixin, models.ModelChoiceField):
"""
Overwritten 'ModelChoiceField' using the 'DojoFieldMixin' functionality.
"""
widget = Select
class ModelMultipleChoiceField(DojoFieldMixin, models.ModelMultipleChoiceField):
"""
    Overwritten 'ModelMultipleChoiceField' using the 'DojoFieldMixin' functionality.
"""
widget = SelectMultiple
# Fields #####################################################################
class InlineForeignKeyField(DojoFieldMixin, InlineForeignKeyField, Field):
"""
    Overwritten InlineForeignKeyField to use the dojango HiddenInput as widget.
"""
widget = HiddenInput
# our customized model field => form field map
# here it is defined which form field is used by which model field, when creating a ModelForm
MODEL_TO_FORM_FIELD_MAP = (
# (model_field, form_field, [optional widget])
# the order of these fields is very important for inherited model fields
# e.g. the CharField must be checked at last, because several other
# fields are a subclass of it.
(fields.CommaSeparatedIntegerField, CharField),
(fields.DateTimeField, DateTimeField), # must be in front of the DateField
(fields.DateField, DateField),
(fields.DecimalField, DecimalField),
(fields.EmailField, EmailField),
(fields.FilePathField, FilePathField),
(fields.FloatField, FloatField),
(fields.related.ForeignKey, ModelChoiceField),
(fields.files.ImageField, ImageField),
(fields.files.FileField, FileField),
(fields.GenericIPAddressField, GenericIPAddressField),
(fields.related.ManyToManyField, ModelMultipleChoiceField),
(fields.NullBooleanField, CharField),
(fields.BooleanField, BooleanField),
(fields.PositiveSmallIntegerField, IntegerField),
(fields.PositiveIntegerField, IntegerField),
(fields.SlugField, SlugField),
(fields.SmallIntegerField, IntegerField),
(fields.IntegerField, IntegerField),
(fields.TimeField, TimeField),
(fields.URLField, URLField),
(fields.TextField, CharField, Textarea),
(fields.CharField, CharField),
)
def formfield_function(field, **kwargs):
"""
Custom formfield function, so we can inject our own form fields. The
mapping of model fields to form fields is defined in 'MODEL_TO_FORM_FIELD_MAP'.
It uses the default django mapping as fallback, if there is no match in our
custom map.
field -- a model field
"""
for field_map in MODEL_TO_FORM_FIELD_MAP:
if isinstance(field, field_map[0]):
defaults = {}
if field.choices:
                # the normal django form field forms.TypedChoiceField is hard-wired
                # within the original db/models/fields.py.
# If we use our custom Select widget, we also have to pass in
# some additional validation field attributes.
defaults['widget'] = Select(attrs={
'extra_field_attrs':{
'required':not field.blank,
'help_text':field.help_text,
}
})
elif len(field_map) == 3:
defaults['widget']=field_map[2]
defaults.update(kwargs)
return field.formfield(form_class=field_map[1], **defaults)
# return the default formfield, if there is no equivalent
return field.formfield(**kwargs)
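# Usage sketch (the Article model is hypothetical): for a django TextField the
# map above selects the dojango CharField with the dojango Textarea widget, so
#   ArticleForm = modelform_factory(Article, fields='__all__')
# leaves ArticleForm.base_fields['body'].widget as a dojango Textarea instance.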
# ModelForms #################################################################
def fields_for_model(*args, **kwargs):
"""Changed fields_for_model function, where we use our own formfield_callback"""
kwargs["formfield_callback"] = formfield_function
return models.fields_for_model(*args, **kwargs)
class ModelFormMetaclass(models.ModelFormMetaclass):
"""
Overwritten 'ModelFormMetaClass'. We attach our own formfield generation
function.
"""
def __new__(cls, name, bases, attrs):
# this is how we can replace standard django form fields with dojo ones
attrs["formfield_callback"] = formfield_function
return super(ModelFormMetaclass, cls).__new__(cls, name, bases, attrs)
class ModelForm(models.ModelForm, metaclass=ModelFormMetaclass):
"""
Overwritten 'ModelForm' using the metaclass defined above.
"""
def modelform_factory(*args, **kwargs):
"""Changed modelform_factory function, where we use our own formfield_callback"""
kwargs["formfield_callback"] = formfield_function
kwargs["form"] = ModelForm
return models.modelform_factory(*args, **kwargs)
# ModelFormSets ##############################################################
class BaseModelFormSet(BaseModelFormSet, BaseFormSet):
def add_fields(self, form, index):
"""Overwritten BaseModelFormSet using the dojango BaseFormSet and
the ModelChoiceField.
NOTE: This method was copied from django 1.3 beta 1"""
from django.db.models import AutoField, OneToOneField, ForeignKey
self._pk_field = pk = self.model._meta.pk
def pk_is_not_editable(pk):
return ((not pk.editable) or (pk.auto_created or isinstance(pk, AutoField))
or (pk.rel and pk.rel.parent_link and pk_is_not_editable(pk.rel.to._meta.pk)))
if pk_is_not_editable(pk) or pk.name not in form.fields:
if form.is_bound:
pk_value = form.instance.pk
else:
try:
if index is not None:
pk_value = self.get_queryset()[index].pk
else:
pk_value = None
except IndexError:
pk_value = None
if isinstance(pk, OneToOneField) or isinstance(pk, ForeignKey):
qs = pk.rel.to._default_manager.get_queryset()
else:
qs = self.model._default_manager.get_queryset()
qs = qs.using(form.instance._state.db)
form.fields[self._pk_field.name] = ModelChoiceField(qs, initial=pk_value, required=False, widget=HiddenInput)
BaseFormSet.add_fields(self, form, index)
def modelformset_factory(*args, **kwargs):
"""Changed modelformset_factory function, where we use our own formfield_callback"""
kwargs["formfield_callback"] = kwargs.get("formfield_callback", formfield_function)
kwargs["formset"] = kwargs.get("formset", BaseModelFormSet)
return models.modelformset_factory(*args, **kwargs)
# InlineFormSets #############################################################
class BaseInlineFormSet(BaseInlineFormSet, BaseModelFormSet):
"""Overwritten BaseInlineFormSet using the dojango InlineForeignKeyFields.
NOTE: This method was copied from django 1.1"""
def add_fields(self, form, index):
super(BaseInlineFormSet, self).add_fields(form, index)
if self._pk_field == self.fk:
form.fields[self._pk_field.name] = InlineForeignKeyField(self.instance, pk_field=True)
else:
kwargs = {
'label': getattr(form.fields.get(self.fk.name), 'label', capfirst(self.fk.verbose_name))
}
if self.fk.rel.field_name != self.fk.rel.to._meta.pk.name:
kwargs['to_field'] = self.fk.rel.field_name
form.fields[self.fk.name] = InlineForeignKeyField(self.instance, **kwargs)
def inlineformset_factory(*args, **kwargs):
"""Changed inlineformset_factory function, where we use our own formfield_callback"""
kwargs["formfield_callback"] = kwargs.get("formfield_callback", formfield_function)
kwargs["formset"] = kwargs.get("formset", BaseInlineFormSet)
return models.inlineformset_factory(*args, **kwargs)
|
from __future__ import absolute_import
from django.utils import timezone
from celery.schedules import crontab
from celery.task import periodic_task
from .models import Grantee
@periodic_task(run_every=crontab())
def publish_grantees():
Grantee.objects.filter(status=Grantee.STATUS_READY_FOR_PUBLISH, published_at__lte=timezone.now()) \
.update(status=Grantee.STATUS_PUBLISHED)
|
#!/usr/bin/python3
'''
Abstract:
This is a program to exercise what I learned in CH2.
Usage:
20180329_CH2_3_cheating_among_students.py
Editor:
Jacob975
concept:
Privacy algorithm
    Assume the interviewer does not know the outcome of your coin flips.
    Heads or tails for each flip.
    flip coin -> heads -> tell the truth
              -> tails -> flip the coin again -> say "Yes" for heads, say "No" for tails
##################################
# Python3 #
# This code is made in python3 #
##################################
20180329
####################################
update log
20180329 version alpha 1:
1. I don't know
'''
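# Sanity check of the randomized-response scheme described in the docstring:
# if the true cheating frequency is p, the expected share of "Yes" answers is
#     P(Yes) = 0.5 * p + 0.5 * 0.5 = 0.5 * p + 0.25
# since half the students answer truthfully and the other half answer "Yes" on
# a fair second flip. The observed X = 35 of N = 100 below therefore points to
# p of roughly (0.35 - 0.25) / 0.5 = 0.2.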
import pymc as pm
import numpy as np
import matplotlib.pyplot as plt
import time
from IPython.core.pylabtools import figsize
#--------------------------------------------
# main code
if __name__ == "__main__":
VERBOSE = 0
# measure times
start_time = time.time()
#-----------------------------------
# Initialize variables and constants
N = 100
p = pm.Uniform("freq_cheating", 0, 1)
true_answer = pm.Bernoulli("truths", p, size = N)
    first_coin_flips = pm.Bernoulli("first_flips", 0.5, size = N)
second_coin_flips = pm.Bernoulli("second_flips", 0.5, size = N)
@pm.deterministic
def observed_proportion(t_a = true_answer, fc = first_coin_flips, sc = second_coin_flips):
observed = fc * t_a + (1 - fc)*sc
return observed.sum() / float(N)
#-----------------------------------
# import datasets
X = 35
observations = pm.Binomial("obs", N, observed_proportion, observed = True, value = X)
if VERBOSE>0:
print ("Property of observations:")
print (observations)
print (observations.value)
print (type(observations))
model = pm.Model([p, true_answer, first_coin_flips, second_coin_flips, observed_proportion, observations])
# to be explained in Chapter 3
mcmc = pm.MCMC(model)
mcmc.sample(40000, 15000)
#-----------------------------------
# plot the answer
figsize(12.5, 3)
p_trace = mcmc.trace("freq_cheating")[:]
plt.hist(p_trace, histtype = "stepfilled", normed = True, alpha = 0.85, bins = 30, label = "posterior distribution", color = "#348ABD")
#plt.vlines([.05, .35], [0, 0], [5, 5], alpha = 0.3)
plt.xlim(0, 1)
plt.xlabel("Value of $p$")
plt.ylabel("Density")
plt.title("Posterior distribution of parameter $p$")
plt.legend()
plt.show()
#-----------------------------------
# measuring time
elapsed_time = time.time() - start_time
print ("Exiting Main Program, spending ", elapsed_time, "seconds.")
|
from framework.utils.common_utils import by_css
from pages.page import Page
from tests.testsettings import UI_TEST_TIMEOUT
CANCEL_LINK = by_css('div.ui-dialog[style*="block"] > div.ui-dialog-content > div > a.no_button')
CONFIRM_LINK = by_css('div.ui-dialog[style*="block"] > div.ui-dialog-content > div > a.yes_button')
MESSAGE_LINK = by_css('div.ui-dialog[style*="block"] > div.ui-dialog-content > .warning_message')
class WarningDialog(Page):
def __init__(self, driver, cancel_link=CANCEL_LINK, confirm_link=CONFIRM_LINK, message_link=MESSAGE_LINK):
Page.__init__(self, driver)
self.cancel_link = cancel_link
self.confirm_link = confirm_link
self.message_link = message_link
def cancel(self):
self.driver.find(self.cancel_link).click()
def confirm(self):
self.driver.find(self.confirm_link).click()
def get_message(self):
self.driver.wait_for_element(UI_TEST_TIMEOUT, self.message_link, True)
return self.driver.find(self.message_link).text
|
from cnddh import app
from cnddh.utils import template_checa_permissao
from sqlalchemy import func
import locale
from config import TIMEZONE, LOCALE, EMAIL_LOGIN
@app.context_processor
def inject_functions():
return dict(
checa_permissao = template_checa_permissao
)
@app.template_filter('tamanho')
def length(object):
return len(object)+2
@app.template_filter('datetimeformat')
def datetimeformat(value, format='%H:%M / %d-%m-%Y', blank_message=u"N/A"):
if value:
return value.strftime(format)
else:
return blank_message
@app.template_filter('getdatelocalized')
def getdatelocalized(value):
if value:
from datetime import datetime
from babel.dates import format_datetime,get_timezone
return format_datetime(value,tzinfo=get_timezone(TIMEZONE), locale=LOCALE, format="d 'de' MMMM 'em' HH':'mm")
else:
return u"-"
@app.template_filter('emailfrom')
def emailfrom(value):
return EMAIL_LOGIN
|
from datasets.avmnist.get_data import get_dataloader
import torch.autograd as A
import torch.nn.functional as F
import torch.nn as nn
import torch
from unimodals.common_models import GlobalPooling2D
import sys
import os
sys.path.append(os.getcwd())
# %%
class GP_LeNet(nn.Module):
def __init__(self, args, in_channels):
super(GP_LeNet, self).__init__()
self.conv1 = nn.Conv2d(in_channels, args.channels,
kernel_size=5, padding=2, bias=False)
self.bn1 = nn.BatchNorm2d(int(args.channels))
self.gp1 = GlobalPooling2D()
self.conv2 = nn.Conv2d(
args.channels, 2 * args.channels, kernel_size=5, padding=2, bias=False)
self.bn2 = nn.BatchNorm2d(int(2 * args.channels))
self.gp2 = GlobalPooling2D()
self.conv3 = nn.Conv2d(
2 * args.channels, 4 * args.channels, kernel_size=3, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(int(4 * args.channels))
self.gp3 = GlobalPooling2D()
self.classifier = nn.Sequential(
nn.Linear(int(4 * args.channels), args.num_outputs)
)
# initialization of weights
for m in self.modules():
if isinstance(m, (nn.Conv2d, nn.Linear)):
nn.init.kaiming_uniform_(m.weight)
def forward(self, x):
out1 = F.relu(self.bn1(self.conv1(x)))
out = F.max_pool2d(out1, 2)
gp1 = self.gp1(out1)
out2 = F.relu(self.bn2(self.conv2(out)))
out = F.max_pool2d(out2, 2)
gp2 = self.gp2(out2)
out3 = F.relu(self.bn3(self.conv3(out)))
out = F.max_pool2d(out3, 2)
gp3 = self.gp3(out3)
out = self.classifier(gp3)
return out, gp1, gp2, gp3
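# Note on the forward pass above (added comment): each conv block feeds two paths,
# a max-pooled activation that goes into the next block and a GlobalPooling2D
# summary (gp1..gp3). Only the deepest summary (gp3) is fed to the classifier;
# the intermediate summaries are returned so callers can reuse them, e.g. for
# multimodal fusion as in SimpleAVNet below.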
class GP_LeNet_Deeper(nn.Module):
def __init__(self, args, in_channels):
super(GP_LeNet_Deeper, self).__init__()
self.conv1 = nn.Conv2d(in_channels, args.channels,
kernel_size=5, padding=2, bias=False)
self.bn1 = nn.BatchNorm2d(int(args.channels))
self.gp1 = GlobalPooling2D()
self.conv2 = nn.Conv2d(
args.channels, 2 * args.channels, kernel_size=5, padding=2, bias=False)
self.bn2 = nn.BatchNorm2d(int(2 * args.channels))
self.gp2 = GlobalPooling2D()
self.conv3 = nn.Conv2d(
2 * args.channels, 4 * args.channels, kernel_size=3, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(int(4 * args.channels))
self.gp3 = GlobalPooling2D()
self.conv4 = nn.Conv2d(
4 * args.channels, 8 * args.channels, kernel_size=3, padding=1, bias=False)
self.bn4 = nn.BatchNorm2d(int(8 * args.channels))
self.gp4 = GlobalPooling2D()
self.conv5 = nn.Conv2d(
8 * args.channels, 16 * args.channels, kernel_size=3, padding=1, bias=False)
self.bn5 = nn.BatchNorm2d(int(16 * args.channels))
self.gp5 = GlobalPooling2D()
self.classifier = nn.Sequential(
nn.Linear(int(16 * args.channels), args.num_outputs)
)
# initialization of weights
for m in self.modules():
if isinstance(m, (nn.Conv2d, nn.Linear)):
nn.init.kaiming_uniform_(m.weight)
def forward(self, x):
out1 = F.relu(self.bn1(self.conv1(x)))
out = F.max_pool2d(out1, 2)
gp1 = self.gp1(out)
out2 = F.relu(self.bn2(self.conv2(out)))
out = F.max_pool2d(out2, 2)
gp2 = self.gp2(out2)
out3 = F.relu(self.bn3(self.conv3(out)))
out = F.max_pool2d(out3, 2)
gp3 = self.gp3(out3)
out4 = F.relu(self.bn4(self.conv4(out)))
out = F.max_pool2d(out4, 2)
gp4 = self.gp4(out4)
out5 = F.relu(self.bn5(self.conv5(out)))
out = F.max_pool2d(out5, 2)
gp5 = self.gp5(out5)
out = self.classifier(gp5)
return out, gp1, gp2, gp3, gp4, gp5
class SimpleAVNet(nn.Module):
def __init__(self, args, audio_channels, image_channels):
super(SimpleAVNet, self).__init__()
self.audio_net = GP_LeNet(args, audio_channels)
self.image_net = GP_LeNet(args, image_channels)
self.classifier = nn.Linear(
int(2 * 4 * args.channels), args.num_outputs)
def forward(self, audio, image):
audio_out, audio_gp1, audio_gp2, audio_gp3 = self.audio_net(audio)
image_out, image_gp1, image_gp2, image_gp3 = self.image_net(image)
multimodal_feat = torch.cat((audio_gp3, image_gp3), 1)
out = self.classifier(multimodal_feat)
return out
class SimpleAVNet_Deeper(nn.Module):
def __init__(self, args, audio_channels, image_channels):
super(SimpleAVNet_Deeper, self).__init__()
self.audio_net = GP_LeNet_Deeper(args, audio_channels)
self.image_net = GP_LeNet(args, image_channels)
self.classifier = nn.Linear(int(20 * args.channels), args.num_outputs)
def forward(self, audio, image):
audio_out, audio_gp1, audio_gp2, audio_gp3, audio_gp4, audio_gp5 = self.audio_net(
audio)
image_out, image_gp1, image_gp2, image_gp3 = self.image_net(image)
multimodal_feat = torch.cat((audio_gp5, image_gp3), 1)
out = self.classifier(multimodal_feat)
return out
class Help:
def __init__(self):
self.channels = 3
self.num_outputs = 10
model = SimpleAVNet_Deeper(Help(), 1, 1).cuda()
optim = torch.optim.SGD(model.parameters(), lr=0.1, weight_decay=0.0001)
trains, valids, tests = get_dataloader('/data/yiwei/avmnist/_MFAS/avmnist')
criterion = nn.CrossEntropyLoss()
for ep in range(100):
totalloss = 0.0
batches = 0
for j in trains:
batches += 1
optim.zero_grad()
inputs = [x.float().cuda() for x in j[:-1]]
labels = j[-1].cuda()
preds = model(inputs[1], inputs[0])
loss = criterion(preds, labels)
loss.backward()
optim.step()
totalloss += loss
print("ep "+str(ep) + " train loss "+str(totalloss/batches))
batches = 0
total = 0
corrects = 0
totalloss = 0
with torch.no_grad():
for j in valids:
batches += 1
inputs = [x.float().cuda() for x in j[:-1]]
labels = j[-1].cuda()
preds = model(inputs[1], inputs[0])
loss = criterion(preds, labels)
totalloss += loss
for i in range(len(j[-1])):
total += 1
if torch.argmax(preds[i]).item() == j[-1][i].item():
corrects += 1
print("ep "+str(ep)+" valid loss "+str(totalloss/batches) +
" acc: "+str(float(corrects)/total))
|
import numpy as np
from wssnet.Network.TrainerController import TrainerController
from wssnet.Network.CsvInputHandler import CsvInputHandler
import config
def load_indexes(index_file):
"""
Load patch index file (csv). This is the file that is used to load the patches based on x,y,z index
"""
indexes = np.genfromtxt(index_file, delimiter=',', skip_header=True, dtype='unicode') # 'unicode' or None
return indexes
if __name__ == "__main__":
data_dir = config.DATA_DIR
train_dir = f'{data_dir}/train'
val_dir = f'{data_dir}/val'
test_dir = f'{data_dir}/test'
restore = False
if restore:
model_dir = "[model_dir]"
model_file = "[model_name].h5"
# csv index file
training_file = f'{config.DATA_DIR}/train.csv'
validate_file = f'{config.DATA_DIR}/val.csv'
test_file = f'{config.DATA_DIR}/test.csv'
QUICKSAVE = True
lr_decay = 'cosine'
# Hyperparameters optimisation variables
initial_learning_rate = 1e-4
epochs = 2
batch_size = 16
# Network setting
network_name = 'wssnet'
input_shape = (48,48)
# Load data file and indexes
trainset = load_indexes(training_file)
valset = load_indexes(validate_file)
# ----------------- TensorFlow stuff -------------------
# TRAIN dataset iterator
z = CsvInputHandler(train_dir, True, batch_size)
trainset = z.initialize_dataset(trainset, shuffle=True, n_parallel=None)
# VALIDATION iterator
valdh = CsvInputHandler(val_dir, False, batch_size)
valset = valdh.initialize_dataset(valset, shuffle=True, n_parallel=None)
    # Benchmarking dataset, used to keep track of prediction progress per best model
testset = None
if QUICKSAVE and test_file is not None:
testset = load_indexes(test_file)
        # We use this benchmarking set so we can see the prediction progressing over time
ph = CsvInputHandler(test_dir, False, batch_size)
# No shuffling, so we can save the first batch consistently
testset = ph.initialize_dataset(testset, shuffle=False)
# ------- Main Network ------
print(f"WSSNet {input_shape}, lr {initial_learning_rate}, batch {batch_size}")
network = TrainerController(input_shape, initial_learning_rate, lr_decay, QUICKSAVE, network_name)
network.init_model_dir()
if restore:
print("Restoring model...")
network.restore_model(model_dir, model_file)
print("Learning rate", network.optimizer.lr.numpy())
network.train_network(trainset, valset, n_epoch=epochs, testset=testset)
|
weight=4
_instances=2
p=[100,300]
def run():
# not necessary but useful for visualising on gui
r.conf_set('send_status_interval', 10)
r.conf_set('accel', 500) # robot accelerates to given speed 100 for 500ms
r.conf_set('alpha', 500) # robot accelerates (rotation) to given speed 100 for 500ms
r.speed(100)
    # natural mathematical coordinate system
# x - when robot orientation == 0, robot is looking at positive x axis
# y - when robot orientation == 90, robot is looking at positive y axis
r.setpos(0,0,0)
# 200 mm forward
r.forward(p[_i])
# 200 mm backward
r.forward(-p[_i])
# move to point 200,200
r.goto(200,200)
    if _i == 1:
        # task #1 will wait 1 second
        sleep(1)
    else:
        # task #0 will wait 5 seconds
        sleep(5)
# go back to 0,0
r.goto(0,0)
# go to 200,0
r.goto(200,0)
# go back to 0,0 in reverse
r.goto(0,0,-1)
|
salar = float(input('What is your salary? '))
if salar >= 1250:
aumen = 10
salar = salar + (salar * 0.10)
else:
aumen = 15
salar = salar + (salar * 0.15)
print('Your salary with a raise of {}% is now {} reais'.format(aumen, salar))
|
class TestLine:
def test_size(self, line):
assert line.size == 5
def test_end_offset(self, line):
assert line.end_offset == 5
def test_text(self, line):
assert line.text == "aaaaa"
def test_offset_in_file(self, line):
assert line.offset_in_file == 0
|
import os
import cv2
import json
import numpy as np
import torch
import matplotlib.pyplot as plt
from tqdm import tqdm
from config import system_configs
from utils import crop_image, normalize_
from external.nms import soft_nms, soft_nms_merge
import sys
sys.path.append("../../") # Adds higher directory to python modules path.
from db.detection_video import db_configs # Import 'db' parameters
from db.coco_video import mscoco_classes # Import 'class_name' function
def _rescale_dets(detections, ratios, borders, sizes):
xs, ys = detections[..., 0:4:2], detections[..., 1:4:2]
xs /= ratios[:, 1][:, None, None]
ys /= ratios[:, 0][:, None, None]
xs -= borders[:, 2][:, None, None]
ys -= borders[:, 0][:, None, None]
np.clip(xs, 0, sizes[:, 1][:, None, None], out=xs)
np.clip(ys, 0, sizes[:, 0][:, None, None], out=ys)
def save_image(data, fn):
sizes = np.shape(data)
height = float(sizes[0])
width = float(sizes[1])
fig = plt.figure()
fig.set_size_inches(width/height, 1, forward=False)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
ax.imshow(data)
plt.savefig(fn, dpi = height)
plt.close()
def kp_decode(nnet, images, K, ae_threshold=0.5, kernel=3):
detections = nnet.test([images], ae_threshold=ae_threshold, K=K, kernel=kernel)
detections = detections.data.cpu().numpy()
return detections
def kp_detection(frame, nnet, score_min, debug=False, decode_func=kp_decode):
K = db_configs.top_k
ae_threshold = db_configs.ae_threshold
nms_kernel = db_configs.nms_kernel
scales = db_configs.test_scales
weight_exp = db_configs.weight_exp
merge_bbox = db_configs.merge_bbox
categories = db_configs.categories
nms_threshold = db_configs.nms_threshold
max_per_image = db_configs.max_per_image
nms_algorithm = {
"nms": 0,
"linear_soft_nms": 1,
"exp_soft_nms": 2
}[db_configs.nms_algorithm]
top_bboxes = {}
#for ind in tqdm(range(0, num_images), ncols=80, desc="locating kps"):
# db_ind = db_inds[ind]
# print(db_ind)
# image_id = db.image_ids(db_ind)
# image_file = db.image_file(db_ind)
#image_file = os.path.join(system_configs.data_dir, "coco", "images", "testdev2017", "{}").format("00000000000" + str(db_ind + 1) + ".jpg")
#if db_ind < 9:
# image_id = "00000000000" + str(db_ind + 1) + ".jpg"
# image_file = os.path.join(system_configs.data_dir, "coco", "images", "testdev2017", "{}").format("00000000000" + str(db_ind + 1) + ".jpg")
#elif db_ind >= 9 and db_ind < 99:
# image_id = "0000000000" + str(db_ind + 1) + ".jpg"
# image_file = os.path.join(system_configs.data_dir, "coco", "images", "testdev2017", "{}").format("0000000000" + str(db_ind + 1) + ".jpg")
#print(image_id)
#print(image_file)
#image = cv2.imread(image_file)
image = frame
height, width = image.shape[0:2]
detections = []
for scale in scales:
new_height = int(height * scale)
new_width = int(width * scale)
new_center = np.array([new_height // 2, new_width // 2])
inp_height = new_height | 127
inp_width = new_width | 127
images = np.zeros((1, 3, inp_height, inp_width), dtype=np.float32)
ratios = np.zeros((1, 2), dtype=np.float32)
borders = np.zeros((1, 4), dtype=np.float32)
sizes = np.zeros((1, 2), dtype=np.float32)
out_height, out_width = (inp_height + 1) // 4, (inp_width + 1) // 4
height_ratio = out_height / inp_height
width_ratio = out_width / inp_width
resized_image = cv2.resize(image, (new_width, new_height))
resized_image, border, offset = crop_image(resized_image, new_center, [inp_height, inp_width])
mean = np.array([0.40789654, 0.44719302, 0.47026115], dtype=np.float32) # From CenterNet/db/coco.py
std = np.array([0.28863828, 0.27408164, 0.27809835], dtype=np.float32) # From CenterNet/db/coco.py
resized_image = resized_image / 255.
normalize_(resized_image, mean, std)
images[0] = resized_image.transpose((2, 0, 1))
borders[0] = border
sizes[0] = [int(height * scale), int(width * scale)]
ratios[0] = [height_ratio, width_ratio]
images = np.concatenate((images, images[:, :, :, ::-1]), axis=0)
images = torch.from_numpy(images)
dets = decode_func(nnet, images, K, ae_threshold=ae_threshold, kernel=nms_kernel)
dets = dets.reshape(2, -1, 8)
dets[1, :, [0, 2]] = out_width - dets[1, :, [2, 0]]
dets = dets.reshape(1, -1, 8)
_rescale_dets(dets, ratios, borders, sizes)
dets[:, :, 0:4] /= scale
detections.append(dets)
detections = np.concatenate(detections, axis=1)
classes = detections[..., -1]
classes = classes[0]
detections = detections[0]
# reject detections with negative scores
keep_inds = (detections[:, 4] > -1)
detections = detections[keep_inds]
classes = classes[keep_inds]
top_bboxes = {}
for j in range(categories):
keep_inds = (classes == j)
top_bboxes[j + 1] = detections[keep_inds][:, 0:7].astype(np.float32)
if merge_bbox:
soft_nms_merge(top_bboxes[j + 1], Nt=nms_threshold, method=nms_algorithm, weight_exp=weight_exp)
else:
soft_nms(top_bboxes[j + 1], Nt=nms_threshold, method=nms_algorithm)
top_bboxes[j + 1] = top_bboxes[j + 1][:, 0:5]
scores = np.hstack([
top_bboxes[j][:, -1]
for j in range(1, categories + 1)
])
if len(scores) > max_per_image:
kth = len(scores) - max_per_image
thresh = np.partition(scores, kth)[kth]
for j in range(1, categories + 1):
keep_inds = (top_bboxes[j][:, -1] >= thresh)
top_bboxes[j] = top_bboxes[j][keep_inds]
# if debug:
# image_file = db.image_file(db_ind)
# image = cv2.imread(image_file)
# bboxes = {}
# for j in range(1, categories + 1):
# keep_inds = (top_bboxes[image_id][j][:, -1] > 0.5)
# cat_name = db.class_name(j)
# cat_size = cv2.getTextSize(cat_name, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 2)[0]
# color = np.random.random((3, )) * 0.6 + 0.4
# color = color * 255
# color = color.astype(np.int32).tolist()
# for bbox in top_bboxes[image_id][j][keep_inds]:
# bbox = bbox[0:4].astype(np.int32)
# if bbox[1] - cat_size[1] - 2 < 0:
# cv2.rectangle(image,
# (bbox[0], bbox[1] + 2),
# (bbox[0] + cat_size[0], bbox[1] + cat_size[1] + 2),
# color, -1
# )
# cv2.putText(image, cat_name,
# (bbox[0], bbox[1] + cat_size[1] + 2),
# cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), thickness=1
# )
# else:
# cv2.rectangle(image,
# (bbox[0], bbox[1] - cat_size[1] - 2),
# (bbox[0] + cat_size[0], bbox[1] - 2),
# color, -1
# )
# cv2.putText(image, cat_name,
# (bbox[0], bbox[1] - 2),
# cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), thickness=1
# )
# cv2.rectangle(image,
# (bbox[0], bbox[1]),
# (bbox[2], bbox[3]),
# color, 2
# )
# debug_file = os.path.join(debug_dir, "{}.jpg".format(db_ind))
# result_json = os.path.join(result_dir, "results.json")
# detections = db.convert_to_coco(top_bboxes)
# with open(result_json, "w") as f:
# json.dump(detections, f)
# cls_ids = list(range(1, categories + 1))
# image_ids = [db.image_ids(ind) for ind in db_inds]
# db.evaluate(result_json, cls_ids, image_ids)
detections = mscoco_classes.convert_to_coco(top_bboxes, score_min)
return detections
def testing(frame, nnet, score_min, debug=False):
return globals()[system_configs.sampling_function](frame, nnet, score_min, debug=debug)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Pavel 'Blane' Tuchin
from __future__ import unicode_literals
import json
import six
from . import generation
from . import utils
RES_DEFS = generation.DEFAULT_RESOURCE_DEFS_FILE_NAME
TYPE_DEFS = generation.DEFAULT_TYPE_DEFS_FILE_NAME
def defs_from_generated(resources_file=RES_DEFS, types_file=TYPE_DEFS):
"""Create definitions from pre-generated resource and type definitions
:param resources_file: path to pre-generated resource definitions file
:param types_file: path to pre-generated type definitions file
:return:
"""
with open(resources_file) as res_fp, \
open(types_file) as types_fp:
res_defs = json.load(res_fp)
type_defs = json.load(types_fp)
return Definitions(res_defs, type_defs)
def defs_from_raw(resources_file='profiles-resources.json', types_file='profiles-types.json'):
"""Create definitions directly from profiles downloaded from FHIR official website
:param resources_file: path to resources profiles
:param types_file: path to types profiles
:return:
"""
res_defs = generation.generate_resource_definitions(resources_file)
type_defs = generation.generate_type_definitions(types_file)
return Definitions(res_defs, type_defs)
class Definitions(object):
"""Collection of definition FHIR Resources and Complex types.
:ivar type_defs: dictionary of Complex Type definitions
:ivar res_defs: dictionary of Resource definitions
"""
def __init__(self, res_defs, type_defs):
self.type_defs = {k: StructDefinition(v, type_defs) for k, v in type_defs.items()}
self.res_defs = {k: StructDefinition(v, type_defs) for k, v in res_defs.items()}
def types_from_path(self, path):
"""Get element types
:param path: point-separated path to the element (can start with a resource or complex type)
:return: type definitions for provided path
"""
element = self.find(path)
if element.is_struct_def:
raise ValueError('Path does not point to an element')
return element.types
def find(self, path):
"""Find definition for provided path
:param path: point-separated path to the element or resource
:return: Either resource definition or element definition, depending on the path
"""
name = utils.resource_from_path(path)
resource = self.get_def(name)
if name == path:
return resource
return resource[path]
def get_def(self, name):
"""Get resource or complex type definition.
Method will raise `KeyError` if definition is not found
:param name: resource or complex type name
:return: resource or complex type definition
"""
try:
return self.res_defs[name]
except KeyError:
return self.type_defs[name]
class StructDefinition(object):
"""Structure definition.
Used to define a Resource or a Complex Type
"""
def __init__(self, _json, type_defs):
#: Is this a structure definition (yes, it is)
self.is_struct_def = True
#: Is this definition abstract
self.abstract = _json['abstract']
#: Name of the base definition
self.base = _json['base']
#: Definition name
self.name = _json['name']
elements = _json['elements']
#: Dictionary of elements present in this definition
self.elements = {k: ElementDefinition(v, type_defs) for k, v in elements.items()}
class ElementDefinition(object):
"""Definition of the element in Resource or Complex Type.
:ivar max: maximum number of values in this element (`None` if unlimited)
:ivar is_unlimited: can this element have unlimited number of values
:ivar is_required: is this element required (min value is 1)
:ivar is_single: is this element single (max == 1)
:ivar is_array: is this element an array (opposite to `is_single`)
:ivar types: types that are allowed in this element
"""
def __init__(self, _json, type_defs):
#: Is this a structure definition (no, it is not)
self.is_struct_def = False
#: Is element summary
self.is_summary = _json.get('isSummary')
#: Minimal number of values in this element
self.min = _json['min']
_max = _json['max']
if _max == '*':
self.max = None
self.is_unlimited = True
else:
self.max = int(_max)
self.is_unlimited = False
self.is_required = self.min > 0
self.is_single = self.max == 1
self.is_array = not self.is_single
self.types = [Type(t, type_defs) for t in _json['types']]
@property
def is_polymorphic(self):
"""Is this element polymorphic? (Can contain more than one type)
:return: `True` if this element is polymorphic
"""
return len(self.types) != 1
@property
def type(self):
"""Get a type definition of this element (only usable for non-polymorphic elements)
        :return: type definition of this element.
"""
if self.is_polymorphic:
raise ValueError('Element is polymorphic')
return self.types[0]
def to_single_type(self, _type):
"""Convert polymorphic element to a single type.
:param _type: one of the polymorphic types
:return: New element definition for a provided type
:raises ValueError
"""
if _type not in self.types:
raise ValueError('Invalid Type')
new_def = ElementDefinition({
'min': self.min,
'max': '*' if self.is_unlimited else six.text_type(self.max),
'types': []
}, {})
new_def.types = [_type]
return new_def
class Type(object):
"""Type definition.
Contains necessary information about a type, that can be present in some element.
:ivar code: type name (str)
:ivar is_reference: is this type a reference
:ivar is_backbone: is this type BackboneElement
:ivar is_complex: is this a complex type
:ivar is_primitive: is this a primitive type
:ivar to: for a reference type - a list of targets, `None` otherwise
:ivar is_any: if this type is a reference and list of targets is empty (can be any kind of resource)
"""
def __init__(self, _json, type_defs):
self.code = _json['code']
self.is_reference = self.code == 'Reference'
self.is_backbone = self.code == 'BackboneElement' or self.code == 'Element'
self.is_resource = self.code == 'Resource'
self.is_complex = self.code in type_defs
self.is_primitive = not self.is_complex
if self.is_reference:
self.to = _json.get('targets')
if not self.to:
self.is_any = True
else:
self.to = None
self.is_any = False
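# Illustrative usage (added sketch; 'Patient.name' is only an example path, and the
# default pre-generated definition files assumed by defs_from_generated() must exist):
#   defs = defs_from_generated()
#   element = defs.find('Patient.name')
#   codes = [t.code for t in element.types] if element.is_polymorphic else [element.type.code]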
|
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
def SVC_model(c_index, dataset_target):
svc_model = SVC(kernel='linear',C=1.0).fit(c_index, dataset_target)
return svc_model
""" subistituir SVC"""
def prediction(model, tranf_dtest, dataset_test):
predicted = model.predict(tranf_dtest)
return predicted
def look_at_predictions(predicted, dataset_train, dataset_test):
for sample, class_pos in zip(dataset_test, predicted):
print('%r => %s' % (sample, dataset_train.target_names[class_pos]))
|
import enum
import numpy as np
from scipy import interpolate
from matplotlib import pyplot as plt
class ContrastLandscape(enum.Flag):
FIXED = 0
RANDOM_PATH = enum.auto()
RANDOM_BACKGROUND = enum.auto()
SHARED_RANDOM = enum.auto()
def gabor_kernel(size, scale, wavelength, phase, orientation):
scale, wavelength, phase, orientation = np.atleast_2d(
scale, wavelength, phase, orientation)
scale = scale.T
wavelength = wavelength.T
phase = phase.T
orientation = orientation.T
x, y = np.meshgrid(np.linspace(-size[0]/2, size[0]/2, size[0], endpoint=True),
np.linspace(-size[1]/2, size[1]/2, size[1], endpoint=True))
# flatten with extra leading dimension to allow broadcasting
x = x.reshape((1, -1))
y = y.reshape((1, -1))
# rotate coordinates to match the orientation
x_r = x*np.cos(orientation) + y*np.sin(orientation)
y_r = -x*np.sin(orientation) + y*np.cos(orientation)
# compute kernel value at each pixel position
g = np.exp(-(x_r**2 + y_r**2)/(2*scale**2))
g = g*np.cos(x_r*2*np.pi/wavelength + phase)
return g.reshape((-1, size[0], size[1])).squeeze()
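# For reference (added comment): each kernel above is the standard Gabor function
# g(x, y) = exp(-(x_r^2 + y_r^2) / (2*scale^2)) * cos(2*pi*x_r/wavelength + phase),
# where (x_r, y_r) are the pixel coordinates rotated by `orientation`; broadcasting
# over the leading dimension lets a single call build one kernel per parameter row.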
def add_gabors(positions, gabors, image, clip_values=False):
image = image.copy()
min_val = min(image.min(), gabors.min())
max_val = max(image.max(), gabors.max())
for (x, y), gabor in zip(positions, gabors):
i = x - gabor.shape[0]//2
j = y - gabor.shape[1]//2
img_i_start = max(0, i)
img_j_start = max(0, j)
img_i_end = min(i + gabor.shape[0], image.shape[0])
img_j_end = min(j + gabor.shape[1], image.shape[1])
g_i_start = max(0, -i)
g_j_start = max(0, -j)
g_i_end = g_i_start + (img_i_end - img_i_start)
g_j_end = g_j_start + (img_j_end - img_j_start)
image[img_i_start:img_i_end, img_j_start:img_j_end] += gabor[g_i_start:g_i_end,
g_j_start:g_j_end]
if clip_values:
image = np.clip(image, min_val, max_val)
return image
def random_grid_positions(grid_size, cell_size):
img_size = grid_size*cell_size
offsets = np.stack((np.random.randint(cell_size[0], size=(grid_size[0], grid_size[1])),
np.random.randint(cell_size[1], size=(grid_size[0], grid_size[1]))))
idx = np.mgrid[0:img_size[0]:cell_size[0],
0:img_size[1]:cell_size[1]]
positions = idx + offsets
return positions.reshape((2, -1)).T
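# Note (added comment): random_grid_positions places one candidate position per grid
# cell, jittered uniformly within the cell, and returns them as an (N, 2) array of
# pixel coordinates, so the background Gabors are spread roughly evenly over the image.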
def grid_indices(point, cell_size):
return tuple((point/cell_size).astype(np.int64))
def sample_path(path_start, num_points, step_size, grid_size, cell_size,
path_angles, angle_noise):
point = np.array(path_start)
angle = np.arctan2(*(grid_size*cell_size/2 - point))
grid_occupancy = set()
path = [point]
for i in range(num_points):
angle = angle + np.random.choice(path_angles)
angle = angle + np.random.uniform(-angle_noise, angle_noise)
angle = angle % (2*np.pi)
direction = np.array((np.sin(angle), np.cos(angle)))
new_point = point + step_size*direction
element = (point + new_point)/2
elem_idx = grid_indices(element, cell_size)
if elem_idx in grid_occupancy:
new_point = new_point + step_size*direction/4
element = (point + new_point)/2
elem_idx = grid_indices(element, cell_size)
# reject path if invalid
if (elem_idx in grid_occupancy
or np.any(np.array(elem_idx) < 0)
or np.any(np.array(elem_idx) >= grid_size)):
return None
point = new_point
path.append(new_point)
grid_occupancy.add(elem_idx)
return path
def create_path(path_start, num_points, step_size, grid_size, cell_size,
path_angles, angle_noise, max_tries=1000):
path = None
for _ in range(max_tries):
path = sample_path(path_start, num_points, step_size, grid_size,
cell_size, path_angles, angle_noise)
if path is not None:
break
return path
def align_position_to_phase(position, wavelength, phase, orientation):
direction = np.array([np.sin(orientation), np.cos(orientation)])
phase_shift = wavelength*(phase - np.pi)/(2*np.pi)
position = position + direction*phase_shift
return position
def create_path_gabor(path, cell_size, size, scale, wavelength,
phase=None, align_phase=False):
positions= []
gabors = []
elem_indices = []
for p1, p2 in zip(path[:-1], path[1:]):
orientation = np.arctan2(*(p2 - p1)) + np.pi/2
position = (p1 + p2)/2
elem_indices.append(grid_indices(position, cell_size))
gabor_phase = phase
# sample a random phase even if not needed to keep the pseudo random
# generator state consistent.
rnd_phase = np.random.uniform(0, np.pi*2)
if phase is None:
gabor_phase = rnd_phase
if align_phase:
# align the position as though we were using the random phase but use the
# fixed phase with the new position
position = align_position_to_phase(position, wavelength, rnd_phase, orientation)
gabor = gabor_kernel(
size=size,
scale=scale,
wavelength=wavelength,
phase=gabor_phase,
orientation=orientation,
)
positions.append(position)
gabors.append(gabor)
return (np.array(positions, dtype=np.int),
np.array(gabors),
elem_indices)
def replace_background_gabor(bg_pos, bg_gabors, path_pos, path_gabors,
elem_indices, grid_size, cell_size):
bg_pos = bg_pos.copy()
bg_gabors = bg_gabors.copy()
for pos, gabor, grid_idx in zip(path_pos, path_gabors, elem_indices):
gabor_idx = np.ravel_multi_index(grid_idx, grid_size)
bg_pos[gabor_idx] = pos
bg_gabors[gabor_idx] = gabor
return bg_pos, bg_gabors
def uniform_random_contrast(point_grid_size, img_size, min_contrast, max_contrast,
epsilon, smooth):
img_size = np.array(img_size, dtype=np.int)
z = np.random.uniform(min_contrast, max_contrast, size=point_grid_size)
x, y = np.meshgrid(np.linspace(-1.2, 1.2, point_grid_size[0], endpoint=True),
np.linspace(-1.2, 1.2, point_grid_size[1], endpoint=True))
rbf = interpolate.Rbf(x, y, z, epsilon=epsilon, smooth=smooth)
def contrast_function(pos):
pos = 2*pos/img_size[None, :] - 1.
return rbf(pos[:, 0], pos[:, 1])
return contrast_function
def generate_images(seed, grid_size, cell_size, kernel_size, scale, wavelength,
start_distance, num_points, path_angle, angle_noise,
random_phase, align_phase, contrast_landscape, contrast_grid_size,
min_contrast, max_contrast, generate_contrast_image,
contrast_epsilon=0.4, contrast_smooth=0.):
if seed is not None:
np.random.seed(seed)
grid_size = np.array((grid_size, grid_size), dtype=np.int)
cell_size = np.array((cell_size, cell_size), dtype=np.int)
img_size = grid_size*cell_size
image = np.zeros(img_size)
start_angle = np.random.uniform(np.pi*2)
path_start = np.array([np.sin(start_angle), np.cos(start_angle)])
path_start = path_start*start_distance + img_size/2
path_angles = np.array([-path_angle, path_angle])
step_size = cell_size[0]
positions = random_grid_positions(grid_size, cell_size)
num_gabor = positions.shape[0]
orientations = np.random.uniform(0, np.pi, size=num_gabor)
phase = np.random.uniform(0., 2*np.pi, size=num_gabor)
gabors = gabor_kernel(
size=(kernel_size, kernel_size),
scale=scale,
wavelength=wavelength,
phase=phase if random_phase else np.pi,
orientation=orientations,
)
# bg_image = add_gabors(positions, gabors, image)
if num_points:
path = create_path(
path_start=path_start,
num_points=num_points,
step_size=step_size,
grid_size=grid_size,
cell_size=cell_size,
path_angles=path_angles,
angle_noise=angle_noise,
)
path_pos, path_gabors, elem_indices = create_path_gabor(
path=path,
cell_size=cell_size,
size=(kernel_size, kernel_size),
scale=scale,
wavelength=wavelength,
phase=None if random_phase else np.pi,
align_phase=align_phase,
)
path_contrast_func = uniform_random_contrast(
point_grid_size=contrast_grid_size,
img_size=img_size,
min_contrast=min_contrast,
max_contrast=max_contrast,
epsilon=contrast_epsilon,
smooth=contrast_smooth,
)
bg_contrast_func = uniform_random_contrast(
point_grid_size=contrast_grid_size,
img_size=img_size,
min_contrast=min_contrast,
max_contrast=max_contrast,
epsilon=contrast_epsilon,
smooth=contrast_smooth,
)
bg_contrast = max_contrast
path_contrast = max_contrast
if bool(contrast_landscape & ContrastLandscape.SHARED_RANDOM):
path_contrast_func = bg_contrast_func
if bool(contrast_landscape & ContrastLandscape.RANDOM_BACKGROUND):
bg_contrast = bg_contrast_func(positions)[:, None, None]
if bool(contrast_landscape & ContrastLandscape.RANDOM_PATH):
path_contrast = path_contrast_func(path_pos)[:, None, None]
gabors *= bg_contrast
path_image = None
if num_points:
path_gabors *= path_contrast
path_image = add_gabors(np.array(path_pos, dtype=np.int),
np.array(path_gabors), image)
positions, gabors = replace_background_gabor(
positions, gabors, path_pos, path_gabors, elem_indices, grid_size, cell_size)
bg_path_image = add_gabors(positions, gabors, image)
if generate_contrast_image:
contr_size = (min(img_size[0], 400), min(img_size[1], 400))
x, y = np.mgrid[0:contr_size[0], 0:contr_size[1]]
pos = np.concatenate((x.reshape((-1, 1)), y.reshape((-1, 1))), axis=1)
if bool(contrast_landscape & ContrastLandscape.RANDOM_PATH):
path_contrast = path_contrast_func(pos).reshape(contr_size)
else:
path_contrast = np.broadcast_to(path_contrast, contr_size)
if bool(contrast_landscape & ContrastLandscape.RANDOM_BACKGROUND):
bg_contrast = bg_contrast_func(pos).reshape(contr_size)
else:
bg_contrast = np.broadcast_to(bg_contrast, contr_size)
return path_image, bg_path_image, path_contrast, bg_contrast
|
#!/usr/bin/env python3
# || ---------- test_entry.py ---------- ||
# Tests for entry.py
#
# Ben Carpenter and Nancy Onyimah
# April 24, 2022
# ------------- test_entry.py -------------
from datetime import datetime
from entry import Entry
import crypto
def test_construction():
"""
Test that:
1. Class construction works
2. Fields are accessible
"""
dt = datetime.now()
e = Entry(0, "Title", "Text", dt)
assert type(e) == Entry
assert e.title == "Title" and e.text == "Text" and e.timestamp == dt
def test_decrypt():
"""
Test that:
    1. The decrypt method decrypts all fields properly
"""
key = crypto.convert_passphrase_to_key("the cow jumped over the moon")
enc_title = crypto.encrypt("Title", key)
enc_text = crypto.encrypt("Text", key)
e = Entry(0, enc_title, enc_text, datetime.now())
e.decrypt(key)
assert e.title == "Title" and e.text == "Text"
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (C) 2010 Alan Franzoni.
#
# Commandline integration for property mapping.
class CommandLinePropertyParser(object):
def parse(self, arglist):
return [ arg[3:] for arg in arglist if arg.startswith("-PD") ]
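# Illustrative usage (added example): only "-PD"-prefixed arguments are kept,
# with the prefix stripped, everything else is ignored, e.g.
#   CommandLinePropertyParser().parse(["-PDfoo=1", "--verbose", "-PDbar=2"])
#   returns ["foo=1", "bar=2"]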
|
from urllib.request import urlopen
from bs4 import BeautifulSoup
def extract_top40_songs(url):
with urlopen(url) as f:
html = f.read()
soup = BeautifulSoup(html, "lxml")
song_divs = soup.findAll(attrs={'class': 'song-details'})
songs = []
for song_div in song_divs:
title_elem = song_div.find(attrs={'class': 'title'})
artist = song_div.find(attrs={'class': 'artist'})
if title_elem is not None and artist is not None:
song = {
'title': title_elem.text.strip(),
'artist': artist.text.strip()
}
songs.append(song)
return songs
|
#!/usr/bin/env python
import sys
from app import create_app
from app.models import Role
def main(app=create_app()):
with app.app_context():
Role.insert_roles()
        print('Added roles')
if __name__ == '__main__':
sys.exit(main())
|
import numpy as np
import tensorflow as tf
import os
from tqdm import tqdm
from tensorflow.keras import datasets
from tensorflow.keras.losses import Loss
from tensorflow.keras.layers import Layer
from tensorflow.keras import backend as K
from tensorflow.python.keras.utils.vis_utils import plot_model
"""
Center loss: https://ydwen.github.io/papers/WenECCV16.pdf
Help taken from:
https://github.com/zoli333/Center-Loss
https://github.com/Kakoedlinnoeslovo/center_loss/blob/master/Network.py
"""
print("TensorFlow version: {}".format(tf.__version__))
print("Eager execution: {}".format(tf.executing_eagerly()))
# load data
(train_images, train_labels), (test_images, test_labels) = datasets.mnist.load_data()
print("shape train_images", np.shape(train_images))
print("shape train_labels", np.shape(train_labels))
train_images = train_images.reshape((-1, 28, 28, 1))
test_images = test_images.reshape((-1, 28, 28, 1))
num_classes = 10
class_weights = [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]
# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0
# create fake clusters to pass as argument
train_cl = np.zeros((train_images.shape[0], 1))
test_cl = np.zeros((test_images.shape[0], 1))
# prepare train and test sets
train_images = train_images.astype('float32')
test_images = test_images.astype('float32')
class CenterLayer(Layer):
def __init__(self, num_classes, alpha_center, **kwargs):
super().__init__(**kwargs)
self.num_classes = num_classes
self.alpha_center = alpha_center
def build(self, input_shape):
# split input
features = input_shape[0]
# Create a trainable weight variable for this layer
self.centers = self.add_weight(name='centers',
shape=(self.num_classes, features[-1]),
initializer='uniform',
trainable=False)
super().build(input_shape)
def call(self, x):
# split data
y_pred = x[0]
y_true = x[1]
# transform to one hot encoding
y_true = tf.cast(y_true, dtype=tf.uint8)
y_true = tf.one_hot(y_true, self.num_classes)
y_true = tf.cast(y_true, dtype='float32')
y_true = tf.reshape(y_true, shape=(tf.shape(y_true)[0], self.num_classes))
# compute center loss
delta_centers = K.dot(tf.transpose(y_true), (K.dot(y_true, self.centers) - y_pred))
denominator = K.sum(tf.transpose(y_true), axis=1, keepdims=True) + 1
delta_centers /= denominator
new_centers = self.centers - self.alpha_center * delta_centers
self.add_update((self.centers, new_centers))
result = (K.dot(y_true, self.centers) - y_pred)
return K.sum(result ** 2, axis=1, keepdims=True)
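# Note on the layer above (added comment): `call` returns, for every sample, the
# squared distance between its feature vector and the (non-trainable) center of its
# class, while `delta_centers` implements the running center update
#   c_j <- c_j - alpha * (sum over class-j samples of (c_j - x_i)) / (n_j + 1)
# from the center-loss paper; `cluster_loss` below then sums these per-sample
# distances and scales them by 0.5.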
# ----------------------- create model --------------------------------
input = tf.keras.Input(shape=(28, 28, 1), name="base_input")
label = tf.keras.Input(shape=(1,), name="labels", dtype='int32')
x = tf.keras.layers.Conv2D(32, (3, 3), padding='same', activation='relu')(input)
x = tf.keras.layers.MaxPooling2D((2, 2))(x)
x = tf.keras.layers.Conv2D(64, (3, 3), activation='relu')(x)
x = tf.keras.layers.MaxPooling2D((2, 2))(x)
x = tf.keras.layers.Conv2D(64, (3, 3), activation='relu')(x)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(64, activation='relu')(x)
output = tf.keras.layers.Dense(10, name='output')(x)
cluster = CenterLayer(num_classes=10, alpha_center=0.5, name='cluster')([x, label])
model = tf.keras.Model(inputs=[input, label], outputs=[output, cluster])
def cluster_loss(y_true, y_pred):
return 0.5 * K.sum(y_pred, axis=0)
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9),
loss={'output': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
'cluster': cluster_loss},
loss_weights=[1, .5],
metrics={'output': ['accuracy']},
class_weights=[class_weights, class_weights])
# model.fit(train_images, train_labels, epochs=10,
# # validation_data=(test_images, test_labels))
# model.train_on_batch([train_images[:32], train_labels[:32]], [train_labels[:32], train_cl[:32]])
model.fit([train_images, train_labels], [train_labels, train_cl], epochs=20,
validation_data=([test_images, test_labels], [test_labels, test_cl]))
|
from prob_13 import prob_13
n = int(input("Ingrese un numero: "))
print (prob_13(n))
|
#!/usr/bin/env python
"""
Centrality measures of Krackhardt social network.
"""
# Author: Aric Hagberg (hagberg@lanl.gov)
# Date: 2005-05-12 14:33:11 -0600 (Thu, 12 May 2005)
# Revision: 998
# Copyright (C) 2004-2016 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from networkx import *
G=krackhardt_kite_graph()
print("Betweenness")
b=betweenness_centrality(G)
for v in G.nodes():
print("%0.2d %5.3f"%(v,b[v]))
print("Degree centrality")
d=degree_centrality(G)
for v in G.nodes():
print("%0.2d %5.3f"%(v,d[v]))
print("Closeness centrality")
c=closeness_centrality(G)
for v in G.nodes():
print("%0.2d %5.3f"%(v,c[v]))
|
"""List package presets."""
# :license: MIT, see LICENSE for more details.
import click
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
from SoftLayer.managers import ordering
COLUMNS = ['name',
'keyName',
'description', ]
@click.command()
@click.argument('package_keyname')
@click.option('--keyword',
help="A word (or string) used to filter preset names.")
@environment.pass_env
def cli(env, package_keyname, keyword):
"""List package presets.
Package keynames can be retrieved from `slcli order package-list`.
Some packages do not have presets.
\b
Example:
# List the presets for Bare Metal servers
slcli order preset-list BARE_METAL_SERVER
The --keyword option can also be used for additional filtering on
the returned presets.
\b
Example:
# List the Bare Metal server presets that include a GPU
slcli order preset-list BARE_METAL_SERVER --keyword gpu
"""
table = formatting.Table(COLUMNS)
manager = ordering.OrderingManager(env.client)
_filter = {}
if keyword:
_filter = {'activePresets': {'name': {'operation': '*= %s' % keyword}}}
presets = manager.list_presets(package_keyname, filter=_filter)
for preset in presets:
table.add_row([
preset['name'],
preset['keyName'],
preset['description']
])
env.fout(table)
|
import numpy.random as random
from scipy.stats import truncnorm
SAMPLES = 200
def truncated_normal(mean=0, sd=1, low=0, upp=10, samples=SAMPLES):
a, b = (low - mean) / sd, (upp - mean) / sd
return truncnorm(a, b, loc=mean, scale=sd).rvs(samples)
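# Note (added comment): scipy's truncnorm takes its clip points a and b in standard
# deviations from the mean, which is why the raw bounds `low` and `upp` are rescaled
# as (bound - mean) / sd before constructing the distribution.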
# fn_map = {"uniform": random.uniform, "normal": truncated_normal}
def sample_values():
"""Generate samples in range [0,1]"""
while True:
chances = random.uniform(0, 1, SAMPLES)
for val in chances:
yield val
generator = sample_values()
def all_strings(values):
return all(isinstance(val, str) for val in values)
def all_couples(values):
return all(isinstance(val, (list, tuple)) and len(val) == 2 for val in values)
def get_value_generator(node):
"""Returns a value generator from the provided distribution.
If node is a scalar, return the scalar.
if node is a list of strings, samples from the list.
if node is a list of numbers uniformly samples in the interval.
if nodes is a list of lists of len 2 defining values and probabilities of being picked, sample from that dist.
if node is a dict defining a distribution, samples from the distribution (if supported).
"""
match node:
case float() | int() | str(): # scalar
while True:
yield node
case [*values] if all_strings(values): # list of strings
while True:
yield random.choice(node)
case [int(low), int(high)]: # list of two ints
while True:
yield random.randint(low, high + 1)
case [float(low), float(high)]: # list of two floats
while True:
yield random.uniform(low, high)
case [0, float(high)]: # list of [0, float] (QoL)
while True:
yield random.uniform(0, high)
case [[_, _], *_] | [
(_, _),
*_,
] if all_couples( # list of tuples (value, probability)
node
): # list of tuples or list of lists (len 2)
values, probs = list(zip(*node))
if sum(probs) != 1:
raise ValueError(
f"Probabilities associated with values don't sum to 1. Node: {node}"
)
while True:
yield random.choice(values, p=probs)
case {
"distribution": "normal",
"mu": mu,
"sigma": sigma,
"min": min,
"max": max,
}:
while True:
vals = truncated_normal(mu, sigma, min, max)
for val in vals:
yield val
case {"distribution": "uniform", "min": min, "max": max}:
random.uniform(min, max)
case _:
raise ValueError(f"Unrecognized Value distribution for list {node}")
def get_size_generator(node):
"""Generate width, height values."""
size = node.get("size", dict())
width = size.get("width", 1)
height = size.get("height", 1)
while True:
ws = get_value_generator(width)
hs = get_value_generator(height)
for couple in zip(ws, hs):
yield couple
def roll():
"""Pops a number in [0,1]"""
return next(generator)
def roll_value(node):
"""Pops a number in a given distribution."""
return next(get_value_generator(node))
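# Minimal usage sketch (added example, not part of the original module): each call
# to roll_value() draws one sample from the distribution described by its node,
# following the dispatch documented in get_value_generator().
if __name__ == "__main__":
    print(roll_value(5))                                    # scalar -> always 5
    print(roll_value(["red", "green", "blue"]))             # list of strings -> random choice
    print(roll_value([1, 10]))                              # two ints -> uniform integer in [1, 10]
    print(roll_value([0.0, 1.0]))                           # two floats -> uniform float in [0, 1)
    print(roll_value([("heads", 0.5), ("tails", 0.5)]))     # (value, probability) pairs
    print(roll_value({"distribution": "normal", "mu": 0, "sigma": 1, "min": -2, "max": 2}))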
|
import requests, base64, json, hashlib
from Crypto.Cipher import AES
def push_server_chan(logs):
if sckey is None or sckey == "":
print("跳过推送")
return
params = {
'text': "网易云音乐自动脚本",
'desp': logs
}
serverURL = "https://sc.ftqq.com/" + sckey + ".send"
response = requests.session().post(serverURL, data=params)
if response.status_code == 200:
print("推送成功")
def encrypt(key, text):
cryptor = AES.new(key.encode('utf8'), AES.MODE_CBC, b'0102030405060708')
length = 16
count = len(text.encode('utf-8'))
if (count % length != 0):
add = length - (count % length)
else:
add = 16
pad = chr(add)
text1 = text + (pad * add)
ciphertext = cryptor.encrypt(text1.encode('utf8'))
cryptedStr = str(base64.b64encode(ciphertext), encoding='utf-8')
return cryptedStr
def md5(str):
hl = hashlib.md5()
hl.update(str.encode(encoding='utf-8'))
return hl.hexdigest()
def protect(text):
return {"params": encrypt('TA3YiYCfY2dDJQgg', encrypt('0CoJUm6Qyw8W8jud', text)),
"encSecKey": "84ca47bca10bad09a6b04c5c927ef077d9b9f1e37098aa3eac6ea70eb59df0aa28b691b7e75e4f1f9831754919ea784c8f74fbfadf2898b0be17849fd656060162857830e241aba44991601f137624094c114ea8d17bce815b0cd4e5b8e2fbaba978c6d1d14dc3d1faf852bdd28818031ccdaaa13a6018e1024e2aae98844210"}
def do_checkin(user, pwd, logs):
s = requests.Session()
header = {}
url = "https://music.163.com/weapi/login/cellphone"
url2 = "https://music.163.com/weapi/point/dailyTask"
url3 = "https://music.163.com/weapi/v1/discovery/recommend/resource"
logindata = {
"phone": user,
"countrycode": "86",
"password": md5(pwd),
"rememberLogin": "true",
}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36',
"Referer": "http://music.163.com/",
"Accept-Encoding": "gzip, deflate",
}
headers2 = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36',
"Referer": "http://music.163.com/",
"Accept-Encoding": "gzip, deflate",
"Cookie": "os=pc; osver=Microsoft-Windows-10-Professional-build-10586-64bit; appver=2.0.3.131777; channel=netease; __remember_me=true;"
}
res = s.post(url=url, data=protect(json.dumps(logindata)), headers=headers2)
tempcookie = res.cookies
object = json.loads(res.text)
if object['code'] == 200:
print("登录成功!")
logs += "账号" + logindata["phone"] + "登录成功!\n\n"
else:
print("登录失败!请检查密码是否正确!" + str(object['code']))
logs += "账号" + logindata["phone"] + "登录失败!请检查密码是否正确!\n\n"
return logs + str(object['code'])
res = s.post(url=url2, data=protect('{"type":0}'), headers=headers)
object = json.loads(res.text)
if object['code'] != 200 and object['code'] != -2:
print("签到时发生错误:" + object['msg'])
else:
if object['code'] == 200:
print("签到成功,经验+" + str(object['point']))
logs += "签到成功,经验+" + str(object['point']) + "\n\n"
else:
print("重复签到")
logs += "重复签到\n\n"
res = s.post(url=url3,
data=protect('{"csrf_token":"' + requests.utils.dict_from_cookiejar(tempcookie)['__csrf'] + '"}'),
headers=headers)
object = json.loads(res.text, strict=False)
for x in object['recommend']:
url = 'https://music.163.com/weapi/v3/playlist/detail?csrf_token=' + \
requests.utils.dict_from_cookiejar(tempcookie)[
'__csrf']
data = {
'id': x['id'],
'n': 1000,
'csrf_token': requests.utils.dict_from_cookiejar(tempcookie)['__csrf'],
}
res = s.post(url, protect(json.dumps(data)), headers=headers)
object = json.loads(res.text, strict=False)
buffer = []
count = 0
for j in object['playlist']['trackIds']:
data2 = {}
data2["action"] = "play"
data2["json"] = {}
data2["json"]["download"] = 0
data2["json"]["end"] = "playend"
data2["json"]["id"] = j["id"]
data2["json"]["sourceId"] = ""
data2["json"]["time"] = "240"
data2["json"]["type"] = "song"
data2["json"]["wifi"] = 0
buffer.append(data2)
count += 1
if count >= 310:
break
if count >= 310:
break
url = "http://music.163.com/weapi/feedback/weblog"
postdata = {
"logs": json.dumps(buffer)
}
res = s.post(url, protect(json.dumps(postdata)))
object = json.loads(res.text, strict=False)
if object['code'] == 200:
print("刷单成功!共" + str(count) + "首")
logs += "刷单成功!共" + str(count) + "首\n\n"
return logs
else:
print("发生错误:" + str(object['code']) + object['message'])
logs += "发生错误:" + str(object['code']) + object['message'] + "\n\n"
return logs + str(object['code'])
if __name__ == '__main__':
logs = ""
user_list = input().split("#")
pwd_list = input().split("#")
sckey = input()
if len(user_list) != len(pwd_list):
print("账号和密码个数不对应")
logs += "账号和密码个数不对应\n\n"
else:
print("共有" + str(len(user_list)) + "个账号,即将开始签到")
logs += "共有" + str(len(user_list)) + "个账号,即将开始签到\n\n"
for user, pwd in zip(user_list, pwd_list):
try:
logs = do_checkin(user, pwd, logs)
except BaseException as e:
logs += "程序出现异常"+e
logs += "执行完成"
push_server_chan(logs)
exit()
|
# coding: utf8
"""
################################ assertion schema ################################
schema = int
schema = lambda v: v in [0, 1]
schema = (int, float, str) # OR
schema = (int, float, str, type(None)) # can be `null` (i.e. `None` in python)
schema = (int, float, str, type(None), None) # can be missing
schema = (True, (int, float), lambda v: v >= 0) # AND
schema = {0, 1, 2} # should be in the set
schema = {'id': idSchema, 'name': nameSchema} # a dict
schema = [elementSchema] # a list
schema = ([elementSchema], len) # a nonempty list
##################################################################################
"""
from __future__ import print_function
import sys
import json
import re as regex
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str
else:
string_types = basestring # pylint: disable=undefined-variable
__all__ = ['azzert', 'ensure', 'mock', 'C', 'D', 'E']
def type_of(v):
return type(v)
class ErrorInfo:
invalidSchema = 'schema is invalid'
wrongType = 'value type is wrong'
emptyValue = 'value cannot be null'
missingProperty = 'property is missing'
redundantProperty = 'property is redundant'
emptyList = 'list cannot be empty'
notList = 'value should be a list'
shouldNotExist = 'value should not exist'
notInEnumValues = 'value is not among the enum list'
notMatchPattern = 'value does not match the regex pattern'
exampleOnlyInMock = 'example value is only used for mocking'
IDENTIFIER = '^[a-zA-Z_][0-9a-zA-Z_]*$'
AssertOptions = {
'debug': True, # throw exceptions or not
'allowmore': False, # allow redundant properties or not
'dictkeypattern': 1,
}
class AzzertionError(Exception):
def __init__(self, *args):
super(AzzertionError, self).__init__(*args)
class C(object):
'''Convert'''
def __init__(self, arg, *args, **kwargs):
self.exec = arg if callable(arg) else lambda *args, **kwargs: arg
def __call__(self, data, *args, **kwargs):
return self.exec(data)
class D(object):
'''Default'''
def __init__(self, arg, *args, **kwargs):
self.exec = arg if callable(arg) else lambda *args, **kwargs: arg
def __call__(self, *args, **kwargs):
return self.exec()
class E(object):
'''Example'''
def __init__(self, arg, *args, **kwargs):
self.exec = arg if callable(arg) else lambda *args, **kwargs: arg
def __call__(self, *args, **kwargs):
return self.exec()
def wrap_exception(options, message, *args):
if len(args):
message += ': ' + ', '.join([str(arg)
if i != 1 and isinstance(arg, (bool, int, float, str))
else json.dumps(arg, ensure_ascii=False)
for i, arg in enumerate(args)])
if options['debug']:
raise AzzertionError(message)
return message
def is_and_schema(schema):
return type(schema) is tuple and len(schema) > 0 and schema[0] is True
def is_blank_str(v):
return isinstance(v, string_types) and v.strip() == ''
def _azzert(value, schema, options, path='', **kwargs):
if isinstance(schema, C):
try:
return True, schema(value)
except Exception as e:
return False, e
if isinstance(schema, D):
return True, schema() if (value is None) or is_blank_str(value) else value
if isinstance(schema, E):
if options['mode'] == 'mock':
return True, schema()
return False, wrap_exception(options, ErrorInfo.exampleOnlyInMock, path)
if schema is True:
return True, value
if schema is None:
return False, wrap_exception(options, ErrorInfo.shouldNotExist, path, value)
st = type_of(schema)
if st is str:
if not isinstance(value, string_types):
return False, wrap_exception(options, ErrorInfo.wrongType, path, value, str(schema))
if regex.match(schema, value):
return True, value
return False, wrap_exception(options, ErrorInfo.notMatchPattern, path, value, str(schema))
if st is type:
if schema is str:
if isinstance(value, string_types):
return True, value
elif type(value) is schema:
return True, value
return False, wrap_exception(options, ErrorInfo.wrongType, path, value, str(schema))
if st is set:
if value in schema:
return True, value
return False, wrap_exception(options, ErrorInfo.notInEnumValues, path, value, list(schema))
if st is tuple:
_d, _e = None, None
for s in schema:
if isinstance(s, D): _d = s
if isinstance(s, E): _e = s
if options['mode'] == 'mock':
if _d is not None: return _d()
if _e is not None: return _e()
if is_and_schema(schema): # AND
schema = schema[1:]
v = value
for s in schema:
if isinstance(s, E):
continue
re = _azzert(v, s, options, path)
if not (type(re) is tuple and re[0] is True):
return re
v = re[1]
return True, v
for s in schema: # OR
if isinstance(s, E):
continue
re = None
try:
re = _azzert(value, s, options, path)
except:
pass
if type(re) is tuple and re[0] is True:
return True, re[1]
return False, wrap_exception(options, ErrorInfo.wrongType, path, value)
if st is dict:
if not isinstance(value, dict):
return False, wrap_exception(options, ErrorInfo.wrongType, path, value)
opt_dictkeypattern = options.get('dictkeypattern', 0)
if opt_dictkeypattern:
value3 = {}
pattern_in_keys = False
for k, s in schema.items():
if not regex.match(IDENTIFIER, k):
pattern_in_keys = True
break
if pattern_in_keys:
def check_kv(k, v):
for sk, sv in schema.items():
p = path + '[\'' + sk + '\']'
if regex.match(IDENTIFIER, sk):
if k != sk:
continue
else: # sk is a pattern
if not regex.match(sk, k):
continue
re = _azzert(v, sv, options, p)
if type(re) is not tuple:
return re
if re[0] is not True:
return re
value3[k] = re[1]
for k, v in value.items():
re = check_kv(k, v)
if k not in value3:
if re is None:
if not options['allowmore']:
return False, wrap_exception(options, ErrorInfo.redundantProperty, path + '.' + k)
else:
return re
else: # (k, v) is ok
pass
return True, value3
value2 = {}
v = None
for k, s in schema.items():
p = path + '.' + k
if k not in value:
if s is None: continue
if type(s) is tuple:
d = list(filter(lambda ss: isinstance(ss, D), s))
if len(d):
v = d[0]()
else:
continue
else:
return False, wrap_exception(options, ErrorInfo.missingProperty, p)
else:
v = value[k]
re = _azzert(v, s, options, p)
if type(re) is not tuple:
return re
if re[0] is not True:
return re
value2[k] = re[1]
for k, v in value.items():
p = path + '.' + k
if k not in schema:
if not options['allowmore']:
return False, wrap_exception(options, ErrorInfo.redundantProperty, p)
else:
value2[k] = v
return True, value2
if st is list:
if not isinstance(value, (list, tuple, set)):
            return False, wrap_exception(options, ErrorInfo.notList, path, value)
value2 = []
s = schema[0]
for i, v in enumerate(value):
p = path + '[' + str(i) + ']'
re = _azzert(v, s, options, p)
if type(re) is not tuple:
return re
if re[0] is not True:
return re
value2.append(re[1])
return True, value2
if callable(schema):
re = schema(value)
if re:
return True, value
if schema is len:
return False, wrap_exception(options, ErrorInfo.emptyList, path)
return False, wrap_exception(options, ErrorInfo.wrongType, path, value, 'judged by lambda')
return False, wrap_exception(options, ErrorInfo.invalidSchema)
def azzert(value, schema, options={}, **kwargs):
opts = {}
opts.update(AssertOptions)
opts.update(options)
opts.update(kwargs)
opts['mode'] = 'assert'
re = _azzert(value, schema, opts)
return True if re[0] is True else re[1]
def ensure(value, schema, options={}, **kwargs):
opts = {}
opts.update(AssertOptions)
opts.update(options)
opts.update(kwargs)
opts['debug'] = True
opts['mode'] = 'ensure'
re = _azzert(value, schema, opts)
return re[1]
def _mock(schema, options={}):
none_flag = '<absent>'
if schema in [int, float]: return 0
if schema is str: return ''
if schema is bool: return False
if schema is type(None): return None
if schema is None: return none_flag
st = type_of(schema)
uncertain_format = '<uncertain format>'
if st is tuple:
for s in schema:
if isinstance(s, D):
return s()
for s in schema:
if isinstance(s, E):
return s()
for s in schema:
if s in [type(None), None]:
return _mock(s, options)
if not is_and_schema(schema):
for s in schema:
return _mock(s, options)
return uncertain_format
if st is set:
for s in schema:
return s
if st is dict:
re = {}
for k, s in schema.items():
v = _mock(s, options)
if v == none_flag:
continue
re[k] = v
return re
if st is list:
return [_mock(schema[0], options)]
def mock(schema, options={}, **kwargs):
opts = {}
opts.update(AssertOptions)
opts.update(options)
opts.update(kwargs)
opts['mode'] = 'mock'
re = _mock(schema, opts)
return ensure(re, schema, opts)
if __name__ == '__main__':
AssertOptions['debug'] = False
id = 123
name = 'tom'
user = {'id': id, 'name': name}
users = [user]
try:
print(azzert(id, int))
print(azzert(id, None))
print(azzert(id, True))
print(azzert(id, (None,)))
print(azzert(id, (int, None)))
print(azzert(name, str))
print(azzert(user, dict))
print(azzert(user, {'id': int}))
print(azzert(user, {'id': int, 'age': int}))
print(azzert(user, {'id': int, 'name': str}))
print(azzert(users, [{'id': int, 'name': str}]))
print(azzert(users, [{'id': (True, int, lambda v: v > 0), 'name': str}]))
user['id'] = None
print(azzert(users, [{'id': (int, type(None)), 'name': (str,)}]))
print(azzert(users, [{'id': (None,), 'name': (str,)}]))
print(azzert(users, [{'id': None, 'name': (str,)}]))
del user['id']
print(azzert(users, [{'id': (int, None), 'name': (str,)}]))
users = []
print(azzert(users, [{'id': (int, None), 'name': (str,)}]))
print(azzert(users, ([{'id': (int, None), 'name': (str,)}], len)))
users = [user]
print(azzert(users, ([{'id': (int, None), 'name': (str,)}], len)))
except:
raise
|
import cPickle
import sys
from moduleUsefulFunctions_20180215 import *
from random import randint
import subprocess
def returnNumberMutations(alignInfoList):
numberMutations=0
for eachEl in alignInfoList:
if eachEl.typeOfEvent=='Start':
startPosition=eachEl.position
elif eachEl.typeOfEvent=='End':
endPosition=eachEl.position
for eachEl in alignInfoList:
if eachEl.typeOfEvent=='Insertion':
if eachEl.position==startPosition or eachEl.position==endPosition: #only count insertions in the middle of the sequence as mutations
pass
else:
numberMutations+=1 #as insertions are not being collapsed together (collapseInsertions=False in the smithWaterman calls below), each base inserted is counted separately
elif eachEl.typeOfEvent=='Mismatch' or eachEl.typeOfEvent=='Deletion':
numberMutations+=1
return numberMutations
def returnStartPosition(alignInfoList):
for eachEl in alignInfoList:
if eachEl.typeOfEvent=='Start':
return eachEl.position
def returnEndPosition(alignInfoList):
for eachEl in alignInfoList:
if eachEl.typeOfEvent=='End':
return eachEl.position
def FivePrimeClipSequence(alignInfoList):
startPosition=returnStartPosition(alignInfoList)
for eachEl in alignInfoList:
if eachEl.typeOfEvent=='Insertion' and eachEl.position==startPosition:
return eachEl.notes
return ''
def ThreePrimeClipSequence(alignInfoList):
endPosition=returnEndPosition(alignInfoList)
for eachEl in alignInfoList:
if eachEl.typeOfEvent=='Insertion' and eachEl.position==endPosition:
return eachEl.notes
return ''
def collapseRefs(listSeqToAlign,listSeqToWhichAlign,par1,par2,par3):
toleranceNumberMutations=par1
ntLengthUpdateFullLength=par2
softClippingThresh=par3
indexListToReturn=[]
for eachRef in listSeqToAlign:
alignmentInfo=[]
alignmentScore=[]
for eachRef1 in listSeqToWhichAlign:
score=-1
listRefs=[eachRef1,revComp(eachRef1)]
for eachSeq in listRefs:
alignOut=smithWaterman(eachSeq,eachRef,collapseInsertions=False)
if alignOut[0]>score:
score=alignOut[0]
info=alignedObject(alignOut,eachRef1,-1) #note eachRef is specified as refUsed, not eachSeq; artificial count of -1 passed
alignmentInfo.append(info)
alignmentScore.append(float(score)/min([len(eachRef1),len(eachRef)]))
alignmentScore=np.array(alignmentScore)
indicesSort=np.argsort(alignmentScore)
subclusterIndex=indicesSort[-1] #note there may be clashes between two references. Here, whatever sorts to the -1 position is chosen
numberMutations=returnNumberMutations(alignmentInfo[subclusterIndex].alignmentInfo[1])
endPositionBestAlignment=returnEndPosition(alignmentInfo[subclusterIndex].alignmentInfo[1])
startPositionBestAlignment=returnStartPosition(alignmentInfo[subclusterIndex].alignmentInfo[1])
maxNumberMutationsAllowed=np.ceil(toleranceNumberMutations*min([len(alignmentInfo[subclusterIndex].refUsed),len(eachRef)]))
totalNumberMutationsCalc=len(FivePrimeClipSequence(alignmentInfo[subclusterIndex].alignmentInfo[1]))+len(ThreePrimeClipSequence(alignmentInfo[subclusterIndex].alignmentInfo[1]))+numberMutations
if numberMutations<=np.ceil(toleranceNumberMutations*(endPositionBestAlignment-startPositionBestAlignment)):
if (totalNumberMutationsCalc<=(maxNumberMutationsAllowed+ntLengthUpdateFullLength)) or (startPositionBestAlignment<softClippingThresh and endPositionBestAlignment>(len(alignmentInfo[subclusterIndex].refUsed)-softClippingThresh)):
indexListToReturn.append(subclusterIndex)
else:
indexListToReturn.append('Left')
else:
indexListToReturn.append('Left')
return indexListToReturn
#ver16 denovoClustering code onwards definition of clusterObject
class clusterObject:
__slots__=['reference','dictKmers','numberOfReads','forAlignment','listSeqIndices']
def __init__(self,reference,dictKmers,numberOfReads,forAlignment,listSeqIndices):
self.reference=reference
self.dictKmers=dictKmers
self.numberOfReads=numberOfReads
self.forAlignment=forAlignment
self.listSeqIndices=listSeqIndices
sortWithPCRDuplicates=0 #parameter to dictate whether sorting is happening with or without subtraction of PCR duplicates, 0 means that each barcode is only counted once for each insert sequence, 1 means that each barcode is counted as many times as it is observed even if the insert sequence is the same
toleranceNumberMutations=0.1 #for subclusters, this is number of mutations per nucleotide, i.e. for a value of 0.1, 1 mutation is allowed every 10 bases on average, another good acceptable value is 0.06
ntLengthUpdateFullLength=6 #how much should a new sequence be longer by to update the fullLengthSequence attribute for each cluster object
softClippingThresh=2 #for length upgrading
countThreshold=5
inputSample=sys.argv[1]
def returnClusterDict(sampleName):
listAllRefs1=[]
listAllClusters1=[]
listTotalCounts=[]
with open(sampleName+'_trimmomaticP_thresholdTestDict_1_seqErrorCorrect_1_clusterDict_subrefCollapsed_v1.pckl','rb') as f:
clusterDict=cPickle.load(f)
for counter, eachCluster in enumerate(clusterDict):
for eachRef in clusterDict[eachCluster]:
if clusterDict[eachCluster][eachRef].forAlignment==1: #subref collapsed dicts should have all subreferences set to forAlignment=1 anyway
if clusterDict[eachCluster][eachRef].numberOfReads>=countThreshold:
listAllRefs1.append(eachRef)
listAllClusters1.append(eachCluster)
listTotalCounts.append(clusterDict[eachCluster][eachRef].numberOfReads)
return [clusterDict,listAllRefs1,listAllClusters1,listTotalCounts]
[inputClusterDict,newClusterDictRef,newClusterDictCluster,newTotalCount]=returnClusterDict(inputSample)
print len(newClusterDictRef)
sortedIndices=np.argsort(np.array(newTotalCount))
sortedIndices=sortedIndices[::-1] #decreasing order
newClusterDictRef2=[]
newClusterDictRef2Dict={}
for counter, eachEl in enumerate(sortedIndices):
print counter
if counter==0:
newClusterDictRef2.append(newClusterDictRef[eachEl])
newClusterDictRef2Dict[newClusterDictRef[eachEl]]={(newClusterDictCluster[eachEl],newClusterDictRef[eachEl]):0}
else:
alignmentMappingList=collapseRefs([newClusterDictRef[eachEl]],newClusterDictRef2,toleranceNumberMutations,ntLengthUpdateFullLength,softClippingThresh)
alignmentMappingIndex=alignmentMappingList[0]
if alignmentMappingIndex=='Left':
newClusterDictRef2.append(newClusterDictRef[eachEl])
newClusterDictRef2Dict[newClusterDictRef[eachEl]]={(newClusterDictCluster[eachEl],newClusterDictRef[eachEl]):0}
else:
newClusterDictRef2Dict[newClusterDictRef2[alignmentMappingIndex]][(newClusterDictCluster[eachEl],newClusterDictRef[eachEl])]=0
with open(inputSample+'_finalRefCollapseOutput_v1_20190313.pckl','wb') as f:
cPickle.dump([newClusterDictRef2Dict,newClusterDictRef2],f,protocol=cPickle.HIGHEST_PROTOCOL)
|
# coding=utf-8
from matplotlib import pyplot as plt
from matplotlib import font_manager
a=[131, 98, 125, 131, 124, 139, 131, 117, 128, 108, 135, 138, 131, 102, 107, 114, 119, 128, 121, 142, 127, 130, 124, 101, 110, 116, 117, 110, 128, 128, 115, 99, 136, 126, 134, 95, 138, 117, 111,78, 132, 124, 113, 150, 110, 117, 86, 95, 144, 105, 126, 130,126, 130, 126, 116, 123, 106, 112, 138, 123, 86, 101, 99, 136,123, 117, 119, 105, 137, 123, 128, 125, 104, 109, 134, 125, 127,105, 120, 107, 129, 116, 108, 132, 103, 136, 118, 102, 120, 114,105, 115, 132, 145, 119, 121, 112, 139, 125, 138, 109, 132, 134,156, 106, 117, 127, 144, 139, 139, 119, 140, 83, 110, 102,123,107, 143, 115, 136, 118, 139, 123, 112, 118, 125, 109, 119, 133,112, 114, 122, 109, 106, 123, 116, 131, 127, 115, 118, 112, 135,115, 146, 137, 116, 103, 144, 83, 123, 111, 110, 111, 100, 154,136, 100, 118, 119, 133, 134, 106, 129, 126, 110, 111, 109, 141,120, 117, 106, 149, 122, 122, 110, 118, 127, 121, 114, 125, 126,114, 140, 103, 130, 141, 117, 106, 114, 121, 114, 133, 137, 92,121, 112, 146, 97, 137, 105, 98, 117, 112, 81, 97, 139, 113,134, 106, 144, 110, 137, 137, 111, 104, 117, 100, 111, 101, 110,105, 129, 137, 112, 120, 113, 133, 112, 83, 94, 146, 133, 101,131, 116, 111, 84, 137, 115, 122, 106, 144, 109, 123, 116, 111,111, 133, 150]
# Compute the number of bins
d = 3  # bin width
num_bins = (max(a)-min(a))//d
print(max(a),min(a),max(a)-min(a))
print(num_bins)
# Set the figure size
plt.figure(figsize=(20,8),dpi=80)
plt.hist(a, num_bins, density=True)  # 'normed' was removed in Matplotlib 3.x; use 'density'
# Set the x-axis ticks
plt.xticks(range(min(a),max(a)+d,d))
plt.grid()
plt.show()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
PIXEL_ON = '#'
PIXEL_OFF = '.'
GROWTH_PER_ITERATION = 3
ITERATIONS_PART_ONE = 2
ITERATIONS_PART_TWO = 50
def filter_to_integer(image, x, y, default_value):
# The image enhancement algorithm describes how to enhance an image by simultaneously
# converting all pixels in the input image into an output image. Each pixel of the
# output image is determined by looking at a 3x3 square of pixels centered on the
# corresponding input image pixel.
integer = 0
for y_local in range(y - 1, y + 2):
for x_local in range(x - 1, x + 2):
# These nine input pixels are combined into a single binary number
# that is used as an index in the image enhancement algorithm string.
integer <<= 1
if image.get((x_local, y_local), default_value) == PIXEL_ON:
integer += 1
return integer
def run_filter(image, default_value):
# Scan a 3x3 area for each pixel in the image we're going to generate.
# Because our filter is sneaky and lights up infinite pixels sometimes,
# we'll provide a default value for what we expect in the image if we
# don't get a hit.
new_image = {}
for x in range(0 - (GROWTH_PER_ITERATION * iteration), image_size_x + (GROWTH_PER_ITERATION * iteration) + 1):
for y in range(0 - (GROWTH_PER_ITERATION * iteration), image_size_y + (GROWTH_PER_ITERATION * iteration) + 1):
            filter_index = filter_to_integer(image, x, y, default_value)
new_image[(x, y)] = enhancement_algo[filter_index]
return new_image
def lit_pixels(image):
pixel_count = 0
for pixel in image:
if image[pixel] == PIXEL_ON: pixel_count += 1
return pixel_count
# Parse the ocean trench map file
with open('day20_input.txt') as f:
lines = [line.rstrip('\n') for line in f]
# The first section is the image enhancement algorithm. It is normally given on a single line,
# but it has been wrapped to multiple lines in this example for legibility.
enhancement_algo = lines[0]
# The second section is the input image, a two-dimensional grid of light pixels (#) and dark pixels (.).
input_image = lines[2:]
image = {}
for y in range(len(input_image)):
for x in range(len(input_image[y])):
if input_image[y][x] == PIXEL_ON: image[(x, y)] = input_image[y][x]
image_size_x = x
image_size_y = y
iteration = 1
while iteration <= ITERATIONS_PART_TWO:
# The puzzle input very subtly deviates from the example given in the puzzle description:
# Any 3x3 region with no lit pixels will be lit in the next iteration, meaning an infinite
# number will be lit on odd ticks, and then be unlit on the subsequent (even) tick.
#
# To handle this, the filter only examines an area 3 pixels larger in every dimension
# than the existing image. We then know on the next tick that every pixel should default
# to "lit" if it's not in the given image.
image_with_infinite_pixels_lit = run_filter(image, default_value=PIXEL_OFF)
iteration += 1
# Run the same filter, but now defaulting to "lit" out to infinity.
image = run_filter(image_with_infinite_pixels_lit, default_value=PIXEL_ON)
iteration += 1
# Start with the original input image and apply the image enhancement algorithm twice, being careful to account
# for the infinite size of the images. How many pixels are lit in the resulting image?
if iteration - 1 == ITERATIONS_PART_ONE:
print('Part One: {0} pixels are lit after {1} image enhancement algorithms.'.format(lit_pixels(image), ITERATIONS_PART_ONE))
# Start again with the original input image and apply the image enhancement algorithm 50 times.
# How many pixels are lit in the resulting image?
print('Part Two: {0} pixels are lit after {1} image enhancement algorithms.'.format(lit_pixels(image), ITERATIONS_PART_TWO))
|
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
def create_driver():
chrome_options = Options()
chrome_options.add_argument("--headless")
# create a new chrome session
driver = webdriver.Chrome(options=chrome_options)
driver.implicitly_wait(19)
return driver
def quit_driver(driver):
driver.close()
driver.quit()
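# Hypothetical usage sketch (not part of the original helpers): opens a page
# headlessly and prints its title. The URL below is illustrative only and
# assumes Chrome plus a matching chromedriver are installed.
if __name__ == "__main__":
    driver = create_driver()
    try:
        driver.get("https://example.com")
        print(driver.title)
    finally:
        quit_driver(driver)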
|
# Helpers for importing strategy modules from arbitrary file paths,
# something the plain import statement cannot do directly.
import imp
# import strategy module from the given path
def import_strategy(module_name, full_path):
return imp.load_source(module_name, full_path)
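# Hypothetical alternative (not in the original file): `imp` has been deprecated
# since Python 3.4; importlib.util provides the same "load a module from an
# arbitrary path" behaviour. Shown here only as a sketch.
def import_strategy_importlib(module_name, full_path):
    import importlib.util
    spec = importlib.util.spec_from_file_location(module_name, full_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module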
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
r"""
This module provides an implementation of the *Extremal Perturbations* (EP)
method of [EP]_ for saliency visualization. The interface is given by
the :func:`extremal_perturbation` function:
.. literalinclude:: ../examples/extremal_perturbation.py
:language: python
:linenos:
Extremal perturbations seek to find a region of the input image that maximally
excites a certain output or intermediate activation of a neural network.
.. _ep_perturbations:
Perturbation types
~~~~~~~~~~~~~~~~~~
The :class:`Perturbation` class supports the following perturbation types:
* :attr:`BLUR_PERTURBATION`: Gaussian blur.
* :attr:`FADE_PERTURBATION`: Fade to black.
.. _ep_variants:
Extremal perturbation variants
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The :func:`extremal_perturbation` function supports the following variants:
* :attr:`PRESERVE_VARIANT`: Find a mask that makes the activations large.
* :attr:`DELETE_VARIANT`: Find a mask that makes the activations small.
* :attr:`DUAL_VARIANT`: Find a mask that makes the activations large and whose
complement makes the activations small, rewarding the difference between
these two.
References:
.. [EP] Ruth C. Fong, Mandela Patrick and Andrea Vedaldi,
*Understanding Deep Networks via Extremal Perturbations and Smooth Masks,*
ICCV 2019,
`<http://arxiv.org/>`__.
"""
from __future__ import division
from __future__ import print_function
__all__ = [
"extremal_perturbation",
"Perturbation",
"simple_reward",
"contrastive_reward",
"BLUR_PERTURBATION",
"FADE_PERTURBATION",
"PRESERVE_VARIANT",
"DELETE_VARIANT",
"DUAL_VARIANT",
]
import math
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from torchray.utils import imsmooth, imsc
from torchray.attribution.common import resize_saliency
BLUR_PERTURBATION = "blur"
"""Blur-type perturbation for :class:`Perturbation`."""
FADE_PERTURBATION = "fade"
"""Fade-type perturbation for :class:`Perturbation`."""
PRESERVE_VARIANT = "preserve"
"""Preservation game for :func:`extremal_perturbation`."""
DELETE_VARIANT = "delete"
"""Deletion game for :func:`extremal_perturbation`."""
DUAL_VARIANT = "dual"
"""Combined game for :func:`extremal_perturbation`."""
class Perturbation:
r"""Perturbation pyramid.
The class takes as input a tensor :attr:`input` and applies to it
    perturbations of increasing strength, storing the resulting pyramid as
the class state. The method :func:`apply` can then be used to generate an
inhomogeneously perturbed image based on a certain perturbation mask.
The pyramid :math:`y` is the :math:`L\times C\times H\times W` tensor
.. math::
y_{lcvu} = [\operatorname{perturb}(x, \sigma_l)]_{cvu}
where :math:`x` is the input tensor, :math:`c` a channel, :math:`vu`,
the spatial location, :math:`l` a perturbation level, and
:math:`\operatorname{perturb}` is a perturbation operator.
For the *blur perturbation* (:attr:`BLUR_PERTURBATION`), the perturbation
operator amounts to convolution with a Gaussian whose kernel has
standard deviation :math:`\sigma_l = \sigma_{\mathrm{max}} (1 - l/ (L-1))`:
.. math::
\operatorname{perturb}(x, \sigma_l) = g_{\sigma_l} \ast x
For the *fade perturbation* (:attr:`FADE_PERTURBATION`),
.. math::
\operatorname{perturb}(x, \sigma_l) = \sigma_l \cdot x
where :math:`\sigma_l = l / (L-1)`.
Note that in all cases the last pyramid level :math:`l=L-1` corresponds
to the unperturbed input and the first :math:`l=0` to the maximally
perturbed input.
Args:
input (:class:`torch.Tensor`): A :math:`1\times C\times H\times W`
input tensor (usually an image).
        num_levels (int, optional): Number of pyramid levels. Defaults to 8.
type (str, optional): Perturbation type (:ref:`ep_perturbations`).
max_blur (float, optional): :math:`\sigma_{\mathrm{max}}` for the
Gaussian blur perturbation. Defaults to 20.
Attributes:
pyramid (:class:`torch.Tensor`): A :math:`L\times C\times H\times W`
            tensor with :math:`L` (:attr:`num_levels`) increasingly
perturbed versions of the input tensor.
"""
def __init__(self, input, num_levels=8, max_blur=3, type=BLUR_PERTURBATION):
self.type = type
self.num_levels = num_levels
self.pyramid = []
assert num_levels >= 2
assert max_blur > 0
with torch.no_grad():
for sigma in torch.linspace(0, 1, self.num_levels):
if type == BLUR_PERTURBATION:
y = imsmooth(input, sigma=(1 - sigma) * max_blur)
elif type == FADE_PERTURBATION:
y = input * sigma
else:
assert False
self.pyramid.append(y)
self.pyramid = torch.cat(self.pyramid, dim=0)
def apply(self, mask):
r"""Generate a perturbetd tensor from a perturbation mask.
The :attr:`mask` is a tensor :math:`K\times 1\times H\times W`
with spatial dimensions :math:`H\times W` matching the input
tensor passed upon instantiation of the class. The output
is a :math:`K\times C\times H\times W` with :math:`K` perturbed
versions of the input tensor, one for each mask.
        Mask values are in the range 0 to 1, where 1 means that the input
tensor is copied as is, and 0 that it is maximally perturbed.
Formally, the output is then given by:
.. math::
            z_{kcvu} = y_{m_{k1vu}, c, v, u}
where :math:`k` index the mask, :math:`c` the feature channel,
:math:`vu` the spatial location, :math:`y` is the pyramid tensor,
and :math:`m` the mask tensor :attr:`mask`.
The mask must be in the range :math:`[0, 1]`. Linear interpolation
is used to index the perturbation level dimension of :math:`y`.
Args:
mask (:class:`torch.Tensor`): A :math:`K\times 1\times H\times W`
input tensor representing :math:`K` masks.
Returns:
:class:`torch.Tensor`: A :math:`K\times C\times H\times W` tensor
with :math:`K` perturbed versions of the input tensor.
"""
n = mask.shape[0]
w = mask.reshape(n, 1, *mask.shape[1:])
w = w * (self.num_levels - 1)
k = w.floor()
w = w - k
k = k.long()
y = self.pyramid[None, :]
y = y.expand(n, *y.shape[1:])
k = k.expand(n, 1, *y.shape[2:])
y0 = torch.gather(y, 1, k)
y1 = torch.gather(y, 1, torch.clamp(k + 1, max=self.num_levels - 1))
return ((1 - w) * y0 + w * y1).squeeze(dim=1)
def to(self, dev):
"""Switch to another device.
Args:
dev: PyTorch device.
Returns:
Perturbation: self.
"""
        self.pyramid = self.pyramid.to(dev)
return self
def __str__(self):
return (
f"Perturbation:\n"
f"- type: {self.type}\n"
f"- num_levels: {self.num_levels}\n"
f"- pyramid shape: {list(self.pyramid.shape)}"
)
def simple_reward(activation, target, variant):
r"""Simple reward.
For the :attr:`PRESERVE_VARIANT`, the simple reward is given by:
.. math::
        z_{k1vu} = y_{k, c, v, u}
where :math:`y` is the :math:`K\times C\times H\times W` :attr:`activation`
tensor, :math:`c` the :attr:`target` channel, :math:`k` the mask index
and :math:`vu` the spatial indices. :math:`c` must be in the range
:math:`[0, C-1]`.
For the :attr:`DELETE_VARIANT`, the reward is the opposite.
For the :attr:`DUAL_VARIANT`, it is given by:
.. math::
z_{n1vu} = y_{n, c, v, u} - y_{n + N/2, c, v, u}.
Args:
activation (:class:`torch.Tensor`): activation tensor.
target (int): target channel.
variant (str): A :ref:`ep_variants`.
Returns:
:class:`torch.Tensor`: reward tensor with the same shape as
:attr:`activation` but a single channel.
"""
assert isinstance(activation, torch.Tensor)
assert len(activation.shape) >= 2 and len(activation.shape) <= 4
assert isinstance(target, int)
if variant == DELETE_VARIANT:
reward = - activation[:, target]
elif variant == PRESERVE_VARIANT:
reward = activation[:, target]
elif variant == DUAL_VARIANT:
bs = activation.shape[0]
assert bs % 2 == 0
num_areas = int(bs / 2)
reward = activation[:num_areas, target] - \
activation[num_areas:, target]
else:
assert False
return reward
def contrastive_reward(activation, target, variant):
r"""Contrastive reward.
For the :attr:`PRESERVE_VARIANT`, the contrastive reward is given by:
.. math::
        z_{k1vu} = y_{k, c, v, u} - \max_{c'\not= c} y_{k, c', v, u}
The other variants are derived in the same manner as for
:func:`simple_reward`.
Args:
activation (:class:`torch.Tensor`): activation tensor.
target (int): target channel.
variant (str): A :ref:`ep_variants`.
Returns:
:class:`torch.Tensor`: reward tensor with the same shape as
:attr:`activation` but a single channel.
"""
assert isinstance(activation, torch.Tensor)
assert len(activation.shape) >= 2 and len(activation.shape) <= 4
assert isinstance(target, int)
def get(pred_y, y):
temp_y = pred_y.clone()
temp_y[:, y] = -100
return pred_y[:, y] - temp_y.max(dim=1, keepdim=True)[0]
if variant == DELETE_VARIANT:
reward = - get(activation, target)
elif variant == PRESERVE_VARIANT:
reward = get(activation, target)
elif variant == DUAL_VARIANT:
bs = activation.shape[0]
assert bs % 2 == 0
num_areas = int(bs / 2)
reward = (
get(activation[:num_areas], target) -
get(activation[num_areas:], target)
)
else:
assert False
return reward
class MaskGenerator:
r"""Mask generator.
The class takes as input the mask parameters and returns
as output a mask.
Args:
shape (tuple of int): output shape.
step (int): parameterization step in pixels.
sigma (float): kernel size.
clamp (bool, optional): whether to clamp the mask to [0,1]. Defaults to True.
        pooling_method (str, optional): `'softmax'` (default), `'sum'`, `'sigmoid'`.
Attributes:
shape (tuple): the same as the specified :attr:`shape` parameter.
shape_in (tuple): spatial size of the parameter tensor.
shape_out (tuple): spatial size of the output mask including margin.
"""
def __init__(self, shape, step, sigma, clamp=True, pooling_method='softmax'):
self.shape = shape
self.step = step
self.sigma = sigma
self.coldness = 20
self.clamp = clamp
self.pooling_method = pooling_method
assert int(step) == step
# self.kernel = lambda z: (z < 1).float()
self.kernel = lambda z: torch.exp(-2 * ((z - .5).clamp(min=0)**2))
self.margin = self.sigma
# self.margin = 0
self.padding = 1 + math.ceil((self.margin + sigma) / step)
self.radius = 1 + math.ceil(sigma / step)
self.shape_in = [math.ceil(z / step) for z in self.shape]
self.shape_mid = [
z + 2 * self.padding - (2 * self.radius + 1) + 1
for z in self.shape_in
]
self.shape_up = [self.step * z for z in self.shape_mid]
self.shape_out = [z - step + 1 for z in self.shape_up]
self.weight = torch.zeros((
1,
(2 * self.radius + 1)**2,
self.shape_out[0],
self.shape_out[1]
))
step_inv = [
torch.tensor(zm, dtype=torch.float32) /
torch.tensor(zo, dtype=torch.float32)
for zm, zo in zip(self.shape_mid, self.shape_up)
]
for ky in range(2 * self.radius + 1):
for kx in range(2 * self.radius + 1):
uy, ux = torch.meshgrid(
torch.arange(self.shape_out[0], dtype=torch.float32),
torch.arange(self.shape_out[1], dtype=torch.float32)
)
iy = torch.floor(step_inv[0] * uy) + ky - self.padding
ix = torch.floor(step_inv[1] * ux) + kx - self.padding
delta = torch.sqrt(
(uy - (self.margin + self.step * iy))**2 +
(ux - (self.margin + self.step * ix))**2
)
k = ky * (2 * self.radius + 1) + kx
self.weight[0, k] = self.kernel(delta / sigma)
def generate(self, mask_in):
r"""Generate a mask.
The function takes as input a parameter tensor :math:`\bar m` for
:math:`K` masks, which is a :math:`K\times 1\times H_i\times W_i`
tensor where `H_i\times W_i` are given by :attr:`shape_in`.
Args:
mask_in (:class:`torch.Tensor`): mask parameters.
Returns:
            tuple: a pair of masks, cropped and full. The cropped mask is a
            :class:`torch.Tensor` with the same spatial shape :attr:`shape`
            as specified upon creating this object. The second mask is the same,
            but with an additional margin and shape :attr:`shape_out`.
"""
mask = F.unfold(mask_in,
(2 * self.radius + 1,) * 2,
padding=(self.padding,) * 2)
mask = mask.reshape(
len(mask_in), -1, self.shape_mid[0], self.shape_mid[1])
mask = F.interpolate(mask, size=self.shape_up, mode='nearest')
mask = F.pad(mask, (0, -self.step + 1, 0, -self.step + 1))
mask = self.weight * mask
if self.pooling_method == 'sigmoid':
if self.coldness == float('+Inf'):
mask = (mask.sum(dim=1, keepdim=True) - 5 > 0).float()
else:
mask = torch.sigmoid(
self.coldness * mask.sum(dim=1, keepdim=True) - 3
)
elif self.pooling_method == 'softmax':
if self.coldness == float('+Inf'):
mask = mask.max(dim=1, keepdim=True)[0]
else:
mask = (
mask * F.softmax(self.coldness * mask, dim=1)
).sum(dim=1, keepdim=True)
elif self.pooling_method == 'sum':
mask = mask.sum(dim=1, keepdim=True)
else:
assert False, f"Unknown pooling method {self.pooling_method}"
m = round(self.margin)
if self.clamp:
mask = mask.clamp(min=0, max=1)
cropped = mask[:, :, m:m + self.shape[0], m:m + self.shape[1]]
return cropped, mask
def to(self, dev):
"""Switch to another device.
Args:
dev: PyTorch device.
Returns:
MaskGenerator: self.
"""
self.weight = self.weight.to(dev)
return self
def extremal_perturbation(model,
input,
target,
areas=[0.1],
perturbation=BLUR_PERTURBATION,
max_iter=800,
num_levels=8,
step=7,
sigma=21,
jitter=True,
variant=PRESERVE_VARIANT,
print_iter=None,
debug=False,
reward_func=simple_reward,
resize=False,
resize_mode='bilinear',
smooth=0):
r"""Compute a set of extremal perturbations.
The function takes a :attr:`model`, an :attr:`input` tensor :math:`x`
of size :math:`1\times C\times H\times W`, and a :attr:`target`
activation channel. It produces as output a
:math:`K\times C\times H\times W` tensor where :math:`K` is the number
of specified :attr:`areas`.
Each mask, which has approximately the specified area, is searched
in order to maximise the (spatial average of the) activations
in channel :attr:`target`. Alternative objectives can be specified
via :attr:`reward_func`.
Args:
model (:class:`torch.nn.Module`): model.
input (:class:`torch.Tensor`): input tensor.
target (int): target channel.
areas (float or list of floats, optional): list of target areas for saliency
masks. Defaults to `[0.1]`.
perturbation (str, optional): :ref:`ep_perturbations`.
max_iter (int, optional): number of iterations for optimizing the masks.
num_levels (int, optional): number of buckets with which to discretize
and linearly interpolate the perturbation
(see :class:`Perturbation`). Defaults to 8.
step (int, optional): mask step (see :class:`MaskGenerator`).
Defaults to 7.
sigma (float, optional): mask smoothing (see :class:`MaskGenerator`).
Defaults to 21.
        jitter (bool, optional): flip the input horizontally on every other iteration.
Defaults to True.
variant (str, optional): :ref:`ep_variants`. Defaults to
:attr:`PRESERVE_VARIANT`.
print_iter (int, optional): frequency with which to print losses.
Defaults to None.
debug (bool, optional): If True, generate debug plots.
reward_func (function, optional): function that generates reward tensor
to backpropagate.
resize (bool, optional): If True, upsamples the masks the same size
as :attr:`input`. It is also possible to specify a pair
(width, height) for a different size. Defaults to False.
resize_mode (str, optional): Upsampling method to use. Defaults to
``'bilinear'``.
smooth (float, optional): Apply Gaussian smoothing to the masks after
computing them. Defaults to 0.
Returns:
        A tuple containing the masks and the energies.
        The masks are stored as a :class:`torch.Tensor`
        of dimension :math:`K\times 1\times H\times W`; the energies are the
        optimization history, a :math:`K\times 2\times T` tensor whose two
        channels hold the reward and the area regularization term per iteration.
if isinstance(areas, float):
areas = [areas]
momentum = 0.9
learning_rate = 0.01
regul_weight = 300
device = input.device
regul_weight_last = max(regul_weight / 2, 1)
if debug:
print(
f"extremal_perturbation:\n"
f"- target: {target}\n"
f"- areas: {areas}\n"
f"- variant: {variant}\n"
f"- max_iter: {max_iter}\n"
f"- step/sigma: {step}, {sigma}\n"
f"- image size: {list(input.shape)}\n"
f"- reward function: {reward_func.__name__}"
)
# Disable gradients for model parameters.
# TODO(av): undo on leaving the function.
for p in model.parameters():
p.requires_grad_(False)
# Get the perturbation operator.
# The perturbation can be applied at any layer of the network (depth).
perturbation = Perturbation(
input,
num_levels=num_levels,
type=perturbation
).to(device)
perturbation_str = '\n '.join(perturbation.__str__().split('\n'))
if debug:
print(f"- {perturbation_str}")
# Prepare the mask generator.
shape = perturbation.pyramid.shape[2:]
mask_generator = MaskGenerator(shape, step, sigma).to(device)
h, w = mask_generator.shape_in
pmask = torch.ones(len(areas), 1, h, w).to(device)
if debug:
print(f"- mask resolution:\n {pmask.shape}")
# Prepare reference area vector.
max_area = np.prod(mask_generator.shape_out)
reference = torch.ones(len(areas), max_area).to(device)
for i, a in enumerate(areas):
reference[i, :int(max_area * (1 - a))] = 0
# Initialize optimizer.
optimizer = optim.SGD([pmask],
lr=learning_rate,
momentum=momentum,
dampening=momentum)
hist = torch.zeros((len(areas), 2, 0))
for t in range(max_iter):
pmask.requires_grad_(True)
# Generate the mask.
mask_, mask = mask_generator.generate(pmask)
# Apply the mask.
if variant == DELETE_VARIANT:
x = perturbation.apply(1 - mask_)
elif variant == PRESERVE_VARIANT:
x = perturbation.apply(mask_)
elif variant == DUAL_VARIANT:
x = torch.cat((
perturbation.apply(mask_),
perturbation.apply(1 - mask_),
), dim=0)
else:
assert False
# Apply jitter to the masked data.
if jitter and t % 2 == 0:
x = torch.flip(x, dims=(3,))
# Evaluate the model on the masked data.
y = model(x)
# Get reward.
reward = reward_func(y, target, variant=variant)
# Reshape reward and average over spatial dimensions.
reward = reward.reshape(len(areas), -1).mean(dim=1)
# Area regularization.
mask_sorted = mask.reshape(len(areas), -1).sort(dim=1)[0]
regul = - ((mask_sorted - reference)**2).mean(dim=1) * regul_weight
energy = (reward + regul).sum()
# Gradient step.
optimizer.zero_grad()
(- energy).backward()
optimizer.step()
pmask.data = pmask.data.clamp(0, 1)
# Record energy.
hist = torch.cat(
(hist,
torch.cat((
reward.detach().cpu().view(-1, 1, 1),
regul.detach().cpu().view(-1, 1, 1)
), dim=1)), dim=2)
# Adjust the regulariser/area constraint weight.
regul_weight *= 1.0035
# Diagnostics.
debug_this_iter = debug and (t in (0, max_iter - 1)
or regul_weight / regul_weight_last >= 2)
if (print_iter is not None and t % print_iter == 0) or debug_this_iter:
print("[{:04d}/{:04d}]".format(t + 1, max_iter), end="")
for i, area in enumerate(areas):
print(" [area:{:.2f} loss:{:.2f} reg:{:.2f}]".format(
area,
hist[i, 0, -1],
hist[i, 1, -1]), end="")
print()
if debug_this_iter:
regul_weight_last = regul_weight
for i, a in enumerate(areas):
plt.figure(i, figsize=(20, 6))
plt.clf()
ncols = 4 if variant == DUAL_VARIANT else 3
plt.subplot(1, ncols, 1)
plt.plot(hist[i, 0].numpy())
plt.plot(hist[i, 1].numpy())
plt.plot(hist[i].sum(dim=0).numpy())
plt.legend(('energy', 'regul', 'both'))
plt.title(f'target area:{a:.2f}')
plt.subplot(1, ncols, 2)
imsc(mask[i], lim=[0, 1])
plt.title(
f"min:{mask[i].min().item():.2f}"
f" max:{mask[i].max().item():.2f}"
f" area:{mask[i].sum() / mask[i].numel():.2f}")
plt.subplot(1, ncols, 3)
imsc(x[i])
if variant == DUAL_VARIANT:
plt.subplot(1, ncols, 4)
imsc(x[i + len(areas)])
plt.pause(0.001)
mask_ = mask_.detach()
# Resize saliency map.
mask_ = resize_saliency(input,
mask_,
resize,
mode=resize_mode)
# Smooth saliency map.
if smooth > 0:
mask_ = imsmooth(
mask_,
sigma=smooth * min(mask_.shape[2:]),
padding_mode='constant'
)
return mask_, hist
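# Hypothetical usage sketch (not part of the upstream module): exercises the
# public API above on a random input with a small torchvision classifier.
# The model choice, input size, target index and iteration count are
# illustrative assumptions only, kept tiny as a smoke test (the library
# default is max_iter=800).
if __name__ == "__main__":
    from torchvision.models import resnet18

    demo_model = resnet18().eval()
    demo_input = torch.randn(1, 3, 224, 224)
    demo_masks, demo_hist = extremal_perturbation(
        demo_model,
        demo_input,
        target=0,
        areas=[0.1],
        max_iter=10,
    )
    print(demo_masks.shape, demo_hist.shape)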
|
from node.base import AttributedNode
from node.behaviors import Events
from node.events import EventAttribute
from node.events import EventDispatcher
from node.events import suppress_events
from node.events import UnknownEvent
from node.interfaces import IEvents
from node.utils import UNSET
from plumber import Behavior
from plumber import default
from plumber import plumbing
import unittest
###############################################################################
# Mock objects
###############################################################################
class Subscriber(object):
def __call__(self, *args, **kw):
self.args = args
self.kw = kw
class MyDispatcher(EventDispatcher):
attr = EventAttribute(1)
class Behavior1(Behavior):
attr = default(EventAttribute(1))
@plumbing(Behavior1)
class PlumbedDispatcher(EventDispatcher):
pass
@plumbing(Events)
class AttributedDispatcher(AttributedNode):
attr = EventAttribute(1, storage='attrs')
@plumbing(Events)
class MixedEventDeclatationsDispatcher(object):
__events__ = ['event_a']
@plumbing(Events)
class AlwaysDispatchingAttributedDispatcher(object):
attr = EventAttribute(always_dispatch=True)
class SubscriberDecoratorDispatcher(EventDispatcher):
attr = EventAttribute()
@attr.subscriber
def attr_changed(self, value):
self.changed_value = value
class SubscriberDecoratorBehavior(Behavior):
attr = EventAttribute()
@attr.subscriber
def attr_changed(self, value):
self.changed_value = value
attr = default(attr)
@plumbing(SubscriberDecoratorBehavior)
class PlumbedSubscriberDecoratorDispatcher(EventDispatcher):
pass
###############################################################################
# Tests
###############################################################################
class TestEvents(unittest.TestCase):
def test_implements(self):
dispatcher = EventDispatcher()
self.assertTrue(IEvents.providedBy(dispatcher))
def test_register_event(self):
dispatcher = EventDispatcher()
# no events registered yet
self.assertEqual(dispatcher.__events__, [])
# register event
dispatcher.register_event('my_event')
self.assertEqual(dispatcher.__events__, ['my_event'])
# register same event again, still just one registration
dispatcher.register_event('my_event')
self.assertEqual(dispatcher.__events__, ['my_event'])
def test_bind(self):
dispatcher = EventDispatcher()
# no event subscribers registered yet
self.assertEqual(dispatcher.__subscribers__, {})
# bind to unknown event
self.assertRaises(
UnknownEvent,
lambda: dispatcher.bind(my_event=Subscriber())
)
# register event and bind subscriber to it
dispatcher.register_event('my_event')
subscriber = Subscriber()
dispatcher.bind(my_event=subscriber)
self.assertEqual(
dispatcher.__subscribers__,
{'my_event': [subscriber]}
)
        # register same subscriber again, still just one registration
dispatcher.bind(my_event=subscriber)
self.assertEqual(
dispatcher.__subscribers__,
{'my_event': [subscriber]}
)
def test_mixed_event_declarations(self):
dispatcher = MixedEventDeclatationsDispatcher()
dispatcher.register_event('event_b')
subscriber = Subscriber()
dispatcher.bind(event_a=subscriber)
dispatcher.bind(event_b=subscriber)
self.assertEqual(dispatcher.__subscribers__, {
'event_a': [subscriber],
'event_b': [subscriber]
})
def test_unbind(self):
dispatcher = EventDispatcher()
dispatcher.register_event('event_1')
dispatcher.register_event('event_2')
subscriber = Subscriber()
# event is None and subscriber is None
dispatcher.bind(event_1=subscriber, event_2=subscriber)
self.assertEqual(dispatcher.__subscribers__, {
'event_1': [subscriber],
'event_2': [subscriber]
})
dispatcher.unbind()
self.assertEqual(dispatcher.__subscribers__, {})
# event is not None and subscriber is None
dispatcher.bind(event_1=subscriber, event_2=subscriber)
self.assertEqual(dispatcher.__subscribers__, {
'event_1': [subscriber],
'event_2': [subscriber]
})
dispatcher.unbind(event='event_1')
self.assertEqual(dispatcher.__subscribers__, {
'event_2': [subscriber]
})
dispatcher.unbind()
# event is None and subscriber is not None
dispatcher.bind(event_1=subscriber, event_2=subscriber)
self.assertEqual(dispatcher.__subscribers__, {
'event_1': [subscriber],
'event_2': [subscriber]
})
dispatcher.unbind(subscriber=subscriber)
self.assertEqual(dispatcher.__subscribers__, {
'event_1': [],
'event_2': []
})
dispatcher.unbind()
# event is not None and subscriber is not None
subscriber_1 = Subscriber()
subscriber_2 = Subscriber()
dispatcher.bind(event_1=subscriber_1)
dispatcher.bind(event_1=subscriber_2)
self.assertEqual(dispatcher.__subscribers__, {
'event_1': [subscriber_1, subscriber_2]
})
dispatcher.unbind(event='event_1', subscriber=subscriber_1)
self.assertEqual(dispatcher.__subscribers__, {
'event_1': [subscriber_2]
})
dispatcher.unbind()
def test_dispatch(self):
dispatcher = EventDispatcher()
# register event and bind subscriber to it
dispatcher.register_event('my_event')
subscriber = Subscriber()
dispatcher.bind(my_event=subscriber)
# dispatch event, arguments and keyword arguments are passed to
# subscriber
dispatcher.dispatch('my_event', 1, kw=2)
self.assertEqual(subscriber.args, (1,))
self.assertEqual(subscriber.kw, dict(kw=2))
def test_attribute(self):
dispatcher = MyDispatcher()
# attribute events get registered automatically and are written to
# class dict
self.assertEqual(dispatcher.__class__.__events__, ['attr'])
# subscribe to attribute change
subscriber = Subscriber()
dispatcher.bind(attr=subscriber)
        # if the value does not change, no event is triggered
self.assertEqual(dispatcher.attr, 1)
dispatcher.attr = 1
self.assertFalse(hasattr(subscriber, 'args'))
# if value changes, an event gets triggered
dispatcher.attr = 2
self.assertEqual(dispatcher.attr, 2)
self.assertEqual(subscriber.args, (2,))
dispatcher.attr = 3
self.assertEqual(dispatcher.attr, 3)
self.assertEqual(subscriber.args, (3,))
# default value on class still 1
self.assertEqual(MyDispatcher.attr, 1)
# __del__ removes attribute from storage and triggers event with UNSET
# as value.
self.assertEqual(dispatcher.__dict__['attr'], 3)
del dispatcher.attr
self.assertEqual(subscriber.args, (UNSET,))
self.assertFalse('attr' in dispatcher.__dict__)
        # after deleting, the default value of the event attribute is returned again
self.assertEqual(dispatcher.attr, 1)
def test_attribute_always_dispatch(self):
dispatcher = AlwaysDispatchingAttributedDispatcher()
subscriber = Subscriber()
dispatcher.bind(attr=subscriber)
dispatcher.attr = 1
self.assertEqual(subscriber.args, (1,))
del subscriber.args
dispatcher.attr = 1
self.assertEqual(subscriber.args, (1,))
def test_attribute_storage(self):
dispatcher = AttributedDispatcher()
subscriber = Subscriber()
dispatcher.bind(attr=subscriber)
dispatcher.attr = 0
self.assertEqual(subscriber.args, (0,))
self.assertEqual(dispatcher.attrs['attr'], 0)
self.assertFalse('attr' in dispatcher.__dict__)
del dispatcher.attr
self.assertEqual(subscriber.args, (UNSET,))
self.assertFalse('attr' in dispatcher.attrs)
def test_attributes_on_behavior(self):
dispatcher = PlumbedDispatcher()
subscriber = Subscriber()
dispatcher.bind(attr=subscriber)
dispatcher.attr = 0
self.assertEqual(subscriber.args, (0,))
def test_attribute_subscriber_decorator(self):
dispatcher = SubscriberDecoratorDispatcher()
dispatcher.attr = 'Changed Value'
self.assertEqual(dispatcher.changed_value, 'Changed Value')
dispatcher = PlumbedSubscriberDecoratorDispatcher()
dispatcher.attr = 'Changed Value'
self.assertEqual(dispatcher.changed_value, 'Changed Value')
def test_suppress_events(self):
dispatcher = MyDispatcher()
dispatcher.register_event('my_event')
subscriber = Subscriber()
dispatcher.bind(my_event=subscriber, attr=subscriber)
with suppress_events():
dispatcher.attr = 0
self.assertFalse(hasattr(subscriber, 'args'))
with suppress_events('attr'):
dispatcher.attr = 1
self.assertFalse(hasattr(subscriber, 'args'))
with suppress_events(['other']):
dispatcher.attr = 2
self.assertEqual(subscriber.args, (2,))
del subscriber.args
with suppress_events():
dispatcher.dispatch('my_event', 0)
self.assertFalse(hasattr(subscriber, 'args'))
with suppress_events('my_event'):
dispatcher.dispatch('my_event', 1)
self.assertFalse(hasattr(subscriber, 'args'))
with suppress_events(['other']):
dispatcher.dispatch('my_event', 2)
self.assertEqual(subscriber.args, (2,))
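# Not part of the original file: allow running this test module directly.
if __name__ == '__main__':
    unittest.main()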
|
"""
Basic data structures used for general trading functions in VN Trader.
"""
from dataclasses import dataclass
from datetime import datetime
from logging import INFO
from .constant import Direction, Exchange, Interval, Offset, Status, Product, OptionType, OrderType
ACTIVE_STATUSES = set([Status.SUBMITTING, Status.NOTTRADED, Status.PARTTRADED])
@dataclass
class BaseData:
"""
    Any data object needs a gateway_name as its source
    and should inherit from this base class.
"""
gateway_name: str
@dataclass
class TickData(BaseData):
"""
Tick data contains information about:
* last trade in market
* orderbook snapshot
* intraday market statistics.
"""
symbol: str
exchange: Exchange
datetime: datetime
name: str = ""
volume: float = 0
open_interest: float = 0
last_price: float = 0
last_volume: float = 0
limit_up: float = 0
limit_down: float = 0
open_price: float = 0
high_price: float = 0
low_price: float = 0
pre_close: float = 0
bid_price_1: float = 0
bid_price_2: float = 0
bid_price_3: float = 0
bid_price_4: float = 0
bid_price_5: float = 0
ask_price_1: float = 0
ask_price_2: float = 0
ask_price_3: float = 0
ask_price_4: float = 0
ask_price_5: float = 0
bid_volume_1: float = 0
bid_volume_2: float = 0
bid_volume_3: float = 0
bid_volume_4: float = 0
bid_volume_5: float = 0
ask_volume_1: float = 0
ask_volume_2: float = 0
ask_volume_3: float = 0
ask_volume_4: float = 0
ask_volume_5: float = 0
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
@dataclass
class BarData(BaseData):
"""
Candlestick bar data of a certain trading period.
"""
symbol: str
exchange: Exchange
datetime: datetime
interval: Interval = None
volume: float = 0
open_interest: float = 0
open_price: float = 0
high_price: float = 0
low_price: float = 0
close_price: float = 0
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
@dataclass
class OrderData(BaseData):
"""
    Order data contains information for tracking the latest status
of a specific order.
"""
symbol: str
exchange: Exchange
orderid: str
type: OrderType = OrderType.LIMIT
direction: Direction = ""
offset: Offset = Offset.NONE
price: float = 0
volume: float = 0
traded: float = 0
status: Status = Status.SUBMITTING
time: str = ""
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
self.vt_orderid = f"{self.gateway_name}.{self.orderid}"
def is_active(self):
"""
Check if the order is active.
"""
if self.status in ACTIVE_STATUSES:
return True
else:
return False
def create_cancel_request(self):
"""
Create cancel request object from order.
"""
req = CancelRequest(
orderid=self.orderid, symbol=self.symbol, exchange=self.exchange
)
return req
@dataclass
class TradeData(BaseData):
"""
Trade data contains information of a fill of an order. One order
can have several trade fills.
"""
symbol: str
exchange: Exchange
orderid: str
tradeid: str
direction: Direction = ""
offset: Offset = Offset.NONE
price: float = 0
volume: float = 0
time: str = ""
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
self.vt_orderid = f"{self.gateway_name}.{self.orderid}"
self.vt_tradeid = f"{self.gateway_name}.{self.tradeid}"
@dataclass
class PositionData(BaseData):
"""
    Position data is used for tracking each individual position holding.
"""
symbol: str
exchange: Exchange
direction: Direction
volume: float = 0
frozen: float = 0
price: float = 0
pnl: float = 0
yd_volume: float = 0
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
self.vt_positionid = f"{self.vt_symbol}.{self.direction.value}"
@dataclass
class AccountData(BaseData):
"""
Account data contains information about balance, frozen and
available.
"""
accountid: str
balance: float = 0
frozen: float = 0
def __post_init__(self):
""""""
self.available = self.balance - self.frozen
self.vt_accountid = f"{self.gateway_name}.{self.accountid}"
@dataclass
class LogData(BaseData):
"""
Log data is used for recording log messages on GUI or in log files.
"""
msg: str
level: int = INFO
def __post_init__(self):
""""""
self.time = datetime.now()
@dataclass
class ContractData(BaseData):
"""
Contract data contains basic information about each contract traded.
"""
symbol: str
exchange: Exchange
name: str
product: Product
size: int
pricetick: float
min_volume: float = 1 # minimum trading volume of the contract
stop_supported: bool = False # whether server supports stop order
net_position: bool = False # whether gateway uses net position volume
history_data: bool = False # whether gateway provides bar history data
option_strike: float = 0
option_underlying: str = "" # vt_symbol of underlying contract
option_type: OptionType = None
option_expiry: datetime = None
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
@dataclass
class SubscribeRequest:
"""
Request sending to specific gateway for subscribing tick data update.
"""
symbol: str
exchange: Exchange
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
@dataclass
class OrderRequest:
"""
Request sending to specific gateway for creating a new order.
"""
symbol: str
exchange: Exchange
direction: Direction
type: OrderType
volume: float
price: float = 0
offset: Offset = Offset.NONE
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
def create_order_data(self, orderid: str, gateway_name: str):
"""
Create order data from request.
"""
order = OrderData(
symbol=self.symbol,
exchange=self.exchange,
orderid=orderid,
type=self.type,
direction=self.direction,
offset=self.offset,
price=self.price,
volume=self.volume,
gateway_name=gateway_name,
)
return order
@dataclass
class CancelRequest:
"""
Request sending to specific gateway for canceling an existing order.
"""
orderid: str
symbol: str
exchange: Exchange
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
@dataclass
class HistoryRequest:
"""
Request sending to specific gateway for querying history data.
"""
symbol: str
exchange: Exchange
start: datetime
end: datetime = None
interval: Interval = None
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
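# Hypothetical usage sketch (not part of the original module): builds an
# OrderRequest and converts it into an OrderData via create_order_data().
# The symbol, Exchange member and gateway name are illustrative assumptions.
if __name__ == "__main__":
    req = OrderRequest(
        symbol="IF2109",
        exchange=Exchange.CFFEX,
        direction=Direction.LONG,
        type=OrderType.LIMIT,
        volume=1,
        price=5000.0,
    )
    order = req.create_order_data(orderid="1", gateway_name="DEMO")
    print(order.vt_symbol, order.vt_orderid, order.is_active())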
|
#######################################################
#
# https://firebase.google.com/docs/firestore/quickstart
#
#######################################################
###################################################
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
from firebase_admin import db as fdb
###################################################
cred = credentials.Certificate("firebase/sacckey.json")
firebase_admin.initialize_app(cred, {
"databaseURL": "https://pgneditor-1ab96.firebaseio.com/",
})
db = firestore.client()
fdb.reference("test").set({
"meta": "test"
})
obj = fdb.reference("test").get()
print("got", obj["meta"], obj)
###################################################
"""
###################################################
doc_ref = db.collection(u'users').document(u'alovelace')
doc_ref.set({
u'first': u'Ada',
u'last': u'Lovelace',
u'born': 1815
})
doc_ref = db.collection(u'users').document(u'aturing')
doc_ref.set({
u'first': u'Alan',
u'middle': u'Mathison',
u'last': u'Turing',
u'born': 1912
})
###################################################
users_ref = db.collection(u'users')
docs = users_ref.stream()
for doc in docs:
print(u'{} => {}'.format(doc.id, doc.to_dict()))
###################################################
"""
|
# Perfect maze (tree) in a 70x70 grid
# Nodes: 806
# Edges: 805
adjList = [
[19, 1],
[23, 2, 0],
[29, 1],
[30, 4],
[24, 3],
[18, 6],
[27, 5],
[8],
[26, 9, 7],
[34, 10, 8],
[53, 9],
[54, 12],
[11],
[36, 14],
[37, 13],
[38, 16],
[39, 15],
[18],
[42, 5, 17],
[0, 20],
[44, 21, 19],
[28, 20],
[45, 23],
[1, 22],
[4, 25],
[47, 24],
[8],
[6],
[21],
[2, 30],
[3, 29],
[57, 32],
[31],
[58, 34],
[9, 33],
[60, 36],
[13, 35],
[14, 38],
[15, 37],
[16, 40],
[75, 39],
[62, 42],
[18, 41],
[64, 44],
[20, 43],
[22, 46],
[68, 45],
[25, 48],
[71, 47],
[72, 50],
[80, 49],
[81, 52],
[91, 51],
[10, 54],
[11, 53],
[56],
[97, 57, 55],
[77, 31, 56],
[33, 59],
[127, 58],
[35, 61],
[85, 60],
[41, 63],
[118, 62],
[43, 65],
[96, 64],
[76, 67],
[87, 66],
[46, 69],
[89, 68],
[99, 71],
[48, 70],
[49, 73],
[134, 72],
[116, 75],
[40, 74],
[66],
[57, 78],
[106, 77],
[107],
[50, 81],
[51, 80],
[150, 83],
[113, 82],
[114, 85],
[61, 84],
[103],
[67, 88],
[123, 87],
[69, 90],
[105, 89],
[52, 92],
[112, 91],
[189, 94],
[117, 93],
[120, 96],
[65, 95],
[56, 98],
[125, 97],
[70, 100],
[133, 99],
[140, 102],
[170, 103, 101],
[86, 102],
[128, 105],
[90, 104],
[78, 107],
[79, 108, 106],
[131, 107],
[135, 110],
[137, 109],
[112],
[126, 92, 111],
[83, 114],
[84, 113],
[141, 116],
[74, 115],
[94, 118],
[63, 119, 117],
[166, 118],
[95, 121],
[151, 120],
[152, 123],
[153, 88, 122],
[147, 125],
[98, 124],
[199, 112, 127],
[59, 126],
[104, 129],
[155, 128],
[131],
[108, 130],
[178, 133],
[100, 132],
[73, 135],
[109, 136, 134],
[135],
[110, 138],
[160, 137],
[161, 140],
[101, 139],
[115, 142],
[164, 141],
[144],
[165, 143],
[230, 146],
[145],
[124, 148],
[167, 147],
[183, 150],
[82, 149],
[121, 152],
[122, 151],
[123, 154],
[174, 153],
[129, 156],
[246, 155],
[179, 158],
[180, 157],
[182, 160],
[138, 159],
[139, 162],
[186, 161],
[187, 164],
[142, 163],
[144, 166],
[119, 165],
[148, 168],
[176, 167],
[200, 170],
[102, 169],
[231, 172],
[223, 173, 171],
[172],
[154, 175],
[210, 174],
[195, 168, 177],
[196, 176],
[132, 179],
[157, 178],
[158, 181],
[213, 182, 180],
[159, 181],
[149, 184],
[205, 183],
[217, 186],
[162, 185],
[163, 188],
[206, 187],
[93, 190],
[208, 191, 189],
[303, 190],
[247, 193],
[225, 192],
[195],
[235, 176, 194],
[177],
[212],
[199],
[126, 198],
[169, 201],
[221, 200],
[224, 203],
[232, 202],
[228, 205],
[184, 204],
[188, 207],
[242, 206],
[190],
[233, 210],
[175, 209],
[236, 212],
[197, 213, 211],
[181, 212],
[250, 215],
[251, 214],
[254, 217],
[185, 218, 216],
[240, 217],
[241, 220],
[256, 219],
[201, 222],
[229, 221],
[172, 224],
[202, 223],
[193, 226],
[271, 225],
[252, 228],
[204, 227],
[222],
[266, 145, 231],
[171, 230],
[203, 233],
[209, 234, 232],
[260, 233],
[284, 195, 236],
[211, 235],
[272, 238],
[312, 237],
[249],
[218, 241],
[219, 240],
[207, 243],
[242],
[281, 245],
[282, 244],
[156, 247],
[192, 246],
[273, 249],
[239, 250, 248],
[214, 249],
[215, 252],
[227, 251],
[254],
[216, 255, 253],
[254],
[220, 257],
[277, 256],
[267],
[260],
[268, 234, 259],
[285, 262],
[286, 263, 261],
[262],
[291, 265],
[292, 264],
[230, 267],
[258, 266],
[260, 269],
[268],
[296, 271],
[226, 270],
[237],
[248, 274],
[298, 273],
[276],
[288, 275],
[257, 278],
[301, 279, 277],
[302, 278],
[319, 281],
[244, 280],
[245, 283],
[306, 282],
[235, 285],
[261, 284],
[262, 287],
[310, 286],
[337, 276, 289],
[314, 288],
[315, 291],
[264, 290],
[265, 293],
[316, 292],
[340, 295],
[318, 294],
[345, 270, 297],
[308, 296],
[274, 299],
[336, 298],
[339, 301],
[278, 300],
[279, 303],
[327, 191, 302],
[328, 305],
[304],
[283, 307],
[331, 306],
[332, 297, 309],
[308],
[287, 311],
[334, 310],
[238, 313],
[322, 312],
[289, 315],
[290, 314],
[293, 317],
[338, 316],
[295, 319],
[280, 318],
[321],
[376, 320],
[348, 313, 323],
[322],
[325],
[351, 324],
[353, 327],
[303, 326],
[304, 329],
[355, 328],
[356, 331],
[307, 330],
[308, 333],
[358, 332],
[311, 335],
[360, 334],
[299, 337],
[288, 336],
[317, 339],
[300, 338],
[396, 294, 341],
[366, 340],
[367, 343],
[370, 342],
[369, 345],
[296, 344],
[377, 347],
[378, 346],
[322, 349],
[381, 348],
[362],
[395, 325, 352],
[386, 351],
[326, 354],
[391, 353],
[392, 329, 356],
[330, 355],
[373, 358],
[333, 357],
[379, 360],
[335, 359],
[382, 362],
[384, 350, 363, 361],
[394, 362],
[365],
[389, 364],
[341, 367],
[342, 366],
[369],
[344, 368],
[343, 371],
[410, 370],
[399, 373],
[357, 372],
[457],
[402, 376],
[321, 377, 375],
[346, 376],
[347, 379],
[359, 378],
[413, 381],
[349, 380],
[361, 383],
[405, 382],
[362, 385],
[438, 384],
[352, 387],
[406, 386],
[407, 389],
[445, 365, 388],
[408, 391],
[354, 390],
[355, 393],
[421, 392],
[363, 395],
[351, 394],
[340, 397],
[429, 396],
[422, 399],
[372, 398],
[424, 401],
[479, 400],
[375, 403],
[433, 402],
[425, 405],
[383, 404],
[387, 407],
[388, 406],
[390, 409],
[427, 408],
[371, 411],
[450, 410],
[460, 413],
[434, 380, 412],
[435, 415],
[436, 414],
[426],
[439, 418],
[441, 417],
[447],
[430],
[393, 422],
[398, 421],
[453, 424],
[400, 423],
[437, 404, 426],
[416, 425],
[409, 428],
[456, 427],
[448, 397, 430],
[420, 431, 429],
[449, 430],
[458, 433],
[403, 432],
[413, 435],
[414, 434],
[415, 437],
[425, 436],
[385, 439],
[417, 438],
[467, 441],
[418, 440],
[443],
[469, 444, 442],
[470, 443],
[471, 389, 446],
[445],
[472, 419, 448],
[429, 447],
[431],
[411, 451],
[495, 450],
[476, 453],
[423, 454, 452],
[453],
[491, 456],
[428, 455],
[374, 458],
[432, 459, 457],
[480, 458],
[412, 461],
[482, 460],
[463],
[485, 462],
[509, 465],
[500, 464],
[501, 467],
[440, 466],
[488, 469],
[443, 468],
[444, 471],
[445, 470],
[447, 473],
[503, 472],
[559, 475],
[493, 474],
[452, 477],
[505, 476],
[496, 479],
[401, 478],
[459, 481],
[520, 480],
[461, 483],
[506, 482],
[507, 485],
[463, 484],
[487],
[526, 488, 486],
[468, 487],
[490],
[513, 489],
[455, 492],
[539, 491],
[475, 494],
[516, 495, 493],
[451, 494],
[478, 497],
[533, 496],
[517, 499],
[518, 498],
[465, 501],
[466, 500],
[549, 503],
[473, 502],
[529, 505],
[477, 504],
[522, 483, 507],
[484, 506],
[535, 509],
[464, 508],
[570, 511],
[536, 510],
[537, 513],
[527, 490, 514, 512],
[513],
[516],
[494, 515],
[498],
[499, 519],
[541, 520, 518],
[481, 519],
[522],
[506, 521],
[546, 524],
[547, 523],
[548, 526],
[487, 525],
[513],
[552, 529],
[504, 528],
[575, 531],
[576, 530],
[553, 533],
[497, 532],
[564, 535],
[508, 534],
[511, 537],
[572, 512, 536],
[557, 539],
[492, 538],
[550],
[519, 542],
[561, 541],
[590, 544],
[563, 543],
[567, 546],
[523, 545],
[524, 548],
[525, 547],
[676, 502, 550],
[540, 549],
[612, 552],
[528, 551],
[532, 554],
[579, 553],
[569],
[582, 557],
[538, 556],
[585, 559],
[474, 558],
[574],
[542, 562],
[589, 561],
[544, 564],
[534, 563],
[566],
[592, 567, 565],
[545, 566],
[569],
[593, 555, 570, 568],
[510, 569],
[572],
[537, 571],
[574],
[638, 560, 575, 573],
[530, 574],
[531, 577],
[600, 576],
[614, 579],
[554, 578],
[603, 581],
[605, 580],
[556, 583],
[609, 582],
[596],
[558, 586],
[611, 585],
[615, 588],
[616, 587],
[562, 590],
[543, 589],
[619, 592],
[601, 566, 591],
[569, 594],
[623, 593],
[634, 596],
[584, 595],
[598],
[625, 597],
[626, 600],
[577, 599],
[592, 602],
[628, 601],
[580, 604],
[629, 603],
[581, 606],
[631, 605],
[608],
[633, 609, 607],
[583, 608],
[656, 611],
[586, 610],
[551, 613],
[646, 612],
[578, 615],
[587, 614],
[588, 617],
[641, 616],
[619],
[591, 618],
[643, 621],
[665, 620],
[673, 623],
[594, 622],
[658, 625],
[598, 624],
[599, 627],
[648, 626],
[602, 629],
[604, 628],
[653, 631],
[606, 630],
[655, 633],
[608, 632],
[595, 635],
[667, 634],
[657, 637],
[668, 636],
[574, 639],
[688, 638],
[660, 641],
[617, 640],
[662, 643],
[620, 642],
[675, 645],
[666, 644],
[613, 647],
[687, 646],
[627, 649],
[680, 648],
[681],
[670, 652],
[697, 653, 651],
[630, 652],
[712, 655],
[632, 654],
[610, 657],
[636, 656],
[624, 659],
[679, 658],
[640, 661],
[692, 660],
[642, 663],
[708, 662],
[682, 665],
[621, 664],
[645, 667],
[685, 635, 666],
[637, 669],
[686, 668],
[651, 671],
[670],
[698, 673],
[622, 672],
[700, 675],
[644, 674],
[549, 677],
[702, 676],
[704],
[659, 680],
[649, 681, 679],
[650, 680],
[664, 683],
[721, 682],
[713, 685],
[667, 684],
[669, 687],
[647, 686],
[639, 689],
[727, 690, 688],
[717, 689],
[730, 692],
[661, 693, 691],
[719, 692],
[720],
[722, 696],
[723, 695],
[652, 698],
[672, 699, 697],
[710, 698],
[674, 701],
[736, 700],
[677, 703],
[740, 702],
[678, 705],
[715, 704],
[750, 707],
[729, 706],
[663, 709],
[732, 708],
[699],
[733, 712],
[654, 711],
[684, 714],
[739, 713],
[762, 705, 716],
[765, 715],
[690, 718],
[774, 717],
[693, 720],
[694, 719],
[683, 722],
[695, 721],
[696, 724],
[748, 723],
[726],
[759, 725],
[743, 689, 728],
[727],
[707, 730],
[691, 729],
[771, 732],
[709, 731],
[711, 734],
[754, 733],
[755, 736],
[701, 735],
[738],
[757, 739, 737],
[714, 738],
[703, 741],
[761, 740],
[764],
[727],
[769, 745],
[770, 744],
[747],
[780, 746],
[724, 749],
[772, 748],
[706, 751],
[790, 750],
[777, 753],
[779, 752],
[734, 755],
[735, 754],
[783, 757],
[738, 756],
[793, 759],
[726, 758],
[785, 761],
[741, 760],
[715, 763],
[786, 762],
[787, 742, 765],
[716, 764],
[788, 767],
[795, 766],
[791, 769],
[744, 768],
[745, 771],
[731, 770],
[749, 773],
[803, 772],
[796, 718, 775],
[789, 774],
[799, 777],
[752, 776],
[800, 779],
[753, 778],
[747, 781],
[801, 780],
[804],
[756, 784],
[805, 783],
[794, 760, 786],
[763, 785],
[764, 788],
[766, 787],
[775],
[751, 791],
[797, 768, 790],
[798],
[758, 794],
[785, 793],
[767, 796],
[774, 795],
[791, 798],
[792, 799, 797],
[776, 798],
[778, 801],
[781, 802, 800],
[801],
[773, 804],
[782, 805, 803],
[784, 804]]
# x coord, y coord
nodeData = [
(1, 1),
(12, 1),
(14, 1),
(17, 1),
(23, 1),
(68, 1),
(70, 1),
(27, 2),
(37, 2),
(47, 2),
(49, 2),
(51, 2),
(52, 2),
(55, 2),
(57, 2),
(59, 2),
(61, 2),
(64, 2),
(68, 2),
(1, 3),
(4, 3),
(6, 3),
(9, 3),
(12, 3),
(23, 3),
(25, 3),
(37, 3),
(70, 3),
(6, 4),
(14, 4),
(17, 4),
(20, 4),
(22, 4),
(43, 4),
(47, 4),
(53, 4),
(55, 4),
(57, 4),
(59, 4),
(61, 4),
(64, 4),
(66, 4),
(68, 4),
(1, 5),
(4, 5),
(9, 5),
(11, 5),
(25, 5),
(29, 5),
(31, 5),
(36, 5),
(39, 5),
(41, 5),
(49, 5),
(51, 5),
(17, 6),
(18, 6),
(20, 6),
(43, 6),
(46, 6),
(53, 6),
(55, 6),
(66, 6),
(69, 6),
(1, 7),
(3, 7),
(5, 7),
(8, 7),
(11, 7),
(14, 7),
(27, 7),
(29, 7),
(31, 7),
(33, 7),
(61, 7),
(64, 7),
(5, 8),
(20, 8),
(22, 8),
(24, 8),
(36, 8),
(39, 8),
(48, 8),
(50, 8),
(52, 8),
(55, 8),
(58, 8),
(8, 9),
(10, 9),
(14, 9),
(16, 9),
(41, 9),
(44, 9),
(64, 9),
(66, 9),
(1, 10),
(3, 10),
(18, 10),
(20, 10),
(27, 10),
(31, 10),
(54, 10),
(57, 10),
(58, 10),
(12, 11),
(16, 11),
(22, 11),
(24, 11),
(26, 11),
(35, 11),
(39, 11),
(43, 11),
(44, 11),
(50, 11),
(52, 11),
(59, 11),
(61, 11),
(66, 11),
(69, 11),
(70, 11),
(1, 12),
(5, 12),
(8, 12),
(10, 12),
(18, 12),
(20, 12),
(44, 12),
(46, 12),
(12, 13),
(15, 13),
(23, 13),
(26, 13),
(29, 13),
(31, 13),
(33, 13),
(35, 13),
(36, 13),
(39, 13),
(42, 13),
(51, 13),
(54, 13),
(59, 13),
(62, 13),
(66, 13),
(68, 13),
(1, 14),
(2, 14),
(18, 14),
(20, 14),
(46, 14),
(48, 14),
(5, 15),
(8, 15),
(10, 15),
(13, 15),
(15, 15),
(17, 15),
(31, 15),
(35, 15),
(38, 15),
(42, 15),
(51, 15),
(53, 15),
(60, 15),
(62, 15),
(68, 15),
(70, 15),
(20, 16),
(25, 16),
(55, 16),
(57, 16),
(3, 17),
(6, 17),
(9, 17),
(13, 17),
(15, 17),
(25, 17),
(27, 17),
(29, 17),
(31, 17),
(35, 17),
(37, 17),
(38, 17),
(46, 17),
(49, 17),
(51, 17),
(53, 17),
(60, 17),
(62, 17),
(64, 17),
(67, 17),
(69, 17),
(19, 18),
(21, 18),
(24, 18),
(25, 18),
(27, 18),
(32, 18),
(41, 18),
(44, 18),
(55, 18),
(59, 18),
(9, 19),
(11, 19),
(47, 19),
(49, 19),
(62, 19),
(64, 19),
(67, 19),
(13, 20),
(15, 20),
(30, 20),
(32, 20),
(37, 20),
(41, 20),
(43, 20),
(50, 20),
(51, 20),
(53, 20),
(55, 20),
(57, 20),
(59, 20),
(61, 20),
(6, 21),
(9, 21),
(21, 21),
(23, 21),
(45, 21),
(47, 21),
(61, 21),
(1, 22),
(3, 22),
(11, 22),
(13, 22),
(15, 22),
(25, 22),
(30, 22),
(33, 22),
(36, 22),
(39, 22),
(53, 22),
(55, 22),
(64, 22),
(65, 22),
(8, 23),
(10, 23),
(17, 23),
(19, 23),
(38, 23),
(39, 23),
(41, 23),
(43, 23),
(45, 23),
(49, 23),
(50, 23),
(51, 23),
(57, 23),
(60, 23),
(5, 24),
(14, 24),
(15, 24),
(27, 24),
(29, 24),
(30, 24),
(54, 24),
(56, 24),
(1, 25),
(5, 25),
(15, 25),
(17, 25),
(19, 25),
(23, 25),
(33, 25),
(38, 25),
(40, 25),
(43, 25),
(47, 25),
(60, 25),
(65, 25),
(67, 25),
(6, 26),
(8, 26),
(10, 26),
(14, 26),
(25, 26),
(27, 26),
(29, 26),
(31, 26),
(47, 26),
(49, 26),
(51, 26),
(54, 26),
(56, 26),
(58, 26),
(2, 27),
(4, 27),
(19, 27),
(21, 27),
(40, 27),
(44, 27),
(63, 27),
(65, 27),
(67, 27),
(69, 27),
(9, 28),
(10, 28),
(14, 28),
(16, 28),
(21, 28),
(22, 28),
(31, 28),
(34, 28),
(36, 28),
(38, 28),
(49, 28),
(51, 28),
(58, 28),
(61, 28),
(4, 29),
(6, 29),
(26, 29),
(28, 29),
(38, 29),
(42, 29),
(53, 29),
(56, 29),
(65, 29),
(69, 29),
(9, 30),
(12, 30),
(14, 30),
(16, 30),
(21, 30),
(24, 30),
(34, 30),
(36, 30),
(44, 30),
(47, 30),
(61, 30),
(63, 30),
(2, 31),
(4, 31),
(6, 31),
(8, 31),
(17, 31),
(19, 31),
(30, 31),
(32, 31),
(38, 31),
(40, 31),
(49, 31),
(56, 31),
(58, 31),
(65, 31),
(70, 31),
(12, 32),
(14, 32),
(22, 32),
(24, 32),
(34, 32),
(36, 32),
(43, 32),
(49, 32),
(53, 32),
(61, 32),
(64, 32),
(4, 33),
(6, 33),
(16, 33),
(17, 33),
(8, 34),
(10, 34),
(18, 34),
(22, 34),
(25, 34),
(27, 34),
(28, 34),
(30, 34),
(32, 34),
(34, 34),
(36, 34),
(40, 34),
(43, 34),
(46, 34),
(49, 34),
(51, 34),
(58, 34),
(60, 34),
(62, 34),
(64, 34),
(66, 34),
(70, 34),
(12, 35),
(14, 35),
(53, 35),
(56, 35),
(2, 36),
(6, 36),
(16, 36),
(18, 36),
(21, 36),
(23, 36),
(27, 36),
(31, 36),
(43, 36),
(46, 36),
(60, 36),
(62, 36),
(66, 36),
(68, 36),
(10, 37),
(12, 37),
(33, 37),
(36, 37),
(39, 37),
(41, 37),
(49, 37),
(53, 37),
(57, 37),
(2, 38),
(8, 38),
(14, 38),
(16, 38),
(18, 38),
(21, 38),
(43, 38),
(49, 38),
(68, 38),
(70, 38),
(6, 39),
(8, 39),
(10, 39),
(28, 39),
(31, 39),
(36, 39),
(39, 39),
(41, 39),
(43, 39),
(51, 39),
(53, 39),
(55, 39),
(57, 39),
(59, 39),
(60, 39),
(62, 39),
(64, 39),
(65, 39),
(2, 40),
(6, 40),
(10, 40),
(12, 40),
(14, 40),
(16, 40),
(18, 40),
(19, 40),
(67, 40),
(70, 40),
(25, 41),
(28, 41),
(31, 41),
(33, 41),
(36, 41),
(40, 41),
(44, 41),
(47, 41),
(50, 41),
(52, 41),
(55, 41),
(58, 41),
(60, 41),
(62, 41),
(64, 41),
(2, 42),
(4, 42),
(6, 42),
(9, 42),
(16, 42),
(18, 42),
(20, 42),
(23, 42),
(31, 43),
(33, 43),
(36, 43),
(38, 43),
(41, 43),
(44, 43),
(55, 43),
(56, 43),
(58, 43),
(62, 43),
(65, 43),
(67, 43),
(70, 43),
(9, 44),
(11, 44),
(14, 44),
(20, 44),
(26, 44),
(28, 44),
(30, 44),
(50, 44),
(52, 44),
(1, 45),
(4, 45),
(15, 45),
(18, 45),
(38, 45),
(41, 45),
(45, 45),
(47, 45),
(58, 45),
(60, 45),
(62, 45),
(65, 45),
(66, 45),
(9, 46),
(11, 46),
(28, 46),
(30, 46),
(31, 46),
(33, 46),
(36, 46),
(38, 46),
(49, 46),
(51, 46),
(53, 46),
(56, 46),
(65, 46),
(12, 47),
(15, 47),
(17, 47),
(20, 47),
(23, 47),
(26, 47),
(41, 47),
(45, 47),
(60, 47),
(62, 47),
(68, 47),
(70, 47),
(3, 48),
(31, 48),
(33, 48),
(37, 48),
(39, 48),
(47, 48),
(49, 48),
(51, 48),
(53, 48),
(1, 49),
(3, 49),
(9, 49),
(12, 49),
(23, 49),
(29, 49),
(56, 49),
(64, 49),
(68, 49),
(4, 50),
(6, 50),
(15, 50),
(33, 50),
(35, 50),
(39, 50),
(41, 50),
(44, 50),
(45, 50),
(47, 50),
(55, 50),
(56, 50),
(58, 50),
(61, 50),
(62, 50),
(12, 51),
(15, 51),
(17, 51),
(20, 51),
(25, 51),
(27, 51),
(29, 51),
(49, 51),
(53, 51),
(64, 51),
(66, 51),
(70, 51),
(4, 52),
(6, 52),
(30, 52),
(32, 52),
(35, 52),
(37, 52),
(39, 52),
(45, 52),
(56, 52),
(59, 52),
(68, 52),
(70, 52),
(18, 53),
(21, 53),
(23, 53),
(25, 53),
(45, 53),
(47, 53),
(49, 53),
(51, 53),
(53, 53),
(55, 53),
(62, 53),
(63, 53),
(66, 53),
(3, 54),
(6, 54),
(9, 54),
(12, 54),
(27, 54),
(30, 54),
(32, 54),
(34, 54),
(37, 54),
(39, 54),
(41, 54),
(44, 54),
(57, 54),
(59, 54),
(19, 55),
(21, 55),
(23, 55),
(25, 55),
(47, 55),
(51, 55),
(53, 55),
(55, 55),
(61, 55),
(63, 55),
(68, 55),
(70, 55),
(6, 56),
(9, 56),
(15, 56),
(17, 56),
(32, 56),
(34, 56),
(37, 56),
(41, 56),
(64, 56),
(66, 56),
(12, 57),
(14, 57),
(25, 57),
(28, 57),
(30, 57),
(46, 57),
(51, 57),
(53, 57),
(59, 57),
(61, 57),
(3, 58),
(6, 58),
(19, 58),
(23, 58),
(32, 58),
(34, 58),
(37, 58),
(39, 58),
(41, 58),
(44, 58),
(66, 58),
(70, 58),
(9, 59),
(12, 59),
(46, 59),
(47, 59),
(54, 59),
(57, 59),
(61, 59),
(64, 59),
(1, 60),
(3, 60),
(7, 60),
(23, 60),
(28, 60),
(30, 60),
(41, 60),
(43, 60),
(67, 60),
(70, 60),
(12, 61),
(14, 61),
(17, 61),
(19, 61),
(22, 61),
(32, 61),
(34, 61),
(35, 61),
(37, 61),
(45, 61),
(49, 61),
(51, 61),
(54, 61),
(55, 61),
(61, 61),
(64, 61),
(3, 62),
(5, 62),
(7, 62),
(11, 62),
(28, 62),
(30, 62),
(39, 62),
(41, 62),
(55, 62),
(57, 62),
(59, 62),
(67, 62),
(70, 62),
(11, 63),
(17, 63),
(22, 63),
(25, 63),
(35, 63),
(37, 63),
(43, 63),
(45, 63),
(49, 63),
(52, 63),
(1, 64),
(3, 64),
(19, 64),
(20, 64),
(30, 64),
(32, 64),
(39, 64),
(41, 64),
(57, 64),
(59, 64),
(61, 64),
(64, 64),
(67, 64),
(68, 64),
(70, 64),
(5, 65),
(9, 65),
(15, 65),
(19, 65),
(34, 65),
(37, 65),
(47, 65),
(49, 65),
(52, 65),
(54, 65),
(28, 66),
(30, 66),
(42, 66),
(46, 66),
(59, 66),
(61, 66),
(63, 66),
(68, 66),
(1, 67),
(3, 67),
(5, 67),
(9, 67),
(11, 67),
(13, 67),
(15, 67),
(17, 67),
(21, 67),
(23, 67),
(32, 67),
(34, 67),
(37, 67),
(39, 67),
(54, 67),
(57, 67),
(25, 68),
(27, 68),
(40, 68),
(42, 68),
(44, 68),
(46, 68),
(49, 68),
(53, 68),
(59, 68),
(63, 68),
(70, 68),
(5, 69),
(13, 69),
(15, 69),
(21, 69),
(27, 69),
(30, 69),
(32, 69),
(36, 69),
(1, 70),
(5, 70),
(23, 70),
(25, 70),
(32, 70),
(36, 70),
(40, 70),
(44, 70),
(53, 70),
(54, 70),
(57, 70),
(59, 70),
(70, 70)]
|
import pafy
url = "https://www.youtube.com/watch?v=OE7wUUpJw6I&list=PL2_aWCzGMAwLPEZrZIcNEq9ukGWPfLT4A"
video = pafy.new(url)
print(video.title)
streams = video.streams
best = video.getbest()
for s in streams:
print(s)
print(best.resolution, best.extension)
print(best.url)
best.download(quiet=False)
|
import socket
UDPSock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
listen_addr = ("", 3386)
UDPSock.bind(listen_addr)
server_ip = None
client_ip = None
server_ports = {}
client_ports = {}
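# Peers register by sending a datagram of the form "<role>:<attr>", e.g.
# "server:room1" or "client:room1". Once a server and a client have registered
# with the same attr, their public (IP, port) pairs are exchanged below so the
# two peers can attempt a direct UDP hole punch.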
while True:
data, addr = UDPSock.recvfrom(1024)
data = str(data, encoding = "utf-8")
ip_type, attr = data.split(":")
ip = str(addr[0])
port = str(addr[1])
if ip_type == 'server':
server_ip = ip
server_ports[attr] = port
print(addr , 'is connected as a server', attr)
elif ip_type == 'client':
client_ip = ip
client_ports[attr] = port
print(addr , 'is connected as a client', attr)
else:
print('Error data:', data, 'from', addr)
if len(server_ports) > 0 and len(client_ports) > 0 :
print('Ready to start NAT !')
for server_port in list(server_ports.keys()):
for client_port in list(client_ports.keys()):
# same port attributes
if server_port == client_port:
# exchange IP & port to each other
data_s2c = server_ip + ":" + server_ports[server_port]
data_c2s = client_ip + ":" + client_ports[client_port]
UDPSock.sendto(data_s2c.encode("utf-8"), (client_ip, int(client_ports[client_port])))
UDPSock.sendto(data_c2s.encode("utf-8"), (server_ip, int(server_ports[server_port])))
del server_ports[server_port]
del client_ports[client_port]
print('Connect ', data_s2c, 'and', data_c2s, 'as', server_port)
|
#
# Copyright 2018 Joachim Lusiardi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from http.server import HTTPServer
from socketserver import ThreadingMixIn
from zeroconf import Zeroconf, ServiceInfo
import socket
import sys
import logging
from homekit.serverdata import HomeKitServerData
from homekit.request_handler import HomeKitRequestHandler
from homekit.model import Accessories, Categories
class HomeKitServer(ThreadingMixIn, HTTPServer):
def __init__(self, config_file, logger=sys.stderr):
"""
Create a new server that acts like a homekit accessory.
:param config_file: the file that contains the configuration data. Must be a string representing an absolute
path to the file
:param logger: this can be None to disable logging, sys.stderr to use the default behaviour of the python
implementation or an instance of logging.Logger to use this.
"""
if logger is None or logger == sys.stderr or isinstance(logger, logging.Logger):
self.logger = logger
else:
raise Exception('Invalid logger given.')
self.data = HomeKitServerData(config_file)
self.data.increase_configuration_number()
self.sessions = {}
self.zeroconf = Zeroconf()
self.mdns_type = '_hap._tcp.local.'
self.mdns_name = self.data.name + '._hap._tcp.local.'
self.accessories = Accessories()
HTTPServer.__init__(self, (self.data.ip, self.data.port), HomeKitRequestHandler)
def publish_device(self):
desc = {'md': 'My Lightbulb', # model name of accessory
# category identifier (page 254, 2 means bridge), must be a String
'ci': str(Categories[self.data.category]),
'pv': '1.0', # protocol version
'c#': str(self.data.configuration_number),
# configuration (consecutive number, 1 or greater, must be changed on every configuration change)
'id': self.data.accessory_pairing_id_bytes, # id MUST look like Mac Address
'ff': '0', # feature flags (Table 5-8, page 69)
's#': '1', # must be 1
'sf': '1' # status flag, lowest bit encodes pairing status, 1 means unpaired
}
if self.data.is_paired:
desc['sf'] = '0'
info = ServiceInfo(self.mdns_type, self.mdns_name, socket.inet_aton(self.data.ip), self.data.port, 0, 0, desc,
'ash-2.local.')
self.zeroconf.unregister_all_services()
self.zeroconf.register_service(info, allow_name_change=True)
def unpublish_device(self):
self.zeroconf.unregister_all_services()
def shutdown(self):
# tell all handlers to close the connection
for session in self.sessions:
self.sessions[session]['handler'].close_connection = True
self.socket.close()
HTTPServer.shutdown(self)
|
from rest_framework import response
from .utils import checkotp, createjwt, createotp, emailbody
from rest_framework.response import Response
from django.http import HttpResponse
from rest_framework.views import APIView
from .mongo import database_entry
from .backends import CustomPerms
from rest_framework import status
import boto3
from .serializer import EmailSerializer, DataEntrySerializer
client = boto3.client('sesv2', region_name='ap-south-1')
class DataEntry(APIView):
"""
Class to handle data entry
"""
permission_classes = [CustomPerms]
throttle_scope = 'emails'
serializer_class = DataEntrySerializer
def post(self, request, **kwargs) -> Response:
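# The submitted OTP must match the one encoded in the Authorization JWT
# (issued by the Email view below) before the entry is accepted.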
if checkotp(request.headers['Authorization'], request.data['otp']) is False:
return response.Response({
"error": "invalid otp"
}, status=status.HTTP_400_BAD_REQUEST)
if self.serializer_class(data=request.data).is_valid():
if database_entry(request.data['fields']):
pass
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
client.send_email(
FromEmailAddress='GitHub Community SRM <community@githubsrm.tech>',
Destination={
'ToAddresses': [
request.data['fields']['email'],
],
},
ReplyToAddresses=[
'community@githubsrm.tech',
],
Content={
'Simple': {
'Subject': {
'Data': 'Submission Confirmation | GitHub Community SRM',
'Charset': 'utf-8'
},
'Body': {
'Text': {
'Data': 'string',
'Charset': 'utf-8'
},
'Html': {
'Data': emailbody(file='confirm_email.html', name=request.data['fields']['name'], otp=None),
'Charset': 'utf-8'
}
}
},
}
)
return HttpResponse("Data Received Successfully", status=status.HTTP_200_OK)
else:
return HttpResponse("Bad Request", status=status.HTTP_400_BAD_REQUEST)
class Email(APIView):
"""
Class to handle emails
"""
throttle_scope = 'emails'
serializer_class = EmailSerializer
def post(self, request, **kwargs) -> Response:
try:
otp = createotp()
jwt = createjwt(otp)
if self.serializer_class(data=request.data).is_valid():
response = client.send_email(
FromEmailAddress='GitHub Community SRM <community@githubsrm.tech>',
Destination={
'ToAddresses': [
f'{request.data.get("email")}',
],
},
ReplyToAddresses=[
'community@githubsrm.tech',
],
Content={
'Simple': {
'Subject': {
'Data': 'OTP | GitHub Community SRM',
'Charset': 'utf-8'
},
'Body': {
'Text': {
'Data': 'string',
'Charset': 'utf-8'
},
'Html': {
'Data': emailbody(file='email.html', otp=otp, name=request.data["name"]),
'Charset': 'utf-8'
}
}
},
}
)
return Response({"jwt": jwt}, status=status.HTTP_201_CREATED)
return Response(status=status.HTTP_400_BAD_REQUEST)
except Exception as e:
print(e)
return HttpResponse("Internal Server Error", status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
from codes.a_config._rl_parameters.on_policy.parameter_ppo import PARAMETERS_PPO
from codes.e_utils.names import *
from codes.a_config.parameters_general import PARAMETERS_GENERAL
class PARAMETERS_LUNAR_LANDER_PPO(PARAMETERS_GENERAL, PARAMETERS_PPO):
ENVIRONMENT_ID = EnvironmentName.LUNAR_LANDER_V2
DEEP_LEARNING_MODEL = DeepLearningModelName.DISCRETE_STOCHASTIC_ACTOR_CRITIC_MLP
RL_ALGORITHM = RLAlgorithmName.DISCRETE_PPO_V0
OPTIMIZER = OptimizerName.ADAM
TRAIN_STOP_EPISODE_REWARD = 200.0
STOP_PATIENCE_COUNT = 10
MAX_GLOBAL_STEP = 10000000
GAMMA = 0.99
BATCH_SIZE = 32
AVG_EPISODE_SIZE_FOR_STAT = 50
CLIP_GRAD = 3.0
## PPO
PPO_GAE_LAMBDA = 0.75
PPO_TRAJECTORY_SIZE = 2049
PPO_TRAJECTORY_BATCH_SIZE = 64
ACTOR_LEARNING_RATE = 0.0002
LEARNING_RATE = 0.001
PPO_K_EPOCHS = 10
PPO_EPSILON_CLIP = 0.2
N_STEP = 1
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='celestial-client',
version='0.1',
description='Python client for www.celestial-automl.com',
author='Lars Hertel',
author_email='lars.h.hertel@gmail.com',
url='https://github.com/LarsHH/celestial-client',
project_urls={
"Website": "https://www.celestial-automl.com",
"Bug Tracker": "https://github.com/LarsHH/celestial-client/issues",
},
packages=['celestial'],
install_requires=['requests'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
|
import discord
import asyncio
import random
import steam
from steam.steamid import SteamId
from steam.steamprofile import SteamProfile
from steam.steamaccountuniverse import SteamAccountUniverse
from steam.steamaccounttype import SteamAccountType
from discord.ext import commands
from utils import checks
from mods.cog import Cog
code = "```py\n{0}\n```"
class Verification(Cog):
def __init__(self, bot):
super().__init__(bot)
self.cursor = bot.mysql.cursor
self.escape = bot.escape
self.bot.loop.create_task(self.verification_task())
async def remove_verification(self, server, idk=None):
role = discord.utils.get(server.roles, name='Awaiting Approval')
if role:
try:
await self.bot.delete_role(server, role)
except:
pass
sql = 'DELETE FROM `verification` WHERE server={0}'
sql = sql.format(server.id)
self.cursor.execute(sql)
self.cursor.commit()
sql = 'DELETE FROM `verification_queue` WHERE server={0}'
sql = sql.format(server.id)
self.cursor.execute(sql)
self.cursor.commit()
if idk is None:
try:
await self.bot.send_message(server.owner, ":warning: One of your server administrators (or you) has enabled approval/verification on user join.\n\nAdministrator permission was taken away from me, making the feature unusable; I need Administrator permission to make/add a role to mute on join.\n\n`The system has been automatically disabled, re-enable anytime if you please.`")
except:
pass
@commands.group(pass_context=True, aliases=['onjoinverify', 'approval'], invoke_without_command=True, no_pm=True)
@checks.admin_or_perm(manage_server=True)
async def verification(self, ctx, channel:discord.Channel=None, *, mentions:str=None):
perms = ctx.message.server.me.permissions_in(ctx.message.channel)
if perms.manage_roles is False or perms.manage_channels is False:
if perms.administrator is False:
await self.bot.say(":warning: `I need Administrator permission to make/add a role to mute on join`")
return
if channel is None:
channel = ctx.message.channel
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
if mentions is None:
sql = "INSERT INTO `verification` (`server`, `channel`) VALUES (%s, %s)"
self.cursor.execute(sql, (ctx.message.server.id, channel.id))
self.cursor.commit()
await self.bot.say(":white_check_mark: Enabled user approval/verification on join, all requests will go to {0} (`verification #<discord_channel>` to change)!".format(channel.mention))
else:
if len(ctx.message.mentions) == 0:
await self.bot.say("invalid mention")
return
sql = "INSERT INTO `verification` (`server`, `channel`, `mentions`) VALUES (%s, %s, %s)"
mention_ids = []
mention_names = []
for mention in ctx.message.mentions:
mention_ids.append(mention.id)
mention_names.append(mention.name)
self.cursor.execute(sql, (ctx.message.server.id, channel.id, ' '.join(mention_ids)))
self.cursor.commit()
await self.bot.say(":white_check_mark: Enabled user approval/verification on join, all requests will go to {0} (`verification <#discord_channel>` to change) and mention `{1}`!".format(channel.mention, ', '.join(mention_names)))
permissions = discord.Permissions()
permissions.read_messages = True
try:
await self.bot.create_role(ctx.message.server, name='Awaiting Approval', color=discord.Colour(int("FF0000", 16)), permissions=permissions)
except Exception as e:
print(e)
await self.bot.say(":warning: For some reason I couldn't create the \"Awaiting Approval\" role and users won't be muted, please create it (same name) and disable all the permissions you don't want unapproved-users to have.\nMake sure I have the administrator permission!")
elif channel is None:
sql = 'UPDATE `verification` SET channel={0} WHERE server={1}'
sql = sql.format(channel.id, ctx.message.server.id)
self.cursor.execute(sql)
self.cursor.commit()
await self.bot.say(":white_check_mark: Set approval/verification channel to {0}".format(channel.mention))
else:
await self.bot.say(':warning: You are about to disable member verification/approval on join, type `yes` to proceed.')
while True:
response = await self.bot.wait_for_message(timeout=15, author=ctx.message.author, channel=ctx.message.channel)
if response is None or response.content != 'yes':
await self.bot.say('**Aborting**')
return
else:
break
await self.remove_verification(ctx.message.server, True)
try:
role = discord.utils.get(ctx.message.server.roles, name='Awaiting Approval')
if role != None:
await self.bot.delete_role(ctx.message.server, role)
except discord.errors.Forbidden:
await self.bot.say("could not remove role, you took my perms away :(")
role2 = discord.utils.get(ctx.message.server.roles, name='Approved')
if role2 != None:
try:
await self.bot.delete_role(ctx.message.server, role2)
except:
pass
await self.bot.say(":negative_squared_cross_mark: **Disabled** user approval on join")
@verification.command(name='mention', aliases=['mentions'], pass_context=True, invoke_without_command=True, no_pm=True)
@checks.admin_or_perm(manage_server=True)
async def verification_mention(self, ctx, *mentions:str):
perms = ctx.message.server.me.permissions_in(ctx.message.channel)
if perms.manage_roles is False or perms.manage_channels is False:
if perms.administrator is False:
await self.bot.say(":warning: `I need Administrator permission to make/add a role to mute on join`")
return
if len(ctx.message.mentions) == 0 and '@everyone' not in mentions and '@here' not in mentions:
await self.bot.say(':no_entry: `Invalid mention(s).`')
return
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":no_entry: This server does not have approval/verification turned on (`verification <#discord_channel>` to do so)!!!")
return
if len(mentions) == 0:
sql = 'UPDATE `verification` SET mentions=NULL WHERE server={0}'
sql = sql.format(ctx.message.server.id)
self.cursor.execute(sql)
self.cursor.commit()
await self.bot.say(":negative_squared_cross_mark: Disabled/Removed mentions on user join for approval")
else:
mention_ids = []
mention_names = []
everyone = False
for mention in mentions:
if mention == '@everyone':
mention_ids.append('@everyone')
elif mention == '@here':
mention_ids.append('@here')
for mention in ctx.message.mentions:
mention_ids.append(mention.id)
mention_names.append(mention.name)
sql = 'SELECT mentions FROM `verification` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
mention_results = self.cursor.execute(sql).fetchall()
update = False
if mention_results[0]['mentions'] != None:
update = True
things = mention_results[0]['mentions'].split()
for x in things:
mention_ids.append(x)
sql = "UPDATE `verification` SET mentions={0} WHERE server={1}"
sql = sql.format(self.escape(' '.join(mention_ids)), ctx.message.server.id)
self.cursor.execute(sql)
self.cursor.commit()
if update:
await self.bot.say(":white_check_mark: Updated mentions to include `{0}` on user join for approval".format(', '.join(mention_names)))
else:
await self.bot.say(":white_check_mark: Set `{0}` to be mentioned on user join for approval".format(', '.join(mention_names)))
@commands.group(pass_context=True, invoke_without_command=True, no_pm=True)
@checks.mod_or_perm(manage_server=True)
async def verify(self, ctx, *users:str):
perms = ctx.message.server.me.permissions_in(ctx.message.channel)
if perms.manage_roles is False or perms.manage_channels is False:
if perms.administrator is False:
await self.bot.say(":warning: `I need Administrator permission to make/add a role to mute on join`")
return
if len(users) == 0:
await self.bot.say("pls input users to verify thx")
return
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":no_entry: This server does not have approval/verification turned **on** (`verification <#discord_channel>` to do so)!!!")
return
role = discord.utils.get(ctx.message.server.roles, name="Awaiting Approval")
count = 0
count2 = 0
discord_user = None
for user in users:
if user.isdigit():
user = int(user)
sql = 'SELECT * FROM `verification_queue` WHERE server={0} AND id={1}'
sql = sql.format(ctx.message.server.id, user)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":warning: `{0}` is not in the verification queue.".format(user))
if len(users) > 1:
continue
else:
return
sql = 'DELETE FROM `verification_queue` WHERE server={0} AND id={1}'
sql = sql.format(ctx.message.server.id, user)
self.cursor.execute(sql)
self.cursor.commit()
discord_user = discord.Server.get_member(ctx.message.server, user_id=str(result[count]['user']))
count += 1
else:
if len(ctx.message.mentions) == 0:
await self.bot.say("If you're not gonna use approval id, at least mention correctly!")
return
for x in ctx.message.mentions:
if count == len(ctx.message.mentions):
break
sql = 'SELECT * FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(ctx.message.server.id, x.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":warning: `{0}` is not in the verification queue.".format(user))
if len(users) > 1:
continue
else:
return
sql = 'DELETE FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(ctx.message.server.id, x.id)
self.cursor.execute(sql)
self.cursor.commit()
discord_user = discord.Server.get_member(ctx.message.server, user_id=str(result[count2]['user']))
count2 += 1
if discord_user is None:
continue
try:
await self.bot.remove_roles(discord_user, role)
except Exception as e:
await self.bot.say(code.format(e))
await self.bot.say(":warning: {0} was removed from the queue however his role could not be removed because I do not have Administrator permissions.\nPlease remove the role manually and give me **Administrator**.".format(user))
return
role = discord.utils.get(ctx.message.server.roles, name='Approved')
if role != None:
try:
await self.bot.add_roles(discord_user, role)
except:
pass
await self.bot.say(":white_check_mark: Removed `{0}` from queue!".format(user))
queue_removed_msg = 'You have been approved/verified for `{0}` and can now message!'.format(ctx.message.server.name)
await self.bot.send_message(discord_user, queue_removed_msg)
@verify.command(name='list', pass_context=True, invoke_without_command=True, no_pm=True)
async def verify_list(self, ctx):
perms = ctx.message.server.me.permissions_in(ctx.message.channel)
if perms.manage_roles is False or perms.manage_channels is False:
if perms.administrator is False:
await self.bot.say(":warning: `I need Administrator permission to make/add a role to mute on join`")
return
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":no_entry: This server does not have approval/verification turned on (`verification <#discord_channel>` to do so)!!!")
return
sql = 'SELECT * FROM `verification_queue` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":no_entry: `There are no users in the verification/approval queue`")
return
users = []
for s in result:
user = discord.Server.get_member(ctx.message.server, user_id=str(s['user']))
if user is None:
continue
users.append('{0}#{1} ({2})'.format(user.name, user.discriminator, str(s['id'])))
await self.bot.say("**{0} Users in Queue**\n`{1}`".format(len(users), ', '.join(users)))
# steam_regex = r"^(http|https|)(\:\/\/|)steamcommunity\.com\/id\/(.*)$"
@verify.command(name='check', pass_context=True, aliases=['steam', 'link'])
async def verify_check(self, ctx, stem:str):
try:
if ctx.message.channel.is_private is False:
await self.bot.say(':no_entry: `Private Message only.`')
return
sql = 'SELECT * FROM `verification_queue` WHERE user={0}'
sql = sql.format(ctx.message.author.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(':no_entry: You are not in the verification queue for any server.')
return
server_id = result[0]['server']
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(server_id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":no_entry: Server you are in queue for disabled verification.")
return
sql = 'SELECT * FROM `verification_steam` WHERE server={0} AND user={1}'
sql = sql.format(server_id, ctx.message.author.id)
result = self.cursor.execute(sql).fetchall()
if len(result) != 0:
await self.bot.say(":no_entry: You've already verified your steam account!")
return
sql = 'SELECT id,server FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(server_id, ctx.message.author.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":warning: `{0}` is not in the verification queue.".format(ctx.message.author))
return
verification_id = str(result[0]['id'])
steamId = None
steamProfile = None
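# Try each supported Steam identifier format in turn: SteamID, SteamID3,
# SteamID64, a full profile URL, and finally a custom (vanity) profile URL.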
if steamId is None:
steamId = SteamId.fromSteamId("{0}".format(stem))
if steamId is None:
steamId = SteamId.fromSteamId3(stem)
if steamId is None:
steamId = SteamId.fromSteamId64(stem)
if steamId is None:
steamId = SteamId.fromProfileUrl(stem)
if steamId is None:
steamProfile = SteamProfile.fromCustomProfileUrl(stem)
if steamProfile is None:
await self.bot.say(":no_entry: `Bad Steam ID/64/URL`")
return
steamId = steamProfile.steamId
else:
steamProfile = SteamProfile.fromSteamId(steamId)
if verification_id in steamProfile.displayName:
sql = 'INSERT INTO `verification_steam` (`user`, `server`, `steam`, `id`) VALUES (%s, %s, %s, %s)'
self.cursor.execute(sql, (ctx.message.author.id, server_id, steamId.profileUrl, verification_id))
self.cursor.commit()
await self.bot.say(':white_check_mark: `{0}` steam profile submitted and passed steam name check, awaiting moderator approval.'.format(ctx.message.author))
else:
await self.bot.say(":warning: **{0}** is not in the steam account's name.".format(verification_id))
except Exception as e:
await self.bot.say(code.format(e))
async def verification_task(self):
if self.bot.shard_id != 0:
return
while True:
sql = 'SELECT * FROM `verification_steam`'
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await asyncio.sleep(60)
continue
for s in result:
server = self.bot.manager.get_server(str(s['server']))
if server:
user = server.get_member(str(s['user']))
if user is None:
continue
sql = 'SELECT channel FROM `verification` WHERE server={0}'
sql = sql.format(server.id)
channel = server.get_channel(str(self.cursor.execute(sql).fetchall()[0]['channel']))
msg = '**Steam Account Check**\n`{0} (Verification ID: {1})` has submitted their steam profile and passed the name check.\n`Steam Profile:` {2}'.format(user, s['id'], s['steam'])
await self.bot.send_message(channel, msg)
sql = 'DELETE FROM `verification_steam` WHERE server={0} AND user={1}'
sql = sql.format(server.id, user.id)
self.cursor.execute(sql)
self.cursor.commit()
await asyncio.sleep(60)
async def on_member_join(self, member):
try:
if member.bot:
return
server = member.server
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
return
channel = server.get_channel(str(result[0]['channel']))
if channel is None:
raise discord.errors.NotFound
perms = server.me.permissions_in(channel)
if perms.manage_roles is False or perms.manage_channels is False:
if perms.administrator is False:
await self.remove_verification(server)
return
sql = "INSERT INTO `verification_queue` (`user`, `server`, `id`) VALUES (%s, %s, %s)"
rand = random.randint(0, 99999)
self.cursor.execute(sql, (member.id, server.id, rand))
self.cursor.commit()
role = discord.utils.get(server.roles, name='Awaiting Approval')
await self.bot.add_roles(member, role)
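# Deny read/send on every channel the new member could otherwise see, so the
# "Awaiting Approval" role keeps them muted until a moderator verifies them.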
for s in server.channels:
perms = member.permissions_in(s)
if perms.read_messages is False:
continue
overwrite = discord.PermissionOverwrite()
overwrite.send_messages = False
overwrite.read_messages = False
await self.bot.edit_channel_permissions(s, role, overwrite)
msg = ''
if result[0]['mentions']:
for x in result[0]['mentions'].split(' '):
if 'everyone' in x or 'here' in x:
msg += '{0} '.format(x)
else:
msg += '<@{0}> '.format(x)
msg += '\n'
msg += ':warning: `{0}` has joined the server and is awaiting approval.\n\nRun `verify {1}` (or `verify @mention`) to approve; kick the user to remove them from the queue.'.format(member, rand)
await self.bot.send_message(channel, msg, replace_everyone=False, replace_mentions=False)
join_msg = "You've been placed in the approval queue for `{0}`, please be patient and wait until a staff member approves your join!\n\nIf you'd like to expedite approval (and have a steam account), place **{1}** in your steam name and then run `.verify check <steam_url/id/vanity>`.".format(server.name, rand)
await self.bot.send_message(member, join_msg)
except (discord.errors.Forbidden, discord.errors.InvalidArgument, discord.errors.NotFound):
await self.remove_verification(server)
async def on_member_remove(self, member):
try:
if member.bot:
return
server = member.server
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
return
sql = 'SELECT * FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(server.id, member.id)
result2 = self.cursor.execute(sql).fetchall()
if len(result2) == 0:
return
sql = 'DELETE FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(server.id, member.id)
self.cursor.execute(sql)
self.cursor.commit()
channel = self.bot.get_channel(id=str(result[0]['channel']))
await self.bot.send_message(channel, ':exclamation: `{0}` has been removed from the approval/verification queue for leaving the server or being kicked.'.format(member))
except (discord.errors.Forbidden, discord.errors.InvalidArgument, discord.errors.NotFound):
await self.remove_verification(server)
async def on_member_ban(self, member):
try:
if member.bot:
return
server = member.server
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
return
sql = 'SELECT * FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(server.id, member.id)
result2 = self.cursor.execute(sql).fetchall()
if len(result2) == 0:
return
sql = 'DELETE FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(server.id, member.id)
self.cursor.execute(sql)
self.cursor.commit()
channel = self.bot.get_channel(id=str(result[0]['channel']))
await self.bot.send_message(channel, ':exclamation: `{0}` has been removed from the approval/verification queue for being banned from the server.'.format(member))
except (discord.errors.Forbidden, discord.errors.InvalidArgument, discord.errors.NotFound):
await self.remove_verification(server)
def setup(bot):
bot.add_cog(Verification(bot))
|
'''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def can_paginate(operation_name=None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name\nas the method name on the client. For example, if the\nmethod name is create_foo, and you\'d normally invoke the\noperation as client.create_foo(**kwargs), if the\ncreate_foo operation can be paginated, you can use the\ncall client.get_paginator('create_foo').
"""
pass
def delete_connection(ConnectionId=None):
"""
Delete the connection with the provided id.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_connection(
ConnectionId='string'
)
:type ConnectionId: string
:param ConnectionId: [REQUIRED]
"""
pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to\nClientMethod.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid\nfor. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By\ndefault, the http method is whatever is used in the method\'s model.
"""
pass
def get_connection(ConnectionId=None):
"""
Get information about the connection with the provided id.
See also: AWS API Documentation
Exceptions
:example: response = client.get_connection(
ConnectionId='string'
)
:type ConnectionId: string
:param ConnectionId: [REQUIRED]
:rtype: dict
ReturnsResponse Syntax{
'ConnectedAt': datetime(2015, 1, 1),
'Identity': {
'SourceIp': 'string',
'UserAgent': 'string'
},
'LastActiveAt': datetime(2015, 1, 1)
}
Response Structure
(dict) --
ConnectedAt (datetime) --The time in ISO 8601 format for when the connection was established.
Identity (dict) --
SourceIp (string) --The source IP address of the TCP connection making the request to API Gateway.
UserAgent (string) --The User Agent of the API caller.
LastActiveAt (datetime) --The time in ISO 8601 format for when the connection was last active.
Exceptions
ApiGatewayManagementApi.Client.exceptions.GoneException
ApiGatewayManagementApi.Client.exceptions.LimitExceededException
ApiGatewayManagementApi.Client.exceptions.ForbiddenException
:return: {
'ConnectedAt': datetime(2015, 1, 1),
'Identity': {
'SourceIp': 'string',
'UserAgent': 'string'
},
'LastActiveAt': datetime(2015, 1, 1)
}
"""
pass
def get_paginator(operation_name=None):
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name\nas the method name on the client. For example, if the\nmethod name is create_foo, and you\'d normally invoke the\noperation as client.create_foo(**kwargs), if the\ncreate_foo operation can be paginated, you can use the\ncall client.get_paginator('create_foo').
:rtype: L{botocore.paginate.Paginator}
ReturnsA paginator object.
"""
pass
def get_waiter(waiter_name=None):
"""
Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters\nsection of the service docs for a list of available waiters.
:rtype: botocore.waiter.Waiter
"""
pass
def post_to_connection(Data=None, ConnectionId=None):
"""
Sends the provided data to the specified connection.
See also: AWS API Documentation
Exceptions
:example: response = client.post_to_connection(
Data=b'bytes'|file,
ConnectionId='string'
)
:type Data: bytes or seekable file-like object
:param Data: [REQUIRED]\nThe data to be sent to the client specified by its connection id.\n
:type ConnectionId: string
:param ConnectionId: [REQUIRED]\nThe identifier of the connection that a specific client is using.\n
:returns:
ApiGatewayManagementApi.Client.exceptions.GoneException
ApiGatewayManagementApi.Client.exceptions.LimitExceededException
ApiGatewayManagementApi.Client.exceptions.PayloadTooLargeException
ApiGatewayManagementApi.Client.exceptions.ForbiddenException
"""
pass
|
#!/usr/bin/env python
"""
RS 2018/02/23: Digesting Obsidian Output
Plots we want to see:
Trace plots of key parameters like layer depths, rock properties
Histograms of residuals between forward model values and data
Contour maps of residuals between forward models and data
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import GPy
pickaxe_npz = ('/Users/rscalzo/Desktop/Projects/Professional/Geoscience'
'/Formation Boundaries/sandbox/moomba-dk-run06.npz')
pickaxe_npz = ('/Users/rscalzo/Desktop/Projects/Professional/Geoscience'
'/Formation Boundaries/sandbox/gascoyne_v4-thin1000.npz')
mason_npz = ('/Users/rscalzo/Desktop/Projects/Professional/Geoscience'
'/Formation Boundaries/sandbox/moomba-dk-run01.npz')
def plot_sensor(sensors, readings, chain, sample=None,
units='unknown units', show=True, energy=None):
"""
Plots the value of a sensor against the forward model:
Contour plots of real data and forward models
Contour plots of mean model residuals across the chain
Histograms of model residuals across the chain
:param sensors: np.array of shape (N, 3) with the physical
coordinates (x,y,z) of the sensors in meters; assumed to have a
regular raster-like structure where N factors into (Nx, Ny)
:param readings: np.array of shape (N, ) with the observed
sensor readings for the real dataset
:param chain: np.array of shape (M, N) with the synthetic
sensor readings for each forward model
:param sample: int index of sample to grab from chain
(defaults to None, which averages over the chain)
:param units: str describing the units of sensor readings
:param show: call plt.show()? default True; set to False if you're
making a multi-panel plot or want to save fig in calling routine
:param energy: optional log posterior estimate(s)
"""
x, y, z = sensors.T
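# Work with mean-subtracted values so the data and the forward models are
# compared on a common baseline.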
d = readings - readings.mean()
if energy is not None: # HACK: take MAP estimate
sample = np.argmin(energy)
print("Taking MAP estimate, energy =", energy[sample])
if sample is None:
print("Averaged fwd models over chain of shape", chain.shape)
f = chain.mean(axis=0) - chain.mean()
elif not isinstance(sample, int):
print("ERROR: sample = {} is of type {}, not int".format(
sample, sample.__class__))
return
elif abs(sample) > len(chain):
print("ERROR: sample = {} not in range ({}, {})".format(
sample, -len(chain), len(chain)))
return
else:
print("Picking sample", sample, "from chain of shape", chain.shape)
f = chain[sample] - chain[sample].mean()
# Contour map of the data with forward-model contours overlaid
fig = plt.figure(figsize=(6,7))
ax1 = plt.subplot2grid((3,1), (0,0), rowspan=1)
plt.tricontourf(x, y, d, alpha=0.5, label='Data')
plt.colorbar()
plt.tricontour(x, y, f, colors='k', label='Fwd Model')
plt.xlabel("Eastings (m)")
plt.ylabel("Northings (m)")
plt.legend(loc='upper right')
# Contour map of residuals in f
ax1 = plt.subplot2grid((3,1), (1,0), rowspan=1)
plt.tricontourf(x, y, d-f, alpha=0.95, label='Data', cmap='coolwarm')
plt.colorbar()
plt.tricontour(x, y, f, colors='k', label='Fwd Model')
plt.xlabel("Eastings (m)")
plt.ylabel("Northings (m)")
plt.legend(loc='upper right')
# Histogram of residuals in f
ax2 = plt.subplot2grid((3,1), (2,0), rowspan=1)
plt.hist(d-f, bins=20)
plt.xlabel("Data $-$ Model ({})".format(units))
# Show
plt.subplots_adjust(left=0.12, bottom=0.08,
right=0.90, top=0.92,
wspace=0.20, hspace=0.40)
if show:
plt.show()
def display_ground_truth(labels, spherical=True, show=True):
"""
Displays geological ground-truth labels in a given area. Put here
until I find a better home for it.
:param labels: pd.DataFrame with columns ['x', 'y', 'val']
:param spherical: bool, True if (x, y) = (lat, lng) in degrees
:param show: call plt.show()? default True; set to False if you're
making a multi-panel plot or want to save fig in calling routine
"""
# Plot the ground truth
x, y, v = labels.x, labels.y, labels.val
for f in np.unique(v.values):
idx = (v == f)
plt.plot(x[idx], y[idx], ls='None', marker='o', ms=3, label=f)
plt.legend(loc='upper left')
# plt.title("${:.1f} \\times {:.1f}$ km$^2$ area centered on "
# "lng = ${:.3f}$, lat = ${:.3f}$"
# .format(L/1e+3, L/1e+3, lng, lat))
plt.xlabel('Eastings (m)')
plt.ylabel('Northings (m)')
if show:
plt.show()
def fieldobs_lookup(readings):
from gascoyne_config import config_layers
readstr = [ ]
for i, v in enumerate(readings):
if v in config_layers.index:
readstr.append(config_layers.loc[v,'name'])
else:
readstr.append('Unknown layer')
return readstr
def main_contours():
"""
The main routine to run a suite of diagnostic plots
"""
# Load everything
magSensors = np.loadtxt("magSensors.csv", delimiter=',')
magReadings = np.loadtxt("magReadings.csv", delimiter=',')
gravSensors = np.loadtxt("gravSensors.csv", delimiter=',')
gravReadings = np.loadtxt("gravReadings.csv", delimiter=',')
samples = np.load(pickaxe_npz)
N = len(samples['magReadings'])
# Make a few plots of sensors
plot_sensor(magSensors, magReadings, samples['magReadings'][N//2:],
units='nT', show=False) #, energy=samples['energy'][N/2:])
plt.savefig('mag_contours.png')
plt.show()
plot_sensor(gravSensors, gravReadings, samples['gravReadings'][N//2:],
units='mgal', show=False) #, energy=samples['energy'][N/2:])
plt.savefig('grav_contours.png')
plt.show()
def main_fieldobs():
"""
The main routine to show simulated field observations
"""
# First show the data we expect
fieldSensors = pd.read_csv('fieldobsSensors.csv', names=['x','y'], comment='#')
fieldReadings = pd.read_csv('fieldobsReadings.csv', names=['val'], comment='#')
fieldLabels = fieldSensors.assign(val=fieldobs_lookup(fieldReadings.val))
fig = plt.figure(figsize=(6,6))
display_ground_truth(fieldLabels, show=False)
plt.title('Field Observations')
plt.savefig('boundary_data.png')
# Now show samples
samples = np.load(pickaxe_npz)
fig = plt.figure(figsize=(6,6))
i = len(samples['fieldReadings'])
readings = samples['fieldReadings'][i-1]
fieldLabels.val = fieldobs_lookup(readings)
display_ground_truth(fieldLabels, show=False)
plt.title('Forward-Modeled Field Observations, '
'Sample {} from MCMC Chain'.format(i))
plt.savefig('boundary_fwdmodel_endchain.png')
plt.close()
def main_boundarymovie():
"""
Makes a movie of how the boundaries change as the chain samples
"""
# Load everything
magSensors = np.loadtxt("magSensors.csv", delimiter=',')
magReadings = np.loadtxt("magReadings.csv", delimiter=',')
gravSensors = np.loadtxt("gravSensors.csv", delimiter=',')
gravReadings = np.loadtxt("gravReadings.csv", delimiter=',')
samples = np.load(pickaxe_npz)
# Try fitting a few GP layers
layer_labels = ['layer{}ctrlPoints'.format(i) for i in range(4)]
for i in np.arange(0, 2500, 25):
layer_pars = np.array([samples[ll][i] for ll in layer_labels]).reshape(4,5,5)
fig = plt.figure()
gp_predict(gravSensors, layer_pars, ((0.0, 2e+4), (0.0, 2e+4)))
plt.savefig('boundary_movie_frame{:04d}.png'.format(i))
plt.close()
if __name__ == "__main__":
main_contours()
# main_fieldobs()
|
# Copyright (c) 2013 MetaMetrics, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
"""Core functionality and all required components of a working CFN template.
These are all available without preamble in a pyplate's global namespace.
"""
import inspect
import json
import traceback
import warnings
from collections import OrderedDict
from cfn_pyplates import exceptions
import functions
aws_template_format_version = '2010-09-09'
__all__ = [
'JSONableDict',
'CloudFormationTemplate',
'Parameters',
'Mappings',
'Resources',
'Outputs',
'Conditions',
'Properties',
'Mapping',
'Resource',
'Parameter',
'Output',
'DependsOn',
'CreationPolicy',
'DeletionPolicy',
'UpdatePolicy',
'Metadata',
'Condition',
'ec2_tags',
]
class JSONableDict(OrderedDict):
"""A dictionary that knows how to turn itself into JSON
Args:
update_dict: A dictionary of values for prepopulating the JSONableDict
at instantiation
name: An optional name. If left out, the class's (or subclass's) name
will be used.
The most common use-case of any JSON entry in a CFN Template is the
``{"Name": {"Key1": "Value1", "Key2": "Value2"} }`` pattern. The
significance of a JSONableDict's subclass name, or explicitly passing
a 'name' argument is accommodating this pattern. All JSONableDicts have
names.
To create the pyplate equivalent of the above JSON, construct a
JSONableDict accordingly::
JSONableDict({'Key1': 'Value1', 'Key2': 'Value2'}, 'Name')
Based on :class:`collections.OrderedDict`, the order of keys is significant.
"""
def __init__(self, update_dict=None, name=None):
super(JSONableDict, self).__init__()
self._name = name
if update_dict:
self.update(update_dict)
def __unicode__(self):
# Indenting to keep things readable
# Trailing whitespace after commas removed
# (The space after colons is cool, though. He can stay.)
return unicode(self.json)
def __str__(self):
return unicode(self).encode('utf-8')
def __setattr__(self, name, value):
# This makes it simple to bind child dictionaries to an
# attribute while still making sure they wind up in the output
# dictionary, see usage example in CloudFormationTemplate init
if isinstance(value, JSONableDict):
self.add(value)
super(JSONableDict, self).__setattr__(name, value)
def __delattr__(self, name):
attr = getattr(self, name)
if isinstance(attr, JSONableDict):
self.remove(attr)
super(JSONableDict, self).__delattr__(name)
def _get_name(self):
if self._name is not None:
return self._name
else:
# Default to the class name if _name is None
return self.__class__.__name__
def _set_name(self, name):
self._name = name
def _del_name(self):
self._name = None
name = property(_get_name, _set_name, _del_name)
"""Accessor to the ``name`` internals;
Allows getting, settings, and deleting the name
"""
@property
def json(self):
'Accessor to the canonical JSON representation of a JSONableDict'
return self.to_json(indent=2, separators=(',', ': '))
def add(self, child):
"""Add a child node
Args:
child: An instance of JSONableDict
Raises:
AddRemoveError: :exc:`cfn_pyplates.exceptions.AddRemoveError`
"""
if isinstance(child, JSONableDict):
self.update(
{child.name: child}
)
else:
raise exceptions.AddRemoveError
return child
def remove(self, child):
"""Remove a child node
Args:
child: An instance of JSONableDict
Raises:
AddRemoveError: :exc:`cfn_pyplates.exceptions.AddRemoveError`
"""
if isinstance(child, JSONableDict):
try:
del(self[child.name])
except KeyError:
# This will KeyError if the name of a child is changed but its
# corresponding key in this dict is not updated. In normal usage,
# this should never happen. If it does happen, you're doing
# weird stuff, and have been warned.
warnings.warn('{0} not found in dict when attempting removal'.format(child.name))
else:
raise exceptions.AddRemoveError
def to_json(self, *args, **kwargs):
"""Thin wrapper around the :func:`json.dumps` method.
Allows for passing any arguments that json.dumps would accept to
completely customize the JSON output if desired.
"""
return json.dumps(self, *args, **kwargs)
class CloudFormationTemplate(JSONableDict):
"""The root element of a CloudFormation template
Takes an optional description string in the constructor
Comes pre-loaded with all the subelements CloudFormation can stand:
- Metadata
- Parameters
- Mappings
- Resources
- Outputs
- Conditions
For more information, see `the AWS docs <cfn-template_>`_
"""
def __init__(self, description=None, options=None):
super(CloudFormationTemplate, self).__init__({
'AWSTemplateFormatVersion': aws_template_format_version,
})
if description:
self.update({
'Description': description,
})
# Tack on all the base template elements that a CF template can handle
# as easy-to-reach attributes
self.options = options
self.metadata = Metadata()
self.parameters = Parameters()
self.mappings = Mappings()
self.resources = Resources()
self.outputs = Outputs()
self.conditions = Conditions()
def __unicode__(self):
# Before outputting to json, remove empty elements
def predicate(obj):
"""getmembers predicate to find empty JSONableDict attributes attached to self
CloudFormation doesn't like empty mappings for these top-level
attributes, so any falsey JSONableDict that's an attribute on
the CloudFormationTemplate instance needs to get removed
"""
if isinstance(obj, JSONableDict) and not obj:
return True
for attr, mapping in inspect.getmembers(self, predicate):
delattr(self, attr)
return super(CloudFormationTemplate, self).__unicode__()
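# Usage sketch (illustrative only; the resource name, AMI id and instance type
# below are placeholder values):
#
#     cft = CloudFormationTemplate('A minimal example stack')
#     cft.resources.add(Resource('WebServer', 'AWS::EC2::Instance',
#                                {'ImageId': 'ami-12345678', 'InstanceType': 't2.micro'}))
#     print(cft.json)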
# CloudFormationTemplate base elements
class Parameters(JSONableDict):
"""The base Container for parameters used at stack creation
Attached to a :class:`cfn_pyplates.core.CloudFormationTemplate`
For more information, see `the AWS docs <cfn-parameters_>`_
"""
pass
class Mappings(JSONableDict):
"""The base Container for stack option mappings
.. note::
Since most lookups can be done inside a pyplate using python,
this is normally unused.
Attached to a :class:`cfn_pyplates.core.CloudFormationTemplate`
For more information, see `the AWS docs <cfn-mappings_>`_
"""
pass
class Resources(JSONableDict):
"""The base Container for stack resources
Attached to a :class:`cfn_pyplates.core.CloudFormationTemplate`
For more information, see `the AWS docs <cfn-resources_>`_
"""
pass
class Outputs(JSONableDict):
"""The base Container for stack outputs
Attached to a :class:`cfn_pyplates.core.CloudFormationTemplate`
For more information, see `the AWS docs <cfn-outputs_>`_
"""
pass
class Conditions(JSONableDict):
"""The base Container for stack conditions used at stack creation
Attached to a :class:`cfn_pyplates.core.CloudFormationTemplate`
For more information, see `the AWS docs <cfn-conditions_>`_
"""
pass
# Other 'named' JSONableDicts
class Properties(JSONableDict):
"""A properties mapping, used by various CFN declarations
Can be found in:
- :class:`cfn_pyplates.core.Parameters`
- :class:`cfn_pyplates.core.Outputs`
- :class:`cfn_pyplates.core.Resource`
Properties will be most commonly found in Resources
For more information, see `the AWS docs <cfn-properties_>`_
"""
pass
class Resource(JSONableDict):
"""A generic CFN Resource
Used in the :class:`cfn_pyplates.core.Resources` container.
All resources have a name, and most have a 'Type' and 'Properties' dict.
Thus, this class takes those as arguments and makes a generic resource.
The 'name' parameter must follow CFN's guidelines for naming
The 'type' parameter must be `one of these <cfn-resource-types_>`_
The optional 'properties' parameter is a dictionary of properties as
defined by the resource type, see documentation related to each resource
type
Args:
name: The unique name of the resource to add
type: The type of this resource
properties: Optional properties mapping to apply to this resource,
can be an instance of ``JSONableDict`` or just plain old ``dict``
        attributes: Optional (one of 'Condition', 'DependsOn', 'DeletionPolicy',
            'Metadata', 'UpdatePolicy', 'CreationPolicy', or a list of 2 or more)
For more information, see `the AWS docs <cfn-resources_>`_
"""
def __init__(self, name, type, properties=None, attributes=[]):
update_dict = {'Type': type}
super(Resource, self).__init__(update_dict, name)
if properties:
try:
# Assume we've got a JSONableDict
self.add(properties)
except exceptions.AddRemoveError:
# If not, coerce it
self.add(Properties(properties))
if attributes:
self._add_attributes(attributes)
def _add_attributes(self, attribute):
"""Is the Object a valid Resource Attribute?
:param attribute: the object under test
"""
if isinstance(attribute, list):
for attr in attribute:
self._add_attributes(attr)
elif attribute.__class__.__name__ in ['Metadata', 'UpdatePolicy', 'CreationPolicy']:
self.add(attribute)
elif attribute.__class__.__name__ in ['DependsOn', 'DeletionPolicy', 'Condition']:
self.update({attribute.__class__.__name__: attribute.value})
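# Illustrative usage sketch (the resource type, property values and names below
# are assumptions for the example, not part of this module): a Resource is added
# to a template's Resources container and rendered with the rest of the template.
#
#     cft = CloudFormationTemplate('demo')
#     cft.resources.add(
#         Resource('WebServer', 'AWS::EC2::Instance',
#                  {'ImageId': 'ami-12345678'})
#     )
#     rendered = cft.json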
class Parameter(JSONableDict):
"""A CFN Parameter
Used in the :class:`cfn_pyplates.core.Parameters` container, a Parameter
will be used when the template is processed by CloudFormation to prompt the
user for any additional input.
For more information, see `the AWS docs <cfn-parameters_>`_
Args:
name: The unique name of the parameter to add
type: The type of this parameter
properties: Optional properties mapping to apply to this parameter
"""
def __init__(self, name, type, properties=None):
# Just like a Resource, except the properties go in the
# update_dict, not a named key.
update_dict = {'Type': type}
if properties is not None:
update_dict.update(properties)
super(Parameter, self).__init__(update_dict, name)
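# Brief sketch (parameter name, type and default value are illustrative
# assumptions): any extra keys passed as ``properties`` are merged directly
# into the parameter body alongside 'Type'.
#
#     cft.parameters.add(
#         Parameter('InstanceType', 'String', {'Default': 'm1.small'})
#     )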
class Mapping(JSONableDict):
"""A CFN Mapping
Used in the :class:`cfn_pyplates.core.Mappings` container, a Mapping
defines mappings used within the Cloudformation template and is not
the same as a PyPlates options mapping.
For more information, see `the AWS docs <cfn-mappings_>`_
Args:
name: The unique name of the mapping to add
mappings: The dictionary of mappings
"""
def __init__(self, name, mappings=None):
update_dict = {}
if mappings is not None:
update_dict.update(mappings)
super(Mapping, self).__init__(update_dict, name)
class Output(JSONableDict):
"""A CFN Output
Used in the :class:`cfn_pyplates.core.Outputs`, an Output entry describes
    a value to be shown when describing this stack using CFN API tools.
For more information, see `the AWS docs <cfn-outputs_>`_
Args:
name: The unique name of the output
value: The value the output should return
description: An optional description of this output
"""
def __init__(self, name, value, description=None):
update_dict = {'Value': value}
if description is not None:
update_dict['Description'] = description
super(Output, self).__init__(update_dict, name)
class Metadata(JSONableDict):
"""CFN Metadata
    The Metadata attribute enables you to associate arbitrary data with a template element (for
    example, a :class:`cfn_pyplates.core.Resource`). In addition, you can use intrinsic functions
(such as GetAtt and Ref), parameters, and pseudo parameters within the Metadata attribute to
add those interpreted values.
For more information, see `the AWS docs <cfn-metadata_>`_
"""
pass
class DependsOn(object):
"""A CFN Resource Dependency
Used in the :class:`cfn_pyplates.core.Resource`, The DependsOn attribute enables you to specify
that the creation of a specific resource follows another
For more information, see `the AWS docs <cfn-dependson_>`_
Args:
        policy: the resource name (or list of resource names) that this resource depends on
"""
def __init__(self, policy=None):
if policy:
self.value = policy
class CreationPolicy(JSONableDict):
"""A CFN Resource Creation Policy
Used in the :class:`cfn_pyplates.core.Resource`, The CreationPolicy attribute enables you to
prevent a Resource's status reaching create complete until AWS CloudFormation receives a
specified number of success signals or the timeout period is exceeded
For more information, see `the AWS docs <cfn-creationpolicy_>`_
Args:
count: number of success signals AWS CloudFormation must receive before
it sets the resource status as CREATE_COMPLETE
timeout: The length of time that AWS CloudFormation waits for the number of signals that
was specified in the count kwarg (see AWS docs for syntax)
"""
def __init__(self, count=None, timeout=None):
super(CreationPolicy, self).__init__(name='CreationPolicy')
resource_signal = {}
if count is not None:
resource_signal['Count'] = int(count)
if timeout is not None:
resource_signal['Timeout'] = str(timeout)
resource_signal = JSONableDict(resource_signal, 'ResourceSignal')
self.add(resource_signal)
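# For reference, CreationPolicy(count=3, timeout='PT15M') would render as the
# following mapping (the timeout value is an assumed ISO 8601 duration string;
# see the AWS docs linked above for the exact syntax):
#
#     "CreationPolicy": {
#         "ResourceSignal": {
#             "Count": 3,
#             "Timeout": "PT15M"
#         }
#     }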
class DeletionPolicy(object):
"""A CFN Resource Deletion Policy
Used in the :class:`cfn_pyplates.core.Resource`, The DeletionPolicy attribute enables you to
specify how AWS CloudFormation handles the resource deletion.
For more information, see `the AWS docs <cfn-deletionpolicy_>`_
Args:
        policy: the deletion policy to apply, e.g. 'Delete', 'Retain' or 'Snapshot'
"""
def __init__(self, policy=None):
if policy:
self.value = str(policy)
class UpdatePolicy(JSONableDict):
"""A CFN Resource Update Policy
Used in the :class:`cfn_pyplates.core.Resource`, The UpdatePolicy attribute enables you to
specify how AWS CloudFormation handles rolling updates for a particular resource.
For more information, see `the AWS docs <cfn-updatepolicy_>`_
Args:
        properties: the update policy mapping to apply to this resource
"""
def __init__(self, properties=None):
super(UpdatePolicy, self).__init__(properties, 'UpdatePolicy')
class Condition(JSONableDict):
"""A CFN Condition Item
    Used in the :class:`cfn_pyplates.core.Conditions` container, a Condition
    will be used when the template is processed by CloudFormation so you can define
    which resources are created and how they're configured for each environment type.
    Conditions are made up of intrinsic functions for conditions found in
    :mod:`cfn_pyplates.functions`, or a :func:`ref <cfn_pyplates.functions.ref>`
    to a :class:`Parameter` or :class:`Mapping`.
    For more information, see `the AWS docs <cfn-conditions_>`_
    Args:
        name: The unique name of the Condition to add
        condition: The intrinsic conditional function defining the condition
"""
def __init__(self, name, condition):
super(Condition, self).__init__(condition, name)
self.value = name
def ec2_tags(tags):
"""A container for Tags on EC2 Instances
Tags are declared really verbosely in CFN templates, but we have
    opportunities in the land of python to keep things a little more
sane.
So we can turn the AWS EC2 Tags example from this::
"Tags": [
{ "Key" : "Role", "Value": "Test Instance" },
{ "Key" : "Application", "Value" : { "Ref" : "AWS::StackName"} }
]
Into this::
ec2_tags({
'Role': 'Test Instance',
'Application': ref('StackName'),
})
For more information, see `the AWS docs <cfn-ec2tags_>`_
Args:
tags: A dictionary of tags to apply to an EC2 instance
"""
tags_list = list()
for key, value in tags.iteritems():
tags_list.append({'Key': key, 'Value': value})
return tags_list
def generate_pyplate(pyplate, options=None):
"""Generate CloudFormation JSON Template based on a Pyplate
Arguments:
pyplate
input pyplate file, can be a path or a file object
options
a mapping of some kind (probably a dict),
            to be used as this pyplate's options mapping
Returns the output string of the compiled pyplate
"""
try:
if not isinstance(pyplate, file):
pyplate = open(pyplate)
pyplate = _load_pyplate(pyplate, options)
cft = _find_cloudformationtemplate(pyplate)
output = unicode(cft)
except Exception:
print 'Error processing the pyplate:'
print traceback.format_exc()
return None
return output
def _load_pyplate(pyplate, options_mapping=None):
'Load a pyplate file object, and return a dict of its globals'
# Inject all the useful stuff into the template namespace
exec_namespace = {
'options': options_mapping,
}
for entry in __all__:
exec_namespace[entry] = globals().get(entry)
for entry in functions.__all__:
exec_namespace[entry] = getattr(functions, entry)
# Do the needful.
exec pyplate in exec_namespace
return exec_namespace
def _find_cloudformationtemplate(pyplate):
"""Find a CloudFormationTemplate in a pyplate
Goes through a pyplate namespace dict and returns the first
CloudFormationTemplate it finds.
"""
for key, value in pyplate.iteritems():
if isinstance(value, CloudFormationTemplate):
return value
# If we haven't returned something, it's an Error
raise exceptions.Error('No CloudFormationTemplate found in pyplate')
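# Illustrative end-to-end sketch (the file name and option values are
# assumptions): a "pyplate" is an ordinary Python file that builds a
# CloudFormationTemplate at module level; generate_pyplate() exec's it with the
# core classes, intrinsic functions and the options mapping injected, locates
# the template object and returns the rendered JSON string.
#
#     # contents of example.py
#     cft = CloudFormationTemplate('Example stack')
#     cft.resources.add(Resource('Bucket', 'AWS::S3::Bucket'))
#
#     # compiling it elsewhere
#     output = generate_pyplate('example.py', options={'env': 'dev'})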
|
import json
from sequana import sequana_data
from sequana.modules_report.summary import SummaryModule
from sequana.utils import config
def test_summary_module(tmpdir):
directory = tmpdir.mkdir('test_variant_calling_module')
config.output_dir = str(directory)
config.sample_name = 'JB409847'
summary_dict = {'tool': 'sequana_summary',
'inputs': [
sequana_data('Hm2_GTGAAA_L005_R1_001.fastq.gz'),
sequana_data('Hm2_GTGAAA_L005_R2_001.fastq.gz')],
'outputs': [sequana_data('JB409847.vcf')],
'html': [sequana_data('JB409847.vcf')],
'rulegraph': sequana_data('test_summary_module.svg'),
'requirements': sequana_data('test_summary_module.svg'),
'snakefile': sequana_data('test_summary_module.svg'),
'config': sequana_data('test_summary_module.svg'),
'name': 'JB409847'}
SummaryModule(summary_dict)
|
"""
Determine if the given number is a power of two.
Example
For n = 64, the output should be
isPowerOfTwo(n) = true;
For n = 5, the output should be
isPowerOfTwo(n) = false.
"""
def isPowerOfTwo(n):
    # Treat 1 (2**0) as a power of two and guard against non-positive input,
    # which would otherwise loop forever (0) or give a wrong answer (1).
    if n < 1:
        return False
    while n % 2 == 0:
        n >>= 1
    return n == 1
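# A common constant-time alternative (not part of the original snippet): a
# positive power of two has exactly one set bit, so clearing the lowest set
# bit with n & (n - 1) must leave zero.
def is_power_of_two_bitwise(n):
    return n > 0 and (n & (n - 1)) == 0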
|
# Webhook test file for the live stats project.
|
import unittest
from tools37.tkfw.evaluable import EvaluableDictItem, EvaluableListItem, EvaluablePath
class TestEvaluableDictItem(unittest.TestCase):
def test_method_evaluate(self):
# on missing key
self.assertRaises(KeyError, EvaluableDictItem("key").evaluate, {})
# on wrong data type
self.assertRaises(TypeError, EvaluableDictItem("key").evaluate, [])
# standard behaviour
self.assertEqual("value", EvaluableDictItem("key").evaluate({"key": "value"}))
def test_method_get(self):
# on missing key
self.assertRaises(KeyError, EvaluableDictItem("key").get, {})
# on wrong data type
self.assertRaises(TypeError, EvaluableDictItem("key").get, [])
# standard behaviour
self.assertEqual("value", EvaluableDictItem("key").get({"key": "value"}))
def test_method_set(self):
        # standard behaviour : key doesn't exist
data = {}
EvaluableDictItem("key").set(data, "value")
self.assertEqual(data["key"], "value")
# standard behaviour : key already exists
data = {"key": "value"}
EvaluableDictItem("key").set(data, "modified")
self.assertEqual(data["key"], "modified")
class TestEvaluableListItem(unittest.TestCase):
def test_method_evaluate(self):
# on missing index
self.assertRaises(IndexError, EvaluableListItem(index=0).evaluate, [])
# on wrong data type
self.assertRaises(TypeError, EvaluableListItem(index=0).evaluate, {})
# standard behaviour
self.assertEqual("value", EvaluableListItem(index=0).evaluate(["value"]))
def test_method_get(self):
# on missing index
self.assertRaises(IndexError, EvaluableListItem(index=0).get, [])
# on wrong data type
self.assertRaises(TypeError, EvaluableListItem(index=0).get, {})
# standard behaviour
self.assertEqual("value", EvaluableListItem(index=0).get(["value"]))
def test_method_set(self):
# on missing index
self.assertRaises(IndexError, EvaluableListItem(index=0).set, data=[], value="value")
# standard behaviour
data = ["value"]
EvaluableListItem(index=0).set(data, "modified")
self.assertEqual(data[0], "modified")
class TestEvaluablePath(unittest.TestCase):
def test_method_evaluate(self):
# on wrong type 1
path = EvaluablePath(steps=[EvaluableDictItem("key"), EvaluableListItem(0)])
self.assertRaises(TypeError, path.evaluate, data=[])
# on missing key 1
path = EvaluablePath(steps=[EvaluableDictItem("key"), EvaluableListItem(0)])
self.assertRaises(KeyError, path.evaluate, data={})
# on wrong type 2
path = EvaluablePath(steps=[EvaluableDictItem("key"), EvaluableListItem(0)])
self.assertRaises(TypeError, path.evaluate, data={"key": {}})
# on missing index 2
path = EvaluablePath(steps=[EvaluableDictItem("key"), EvaluableListItem(0)])
self.assertRaises(IndexError, path.evaluate, data={"key": []})
# standard behaviour
path = EvaluablePath(steps=[EvaluableDictItem("key"), EvaluableListItem(0)])
self.assertEqual("value", path.evaluate(data={"key": ["value"]}))
def test_method_get(self):
# on wrong type 1
path = EvaluablePath(steps=[EvaluableDictItem("key"), EvaluableListItem(0)])
self.assertRaises(TypeError, path.get, data=[])
# on missing key 1
path = EvaluablePath(steps=[EvaluableDictItem("key"), EvaluableListItem(0)])
self.assertRaises(KeyError, path.get, data={})
# on wrong type 2
path = EvaluablePath(steps=[EvaluableDictItem("key"), EvaluableListItem(0)])
self.assertRaises(TypeError, path.get, data={"key": {}})
# on missing index 2
path = EvaluablePath(steps=[EvaluableDictItem("key"), EvaluableListItem(0)])
self.assertRaises(IndexError, path.get, data={"key": []})
# standard behaviour
path = EvaluablePath(steps=[EvaluableDictItem("key"), EvaluableListItem(0)])
self.assertEqual("value", path.get(data={"key": ["value"]}))
def test_method_set(self):
# on wrong type 1
path = EvaluablePath(steps=[EvaluableDictItem("key"), EvaluableListItem(0)])
self.assertRaises(TypeError, path.set, data=[], value=None)
# on missing key 1
path = EvaluablePath(steps=[EvaluableDictItem("key"), EvaluableListItem(0)])
self.assertRaises(KeyError, path.set, data={}, value=None)
# on wrong type 2
path = EvaluablePath(steps=[EvaluableDictItem("key"), EvaluableListItem(0)])
self.assertRaises(TypeError, path.set, data={"key": {}}, value=None)
# on missing index 2
path = EvaluablePath(steps=[EvaluableDictItem("key"), EvaluableListItem(0)])
self.assertRaises(IndexError, path.set, data={"key": []}, value=None)
# standard behaviour
path = EvaluablePath(steps=[EvaluableDictItem("key"), EvaluableListItem(0)])
data = {"key": ["value"]}
path.set(data=data, value="modified")
self.assertEqual("modified", data["key"][0])
def test_dunder_str(self):
# standard behaviour
self.assertEqual("key.0", str(EvaluablePath(steps=[EvaluableDictItem("key"), EvaluableListItem(0)])))
def test_dunder_add(self):
left = EvaluablePath(steps=[EvaluableDictItem("key")])
right = EvaluablePath(steps=[EvaluableListItem(0)])
# standard behaviour
self.assertEqual(EvaluablePath(steps=left.steps + right.steps), left + right)
if __name__ == '__main__':
unittest.main()
|
import sys
import textwrap
import pytask
import pytest
from _pytask.mark import MarkGenerator
from pytask import cli
from pytask import main
@pytest.mark.unit
@pytest.mark.parametrize("attribute", ["hookimpl", "mark"])
def test_mark_exists_in_pytask_namespace(attribute):
assert attribute in sys.modules["pytask"].__all__
@pytest.mark.unit
def test_pytask_mark_notcallable() -> None:
mark = MarkGenerator()
with pytest.raises(TypeError):
mark()
@pytest.mark.unit
@pytest.mark.filterwarnings("ignore:Unknown pytask.mark.foo")
def test_mark_with_param():
def some_function(abc):
pass
class SomeClass:
pass
assert pytask.mark.foo(some_function) is some_function
marked_with_args = pytask.mark.foo.with_args(some_function)
assert marked_with_args is not some_function
assert pytask.mark.foo(SomeClass) is SomeClass
assert pytask.mark.foo.with_args(SomeClass) is not SomeClass
@pytest.mark.unit
def test_pytask_mark_name_starts_with_underscore():
mark = MarkGenerator()
with pytest.raises(AttributeError):
mark._some_name
@pytest.mark.end_to_end
@pytest.mark.parametrize("config_name", ["pytask.ini", "tox.ini", "setup.cfg"])
def test_ini_markers(tmp_path, config_name):
tmp_path.joinpath(config_name).write_text(
textwrap.dedent(
"""
[pytask]
markers =
a1: this is a webtest marker
a2: this is a smoke marker
"""
)
)
session = main({"paths": tmp_path})
assert session.exit_code == 0
assert "a1" in session.config["markers"]
assert "a2" in session.config["markers"]
@pytest.mark.end_to_end
@pytest.mark.parametrize("config_name", ["pytask.ini", "tox.ini", "setup.cfg"])
def test_markers_command(tmp_path, runner, config_name):
config_path = tmp_path.joinpath(config_name)
config_path.write_text(
textwrap.dedent(
"""
[pytask]
markers =
a1: this is a webtest marker
a2: this is a smoke marker
nodescription
"""
)
)
result = runner.invoke(cli, ["markers", "-c", config_path.as_posix()])
for out in ["pytask.mark.a1", "pytask.mark.a2", "pytask.mark.nodescription"]:
assert out in result.output
@pytest.mark.end_to_end
@pytest.mark.filterwarnings("ignore:Unknown pytask.mark.")
@pytest.mark.parametrize("config_name", ["pytask.ini", "tox.ini", "setup.cfg"])
def test_ini_markers_whitespace(tmp_path, config_name):
tmp_path.joinpath(config_name).write_text(
textwrap.dedent(
"""
[pytask]
markers =
a1 : this is a whitespace marker
"""
)
)
tmp_path.joinpath("task_dummy.py").write_text(
textwrap.dedent(
"""
import pytask
@pytask.mark.a1
def test_markers():
assert True
"""
)
)
session = main({"paths": tmp_path, "strict_markers": True})
assert session.exit_code == 3
assert isinstance(session.collection_reports[0].exc_info[1], ValueError)
@pytest.mark.end_to_end
@pytest.mark.filterwarnings("ignore:Unknown pytask.mark.")
@pytest.mark.parametrize(
("expr", "expected_passed"),
[
("xyz", ["task_one"]),
("((( xyz)) )", ["task_one"]),
("not not xyz", ["task_one"]),
("xyz and xyz2", []),
("xyz2", ["task_two"]),
("xyz or xyz2", ["task_one", "task_two"]),
],
)
def test_mark_option(tmp_path, expr: str, expected_passed: str) -> None:
tmp_path.joinpath("task_dummy.py").write_text(
textwrap.dedent(
"""
import pytask
@pytask.mark.xyz
def task_one():
pass
@pytask.mark.xyz2
def task_two():
pass
"""
)
)
session = main({"paths": tmp_path, "marker_expression": expr})
tasks_that_run = [
report.task.name.rsplit("::")[1]
for report in session.execution_reports
if not report.exc_info
]
assert set(tasks_that_run) == set(expected_passed)
@pytest.mark.end_to_end
@pytest.mark.parametrize(
("expr", "expected_passed"),
[
("interface", ["task_interface"]),
("not interface", ["task_nointer", "task_pass", "task_no_1", "task_no_2"]),
("pass", ["task_pass"]),
("not pass", ["task_interface", "task_nointer", "task_no_1", "task_no_2"]),
(
"not not not (pass)",
["task_interface", "task_nointer", "task_no_1", "task_no_2"],
),
("no_1 or no_2", ["task_no_1", "task_no_2"]),
("not (no_1 or no_2)", ["task_interface", "task_nointer", "task_pass"]),
],
)
def test_keyword_option_custom(tmp_path, expr: str, expected_passed: str) -> None:
tmp_path.joinpath("task_dummy.py").write_text(
textwrap.dedent(
"""
def task_interface():
pass
def task_nointer():
pass
def task_pass():
pass
def task_no_1():
pass
def task_no_2():
pass
"""
)
)
session = main({"paths": tmp_path, "expression": expr})
assert session.exit_code == 0
tasks_that_run = [
report.task.name.rsplit("::")[1]
for report in session.execution_reports
if not report.exc_info
]
assert set(tasks_that_run) == set(expected_passed)
@pytest.mark.end_to_end
@pytest.mark.parametrize(
("expr", "expected_passed"),
[
("arg0", ["task_func[arg0]"]),
("1.3", ["task_func[1.3]"]),
("2-3", ["task_func[2-3]"]),
],
)
def test_keyword_option_parametrize(tmp_path, expr: str, expected_passed: str) -> None:
tmp_path.joinpath("task_dummy.py").write_text(
textwrap.dedent(
"""
import pytask
@pytask.mark.parametrize("arg", [None, 1.3, "2-3"])
def task_func(arg):
pass
"""
)
)
session = main({"paths": tmp_path, "expression": expr})
assert session.exit_code == 0
tasks_that_run = [
report.task.name.rsplit("::")[1]
for report in session.execution_reports
if not report.exc_info
]
assert set(tasks_that_run) == set(expected_passed)
@pytest.mark.end_to_end
@pytest.mark.parametrize(
("expr", "expected_error"),
[
(
"foo or",
"at column 7: expected not OR left parenthesis OR identifier; got end of "
"input",
),
(
"foo or or",
"at column 8: expected not OR left parenthesis OR identifier; got or",
),
(
"(foo",
"at column 5: expected right parenthesis; got end of input",
),
(
"foo bar",
"at column 5: expected end of input; got identifier",
),
(
"or or",
"at column 1: expected not OR left parenthesis OR identifier; got or",
),
(
"not or",
"at column 5: expected not OR left parenthesis OR identifier; got or",
),
],
)
@pytest.mark.parametrize("option", ["expression", "marker_expression"])
def test_keyword_option_wrong_arguments(
tmp_path, capsys, option: str, expr: str, expected_error: str
) -> None:
tmp_path.joinpath("task_dummy.py").write_text(
textwrap.dedent("def task_func(arg): pass")
)
session = main({"paths": tmp_path, option: expr})
assert session.exit_code == 4
captured = capsys.readouterr()
assert expected_error in captured.out.replace(
"\n", " "
) or expected_error in captured.out.replace("\n", "")
@pytest.mark.end_to_end
def test_configuration_failed(runner, tmp_path):
result = runner.invoke(
cli, ["markers", "-c", tmp_path.joinpath("non_existent_path").as_posix()]
)
assert result.exit_code == 2
@pytest.mark.end_to_end
def test_selecting_task_with_keyword_should_run_predecessor(runner, tmp_path):
source = """
import pytask
@pytask.mark.produces("first.txt")
def task_first(produces):
produces.touch()
@pytask.mark.depends_on("first.txt")
def task_second(depends_on):
pass
"""
tmp_path.joinpath("task_dummy.py").write_text(textwrap.dedent(source))
result = runner.invoke(cli, [tmp_path.as_posix(), "-k", "second"])
assert result.exit_code == 0
assert "2 succeeded" in result.output
@pytest.mark.end_to_end
def test_selecting_task_with_marker_should_run_predecessor(runner, tmp_path):
source = """
import pytask
@pytask.mark.produces("first.txt")
def task_first(produces):
produces.touch()
@pytask.mark.wip
@pytask.mark.depends_on("first.txt")
def task_second(depends_on):
pass
"""
tmp_path.joinpath("task_dummy.py").write_text(textwrap.dedent(source))
result = runner.invoke(cli, [tmp_path.as_posix(), "-m", "wip"])
assert result.exit_code == 0
assert "2 succeeded" in result.output
@pytest.mark.end_to_end
def test_selecting_task_with_keyword_ignores_other_task(runner, tmp_path):
source = """
import pytask
@pytask.mark.depends_on("first.txt")
def task_first():
pass
def task_second():
pass
"""
tmp_path.joinpath("task_dummy.py").write_text(textwrap.dedent(source))
result = runner.invoke(cli, [tmp_path.as_posix(), "-k", "second"])
assert result.exit_code == 0
assert "1 succeeded" in result.output
assert "1 skipped" in result.output
@pytest.mark.end_to_end
def test_selecting_task_with_marker_ignores_other_task(runner, tmp_path):
source = """
import pytask
@pytask.mark.depends_on("first.txt")
def task_first():
pass
@pytask.mark.wip
def task_second():
pass
"""
tmp_path.joinpath("task_dummy.py").write_text(textwrap.dedent(source))
result = runner.invoke(cli, [tmp_path.as_posix(), "-m", "wip"])
assert result.exit_code == 0
assert "1 succeeded" in result.output
assert "1 skipped" in result.output
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Parse Maven POM file.
"""
import logging
import os
from xml.etree.ElementTree import parse
from pyschool.cmdline import parse_args
class InvalidArtifact(Exception):
pass
class Artifact(object):
NAMESPACE = "http://maven.apache.org/POM/4.0.0"
DEFAULT_GROUP = 'sample-group'
DEFAULT_VERSION = '1.0.0-SNAPSHOT'
def __init__(self, artifactId, groupId=None, version=None):
self.artifactId = artifactId
self.groupId = groupId or Artifact.DEFAULT_GROUP
self.version = version or Artifact.DEFAULT_VERSION
def __repr__(self):
return "%s/%s/%s" % (self.groupId, self.artifactId, self.version)
@staticmethod
def from_pom_file(pom):
xml = parse(pom)
element = xml.find('{%s}artifactId' % (Artifact.NAMESPACE,))
if element is None:
raise InvalidArtifact("'artifactId' is missing in " + pom)
artifact = Artifact(element.text)
element = xml.find('{%s}groupId' % (Artifact.NAMESPACE,))
if element is None:
logging.info("'groupId' is missing in " + pom)
else:
artifact.groupId = element.text
element = xml.find('{%s}version' % (Artifact.NAMESPACE,))
if element is None:
logging.info("'groupId' is missing in " + pom)
else:
artifact.version = element.text
return artifact
def main():
args = parse_args()
fname = args.filename[0]
if not os.path.exists(fname):
raise SystemExit('"{}" is not found.'.format(fname))
artifact = Artifact.from_pom_file(fname)
print(artifact)
def test():
fname = "etc/xml-1.xml"
artifact = Artifact.from_pom_file(fname)
assert 'sample-group/sample-group-commons/1.0.0' == repr(artifact)
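# For reference, a minimal POM that satisfies this parser (values taken from the
# assertion in test() above; the exact file layout is otherwise an assumption):
#
#     <project xmlns="http://maven.apache.org/POM/4.0.0">
#         <groupId>sample-group</groupId>
#         <artifactId>sample-group-commons</artifactId>
#         <version>1.0.0</version>
#     </project>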
if __name__ == '__main__':
main()
# vim: set et ts=4 sw=4 cindent fileencoding=utf-8 :
|
import ac
import acsys
import os
import sys
import platform
import math
# Import Assetto Corsa shared memory library.
# It has a dependency on ctypes, which is not included in AC python version.
# Point to correct ctypes module based on platform architecture.
# First, get directory of the app, then add correct folder to sys.path.
app_dir = os.path.dirname(__file__)
if platform.architecture()[0] == "64bit":
sysdir = os.path.join(app_dir, 'dll', 'stdlib64')
else:
sysdir = os.path.join(app_dir, 'dll', 'stdlib')
# Python looks in sys.path for modules to load, insert new dir first in line.
sys.path.insert(0, sysdir)
os.environ['PATH'] = os.environ['PATH'] + ";."
from lib.sim_info import info
class ACGlobalData:
"""Handling all data from AC that is not car-specific.
Args:
cfg (obj:Config): App configuration.
"""
def __init__(self, cfg):
# Config object
self.cfg = cfg
# Data attributes
self.focused_car = 0
self.replay_time_multiplier = 1
def update(self):
"""Update data."""
self.focused_car = ac.getFocusedCar()
self.replay_time_multiplier = info.graphics.replayTimeMultiplier
class ACCarData:
"""Handling all data from AC that is car-specific.
Args:
cfg (obj:Config): App configuration.
car_id (int, optional): Car ID number to retrieve data from.
Defaults to own car.
"""
def __init__(self, cfg, car_id=0):
self.cfg = cfg
self.car_id = car_id
# Initialize data attributes
self.speed = 0
self.throttle = 0
self.brake = 0
self.clutch = 0
self.gear = 0
self.steering = 0
self.ffb = 0
# Normalized steering for steering trace
self.steering_normalized = 0.5
self.steering_cap = self.cfg.trace_steering_cap * math.pi / 180
self.gear_text = "N"
def set_car_id(self, car_id):
"""Update car ID to retrieve data from.
Args:
car_id (int): Car ID number."""
self.car_id = car_id
def update(self):
"""Update data."""
self.throttle = ac.getCarState(self.car_id, acsys.CS.Gas)
self.brake = ac.getCarState(self.car_id, acsys.CS.Brake)
self.clutch = 1 - ac.getCarState(self.car_id, acsys.CS.Clutch)
self.ffb = ac.getCarState(self.car_id, acsys.CS.LastFF)
self.steering = ac.getCarState(self.car_id, acsys.CS.Steer) * math.pi / 180
self.gear = ac.getCarState(self.car_id, acsys.CS.Gear)
if self.cfg.use_kmh:
self.speed = ac.getCarState(self.car_id, acsys.CS.SpeedKMH)
else:
self.speed = ac.getCarState(self.car_id, acsys.CS.SpeedMPH)
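        # Map the steering angle (radians) onto [0, 1] for the steering trace:
        # +steering_cap maps to 0.0, centre to 0.5, -steering_cap to 1.0;
        # values beyond the cap are clamped below.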
self.steering_normalized = 0.5 - (self.steering / (2 * self.steering_cap))
if self.steering_normalized > 1:
self.steering_normalized = 1
elif self.steering_normalized < 0:
self.steering_normalized = 0
# Gear label
if self.gear == 0:
self.gear_text = "R"
elif self.gear == 1:
self.gear_text = "N"
else:
self.gear_text = str(self.gear - 1)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: __init__.py
#
# Copyright 2018 Costas Tyfoxylos
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
"""
towerlib package
Import all parts from entities here
.. _Google Python Style Guide:
http://google.github.io/styleguide/pyguide.html
"""
from towerlib.entities.credential import Credential, CredentialType
from towerlib.entities.group import Group
from towerlib.entities.host import Host
from towerlib.entities.instance import Instance, InstanceGroup
from towerlib.entities.inventory import Inventory
from towerlib.entities.job import (JobRun,
JobSummary,
JobEvent,
JobTemplate,
SystemJob,
ProjectUpdateJob,
AdHocCommandJob,
Job)
from towerlib.entities.role import Role, ObjectRole
from towerlib.entities.core import (Entity,
Config,
LicenseInfo,
LicenseFeatures,
CERTIFICATE_TYPE_KINDS,
JOB_TYPES,
VERBOSITY_LEVELS,
Cluster,
ClusterInstance,
EntityManager)
from towerlib.entities.organization import Organization
from towerlib.entities.project import Project
from towerlib.entities.team import Team
from towerlib.entities.user import User
__author__ = '''Costas Tyfoxylos <ctyfoxylos@schubergphilis.com>'''
__docformat__ = '''google'''
__date__ = '''2018-01-02'''
__copyright__ = '''Copyright 2018, Costas Tyfoxylos'''
__license__ = '''MIT'''
__maintainer__ = '''Costas Tyfoxylos'''
__email__ = '''<ctyfoxylos@schubergphilis.com>'''
__status__ = '''Development''' # "Prototype", "Development", "Production".
# This is to 'use' the module(s), so lint doesn't complain
assert Entity
assert Config
assert LicenseInfo
assert LicenseFeatures
assert CERTIFICATE_TYPE_KINDS
assert JOB_TYPES
assert VERBOSITY_LEVELS
assert Organization
assert User
assert Team
assert Project
assert Group
assert Inventory
assert Host
assert Instance
assert InstanceGroup
assert CredentialType
assert Credential
assert JobTemplate
assert Role
assert ObjectRole
assert JobRun
assert JobSummary
assert JobEvent
assert ProjectUpdateJob
assert SystemJob
assert AdHocCommandJob
assert Job
assert Cluster
assert ClusterInstance
assert EntityManager
|
"""
This module provides errors/exceptions and warnings of general use for SunPy.
Exceptions that are specific to a given package should **not** be here,
but rather in the particular package.
"""
import warnings
__all__ = ["NoMapsInFileError",
"SunpyWarning", "SunpyUserWarning", "SunpyDeprecationWarning",
"SunpyPendingDeprecationWarning", "SunpyMetadataWarning",
"warn_user", "warn_deprecated", "warn_metadata"]
class NoMapsInFileError(Exception):
"""
An error raised when a file is opened and no maps are found.
"""
class SunpyWarning(Warning):
"""
The base warning class from which all Sunpy warnings should inherit.
Any warning inheriting from this class is handled by the Sunpy
logger. This warning should not be issued in normal code. Use
"SunpyUserWarning" instead or a specific sub-class.
"""
class SunpyUserWarning(UserWarning, SunpyWarning):
"""
The primary warning class for Sunpy.
Use this if you do not need a specific type of warning.
"""
class SunpyMetadataWarning(UserWarning):
    """
    Warning class for cases where metadata is missing.
This does not inherit from SunpyWarning because we want to use
stacklevel=3 to show the user where the issue occurred in their code.
"""
class SunpyDeprecationWarning(FutureWarning, SunpyWarning):
"""
A warning class to indicate a deprecated feature.
"""
class SunpyPendingDeprecationWarning(PendingDeprecationWarning, SunpyWarning):
"""
A warning class to indicate a soon-to-be deprecated feature.
"""
def warn_metadata(msg, stacklevel=1):
"""
Raise a `SunpyMetadataWarning`.
Parameters
----------
msg : str
Warning message.
stacklevel : int
This is interpreted relative to the call to this function,
e.g. ``stacklevel=1`` (the default) sets the stack level in the
code that calls this function.
"""
warnings.warn(msg, SunpyMetadataWarning, stacklevel + 1)
def warn_user(msg, stacklevel=1):
"""
Raise a `SunpyUserWarning`.
Parameters
----------
msg : str
Warning message.
stacklevel : int
This is interpreted relative to the call to this function,
e.g. ``stacklevel=1`` (the default) sets the stack level in the
code that calls this function.
"""
warnings.warn(msg, SunpyUserWarning, stacklevel + 1)
def warn_deprecated(msg, stacklevel=1):
"""
Raise a `SunpyDeprecationWarning`.
Parameters
----------
msg : str
Warning message.
stacklevel : int
This is interpreted relative to the call to this function,
e.g. ``stacklevel=1`` (the default) sets the stack level in the
code that calls this function.
"""
warnings.warn(msg, SunpyDeprecationWarning, stacklevel + 1)
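# Minimal usage sketch (the message texts are illustrative):
#
#     warn_user("Missing OBSERVER keyword; assuming an Earth-based observer")
#     warn_deprecated("some_function() will be removed in a future release")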
|