text stringlengths 8 6.05M |
|---|
#!/usr/bin/python
import numpy as np
import pylab as py
from COMMON import nanosec,yr,week,grav,msun,light,mpc,hub0,h0,omm,omv
from PREPARING_EPTA_DATA import EPTA_data
from scipy import integrate
from scipy.ndimage import gaussian_filter
import pyPdf,os
#Input parameters:
outputdir='../plots/'  # destination directory for the generated PDF figures
maxreds=100000 #I will not consider redshifts larger than this (since there should be no coalescences anyways).
minreds=1e-4
zbin=1000 #Number of z-bins to construct k(z), that will be fitted.
mchvec=np.array([8.,8.5,9.,9.5,10.,10.5,11.])  # log10 chirp masses (solar masses) to scan
#mchvec=np.linspace(8.,11.,8)
lsocondi=True #Either True or False, to apply the condition that the lso frequency is respected.
#-----------------------------------------------------------------
def mchirpfun(m1, m2):
    '''Gives the chirp mass of a binary of masses m1 and m2.'''
    mass_product = (m1 * m2) ** (3. / 5.)
    total_mass = (m1 + m2) ** (1. / 5.)
    return mass_product / total_mass
def fmaxlso(m1, m2):
    '''Gives the frequency of the last stable orbit of a binary of masses m1 and m2 (in solar masses).'''
    # f_LSO = c^3 / (6^(3/2) * pi * G * M_total); constants come from COMMON.
    denominator = 6. * np.sqrt(6.) * np.pi * grav * (m1 + m2) * msun
    return light ** 3. / denominator
def kfun(f, mch, amp):
    '''A certain function corresponding to the luminosity distance (times (1+z)**(-5/3))) to which a binary emitting
    at observed GW frequency f, and observed log10 chirp mass mch, can be observed, given an upper limit (95%) in
    the amplitude of amp.  (Note: the `amp` parameter shadows the module-level function of the same name.)'''
    chirp_term = (10 ** (mch) * msun * grav) ** (5. / 3.)
    freq_term = (np.pi * f) ** (2. / 3.)
    return 2. * chirp_term / (amp * light ** 4.) * freq_term * 1. / mpc
def amp(mch, reds, f):
    '''Amplitude of the GWs as a function of the proper chirp mass (in log10), redshift, and observed frequency.

    mch  -- log10 of the chirp mass in solar masses
    reds -- redshift of the source
    f    -- observed GW frequency in Hz
    Returns the dimensionless strain amplitude.
    (Fix: removed a leftover debug `print ld` statement that wrote the
    luminosity distance to stdout on every call.)
    '''
    ldist_const=light/(hub0*h0)/mpc
    # Luminosity distance in Mpc for a flat LCDM cosmology: (1+z) * comoving distance.
    ld=(1.+reds)*integrate.quad(lambda z: (omm*(1.+z)**3.+omv)**(-0.5),0,reds)[0]*ldist_const
    return 2.*(msun*10**(mch)*grav*(1.+reds))**(5./3.)*1./(light**4.*ld*mpc)*(np.pi*f)**(2./3.)
#Choose plotting options that look optimal for the paper.
fig_width = 3.4039
goldenmean=(np.sqrt(5.)-1.0)/2.0
fig_height = fig_width * goldenmean  # golden-ratio aspect for the figure
sizepoints=8
legendsizepoints=4.5
py.rcParams.update({
    'backend': 'ps',
    'ps.usedistiller': 'xpdf',
    'text.usetex': True,
    'figure.figsize': [fig_width, fig_height],
    'axes.titlesize': sizepoints,
    'axes.labelsize': sizepoints,
    'text.fontsize': sizepoints,  # NOTE(review): 'text.fontsize' is an old rcParam name; newer matplotlib uses 'font.size' -- confirm the pinned version
    'xtick.labelsize': sizepoints,
    'ytick.labelsize': sizepoints,
    'legend.fontsize': legendsizepoints
})
#Define the borders of the plot.
left, right, top, bottom, cb_fraction=0.15, 0.94, 0.96, 0.16, 0.145
#Load sensitivity curves.
ul1,ul2,ul3,ul4,ul5,ul6=EPTA_data() #Use only ul2.
fvec,hvec=ul2[:,0],ul2[:,1]  # frequency / strain columns of the chosen upper-limit curve
#reds=np.linspace(0.,maxreds,zbin)
reds=np.logspace(np.log10(minreds),np.log10(maxreds),zbin)  # log-spaced redshift grid
kdist_const=light/(hub0*h0)/mpc  # Hubble distance in Mpc
kdistvec=np.zeros(len(reds))
lumdistvec=np.zeros(len(reds))
# Comoving-distance integral for a flat LCDM cosmology at each grid redshift:
# kdistvec carries an extra (1+z)**(-2/3) factor (the k(z) curve), while
# lumdistvec is the plain luminosity distance (1+z)*D_C, both in Mpc.
for zi in xrange(len(reds)):  # xrange: this module is Python 2
    kdistvec[zi]=(1.+reds[zi])**(-2./3.)*integrate.quad(lambda z: (omm*(1.+z)**3.+omv)**(-0.5),0,reds[zi])[0]*kdist_const
    lumdistvec[zi]=(1.+reds[zi])*integrate.quad(lambda z: (omm*(1.+z)**3.+omv)**(-0.5),0,reds[zi])[0]*kdist_const
redskmax=reds[kdistvec.argmax()]  # redshift where the k(z) curve peaks
findi=5 #Index of the frequency bin at which the label (of the mass) will be plotted.
xmin,xmax=1e-2,1e3
#ymin,ymax=np.amin(dlhmat),np.amax(dlhmat) #Edges of the y-axis.
ymin,ymax=10,30000
# First figure: luminosity distance vs. redshift (saved as dl_plot.pdf).
fig=py.figure()
fig.subplots_adjust(left=left,right=right,top=top,bottom=bottom)
ax=fig.gca()
ax.plot(reds,lumdistvec,color='black')
#ax.plot(reds,kdistvec,color='black')
ax.hlines(1e3,minreds,maxreds,color='blue')  # 1 Gpc reference line
ax.grid()
ax.set_xlabel('$\\mathrm{Redshift}$')
#ax.set_ylabel('$\\log_{10}(\\mathrm{Redshift})$')
#ax.set_xlabel('$\\mathrm{GW\ Frequency\ /\ Hz}$')
ax.set_ylabel('$\\mathrm{D}/\ \\mathrm{Mpc}$')
#ax.set_xlabel('$\\mathrm{f\ [Hz]}$')
#ax.set_ylabel('$\\mathrm{D}_\\mathrm{H}\ [\\mathrm{Mpc}]$')
ax.set_xlim(xmin,xmax)
#ax.set_xticks([-8.5,-8.,-7.5,-7.,-6.5])
#ax.set_xticklabels(["$-8.5$","$-8$","$-7.5$","$-7$","$-6.5$"])
#ax.set_yticks([-14.5,-14.,-13.5,-13.])
#ax.set_yticklabels(["$-14.5$","$-14$","$-13.5$","$-13$"])
ax.set_ylim(ymin,ymax)
ax.set_xscale('log')
ax.set_yscale('log')
oplot='dl_plot.pdf'
fig.savefig(outputdir+oplot, transparent=True)
# Second figure: luminosity distance (dotted) and k(z) distance (solid)
# overlaid (saved as dlnew_plot.pdf).
fig=py.figure()
fig.subplots_adjust(left=left,right=right,top=top,bottom=bottom)
ax=fig.gca()
ax.plot(reds,lumdistvec,':',color='black')
ax.plot(reds,kdistvec,color='black')
ax.hlines(1e3,minreds,maxreds,color='blue')  # 1 Gpc reference line
ax.grid()
ax.set_xlabel('$\\mathrm{Redshift}$')
#ax.set_ylabel('$\\log_{10}(\\mathrm{Redshift})$')
#ax.set_xlabel('$\\mathrm{GW\ Frequency\ /\ Hz}$')
ax.set_ylabel('$\\mathrm{D}/\ \\mathrm{Mpc}$')
#ax.set_xlabel('$\\mathrm{f\ [Hz]}$')
#ax.set_ylabel('$\\mathrm{D}_\\mathrm{H}\ [\\mathrm{Mpc}]$')
ax.set_xlim(xmin,xmax)
#ax.set_xticks([-8.5,-8.,-7.5,-7.,-6.5])
#ax.set_xticklabels(["$-8.5$","$-8$","$-7.5$","$-7$","$-6.5$"])
#ax.set_yticks([-14.5,-14.,-13.5,-13.])
#ax.set_yticklabels(["$-14.5$","$-14$","$-13.5$","$-13$"])
ax.set_ylim(ymin,ymax)
ax.set_xscale('log')
ax.set_yscale('log')
oplot='dlnew_plot.pdf'
fig.savefig(outputdir+oplot, transparent=True)
|
# Battleship-style attack counter: each cell holds a ship's remaining health.
# An attack on a positive cell decrements it; a cell that reaches exactly zero
# counts as a destroyed ship.
rows_of_the_field = int(input())
all_rows = []
destroyed_ships = 0
for _ in range(rows_of_the_field):
    current_row = list(map(int, input().split(" ")))
    all_rows.append(current_row)
# Attacks arrive as space-separated "row-col" tokens on a single line.
all_squares_to_attack = input().split(" ")
for square_to_attack in all_squares_to_attack:
    square_to_attack = square_to_attack.split("-")
    row = int(square_to_attack[0])
    ship = int(square_to_attack[1])
    # NOTE(review): indices are used unchecked -- out-of-range input raises IndexError.
    if all_rows[row][ship] > 0:
        all_rows[row][ship] -= 1
        if all_rows[row][ship] == 0:
            destroyed_ships += 1
print(destroyed_ships)
#!/usr/bin/env python
"""
Author Paula Dwan
Email paula.dwan@gmail.com
Student ID 13208660
Module COMP47270 (Computational Network Analysis and Modeling)
Course MSc ASE
Due date 11-May-2015
Lecturer Dr. Neil Hurley
CASE STUDY 2 : Community Finding
Algorithm = Laplacian using eigen-vectors and k-means, with defined number of nodes to match
dataset sizes as used. So we have :
num_nodes = 4039 --> Facebook
num_nodes = 27770 --> cit-HepTh
num_nodes = 34546 --> cit-HepPh
num_nodes = 81306 --> Twitter
num_nodes = 107614 --> Google +
"""
import networkx as nx
import matplotlib.pyplot as plt
import cs2_functions as nxc
from scipy.spatial import Delaunay
from scipy.cluster.vq import vq, kmeans
from datetime import datetime
import os.path as osp
import numpy as np
import scipy as sp
import random
import logging
cs_ref = 'CS 2 : Laplacian Amended : '
src_file = "../equiv-social_facebook_combined.txt" ; num_nodes = 50
# src_file = "../equiv-cit-HepTh.txt" ; num_nodes = 27770
# src_file = "../equiv-cit-HepPh.txt" ; num_nodes = 34546
# src_file = "../equiv-twitter_combined.txt" ; num_nodes = 81306
# src_file = "../equiv-gplus_combined.txt" ; num_nodes = 107614
def create_output_data_file():
    """
    create_output_data_file()
    create output data file using timestamp and name of data source file
    :return: df - destination file name
    """
    # Lazy %-formatting joins the two parts; the original passed the text as
    # an extra argument with no placeholder, so it was never rendered.
    logging.info('%s%s', cs_ref, 'create output data file')
    print(cs_ref, 'create output data file')
    current_date = '%Y%m%d-%H%M%S'  # timestamp format used in the output file name
    head, tail = osp.split(src_file)  # keep only the source file's base name
    first_data = "OUTPUT DATA FILE for " + cs_ref + src_file
    # NOTE(review): assumes a 'data/' directory exists relative to the cwd.
    df = 'data/%s_%s' % (datetime.now().strftime(current_date), tail)
    # Context manager closes the handle (the original leaked it).
    with open(df, 'w') as out:
        out.write(first_data)
    return df
def placement():
    """
    placement()
    Build a random Delaunay-triangulated graph on num_nodes points, compute a
    normalized Laplacian, use its low eigenvectors as 2-D node positions, and
    run k-means clustering on both the eigen-features and the raw adjacency.
    :return:
    """
    # Random 2-D coordinates for every node.
    x = [random.random() for i in range(num_nodes)]
    y = [random.random() for i in range(num_nodes)]
    x = np.array(x)
    y = np.array(y)
    G = nx.empty_graph(
        num_nodes)
    print "graph.number_of_nodes() = ", G.number_of_nodes()
    pos = dict()
    for i in range(num_nodes):
        pos[i] = x[i], y[i]
    # plot_graph(G, pos, 10)
    # Delaunay triangulation gives the edge set; each triangle contributes
    # its 3 edges in both directions (6 directed entries).
    points = np.column_stack((x, y))
    dl = Delaunay(points)
    tri = dl.simplices
    edges = np.zeros((2, 6 * len(tri)), dtype=int)
    data = np.ones(6 * len(points))  # NOTE(review): overwritten below with the correct length
    j = 0
    for i in range(len(tri)):
        edges[0][j] = tri[i][0]
        edges[1][j] = tri[i][1]
        j += 1
        edges[0][j] = tri[i][1]
        edges[1][j] = tri[i][0]
        j += 1
        edges[0][j] = tri[i][0]
        edges[1][j] = tri[i][2]
        j += 1
        edges[0][j] = tri[i][2]
        edges[1][j] = tri[i][0]
        j += 1
        edges[0][j] = tri[i][1]
        edges[1][j] = tri[i][2]
        j += 1
        edges[0][j] = tri[i][2]
        edges[1][j] = tri[i][1]
        j += 1
    data = np.ones(6 * len(tri))
    A = sp.sparse.csc_matrix((data, (edges[0, :], edges[1, :])))
    # Duplicate edges were summed during construction; clamp all weights to 1.
    for i in range(A.nnz):
        A.data[i] = 1.0
    G = nx.to_networkx_graph(A)
    # plot_graph(G, pos, 20)
    eigen_pos = dict()
    deg = A.sum(0)
    diags = np.array([0])
    D = sp.sparse.spdiags(deg, diags, A.shape[0], A.shape[1]) # diagonal matrix of degrees
    Dinv = sp.sparse.spdiags(1 / deg, diags, A.shape[0], A.shape[1])
    L = Dinv * (D - A)  # random-walk normalized Laplacian D^-1 (D - A)
    # Three smallest-magnitude eigenpairs; the 2nd and 3rd eigenvectors give
    # the spectral embedding used as plotting coordinates.
    E, V = sp.sparse.linalg.eigs(L, 3, None, float(num_nodes), 'SM')
    V = V.real
    for i in range(num_nodes):
        eigen_pos[i] = V[i, 1].real, V[i, 2].real
    print("plot_graph(G, eigen_pos, 3)")
    plot_graph(G, eigen_pos, 9)
    # Cluster once on the eigenvector features, then on the dense adjacency.
    features = np.column_stack((V[:, 1], V[:, 2]))
    cluster_nodes(G, features, pos, eigen_pos)
    raw_input("Press Enter to Continue ...")
    cluster_nodes(G, A.todense(), pos, eigen_pos)
    raw_input("Press Enter to Continue ...")
def plot_graph(G, pos, fignum):
    """
    plot_graph(G, pos, fignum)
    Draw the graph with node indices as labels, save it as
    '<fignum>laplacian_amended.png', and show it.
    :param G: graph to plot
    :param pos: pos for graph tp plot
    :param fignum: reference for figure when plotted
    :return:
    """
    label = dict()
    labelpos = dict()
    for i in range(G.number_of_nodes()):
        label[i] = i
        # offset labels slightly so they don't sit on top of the nodes
        labelpos[i] = pos[i][0] + 0.02, pos[i][1] + 0.02
    plt.clf()
    plt.figure(fignum, figsize=(10,10))
    nx.draw_networkx_nodes(G, pos, node_size=40, hold=False, )
    nx.draw_networkx_edges(G, pos, hold=True)
    nx.draw_networkx_labels(G, labelpos, label, font_size=10, hold=True, )
    plt.savefig(str(fignum)+"laplacian_amended.png")
    plt.show()
def cluster_nodes(G, feat, pos, eigen_pos):
    '''
    cluster the nodes in the graph as specified by num_cluster, note num_clusters must match
    Runs k-means on the given feature matrix, prints the three resulting node
    lists, and draws them color-coded in both the eigen layout and the
    Delaunay layout.
    :param G:
    :param feat: per-node feature matrix fed to scipy k-means
    :param pos: node positions from the Delaunay construction
    :param eigen_pos: node positions from the Laplacian eigenvectors
    :return:
    '''
    num_clusters=3
    # vq assigns each node to its nearest of the k-means centroids.
    book, distortion = kmeans(feat, num_clusters)
    codes, distortion = vq(feat, book)
    nodes = np.array(range(G.number_of_nodes()))
    cluster0 = nodes[codes == 0].tolist()
    cluster1 = nodes[codes == 1].tolist()
    cluster2 = nodes[codes == 2].tolist()
    print "\tW0 = \t", cluster0
    print "\tW1 = \t", cluster1
    print "\tW2 = \t", cluster2
    plt.figure(333) # positions of nodes per eigen
    plt.title("Eigen")
    nx.draw_networkx_nodes(G, eigen_pos, node_size=40, hold=True, nodelist=cluster0, node_color='m')
    nx.draw_networkx_nodes(G, eigen_pos, node_size=40, hold=True, nodelist=cluster1, node_color='b')
    nx.draw_networkx_nodes(G, eigen_pos, node_size=40, hold=True, nodelist=cluster2, node_color='g')
    plt.show()
    plt.figure(999) # positions of nodes per Delaney tesselation
    plt.title("Delaney")
    nx.draw_networkx_nodes(G, pos, node_size=40, hold=True, nodelist=cluster0, node_color='m')
    nx.draw_networkx_nodes(G, pos, node_size=40, hold=True, nodelist=cluster1, node_color='b')
    nx.draw_networkx_nodes(G, pos, node_size=40, hold=True, nodelist=cluster2, node_color='g')
    plt.show()
if __name__ == '__main__':
    # Time the full placement + clustering run via the course helper module.
    time_start = nxc.get_start_time()
    placement()
    time_taken = nxc.show_time_taken(time_start)
    print(time_taken)
|
from scipy.stats import loguniform, uniform
import numpy as np
import argparse
import os
import sys
import time
import json
import pandas as pd
from IPython import embed
def convert(o):
    """json.dumps `default` hook: coerce numpy int64 values to plain int.

    Raises TypeError for anything else, as the json protocol requires.
    """
    if not isinstance(o, np.int64):
        raise TypeError
    return int(o)
def select_hyperparams(config, output_name, model, is_arc, score_key='f_macro'):
    """Run a random hyperparameter search: for each trial, sample parameter
    values satisfying the config's CON* constraints, write a per-trial config
    file, launch the training shell script, and collect score/time/epoch from
    the result file. Saves a summary CSV at the end.

    config      -- dict from parse_config (tune_params, ranges, model params, ...)
    output_name -- name of the summary CSV written under the results dir
    model       -- 'adv' or 'bicond'; anything else aborts
    is_arc      -- NOTE(review): accepted but never used in this function
    score_key   -- metric key used by the shell scripts / result file names
    """
    ### make directories
    config_path, checkpoint_path, result_path = make_dirs(config)
    # Everything that is not search machinery (ranges/algos/types/constraints)
    # is treated as a fixed model parameter and copied into each trial config.
    setup_params = ['tune_params', 'num_search_trials', 'dir_name']
    model_params = set()
    for p in config:
        if p in setup_params or ('range' in p or 'algo' in p or 'type' in p or p.startswith('CON')): continue
        model_params.add(p)
    print("[model params] {}".format(model_params))
    score_lst = []
    time_lst = []
    best_epoch_lst = []
    tn2vals = dict()  # trial_num -> sampled parameter values
    for trial_num in range(int(config['num_search_trials'])):
        ### sample values
        print("[trial {}] Starting...".format(trial_num))
        print("[trial {}] sampling parameters in {}".format(trial_num, config['tune_params']))
        # Rejection-sample until the constraints are satisfied.
        constraints_OK = False
        while not constraints_OK:
            p2v = sample_values(trial_num)
            constraints_OK = check_constraints(config, p2v)
        tn2vals[trial_num] = p2v
        ### construct the appropriate config file
        config_file_name = config_path + 'config-{}.txt'.format(trial_num)
        print("[trial {}] writing configuration to {}".format(trial_num, config_file_name))
        print("[trial {}] checkpoints to {}".format(trial_num, checkpoint_path))
        print("[trial {}] results to {}".format(trial_num, result_path))
        # NOTE(review): this handle is flushed but never closed.
        f = open(config_file_name, 'w')
        model_name = '{}_t{}'.format(config['name'], trial_num)
        f.write('name:{}\n'.format(model_name)) # include trial number in name
        f.write('ckp_path:{}\n'.format(checkpoint_path)) # checkpoint save location
        f.write('res_path:{}\n'.format(result_path)) # results save location
        for p in model_params:
            if p == 'name': continue
            f.write('{}:{}\n'.format(p, config[p]))
        for p in p2v:
            f.write('{}:{}\n'.format(p, p2v[p]))
        f.flush()
        ### run the script
        print("[trial {}] running cross validation".format(trial_num))
        start_time = time.time()
        if model == 'adv':
            os.system("./adv_train.sh 1 {} 0 {} > {}log_t{}.txt".format(config_file_name, score_key, result_path, trial_num))
        elif model == 'bicond':
            os.system("./bicond.sh {} {} > {}log_t{}.txt".format(config_file_name, score_key, result_path, trial_num))
        else:
            print("ERROR: model {} is not supported".format(model))
            sys.exit(1)
        script_time = (time.time() - start_time) / 60.  # minutes
        print("[trial {}] running on ARC took {:.4f} minutes".format(trial_num, script_time))
        ### process the result and update information on best
        # NOTE(review): res_f is never closed either.
        if model == 'adv':
            res_f = open('{}{}_t{}-{}.top5_{}.txt'.format(result_path, config['name'], trial_num, config['enc'], score_key), 'r')
        else:
            res_f = open('{}{}_t{}.top5_{}.txt'.format(result_path, config['name'], trial_num, score_key), 'r')
        res_lines = res_f.readlines()
        # The score and best epoch live on the 2nd- and 3rd-to-last lines.
        score_lst.append(res_lines[-2].strip().split(':')[1])
        time_lst.append(script_time)
        best_epoch_lst.append(res_lines[-3].strip().split(':')[1])
        print("[trial {}] Done.".format(trial_num))
        print()
    ### save the resulting scores and times, for calculating the expected validation f1
    data = []
    for ti in tn2vals:
        data.append([ti, score_lst[ti], time_lst[ti], best_epoch_lst[ti], json.dumps(tn2vals[ti], default=convert)])
    df = pd.DataFrame(data, columns=['trial_num', 'avg_score', 'time', 'best_epoch', 'param_vals'])
    df.to_csv('data/model_results/{}-{}trials/{}'.format(config['dir_name'], config['num_search_trials'],
                                                         output_name), index=False)
    print("results to {}".format(output_name))
def parse_config(fname):
    """Parse a colon-separated config file into a dict.

    Each line is 'key:value'. 'tune_params' is split on commas, and for every
    tuned parameter p, '<p>_range' is split on '-' and cast according to
    '<p>_type' ('int', 'float', or left as strings).
    Fixes vs. original: the file handle is closed, blank lines are skipped,
    and values containing ':' (e.g. paths/URLs) no longer crash the parse.
    NOTE: the '-' range separator still cannot express negative numbers.
    """
    n2info = dict()
    with open(fname, 'r') as f:
        for l in f:
            line = l.strip()
            if not line:
                continue  # tolerate blank lines
            n, info = line.split(':', 1)  # split only on the first ':'
            n2info[n] = info
    n2info['tune_params'] = n2info['tune_params'].split(',')
    for p in n2info['tune_params']:
        t = n2info['{}_type'.format(p)]
        n2info['{}_range'.format(p)] = list(map(lambda x: int(x) if t == 'int' else
                                                float(x) if t == 'float' else x,
                                                n2info['{}_range'.format(p)].split('-')))
    return n2info
def sample_values(trial_num):
    """Draw one value for each parameter in the module-level `config`'s
    tune_params, using that parameter's '<p>_algo' sampling strategy
    ('selection', 'choice', 'loguniform', 'uniform-integer', 'uniform-float').

    Returns a dict mapping parameter name -> sampled value.
    """
    p2v = dict()
    for param in config['tune_params']:
        algo = config['{}_algo'.format(param)]
        rng = config['{}_range'.format(param)]
        if algo == 'selection':
            # deterministic: pick the trial_num-th entry of the list
            p2v[param] = rng[trial_num]
        elif algo == 'choice':
            # uniform pick from an explicit list of values
            p2v[param] = np.random.choice(rng)
        else:
            # remaining algorithms sample from a [min, max] interval
            min_v, max_v = rng
            if algo == 'loguniform':
                p2v[param] = loguniform.rvs(min_v, max_v)
            elif algo == 'uniform-integer':
                p2v[param] = np.random.randint(min_v, max_v + 1)
            elif algo == 'uniform-float':
                p2v[param] = uniform.rvs(min_v, max_v)
            else:
                # unknown algo: report it; the parameter is left unsampled
                print("ERROR: sampling method specified as {}".format(algo))
    return p2v
def check_constraints(n2info, p2v):
    """Check every 'CON*' constraint in n2info against the sampled values p2v.

    Constraints are '#'-separated: either 'p1#sym#p2' or a 4-part form with a
    numeric multiplier on one side ('p1#sym#c#p2' or 'c#p1#sym#p2').
    Returns True only if all constraints hold; exits on a malformed equation.
    """
    all_ok = True
    for key in n2info:
        if not key.startswith('CON'):
            continue
        parts = n2info[key].split('#') # equations should be in format param1#symbol#param2
        if len(parts) == 3:
            result = parse_equation(p2v[parts[0]], parts[1], p2v[parts[2]])
        elif len(parts) == 4:
            # the side containing the scalar multiplier is the one whose
            # first token is NOT a sampled parameter name
            if parts[0] in p2v:
                lhs = p2v[parts[0]]
                sym = parts[1]
                rhs = float(parts[2]) * p2v[parts[3]]
            else:
                lhs = float(parts[0]) * p2v[parts[1]]
                sym = parts[2]
                rhs = p2v[parts[3]]
            result = parse_equation(lhs, sym, rhs)
        else:
            print("ERROR: equation not parsable {}".format(parts))
            sys.exit(1)
        all_ok = result and all_ok
    return all_ok
def parse_equation(v1, s, v2):
    """Evaluate the binary comparison `v1 s v2` where s is one of
    '<', '<=', '=', '!=', '>', '>='. Exits on an unrecognized symbol."""
    comparators = {
        '<': lambda a, b: a < b,
        '<=': lambda a, b: a <= b,
        '=': lambda a, b: a == b,
        '!=': lambda a, b: a != b,
        '>': lambda a, b: a > b,
        '>=': lambda a, b: a >= b,
    }
    if s not in comparators:
        print("ERROR: symbol {} not recognized".format(s))
        sys.exit(1)
    return comparators[s](v1, v2)
def make_dirs(config):
    """Create the per-run config/checkpoint/result directories.

    Directory names are derived from config['dir_name'] and
    config['num_search_trials']. Exits if any of them already exists, so a
    search never silently overwrites a previous run.
    Returns (config_path, checkpoint_path, result_path).
    """
    suffix = '{}-{}trials/'.format(config['dir_name'], config['num_search_trials'])
    config_path = 'data/config/' + suffix
    checkpoint_path = 'data/checkpoints/' + suffix
    result_path = 'data/model_results/' + suffix
    for p_name, p_path in (('config_path', config_path),
                           ('ckp_path', checkpoint_path),
                           ('result_path', result_path)):
        if os.path.exists(p_path):
            print("[{}] Directory {} already exists!".format(p_name, p_path))
            sys.exit(1)
        os.makedirs(p_path)
    return config_path, checkpoint_path, result_path
def remove_dirs(config):
    """Delete the per-run config/checkpoint/result directories and their
    contents (flat removal: files only, then the directory itself).

    Missing directories are reported and skipped.
    """
    suffix = '{}-{}trials/'.format(config['dir_name'], config['num_search_trials'])
    config_path = 'data/config/' + suffix
    checkpoint_path = 'data/checkpoints/' + suffix
    result_path = 'data/model_results/' + suffix
    for p_name, p_path in (('config_path', config_path),
                           ('ckp_path', checkpoint_path),
                           ('result_path', result_path)):
        if not os.path.exists(p_path):
            print("[{}] directory {} doesn't exist".format(p_name, p_path))
            continue
        print("[{}] removing all files from {}".format(p_name, p_path))
        for fname in os.listdir(p_path):
            os.remove(os.path.join(p_path, fname))
        print("[{}] removing empty directory".format(p_name))
        os.rmdir(p_path)
if __name__ == '__main__':
    # CLI: mode 1 = clean out old run dirs and launch the search;
    #      mode 2 = only remove the run directories.
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--mode', help='What to do', required=True)
    parser.add_argument('-s', '--settings', help='Name of the file containing hyperparam info', required=True)
    # model_name should be bert-text-level or adv or bicond currently and is to be specified when is_arc is True.
    parser.add_argument('-n', '--model', help='Name of the model to run', required=False, default='adv')
    parser.add_argument('-o', '--output', help='Name of the output file (full path)', required=False,
                        default='trial_results.csv')
    parser.add_argument('-k', '--score_key', help='Score key for optimization', required=False, default='f_macro')
    args = vars(parser.parse_args())
    config = parse_config(args['settings'])
    if args['mode'] == '1':
        ## run hyperparam search
        remove_dirs(config)
        # is_arc is inferred from the settings file name (arc/twitter datasets)
        select_hyperparams(config, args['output'], args['model'], is_arc=('arc' in args['settings'] or 'twitter' in args['settings']), score_key=args['score_key'])
    elif args['mode'] == '2':
        ## remove directories
        remove_dirs(config)
    else:
        print("ERROR. exiting")
#https://docs.pycom.io/tutorials/networks/wlan/#connecting-to-a-router
#https://docs.pycom.io/firmwareapi/micropython/usocket/
import os, _thread, sys, machine, utime, time
from network import WLAN
import irc, oauth
# Module-level UART handle shared by setupSerial/checkSerial.
# Fix: the original `global serial = machine.UART(1, 19200)` is a SyntaxError --
# `global` cannot be combined with an assignment, and at module scope a plain
# assignment already creates the global.
serial = machine.UART(1, 19200)
def setupSerial():
    """Configure the module-level UART (19200 8N1) and start the background
    reader thread."""
    serial.init(19200, bits=8, parity=None, stop=1)
    _thread.start_new_thread(checkSerial, ())
def checkSerial():
    """Poll the UART forever, printing any bytes received (runs on its own
    thread; polls twice per second)."""
    while 1:
        if serial.any():
            print(serial.read())
        time.sleep(.5)
def connectWifi(retries):
    """Scan for known networks (from oauth.KNOWN_NETWORKS) and try to connect,
    retrying up to `retries` times; exits the program if none succeeds.

    NOTE(review): `wifi` is never defined in this file -- a module-level
    `wifi = WLAN(...)` appears to be missing; confirm against the imports.
    """
    for attempt in range(retries):
        if wifi.isconnected(): #skip if already connected
            continue
        available_networks = wifi.scan(show_hidden=True)
        # Intersect visible SSIDs with the SSIDs we have credentials for.
        current_nets = frozenset([e.ssid for e in available_networks])
        know_net_names = frozenset([key for key in oauth.KNOWN_NETWORKS])
        network_to_use = list(current_nets & know_net_names)
        if not network_to_use:
            print("No known Network found, retrying:")
            continue
        try:
            network_to_use = network_to_use[0]
            net_props = oauth.KNOWN_NETWORKS[network_to_use]
            password = net_props['pwd']
            # security type comes from the scan result for the chosen SSID
            security = [e.sec for e in available_networks if e.ssid == network_to_use][0]
            wifi.connect(network_to_use, (security, password), timeout=10000)
            while not wifi.isconnected():
                machine.idle()  # save power while waiting for DHCP
            print("connected " + network_to_use+" w/ IP:"+wifi.ifconfig()[0] + "in " + str(attempt))
            break
        except Exception as exp:
            print(exp)
    else:
        # for/else: runs only when the loop exhausted all retries without break
        print("Wifi AP not found in alloted retries.")
        sys.exit()
# Script entry: bring up wifi, then the serial reader thread, then the IRC bot.
connectWifi(10)
setupSerial()
irc.connectBot()
import numpy as np
import matplotlib.pylab as plt
def sigmoid(x):
    """Logistic sigmoid 1 / (1 + e^-x); works elementwise on numpy arrays."""
    decay = np.exp(-x)
    return 1 / (1 + decay)
# Plot the sigmoid over [-6, 6) with a dashed line.
X = np.arange(-6.0, 6.0, 0.2)
Y = sigmoid(X)
plt.plot(X, Y, linestyle='--')
plt.ylim(-0.2, 1.2)  # pad the y-axis around the (0, 1) range of the curve
plt.show()
|
import pathlib
import os
import pyglet
# Game asset preloading: fonts, icons, and every background image, with all
# image anchors centered so rotation/placement works around the middle.
BACKGROUND_DIR = 'assets/backgrounds'
### FONTS ###
# Add font directory; Enables pyglet to search fonts found in this directory
pyglet.font.add_directory("assets/fonts")
# Loading fonts
subFont = pyglet.font.load("Press Start")
buttonFont = pyglet.font.load("Segoe UI Black")
### IMAGE ASSETS ###
# Icons
pause_icon = pyglet.image.load('assets/icons/pause.png')
house_icon = pyglet.image.load('assets/icons/house8bit.png')
# Backgrounds: every file in BACKGROUND_DIR, keyed by file name
backgrounds = {}
for path in pathlib.Path(BACKGROUND_DIR).iterdir():
    backgrounds[path.name] = pyglet.image.load(
        os.path.join(BACKGROUND_DIR, path.name))
# center the anchor of all images
images = [pause_icon, house_icon, *backgrounds.values()]
for image in images:
    image.anchor_x = image.width // 2
    image.anchor_y = image.height // 2
from enum import Enum
import orm
from ..db.basemodel import BaseModel
from ..db.database import database, metadata
class QuestionChoices(Enum):
    """Canned question options; values are the user-facing Chinese strings."""
    num1 = '你是谁'    # "Who are you"
    num2 = '你叫什么'  # "What is your name"
    num3 = '你想咋地'  # "What do you want"
class Questions(BaseModel):
    """ORM model for the `questions` table."""
    __tablename__ = 'questions'
    __database__ = database
    __metadata__ = metadata
    # free-text question body: nullable, up to 100 characters
    question = orm.String(max_length=100, allow_null=True)
    def __str__(self):
        return self.question
|
import requests
# Fetch the current list of people in space from the Open Notify API.
response = requests.get("http://api.open-notify.org/astros.json")
# Raw JSON bytes; response.json() would parse them -- printed verbatim here.
print(response.content)
def main():
    """Read wine fermentation temperature measurements from stdin and report
    whether the wine is spoiled. The acceptable range is 20..25 inclusive;
    two consecutive bad readings, or bad readings exceeding 10% of all
    measurements, spoil the wine. (Prompts and messages are in Finnish.)"""
    pilaantumislaskuri = 0   # consecutive out-of-range counter
    pilaantumissumma = 0     # total out-of-range counter
    tuloslaskuri = 1         # 1-based measurement counter
    mittaustulos = int(0)
    rivi = input("Syötä mittausten lukumäärä: ")
    mittausten_lkm = int(rivi)  # Loop until all measurements are read, or until the wine has spoiled.
    if mittausten_lkm > 0:
        for luku in range (0, mittausten_lkm):
            kierros = str(tuloslaskuri)
            tuloste = "Syötä "+kierros+". mittaustulos: "
            mittaustulos = input(tuloste)
            mittaustulos = int(mittaustulos)
            tuloslaskuri += 1
            if 20 > mittaustulos or 25 < mittaustulos:
                pilaantumislaskuri += 1  # Two consecutive bad rounds spoil the wine.
                pilaantumissumma += 1    # The running total grows every bad round; the consecutive counter is reset
            if pilaantumislaskuri == 1 and 20 <= mittaustulos <= 25:  # when the excursion outside the good range lasted only one round.
                pilaantumislaskuri = 0
            elif pilaantumislaskuri == 2:
                print("Viinisi on pilalla.")  # "Your wine is spoiled."
                return
            elif pilaantumissumma / mittausten_lkm > 0.1:  # Alternatively the wine spoils when over 10% of the measurements were bad.
                print("Viinisi on pilalla.")
                return
            elif tuloslaskuri > mittausten_lkm and pilaantumislaskuri < 2:
                print("Viinisi on hyvää.")  # "Your wine is good." -- final round reached without spoiling
    else:
        print("Mittausten lukumäärän tulee olla positiivinen kokonaisluku.")  # count must be a positive integer
main()
|
# Advent-of-Code style "XMAS" check: find the first number in the file that is
# NOT the sum of two of the 25 numbers immediately before it.
h = open('Day9/numbers.txt', 'r')
# Reading from the file
content = h.readlines()
for x in range(25, len(content)):
    found = False
    for y in range(1, 26):
        if found:
            break
        for z in range(1, 26):
            # NOTE: `sum` shadows the builtin here (harmless within this script).
            sum = 0
            # Pairs with equal values are skipped; this also rules out pairing
            # an element with itself (the y == z case).
            if int(content[x-y]) != int(content[x-z]):
                sum = int(content[x-y]) + int(content[x-z])
            #print('X = ' + str(x) + ', adding y = ' + str(content[x-y]) + 'and z = ' + str(content[x-z]) + ', to be = ' + str(sum) + '. Is it the value == ' + str(content[x]))
            if sum == int(content[x]):
                found = True
                break
    if found:
        print('There is a sum, move on')
    if not found:
        # First number without a valid pair: report it and stop.
        print('No sum of number: ' + str(content[x]) + ', on index: ' + str(x))
        break
from __future__ import print_function
import json
import os
import jedi.api
from subprocess import Popen, PIPE
class JediRemote(object):
    '''Jedi remote process communication client
    This class provide jedi compatible API.
    Requests are serialized as one JSON object per line over a helper
    subprocess's stdin/stdout pipes; replies are JSON with a 'code' field of
    'ok' (payload in 'return') or 'ng' (remote exception to re-raise).
    '''
    # Class-level defaults; overridable per instance via __init__.
    python = 'python'                          # interpreter used to spawn the helper
    remote_command = 'jedi-remote-command.py'  # helper script, expected next to this file
    jedi_pythonx_path = ''                     # extra path argument forwarded to the helper
    def __init__(self, python=None, jedi_pythonx_path=None):
        if python is not None:
            self.python = python
        if jedi_pythonx_path is not None:
            self.jedi_pythonx_path = jedi_pythonx_path
        self._process = None  # created lazily by the `process` property
    def __del__(self):
        if self._process is not None:
            # XXX: why does this raise exception?
            #self._process.terminate()
            self._process = None
    @property
    def process(self):
        # Spawn the helper subprocess on demand, and respawn it if it died.
        if self._process is None or self._process.poll() is not None:
            cmd = os.path.join(
                os.path.dirname(
                    os.path.abspath(__file__)), self.remote_command)
            self._process = Popen(
                [self.python, cmd, self.jedi_pythonx_path],
                stdin=PIPE,
                stdout=PIPE
            )
        return self._process
    # Thin RPC wrappers: each forwards an operation on a remote object id.
    def remote_object_getattr(self, id, *args, **kwargs):
        return self._call_remote('remote_object_getattr', id, *args, **kwargs)
    def remote_object_setattr(self, id, *args, **kwargs):
        return self._call_remote('remote_object_setattr', id, *args, **kwargs)
    def remote_object_call(self, id, *args, **kwargs):
        return self._call_remote('remote_object_call', id, *args, **kwargs)
    def remote_object_repr(self, id, *args, **kwargs):
        return self._call_remote('remote_object_repr', id, *args, **kwargs)
    def free(self, id):
        # Release the remote-side reference for the given object id.
        return self._call_remote('free', id)
    def __getattr__(self, name):
        # Any attribute not found locally is fetched from the remote jedi module.
        return self._call_remote('get_from_jedi', name)
    def _call_remote(self, func, *args, **kwargs):
        # Send one JSON request line, then block for the one-line JSON reply.
        output = json.dumps({'func': func, 'args': args, 'kwargs': kwargs})
        self.process.stdin.write(output.encode('utf-8'))
        self.process.stdin.write(b'\n')
        self.process.stdin.flush()
        input = json.loads(self.process.stdout.readline().decode('utf-8'),
                           object_hook=self.remote_object_hook)
        if input.get('code') == 'ok':
            return input['return']
        elif input.get('code') == 'ng':
            # Remote side raised: reconstruct the closest matching local exception.
            exception_name = input['exception']
            e_args = input['args']
            # try to find from jedi first
            exception_class = getattr(jedi.api, exception_name, None)
            if exception_class is None:
                # ... and then, try to find from builtin
                import __builtin__  # NOTE(review): Python 2 only; py3 spells this `builtins`
                exception_class = getattr(__builtin__, exception_name, None)
            if exception_class is None:
                # unknown type: fall back to a generic Exception with the name inlined
                exception_class = Exception
                e_args = ('{}: {}'.format(exception_name, e_args), )
            # print remote traceback to stdout, which will be in :messages
            print(input.get('traceback'))
            raise exception_class(*e_args)
        else:
            raise NotImplementedError(repr(input))
    def remote_object_hook(self, obj):
        # json object_hook: materialize remote references as RemoteObject proxies.
        if obj.get('__type') == 'RemoteObject':
            return RemoteObject(self, obj['__id'])
        else:
            return obj
class RemoteObject(object):
    '''Remotely managed object
    This class represents objects which managed on remote process.
    It proxy accesses to remote process: attribute reads/writes, calls and
    repr() are all forwarded over the JediRemote pipe by object id.
    '''
    def __init__(self, jedi_remote, id):
        # to bypass this class's __setattr__, use super()
        super(RemoteObject, self).__setattr__('_jedi_remote', jedi_remote)
        super(RemoteObject, self).__setattr__('_id', id)
    def __getattr__(self, name):
        # Attribute reads go over the wire.
        return self._jedi_remote.remote_object_getattr(self._id, name)
    def __setattr__(self, name, value):
        # Attribute writes go over the wire too.
        return self._jedi_remote.remote_object_setattr(self._id, name, value)
    def __call__(self, *args, **kwargs):
        return self._jedi_remote.remote_object_call(self._id, *args, **kwargs)
    def __repr__(self):
        return self._jedi_remote.remote_object_repr(self._id)
    def __del__(self):
        # Tell the remote side it may drop its reference to this object.
        self._jedi_remote.free(self._id)
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask, render_template, Response
from feedgen.feed import FeedGenerator
from bg2feed import parser
app = Flask(__name__)
web = parser.GlobeParser()
def create_feed(title, stories):
    """Build a FeedGenerator from a list of story dicts ('url', 'title').

    Stories are added in reverse so the feed reads oldest-to-newest; each
    item's author/summary come from the parsed article metadata when present.
    """
    feed = FeedGenerator()
    feed.id('https://bostonglobe.com/today')
    feed.title(title)
    feed.link(href='https://bostonglobe.com')
    feed.description('Today\'s Boston Globe')
    for story in reversed(stories):
        item = feed.add_item()
        item.id(story['url'])
        item.title(story['title'])
        item.link(href=story['url'])
        # Fetch the full article to enrich the feed item with metadata.
        downloaded = web.get_article(story['url'])
        if downloaded['metadata']:
            item.author(author={
                'name': downloaded['metadata'].get('author', 'BostonGlobe.com')})
            item.summary(summary=downloaded['metadata'].get('description'))
    return feed
@app.route('/feeds/top-stories')
def feed_top_stories():
    """Serve an Atom feed of the Globe's current top stories."""
    top_stories = web.find_top_stories()
    feed = create_feed('Boston Globe - Top Stories', top_stories)
    return Response(feed.atom_str())
@app.route('/feeds/section/<section>')
def feed_section(section):
    """Serve an Atom feed for one named section of the paper."""
    if section in ['world', 'nation']:
        # There is more stuff on this page
        stories = web.get_section(section)
    else:
        # Otherwise let's just parse it from the today page
        stories = web.find_section(section)
    feed = create_feed('Boston Globe - %s' % section.capitalize(), stories)
    return Response(feed.atom_str())
@app.route('/proxy/<path:url>')
def proxy(url):
    """Render a full article through the local template (reader-view proxy)."""
    article = web.get_article(url)
    return render_template('template.html', **article)
if __name__ == '__main__':
    # Development server only; use a WSGI server in production.
    app.run(port=8080)
|
# Read the playing field: n rows, each given as "<count> v1 v2 ...".
# `arr[k]` holds row k's values; `current[k]` caches the value chosen from row
# k for the increasing chain check (-1 = not computed / invalid).
n = int(input().strip())
arr = []
current=[]
for a in range(0,n):
    row =list(map(int,input().strip().split(" ")))
    row.pop(0)  # first token is the element count; discard it
    arr.append(row)
    current.append(-1)
q = int(input().strip())  # number of queries that follow
def clearCurrnet(k):
    """Invalidate the cached chain values from row k through the last row by
    resetting them to -1. (Function-name typo is preserved: callers use it.)"""
    # slice assignment replaces the k..n-1 entries in place
    current[k:n] = [-1] * (n - k)
def addToRow(k,h):
    """Append value h to 1-based row k and incrementally repair the cached
    `current` chain values where the new element could change them."""
    k= k-1  # convert to 0-based index
    arr[k].append(h)
    #clearCurrnet(k)
    if(current[k]>-1):  # only repair rows whose cache is valid
        if k==0:
            # first row's chain value is simply its minimum
            firstRow = min(arr[0])
            current[k] = firstRow
        elif k < n-1:
            if current[k+1] < h:
                # new element can't extend the chain below; invalidate downstream
                clearCurrnet(k)
            elif current[k] > h:
                current[k]=h  # new element is a better (smaller) chain value
        else:
            # last row: just keep the smaller candidate
            if (current[k]> h):
                current[k] = h
def removeFromRow(k):
    """Pop the last element of 1-based row k and repair the cached `current`
    chain values if the removed element was the cached one."""
    k = k-1  # convert to 0-based index
    elem = arr[k].pop()
    if(len(arr[k])==0):
        # empty row: no chain can pass through it any more
        clearCurrnet(k)
    elif current[k] > -1:
        if current[k] == elem:
            if k==0:
                nextgt = min(arr[0])
                current[k] = nextgt
                if current[k] > current[k+1]:
                    clearCurrnet(k+1)
            else :
                rowend = len(arr[k])-1
                # NOTE(review): passes the whole previous row `arr[k-1]` as the
                # comparison value, not a scalar like current[k-1] -- confirm;
                # this looks like a bug.
                nexgt = findNextGreater(arr[k],arr[k-1],0,rowend)
                current[k] = nexgt
                if(nexgt>current[k+1]) or nexgt==-1:
                    clearCurrnet(k)
def findNextGreater(rowArr, value, low, high):
    """Binary search a sorted row for the first element strictly greater than
    `value` within [low, high]; return it, or -1 when none is found.

    Iterative form of the original recursion with an identical trace
    (including returning -1 when low >= high, even if rowArr[low] > value).
    """
    while low < high:
        mid = low + (high - low) // 2
        if rowArr[mid] <= value:
            # the successor, if any, lies strictly to the right of mid
            if rowArr[mid + 1] > value:
                return rowArr[mid + 1]
            low = mid + 1
        else:
            # rowArr[mid] itself is greater: narrow toward the left half
            high = mid
    return -1
def canUseSpecialWand():
    """Print YES if a strictly increasing chain of one element per row exists
    (greedily picking the smallest valid element per row), else NO. Uses and
    fills the `current` cache; rows already cached are skipped."""
    if (-1 in current)== False :
        # every row already has a valid cached value: chain exists
        print("YES")
        return 0
    if current[0]==-1:
        firstRow = min(arr[0])
        current[0] = firstRow
    else :
        firstRow = current[0]
    nextGreater = firstRow
    for a in range(1,n):
        if current[a] != -1:
            continue  # cached rows are trusted as-is
        if arr[a][0] > nextGreater:
            # smallest element already beats the running value
            current[a]= nextGreater
            continue
        rowend = len(arr[a])-1
        if arr[a][rowend] < nextGreater:
            # even the largest element is too small: chain is broken
            nextGreater=-1
            current[a]=nextGreater
            break
        nextGreater = findNextGreater(arr[a],nextGreater,0,rowend)
        current[a]=nextGreater
    # if nextGreater != -1:
    #     print("YES")
    if (-1 in current)== True :
        print("NO")
    else:
        print("YES")
# Query dispatch: "1 k h" appends h to row k, "0 k" pops from row k,
# anything else runs the special-wand chain check.
for i in range(0,q):
    operation = list(map(int,input().strip().split(" ")))
    if operation[0]== 1:
        addToRow(operation[1],operation[2])
    elif operation[0]==0:
        removeFromRow(operation[1])
    else :
        canUseSpecialWand()
|
import pika
class Server():
    """Thin publisher around a blocking pika connection: declares an exchange
    on construction and publishes messages with a caller-supplied routing key
    ('severity')."""
    def __init__(self,host,exchange='',exchange_type=''):
        self.host = host
        self.exchange = exchange
        self.exchange_type = exchange_type
        self.severity = None  # routing key, set per call()
        self.connection = pika.BlockingConnection(
            pika.ConnectionParameters(host=self.host))
        self.channel = self.connection.channel()
        self.__exchange()
    def __exchange(self):
        # Declare (idempotently) the exchange we will publish to.
        self.channel.exchange_declare(exchange=self.exchange,
                                      exchange_type=self.exchange_type)
        pass
    def __publish(self):
        # Publish self.response using self.severity as the routing key.
        self.channel.basic_publish(exchange=self.exchange,
                                   routing_key = self.severity,
                                   body = self.response)
        pass
    def call(self,msg,response):
        """Publish `response` with routing key `msg`."""
        self.response = response
        self.severity = msg
        self.__publish()
        pass
if __name__ == "__main__":
    # Interactive publisher: each input line is "<routing_key> <message>".
    s = Server(host='localhost',exchange='topic_',exchange_type='topic')
    while True:
        msg = input(">>:").split(' ')
        # NOTE(review): a line without a space raises IndexError on msg[1].
        s.call(msg[0],msg[1])
        pass
|
from flask import Blueprint, request, jsonify, Response
from ..controller import Aset
from ..controller import Pekerjaan
from flask_cors import cross_origin
import json
from ..controller.utils import upload_file
# Blueprint grouping all asset ("Aset") endpoints.
Aset_routes = Blueprint('Aset', __name__)#
@Aset_routes.route("/all", methods=['GET'])  # list all records
@cross_origin()
def get_all():
    """Return every Aset record as JSON."""
    return jsonify(Aset.get_all())
@Aset_routes.route('/id/<id>', methods=['GET'])  # view one record by id
@cross_origin()
def get_by_id(id:int):
    """Return the Aset record with the given id as JSON.

    Fix: use the route parameter Flask already passed in instead of
    re-reading request.view_args["id"] (same value, less fragile).
    """
    return jsonify(Aset.get_by_id(id))
@Aset_routes.route("/add", methods=['POST'])
@cross_origin()
def add():
    """Create a new Aset from a multipart form upload."""
    uploaded = request.files['path_file']
    is_hasil = request.form.get('is_hasil')
    # Expected to link with the Pekerjaan controller to obtain the ID.
    pekerjaan_id = request.form.get('pekerjaan_id')
    stored_path = upload_file(uploaded)
    return jsonify(Aset.add(stored_path, is_hasil, pekerjaan_id))
@Aset_routes.route("/delete", methods=['POST'])  # delete by id only
@cross_origin()
def delete_by_id():
    """Delete the Aset identified by the JSON body's 'id'."""
    return jsonify(Aset.delete_by_id(request.json.get('id')))
@Aset_routes.route("/update", methods=['PUT'])  # update a chosen id
@cross_origin()
def update_by_id():
    """Update the path_file of the Aset identified by 'id' in the JSON body."""
    payload = request.json
    return jsonify(Aset.update_by_id(payload.get('id'), payload.get('path_file')))
@Aset_routes.route("/update_hasil", methods=['PUT'])  # upsert a pekerjaan's result asset
@cross_origin()
def update_hasil_by_id():
    """Create or update the 'HASIL' asset attached to a pekerjaan.

    NOTE(review): the file comes from the JSON body ('file') rather than
    request.files — confirm upload_file() accepts that payload.
    """
    pekerjaan_id = request.json.get('pekerjaan_id')
    stored_path = upload_file(request.json.get('file'))
    if Aset.get_hasil_by_pekerjaan_id(pekerjaan_id):
        result = Aset.update_hasil_by_pekerjaan_id(pekerjaan_id, stored_path)
    else:
        result = Aset.add(stored_path, "HASIL", pekerjaan_id)
    return jsonify(result)
from django.core.management.base import BaseCommand
from core.jet_tree_convert import convert_jet
class Command(BaseCommand):
    '''
    Management command that runs the jet-tree conversion once as a smoke test.
    '''
    def handle(self, *args, **options):
        """Entry point invoked by `manage.py`; brackets the run with markers."""
        print('Start test')
        convert_jet()
        print('End test')
|
from sqlalchemy import desc, asc
from flask_login import UserMixin
from server import db, app
import json
import hashlib
from werkzeug.security import check_password_hash, generate_password_hash
import datetime
from random import randint
import jwt
from time import time
'''
User Class
'''
class User(db.Model, UserMixin):
    """Application user: credentials, admin/premium flags, scan quota,
    JWT session token, and owned invoices/scans."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(25), nullable=False)
    email = db.Column(db.String(50), nullable=False)
    password = db.Column(db.Text(), nullable=False)  # pbkdf2:sha256 hash, never plaintext
    scan_quota = db.Column(db.Integer(), default=31)
    is_admin = db.Column(db.Boolean(), default=False)
    has_premium = db.Column(db.Boolean(), default=False)
    token = db.Column(db.Text())  # current JWT session token, if any
    invoices = db.relationship('Invoice', backref='user', lazy=True, cascade="all,delete")
    scans = db.relationship('Scan', backref='user', lazy=True, cascade="all,delete")

    def __repr__(self):
        return '{id} - {name}'.format(id=self.id, name=self.name)

    def __init__(self, name, email, password):
        """Create and persist a user; silently no-ops on a duplicate email.

        NOTE(review): on a duplicate email this returns early and leaves a
        blank, uncommitted instance behind — callers should pre-check
        ``User.exists()``; raising would be clearer but changes the interface.
        """
        if self.exists(email):
            return
        self.name = name
        self.email = email
        self.password = generate_password_hash(password, method="pbkdf2:sha256", salt_length=8)
        db.session.add(self)
        db.session.commit()

    @staticmethod
    def decode_token(token):
        """Return the User owning a valid *token*, else False."""
        try:
            tk = jwt.decode(token, app.config['SECRET_KEY'], algorithms=['HS256'])
        except jwt.ExpiredSignatureError:
            return False
        except Exception:
            return False
        usertoken = User.query.filter_by(email=tk['user_email']).first()
        if not usertoken:
            return False
        return usertoken

    @staticmethod
    def fetch(email=None, id=None):
        """Fetch a user by email or id; raises ValueError when neither is given."""
        if not email and not id:
            # Bug fix: Python 3 forbids raising a plain string (the original
            # line was itself a TypeError at runtime); raise a real exception
            # carrying the intended message instead.
            raise ValueError('Required params: Email or Id')
        if email:
            return User.query.filter_by(email=email).first()
        if id:
            return User.query.get(id)

    @staticmethod
    def exists(email):
        """Return the user with *email* (truthy) or None."""
        return User.query.filter_by(email=email).first()

    @staticmethod
    def delete(user):
        """Remove *user*; invoices and scans cascade-delete."""
        db.session.delete(user)
        db.session.commit()

    def check_password(self, password_to_compare):
        """True when *password_to_compare* matches the stored hash."""
        return check_password_hash(self.password, password_to_compare)

    def generate_session_token(self, expires_in=3600):
        """Issue, store, and return a JWT valid for *expires_in* seconds."""
        # DO NOT rename 'exp' flag. This is used inside jwt.encode() to verify if the token has expired.
        # NOTE(review): .decode('utf-8') assumes PyJWT < 2.0 (bytes output);
        # PyJWT >= 2.0 returns str and this call would raise — confirm version.
        token = jwt.encode({'user_email': self.email, 'id' : self.id ,
                            'exp': time() + expires_in}, app.config['SECRET_KEY'], algorithm='HS256').decode('utf-8')
        self.token = token
        db.session.commit()
        return token

    def delete_token(self):
        """Clear the stored session token."""
        self.token = None
        db.session.add(self)
        db.session.commit()

    def start_new_scan(self, checkpoints, url, alias):
        """Create a Scan for this user and consume one unit of quota."""
        Scan(self.id, url, alias, checkpoints)
        self._reduce_scan_quota()

    def add_premium(self):
        """Grant the premium flag."""
        self.has_premium = True
        db.session.add(self)
        db.session.commit()

    def remove_premium(self):
        """Revoke the premium flag."""
        self.has_premium = False
        db.session.add(self)
        db.session.commit()

    def _reduce_scan_quota(self):
        """Decrement the remaining scan quota by one."""
        self.scan_quota -= 1
        db.session.add(self)
        db.session.commit()
'''
Scan Class
'''
class Scan(db.Model):
    """A website scan job owned by a user, with its checkpoints and report."""
    __tablename__ = 'scans'
    id = db.Column(db.Integer, primary_key=True)
    # Bug fix: pass the callable, NOT its result. `datetime.datetime.now()`
    # was evaluated once at class-definition time, stamping every row with
    # the process start time instead of its own insertion time.
    timestamp = db.Column(db.DateTime(), default=datetime.datetime.now)
    finished = db.Column(db.DateTime(), default=None, nullable=True)  # set on completion
    report = db.relationship('Report', backref='scan', lazy=True, cascade="all,delete", uselist=False)
    checkpoints = db.relationship('Checkpoint', backref='scan', lazy=True, cascade='all,delete')
    hashid = db.Column(db.String(200), nullable=False)  # public, unguessable report id
    userid = db.Column(db.Integer(), db.ForeignKey('users.id'), nullable=False)
    url = db.Column(db.Text())
    alias = db.Column(db.String(200))

    def __repr__(self):
        return f'[Scan] #{self.id}'

    def __init__(self, userid, url, alias, checkpoints):
        """Create and persist a scan with a random sha256 hash id."""
        reportstring = f'{url}-{str(datetime.datetime.now())}-{randint(0, 1000)}'
        self.hashid = hashlib.sha256(reportstring.encode('utf-8')).hexdigest()
        self.userid = userid
        self.url = url
        self.alias = alias
        self._add_checkpoints(checkpoints)

    def _add_checkpoints(self, checkpoints):
        """Attach *checkpoints* to this scan and commit it."""
        for checkpoint in checkpoints:
            self.checkpoints.append(checkpoint)
        db.session.add(self)
        db.session.commit()

    def update_scan_status(self):
        """Mark the scan as finished now."""
        self.finished = datetime.datetime.now()
        db.session.add(self)
        db.session.commit()

    def add_report(self, seo, accessibility, usability, results):
        """Create and attach this scan's report."""
        Report(self.id, seo, accessibility, usability, results)
'''
Invoice Class
'''
class Invoice(db.Model):
    """Billing invoice issued to a user."""
    __tablename__ = 'invoices'
    id = db.Column(db.Integer, primary_key=True)
    # Bug fix: pass the callable so each row is stamped at insert time;
    # `datetime.datetime.now()` froze one value at class-definition time.
    datetime = db.Column(db.DateTime(), default=datetime.datetime.now)
    ispaid = db.Column(db.Boolean(), default=False)
    paymentconfirmationid = db.Column(db.String(50))
    discount = db.Column(db.Float(), default=0)
    amountdue = db.Column(db.Float(), nullable=False)
    tax = db.Column(db.Float(), nullable=False)
    description = db.Column(db.Text(), nullable=False)
    userid = db.Column(db.Integer(), db.ForeignKey('users.id'), nullable=False)

    def __repr__(self):
        return f'[Invoice] #{self.id}: On: {self.datetime} Due: {self.amountdue} Paid: {self.ispaid}'

    def __init__(self, amountdue, tax, description, userid):
        """Create and immediately persist the invoice."""
        self.amountdue = amountdue
        self.tax = tax
        self.description = description
        self.userid = userid
        db.session.add(self)
        db.session.commit()
'''
Checkpoint Class
'''
class Checkpoint(db.Model):
    """A single accessibility checkpoint attached to a scan."""
    __tablename__ = 'checkpoints'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text())
    wcaglevels = db.Column(db.String(20))
    benefits = db.Column(db.String(40))
    regex = db.Column(db.Text())
    scanid = db.Column(db.Integer(), db.ForeignKey('scans.id'), nullable=True)

    def __str__(self):
        return f'{self.id} - {self.name}'

    def __init__(self, name, wcaglevels, benefits, regex, scanid):
        """Create and immediately persist the checkpoint."""
        self.scanid = scanid
        self.name = name
        self.wcaglevels = wcaglevels
        self.benefits = benefits
        self.regex = regex
        db.session.add(self)
        db.session.commit()
'''
Report Class
'''
class Report(db.Model):
    """Aggregated scores and raw results for a finished scan."""
    __tablename__ = 'reports'
    id = db.Column(db.Integer, primary_key=True)
    results = db.Column(db.Text())
    seo = db.Column(db.Float())
    accessibility = db.Column(db.Float())
    usability = db.Column(db.Float())
    scanid = db.Column(db.Integer, db.ForeignKey('scans.id'), nullable=False)

    def __init__(self, scanid, seo, accessibility, usability, results):
        """Create and immediately persist the report."""
        self.scanid = scanid
        self.seo = seo
        self.accessibility = accessibility
        self.usability = usability
        self.results = results
        db.session.add(self)
        db.session.commit()

    def get_json_results(self):
        """Decode the stored results string as JSON; None when empty."""
        if not self.results:
            return None
        # Results were stored with single quotes; normalise before parsing.
        return json.loads(self.results.replace("'", '"'))
|
import numpy as np
from fnp.module.bert_for_sequence_classification_multi_head import BertForSequenceClassificationMultiHead
from fnp.ml.csv_classifier import CSVClassifier
class CSVClassifierMultiHead(CSVClassifier):
    """CSVClassifier variant backed by a multi-head BERT sequence classifier;
    evaluation and result writing are done per head and for an aggregate."""
    def load_model(self, num_labels=2):
        """Build the multi-head BERT model from self.config and move it to
        the configured device."""
        model = BertForSequenceClassificationMultiHead.from_pretrained(
            self.config.model_name, # Use the 12-layer BERT model, with an uncased vocab.
            num_labels=num_labels, # The number of output labels--2 for binary classification.
            # You can increase this for multi-class tasks.
            output_attentions=False, # Whether the model returns attentions weights.
            output_hidden_states=False, # Whether the model returns all hidden-states.
            n_heads=self.config.n_heads,
            hidden_dropout_prob=self.config.dropout_prob[0],
            dropout_prob=self.config.dropout_prob,
            after_dropout_prob=self.config.after_dropout_prob,
            selected_layers=self.config.selected_layers,
            selected_layers_by_heads=self.config.selected_layers_by_heads,
            head_type=self.config.head_type,
            aggregation_type=self.config.aggregation_type
        )
        # Tell pytorch to run this model on the GPU.
        model.to(self.config.device)
        return model
    def convert_batch_of_outputs_to_list_of_logits(self, output):
        """Flatten batched model outputs into one per-example list; the
        transpose(1, 0, 2) reorders (head, example, label) to
        (example, head, label) so each entry holds all heads' logits."""
        logit_list = []
        for b_output in output:
            logit_list += list(np.array([i.detach().cpu().numpy() for i in b_output[1]]).transpose(1, 0, 2))
        return logit_list
    def evaluate(self, input_ids_list, labels_list, logits_list):
        """Print evaluation metrics per head, then for the summed logits."""
        for head in range(self.config.n_heads):
            logits = [i[head] for i in logits_list]
            preds = np.argmax(logits, axis=1).flatten()
            print()
            print("Head " + str(head) + " ====================")
            self.evaluate_head(labels_list, preds)
        # sum of all model
        logits = [sum(i) for i in logits_list]
        preds = np.argmax(logits, axis=1).flatten()
        print()
        print("Aggregated head ====================")
        self.evaluate_head(labels_list, preds)
    def write_results(self, input_id_list, label_list, logit_list):
        """Write per-head results, then an "h_sum" aggregate.

        NOTE(review): evaluate() aggregates with sum(i) but "h_sum" here
        writes i[-1] (the last head only) — confirm which is intended.
        """
        for head in range(self.config.n_heads):
            logits = [i[head] for i in logit_list]
            self.write_results_on_head(input_id_list, label_list, logits, "h_" + str(head))
        logits = [i[-1] for i in logit_list]
        self.write_results_on_head(input_id_list, label_list, logits, "h_sum")
#-------------------------------------------------------------------------------------------------
# DISPLAY CLASS -------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------
import pygame
class Display:
    """Thin wrapper around the pygame display surface used by the game loop."""
    def __init__(self):
        self.FPS = 3                                        # target frames per second
        self.fpsClock = pygame.time.Clock()                 # clock for throttling the loop
        self.display = pygame.display.set_mode([700, 700])  # 700x700 main window
    def update(self):
        """Flush all pending drawing to the screen."""
        pygame.display.update()
# homework2
# Filtering, Smoothing/Binning, and Multiplots
# author @ Yiqing Liu
# Question 1: How many people have been killed on each day between Jan 1st, 2013 - Feb 1st, 2013
# Question 2: How many people have been injured on each day between Jan 1st, 2013 - Feb 1st, 2013
# Question 3: How many people have been killed in each state?
# Question 4: How many people have been injured in each state?
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
class Dataset:
    """CSV-backed dataset with bar-chart plotting helpers."""

    def __init__(self, filepath):
        """Load the CSV at *filepath* into a pandas DataFrame."""
        self.data = pd.read_csv(filepath)

    def plot_two_columns(self, plt, data, x_column, y_column, title):
        """Draw a styled bar chart of *y_column* against *x_column* on *plt*."""
        plt.bar(data[x_column], data[y_column], width=0.5)
        plt.xticks(fontsize=20, rotation=30)
        plt.title(title, fontsize=60, bbox={'facecolor': '0.8', 'pad': 8})
        plt.xlabel(x_column, fontsize=60)
        plt.ylabel(y_column, fontsize=60)

    def contional_plot_two_columns(self, plt, x_column, y_column, title, start_date, end_date):
        """Bar chart restricted to rows with start_date <= x_column <= end_date."""
        in_range = (self.data[x_column] >= start_date) & (self.data[x_column] <= end_date)
        self.plot_two_columns(plt, self.data[in_range], x_column, y_column, title)

    def simple_plot_two_columns(self, plt, x_column, y_column, title):
        """Bar chart over the full dataset."""
        self.plot_two_columns(plt, self.data, x_column, y_column, title)
def main():
    # Build a 2x2 multiplot answering the four homework questions
    # (injured/killed per day in Jan 2013, injured/killed per state).
    d=Dataset("gun-violence.csv")
    fig = plt.figure(22)
    fig.suptitle('Multiplots',fontsize=60)
    fig.tight_layout()
    plt.subplots_adjust(wspace =0.2, hspace =0.5)
    plt.subplot(221)
    d.contional_plot_two_columns(plt,'date','n_injured','how many people get injured','2013-01-01','2013-02-01')
    plt.subplot(222)
    d.contional_plot_two_columns(plt,'date','n_killed','how many people get killed','2013-01-01','2013-02-01')
    plt.subplot(223)
    d.simple_plot_two_columns(plt,'state','n_injured','how many people get injured')
    plt.subplot(224)
    d.simple_plot_two_columns(plt,'state','n_killed','how many people get killed')
    fig.set_size_inches(60,30)
    # NOTE(review): absolute, user-specific output path — breaks on any other
    # machine; consider a relative path or a CLI argument.
    plt.savefig('/Users/yiqingliu/Google Drive/IS590DV/hw2/hw2_YiqingLiu.png')
if __name__=='__main__':
    main()
import math
import numpy as np
import pandas as pd
from vivarium.testing_utilities import get_randomness, build_table
from vivarium_public_health.testing.utils import make_uniform_pop_data
import vivarium_public_health.population.data_transformations as dt
def test_assign_demographic_proportions():
    """Uniform pop data should yield uniform conditional-probability columns."""
    pop_data = dt.assign_demographic_proportions(make_uniform_pop_data(age_bin_midpoint=True))
    assert np.allclose(pop_data['P(sex, location, age| year)'], len(pop_data.year_start.unique()) / len(pop_data))
    assert np.allclose(
        pop_data['P(sex, location | age, year)'], (len(pop_data.year_start.unique())
                                                   * len(pop_data.age.unique()) / len(pop_data)))
    assert np.allclose(
        pop_data['P(age | year, sex, location)'], (len(pop_data.year_start.unique()) * len(pop_data.sex.unique())
                                                   * len(pop_data.location.unique()) / len(pop_data)))
def test_rescale_binned_proportions_full_range():
    """Rescaling over the full age range should leave proportions unchanged."""
    pop_data = dt.assign_demographic_proportions(make_uniform_pop_data(age_bin_midpoint=True))
    pop_data = pop_data[pop_data.year_start == 1990]
    pop_data_scaled = dt.rescale_binned_proportions(pop_data, age_start=0, age_end=100)
    pop_data_scaled = pop_data_scaled[pop_data_scaled.age.isin(pop_data.age.unique())]
    assert np.allclose(pop_data['P(sex, location, age| year)'], pop_data_scaled['P(sex, location, age| year)'])
def test_rescale_binned_proportions_clipped_ends():
    """Clipping to [2, 7] should split the edge bins proportionally to overlap."""
    pop_data = dt.assign_demographic_proportions(make_uniform_pop_data(age_bin_midpoint=True))
    pop_data = pop_data[pop_data.year_start == 1990]
    scale = len(pop_data.location.unique()) * len(pop_data.sex.unique())
    pop_data_scaled = dt.rescale_binned_proportions(pop_data, age_start=2, age_end=7)
    base_p = 1/len(pop_data)
    # Expected per-group proportions after splitting the clipped edge bins.
    p_scaled = [base_p*7/5, base_p*3/5, base_p*2/5, base_p*8/5] + [base_p]*(len(pop_data_scaled)//scale - 5) + [0]
    for group, sub_population in pop_data_scaled.groupby(['sex', 'location']):
        assert np.allclose(sub_population['P(sex, location, age| year)'], p_scaled)
def test_rescale_binned_proportions_age_bin_edges():
    """age_start/age_end on exact bin boundaries should add two empty edge bins."""
    pop_data = dt.assign_demographic_proportions(make_uniform_pop_data(age_bin_midpoint=True))
    pop_data = pop_data[pop_data.year_start == 1990]
    # Test edge case where age_start/age_end fall on age bin boundaries.
    pop_data_scaled = dt.rescale_binned_proportions(pop_data, age_start=5, age_end=10)
    assert len(pop_data_scaled.age.unique()) == len(pop_data.age.unique()) + 2
    assert 7.5 in pop_data_scaled.age.unique()
    correct_data = ([1/len(pop_data)]*(len(pop_data_scaled)//2 - 2) + [0, 0])*2
    assert np.allclose(pop_data_scaled['P(sex, location, age| year)'], correct_data)
def test_smooth_ages():
    """Smoothing should spread simulants over distinct ages, preserving the mean."""
    pop_data = dt.assign_demographic_proportions(make_uniform_pop_data(age_bin_midpoint=True))
    pop_data = pop_data[pop_data.year_start == 1990]
    simulants = pd.DataFrame({'age': [22.5]*10000 + [52.5]*10000,
                              'sex': ['Male', 'Female']*10000,
                              'location': [1, 2]*10000})
    randomness = get_randomness()
    smoothed_simulants = dt.smooth_ages(simulants, pop_data, randomness)
    assert math.isclose(len(smoothed_simulants.age.unique()), len(smoothed_simulants.index), abs_tol=1)
    # Tolerance is 3*std_dev of the sample mean
    assert math.isclose(smoothed_simulants.age.mean(), 37.5, abs_tol=3*math.sqrt(13.149778198**2/2000))
def test__get_bins_and_proportions_with_youngest_bin():
    """Youngest bin: endpoints [0, 5], all three proportions uniform."""
    pop_data = dt.assign_demographic_proportions(make_uniform_pop_data(age_bin_midpoint=True))
    pop_data = pop_data[(pop_data.year_start == 1990) & (pop_data.location == 1) & (pop_data.sex == 'Male')]
    age = dt.AgeValues(current=2.5, young=0, old=7.5)
    endpoints, proportions = dt._get_bins_and_proportions(pop_data, age)
    assert endpoints.left == 0
    assert endpoints.right == 5
    bin_width = endpoints.right - endpoints.left
    assert proportions.current == 1 / len(pop_data) / bin_width
    assert proportions.young == 1 / len(pop_data) / bin_width
    assert proportions.old == 1 / len(pop_data) / bin_width
def test__get_bins_and_proportions_with_oldest_bin():
    """Oldest bin: endpoints [95, 100], 'old' proportion clamps to zero."""
    pop_data = dt.assign_demographic_proportions(make_uniform_pop_data(age_bin_midpoint=True))
    pop_data = pop_data[(pop_data.year_start == 1990) & (pop_data.location == 1) & (pop_data.sex == 'Male')]
    age = dt.AgeValues(current=97.5, young=92.5, old=100)
    endpoints, proportions = dt._get_bins_and_proportions(pop_data, age)
    assert endpoints.left == 95
    assert endpoints.right == 100
    bin_width = endpoints.right - endpoints.left
    assert proportions.current == 1 / len(pop_data) / bin_width
    assert proportions.young == 1 / len(pop_data) / bin_width
    assert proportions.old == 0
def test__get_bins_and_proportions_with_middle_bin():
    """Interior bin: endpoints [20, 25], all three proportions uniform."""
    pop_data = dt.assign_demographic_proportions(make_uniform_pop_data(age_bin_midpoint=True))
    pop_data = pop_data[(pop_data.year_start == 1990) & (pop_data.location == 1) & (pop_data.sex == 'Male')]
    age = dt.AgeValues(current=22.5, young=17.5, old=27.5)
    endpoints, proportions = dt._get_bins_and_proportions(pop_data, age)
    assert endpoints.left == 20
    assert endpoints.right == 25
    bin_width = endpoints.right - endpoints.left
    assert proportions.current == 1 / len(pop_data) / bin_width
    assert proportions.young == 1 / len(pop_data) / bin_width
    assert proportions.old == 1 / len(pop_data) / bin_width
def test__construct_sampling_parameters():
    """Check pdf endpoints, area, slopes and cdf inflection-point formulas."""
    age = dt.AgeValues(current=50, young=22, old=104)
    endpoint = dt.EndpointValues(left=34, right=77)
    proportion = dt.AgeValues(current=0.1, young=0.5, old=0.3)
    pdf, slope, area, cdf_inflection_point = dt._construct_sampling_parameters(age, endpoint, proportion)
    # pdf endpoints are linear interpolations between neighbouring proportions.
    assert pdf.left == ((proportion.current - proportion.young)/(age.current - age.young)
                        * (endpoint.left - age.young) + proportion.young)
    assert pdf.right == ((proportion.old - proportion.current) / (age.old - age.current)
                         * (endpoint.right - age.current) + proportion.current)
    # Trapezoid areas on each side of the current age.
    assert area == 0.5 * ((proportion.current + pdf.left)*(age.current - endpoint.left)
                          + (pdf.right + proportion.current)*(endpoint.right - age.current))
    assert slope.left == (proportion.current - pdf.left) / (age.current - endpoint.left)
    assert slope.right == (pdf.right - proportion.current) / (endpoint.right - age.current)
    assert cdf_inflection_point == 1 / (2 * area) * (proportion.current + pdf.left) * (age.current - endpoint.left)
def test__compute_ages():
    """Closed-form age interpolation for the zero- and nonzero-slope cases."""
    assert dt._compute_ages(1, 10, 12, 0, 33) == 10 + 33/12*1
    assert dt._compute_ages(1, 10, 12, 5, 33) == 10 + 12/5*(np.sqrt(1+2*33*5/12**2*1) - 1)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2018-09-17 16:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 1.9): widen ExamLibItem.category's choice set
    and make ExamLibItem.content an optional 1500-char text field."""
    dependencies = [
        ('exam', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='examlibitem',
            name='category',
            field=models.CharField(choices=[(b'ip', b'IP'), (b'linux', b'Linux'), (b'lte', b'LTE'), (b'python', b'Python'), (b'cpp', b'C++'), (b'robot', b'Robot'), (b'test', b'Test'), (b'log', b'LOG')], max_length=45),
        ),
        migrations.AlterField(
            model_name='examlibitem',
            name='content',
            field=models.TextField(blank=True, max_length=1500, null=True),
        ),
    ]
|
#!/usr/bin/env python
# coding: utf-8
# In[6]:
def fizzbuzz(x_list):
    """Print FizzBuzz lines for the multiples of 3 and/or 5 in *x_list*.

    Multiples of both print 'FizzBuzz', of 3 only 'Fizz', of 5 only 'Buzz';
    other numbers produce no output.  Returns None.
    """
    for value in x_list:
        by_three = value % 3 == 0
        by_five = value % 5 == 0
        if by_three and by_five:
            print(value,'FizzBuzz')
        elif by_three:
            print(value,'Fizz')
        elif by_five:
            print(value,'Buzz')
# Run FizzBuzz over 1..100.  Bug fix: the call was wrapped in print(),
# which appended a spurious "None" line (fizzbuzz returns None).
fizzbuzz(range(1,101))
# In[ ]:
# In[ ]:
|
#!/usr/bin/env python
# encoding: utf-8
"""
Created by 'bens3' on 2013-06-21.
Copyright (c) 2013 'bens3'. All rights reserved.
python indexlot.py IndexLotDatasetAPITask --local-scheduler
python tasks/indexlot.py IndexLotDatasetCSVTask --local-scheduler
"""
import luigi
import pandas as pd
from collections import OrderedDict
from ke2mongo import config
from ke2mongo.tasks.dataset import DatasetTask, DatasetCSVTask, DatasetAPITask
from ke2mongo.tasks import DATASET_LICENCE, DATASET_AUTHOR, DATASET_TYPE
class IndexLotDatasetTask(DatasetTask):
    """Luigi dataset task exporting 'Index Lot' records (species-level
    presence records in the Museum collection) to CKAN, joining current and
    original taxonomy in via the collection index."""
    record_type = 'Index Lot'
    # CKAN Dataset params
    package = {
        'name': 'collection-indexlots',
        'notes': u'Index Lot records from the Natural History Museum\'s collection',
        'title': "Index Lot collection",
        'author': DATASET_AUTHOR,
        'license_id': DATASET_LICENCE,
        'resources': [],
        'dataset_category': DATASET_TYPE,
        'owner_org': config.get('ckan', 'owner_org')
    }
    # And now save to the datastore
    datastore = {
        'resource': {
            'name': 'Index Lots',
            'description': 'Species level record denoting the presence of a taxon in the Museum collection',
            'format': 'csv'
        },
        'primary_key': 'GUID'
    }
    # (source field, output column, type); leading-underscore columns are
    # internal join keys and are not emitted in the final output.
    columns = [
        ('etaxonomy2._id', '_current_name_irn', 'int32'),
        ('etaxonomy2.ClaScientificNameBuilt', 'Currently accepted name', 'string:100'),
        ('etaxonomy._id', '_taxonomy_irn', 'int32'),
        ('etaxonomy.ClaScientificNameBuilt', 'Original name', 'string:100'),
        ('etaxonomy.ClaKingdom', 'Kingdom', 'string:60'),
        ('etaxonomy.ClaPhylum', 'Phylum', 'string:100'),
        ('etaxonomy.ClaClass', 'Class', 'string:100'),
        ('etaxonomy.ClaOrder', 'Order', 'string:100'),
        ('etaxonomy.ClaSuborder', 'Suborder', 'string:100'),
        ('etaxonomy.ClaSuperfamily', 'Superfamily', 'string:100'),
        ('etaxonomy.ClaFamily', 'Family', 'string:100'),
        ('etaxonomy.ClaSubfamily', 'Subfamily', 'string:100'),
        ('etaxonomy.ClaGenus', 'Genus', 'string:100'),
        ('etaxonomy.ClaSubgenus', 'Subgenus', 'string:100'),
        ('etaxonomy.ClaSpecies', 'Species', 'string:100'),
        ('etaxonomy.ClaSubspecies', 'Subspecies', 'string:100'),
        ('etaxonomy.ClaRank', 'Taxonomic rank', 'string:20'),  # NB: CKAN uses rank internally
        ('ecatalogue.AdmGUIDPreferredValue', 'GUID', 'uuid'),
        ('ecatalogue._id', 'IRN', 'int32'),
        ('ecatalogue.EntIndIndexLotNameRef', '_collection_index_irn', 'int32'),
        ('ecatalogue.EntIndMaterial', 'Material', 'bool'),
        ('ecatalogue.EntIndType', 'Type', 'bool'),
        ('ecatalogue.EntIndMedia', 'Media', 'bool'),
        ('ecatalogue.EntIndBritish', 'British', 'bool'),
        ('ecatalogue.EntIndKindOfMaterial', 'Kind of material', 'string:100'),
        ('ecatalogue.EntIndKindOfMedia', 'Kind of media', 'string:100'),
        # Material detail
        ('ecatalogue.EntIndCount', 'Material count', 'string:100'),
        ('ecatalogue.EntIndSex', 'Material sex', 'string:100'),
        ('ecatalogue.EntIndStage', 'Material stage', 'string:100'),
        ('ecatalogue.EntIndTypes', 'Material types', 'string:100'),
        ('ecatalogue.EntIndPrimaryTypeNo', 'Material primary type no', 'string:100'),
        # Separate Botany and Entomology
        ('ecatalogue.ColDepartment', 'Department', 'string:100'),
        # Audit info
        ('ecatalogue.AdmDateModified', 'Modified', 'string:100'),
        ('ecatalogue.AdmDateInserted', 'Created', 'string:100'),
    ]
    def process_dataframe(self, m, df):
        """
        Process the dataframe, adding in the taxonomy fields
        @param m: monary
        @param df: dataframe
        @return: dataframe
        """
        # Try and get taxonomy using the collection index
        # BS: 20140804 - Fix indexlots taxonomy bug
        # When the index lot record's taxonomy is updated (via collection index),
        # the index lot record's EntIndIndexLotTaxonNameLocalRef is not updated with the new taxonomy
        # So we need to use collection index to retrieve the record taxonomy
        df = super(IndexLotDatasetTask, self).process_dataframe(m, df)
        # Convert booleans to yes / no for all columns in the main collection
        for (_, field, field_type) in self.get_collection_source_columns(self.collection_name):
            if field_type == 'bool':
                # NOTE(review): chained indexing (df[field][mask] = ...) may
                # silently fail to write through on newer pandas versions —
                # prefer df.loc[mask, field] = ...; confirm the pinned pandas.
                df[field][df[field] == 'True'] = 'Yes'
                df[field][df[field] == 'False'] = 'No'
                df[field][df[field] == 'N/A'] = ''
        # BUG FIX BS 140811
        # ColCurrentNameRef Is not being updated correctly - see record 899984
        # ColCurrentNameRef = 964105
        # Not a problem, as indexlots are using ColTaxonomicNameRef for summary data etc.,
        # So ColTaxonomicNameRef is the correct field to use.
        collection_index_columns = [
            ('_id', '_collection_index_irn', 'int32'),
            ('ColTaxonomicNameRef', '_taxonomy_irn', 'int32'),
            ('ColCurrentNameRef', '_current_name_irn', 'int32'),
        ]
        collection_index_irns = self._get_unique_irns(df, '_collection_index_irn')
        collection_index_df = self.get_dataframe(m, 'ecollectionindex', collection_index_columns, collection_index_irns, '_collection_index_irn')
        # Get all collection columns
        collection_columns = self.get_collection_source_columns()
        # And get the taxonomy for these collection
        taxonomy_irns = self._get_unique_irns(collection_index_df, '_taxonomy_irn')
        # The query to pre-load all taxonomy objects takes ~96 seconds
        # It is much faster to load taxonomy objects on the fly, for the current block
        # collection_index_irns = pd.unique(df._collection_index_irn.values.ravel()).tolist()
        taxonomy_df = self.get_dataframe(m, 'etaxonomy', collection_columns['etaxonomy'], taxonomy_irns, '_taxonomy_irn')
        # Merge the taxonomy into the collection index dataframe - we need to do this so we can merge into
        # main dataframe keyed by collection index ID
        collection_index_df = pd.merge(collection_index_df, taxonomy_df, how='inner', left_on=['_taxonomy_irn'], right_on=['_taxonomy_irn'])
        # Add current name - same process as the main taxonomy but using _current_name_irn source fields
        current_name_irns = self._get_unique_irns(collection_index_df, '_current_name_irn')
        current_name_df = self.get_dataframe(m, 'etaxonomy', collection_columns['etaxonomy2'], current_name_irns, '_current_name_irn')
        collection_index_df = pd.merge(collection_index_df, current_name_df, how='inner', left_on=['_current_name_irn'], right_on=['_current_name_irn'])
        # Merge results into main dataframe
        df = pd.merge(df, collection_index_df, how='outer', left_on=['_collection_index_irn'], right_on=['_collection_index_irn'])
        return df
    def get_output_columns(self):
        """
        Get a list of output columns, with bool converted to string:3 (so can be converted to Yes/No)
        @return:
        """
        return OrderedDict((col[1], 'string:3' if col[2] == 'bool' else col[2]) for col in self.columns if self._is_output_field(col[1]))
class IndexLotDatasetCSVTask(IndexLotDatasetTask, DatasetCSVTask):
    """CSV-output variant of the index lot dataset task."""
    pass
class IndexLotDatasetAPITask(IndexLotDatasetTask, DatasetAPITask):
    """API-output variant of the index lot dataset task."""
    pass
if __name__ == "__main__":
    # Delegate to luigi's CLI runner (task name comes from sys.argv).
    luigi.run()
#Ejercicio 13
"""
Un supermercado está estableciendo el precio de venta para nuevos productos, de estos productos desean
generar el 27 % de ganancia.
"""
# Read the base price, apply the 27 % profit margin, and print the sale price.
precio = float(input("Ingrese el precio del producto: "))
margen = precio * 27 / 100
precio_venta = precio + margen
print("El nuevo precio del producto es de: ", precio_venta, "pesos")
|
from django.contrib import admin
from .models import ViewTestModel
# from .views import TestParserModel
# Expose the model in the Django admin site.
admin.site.register(ViewTestModel)
# admin.site.register(TestParserModel)
|
# -*- coding: utf-8 -*-
import inject
import logging
import psycopg2
import asyncio
from asyncio import coroutine
from autobahn.asyncio.wamp import ApplicationSession
from model.config import Config
from model.systems.camaras.camaras import Camaras
class WampCamaras(ApplicationSession):
    """WAMP session exposing the camera subsystem (list cameras, query
    recordings) as RPC endpoints; blocking DB work runs in executor threads
    so the asyncio event loop stays responsive."""
    def __init__(self, config=None):
        logging.debug('instanciando')
        ApplicationSession.__init__(self, config)
        # Collaborators resolved through the inject container.
        self.serverConfig = inject.instance(Config)
        self.camaras = inject.instance(Camaras)
    @coroutine
    def onJoin(self, details):
        # Register the async wrappers under their public WAMP procedure names.
        logging.debug('registering methods')
        yield from self.register(self.findAllCameras_async, 'camaras.camaras.findAllCameras')
        yield from self.register(self.findRecordings_async, 'camaras.camaras.findRecordings')
    def _getDatabase(self):
        """Open a fresh PostgreSQL connection from the server configuration."""
        host = self.serverConfig.configs['database_host']
        dbname = self.serverConfig.configs['database_database']
        user = self.serverConfig.configs['database_user']
        passw = self.serverConfig.configs['database_password']
        return psycopg2.connect(host=host, dbname=dbname, user=user, password=passw)
    def findAllCameras(self):
        """Blocking: list all cameras (meant to run in an executor thread)."""
        con = self._getDatabase()
        try:
            return self.camaras.findAllCameras(con)
        finally:
            con.close()
    @coroutine
    def findAllCameras_async(self):
        """Coroutine wrapper running findAllCameras off the event loop."""
        loop = asyncio.get_event_loop()
        r = yield from loop.run_in_executor(None, self.findAllCameras)
        return r
    def findRecordings(self, start,end,camaras):
        """Blocking: list recordings for *camaras* between *start* and *end*."""
        con = self._getDatabase()
        try:
            return self.camaras.findRecordings(con,start,end,camaras)
        finally:
            con.close()
    @coroutine
    def findRecordings_async(self, start,end,camaras):
        """Coroutine wrapper running findRecordings off the event loop."""
        loop = asyncio.get_event_loop()
        r = yield from loop.run_in_executor(None, self.findRecordings, start,end,camaras)
        return r
|
from django.urls import path
from . import views
# URL routes: JWT auth, movie browsing/search, and per-user movie lists.
urlpatterns = [
    path('users/login', views.MyTokenObtainPairView.as_view(), name='token_obtain_pair'),
    path('', views.movieList.as_view()),
    path('searchResults/<str:name>', views.getMovies),
    path('movies/<str:id>', views.getMovieById),
    path('genre/<str:genre>', views.getMovieByGenre),
    path('imdbMovies/<str:name>', views.IMDBAPI),
    path('users/profile/', views.getUserProfile),
    path('users/', views.getUsers),
    path('users/register', views.registerUser),
    path('users/addMovie', views.addMovie),
    path('users/deleteMovie', views.deleteMovie),
    # path('trailer/<str:name>',views.getTrailer)
]
|
from keras.models import load_model
import sys, os
from sklearn.metrics import classification_report, confusion_matrix
from setting import BATCH_SIZE,CLASSES, NUM_CLASSES
import matplotlib.pyplot as plt
import numpy as np
import itertools
# Load the trained Keras model whose path is given on the command line.
model = load_model(sys.argv[1])
def evaluate():
    """Evaluate the loaded model on X_test/y_test .npy files, print a
    classification report, and save a confusion-matrix plot plus text dump."""
    print("Evaluating the model based on the best model...")
    X_test = np.load('X_test.npy')
    y_test = np.load('y_test.npy')
    predictions = model.predict(X_test, batch_size=BATCH_SIZE)
    # Convert one-hot / probability rows to class indices.
    y_pred = predictions.argmax(axis = 1)
    y_true = y_test.argmax(axis = 1)
    print(classification_report(y_true, y_pred, target_names = CLASSES))
    print("Building and Saving the confusion matrix...")
    confusion_mat = confusion_matrix(y_true = y_true, y_pred = y_pred)
    normalize = False
    accuracy = np.trace(confusion_mat) / float(np.sum(confusion_mat))
    misclass = 1 - accuracy
    if normalize:
        confusion_mat = confusion_mat.astype('float') / confusion_mat.sum(axis=1)[:, np.newaxis]
    plt.figure(figsize = (6,6))
    plt.imshow(confusion_mat, interpolation='nearest', cmap=plt.cm.Blues)
    plt.title('Confusion matrix')
    plt.colorbar()
    tick_marks = np.arange(NUM_CLASSES)
    plt.xticks(tick_marks, CLASSES, rotation=45)
    plt.yticks(tick_marks, CLASSES)
    # Cell-text colour flips to white above this threshold for readability.
    thresh = confusion_mat.max() / 1.5 if normalize else confusion_mat.max()/ 2
    for i, j in itertools.product(range(confusion_mat.shape[0]), range(confusion_mat.shape[1])):
        if normalize:
            plt.text(j, i, "{:0.4f}".format(confusion_mat[i, j]),
                     horizontalalignment="center",
                     color="white" if confusion_mat[i, j] > thresh else "black")
        else:
            # NOTE(review): the y coordinate is hard-coded (0.7 / 0.2) instead
            # of the row index i — this only places the counts correctly for a
            # 2x2 matrix; confirm NUM_CLASSES == 2.
            y = 0.7
            if i ==0:
                y= 0.2
            plt.text(j, y, format(confusion_mat[i, j],'d'),
                     horizontalalignment="center",
                     color="white" if confusion_mat[i, j] > thresh else "black")
    # plt.tight_layout()
    plt.ylabel('Actual')
    plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass))
    confusion_matrix_plot_name = os.path.join('..','images', 'confusion_matrix_plot.png')
    plt.savefig(confusion_matrix_plot_name)
    plt.show()
    confusion_matrix_file_name = os.path.join('..','logs', 'confusion_matrix_file.txt')
    with open(confusion_matrix_file_name, 'w') as f:
        f.write(np.array2string(confusion_mat, separator=', '))
# Run the evaluation immediately when the script is executed.
evaluate()
|
import boto3
from botocore.exceptions import ClientError
import csv
# with open('instances.csv', 'r') as f:
# csv_reader = csv.reader(f)
# instances = list(csv_reader)
# Collect the distinct instance ids of every EC2 instance in us-west-2.
ec2client = boto3.client('ec2', region_name='us-west-2')
responses = ec2client.describe_instances()
instance_result = {
    instance['InstanceId']
    for reservations in responses['Reservations']
    for instance in reservations['Instances']
    if instance['InstanceId']
}
print(len(instance_result))
print(instance_result)
print(responses)
# try:
# response = ec2.delete_security_group(GroupId='{}'.format(security_groups))
# print(response)
# .format(str(security_groups)[-1:1]))
# print('Deleting Security Group: {}'.format(str(security_groups)[-1:1]))
# print('Security Group Deleted')
# except ClientError as e:
# print(e)
# for sg in security_groups:
# print(sg)
# response = ec2.delete_security_group(GroupId='sg-0683d3523a393f9e1')
# # try:
# for gid in sg_groupid:
# response = ec2.delete_security_group(GroupId='{}'.format(gid))
# print('Security Group Deleted')
# except ClientError as e:
# print(e)
|
# -*- coding: utf-8 -*-
"""
This module holds two types of objects:
1. general-use functions, and
2. classes derived from wx that could be usable outside
of `threepy5`.
"""
import wx
import wx.lib.stattext as st
import wx.lib.newevent as ne
from math import sqrt
######################
# Auxiliary classes
######################
class AutoSize(wx.ScrolledWindow):
    """
    `AutoSize` is a `wx.ScrolledWindow` that automates managing a "virtual
    size". In `wx`, the "virtual size" is the size of the window's underlying
    contents, while "size" is the real screen estate the window occupies.
    `AutoSize` also provides several helpers built on that functionality.
    """
    SCROLL_STEP = 10

    def __init__(self, parent, pos=wx.DefaultPosition, size=wx.DefaultSize, style=0):
        """Constructor.

        * `parent: ` the parent `Deck`.
        * `pos: ` by default, is `wx.DefaultPosition`.
        * `size: ` by default, is `wx.DefaultSize`.
        * `style: ` the style for this window.
        """
        super(AutoSize, self).__init__(parent, pos=pos, size=size, style=style)
        self.content_sz = wx.Size(size[0], size[1])
        self.SetScrollRate(self.SCROLL_STEP, self.SCROLL_STEP)
        # bindings
        self.Bind(wx.EVT_SIZE, self.AutoSizeOnSize)

    def UpdateContentSize(self, sz):
        """Recompute the virtual size.

        * `sz: ` a `(width, height)` size tuple. Any dimension bigger than
          the current virtual size enlarges the virtual size.
        """
        grew = False
        if sz.x > self.content_sz.x:
            grew = True
            self.content_sz = wx.Size(sz.x, self.content_sz.y)
        if sz.y > self.content_sz.y:
            grew = True
            self.content_sz = wx.Size(self.content_sz.x, sz.y)
        if grew:
            self.SetVirtualSize(self.content_sz)

    def FitToChildren(self):
        """Set the virtual size to tightly fit the children. With no
        children, the virtual size is left untouched (never shrinks).
        """
        if not self.GetChildren():
            return
        # scroll to (0,0) so child rects are absolute coordinates
        was_shown = self.IsShown()
        if was_shown:
            self.Hide()
        prev_view = self.GetViewStart()
        self.Scroll(0, 0)
        # extent of all children
        rects = [child.GetRect() for child in self.GetChildren()]
        max_right = max(r.right for r in rects)
        max_bottom = max(r.bottom for r in rects)
        # grow (never shrink) the virtual size
        new_sz = self.content_sz
        if max_right > new_sz.x:
            new_sz = wx.Size(max_right, new_sz.y)
        if max_bottom > new_sz.y:
            new_sz = wx.Size(new_sz.x, max_bottom)
        self.content_sz = new_sz
        self.SetVirtualSize(self.content_sz)
        # restore the previous scroll position
        self.Scroll(prev_view[0], prev_view[1])
        if was_shown:
            self.Show()

    def ExpandVirtualSize(self, dx, dy):
        """Enlarge the virtual size.

        * `dx: ` pixels to add in the X direction.
        * `dy: ` pixels to add in the Y direction.
        """
        enlarged = wx.Size(self.content_sz.x + dx, self.content_sz.y + dy)
        self.SetVirtualSize(enlarged)
        self.content_sz = enlarged

    def GetViewStartPixels(self):
        """Return the absolute coordinates of the point that, due to the
        scrollbars, currently lies at `(0,0)`.
        """
        col, row = self.GetViewStart()
        return wx.Point(col * self.SCROLL_STEP, row * self.SCROLL_STEP)

    ### Callbacks

    def AutoSizeOnSize(self, ev):
        """Listens to `wx.EVT_SIZE`."""
        self.UpdateContentSize(ev.GetSize())
        ev.Skip()
class ColouredText(wx.TextCtrl):
    """
    `ColouredText` overrides `TextCtrl.SetBackgroundColour` so that every
    character's individual background colour is preserved correctly when the
    control-wide background changes.
    """

    def __init__(self, parent, value="", size=wx.DefaultSize, pos=wx.DefaultPosition, style=0):
        """Constructor.

        * `parent: ` the parent window.
        * `value: ` the initial text for this control.
        * `size: ` by default, is `wx.DefaultSize`.
        * `pos: ` by default, is `wx.DefaultPosition`.
        * `style: ` the style for this window.
        """
        super(ColouredText, self).__init__(parent, value=value, size=size, pos=pos, style=style)

    def SetBackgroundColour(self, new_cl):
        """Overridden from `wx.TextCtrl`. Changes the background colour while
        respecting each individual char's background, as set with
        `wx.TextCtrl.SetStyle`. A plain `TextCtrl.SetBackgroundColour` would
        clobber a char whose background was set to a third colour; this
        method repaints everything and then restores those chars.
        """
        # 1) remember chars whose bg differs from the control's current bg,
        # 2) repaint the whole control, 3) restore the remembered chars.
        text = self.GetValue()
        attr = wx.TextAttr()
        current = self.GetBackgroundColour()
        saved = {}
        for pos in range(len(text)):
            self.GetStyle(pos, attr)
            char_bg = attr.GetBackgroundColour()
            if char_bg != current:
                saved[pos] = char_bg
        # repaint with a fresh TextAttr — `attr` aliases colours stored
        # in `saved`, so it must not be reused here
        super(ColouredText, self).SetBackgroundColour(new_cl)
        self.SetStyle(0, len(text), wx.TextAttr(None, new_cl))
        # restore the saved per-char backgrounds
        for pos in saved.keys():
            attr.SetBackgroundColour(saved[pos])
            self.SetStyle(pos, pos + 1, attr)
class EditText(ColouredText):
    """
    `EditText` is a `wx.TextCtrl` that changes background colour when it has
    focus. It is meant to look like a `wx.StaticText` until the user edits
    its contents, at which point it looks like a regular `wx.TextCtrl`. The
    background shown in the static-looking state (in most cases the parent's
    background colour) is called the "first colour"; the one shown while
    editing is the "second colour", which is usually white.
    """
    DEFAULT_SZ = (200, 20)
    DEFAULT_STYLE = wx.BORDER_NONE|wx.TE_RICH|wx.TE_PROCESS_ENTER|wx.TE_MULTILINE|wx.TE_NO_VSCROLL
    DEFAULT_FONT = (12, wx.SWISS, wx.ITALIC, wx.BOLD)
    DEFAULT_2_CL = (255, 255, 255, 255)

    def __init__(self, parent, value="", pos=wx.DefaultPosition, size=DEFAULT_SZ, style=DEFAULT_STYLE):
        """Constructor.

        * `parent: ` the parent window.
        * `value: ` the initial text for this control.
        * `pos: ` by default, is `wx.DefaultPosition`.
        * `size: ` by default, is `EditText.DEFAULT_SZ`.
        * `style: ` by default, is `EditText.DEFAULT_STYLE`.
        """
        super(EditText, self).__init__(parent, pos=pos, size=size, style=style, value=value)
        # colours: start out in the static-looking (first colour) state
        self.first_cl = parent.GetBackgroundColour()
        self.second_cl = self.DEFAULT_2_CL
        self.SetBackgroundColour(self.first_cl)
        # font
        self.SetFont(wx.Font(*self.DEFAULT_FONT))
        # bindings
        self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
        self.Bind(wx.EVT_SET_FOCUS, self.OnSetFocus)
        self.Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus)
        self.Bind(wx.EVT_TEXT_ENTER, self.OnEnter)
        self.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)

    ### Behavior functions

    def ToggleColours(self):
        """Switch between the first and second colours."""
        if self.GetBackgroundColour() == self.first_cl:
            self.ShowSecondColour()
        else:
            self.ShowFirstColour()

    def ShowFirstColour(self):
        """Set the background to the first colour."""
        self.SetBackgroundColour(self.first_cl)

    def ShowSecondColour(self):
        """Set the background to the second colour."""
        self.SetBackgroundColour(self.second_cl)

    def SetSecondColour(self, cl):
        """Set the second colour."""
        self.second_cl = cl

    def GetSecondColour(self):
        """Get the second colour.

        `returns: ` a `(R, G, B, alpha)` tuple."""
        return self.second_cl

    def SetFirstColour(self, cl):
        """Set the first colour and show it immediately."""
        self.first_cl = cl
        self.SetBackgroundColour(self.first_cl)

    def GetFirstColour(self):
        """Get the first colour.

        `returns: ` a `(R, G, B, alpha)` tuple."""
        return self.first_cl

    ### Callbacks

    def OnKeyDown(self, ev):
        """Listens to `wx.EVT_KEY_DOWN`."""
        if ev.GetKeyCode() == 9:  # Tab: let the enclosing Card handle it
            GetCardAncestor(self).OnTab(ev)
        else:
            ev.Skip()

    def OnEnter(self, ev):
        """Listens to `wx.EVT_TEXT_ENTER`."""
        self.ToggleColours()
        # Shift+Enter navigates backwards
        self.Navigate(not wx.MouseState().ShiftDown())

    def OnLeftDown(self, ev):
        """Listens to `wx.EVT_LEFT_DOWN`."""
        if self.GetBackgroundColour() == self.first_cl:
            self.ShowSecondColour()
        ev.Skip()

    def OnSetFocus(self, ev):
        """Listens to `wx.EVT_SET_FOCUS`."""
        end = self.GetLastPosition()
        self.SetInsertionPoint(end)
        self.SetSelection(0, end)
        self.ShowSecondColour()

    def OnKillFocus(self, ev):
        """Listens to `wx.EVT_KILL_FOCUS`."""
        self.SetSelection(0, 0)
        self.ShowFirstColour()
#######################
## Auxiliary functions
#######################
def GetAncestors(ctrl):
    """Return a list of all of ctrl's wx.Window ancestors.

    * `ctrl: ` a wx object.
    `returns: ` a list of all wx ancestors of `ctrl`, nearest first.
    """
    ancestors = []
    current = ctrl
    while current:
        current = current.GetParent()
        ancestors.append(current)
    # the topmost window's parent is None; drop it
    del ancestors[-1]
    return ancestors
def GetCardAncestor(ctrl):
    """Return the Card ancestor of its argument.

    * `ctrl: ` a wx object.
    `returns: ` the first `Card` ancestor of `ctrl`, or `None`.
    """
    from card import Card
    for ancestor in GetAncestors(ctrl):
        if isinstance(ancestor, Card):
            return ancestor
    return None
def DumpSizerChildren(sizer, depth=1, full=False):
"""Recursively prints all children of a wx.Sizer.
* `sizer: ` a `wx.Sizer`.
* `depth: ` the depth at which to start printing items. Should always
be `1` when called from outside itself.
* `full: ` set to `True` to print full object information, including
memory address.
"""
# prepare the info string for the sizer
# indentation
sizer_info = str(" " * (depth - 1))
# obj info
if full: sizer_info = sizer_info + "Sizer: " # + str(sizer)
else: sizer_info = sizer_info + "Sizer: " # + str(sizer.__class__)
# orientation
orient = sizer.GetOrientation()
if orient == wx.VERTICAL: sizer_info = sizer_info + "vertical"
else: sizer_info = sizer_info + "horizontal"
print sizer_info
# for each children: indentation, class and shown state
for c in sizer.GetChildren():
if c.IsWindow():
msg = str(" " * depth)
if full: msg = msg + str(c.GetWindow())
else: msg = msg + str(c.GetWindow().__class__)
if c.IsShown(): msg = msg + ", shown"
else: msg = msg + ", hidden"
print msg
# and recursively for nested sizers
elif c.IsSizer():
DumpSizerChildren(c.GetSizer(), depth + 1, full)
def MakeEncirclingRect(p1, p2):
    """
    Return the wx.Rect with two opposite vertices at p1, p2. Width and
    height are guaranteed to be positive.

    * `p1: ` any object with two fields addressable as `p1[0]` and `p1[1]`.
    * `p2: ` idem.
    `returns: ` a `wx.Rect` spanning `p1` and `p2` with positive width and height.
    """
    xs = (p1[0], p2[0])
    ys = (p1[1], p2[1])
    return wx.Rect(min(xs), min(ys), max(xs) - min(xs), max(ys) - min(ys))
def isnumber(s):
    """Return True if the argument is a string representing a number.

    * `s: ` a string (other types are tolerated and simply report False).
    `returns: ` `True` if `s` parses as a float, `False` otherwise.
    """
    try:
        float(s)
        return True
    except (ValueError, TypeError):
        # TypeError: non-string, non-numeric input (e.g. None, a list) —
        # previously this propagated and crashed the caller
        return False
def dist2(p1, p2):
    """Return the squared euclidean distance between two points.

    * `p1: ` any object with two fields addressable as `p1[0]` and `p1[1]`.
    * `p2: ` idem.
    `returns: ` the squared distance, always a `float`.
    """
    # Index the coordinates directly instead of relying on `p1 - p2`: the
    # documented contract only requires item access, and plain tuples/lists
    # do not support subtraction (the old code raised TypeError for them).
    return float((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)
def dist(p1, p2):
    """Return the euclidean distance between two points.

    * `p1: ` any object with two fields addressable as `p1[0]` and `p1[1]`.
    * `p2: ` idem.
    `returns: ` the distance, always a `float`.
    """
    squared = dist2(p1, p2)
    return float(sqrt(squared))
def IsFunctionKey(key):
    """Check if `key` is a function key.

    * `key: ` a `wx.KeyCode`, eg, as returned by `wx.MouseEvent.GetKeyCode()`.
    `returns: ` `True` if `key` is one of the 24 possible values of `wx.WXK_F*`, or `False`.
    """
    fkeys = (wx.WXK_F1, wx.WXK_F2, wx.WXK_F3, wx.WXK_F4, wx.WXK_F5,
             wx.WXK_F6, wx.WXK_F7, wx.WXK_F8, wx.WXK_F9, wx.WXK_F10,
             wx.WXK_F11, wx.WXK_F12, wx.WXK_F13, wx.WXK_F14, wx.WXK_F15,
             wx.WXK_F16, wx.WXK_F17, wx.WXK_F18, wx.WXK_F19, wx.WXK_F20,
             wx.WXK_F21, wx.WXK_F22, wx.WXK_F23, wx.WXK_F24)
    # membership test uses == just like the original any() over comparisons
    return key in fkeys
###########################
# pdoc documentation setup
###########################
# __pdoc__ is the special variable from the automatic
# documentation generator pdoc.
# By setting pdoc[class.method] to None, we are telling
# pdoc to not generate documentation for said method.
__pdoc__ = {}
# Hide the module-level name `field` itself: the loops below leak their
# loop variable into module scope, and pdoc would otherwise document it.
__pdoc__["field"] = None
# Since we only want to generate documentation for our own
# methods, and not the ones coming from the base classes,
# we first set to None every method in the base class.
for field in dir(wx.ScrolledWindow):
    __pdoc__['AutoSize.%s' % field] = None
for field in dir(wx.TextCtrl):
    __pdoc__['ColouredText.%s' % field] = None
for field in dir(ColouredText):
    __pdoc__['EditText.%s' % field] = None
# Then, we have to add again the methods that we have
# overriden (only names present in each class's own __dict__).
# See https://github.com/BurntSushi/pdoc/issues/15.
for field in AutoSize.__dict__.keys():
    if 'AutoSize.%s' % field in __pdoc__.keys():
        del __pdoc__['AutoSize.%s' % field]
for field in ColouredText.__dict__.keys():
    if 'ColouredText.%s' % field in __pdoc__.keys():
        del __pdoc__['ColouredText.%s' % field]
for field in EditText.__dict__.keys():
    if 'EditText.%s' % field in __pdoc__.keys():
        del __pdoc__['EditText.%s' % field]
|
#-*- coding:utf8 -*-
# Copyright (c) 2020 barriery
# Python release: 3.7.0
# Create time: 2020-07-13
import json
import requests
class BDCaller(object):
    """Thin client for an HTTP JSON endpoint ("home").

    Responses are expected to be JSON objects whose "data" field is itself a
    JSON-encoded string (double-encoded payload).
    """

    def __init__(self, home=None):
        """Store the default endpoint used when a call omits `home`."""
        self.home_ = home

    def _resolve_home(self, home):
        """Return the effective endpoint, preferring the explicit argument.

        Raises the original Exception when neither an argument nor a default
        endpoint is available. (Factored out of callAPI/callContract, which
        previously duplicated this validation and the request logic.)
        """
        if home is None:
            home = self.home_
        if home is None:
            raise Exception("params [home] can not be None")
        return home

    def callAPI(self, params, home=None):
        """GET `home` with `params`; return the decoded "data" payload."""
        home = self._resolve_home(home)
        r = requests.get(home, params=params)
        resp = json.loads(r.content)
        data = json.loads(resp["data"])
        return data

    def callContract(self, params, home=None):
        """Like callAPI, but additionally decode the JSON "result" field of
        the returned data payload."""
        data = self.callAPI(params, home)
        result = json.loads(data["result"])
        return result
|
#!/usr/bin/env python
# pairselect: randomly assign students to pairs
import getopt
import random
import sys
def print_usage(outstream):
    """Print the command-line usage summary for pairselect.

    * outstream: a writable file-like object (sys.stdout for --help,
      sys.stderr for error paths).
    """
    usage = ("Usage: ./pairselect [options] students.txt\n"
             " Options:\n"
             " -h|--help print this help message and exit\n"
             " -o|--out: FILE file to which output will be written;\n"
             " default is terminal (standard output)\n")
    print >> outstream, usage
# ---- Parse options and arguments ----
outstream = sys.stdout
optstr = "ho:"
longopts = ["help", "out="]
(options, args) = getopt.getopt(sys.argv[1:], optstr, longopts)
for key, value in options:
    if key in ("-h", "-help", "--help"):
        print_usage(sys.stdout)
        sys.exit(0)
    elif key in ("-o", "-out", "--out"):
        outfile = value
        try:
            outstream = open(outfile, "w")
        except IOError as e:
            # BUG FIX: `options` is a list of (flag, value) tuples produced
            # by getopt and has no `outfile` attribute; the old code raised
            # AttributeError here instead of reporting the file name.
            print >> sys.stderr, "error opening output file %s" % outfile
            print >> sys.stderr, e
            sys.exit(1)
    else:
        assert False, "unsupported option '%s'" % key

# ---- Open input: file argument, else piped standard input ----
infile = None
instream = None
if len(args) > 0:
    infile = args[0]
    try:
        instream = open(infile, "r")
    except IOError as e:
        print >> sys.stderr, "error opening input file %s" % infile
        print >> sys.stderr, e
        sys.exit(1)
elif not sys.stdin.isatty():
    instream = sys.stdin
else:
    print >> sys.stderr, "error: please provide input with file or standard input"
    print_usage(sys.stderr)
    sys.exit(1)

# ---- Read student names (third tab-separated column), shuffle, pair up ----
students = []
for line in instream:
    line = line.rstrip()
    fields = line.split("\t")
    students.append(fields[2])
instream.close()
random.shuffle(students)
while(len(students) > 0):
    s1 = students.pop()
    if len(students) > 0:
        s2 = students.pop()
        print >> outstream, "[%s, %s]" % (s1, s2)
    else:
        # odd number of students: the last one is listed alone
        print >> outstream, "[%s]" % (s1)
outstream.close()
|
"""This module has a class to clean the raw data """
import numpy as np
import pandas as pd
class clean:
    '''Holds the raw restaurant-inspection data and cleans it.'''

    def __init__(self, origin_data):
        '''Store the raw DataFrame to be cleaned.'''
        self.raw_data = origin_data

    def clean_data(self):
        '''Clean the data and return the cleaned DataFrame.

        Keeps only CAMIS/BORO/GRADE/GRADE DATE, drops duplicates, keeps
        grades A/B/C, drops rows with missing values or a 'Missing' borough,
        and appends a parsed FORMAT_DATE datetime column.
        '''
        subset = self.raw_data[['CAMIS', 'BORO', 'GRADE', 'GRADE DATE']].drop_duplicates()
        # keep only the real letter grades
        graded = subset[subset['GRADE'].isin(['A', 'B', 'C'])]
        graded = graded.dropna()
        # drop rows whose borough value is the 'Missing' placeholder
        graded = graded[graded['BORO'] != 'Missing']
        # parsed dates, renamed so both columns can coexist
        dates = pd.DataFrame(pd.to_datetime(graded['GRADE DATE']))
        dates.rename(columns={'GRADE DATE': 'FORMAT_DATE'}, inplace=True)
        return pd.concat([graded, dates], axis=1)
|
import datetime
import os
import tkinter
import numpy as np
from PIL import Image as Img
from PIL import ImageTk
import pytesseract
import cv2
from tkinter import *
# Haar cascade for face detection and the overlay sprites (loaded with -1
# to keep their alpha channels).
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
specs_ori = cv2.imread('img/glass.png', -1)
cigar_ori = cv2.imread('img/cigar.png', -1)
mus_ori = cv2.imread('img/mustache.png', -1)
# Webcam capture at a fixed resolution.
width, height = 800, 450
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
# Globals shared between the capture loop and the button callbacks.
cv2image = None
frame = None
gallery = None
filter = 0  # active filter id: 0 = none, 1 = glasses, 2 = cigar, 3 = mustache
def save():
    """Save the current PIL frame to img-saved/ under a timestamped name
    (colons replaced so the name is valid on Windows)."""
    global cv2image, frame
    stamp = str(datetime.datetime.now().today()).replace(":", "-")
    path = "img-saved/" + stamp + ".png"
    frame.save(path)
    print()
def cancel():
    """Placeholder cancel handler: currently only prints a blank line."""
    print()
def filtering(i):
    """Toggle the active filter: selecting the already-active filter id
    switches filtering off (back to 0)."""
    global filter
    filter = 0 if filter == i else i
def mail():
    """Email the photo as an attachment via Gmail SMTP (STARTTLS).

    NOTE(review): addresses, the attachment filename and the account
    password are hard-coded placeholders — replace with real configuration
    before use.
    """
    import smtplib
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText
    from email.mime.base import MIMEBase
    from email import encoders

    # BUG FIX: the original used typographic quotes (‘…’) around these
    # literals, which is a Python syntax error.
    fromaddr = 'sender@email.com'
    toaddr = ['sender@email.com', 'receiver@email.com', 'receiver2@email.com']

    # build the multipart message
    msg = MIMEMultipart()
    msg['From'] = fromaddr
    msg['To'] = ','.join(toaddr)
    msg['Subject'] = "Your Magic Photo"

    body = "Hello,\n\nHere's your awesome magic photo from The PhotoBooth."
    msg.attach(MIMEText(body, 'plain'))

    # attach the photo; `with` guarantees the file handle is closed
    # (the original left it open)
    filename = 'D1G1TALArtboard4.jpg'
    with open(filename, "rb") as attachment:
        p = MIMEBase('application', 'octet-stream')
        p.set_payload(attachment.read())
    encoders.encode_base64(p)
    p.add_header('Content-Disposition', "attachment; filename= %s" % filename)
    msg.attach(p)

    # send via Gmail with STARTTLS
    s = smtplib.SMTP('smtp.gmail.com', 587)
    s.starttls()
    s.login(fromaddr, "password")
    text = msg.as_string()
    s.sendmail(fromaddr, toaddr, text)
    s.quit()
def transparentOverlay(src, overlay, pos=(0, 0), scale=1):
    """Alpha-blend `overlay` (image with alpha channel) onto `src` in place.

    * `src: ` background image; modified in place and also returned.
    * `overlay: ` foreground image whose 4th channel is alpha.
    * `pos: ` `(x, y)` top-left position of the overlay within `src`
      (pos[0] offsets columns, pos[1] offsets rows).
    * `scale: ` resize factor applied to the overlay before blending.
    """
    overlay = cv2.resize(overlay, (0, 0), fx=scale, fy=scale)
    fg_h, fg_w, _ = overlay.shape        # size of the foreground
    bg_rows, bg_cols, _ = src.shape      # size of the background image
    x_pos, y_pos = pos[0], pos[1]
    for row in range(fg_h):
        for col in range(fg_w):
            dst_row = y_pos + row
            dst_col = x_pos + col
            if dst_row >= bg_rows or dst_col >= bg_cols:
                continue  # overlay pixel falls outside the background
            alpha = float(overlay[row][col][3] / 255.0)
            src[dst_row][dst_col] = alpha * overlay[row][col][:3] + (1 - alpha) * src[dst_row][dst_col]
    return src
def load_images():
    """Stub: intended to (re)load the gallery thumbnails; not implemented."""
    pass
def open_gallery():
    """Open (or re-show) the gallery window listing the saved snapshots.

    Thumbnails from img-saved/ are laid out in a two-column scrollable grid,
    newest first.
    """
    global gallery
    try:
        # If the gallery window already exists, just bring it back.
        gallery.deiconify()
        print('Gallery already open...')
    except (AttributeError, tkinter.TclError):
        # Narrowed from a bare `except:`. AttributeError: `gallery` is still
        # None (never opened); TclError: the window was destroyed.
        gallery = Toplevel(root)
        gallery.title("Gallery")
        gallery.geometry("500x600")
        # scrollable frame inside a canvas
        container = Frame(gallery, height=600)
        canvas = Canvas(container, height=600)
        scrollbar = Scrollbar(container, orient="vertical", command=canvas.yview)
        scrollable_frame = Frame(canvas)
        cacanvas = Canvas(scrollable_frame)
        scrollable_frame.bind(
            "<Configure>",
            lambda e: canvas.configure(
                scrollregion=canvas.bbox("all")
            )
        )
        canvas.create_window((0, 0), window=scrollable_frame, anchor="nw")
        canvas.configure(yscrollcommand=scrollbar.set)
        load_images()
        # portable path join (the original concatenated "\img-saved",
        # which only worked on Windows)
        imgdir = os.path.join(os.path.dirname(__file__), "img-saved")
        x, y, i = 0, 0, 0
        # newest files first; two thumbnails per row
        for filename in reversed(os.listdir(imgdir)):
            if filename.endswith(".png"):
                file = "img-saved/" + filename
                load = Img.open(str(file))
                image1 = load.resize((250, 185), Img.ANTIALIAS)
                render = ImageTk.PhotoImage(image1)
                img = Label(scrollable_frame, image=render)
                img.image = render  # keep a reference so Tk doesn't GC it
                if i % 2 == 0:
                    x = 0
                    if i != 0:
                        y += 200
                else:
                    x = 250
                img.place(x=x, y=y)
                print(i, " >> ", filename, " >>> ", x, " - ", y)
                i += 1
            else:
                continue
        container.pack(fill=BOTH)
        canvas.pack(side="left", fill="both", expand=True)
        scrollbar.pack(side="right", fill="y")
        cacanvas.pack(side="left", fill="both", expand=True)
# root window: fixed 970x600, Escape quits
root = Tk()
# root.iconbitmap('image/icon.ico')
root.title("Pydev")
root.geometry("970x600")
root.resizable(0, 0)
root.bind('<Escape>', lambda e: root.quit())
# main container frame (fills the window)
frame = Frame(root, width=970, height=600, background="black")
frame.pack(fill="both", expand=YES)
frame.pack_propagate(FALSE)
# left column: filter buttons
frame_filtre = Frame(frame, width=200, height=600, background="blue")
frame_filtre.grid(row=0, rowspan=6, column=0, columnspan=1, sticky=N + E + W + S)
frame_filtre.grid_propagate(FALSE)
# top-right area: live camera view ("sary" = picture)
frame_sary = Frame(frame, width=800, height=450, background="purple")
frame_sary.grid(row=0, rowspan=5, column=1, columnspan=4, sticky=N + E + W + S)
frame_sary.grid_propagate(FALSE)
# bottom-right area: action buttons
frame_bouton = Frame(frame, width=800, height=150, background="red")
frame_bouton.grid(row=5, rowspan=1, column=1, columnspan=4, sticky=N + E + W + S)
frame_bouton.grid_propagate(FALSE)
# label that receives the video frames from show_frame()
lmain = Label(frame_sary)
lmain.pack()
########
# three equal slots inside the button bar
frame_b1 = Frame(frame_bouton, width=300, height=150, background="gray")
frame_b1.grid(row=0, column=0, sticky=N + E + W + S)
frame_b1.grid_propagate(FALSE)
frame_b2 = Frame(frame_bouton, width=200, height=150, background="yellow")
frame_b2.grid(row=0, column=1, sticky=N + E + W + S)
frame_b2.grid_propagate(FALSE)
frame_b3 = Frame(frame_bouton, width=300, height=150, background="red")
frame_b3.grid(row=0, column=2, sticky=N + E + W + S)
frame_b3.grid_propagate(FALSE)
# action buttons (NOTE(review): both "Capture" and "Save" call save —
# confirm whether "Save" was meant to call something else)
bouton_cancel = Button(frame_b1, text="Gallery", bg='#000000', command=open_gallery,
                       relief=FLAT, font=("bold", 18), fg="white")
bouton_cancel.pack(padx=60, pady=29)
bouton_take = Button(frame_b2, text="Capture", bg='#000000', command=save, relief=FLAT,
                     font=("bold", 18), fg="white")
bouton_take.pack(padx=60, pady=29)
bouton_save = Button(frame_b3, text="Save", bg='#000000', command=save, relief=FLAT,
                     font=("bold", 18), fg="white")
bouton_save.pack(padx=60, pady=29)
# filter toggle buttons (ids match the filtering() callback)
bou1 = Button(frame_filtre, text='Glass', width=20, bg="black", fg="white",
              relief=FLAT, command=lambda: filtering(1))
bou1.pack(side=TOP, padx=10, pady=12)
bou2 = Button(frame_filtre, text='Cigar', width=20, bg="black", fg="white",
              relief=FLAT, command=lambda: filtering(2))
bou2.pack(side=TOP, padx=10, pady=4)
bou3 = Button(frame_filtre, text='Mustache', width=20, bg="black", fg="white",
              relief=FLAT,
              command=lambda: filtering(3))
bou3.pack(side=TOP, padx=10, pady=8)
def show_frame():
    """Grab one webcam frame, apply the active overlay filter, display it in
    `lmain`, and reschedule itself via Tk's timer (every 10 ms)."""
    global filter, frame
    # BUG FIX: the original called cap.read() twice back-to-back, discarding
    # one camera frame per tick; a single read suffices.
    _, frame = cap.read()
    frame = cv2.flip(frame, 1)  # mirror for a natural selfie view
    faces = face_cascade.detectMultiScale(frame, 1.2, 5, 0, (120, 120), (350, 350))
    for (x, y, w, h) in faces:
        if h > 0 and w > 0:
            # vertical bands of the face where each overlay is placed
            glass_symin = int(y + 1.5 * h / 5)
            glass_symax = int(y + 2.5 * h / 5)
            sh_glass = glass_symax - glass_symin
            cigar_symin = int(y + 4 * h / 6)
            cigar_symax = int(y + 5.5 * h / 6)
            sh_cigar = cigar_symax - cigar_symin
            mus_symin = int(y + 3.5 * h / 6)
            mus_symax = int(y + 5 * h / 6)
            sh_mus = mus_symax - mus_symin
            # regions of interest inside the frame (views, not copies)
            face_glass_roi_color = frame[glass_symin:glass_symax, x:x + w]
            face_cigar_roi_color = frame[cigar_symin:cigar_symax, x:x + w]
            face_mus_roi_color = frame[mus_symin:mus_symax, x:x + w]
            # overlays resized to the face width
            specs = cv2.resize(specs_ori, (w, sh_glass), interpolation=cv2.INTER_CUBIC)
            cigar = cv2.resize(cigar_ori, (w, sh_cigar), interpolation=cv2.INTER_CUBIC)
            mustache = cv2.resize(mus_ori, (w, sh_mus), interpolation=cv2.INTER_CUBIC)
            if filter == 1:
                transparentOverlay(face_glass_roi_color, specs)
            elif filter == 2:
                transparentOverlay(face_cigar_roi_color, cigar, (int(w / 2), int(sh_cigar / 2)))
            elif filter == 3:
                transparentOverlay(face_mus_roi_color, mustache)
    # convert to a Tk-displayable image; keep a reference on the label so
    # the PhotoImage is not garbage-collected
    cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
    frame = Img.fromarray(cv2image)
    frametk = ImageTk.PhotoImage(image=frame)
    lmain.frametk = frametk
    lmain.configure(image=frametk)
    lmain.after(10, show_frame)
# Kick off the capture loop, then enter the Tk main loop.
show_frame()
root.mainloop()
|
K = int(input("Write month number: "))

def action1():
    """31-day months."""
    print("31")

def action2():
    """February (non-leap year)."""
    print("28")

def action3():
    """30-day months."""
    print("30")

def unknown_action():
    """Any value outside 1..12."""
    print("'Error'")

# dispatch on month number via membership tests
if K in (1, 3, 5, 7, 8, 10, 12):
    action1()
elif K == 2:
    action2()
elif K in (4, 6, 9, 11):
    action3()
else:
    unknown_action()
import cx_Oracle
import sys
import common
import os.path
from vehicle import register_person
def print_opts():
    """Print the menu for the licence-registration submenu."""
    menu = ('Select one of the options:',
            '(1) Register a new person.',
            '(2) Create a licence for a person.',
            " Or type 'exit' to go back.")
    for entry in menu:
        print(entry)
def register_licence(conn):
    """Interactively create a drive_licence row.

    Prompts for licence number, SIN, class, an optional picture and the
    issuing/expiring dates, then inserts into drive_licence.

    `returns: ` True on success, False on a database error, or None if the
    user cancelled any prompt.
    """
    # licence number must not already exist
    while True:
        licenceno = common.read_string('Licence Number', 15)
        if licenceno is None:
            return None
        if not common.exists(conn, 'drive_licence', 'licence_no', licenceno):
            break
        print('The licence number already exists!')
    # SIN must belong to an existing person who has no licence yet
    while True:
        sin = common.read_string('SIN', 15)
        if sin is None:
            return None
        if common.exists(conn, 'drive_licence', 'sin', sin):
            print('The selected person already has a licence!')
            continue
        if common.exists(conn, 'people', 'sin', sin):
            break
        print('No person with this social insurance number exists!')
    lclass = common.read_string('Class', 10)
    if lclass is None:
        return None
    # optional picture upload
    while True:
        upload = common.read_string('Upload Picture (y/n)')
        if upload is None:
            return None
        if upload == 'y':
            break
        if upload == 'n':
            image_data = None
            break
        print('Please select either \'y\' for yes or \'n\' for no!')
    if upload == 'y':
        while True:
            fimage = common.read_string_exact('Picture File')
            if fimage is None:
                return None
            if not os.path.isfile(fimage):
                print('File not found!')
                continue
            try:
                # `with` closes the handle even if read() fails (the
                # original could leak it)
                with open(fimage, 'rb') as pimage:
                    image_data = pimage.read()
                break
            except OSError:
                # narrowed from a bare `except:`; file-system errors are
                # the realistic failure mode here
                print('Failed to read image file!')
                continue
    issuing_date = common.read_date('Issuing Date')
    if issuing_date is None:
        return None
    issuing_date = common.format_date(issuing_date)
    expiring_date = common.read_date('Expiring Date')
    if expiring_date is None:
        return None
    expiring_date = common.format_date(expiring_date)
    try:
        curs = conn.cursor()
        curs.bindarraysize = 1
        # LONG_BINARY bind for the image blob; dates bound as yyyymmdd strings
        curs.setinputsizes(15, 15, 10, cx_Oracle.LONG_BINARY, 8, 8)
        curs.executemany('insert into drive_licence values (:1,:2,:3,:4,to_date(:5,\'yyyymmdd\'),to_date(:6,\'yyyymmdd\'))',
                         [(licenceno, sin, lclass, image_data, issuing_date, expiring_date)])
        curs.close()
        conn.commit()
        return True
    except cx_Oracle.DatabaseError as e:
        error, = e.args
        # translate the common constraint violations into friendly messages
        if isinstance(error, str):
            print('Unknown error', error, '!')
        elif error.code == 1:
            print('Error: The licence number already exists or the person already has a licence!')
        elif error.code == 2291:
            print('Error: No person with this social insurance number exists!')
        else:
            print('Unknown error', error.code, '!')
        return False
def driver_licence_registration(conn):
    """Menu loop for the licence-registration submenu.

    Reads options from stdin until EOF or 'exit', dispatches to
    register_person / register_licence, and reports the outcome. Database
    errors not handled inside the handlers are caught and displayed.
    """
    while True:
        common.clear()
        print_opts()
        line = sys.stdin.readline()
        if not line:  # EOF
            return
        line = line.rstrip()
        if line == 'exit':
            return
        common.clear()
        try:
            r = None
            if line == '1':
                r = register_person(conn)
            elif line == '2':
                r = register_licence(conn)
            else:
                print('Invalid option!')
            # r: None = cancelled, True = success, False = failure
            # (identity checks instead of the original `== None`/`== True`)
            if r is None:
                print('Operation cancelled.')
            elif r is True:
                print('Operation succeeded.')
            elif r is False:
                print('Operation failed.')
        except cx_Oracle.DatabaseError as e:
            print('Untreated exception...')
            error, = e.args
            print("Oracle-Error-Code:", error.code)
            print("Oracle-Error-Offset:", error.offset)
            print("Oracle-Error-Message:", error.message)
        sys.stdin.readline()  # wait for Enter before redisplaying the menu
|
#
# from wang.dataPretreatment import *
# #嵌入矩阵的维度
# embed_dim = 32
# #用户ID个数
# uid_max = max(features.take(0,1)) + 1 # 6040
# #性别个数
# gender_max = max(features.take(2,1)) + 1 # 1 + 1 = 2
# #年龄类别个数
# age_max = max(features.take(3,1)) + 1 # 6 + 1 = 7
# #职业个数
# job_max = max(features.take(4,1)) + 1# 20 + 1 = 21
#
# #电影ID个数
# movie_id_max = max(features.take(1,1)) + 1 # 3952
# #电影类型个数
# movie_categories_max = max(genres2int.values()) + 1 # 18 + 1 = 19
# #电影名单词个数
# movie_title_max = len(title_set) # 5216
#
# #对电影类型嵌入向量做加和操作的标志,考虑过使用mean做平均,但是没实现mean
# combiner = "sum"
#
# #电影名长度
# sentences_size = title_count # = 15
# #文本卷积滑动窗口,分别滑动2, 3, 4, 5个单词
# window_sizes = {2, 3, 4, 5}
# #文本卷积核数量
# filter_num = 8
#
# #电影ID转下标的字典,数据集中电影ID跟下标不一致,比如第5行的数据电影ID不一定是5
# movieid2idx = {val[0]:i for i, val in enumerate(movies.values)}
#
#
#
# # 超参
#
#
# # Number of Epochs
# num_epochs = 5
# # Batch Size
# batch_size = 256
#
# dropout_keep = 0.5
# # Learning Rate
# learning_rate = 0.0001
# # Show stats for every n number of batches
# show_every_n_batches = 20
#
# save_dir = './save'
|
#Standard imports
from __future__ import unicode_literals
import datetime
#Django imports
from django.core.cache import cache
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
import requests
import json
#Extra modules import
from tinymce.models import HTMLField
#Funciones extra y Choice Field
def obtener_organismos():
    """Fetch the available organisms from the organigrama web service.

    Results are cached for 10 minutes so repeated calls do not hit the
    service each time.

    `returns: ` a list of `(id, nombre)` tuples suitable for a choices field.
    """
    organismos = cache.get("organismos")
    if organismos is None:
        # timeout added so a hung service cannot block the caller forever
        r = requests.get('http://organigrama.jujuy.gob.ar/ws_org/', timeout=10)
        orgs = json.loads(r.text)['data']
        organismos = [(org['id'], org['nombre']) for org in orgs]
        cache.set("organismos", organismos, 10 * 60)  # cache for 10 minutes
    return organismos
# Importance levels for an action (choices for Acciones.importancia).
IMPORTANCIA = (
    (0, 'Indefinida'),
    (3, 'Leve'),
    (6, 'Intermedia'),
    (9, 'Prioritaria'),
)
# Create your models here.
class Estado(models.Model):
    """Lookup table: the state/status of an action."""
    nombre = models.CharField('Nombre', max_length=100)

    def __str__(self):
        return self.nombre
class Organismo(models.Model):
    """A government body, optionally linked to the user that manages it."""
    nombre = models.CharField('Nombre', max_length=100)
    usuario = models.ForeignKey(User, on_delete=models.SET_NULL, blank=True, null=True)

    def __str__(self):
        return self.nombre

    def cantidad_comunicados(self):
        # Number of communications attached to any of this body's actions.
        # NOTE(review): Comunicacion is defined elsewhere in this app.
        return Comunicacion.objects.filter(id_accion__in=Acciones.objects.filter(organismo_ws=self)).count()

    def sin_comunicar(self):
        # Actions of this body that have no communications yet.
        return Acciones.objects.filter(organismo_ws=self, comunicaciones=None).count()

    def get100_acciones(self):
        # First 100 actions of this body, using the model's default ordering.
        return Acciones.objects.filter(organismo_ws=self)[:100]
class Financiacion(models.Model):
    """Lookup table: funding source of an action."""
    nombre = models.CharField('Nombre', max_length=100)

    def __str__(self):
        return self.nombre
class Departamento(models.Model):
    """Lookup table: department (administrative region)."""
    nombre = models.CharField('Nombre', max_length=100)

    def __str__(self):
        return self.nombre
class Municipio(models.Model):
    """Lookup table of municipalities; referenced by Acciones.municipio_id."""
    nombre = models.CharField('Nombre', max_length=100)
    def __str__(self):
        return self.nombre
class Localidad(models.Model):
    """Lookup table of localities; referenced by Acciones.localidad_id."""
    nombre = models.CharField('Nombre', max_length=100)
    def __str__(self):
        return self.nombre
class Acciones(models.Model):
    """A government action/work item, mirrored from an external web service."""
    # NOTE(review): obtener_organismos() is called at import time, so loading
    # this module performs an HTTP request and freezes the choices until the
    # process restarts; consider passing the callable or validating in a form.
    organismo = models.PositiveIntegerField(choices= obtener_organismos(), default=0)
    importancia = models.IntegerField(choices=IMPORTANCIA, default=0)
    # Identifier of this action in the external web service (unique).
    id_ws = models.IntegerField(unique=True)
    nombre = models.CharField('Nombre', max_length=100)
    estado_id = models.ForeignKey(Estado, on_delete=models.CASCADE, related_name="acciones")#Estado
    organismo_ws = models.ForeignKey(Organismo, on_delete=models.CASCADE, related_name="acciones")#Organismo
    descripcion = HTMLField()
    monto = models.IntegerField(default=0, blank=True, null=True)
    financiacion_id = models.ForeignKey(Financiacion, on_delete=models.CASCADE, related_name="acciones")#Financiacion
    # Geographic coordinates; 3 decimal places (~100 m precision).
    latitud = models.DecimalField(max_digits=8, decimal_places=3, blank=True, null=True)
    longitud = models.DecimalField(max_digits=8, decimal_places=3, blank=True, null=True)
    departamento_id = models.ForeignKey(Departamento, on_delete=models.CASCADE, related_name="acciones")#Departamento
    municipio_id = models.ForeignKey(Municipio, on_delete=models.CASCADE, related_name="acciones")#Municipio
    localidad_id = models.ForeignKey(Localidad, on_delete=models.CASCADE, related_name="acciones")#localidad
    # Soft-delete flag; rows are never physically removed through this flag.
    borrado = models.BooleanField(default=False)
    publicado = models.BooleanField('Publicable', default=False)
    fecha_creacion = models.DateField(default=datetime.date.today)
    def __str__(self):
        return self.nombre
    class Meta:
        # Most important actions first, then grouped by organismo id.
        ordering = ('-importancia', 'organismo', )
class Tipo_Comunicacion(models.Model):
    """Lookup table of communication types; referenced by Medio and Comunicacion."""
    nombre = models.CharField('Nombre', max_length=100)
    def __str__(self):
        return self.nombre
class Medio(models.Model):
    """A media outlet, categorised by communication type."""
    tipo = models.ForeignKey(Tipo_Comunicacion, on_delete=models.CASCADE)
    nombre = models.CharField('Nombre', max_length=100)
    def __str__(self):
        return self.nombre
class Comunicacion(models.Model):
    """A published communication (press note, post, etc.) about an action."""
    id_accion = models.ForeignKey(Acciones, on_delete=models.CASCADE, related_name="comunicaciones")
    tipo = models.ForeignKey(Tipo_Comunicacion, on_delete=models.CASCADE)
    medio = models.ForeignKey(Medio, on_delete=models.CASCADE)
    titulo = models.CharField('Titulo', max_length=100)
    descripcion = HTMLField(blank=True, null=True)
    url = models.URLField(blank=True, null=True)
    monto = models.IntegerField(default=0, blank=True, null=True)
    fecha_creacion = models.DateField(default=datetime.date.today)
    def __str__(self):
        # Shown as "<action name> > <communication title>" in the admin.
        return self.id_accion.nombre + ' > ' + self.titulo
from email.mime.text import MIMEText
from email.header import Header
from django.shortcuts import render, render_to_response, redirect
from django.template.context_processors import csrf
from django.contrib.auth.models import User
import smtplib
from .models import *
def open_server():
    """Open and authenticate an SMTP connection to Gmail.

    Returns a logged-in smtplib.SMTP instance; callers must release it
    with close_server().
    """
    # SECURITY NOTE(review): credentials are hard-coded in source control.
    # Move them to settings/environment variables and rotate the password.
    server = smtplib.SMTP('smtp.gmail.com', 587)  # host and port as separate args
    mail_sender = 'settarov.a.i15@gmail.com'
    username = mail_sender
    password = 'Ctnnfhjdfhba1998'
    server.starttls()  # upgrade to TLS before sending credentials
    server.ehlo()
    server.login(username, password)
    return server
def message_mail(server, message, for_worker):
    """Wrap *message* in a UTF-8 MIME envelope and send it to *for_worker*."""
    mime_msg = MIMEText(message, 'plain', 'utf-8')
    mime_msg['Subject'] = Header("Новый заказ для вас.", 'utf-8')
    server.sendmail("settarov.a.i15@gmail.com", str(for_worker), mime_msg.as_string())
def close_server(server):
    """Terminate the SMTP session previously opened by open_server()."""
    server.quit()
def send_for_workers(type, objects):
    """Mail each order in *objects* to every worker whose service types
    contain *type* and whose region covers the order's city.

    Each order is flagged as mailed and saved for every matching worker.
    """
    server = open_server()
    all_workers = Workers.objects.all()
    for order in objects:
        for worker in all_workers:
            # Worker must both offer this service type and serve the order's city.
            if type not in worker.type:
                continue
            if order.city not in worker.region:
                continue
            message_mail(server, order.message, worker.email)
            order.mailed = True
            order.save()
    close_server(server)
def create_email(type, model):
    """Send worker notifications for every not-yet-mailed row of *model*."""
    pending = [obj for obj in model.objects.all() if obj.mailed == False]
    send_for_workers(type, pending)
def check_mail_order():
    """Collect unmailed Orders and hand them to mail_order.

    NOTE(review): mail_order is not defined anywhere visible in this module —
    confirm it exists before relying on this function.
    """
    pending = [order for order in Orders.objects.all() if order.mailed == False]
    mail_order(pending)
# Create your views here.
def index(request):
    """Render the landing page with every Link_Section entry."""
    sections = Link_Section.objects.all()
    return render_to_response('mainpage.html', {'objects': sections})
def pokraska(request):
    """Render the wall-painting order form."""
    context = dict(csrf(request))
    return render_to_response('Orders/remont/pokraska_sten.html', context)
def pokraska_save(request):
    """Persist a wall-painting order from the POSTed form and notify workers.

    Always redirects to the home page, even when the request has no POST data.
    """
    if request.POST:
        ploshad = request.POST['ploshad']
        rooms = request.POST['rooms']
        potolki = request.POST['potolki']
        material_ot = request.POST['material_ot']
        details = request.POST['details']
        money = request.POST['money']
        city = request.POST['city']
        district = request.POST['district']
        phone = request.POST['phone']
        email = request.POST['email']
        # Substitute placeholder text for empty optional fields.
        if len(details)<=1:
            details = "Отсутсвуют"
        if len(money) == 0:
            money = "По договоренности"
        else:
            money = money+"р."
        # BUG FIX: a newline was missing between the budget line and the
        # location line; every sibling handler includes it.
        message = "Здравствуйте!\nМы нашли новый заказ для вас.\nДанные по заказу:\n" \
                  "Тип заказа: Побелить покрасить\nПлощадь: %s кв.м. Комнат: %s Покраска потолков: %s\n" \
                  "Материал предоставляет: %s\nПодробности:%s\nБюджет:%s\n" \
                  "Место работы: %s %s"%(ploshad,rooms,potolki,material_ot,details,money,city,district)
        # NOTE(review): `potolki` is used in the message but never passed to
        # the Pokraska model — confirm whether it should be persisted too.
        new_order = Pokraska(ploshad=ploshad,rooms=rooms, material_ot=material_ot,
                             details=details,money=money,city=city,district=district,
                             phone=phone,email=email, message=message)
        new_order.save()
        create_email("Побелить покрасить;", Pokraska)
    return redirect('/')
def uteplenie(request):
    """Render the insulation/cladding order form."""
    context = dict(csrf(request))
    return render_to_response('Orders/remont/obshivka.html', context)
def uteplenie_save(request):
    """Persist an insulation (утепление) order and notify matching workers."""
    if request.POST:
        ploshad = request.POST['ploshad']
        material = request.POST['material']
        otkosi = request.POST['otkosi']
        finish = request.POST['finish']
        # The finishing-material field is only read when a finish is requested.
        if finish == "Нужно":
            finish_material = request.POST['finish_material']
        else:
            finish_material = "Без покрытия"
        material_ot = request.POST['material_ot']
        details = request.POST['details']
        money = request.POST['money']
        city = request.POST['city']
        district = request.POST['district']
        phone = request.POST['phone']
        email = request.POST['email']
        # Substitute placeholder text for empty optional fields.
        if len(details)<=1:
            details = "Отсутсвуют"
        if len(money) == 0:
            money = "По договоренности"
        else:
            money = money+"р."
        message = "Здравствуйте!\nМы нашли новый заказ для вас.\nДанные по заказу:\n" \
                  "Тип заказа: Утепление\nПлощадь: %s кв.м.\nПредпочтительный материал:%s\n" \
                  "Откосы(в погонных метрах): %s\nФинишная отделка: %s, Матриал:%s\n" \
                  "Материал предоставляет: %s\nПодробности:%s\nБюджет:%s\n" \
                  "Место работы: %s %s" % (ploshad, material,otkosi,finish,finish_material, material_ot, details, money, city, district)
        new_order = Uteplenie(ploshad=ploshad,material=material,otkosi=otkosi,finish=finish,finish_material=finish_material,
                              material_ot=material_ot,
                              details=details, money=money, city=city, district=district,
                              phone=phone, email=email, message=message)
        new_order.save()
        # NOTE(review): "Утепление фасадо;" must match the strings stored in
        # Workers.type — confirm the spelling is intentional before changing it.
        create_email("Утепление фасадо;", Uteplenie)
    return redirect('/')
def design(request):
    """Render the interior-design order form."""
    context = dict(csrf(request))
    return render_to_response('Orders/remont/disign_interier.html', context)
def design_save(request):
    """Persist an interior-design order and notify matching workers.

    The two checkbox fields are optional, so their absence from the POST
    payload is expected and handled explicitly.
    """
    if request.POST:
        room_type = request.POST['room_type']
        # BUG FIX: the original used bare `except:`, which would also swallow
        # unrelated errors. A missing POST key raises MultiValueDictKeyError,
        # a subclass of KeyError, so catch exactly that.
        try:
            design_interier = request.POST['disign_interier']
        except KeyError:
            design_interier =""
        try:
            design_project = request.POST['disign_project']
        except KeyError:
            design_project = ""
        what_do = design_interier+";"+design_project
        if what_do ==";":
            what_do = "не поределено."
        details = request.POST['details']
        money = request.POST['money']
        city = request.POST['city']
        district = request.POST['district']
        phone = request.POST['phone']
        email = request.POST['email']
        # Substitute placeholder text for empty optional fields.
        if len(details) <= 1:
            details = "Отсутсвуют"
        if len(money) == 0:
            money = "По договоренности"
        else:
            money = money+"р."
        message = "Здравствуйте!\nМы нашли новый заказ для вас.\nДанные по заказу:\n" \
                  "Тип заказа: Дизайн интерьер/проект\n" \
                  "Тип помещения: %s\n" \
                  "Объем работ: %s\n" \
                  "Подробности:%s\nБюджет:%s\n" \
                  "Место работы: %s %s" % (room_type, what_do,details, money, city, district)
        new_order = Design(room_type=room_type,what_do=what_do,details=details, money=money, city=city, district=district,
                           phone=phone, email=email, message=message)
        new_order.save()
        create_email("Дизайн интерьер/проект;", Design)
    return redirect('/')
def otoplenie(request):
    """Render the heating order form."""
    context = dict(csrf(request))
    return render_to_response('Orders/remont/otoplenie.html', context)
def otoplenie_save(request):
    """Persist a heating (отопление) order and notify matching workers."""
    if request.POST:
        room_type = request.POST['room_type']
        ploshad = request.POST['ploshad']
        works = request.POST["works"]
        kotyol = request.POST["kotyol"]
        details = request.POST['details']
        money = request.POST['money']
        city = request.POST['city']
        district = request.POST['district']
        phone = request.POST['phone']
        email = request.POST['email']
        # Substitute placeholder text for empty optional fields.
        if len(details) <= 1:
            details = "Отсутсвуют"
        if len(money) == 0:
            money = "По договоренности"
        else:
            money = money+"р."
        if len(works)<=1:
            works = "Не определенно"
        if len(kotyol)<=1:
            kotyol = "Без котла"
        message = "Здравствуйте!\nМы нашли новый заказ для вас.\nДанные по заказу:\n" \
                  "Тип заказа: Отопление\nТип помещения: %s, Площадь: %s кв.м.\n" \
                  "Объем работ: %s\nТип котла: %s\n"\
                  "Подробности:%s\nБюджет:%s\n" \
                  "Место работы: %s %s" % (room_type, ploshad, works,kotyol,details, money, city, district)
        new_order = Otoplenie(room_type=room_type,ploshad=ploshad,works=works,kotyol=kotyol,details=details,money=money,
                              city=city,district=district,phone=phone,email=email,message=message)
        new_order.save()
        create_email("Отопление;", Otoplenie)
    return redirect("/")
def osteklenie_balkonov(request):
    """Render the balcony-glazing order form."""
    context = dict(csrf(request))
    return render_to_response("Orders/remont/osteklenie_balkonov.html", context)
def osteklenie_balkonov_save(request):
    """Persist a balcony-glazing order and notify matching workers."""
    if request.POST:
        ploshad = request.POST['ploshad']
        glassing_type = request.POST['glassing_type']
        etazh = request.POST['etazh']
        details = request.POST['details']
        money = request.POST['money']
        city = request.POST['city']
        district = request.POST['district']
        phone = request.POST['phone']
        email = request.POST['email']
        # Substitute placeholder text for empty optional fields.
        if len(details) <= 1:
            details = "Отсутсвуют"
        if len(money) == 0:
            money = "По договоренности"
        else:
            money = money + "р."
        message = "Здравствуйте!\nМы нашли новый заказ для вас.\nДанные по заказу:\n" \
                  "Тип заказа: Остекление балконов\nПлощадь: %s кв.м.\n" \
                  "Вид остекления: %s\nЭтаж: %s\n" \
                  "Подробности:%s\nБюджет:%s\n" \
                  "Место работы: %s %s" % (ploshad, glassing_type, etazh, details, money, city, district)
        new_order = Osteklenie_balkonov(ploshad=ploshad,glassing_type=glassing_type,etazh=etazh,
                                        details=details,money=money,city=city,district=district,
                                        phone=phone,email=email,message=message)
        new_order.save()
        create_email("Остекление балконов;", Osteklenie_balkonov)
    return redirect('/')
def kosmetik_remont(request):
    """Render the redecorating order form."""
    context = dict(csrf(request))
    return render_to_response('Orders/remont/kosmetik_remont.html', context)
def kosmetik_remont_save(request):
    """Persist a redecorating order and notify matching workers."""
    if request.POST:
        ploshad = request.POST['ploshad']
        rooms = request.POST['rooms']
        works = request.POST['works']
        details = request.POST['details']
        money = request.POST['money']
        city = request.POST['city']
        district = request.POST['district']
        phone = request.POST['phone']
        email = request.POST['email']
        # Substitute placeholder text for empty optional fields.
        if len(details) <= 1:
            details = "Отсутсвуют"
        if len(money) == 0:
            money = "По договоренности"
        else:
            money = money + "р."
        if len(works)<=1:
            works = "Не определено."
        message = "Здравствуйте!\nМы нашли новый заказ для вас.\nДанные по заказу:\n" \
                  "Тип заказа: Косметический ремонт\nПлощадь: %s кв.м.\n" \
                  "Область ремонта: %s\nОбъем работ: %s\n" \
                  "Подробности:%s\nБюджет:%s\n" \
                  "Место работы: %s %s" % (ploshad, rooms, works, details, money, city, district)
        new_order= Redecorating(ploshad=ploshad,rooms=rooms,works=works,details=details,money=money,city=city,
                                district=district,phone=phone,email=email,message=message)
        new_order.save()
        create_email("Косметический ремонт;", Redecorating)
    return redirect('/')
def plitka(request):
    """Render the tiling order form."""
    context = dict(csrf(request))
    return render_to_response("Orders/remont/plitka.html", context)
def plitka_save(request):
    """Persist a tiling order and notify matching workers."""
    if request.POST:
        ploshad = request.POST['ploshad']
        santehnika = request.POST['santehnika']
        material_ot = request.POST['material_ot']
        details = request.POST['details']
        money = request.POST['money']
        city = request.POST['city']
        district = request.POST['district']
        phone = request.POST['phone']
        email = request.POST['email']
        # Substitute placeholder text for empty optional fields.
        if len(details) <= 1:
            details = "Отсутсвуют"
        if len(money) == 0:
            money = "По договоренности"
        else:
            money = money + "р."
        message = "Здравствуйте!\nМы нашли новый заказ для вас.\nДанные по заказу:\n" \
                  "Тип заказа: Плитка\nПлощадь: %s кв.м.\n" \
                  "Установка сантехники: %s\n" \
                  "Материал предоставляет: %s\nПодробности:%s\nБюджет:%s\n" \
                  "Место работы: %s %s" % (ploshad, santehnika, material_ot, details, money, city, district)
        new_order = Plitka(ploshad=ploshad,santehnika=santehnika,material_ot=material_ot,details=details,
                           money=money,city=city,district=district,phone=phone,email=email,message=message)
        new_order.save()
        create_email("Плитка;", Plitka)
    return redirect("/")
def krovlya(request):
    """Render the roofing order form."""
    context = dict(csrf(request))
    return render_to_response("Orders/remont/krovlya.html", context)
def krovlya_save(request):
    """Persist a roofing order and notify matching workers."""
    if request.POST:
        ploshad = request.POST['ploshad']
        postavka_materiala = request.POST['postavka_materiala']
        dop = request.POST['dop']
        details = request.POST['details']
        money = request.POST['money']
        city = request.POST['city']
        district = request.POST['district']
        phone = request.POST['phone']
        email = request.POST['email']
        # Substitute placeholder text for empty optional fields.
        if len(details) <= 1:
            details = "Отсутсвуют"
        if len(dop)<=1:
            dop = "Отсутствуют"
        if len(money) == 0:
            money = "По договоренности"
        else:
            money = money + "р."
        message = "Здравствуйте!\nМы нашли новый заказ для вас.\nДанные по заказу:\n" \
                  "Тип заказа: Кровля\nПлощадь: %s кв.м.\n" \
                  "Поставка материла: %s\n" \
                  "Дополнительные элементы: %s\nПодробности:%s\nБюджет:%s\n" \
                  "Место работы: %s %s" % (ploshad, postavka_materiala, dop, details, money, city, district)
        new_order = Krovlya(ploshad=ploshad, postavka_materiala=postavka_materiala, dop=dop, details=details,
                            money=money, city=city, district=district, phone=phone, email=email, message=message)
        new_order.save()
        create_email("Кровля;", Krovlya)
    return redirect("/")
def poli(request):
    """Render the flooring order form."""
    context = dict(csrf(request))
    return render_to_response("Orders/remont/poli.html", context)
def poli_save(request):
    """Persist a flooring order and notify matching workers."""
    if request.POST:
        ploshad = request.POST['ploshad']
        type_pokr = request.POST['type_pokr']
        works = request.POST['works']
        material_ot = request.POST['material_ot']
        details = request.POST['details']
        money = request.POST['money']
        city = request.POST['city']
        district = request.POST['district']
        phone = request.POST['phone']
        email = request.POST['email']
        # Substitute placeholder text for empty optional fields.
        if len(details) <= 1:
            details = "Отсутсвуют"
        if len(works) <= 1:
            works = "Не определено"
        if len(money) == 0:
            money = "По договоренности"
        else:
            money = money + "р."
        message = "Здравствуйте!\nМы нашли новый заказ для вас.\nДанные по заказу:\n" \
                  "Тип заказа: Полы\nПлощадь: %s кв.м., Тип покрытия: %s\n" \
                  "Объем работ: %s\n" \
                  "Материал предоставляет: %s\nПодробности:%s\nБюджет:%s\n" \
                  "Место работы: %s %s" % (ploshad, type_pokr, works,material_ot, details, money, city, district)
        new_order = Poli(ploshad=ploshad,type_pokr=type_pokr,works=works,material_ot=material_ot,details=details,money=money, city=city,
                         district=district, phone=phone, email=email, message=message)
        new_order.save()
        create_email("Полы;", Poli)
    return redirect("/")
def pod_kluch(request):
    """Render the turnkey-works order form."""
    context = dict(csrf(request))
    return render_to_response("Orders/remont/pod_kluch.html", context)
def pod_kluch_save(request):
    """Persist a turnkey-works order and notify matching workers."""
    if request.POST:
        ploshad = request.POST['ploshad']
        room_type = request.POST['room_type']
        works = request.POST['works']
        details = request.POST['details']
        money = request.POST['money']
        city = request.POST['city']
        district = request.POST['district']
        phone = request.POST['phone']
        email = request.POST['email']
        # Substitute placeholder text for empty optional fields.
        if len(details) <= 1:
            details = "Отсутсвуют"
        if len(works) <= 1:
            works = "Не определено"
        if len(money) == 0:
            money = "По договоренности"
        else:
            money = money + "р."
        message = "Здравствуйте!\nМы нашли новый заказ для вас.\nДанные по заказу:\n" \
                  "Тип заказа: Работы под ключ\nПлощадь: %s кв.м., Тип помещения: %s\n" \
                  "Объем работ: %s\n" \
                  "Подробности:%s\nБюджет:%s\n" \
                  "Место работы: %s %s" % (ploshad, room_type, works,details, money, city, district)
        new_order = Raboti_pod_kluch(ploshad=ploshad,room_type=room_type,works=works,details=details,money=money, city=city,
                                     district=district, phone=phone, email=email, message=message)
        new_order.save()
        create_email("Работы под ключ;", Raboti_pod_kluch)
    return redirect("/")
def santehnika(request):
    """Render the plumbing order form."""
    context = dict(csrf(request))
    return render_to_response("Orders/remont/santehnika.html", context)
def santehnika_save(request):
    """Persist a plumbing order and notify matching workers."""
    if request.POST:
        work_type = request.POST['work_type']
        details = request.POST['details']
        money = request.POST['money']
        city = request.POST['city']
        district = request.POST['district']
        phone = request.POST['phone']
        email = request.POST['email']
        # Substitute placeholder text for empty optional fields.
        if len(details) <= 1:
            details = "Отсутсвуют"
        if len(money) == 0:
            money = "По договоренности"
        else:
            money = money + "р."
        message = "Здравствуйте!\nМы нашли новый заказ для вас.\nДанные по заказу:\n" \
                  "Тип заказа: Сантехника\n" \
                  "Вид работы: %s\n" \
                  "Подробности:%s\nБюджет:%s\n" \
                  "Место работы: %s %s" % (work_type, details, money, city, district)
        new_order = Santehnika(work_type=work_type,details=details,money=money, city=city,
                               district=district, phone=phone, email=email, message=message)
        new_order.save()
        create_email("Сантехника;", Santehnika)
    return redirect("/")
def potolki(request):
    """Render the ceilings order form."""
    context = dict(csrf(request))
    return render_to_response("Orders/remont/potolki.html", context)
def potolki_save(request):
    """Persist a ceilings order and notify matching workers."""
    if request.POST:
        ploshad = request.POST['ploshad']
        material = request.POST['material']
        isol = request.POST['isol']
        svet = request.POST['svet']
        details = request.POST['details']
        money = request.POST['money']
        city = request.POST['city']
        district = request.POST['district']
        phone = request.POST['phone']
        email = request.POST['email']
        # Substitute placeholder text for empty optional fields.
        if len(details) <= 1:
            details = "Отсутсвуют"
        if len(isol)<=1:
            isol = "Не нужна"
        if len(money) == 0:
            money = "По договоренности"
        else:
            money = money + "р."
        message = "Здравствуйте!\nМы нашли новый заказ для вас.\nДанные по заказу:\n" \
                  "Тип заказа: Потолки\nПлощадь: %s кв.м.\nМатериал: %s\nИзоляция: %s\nМонтаж светотехники: %s\n" \
                  "Подробности:%s\nБюджет:%s\n" \
                  "Место работы: %s %s" % (ploshad,material,isol,svet, details, money, city, district)
        new_order = Potolki(ploshad=ploshad,material=material,isol=isol, svet=svet,details=details,money=money, city=city,
                            district=district, phone=phone, email=email, message=message)
        new_order.save()
        create_email("Потолки;", Potolki)
    return redirect("/")
def gipsokarton_peregorodki(request):
    """Render the plasterboard-partitions order form."""
    context = dict(csrf(request))
    return render_to_response("Orders/remont/gipsokarton_peregorodki.html", context)
def gipsokarton_peregorodki_save(request):
    """Persist a plasterboard-partitions order and notify matching workers."""
    if request.POST:
        ploshad = request.POST['ploshad']
        finish = request.POST['finish']
        details = request.POST['details']
        money = request.POST['money']
        city = request.POST['city']
        district = request.POST['district']
        phone = request.POST['phone']
        email = request.POST['email']
        # Substitute placeholder text for empty optional fields.
        if len(details) <= 1:
            details = "Отсутсвуют"
        if len(money) == 0:
            money = "По договоренности"
        else:
            money = money + "р."
        message = "Здравствуйте!\nМы нашли новый заказ для вас.\nДанные по заказу:\n" \
                  "Тип заказа: Гипсокартонные перегородки\nПлощадь: %s кв.м.\nФинишная отделка: %s\n" \
                  "Подробности:%s\nБюджет:%s\n" \
                  "Место работы: %s %s" % (ploshad, finish, details, money, city, district)
        new_order = Gipsokarton_peregorodki(ploshad=ploshad, finish=finish, details=details, money=money,
                                            city=city,
                                            district=district, phone=phone, email=email, message=message)
        new_order.save()
        create_email("Гипсокартонные перегородки;", Gipsokarton_peregorodki)
    return redirect("/")
def remont_vannoy(request):
    """Render the bathroom-renovation order form."""
    context = dict(csrf(request))
    return render_to_response("Orders/remont/remont_vannoy.html", context)
def remont_vannoy_save(request):
    """Persist a bathroom-renovation order and notify matching workers."""
    if request.POST:
        ploshad = request.POST['ploshad']
        plitka = request.POST['plitka']
        santehnika = request.POST['santehnika']
        details = request.POST['details']
        money = request.POST['money']
        city = request.POST['city']
        district = request.POST['district']
        phone = request.POST['phone']
        email = request.POST['email']
        # Substitute placeholder text for empty optional fields.
        if len(details) <= 1:
            details = "Отсутсвуют"
        if len(money) == 0:
            money = "По договоренности"
        else:
            money = money + "р."
        message = "Здравствуйте!\nМы нашли новый заказ для вас.\nДанные по заказу:\n" \
                  "Тип заказа: Ремонт ванной\nПлощадь: %s кв.м.\nКладка плитки: %s\nУстановка сантехники: %s\n" \
                  "Подробности:%s\nБюджет:%s\n" \
                  "Место работы: %s %s" % (ploshad, plitka,santehnika, details, money, city, district)
        # NOTE(review): the keyword is spelled `santehika` (missing "n") — it
        # presumably matches a misspelled model field; confirm against models.py.
        new_order = Remont_vannoy(ploshad=ploshad, plitka=plitka,santehika=santehnika, details=details, money=money,
                                  city=city,
                                  district=district, phone=phone, email=email, message=message)
        new_order.save()
        create_email("Ремонт ванной;", Remont_vannoy)
    return redirect("/")
def reshetki(request):
    """Render the window/door-grilles order form."""
    context = dict(csrf(request))
    return render_to_response("Orders/remont/reshetki.html", context)
def reshetki_save(request):
    """Persist a window/door-grilles order and notify matching workers."""
    if request.POST:
        count = request.POST['count']
        # NOTE(review): the local `type` shadows the builtin; the model kwarg
        # of the same name forces keeping it here.
        type = request.POST['type']
        details = request.POST['details']
        money = request.POST['money']
        city = request.POST['city']
        district = request.POST['district']
        phone = request.POST['phone']
        email = request.POST['email']
        # Substitute placeholder text for empty optional fields.
        if len(details) <= 1:
            details = "Отсутсвуют"
        if len(money) == 0:
            money = "По договоренности"
        else:
            money = money + "р."
        message = "Здравствуйте!\nМы нашли новый заказ для вас.\nДанные по заказу:\n" \
                  "Тип заказа: Решетки на окна и двери.\nКоличество: %s шт.\nТип решеток: %s\n" \
                  "Подробности:%s\nБюджет:%s\n" \
                  "Место работы: %s %s" % (count, type, details, money, city, district)
        new_order = Reshetki(count=count, type=type, details=details, money=money,
                             city=city,
                             district=district, phone=phone, email=email, message=message)
        new_order.save()
        create_email("Решетки на окна;", Reshetki)
    return redirect("/")
def oboi(request):
    """Render the wallpapering order form."""
    context = dict(csrf(request))
    return render_to_response("Orders/remont/oboi.html", context)
def oboi_save(request):
    """Persist a wallpapering order and notify matching workers."""
    if request.POST:
        ploshad = request.POST['ploshad']
        details = request.POST['details']
        money = request.POST['money']
        city = request.POST['city']
        district = request.POST['district']
        phone = request.POST['phone']
        email = request.POST['email']
        # Substitute placeholder text for empty optional fields.
        if len(details) <= 1:
            details = "Отсутсвуют"
        if len(money) == 0:
            money = "По договоренности"
        else:
            money = money + "р."
        message = "Здравствуйте!\nМы нашли новый заказ для вас.\nДанные по заказу:\n" \
                  "Тип заказа: Поклейка обоев.\nПлощадь: %s кв.м.\n" \
                  "Подробности:%s\nБюджет:%s\n" \
                  "Место работы: %s %s" % (ploshad, details, money, city, district)
        new_order = Oboi(ploshad=ploshad, details=details, money=money,
                         city=city,
                         district=district, phone=phone, email=email, message=message)
        new_order.save()
        create_email("Поклейка обоев;", Oboi)
    return redirect("/")
def beton(request):
    """Render the concrete-works order form."""
    context = dict(csrf(request))
    return render_to_response("Orders/remont/beton.html", context)
def beton_save(request):
    """Persist a concrete-works order and notify matching workers."""
    if request.POST:
        ploshad = request.POST['ploshad']
        details = request.POST['details']
        money = request.POST['money']
        city = request.POST['city']
        district = request.POST['district']
        phone = request.POST['phone']
        email = request.POST['email']
        # Substitute placeholder text for empty optional fields.
        if len(details) <= 1:
            details = "Отсутсвуют"
        if len(money) == 0:
            money = "По договоренности"
        else:
            money = money + "р."
        message = "Здравствуйте!\nМы нашли новый заказ для вас.\nДанные по заказу:\n" \
                  "Тип заказа: Бетонные работы.\nПлощадь: %s кв.м.\n" \
                  "Подробности:%s\nБюджет:%s\n" \
                  "Место работы: %s %s" % (ploshad, details, money, city, district)
        new_order = Beton(ploshad=ploshad, details=details, money=money,
                          city=city,
                          district=district, phone=phone, email=email, message=message)
        new_order.save()
        create_email("Бетонные работы;", Beton)
    return redirect("/")
def natyazhnoi_potolok(request):
    """Render the stretch-ceiling order form."""
    context = dict(csrf(request))
    return render_to_response("Orders/remont/natyazhnoi_potolok.html", context)
def natyazhnoi_potolok_save(request):
    """Persist a stretch-ceiling order and notify matching workers."""
    if request.POST:
        ploshad = request.POST['ploshad']
        svet = request.POST['svet']
        details = request.POST['details']
        money = request.POST['money']
        city = request.POST['city']
        district = request.POST['district']
        phone = request.POST['phone']
        email = request.POST['email']
        # Substitute placeholder text for empty optional fields.
        if len(details) <= 1:
            details = "Отсутсвуют"
        if len(money) == 0:
            money = "По договоренности"
        else:
            money = money + "р."
        # NOTE(review): "светотежники" looks like a typo for "светотехники"
        # in the user-visible e-mail text — confirm and fix deliberately.
        message = "Здравствуйте!\nМы нашли новый заказ для вас.\nДанные по заказу:\n" \
                  "Тип заказа: Натяжной потолок.\nПлощадь: %s кв.м.\n Монтаж светотежники: %s\n" \
                  "Подробности:%s\nБюджет:%s\n" \
                  "Место работы: %s %s" % (ploshad, svet,details, money, city, district)
        new_order = Natyazhnoi_potolok(ploshad=ploshad,svet=svet, details=details, money=money,
                                       city=city,
                                       district=district, phone=phone, email=email, message=message)
        new_order.save()
        create_email("Натяжной потолок;", Natyazhnoi_potolok)
    return redirect("/")
def login(request):
    """Render the login page."""
    context = dict(csrf(request))
    return render_to_response('login.html', context)
def register(request):
    """Render the worker registration page."""
    context = dict(csrf(request))
    return render_to_response('register.html', context)
def register_create_user(request):
    """Create a Django auth user plus a Workers profile from the signup form,
    then redirect to the login page."""
    if request.POST:
        surname = request.POST["surname"]
        name = request.POST["name"]
        email = request.POST["email"]
        phone = request.POST["phone"]
        snils = request.POST["snils"]
        password = request.POST["password"]
        works = request.POST["works"]
        region = request.POST["city"]
        # BUG FIX: User(password=...) stores the raw string, so Django's
        # authenticate() (which compares hashes) would never accept it.
        # set_password() hashes it with the configured password hasher.
        new_user = User(username=email)
        new_user.set_password(password)
        # SECURITY NOTE(review): Workers still keeps the plain-text password;
        # consider dropping that field and relying on the auth user instead.
        new_worker = Workers(surname=surname,name=name,email=email,telephone=phone,SNILS=snils,password=password,
                             type=works,region=region)
        new_user.save()
        new_worker.save()
    return redirect('/login/')
|
#=============================================================================
# This script looks at the mean climate patterns during drought events.
# author: Michael P. Erb
# date : 12/12/2019
#=============================================================================
import sys
sys.path.append('/home/mpe32/analysis/general_lmr_analysis/python_functions')
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import numpy.ma as ma
import xarray as xr
import compute_regional_means
from scipy import stats
import seaborn as sns
import pandas as pd
# Write figures to disk instead of displaying them interactively.
save_instead_of_plot = True
# Criterion used to classify drought years; alternatives kept below for reference.
drought_criteria_txt = 'standard_dev_timevarying'
#drought_criteria_txt = 'standard_dev'
#drought_criteria_txt = '90th_percentile'
#drought_criteria_txt = '95th_percentile'
### LOAD DATA
# Load data from the production run
data_dir = '/projects/pd_lab/data/LMR/archive_output/'
experiment_name1 = 'productionFinal_gisgpcc_ccms4_LMRdbv0.4.0'
# The same experiment supplies the SST and 500 hPa height fields.
experiment_name2 = experiment_name1
# PDSI drought index; axis 0 is time, axis 1 the Monte-Carlo iteration
# (see the axis-1 mean labelled "mean of all iterations" below).
handle = xr.open_dataset(data_dir+experiment_name1+'/pdsi_MCruns_ensemble_mean.nc',decode_times=False)
scpdsi_all = handle['pdsi'].values
lon = handle['lon'].values
lat = handle['lat'].values
time = handle['time'].values
handle.close()
handle = xr.open_dataset(data_dir+experiment_name2+'/sst_MCruns_ensemble_mean.nc',decode_times=False)
sst_all = handle['sst'].values
handle.close()
handle = xr.open_dataset(data_dir+experiment_name2+'/hgt500_MCruns_ensemble_mean.nc',decode_times=False)
zg_500hPa_all = handle['hgt500'].values
handle.close()
# Time is stored in days (decode_times=False); convert to integer years.
years_all = time/365
years_all = years_all.astype(int)
# Open the landmask file
handle = xr.open_dataset('/home/mpe32/analysis/5_drought/masks/output/oceanmask_landmask_lmr.nc',decode_times=False)
oceanmask = handle['oceanmask'].values
handle.close()
# Load the calculated climate indices
data_dir_new = '/projects/pd_lab/data/LMR/archive_output/production_indices/'
handle = xr.open_dataset(data_dir_new+'/posterior_climate_indices_MCruns_ensemble_full_LMRv2.0.nc',decode_times=False)
nino34_all_ens = handle['nino34'].values
soi_all_ens = handle['soi'].values
amo_all_ens = handle['amo'].values
pdo_all_ens = handle['pdo'].values
handle.close()
### CALCULATIONS
# Calculate the means of the climate indices over all ensemble members
nino34_all = np.mean(nino34_all_ens,axis=2)
soi_all = np.mean(soi_all_ens,axis=2)
amo_all = np.mean(amo_all_ens,axis=2)
pdo_all = np.mean(pdo_all_ens,axis=2)
# Shorten the data to cover only the desired years.
year_bounds = [1001,2000]
indices_chosen = np.where((years_all >= year_bounds[0]) & (years_all <= year_bounds[1]))[0]
scpdsi_all = scpdsi_all[indices_chosen,:,:,:]
sst_all = sst_all[indices_chosen,:,:,:]
zg_500hPa_all = zg_500hPa_all[indices_chosen,:,:,:]
nino34_all = nino34_all[indices_chosen,:]
soi_all = soi_all[indices_chosen,:]
amo_all = amo_all[indices_chosen,:]
pdo_all = pdo_all[indices_chosen,:]
years = years_all[indices_chosen]
# Remove the mean values for each quantity (anomalies relative to the
# 1001-2000 time mean; [None,...] broadcasts the mean back over time).
scpdsi_all = scpdsi_all - np.mean(scpdsi_all, axis=0)[None,:,:,:]
sst_all = sst_all - np.mean(sst_all, axis=0)[None,:,:,:]
zg_500hPa_all = zg_500hPa_all - np.mean(zg_500hPa_all,axis=0)[None,:,:,:]
nino34_all = nino34_all - np.mean(nino34_all, axis=0)[None,:]
soi_all = soi_all - np.mean(soi_all, axis=0)[None,:]
amo_all = amo_all - np.mean(amo_all, axis=0)[None,:]
pdo_all = pdo_all - np.mean(pdo_all, axis=0)[None,:]
# Compute a mean of all iterations
scpdsi_mean = np.mean(scpdsi_all, axis=1)
sst_mean = np.mean(sst_all, axis=1)
zg_500hPa_mean = np.mean(zg_500hPa_all,axis=1)
nino34_mean = np.mean(nino34_all, axis=1)
soi_mean = np.mean(soi_all, axis=1)
amo_mean = np.mean(amo_all, axis=1)
pdo_mean = np.mean(pdo_all, axis=1)
#plt.contourf(np.mean(scpdsi_mean,axis=0)); plt.colorbar()
# Detrend everything
# NOTE(review): no detrending code follows the comment above -- confirm
# whether detrending was intended here or handled elsewhere.
# Mask the variables
scpdsi_mean = scpdsi_mean*oceanmask[None,:,:]
scpdsi_mean = ma.masked_invalid(scpdsi_mean)
# Compute average of PDSI from atlas and LMR for the entire region as well as the four regions used in Cook et al. 2014.
pdsi_mean_regions = compute_regional_means.compute_US_means(scpdsi_mean,lat,lon)
#pdsi_region_selected = pdsi_mean_regions['southwest']
def drought_means(pdsi_region_selected):
    """Classify each year as drought / non-drought / pluvial and composite the
    mean climate fields over the drought years.

    pdsi_region_selected -- 1-D regional-mean PDSI series, one value per year
    (same time axis as the module-level *_mean fields).
    Reads the module-level drought_criteria_txt to choose the threshold rule
    and scpdsi_mean / sst_mean / zg_500hPa_mean for the composites.
    Returns (drought_indices, nondrought_indices, pluvial_indices,
    scpdsi_in_drought, sst_in_drought, zg_500hPa_in_drought).
    """
    #
    global drought_criteria_txt,scpdsi_mean,sst_mean,zg_500hPa_mean
    #
    # Select the right indices for drought
    if drought_criteria_txt == 'standard_dev_timevarying':
        # Time-varying criterion: each year is judged against a +/-25-year
        # window, clamped to a fixed 51-year window at the series ends
        # (assumes nyears >= 51 -- TODO confirm for short inputs).
        nyears = len(pdsi_region_selected)
        segment_length_oneside = 25
        drought_indices = []
        nondrought_indices = []
        pluvial_indices = []
        for i in range(nyears):
            seg_start = i-segment_length_oneside
            seg_end = i+segment_length_oneside
            if seg_start < 0: seg_start = 0; seg_end = 50
            if seg_end > nyears-1: seg_start = nyears-51; seg_end = nyears-1
            print(i,seg_start,seg_end)
            # Anomaly of this year, and of the window, relative to the window mean.
            pdsi_selected = pdsi_region_selected[i] - np.mean(pdsi_region_selected[seg_start:seg_end+1])
            pdsi_region_selected_seg = pdsi_region_selected[seg_start:seg_end+1] - np.mean(pdsi_region_selected[seg_start:seg_end+1])
            # pdsi_region_selected_seg = pdsi_region_selected[seg_start:seg_end+1]
            drought_criteria = -1*np.std(pdsi_region_selected_seg)   # threshold = -1 sigma (negative)
            #
            if pdsi_selected < drought_criteria: drought_indices.append(i)
            if pdsi_selected >= drought_criteria: nondrought_indices.append(i)
            if pdsi_selected > -1*drought_criteria: pluvial_indices.append(i)   # i.e. above +1 sigma
            #
        drought_indices = np.array(drought_indices)
        nondrought_indices = np.array(nondrought_indices)
        pluvial_indices = np.array(pluvial_indices)
        #
    else:
        # Fixed threshold over the whole series.
        if drought_criteria_txt == 'standard_dev': drought_criteria = -1*np.std(pdsi_region_selected)
        elif drought_criteria_txt == '90th_percentile': drought_criteria = np.percentile(pdsi_region_selected,10)
        elif drought_criteria_txt == '95th_percentile': drought_criteria = np.percentile(pdsi_region_selected,5)
        #
        # Select the right indices
        # NOTE(review): for the percentile criteria the pluvial threshold
        # (-1*drought_criteria) is the sign-mirror of the drought one, not a
        # true upper percentile -- confirm this asymmetry is intended.
        drought_indices = np.where(pdsi_region_selected < drought_criteria)[0]
        nondrought_indices = np.where(pdsi_region_selected >= drought_criteria)[0]
        pluvial_indices = np.where(pdsi_region_selected > -1*drought_criteria)[0]
    #
    # Compute means over the drought indices
    scpdsi_in_drought = np.mean(scpdsi_mean[drought_indices,:,:], axis=0)
    sst_in_drought = np.mean(sst_mean[drought_indices,:,:], axis=0)
    zg_500hPa_in_drought = np.mean(zg_500hPa_mean[drought_indices,:,:],axis=0)
    #
    return drought_indices,nondrought_indices,pluvial_indices,scpdsi_in_drought,sst_in_drought,zg_500hPa_in_drought
# Find the indices which are in drought at each time point, for every region.
regions = pdsi_mean_regions.keys()
drought_indices_all = {}; nondrought_indices_all = {}; scpdsi_in_drought_all = {}; sst_in_drought_all = {}; zg_500hPa_in_drought_all = {}
for region in regions:
    drought_indices_all[region],nondrought_indices_all[region],_,scpdsi_in_drought_all[region],sst_in_drought_all[region],zg_500hPa_in_drought_all[region] = drought_means(pdsi_mean_regions[region])
# Calculate the percentage of time that drought years have a negative Nino3.4 index, for each region.
# NOTE(review): the all-years baseline compares against 0 while the per-region
# figures compare against the series mean; after the anomaly step above these
# should be nearly identical, but they are not literally the same criterion.
percent_LaNina_allyears = (sum(nino34_mean < 0) / len(nino34_mean))*100
print('==============================================')
print('Percentage of years with below-average Nino3.4')
print('==============================================')
print('All years: '+str(percent_LaNina_allyears))
for region in ['northwest','southwest','central','southeast']:
    nino34_selected = nino34_mean[drought_indices_all[region]]
    percent_LaNina_selected = (sum(nino34_selected < np.mean(nino34_mean)) / len(nino34_selected))*100
    print('Drought years in '+region+' US: '+str(percent_LaNina_selected))
### FIGURES
plt.style.use('ggplot')
# Make a time series of regional PDSI with every "drought" year marked.
for region in ['northwest','southwest','central','southeast']:
    #
    f = plt.figure(figsize=(16,4))
    print(region,drought_indices_all[region].shape)
    # Draw the drought-year markers first so the PDSI curve plots on top.
    for drought_index in drought_indices_all[region]:
        plt.axvline(x=years[drought_index],c='orangered')
    #
    plt.plot(years,pdsi_mean_regions[region],c='k')
    plt.xlim(year_bounds)
    plt.xlabel('Year (C.E.)')
    plt.ylabel('PDSI')
    plt.title('Regional drought for the '+region+' U.S., with drought years marked',fontsize=24)
    if save_instead_of_plot == True:
        plt.savefig('figures/pdsi_ts_'+region+'_'+drought_criteria_txt+'.png',dpi=300,format='png')
        plt.close()
    else:
        plt.show()
# Specify the region to plot over
calc_bounds = [-25,70,90,360]   # [lat_min, lat_max, lon_min, lon_max] -- TODO confirm ordering
# Map
m = Basemap(projection='cyl',llcrnrlat=calc_bounds[0],urcrnrlat=calc_bounds[1],llcrnrlon=calc_bounds[2],urcrnrlon=calc_bounds[3],resolution='c')
lon_2d,lat_2d = np.meshgrid(lon,lat)
x, y = m(lon_2d,lat_2d)
# Plot the correlation between the region of interest and the selected variable everywhere.
f = plt.figure(figsize=(15,12))
# Axes layout: one row per region; a wide map panel (grid cols 0-2) plus
# three violin panels (cols 3-5). Keys in `ax` are 4*row + panel-slot.
ax = {}
ax[0] = plt.subplot2grid((4,6),(0,0),colspan=3)
ax[4] = plt.subplot2grid((4,6),(1,0),colspan=3)
ax[8] = plt.subplot2grid((4,6),(2,0),colspan=3)
ax[12] = plt.subplot2grid((4,6),(3,0),colspan=3)
ax[1] = plt.subplot2grid((4,6),(0,3)); ax[2] = plt.subplot2grid((4,6),(0,4)); ax[3] = plt.subplot2grid((4,6),(0,5))
ax[5] = plt.subplot2grid((4,6),(1,3)); ax[6] = plt.subplot2grid((4,6),(1,4)); ax[7] = plt.subplot2grid((4,6),(1,5))
ax[9] = plt.subplot2grid((4,6),(2,3)); ax[10] = plt.subplot2grid((4,6),(2,4)); ax[11] = plt.subplot2grid((4,6),(2,5))
ax[13] = plt.subplot2grid((4,6),(3,3)); ax[14] = plt.subplot2grid((4,6),(3,4)); ax[15] = plt.subplot2grid((4,6),(3,5))
regions_to_plot = ['northwest','southwest','central','southeast']
regions_to_plot_titles = ['a) Northwest','b) Southwest','c) Central','d) Southeast']
for i,region in enumerate(regions_to_plot):
    #
    # Left panel: composite-mean PDSI (filled, land), SST (filled, ocean) and
    # 500 hPa height (contours) over this region's drought years.
    ax[0+(4*i)].set_title(regions_to_plot_titles[i]+' U.S. drought, mean conditions',fontsize=20,loc='left')
    m = Basemap(projection='cyl',llcrnrlat=calc_bounds[0],urcrnrlat=calc_bounds[1],llcrnrlon=calc_bounds[2],urcrnrlon=calc_bounds[3],resolution='c',ax=ax[0+(4*i)])
    image1 = m.contourf(x,y,scpdsi_in_drought_all[region], np.linspace(-2.5,2.5,11),extend='both',cmap='BrBG', vmin=-2.5,vmax=2.5)
    image2 = m.contourf(x,y,sst_in_drought_all[region], np.linspace(-.25,.25,11),extend='both',cmap='RdBu_r',vmin=-.25,vmax=.25)
    image3 = m.contour( x,y,zg_500hPa_in_drought_all[region],np.linspace(-20,20,21),colors='k',linewidths=1)
    m.drawparallels([0],labels=[True],fontsize=12)
    m.drawcoastlines()
    cbar1 = m.colorbar(image1,location='bottom')
    cbar2 = m.colorbar(image2)
    cbar1.ax.tick_params(labelsize=12)
    cbar2.ax.tick_params(labelsize=12)
    #
    # Welch t-tests: drought vs non-drought years for each climate index.
    stat_nino34,pvalue_nino34 = stats.ttest_ind(nino34_mean[drought_indices_all[region]],nino34_mean[nondrought_indices_all[region]],axis=0,equal_var=False,nan_policy='propagate')
    stat_pdo, pvalue_pdo = stats.ttest_ind(pdo_mean[drought_indices_all[region]], pdo_mean[nondrought_indices_all[region]], axis=0,equal_var=False,nan_policy='propagate')
    stat_amo, pvalue_amo = stats.ttest_ind(amo_mean[drought_indices_all[region]], amo_mean[nondrought_indices_all[region]], axis=0,equal_var=False,nan_policy='propagate')
    # A '*' in the panel title marks differences that are NOT significant at 5%.
    note_nino34 = ''; note_pdo = ''; note_amo = ''
    if pvalue_nino34 >= 0.05: note_nino34 = '*'
    if pvalue_pdo >= 0.05: note_pdo = '*'
    if pvalue_amo >= 0.05: note_amo = '*'
    print(pvalue_nino34,pvalue_pdo,pvalue_amo)
    #
    # Long-format dataframes for seaborn's split violins (D vs ND years).
    n_drought = len(drought_indices_all[region])
    n_nondrought = len(nondrought_indices_all[region])
    df_nino34_drought = pd.DataFrame({'Index':['Nino34']*n_drought, 'Drought':['D']*n_drought, 'Value':nino34_mean[drought_indices_all[region]]})
    df_nino34_nondrought = pd.DataFrame({'Index':['Nino34']*n_nondrought,'Drought':['ND']*n_nondrought,'Value':nino34_mean[nondrought_indices_all[region]]})
    df_pdo_drought = pd.DataFrame({'Index':['PDO']*n_drought, 'Drought':['D']*n_drought, 'Value':pdo_mean[drought_indices_all[region]]})
    df_pdo_nondrought = pd.DataFrame({'Index':['PDO']*n_nondrought, 'Drought':['ND']*n_nondrought,'Value':pdo_mean[nondrought_indices_all[region]]})
    df_amo_drought = pd.DataFrame({'Index':['AMO']*n_drought, 'Drought':['D']*n_drought, 'Value':amo_mean[drought_indices_all[region]]})
    df_amo_nondrought = pd.DataFrame({'Index':['AMO']*n_nondrought, 'Drought':['ND']*n_nondrought,'Value':amo_mean[nondrought_indices_all[region]]})
    df_nino34 = pd.concat([df_nino34_drought,df_nino34_nondrought])
    df_pdo = pd.concat([df_pdo_drought,df_pdo_nondrought])
    df_amo = pd.concat([df_amo_drought,df_amo_nondrought])
    #
    sns.violinplot(x='Index',y='Value',hue='Drought',split=True,inner='quart',palette={'D':'tab:brown','ND':'tab:green'},data=df_nino34,ax=ax[1+(4*i)])
    sns.violinplot(x='Index',y='Value',hue='Drought',split=True,inner='quart',palette={'D':'tab:brown','ND':'tab:green'},data=df_pdo, ax=ax[2+(4*i)])
    sns.violinplot(x='Index',y='Value',hue='Drought',split=True,inner='quart',palette={'D':'tab:brown','ND':'tab:green'},data=df_amo, ax=ax[3+(4*i)])
    ax[1+(4*i)].set_title('Nino3.4'+note_nino34,fontsize=20)
    ax[2+(4*i)].set_title('PDO'+note_pdo,fontsize=20)
    ax[3+(4*i)].set_title('AMO'+note_amo,fontsize=20)
    ax[1+(4*i)].set_ylim(-2,2)
    ax[2+(4*i)].set_ylim(-4,4)
    ax[3+(4*i)].set_ylim(-.25,.25)
    ax[1+(4*i)].tick_params(axis='both',which='major',labelsize=16)
    ax[2+(4*i)].tick_params(axis='both',which='major',labelsize=16)
    ax[3+(4*i)].tick_params(axis='both',which='major',labelsize=16)
    #
    # Only the first violin panel of the first row keeps its legend.
    for j in range(1,4):
        if (i == 0) & (j == 1):
            # ax[j+(4*i)].legend(loc=4)
            ax[j+(4*i)].legend(loc=4,bbox_to_anchor=(1.1,0))
        else:
            ax[j+(4*i)].get_legend().remove()
        ax[j+(4*i)].set_xlabel('')
        ax[j+(4*i)].set_ylabel('')
        ax[j+(4*i)].set_xticklabels([''])
f.suptitle('Climate conditions during regional drought',fontsize=24)
f.tight_layout()
f.subplots_adjust(top=.9)
if save_instead_of_plot == True:
    plt.savefig('figures/climate_in_regional_drought_'+drought_criteria_txt+'.png',dpi=300,format='png')
    plt.close()
else:
    plt.show()
"""
# Save the drought years
output_dir = '/home/mpe32/analysis/5_drought/revisions_paper_v2/data/'
drought_years_northwest = years[drought_indices_all['northwest']]
drought_years_southwest = years[drought_indices_all['southwest']]
drought_years_central = years[drought_indices_all['central']]
drought_years_southeast = years[drought_indices_all['southeast']]
np.savez(output_dir+'drought_years.npz',drought_years_northwest=drought_years_northwest,drought_years_southwest=drought_years_southwest,drought_years_central=drought_years_central,drought_years_southeast=drought_years_southeast)
"""
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-04-19 07:27
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Introduces the Executable model, renames the cmtconfig columns, and
    drops the legacy old_id / is_standalone bookkeeping fields."""

    dependencies = [
        ('lhcbpr_api', '0001_initial'),
    ]

    operations = [
        # New model: a named executable with a content string.
        migrations.CreateModel(
            name='Executable',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('content', models.CharField(max_length=200)),
            ],
        ),
        # cmtconfig renames -- note the two models get DIFFERENT new names.
        migrations.RenameField(
            model_name='platform',
            old_name='cmtconfig',
            new_name='content',
        ),
        migrations.RenameField(
            model_name='requestedplatform',
            old_name='cmtconfig',
            new_name='config',
        ),
        # Drop the migration-era 'old_id' column from every model that had one.
        migrations.RemoveField(
            model_name='addedresult',
            name='old_id',
        ),
        migrations.RemoveField(
            model_name='attribute',
            name='old_id',
        ),
        migrations.RemoveField(
            model_name='handler',
            name='old_id',
        ),
        migrations.RemoveField(
            model_name='handlerresult',
            name='old_id',
        ),
        migrations.RemoveField(
            model_name='host',
            name='old_id',
        ),
        migrations.RemoveField(
            model_name='job',
            name='old_id',
        ),
        migrations.RemoveField(
            model_name='jobdescription',
            name='old_id',
        ),
        migrations.RemoveField(
            model_name='jobhandler',
            name='old_id',
        ),
        migrations.RemoveField(
            model_name='jobresult',
            name='old_id',
        ),
        migrations.RemoveField(
            model_name='option',
            name='is_standalone',
        ),
        migrations.RemoveField(
            model_name='option',
            name='old_id',
        ),
        migrations.RemoveField(
            model_name='platform',
            name='old_id',
        ),
        migrations.RemoveField(
            model_name='setupproject',
            name='old_id',
        ),
        migrations.RemoveField(
            model_name='requestedplatform',
            name='old_id',
        ),
        # Uniqueness now keyed on the renamed 'config' column.
        migrations.AlterUniqueTogether(
            name='requestedplatform',
            unique_together=set([('job_description', 'config')]),
        ),
        # Link job descriptions and options to the new Executable model.
        migrations.AddField(
            model_name='jobdescription',
            name='executable',
            field=models.ForeignKey(db_index=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='job_descriptions', to='lhcbpr_api.Executable'),
        ),
        migrations.AddField(
            model_name='option',
            name='executable',
            field=models.ForeignKey(db_index=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='options', to='lhcbpr_api.Executable'),
        ),
    ]
|
from django.contrib import admin
from pruebas.models import BaseTestResult, DocumentTestResult, \
FrameAnnotation, DocumentAnnotation
# Register every test-result / annotation model with the default admin site.
# One loop instead of four identical register calls; same registration order.
for _model in (BaseTestResult, FrameAnnotation, DocumentTestResult, DocumentAnnotation):
    admin.site.register(_model)
|
import gobject
import pygst
pygst.require("0.10")
import gst
import settings
import webservice
import time
INIT_VOLUME = 0.5
class ClientPlay:
    """GStreamer playback client: streams audio from a URL obtained via the
    web service.

    On an N800 device it builds an explicit gnomevfssrc -> dspmp3sink
    pipeline (with software volume); elsewhere it uses a generic playbin.
    on_eos / on_error are callbacks supplied by the owner.
    """
    def __init__ (self, on_eos, on_error):
        self.on_eos = on_eos          # called when the stream reaches end-of-stream
        self.on_error = on_error      # called with a user-facing message on error
        self.is_playing = False
        self.volume = INIT_VOLUME
        self.pipeline = None
        self.watch_id = None          # bus "message" handler id, needed for teardown
        self.dspmp3sink = None
    def play (self, request):
        """Ask the web service for a stream URL and start playing it."""
        self.stream_url = webservice.invoke("request_stream", [request])
        self.pipeline = gst.Pipeline()
        if settings.config.get("client","device") == "N800":
            # N800: explicit source -> DSP mp3 sink, volume applied up front.
            self.gnomevfssrc = gst.element_factory_make("gnomevfssrc")
            self.gnomevfssrc.set_property("location", self.stream_url)
            self.dspmp3sink = gst.element_factory_make("dspmp3sink")
            self.dspmp3sink.set_property("fvolume", self.volume)
            self.pipeline.add(self.gnomevfssrc, self.dspmp3sink)
            gst.element_link_many(self.gnomevfssrc, self.dspmp3sink)
        else:
            # Generic platforms: playbin picks its own source and sink.
            # NOTE(review): set_volume() only touches dspmp3sink, so volume
            # changes are ignored on this branch -- confirm if intended.
            playbin = gst.element_factory_make("playbin")
            playbin.set_property("uri", self.stream_url)
            self.pipeline.add(playbin)
        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.watch_id = self.bus.connect("message", self.get_message)
        self.is_playing = True
        self.pipeline.set_state(gst.STATE_PLAYING)
    def get_message (self, bus, message):
        """Bus callback: stop and report on error; forward end-of-stream."""
        #print message.src.get_name() + str(message.type)
        if message.type == gst.MESSAGE_ERROR:
            err, debug = message.parse_error()
            print "Error from " + message.src.get_name() \
                + ": " + str(err) + " debug: " + debug
            self.stop()
            self.on_error(
                "Sorry, there was a network error. " \
                + "Please try again in a few seconds.")
        elif message.type == gst.MESSAGE_EOS:
            self.on_eos()
    def stop (self):
        """Tear down the bus watch and pipeline; safe to call when idle."""
        self.is_playing = False
        if self.pipeline:
            if self.watch_id:
                # Disconnect before nulling the pipeline so no late messages fire.
                self.pipeline.get_bus().remove_signal_watch()
                self.pipeline.get_bus().disconnect(self.watch_id)
                self.watch_id = None
            self.pipeline.set_state(gst.STATE_NULL)
            self.pipeline = None
    def get_is_playing (self):
        """Return True while a stream is (believed to be) playing."""
        return self.is_playing
    def set_volume (self, volume):
        """Set the DSP sink volume (N800 only; no-op during playbin playback)."""
        if self.dspmp3sink:
            self.dspmp3sink.set_property("fvolume", volume)
    def skip_ahead (self):
        """Tell the web service to skip ahead in the current stream."""
        if self.pipeline:
            webservice.invoke("skip_ahead", [{ "stream_url" : self.stream_url }])
|
import csv
import time
# data comes from: https://www.kaggle.com/wendykan/lending-club-loan-data
# Column 2 is : loan_amnt
# Column 3 is : funded_amnt
# Column 5 is : term
# Column 6 is : int_rate
# Column 8 is : grade
# Column 10 is : emp_title
# Column 13 is : annual_inc
# Column 16 is : loan_status
# Column 20 is : purpose
# Column 21 is : title
# since the dataset has no memeber id
# we cannot match the member with specific grade
# therefore, we calculate the mean grade and apply it fo all users
# Accumulators for the columns read out of loan.csv (see column map above).
# Related to loans
loan_request = []    # loan_amnt    (column 2)
lend_issued = []     # funded_amnt  (column 3)
loan_term = []       # term, months (column 5)
loan_purpose = []    # purpose      (column 20)
loan_title = []      # title        (column 21)
# Related to user credit and interest rate
interest_rate = []   # int_rate     (column 6)
user_grade = []      # grade mapped to a numeric score (column 8)
# Related to user profile
emp_title = []       # emp_title    (column 10)
# BUG FIX: this list was declared as 'annula_income' (typo) while the reader
# loop appends to 'annual_inc', so every data row raised a NameError that the
# broad except silently swallowed and no income data was ever collected.
annual_inc = []      # annual_inc   (column 13)
time1 = time.time()  # wall-clock start; elapsed time reported at the end
with open('loan.csv') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=',')
    line_count = 0
    user_dict = {}   # tally of header-row column-1 values (see else branch)
    for row in csv_reader:
        # Early-exit scaffold left disabled ('pass'): ALL rows are processed.
        if line_count>10000:
            pass
            # break
        if line_count != 0:
            try:
                loan_request.append(float(row[2]))
                lend_issued.append(float(row[3]))
                # The term column carries a leading space, e.g. ' 36 months'.
                if row[5]==' 36 months':
                    loan_term.append(36)
                elif row[5]==' 60 months':
                    loan_term.append(60)
                else:
                    loan_term.append(0)
                loan_purpose.append(row[20])
                loan_title.append(row[21])
                interest_rate.append(float(row[6]))
                # Map the letter grade to a coarse numeric score.
                if row[8]=='A':
                    user_grade.append(20)
                elif row[8]=='B':
                    user_grade.append(15)
                elif row[8]=='C':
                    user_grade.append(10)
                elif row[8]=='D':
                    user_grade.append(5)
                else:
                    user_grade.append(0)
                emp_title.append(row[10])
                annual_inc.append(row[13])
            except Exception as e:
                # NOTE(review): malformed rows are dropped mid-append, which
                # can leave the lists with slightly different lengths -- confirm.
                pass
                # print(f'Column names are {", ".join(row)}')
            # Disabled column-exploration scaffold, kept from development.
            for i in range(1,200):
                try:
                    pass
                    # loa n_request.append(row[])
                    # print("Column ",i," is : ",row[i])
                except:
                    break
                # print('Column: 1.',row[0],"2.",row[1],"3.",row[2],"4.",row[3])
            line_count += 1
        else:
            # Header row: count occurrences of the second column's name.
            # print(f'\t{row[0]} works in the {row[1]} department, and was born in {row[2]}.')
            # print((row[1]))
            # print(str(row[1]))
            try:
                user_dict[str(row[1])] += 1
            except:
                user_dict[str(row[1])] = 1
            line_count += 1
    print(f'Processed {line_count} lines.')
# print(user_grade)
# Print summary statistics (min / max / mean / share >= mean) for each series.
series_names = ['loan_request','lend_issued','loan_term','interest_rate','user_grade']
series_list = [loan_request,lend_issued,loan_term,interest_rate,user_grade]
for series_name, series in zip(series_names, series_list):
    average = sum(series)/len(series)
    print(series_name,' info: ')
    print('min: ',min(series))
    print('max: ',max(series))
    print('average: ',average)
    above_avg = [value for value in series if value >= average]
    print('Above Average: ',len(above_avg)/len(series))
# Dump the header tally, then write each numeric series to its own text file,
# one value per line.
for key in user_dict:
    print(key," -- ",user_dict[key])
index = 0
for i in [loan_request,lend_issued,loan_term,interest_rate,user_grade]:
    names = ['loan_request.txt','lend_issued.txt','loan_term.txt','interest_rate.txt','user_grade.txt']
    # FIX: use a context manager so the file handle is closed even if a
    # write raises (the original open()/close() pair leaked it on error).
    with open(names[index], "w") as text_file:
        for num in i:
            text_file.write("%s\n" % num)
    index += 1
time2 = time.time()  # wall-clock end; elapsed time printed below
# Reference output from a full run over the complete 2.26M-row dataset:
# Processed 2260669 lines.
# loan_request info:
# min: 500.0
# max: 40000.0
# average: 15046.931227849467
# lend_issued info:
# min: 500.0
# max: 40000.0
# average: 15041.664056818605
# loan_term info:
# min: 36
# max: 60
# average: 42.91031854301472
# interest_rate info:
# min: 5.31
# max: 30.99
# average: 13.09291294419326
# user_grade info:
# min: 1
# max: 5
# average: 3.365366785392636
# member_id -- 1
# function took 41177.990 ms
print('function took {:.3f} ms'.format((time2-time1)*1000.0))
|
import shutil
from pathlib import Path
import unittest
from datasets.config import HF_DATASETS_CACHE
from fewshot.challenges import registry
from fewshot import make_challenge
class TestChallenge(unittest.TestCase):
    """Verifies that every registered (non-sanity) challenge spec builds with
    hash verification enabled."""
    def test_challenge_hashes(self):
        """Build each challenge from a cold cache and collect hash mismatches."""
        # Start from a clean cache so stale datasets cannot mask hash drift.
        shutil.rmtree(Path(HF_DATASETS_CACHE) / 'flex_challenge', ignore_errors=True)
        specs = registry.specs
        wrong_hash_msgs = []
        for k in specs:
            # Ignore sanity for now.
            if 'sanity' not in k:
                try:
                    make_challenge(k, ignore_verification=False)
                except ValueError as e:
                    # make_challenge signals a verification failure via ValueError.
                    wrong_hash_msgs.append(str(e))
        # Report every failing spec at once instead of stopping at the first.
        self.assertEqual(len(wrong_hash_msgs), 0, '\n'.join(wrong_hash_msgs))

if __name__ == '__main__':
    unittest.main()
|
"""
name @ utils
utilities to work with names and strings
"""
import maya.cmds as mc
def removeSuffix(name):
    """
    remove suffix from given name/string
    @param name: given name string to process
    @return str, name without characters beyond last '_'
    """
    base, sep, _tail = name.rpartition('_')
    # no underscore at all -> nothing to strip, hand back the input untouched
    if not sep:
        return name
    return base
def zipPairs(keyObjs,checkSuff):
    """
    finds matching pairs based on a suffix modification and returns them as a list of tuples
    used for stuff like parentConstraining a bunch of corresponding joints
    @param keyObjs: list(str), the source objects to look for existing matches base on the checkSuff
    @param checkSuff: str the modification for example: checkSuff = 'srf' --> my_geo becomes my_srf
    @return list( tuple( , ) ) matching (keyObj, foundObj) pairs
    """
    matches = {}
    for keyObj in keyObjs:
        # Swap the object's suffix for checkSuff and test whether that node exists.
        checkObj = removeSuffix(keyObj) + checkSuff
        print('checking existence of: ' + checkObj)
        if mc.objExists(checkObj):
            print('found a match!')
            matches[keyObj] = checkObj
        else:
            print('no dice')
    # FIX: items() yields the same (key, value) pairs as zipping keys() with
    # values(), but pairs them directly and cannot fall out of sync.
    pairs = list(matches.items())
    return pairs
|
#!/usr/bin/env python
import pika
import sys
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.exchane_declare(exchange='topic_logs', type='topic')
result = channel.queue_declare(exclusive=True)
queue_name = result.method.queue
binding_keys = sys.argv[1:]
for binding_key in binding_keys:
channel.queue_bind(exchange='topic_logs',queue=queue_name,routing_key=binding_key)
print " [*] Waiting for logs. To exit press CTRL+C"
def callback(ch,method,properties,body):
print " [x] %r:%r" %(method.routing_key,body,)
channel.basic_consume(callback,queue=queue_name,no_ack=True)
channel.start_consuming()
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This test suite validates Impala's timezone handling: it compares
# from_utc_timestamp() results against pre-computed local times stored in
# functional.alltimezones. Because the result does not depend on the storage
# format, the test matrix is constrained to a single table format
# (uncompressed text) rather than exploring all format permutations.
from tests.common.impala_test_suite import ImpalaTestSuite
class TestTimeZones(ImpalaTestSuite):
    """Checks from_utc_timestamp() against the expected local times stored in
    functional.alltimezones."""
    @classmethod
    def get_workload(cls):
        return 'functional-query'
    @classmethod
    def add_test_dimensions(cls):
        super(TestTimeZones, cls).add_test_dimensions()
        # Result is independent of storage format, so pin to uncompressed text
        # to keep the test matrix small.
        cls.ImpalaTestMatrix.add_constraint(lambda v:\
            v.get_value('table_format').file_format == 'text' and\
            v.get_value('table_format').compression_codec == 'none')
    def test_timezones(self, vector):
        # Any returned row is a timezone conversion that disagreed with the
        # stored expected local time, so the assertion demands zero rows.
        result = self.client.execute("select timezone, utctime, localtime, \
        from_utc_timestamp(utctime,timezone) as impalaresult from functional.alltimezones \
        where localtime != from_utc_timestamp(utctime,timezone)")
        assert(len(result.data) == 0)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2018-06-11 07:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds a storage-path field to the course app's Video model."""

    dependencies = [
        ('course', '0006_video_learn_times'),
    ]

    operations = [
        migrations.AddField(
            model_name='video',
            name='path',
            # verbose_name is the Chinese label for "video path"
            field=models.CharField(default='', max_length=100, verbose_name='视频路径'),
        ),
    ]
|
import numpy as np
one_dimensional_array = np.array([1.2, 2.4, 3.5, 4.7, 6.1, 7.2, 8.3, 9.5])
print('one_dimensional_array\n', one_dimensional_array)
two_dimensional_array = np.array([[6, 5], [11, 7], [4, 8]])
print('two_dimensional_array\n', two_dimensional_array)
# arange's stop is exclusive: this yields 5..11.
sequence_of_integers = np.arange(5, 12)
print('sequence_of_integers\n', sequence_of_integers)
# randint's high is exclusive, so high=101 draws from [50, 100].
random_integers_between_50_and_100 = np.random.randint(low=50, high=101, size=(6))
print('random_integers_between_50_and_100\n', random_integers_between_50_and_100)
random_floats_between_0_and_1 = np.random.random([6])
print('random_floats_between_0_and_1\n', random_floats_between_0_and_1)
# Shifting / scaling uniform draws changes their range element-wise.
random_floats_between_2_and_3 = random_floats_between_0_and_1 + 2.0
print('random_floats_between_2_and_3\n', random_floats_between_2_and_3)
random_integers_between_150_and_300 = random_integers_between_50_and_100 * 3
print('random_integers_between_150_and_300\n', random_integers_between_150_and_300)
# Synthetic linear dataset: label = 3*feature + 4, plus noise in [-2, 2).
feature = np.arange(6, 21)
print('feature\n', feature)
label = (3 * feature) + 4
print('label\n', label)
noise = (np.random.random([len(feature)]) * 4) - 2
print('noise\n', noise)
label = label + noise
print('label\n', label)
|
#!/usr/bin/python2
import cgitb,cgi,commands,random
print "Contant-type:text/html"
print ""
cgitb.enable()
x=cgi.FieldStorage()
p1=x.getvalue("cho")
u=x.getvalue('uname')
p=x.getvalue('pas')
port=random.randint(6000,7000)
commands.getoutput("sudo systemctl restart docker")
if p1=="1" :
ip=commands.getstatusoutput("sudo docker run -itd -p "+ str(port)+":4200 pythom12 ")
commands.getoutput("sudo docker exec -t "+ip[1]+" service shellinaboxd restart")
print "<html>"
print "<a href='http://192.168.43.103:"+ str(port)+"' target='_blank'> python platform </a>"
print "access containers using login - rio ; password - 14 "
print "</html>"
elif p1=="1" :
ip=commands.getstatusoutput("sudo docker run -itd -p "+ str(port)+":4200 pythom12 ")
commands.getoutput("sudo docker exec -t "+ip[1]+" service shellinaboxd restart")
print "<html>"
print " <a href='http://192.168.43.103:"+ str(port)+"' target='_blank'> bash platform </a>"
print "access containers using login - ritesh14 ; password - redhat "
print "</html>"
|
import sys
import calendar
from datetime import datetime
# from kivy.config import Config
# Config.set('graphics', 'width', '600')
# Config.set('graphics', 'height', '1024')
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.lang import Builder
from kivy.logger import Logger
from kivy.uix.popup import Popup
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.floatlayout import FloatLayout
Builder.load_string('''
#:kivy 1.6
[SideBar@BoxLayout]:
content: content
orientation: 'vertical'
size_hint: ctx.size_hint if hasattr(ctx, 'size_hint') else (1, 1)
Image:
source: ctx.image
size_hint: (1, None)
height: root.width
GridLayout:
cols: 2
# just add a id that can be accessed later on
id: content
<Root>:
Button:
center_x: root.center_x
text: 'press to add_widgets'
size_hint: .2, .2
on_press:
root.load_content(sb.content)
#
# what comes after `:` is basically normal python code
#sb.content.clear_widgets()
# however using a callback that you can control in python
# gives you more control
#root.load_content(sb.content)
SideBar:
id: sb
size_hint: .2, 1
image: 'data/images/image-loading.gif'
''')
class Root(FloatLayout):
    """Root layout: the kv button toggles the sidebar content on and off."""
    shown = False  # True while the 20 demo buttons are loaded

    def load_content(self, content):
        """Fill *content* with numbered buttons, or clear it if already filled."""
        if not self.shown:
            self.shown = True
            for n in range(20):
                content.add_widget(Button(text=str(n)))
        else:
            self.shown = False
            content.clear_widgets()
class MyApp(App):
    """Minimal app whose whole widget tree is a single Root layout."""
    def build(self):
        root_widget = Root()
        return root_widget
class MainScreen(GridLayout):
    """Placeholder screen for FasciaApp; the commented-out constructor and
    loader below sketch the intended grid configuration."""
    # def __init__(self, **kwargs):
    #     Logger.info('xxx: %s'%kwargs)
    #     kwargs.setdefault('cols', 1)
    #     kwargs.setdefault('rows', 5)
    #     # kwargs.setdefault('size_hint',(1,1))
    #     super(GridLayout, self).__init__(**kwargs)
    #
    # def load_content(self, content):
    #     content.add_widget(Button(text='test'))
    #     content.add_widget(Button(text='test2'))
    pass
class FasciaApp(App):
    """Alternate app entry point using MainScreen (not run by default)."""
    title = 'Fascia'

    def build(self):
        screen = MainScreen()
        return screen
class CalendarApp(App):
    """Renders the current month as a 7-column grid: one header row of weekday
    abbreviations, then one Button per day (blank Labels pad days that belong
    to the adjacent months)."""
    def build(self):
        today = datetime.now()
        cal = calendar.Calendar(calendar.SUNDAY)   # weeks start on Sunday
        layout = GridLayout(cols=7)
        for iweek,week in enumerate(cal.monthdays2calendar(today.year, today.month)):
            if iweek == 0: # label each col by the day
                for _,wkday in week:
                    label = Label(text='[b]%s[/b]'%calendar.day_abbr[wkday], markup=True)
                    layout.add_widget(label)
            for day, wkday in week: # add each day
                # monthdays2calendar marks out-of-month padding days with day == 0
                if day != 0:
                    button = Button(text='%d'%day)
                else:
                    button = Label(text = '')
                layout.add_widget(button)
        return layout
if __name__ == '__main__':
    # Only MyApp runs by default; the other App subclasses are kept as demos.
    MyApp().run()
    # FasciaApp().run()
    # CalendarApp().run()
# Main Python file that connects to InBloom and fetches Data
# Once all the data is retrieved, index.html is rendered for display
#
from flask import Flask, redirect, url_for, request, jsonify, render_template
import requests
import numpy as np
import pandas as pd
import simplejson as json
# OAuth / API connection settings for the inBloom (SLC) sandbox.
params = {
    'base_url': 'https://api.sandbox.slcedu.org',
    'redirect_uri' : 'http://127.0.0.1:5000/oauth',
    'client_id' : 'CLIENT ID',       # placeholder -- set to your app's client id
    'client_secret': 'SECRET KEY',   # placeholder -- set to your app's secret
    'oauth_code': '',                # filled in by the /oauth handler at runtime
    'req_code_url': '/api/oauth/authorize',
    'req_token_url': '/api/oauth/token'
}
SECRET_KEY = 'SWSXEDU_SECRET_KEY'
DEBUG = True
# setup flask
app = Flask(__name__)
app.debug = DEBUG
app.secret_key = SECRET_KEY
@app.route('/')
def index():
    """Kick off the OAuth flow: redirect the browser to SLC's authorize URL."""
    authorize_url = (
        params['base_url']
        + params['req_code_url']
        + '?response_type=code'
        + '&client_id=' + params['client_id']
        + '&redirect_uri=' + url_for('oauth', _external=True)
    )
    return redirect(authorize_url)
@app.route('/oauth')
def oauth():
    """OAuth redirect target: exchange the authorization code for a token,
    fetch every student's attendance events, and render main.html.

    Redirects back to index() when the code is missing or the token request
    fails; otherwise returns the rendered template.
    """
    params['oauth_code'] = request.args.get('code')
    # 'is None' instead of '== None': identity test is the correct idiom.
    if request.args.get('code') is None:
        return redirect(url_for('index'))
    else:
        slc_token_url = params['base_url'] \
            + params['req_token_url'] \
            + '?grant_type=authorization_code' \
            + '&client_id=' + params['client_id'] \
            + '&client_secret=' + params['client_secret'] \
            + '&code=' + request.args.get('code') \
            + '&redirect_uri=' + url_for('oauth', _external=True)
        oauth_token = requests.get(slc_token_url)
        if oauth_token.status_code != 200:
            return redirect(url_for('index'))
        # NOTE(review): slicing the raw body to extract the token is fragile;
        # presumably the response is JSON -- oauth_token.json() would be safer.
        access_token = oauth_token.content[17:].strip('}"')
        headers = {'Authorization': 'bearer ' + access_token}
        students_jdata = requests.get('https://api.sandbox.inbloom.org/api/rest/v1.1/students', headers=headers)
        students = []
        for student_jdata in students_jdata.json():
            student = {}
            student['studentUniqueStateId'] = student_jdata.get('studentUniqueStateId', 0)
            if student['studentUniqueStateId'] == 0:
                continue  # skip records without a state id
            student_name = student_jdata.get('name')
            if student_name:
                student['firstName'] = student_name.get('firstName')
                student['middleName'] = student_name.get('middleName', '')
                student['lastSurname'] = student_name.get('lastSurname', '')
            student['sex'] = student_jdata.get('sex')
            student_links = student_jdata.get('links')
            for link in student_links:
                if link.get('rel') == 'getAttendances':
                    student[link['rel']] = link.get('href')
            students.append(student)
        students_col = list(students[0].keys())
        stud_df = pd.DataFrame(students, columns=students_col)
        students_atd = []
        for i in range(len(stud_df)):
            # BUG FIX: DataFrame.ix was deprecated in pandas 0.20 and removed
            # in 1.0; .iloc is the positional equivalent (the index here is a
            # default RangeIndex, so position == label).
            atd_jdata = requests.get(stud_df.iloc[i].get('getAttendances'), headers=headers).json()[0]
            for key in atd_jdata:
                if key == 'schoolYearAttendance':
                    school_attend = atd_jdata[key]
                    for j in range(len(school_attend)):
                        # Flatten one row per attendance event per school year.
                        for attend_event in school_attend[j]['attendanceEvent']:
                            student = {}
                            student['studentUniqueStateId'] = stud_df.iloc[i]['studentUniqueStateId']
                            student['firstName'] = stud_df.iloc[i]['firstName']
                            student['middleName'] = stud_df.iloc[i]['middleName']
                            student['lastSurname'] = stud_df.iloc[i]['lastSurname']
                            student['schoolYear'] = school_attend[j]['schoolYear']
                            student['date'] = attend_event['date']
                            student['event'] = attend_event['event']
                            student['reason'] = attend_event.get('reason', '')
                            if student['event'] == 'In Attendance':
                                student['attendance'] = 1
                            else:
                                student['attendance'] = 0
                            students_atd.append(student)
        # students_col = list(students_atd[0].keys())
        # students_atd_df = pd.DataFrame(students_atd, columns=students_col)
        # students_atd_df.to_csv('student_attendance.csv', index=False)
        return render_template('main.html', students_attend=students_atd)
@app.route('/templates/<iframe_html>')
def iframe(iframe_html):
    """Serve an arbitrary template by name (used for embedded iframes)."""
    # NOTE(review): the template name comes straight from the URL -- confirm
    # that Jinja's loader confines lookups to the templates directory.
    return render_template(iframe_html)
app.run()
import sys
class Genotype(object):
    """One sample's genotype for a single VCF variant.

    FORMAT values are kept in self.format; the set of allowed fields
    (format_set) and the currently-active fields (active_formats /
    active_format_list) live on the parent variant object.
    """
    def __init__(self, variant, gt):
        self.format = dict()       # FORMAT field -> value for this sample
        self.variant = variant     # parent variant owning the format bookkeeping
        self.set_format('GT', gt)

    def set_formats(self, fields, values):
        """Bulk-set FORMAT fields, pairing *fields* with *values*.

        Exits the program on an unknown field. NOTE(review): unlike
        set_format(), this does not refresh the variant's active_format_list;
        presumably the caller does that once afterwards -- confirm.
        """
        # Hoist attribute lookups out of the loop (hot path for large VCFs).
        format_set = self.variant.format_set
        active_formats = self.variant.active_formats
        format_dict = self.format
        for field, value in zip(fields, values):
            if field in format_set:
                format_dict[field] = value
                # set.add is idempotent, so no membership pre-check is needed.
                active_formats.add(field)
            else:
                sys.stderr.write('\nError: invalid FORMAT field, \"' + field + '\"\n')
                sys.exit(1)

    def set_format(self, field, value, update_active=True):
        """Set one FORMAT field; exits the program on an unknown field.

        NOTE(review): the update_active argument is accepted but never used;
        the active-format list is refreshed whenever a new field appears.
        """
        if field in self.variant.format_set:
            self.format[field] = value
            if field not in self.variant.active_formats:
                self.variant.active_formats.add(field)
                self.variant.update_active_format_list()
        else:
            sys.stderr.write('\nError: invalid FORMAT field, \"' + field + '\"\n')
            sys.exit(1)

    def get_format(self, field):
        """Return the stored value for *field* (raises KeyError if unset)."""
        return self.format[field]

    def get_gt_string(self):
        """Render the genotype as a colon-joined string in the order of the
        variant's active_format_list, using '.' for fields not set on this
        sample and formatting floats to two decimals."""
        g_list = list()
        for f in self.variant.active_format_list:
            if f in self.format:
                # FIX: isinstance instead of 'type(x) == float' -- the idiomatic
                # check, and it also accepts float subclasses.
                if isinstance(self.format[f], float):
                    g_list.append('%0.2f' % self.format[f])
                else:
                    g_list.append(str(self.format[f]))
            else:
                g_list.append('.')
        return ':'.join(g_list)
|
from flask import request
from flask_restplus import Namespace, Resource, abort
from app.utils.exceptions import OdooIsDeadError
# Namespace grouping all /companies endpoints.
api_company = Namespace('companies', description='Request to odoo companies.')
@api_company.route("/")
class Company(Resource):
def get(self):
"""Get all companies from odoo.
"""
from app.controllers.odoo_controller import OdooController
try:
response = OdooController.get_odoo_companies()
except OdooIsDeadError as err:
abort(503, err, error_id='odoo_connection_error')
return response
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pickle
class APIConfig:
    """Holds weather-API settings and (de)serialises them via pickle."""

    def __init__(self):
        self.project_name = None
        self.api_key = None
        self.city_code = None
        self.city_latitude = None
        self.city_longitude = None

    def setParameter(self, _project_name, _api_key, _city_code):
        """Set project name, API key and city code in one call."""
        self.project_name = _project_name
        self.api_key = _api_key
        self.city_code = _city_code

    def setGPS(self, latitude, longitude):
        """Set the city's GPS coordinates."""
        self.city_latitude = latitude
        self.city_longitude = longitude

    def loadFromFile(self, file_name):
        """Load attributes from a pickle file written by saveToFile.

        Uses a with-block so the handle is closed even when unpickling
        raises (the original leaked the handle on error).
        """
        with open(file_name, 'rb') as f:
            self.__dict__.update(pickle.load(f))

    def saveToFile(self, file_name):
        """Pickle this object's attribute dict to *file_name*."""
        with open(file_name, 'wb') as f:
            pickle.dump(self.__dict__, f)
|
from __future__ import absolute_import, division, unicode_literals
from twisted.trial.unittest import SynchronousTestCase
from twisted.internet.task import Clock
from mimic.core import MimicCore
from mimic.resource import MimicRoot
from mimic.test.helpers import json_request
class ValkyrieAPITests(SynchronousTestCase):
    """
    Tests for the Valkyrie API
    """

    def setUp(self):
        """
        Initialize core and root
        """
        self.core = MimicCore(Clock(), [])
        self.root = MimicRoot(self.core).app.resource()
        self.url = "/valkyrie/v2.0"

    def _json(self, method, path, body=None):
        """Issue a JSON request below the Valkyrie base URL; return (response, content)."""
        if body is None:
            return self.successResultOf(
                json_request(self, self.root, method, self.url + path))
        return self.successResultOf(
            json_request(self, self.root, method, self.url + path, body))

    def _effective(self, account, item_type, contact):
        """GET the effective-permissions listing for one contact on one account."""
        path = ("/account/" + account + "/permissions/contacts/" + item_type +
                "/by_contact/" + contact + "/effective")
        return self._json(b"GET", path)

    def test_post_auth_token_to_login_endpoint(self):
        """
        Obtain an auth token
        """
        response, _ = self._json(b"POST", "/login", {"something": "anything"})
        self.assertEqual(200, response.code)

    def test_post_auth_token_to_login_user_endpoint(self):
        """
        Obtain an auth token via the user login endpoint
        """
        response, _ = self._json(b"POST", "/login_user", {"something": "anything"})
        self.assertEqual(200, response.code)

    def test_get_devices_effective_permissions(self):
        """
        Obtain list of device permissions for contact 12 on account 123456
        """
        response, content = self._effective("123456", "devices", "12")
        self.assertEqual(200, response.code)
        self.assertTrue(content["contact_permissions"])
        self.assertEqual(len(content["contact_permissions"]), 4)

    def test_get_empty_accounts_effective_permissions(self):
        """
        Obtain list of account permissions for contact 12 on account 123456
        """
        response, content = self._effective("123456", "accounts", "12")
        self.assertEqual(200, response.code)
        self.assertFalse(content["contact_permissions"])

    def test_get_accounts_effective_permissions(self):
        """
        Obtain list of account permissions for contact 34 on account 123456
        """
        response, content = self._effective("123456", "accounts", "34")
        self.assertEqual(200, response.code)
        self.assertTrue(content["contact_permissions"])
        self.assertEqual(len(content["contact_permissions"]), 1)
        self.assertEqual(content["contact_permissions"][0]["permission_type"], 15)
        self.assertEqual(content["contact_permissions"][0]["item_type_name"], "accounts")

    def test_get_empty_devices_effective_permissions(self):
        """
        Obtain list of devices permissions for contact 34 on account 123456
        """
        response, content = self._effective("123456", "devices", "34")
        self.assertEqual(200, response.code)
        self.assertFalse(content["contact_permissions"])

    def test_get_devices_permissions_item_id(self):
        """
        Obtain list of device permissions for contact 78 on account 654321
        """
        response, content = self._effective("654321", "devices", "78")
        self.assertEqual(200, response.code)
        self.assertTrue(content["contact_permissions"])
        self.assertEqual(len(content["contact_permissions"]), 1)
        permission = content["contact_permissions"][0]
        self.assertEqual(permission["permission_type"], 14)
        self.assertEqual(permission["item_id"], 262144)
        self.assertEqual(permission["item_type_name"], "devices")

    def test_get_any_permissions(self):
        """
        Obtain list of all permissions for contact 90 on account 654321
        """
        response, content = self._effective("654321", "any", "90")
        self.assertEqual(200, response.code)
        self.assertTrue(content["contact_permissions"])
        self.assertEqual(len(content["contact_permissions"]), 2)
        device_permission = content["contact_permissions"][0]
        self.assertEqual(device_permission["permission_type"], 12)
        self.assertEqual(device_permission["item_id"], 1048576)
        self.assertEqual(device_permission["item_type_name"], "devices")
        account_permission = content["contact_permissions"][1]
        self.assertEqual(account_permission["permission_type"], 15)
        self.assertEqual(account_permission["item_id"], 654321)
        self.assertEqual(account_permission["item_type_name"], "accounts")
|
from mpi4py import MPI
import sys
# Query this process's place in the world communicator once.
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
print("Helloworld! I am process %d of %d.\n" % (rank, size))
|
def belt_count(dictionary):
    """Print, for each distinct belt colour, how many ninjas hold it."""
    colours = list(dictionary.values())
    for colour in set(colours):
        print(f"There are {colours.count(colour)} {colour} belts")
# Mapping of ninja name -> belt colour, filled in interactively below.
ninja_belts = {}
def ninjaIntro(ninjas):
    """Print an introduction line for every ninja in the mapping.

    The parameter was renamed from ``dict``, which shadowed the builtin.
    The only visible caller (commented out below) passes it positionally.
    """
    for name, belt in ninjas.items():
        print(f"I am {name}, and I am a {belt} belt")
# Interactively collect ninja -> belt-colour pairs until the user declines.
while True:
    name = input("Enter a ninja name: ")
    colour = input("Enter a belt color: ")
    ninja_belts[name] = colour
    another = input("Add another? (Y/N): ")
    if another not in ('y', 'Y'):
        break
# ninjaIntro(ninja_belts)
belt_count(ninja_belts)
class Solution:
    # @return a list of lists of integer
    def generateMatrix(self, n):
        """Fill an n x n matrix with 1..n*n in clockwise spiral order.

        The four near-identical direction branches of the original are
        collapsed into a delta table, and ``range`` replaces the
        Python-2-only ``xrange`` (works on both interpreters).
        """
        result = [[0] * n for _ in range(n)]
        # Clockwise movement deltas: right, down, left, up.
        deltas = ((0, 1), (1, 0), (0, -1), (-1, 0))
        d = 0
        i = j = 0
        for x in range(1, n * n + 1):
            result[i][j] = x
            ni, nj = i + deltas[d][0], j + deltas[d][1]
            # Turn when the next cell is off the grid or already filled.
            if not (0 <= ni < n and 0 <= nj < n) or result[ni][nj]:
                d = (d + 1) % 4
                ni, nj = i + deltas[d][0], j + deltas[d][1]
            i, j = ni, nj
        return result
# Smoke test: print a 4x4 clockwise spiral.
# The original used the Python-2-only print statement; the parenthesised
# call below behaves identically on Python 2 and 3.
s = Solution()
print(s.generateMatrix(4))
#!/usr/bin/python
import os
import json
import subprocess
import json
import shutil
def main():
    """Create release_repository/v<version> and fill it with RN bundles.

    Reads the version from package.json, lays out the bundle/patch folder
    tree, writes an info.json per platform, then shells out to
    react-native / npm to produce the actual bundles.

    Note: this is a Python 2 script (it uses ``raw_input``).
    """
    release_repository_path = 'release_repository'
    package_json_path = 'package.json'
    makedir(release_repository_path)
    # get package json, version -- with-block closes the handle on error
    # (the original left it open).
    with open(package_json_path, 'r') as package_json_file:
        package_json = json.loads(package_json_file.read())
    version = package_json['version']
    release_file_path = release_repository_path + '/v' + version
    if os.path.exists(release_file_path):
        # Ask before clobbering an already-released version.
        answer = raw_input("version:" + version + " already exists, are you sure to override? (y/n)")
        if answer != 'y':
            return
    rnbundle_file_path = release_file_path + '/bundle'
    patches_file_path = release_file_path + '/patches'
    rnbundle_android_file_path = rnbundle_file_path + '/android'
    rnbundle_ios_file_path = rnbundle_file_path + '/ios'
    # Create the whole folder tree (parents before children).
    for folder in (release_file_path,
                   rnbundle_file_path,
                   rnbundle_android_file_path,
                   rnbundle_ios_file_path,
                   patches_file_path,
                   patches_file_path + '/android',
                   patches_file_path + '/ios'):
        makedir(folder)
    # Drop a version marker next to each platform bundle.
    version_json = json.dumps({'version': version})
    for info_path in (rnbundle_ios_file_path + '/info.json',
                      rnbundle_android_file_path + '/info.json'):
        with open(info_path, 'w') as info_file:
            info_file.write(version_json)
    android_bundle_file_path = rnbundle_android_file_path + '/index.android.jsbundle'
    ios_bundle_file_path = rnbundle_ios_file_path + '/index.ios.jsbundle'
    npm_shell_android = ('react-native bundle --platform android --dev false'
                         ' --entry-file ./index.android.js'
                         ' --bundle-output ' + android_bundle_file_path +
                         ' --assets-dest ' + rnbundle_android_file_path)
    npm_shell_ios = ('react-native bundle --platform ios --dev false'
                     ' --entry-file ./index.ios.js'
                     ' --bundle-output ' + ios_bundle_file_path +
                     ' --assets-dest ' + rnbundle_ios_file_path)
    print('generate folders complete! wainting for execture bundle...')
    # shell=True is required here: these are composite npm / react-native
    # CLI command strings built from trusted local paths.
    for command in (npm_shell_android, npm_shell_ios, 'npm run bundle-patches'):
        output = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).stdout.read()
        print(output)
    print('all done')
def makedir(dir):
    """Create *dir* if it does not already exist (non-recursive)."""
    if not os.path.isdir(dir):
        print('create folder ' + dir)
        os.mkdir(dir)
if __name__ == "__main__":
main()
|
from .utils import *
from .inference import *
|
import multiprocessing
from mu.mel import mel
from pype import servos
class Pipe(object):
    """An organ pipe tuned to a fixed pitch."""
    def __init__(self, pitch: mel.SimplePitch):
        self._pitch = pitch
    @property
    def pitch(self) -> mel.SimplePitch:
        """The pitch this pipe sounds (read-only)."""
        return self._pitch
class ServoPipe(Pipe):
    """A Pipe whose airflow valve is driven by a servo motor.

    Valve movements run in a background process so calls return
    immediately; starting a new movement terminates the previous one.
    """
    def __init__(
        self,
        pitch: mel.SimplePitch,
        pin: int,
        port: str = "/dev/ttyACM0",
        # angle when the valve to the pipe is completely closed, so that no air is going
        # to the pipe
        closed_angle: float = 90,
        # angle when the valve to the pipe is completely opened, so that all air is
        # passing to the pipe
        opened_angle: float = 10,
        allowed_range_of_angles: tuple = (0, 180),
        # how long it takes to move 60 degree, depending on the particular servo motor
        # model that gets used
        operating_speed: float = 0.11,
        # how many degree the motor approximately moves with each step
        move_to_grid_size: float = 0.1,
    ):
        self._closed_angle = closed_angle
        self._opened_angle = opened_angle
        super().__init__(pitch)
        self._servo = servos.Servo(
            pin,
            port,
            closed_angle,
            allowed_range_of_angles,
            operating_speed,
            move_to_grid_size,
        )
        # Exercise the full valve range once, then leave the valve closed.
        self._servo.move_to(opened_angle, 0.25)
        self._servo.move_to(closed_angle, 0.25)
        # Background process running the current movement (None until started).
        self._process = None
    def _start(self, target, args: tuple = tuple([])) -> None:
        # Run *target* in a fresh process, cancelling any movement in progress.
        # first stopping all previous processes
        self.stop()
        # overwriting process variable
        self._process = multiprocessing.Process(target=target, args=args)
        # starting the new process
        self._process.start()
    def stop(self) -> None:
        """stop all movements immediately"""
        try:
            self._process.terminate()
        except AttributeError:
            # no movement has been started yet (self._process is None)
            pass
    def on(self, duration: float = 0) -> None:
        """completely open the valve in n seconds"""
        self._start(self._servo.move_to, (self._opened_angle, duration))
    def off(self, duration: float = 0) -> None:
        """completely close the valve in n seconds"""
        self._start(self._servo.move_to, (self._closed_angle, duration))
    def tremolo(self, duration_per_cycle: float) -> None:
        """Oscillate the valve open/closed until stopped; each half-cycle
        lasts duration_per_cycle / 2 seconds."""
        def tremolo():
            # `duration` is read from the enclosing scope; it is assigned
            # below, before _start launches this closure.
            while True:
                self._servo.move_to(self._opened_angle, duration)
                self._servo.move_to(self._closed_angle, duration)
        duration = duration_per_cycle * 0.5
        self._start(tremolo)
|
# encoding: utf-8
import logging
import re
from string import punctuation

import jieba
from zhon import hanzi
# Singleton decorator.
def singleton(cls):
    """Class decorator: create at most one instance of the decorated class."""
    instances = {}

    def get_instance(*args, **kargs):
        if cls not in instances:
            instances[cls] = cls(*args, **kargs)
        return instances[cls]

    return get_instance
# Strip punctuation (both Chinese and English).
def remove_punctuation(input_string):
    """Delete every ASCII or Chinese punctuation character from the string."""
    punc_class = r'[{}]+'.format(punctuation + hanzi.punctuation)
    return re.sub(punc_class, '', input_string)
def remove_punc(tokens_list):
    """De-punctuate each token, keeping only tokens that stay non-blank."""
    stripped = (remove_punctuation(token) for token in tokens_list)
    return [token for token in stripped if token.strip()]
# Tokenizer.
@singleton
class Cutter:
    """Process-wide jieba tokenizer (single instance via the decorator above)."""

    def __init__(self):
        self.cutter = jieba
        self.cutter.initialize()

    def cut(self, input_string):
        """Precise-mode tokenisation, returned as a list."""
        return self.cutter.lcut(input_string)

    def cut_all(self, input_string):
        """Full-mode tokenisation (lazy iterator)."""
        return self.cutter.cut(input_string, cut_all=True)

    def cut_and_remove_punc(self, input_string):
        """Tokenise, then drop punctuation-only tokens."""
        return remove_punc(self.cutter.lcut(input_string))

    def cut_zi_and_remove_punc(self, input_string):
        """Split into single characters, then drop punctuation-only tokens."""
        return remove_punc(' '.join(input_string).split())
# Query data structure.
class QueryItem:
    """Container for one incoming query and its derived representations."""

    def __init__(self):
        self.query = ''               # raw query text
        self.query_vec = ''           # embedding vector ('' until computed)
        self.query_tokens_jieba = []  # jieba word tokens
        self.query_tokens_zi = []     # single-character tokens
# FAQ-result data structure.
class FAQItem:
    """One candidate FAQ entry scored against a query.

    Attribute declaration order is preserved deliberately: the serialisers
    below iterate ``vars()`` and rely on insertion order.
    """

    def __init__(self, query_item):
        self.id = 0
        self.question = ''
        self.question_vec = ''
        self.question_tokens_zi = []     # single-character tokens
        self.question_tokens_jieba = []  # jieba word tokens
        self.answer = ''
        self.query_item = query_item     # back-reference to the query
        self.is_term = False             # matched by term retrieval?
        self.is_semantic = False         # matched by semantic retrieval?
        self.term_score = 0.0
        self.semantic_score = 0.0
        self.score = 0.0                 # final combined score
        self.bm25_similarity_score = 0.0
        self.edit_similarity_score = 0.0
        self.jaccard_similarity_score = 0.0
        self.abcnn_similarity = 0.0
def query_item_to_dict(query_item):
    """Serialise a QueryItem to a plain dict, omitting the query vector."""
    return {field: val for field, val in vars(query_item).items()
            if field != 'query_vec'}
def faq_item_to_dict(faq_item, has_query=True):
    """Serialise an FAQItem to a dict, dropping the query back-reference and
    the question vector. ``has_query`` is accepted for interface
    compatibility but does not affect the output."""
    excluded = ('query_item', 'question_vec')
    return {field: val for field, val in vars(faq_item).items()
            if field not in excluded}
def answer_faq_item_to_dict(faq_item):
    """Return the item's answer text, or None when the attribute is absent."""
    return vars(faq_item).get("answer")
def suggestions_faq_item_to_dict(faq_item, has_query=True, has_question_vec=True):
    """Reduce an FAQItem to the fields shown in a suggestion payload.

    Only 'question', 'answer' and 'score' are emitted. ``has_query`` and
    ``has_question_vec`` are kept for interface compatibility; the fields
    they used to govern never reached the output, so the dead reassignments
    of the original were dropped.

    Fixes a crash: the original referenced ``logging`` although the module
    never imported it, so every call raised NameError (``import logging``
    is now in the module's import block).
    """
    logger = logging.getLogger("utils.py--suggestions_faq_item_to_dict")
    res = {name: value for name, value in vars(faq_item).items()
           if name in ('question', 'answer', 'score')}
    logger.debug('score' + str(res['score']))
    return res
def faq_items_to_list(faq_item_list):
    """Serialise every FAQItem in the list via faq_item_to_dict."""
    return [faq_item_to_dict(entry) for entry in faq_item_list]
def answer_suggestion_faq_items_to_list(faq_item_list, ans_sugg_len=4):
    """Shape ranked FAQ items into {'answer': ..., 'suggestion': [...]}.

    The top-ranked item supplies the answer; up to ans_sugg_len - 1 further
    items become suggestions.
    """
    res = {'answer': '', 'suggestion': []}
    for rank in range(min(len(faq_item_list), ans_sugg_len)):
        if rank == 0:
            res['answer'] = answer_faq_item_to_dict(faq_item_list[rank])
        else:
            res['suggestion'].append(suggestions_faq_item_to_dict(
                faq_item_list[rank], has_query=False, has_question_vec=False))
    return res
if __name__ == '__main__':
    # Smoke test: punctuation should be stripped from the sample string.
    sample = 'are you ok ??'
    print(remove_punctuation(sample))
|
import matplotlib.pyplot as plt
import re
import os
def main():
    """Read the identity percentages, bin them, and plot the histogram."""
    percentages = inlezen()
    bins, counts = tellen(percentages)
    grafiek(bins, counts)
def inlezen():
    """Parse identity.txt and return all bare numeric tokens as strings.

    Tokens containing ':', '_' or '-' (labels, ranges, file names) and empty
    tokens are skipped. Uses a with-block so the file is closed even on a
    parse error (the original never closed it).
    """
    seq = []
    with open('identity.txt', 'r') as file:
        for line in file:
            for veld in line.split('\t'):
                for item in veld.strip().split(' '):
                    if item and ':' not in item and '_' not in item and '-' not in item:
                        seq.append(item)
    return seq
def tellen(seq):
    """Bucket percentage values into the 100 bins (i, i+1] for i in 0..99.

    Returns (bin lower bounds 0..99, count of values per bin).
    """
    waarden = [float(w) for w in seq]
    waardelijst = list(range(100))
    lijst = [sum(1 for w in waarden if lo < w <= lo + 1) for lo in waardelijst]
    return waardelijst, lijst
def grafiek(waardelijst, lijst):
    """Draw a bar chart of match counts per identity-percentage bin (blocks on show)."""
    plt.bar(waardelijst, lijst)
    plt.xlabel('Percentages in stappen van 1')
    plt.ylabel('Aantal matches')
    plt.title('%identity matches met stappen van 1 over 404 bestanden')
    plt.show()
# Run the read -> count -> plot pipeline.
main()
|
from django.urls import path
from authnz import views as authnz_views
# Routes for the authnz app: user registration and login.
urlpatterns = [
    path('authnz/register/', authnz_views.RegisterView.as_view(), name='register'),
    path('authnz/login/', authnz_views.LoginView.as_view(), name='login'),
]
|
import urllib.request, json
# NOTE(review): this module-level download is repeated inside the __main__
# block below, where `data` is re-fetched and rebound -- confirm whether this
# eager fetch is still needed.
with urllib.request.urlopen("https://pomber.github.io/covid19/timeseries.json") as url:
    data = json.loads(url.read().decode())
import json
import csv
import copy
import pandas as pd
import argparse
import matplotlib.pyplot as plt
def getValue(keys, value):
    """Follow a dotted key path into nested dicts; None when a key is missing.

    Only KeyError is absorbed, matching the original: indexing a non-dict
    still raises TypeError.
    """
    for part in keys.split('.'):
        try:
            value = value[part]
        except KeyError:
            return None
    return value
def getKeys(data):
    """Return the dotted key paths to every leaf of a nested dict.

    A non-dict value yields [''], which the caller maps back to its parent
    key. The original deep-copied each recursive result before mutating it;
    the copy was pointless (each call builds a fresh list of immutable
    strings), so it is dropped here.
    """
    if not isinstance(data, dict):
        return ['']
    res_ = []
    for key in data.keys():
        for element in getKeys(data[key]):
            res_.append(key if element == '' else key + "." + element)
    return res_
if __name__=='__main__':
    # Download the per-country time series (each country maps to a list of
    # {date, confirmed, ...}-style records).
    with urllib.request.urlopen("https://pomber.github.io/covid19/timeseries.json") as url:
        data = json.loads(url.read().decode())
    countries = list(data.keys())
    # Dotted key paths of the first record define the output columns.
    keyList = getKeys(data[countries[0]][0])
    frame = []
    for country in countries:
        # Column-oriented accumulator for this country's records.
        CountryData = {key: [] for key in keyList}
        for value in data[country]:
            for item in keyList:
                CountryData[item].append(getValue(item,value))
        df = pd.DataFrame(CountryData)
        # Tag every row with the country name.
        df['Country'] = df.apply(lambda x: country, axis=1)
        frame.append(df)
    result = pd.concat(frame)
    print(type(result))
    # Tab-separated dump of all countries' series.
    result.to_csv('{}.csv'.format('corona'), sep='\t')
    #result.plot(x ='date',y = keyList[1:])
|
from typing import List
from typing import Dict
from typing import Set
from typing import Tuple
import networkx as nx
def _compute_articulation_points(G: nx.Graph) -> List[int]:
    """
    An articulation point (cut vertex) is a node whose removal, along with all
    its incident edges, increases the number of connected components of a graph.
    Source: https://networkx.github.io/documentation/stable/_modules/networkx/algorithms/components/biconnected.html#articulation_points
    """
    return list(nx.articulation_points(G))
def _compute_biconnected_components_edges(G: nx.Graph) -> List[List]:
    """
    Biconnected components are maximal subgraphs such that the removal of a
    node (and all edges incident on that node) will not disconnect the subgraph.
    Source:
    - https://networkx.github.io/documentation/stable/_modules/networkx/algorithms/components/biconnected.html#biconnected_components
    - https://networkx.github.io/documentation/stable/_modules/networkx/algorithms/components/biconnected.html#biconnected_component_edges
    ------------------
    Returns a list of [node set, list of edge tuples] pairs.

    Fix: the original annotation ``List[List[Tuple[int], Set[int]]]`` was
    malformed (``List[...]`` takes exactly one parameter) and raised
    TypeError the moment the module was imported.
    """
    components = list(nx.biconnected_components(G))
    edges = list(nx.biconnected_component_edges(G))
    # Both generators traverse the graph in the same order, so pairing by
    # position keeps each node set with its own edge list.
    return [[comp, comp_edges] for comp, comp_edges in zip(components, edges)]
def _compute_degree_assortativity_coefficient(G: nx.Graph) -> float:
    """
    Degree assortativity of G.
    Source: https://networkx.github.io/documentation/stable/_modules/networkx/algorithms/assortativity/correlation.html#degree_assortativity_coefficient
    """
    coefficient = nx.degree_assortativity_coefficient(G)
    return coefficient
def _compute_rich_club_coefficient(G: nx.Graph) -> Dict[int, float]:
    """
    Rich-club coefficient of G, keyed by degree.
    Source: https://networkx.github.io/documentation/stable/_modules/networkx/algorithms/richclub.html#rich_club_coefficient
    """
    coefficients = nx.rich_club_coefficient(G)
    return coefficients
def _compute_avg_degree_connectivity(G: nx.Graph) -> Dict[int,float]:
    """
    Average degree of the neighbourhood of nodes, keyed by node degree.
    NOTE(review): ``nx.k_nearest_neighbors`` was renamed/removed in newer
    networkx releases in favour of ``average_degree_connectivity`` -- confirm
    the pinned networkx version before upgrading.
    Source: https://networkx.github.io/documentation/stable/reference/algorithms/generated/networkx.algorithms.assortativity.k_nearest_neighbors.html#networkx.algorithms.assortativity.k_nearest_neighbors
    """
    return nx.k_nearest_neighbors(G)
def _compute_bridges(G: nx.Graph) -> List[Tuple[int]]:
    """
    A bridge is an edge whose removal disconnects (a component of) G.
    Source: https://networkx.github.io/documentation/stable/_modules/networkx/algorithms/bridges.html#bridges
    """
    bridge_edges = nx.bridges(G)
    return list(bridge_edges)
def _compute_shortest_path(G: nx.Graph) -> Dict[int, Dict[int, List[int]]]:
    """
    All-pairs shortest paths: maps source -> target -> list of nodes on the path.
    Source: https://networkx.github.io/documentation/stable/_modules/networkx/algorithms/shortest_paths/generic.html#shortest_path

    Fix: the original annotation ``Dict[Dict[int,List[int]]]`` subscripted
    ``Dict`` with a single parameter, which raises TypeError at import time.
    """
    return nx.shortest_path(G)
__author__ = 'Justin'
import os
import sys
import json
import networkx as nx
from numpy import std,linspace,argsort,array,linspace,unique
from DisplayNetwork import networkdisplay
from ParetoFrontier import rand_paretofront
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from os.path import isfile, join
from datetime import datetime
import random
# DESCRIPTION: Generate Estimate of Factor Weight Error Distribution
#
# Initialize data
numweights = 30
# Candidate Zenness-vs-time weights, evenly spaced on [0, 1].
weights = linspace(0,1,numweights)
# Tally of how often each candidate weight produced an accepted route.
weightchosen = {weight:0 for weight in weights}
# [min, max] number of unique clusters requested from the Pareto routine.
uniquerange = [3,5]
numiter = 35
# Participant name from stdin; slicing drops the trailing newline.
print('Enter your first name: ')
person = sys.stdin.readline()[0:-1]
# Load Networks: read every saved .gexf snapshot; the file name encodes
# the capture datetime.
Graphs = []
cwd = os.getcwd()
folderpath = os.path.abspath(os.path.join(cwd, '..', 'Project Data', 'Networks', 'CstatHistory'))
files = [f for f in os.listdir(folderpath) if isfile(join(folderpath, f))]
for filename in files:
    filepath = os.path.abspath(os.path.join(cwd, folderpath, filename))
    # FIX: the original called `fh.close` without parentheses, so the handle
    # was never closed; the with-block closes it even if parsing fails.
    with open(filepath, 'rb') as fh:
        G = nx.read_gexf(fh)
    G.graph['datetime'] = datetime.strptime(filename, "%H-%M(%d-%m-%Y).gexf")
    Graphs.append(G)
# Main experiment loop: show Pareto-optimal routes, collect the user's
# accepted routes, and tally which candidate weights produced them.
for _ in range(0,numiter,1):
    # Generate Pareto Frontier
    Gchosen = random.choice(Graphs)
    cluster_weights, paths, pathsinfo = rand_paretofront(Gchosen,weights,['Zenness','currenttime'],
                                 uniquerange[0],uniquerange[1],'Zenness')
    # Print Route Information
    print('------------------------------------------------------------------------------')
    print('/////////////////////////////////////////////////////////////////////////////')
    Zenscore_pts = []
    time_pts = []
    for index,path in enumerate(paths):
        print('---------------------------------------')
        print('Route '+str(index)+':')
        print('-time(min):',pathsinfo[index]['currenttime']/60)
        print('-zenness:',pathsinfo[index]['Zenness'])
        print('----------------')
        # Differences are relative to route 0 (the first Pareto point).
        print('-zen diff:',pathsinfo[0]['Zenness']-pathsinfo[index]['Zenness'])
        print('-time diff:',(pathsinfo[index]['currenttime']-pathsinfo[0]['currenttime'])/60)
        print('---------------------------------------')
        Zenscore_pts.append(pathsinfo[index]['Zenness'])
        time_pts.append(pathsinfo[index]['currenttime']/60)
    # Plot All Route Options
    routestyles=[]
    listcolors = ['#cc9999','#ccff99','#999933','#ffcc99','#996633','#767777']
    # listcolors = ['#ffff4d','#66ff66','#00cd00','#008b00','#006400','#cc9999','#ccff99','#999933']
    patches = []
    for index,color in enumerate(listcolors):
        patch = mpatches.Patch(color=color, label='Route '+str(index))
        patches.append(patch)
    # plt.legend(handles = patches)
    # plt.show()
    for index in range(0,len(paths),1):
        # NOTE(review): `dict` shadows the builtin here; rename on next edit.
        dict = {'color': listcolors[index],'width': 10,'name': 'Route '+str(index)+':'}
        routestyles.append(dict)
    Zen_std = std(nx.get_edge_attributes(Gchosen,'Zenness').values())
    networkdisplay(Gchosen,routes=paths,graphstyle='RdYlBu_r',routestyles = routestyles,
                   weightstring='Zenness',normValue=6.0*Zen_std, title='Pareto Optimal Routes')
    # # Plot Pareto Frontier
    # fig,ax = plt.subplots()
    # MIN = min(time_pts)
    # time_pts[:]=[value/MIN for value in time_pts] # Normalize time to minimum value
    # ax.scatter(Zenscore_pts,time_pts,s=10)
    # plt.title('Pareto Frontier Example')
    # plt.xlabel('Zenscores')
    # plt.ylabel('Time Normalized to Fastest Route')
    # plt.show()
    #
    # for index,weightgroup in enumerate(cluster_weights):
    #     if(len(weightgroup)==1):
    #         a = "%.2f" % weightgroup[0]
    #         ax.annotate('['+a+']',(Zenscore_pts[index],time_pts[index]))
    #     else:
    #         a = "%.2f" % weightgroup[0]
    #         b = "%.2f" % weightgroup[-1]
    #         ax.annotate('['+a+'-'+b+']',(Zenscore_pts[index],time_pts[index]))
    # Get User Feedback:
    print('Options:')
    print('Enter list of acceptable routes separated by commas')
    print('Route indicated by number 0-'+str(len(paths)-1)+'')
    print('Example => 1,2,3')
    print('OR')
    print("'s' for skip and 'r' for refine:")
    string = sys.stdin.readline()
    # NOTE(review): taking every other character assumes single-digit route
    # numbers separated by single commas ("1,2,3"); >=10 routes would break.
    if(string[0] != 'r' and string[0] != 's'):
        chosenindices = [int(element) for index,element in enumerate(string) if(index % 2 == 0) ]
    if(string[0] == 'r'):
        # Prune Options:
        print('Options:')
        print('Enter list separated by commas of choices to view')
        print('Example => 1,2,3')
        string = sys.stdin.readline()
        chosenindices = [int(element) for index,element in enumerate(string) if(index % 2 == 0) ]
        print('Chosen:',chosenindices)
        # Plot All Chosen Options
        routestyles=[]
        for index in chosenindices:
            dict = {'color': listcolors[index],'width': 10,'name': 'Route '+str(index)+':'}
            routestyles.append(dict)
        chosenpaths = [paths[i] for i in chosenindices]
        # NOTE(review): this indexes `paths`, not `pathsinfo` -- looks like a
        # copy/paste slip, though the variable is never read afterwards.
        chosenpathinfos = [paths[i] for i in chosenindices]
        Zen_std = std(nx.get_edge_attributes(Gchosen,'Zenness').values())
        networkdisplay(Gchosen,routes=chosenpaths,graphstyle='RdYlBu_r',routestyles = routestyles,
                       weightstring='Zenness',normValue=6.0*Zen_std, title='Pareto Optimal Routes')
        # Get Refined User Feedback:
        print('Options:')
        print('Enter list of acceptable routes separated by commas')
        print('Route indicated by number 0-'+str(len(paths)-1)+'')
        print('Example => 1,2,3')
        print('OR')
        print("'s' for skip and 'r' for refine:")
        string = sys.stdin.readline()
        if(string[0] != 'r' and string[0] != 's'):
            chosenindices = [int(element) for index,element in enumerate(string) if(index % 2 == 0) ]
    if(string[0] != 's'):
        # Save contribution to weight error distribution
        for chosenindex in chosenindices:
            for weight in cluster_weights[chosenindex]:
                weightchosen[weight] += 1
        print(chosenindices)
# Print Estimate of Factor Weight Error Distribution
fig,ax = plt.subplots()
x = sorted(weightchosen.keys())
# Error probability = fraction of iterations in which the weight was NOT chosen.
y = [1.0-float(weightchosen[key])/float(numiter) for key in x]
ax.bar(x,y)
ax.set_xlim([0,1])
plt.title('Probability of Error vs. Zenweight')
plt.xlabel('Zenweight')
plt.ylabel('Prob. of Error')
plt.show()
# Save Information
folder = filepath = os.path.abspath(os.path.join(cwd, '..', 'Project Data','GradientOptimization',person))
filename = "ErrorDistribution.json"
filepath = os.path.abspath(os.path.join(folder,filename))
with open(filepath, 'w') as outfile:
    json.dump(y, outfile)
filename = "zenweights.json"
filepath = os.path.abspath(os.path.join(folder,filename))
with open(filepath, 'w') as outfile:
    json.dump(x, outfile)
|
import tigger.cluda.dtypes as dtypes
from tigger.core.transformation import *
from tigger.cluda.kernel import render_prelude, render_template
class Argument:
    """Kernel argument descriptor: its C type plus load/store macro calls."""

    def __init__(self, name, dtype):
        self._name = name
        self.dtype = dtype
        self.ctype = dtypes.ctype(dtype)
        self.load = load_macro_call(name)
        self.store = store_macro_call(name)

    def __str__(self):
        # Rendered as the leaf (external) name inside kernel templates.
        return leaf_name(self._name)
class OperationRecorder:
    """Accumulates the operations (kernel calls, nested computations) and the
    temporary/constant allocations that make up a computation plan."""
    def __init__(self, ctx, tr_tree, basis, base_values):
        # ctx: CLUDA context used for compilation and allocation.
        # tr_tree: transformation tree for the computation's arguments.
        # basis: computation basis (problem parameters).
        # base_values: name -> value mapping for the external arguments.
        self._ctx = ctx
        self._tr_tree = tr_tree
        self.basis = basis
        self.values = AttrDict(base_values)
        self.operations = []
        self._allocations = {}
        self._const_allocations = {}
        self._temp_counter = 0
        self._const_counter = 0
    def add_allocation(self, shape, dtype):
        """
        Adds an allocation to the list of actions.
        Returns the string which can be used later in the list of argument names for kernels.
        """
        name = "_temp" + str(self._temp_counter)
        self._temp_counter += 1
        value = ArrayValue(shape, dtype)
        self.values[name] = value
        self._allocations[name] = value
        self._tr_tree.add_temp_node(name, value)
        return name
    def add_const_allocation(self, data):
        """Registers a constant array; returns its generated argument name."""
        name = "_const" + str(self._const_counter)
        self._const_counter += 1
        value = ArrayValue(data.shape, data.dtype)
        self.values[name] = value
        self._const_allocations[name] = data
        self._tr_tree.add_temp_node(name, value)
        return name
    def add_kernel(self, template, defname, argnames,
            global_size, local_size=None, render_kwds=None, inplace=None):
        """
        Adds kernel execution to the list of actions.
        See :ref:`tutorial-advanced-computation` for details on how to write kernels.
        :param template: Mako template for the kernel.
        :param defname: name of the definition inside the template.
        :param argnames: names of the arguments the kernel takes.
            These must either belong to the list of external argument names,
            or be allocated by :py:meth:`add_allocation` earlier.
        :param global_size: global size to use for the call.
        :param local_size: local size to use for the call.
            If ``None``, the local size will be picked automatically.
        :param render_kwds: dictionary with additional values used to render the template.
        :param inplace: list of pairs (output, input) which can point to the same point in memory
            (used as a hint for the temporary memory manager).
        """
        subtemplate = template.get_def(defname)
        # Every argument must be known: external or previously allocated.
        assert set(argnames).issubset(set(self.values))
        args = [Argument(name, self.values[name].dtype) for name in argnames]
        if render_kwds is None:
            render_kwds = {}
        additional_kwds = dict(
            basis=self.basis,
            kernel_definition=kernel_definition(defname))
        # check that user keywords do not overlap with our keywords
        intersection = set(render_kwds).intersection(additional_kwds)
        if len(intersection) > 0:
            raise ValueError("Render keywords clash with internal variables: " +
                ", ".join(intersection))
        render_kwds = dict(render_kwds) # shallow copy
        render_kwds.update(additional_kwds)
        src = render_template(subtemplate, *args, **render_kwds)
        op = KernelCall(defname, argnames, src, global_size, local_size=local_size)
        op.prepare(self._ctx, self._tr_tree)
        self.operations.append(op)
    def add_computation(self, cls, *argnames, **kwds):
        """
        Adds a nested computation call. The ``computation`` value must be a computation
        with necessary basis set and transformations connected.
        ``argnames`` list specifies which positional arguments will be passed to this kernel.
        """
        operation = ComputationCall(cls, *argnames, **kwds)
        # Re-apply the transformations already connected for these arguments.
        connections = self._tr_tree.connections_for(operation.argnames)
        for tr, array_arg, new_array_args, new_scalar_args in connections:
            operation.connect(tr, array_arg, new_array_args, new_scalar_args)
        operation.prepare({name:value for name, value in self._tr_tree.leaf_signature()})
        self.operations.append(operation)
    def optimize_execution(self):
        # In theory, we can optimize the usage of temporary buffers with help of views
        # Now we just allocate them separately
        self.allocations = {}
        for name, value in self._allocations.items():
            self.allocations[name] = self._ctx.allocate(
                value.shape, value.dtype)
        for name, data in self._const_allocations.items():
            self.allocations[name] = self._ctx.to_device(data)
class Allocate:
    """Record of a named temporary-buffer allocation (shape + dtype)."""

    def __init__(self, name, shape, dtype):
        self.name = name
        self.shape = shape
        self.dtype = dtype
class ComputationCall:
    """Wraps a nested computation, translating between the caller's (external)
    argument names and the computation's own (internal) leaf names."""
    def __init__(self, computation, *argnames, **kwds):
        self.computation = computation
        self.argnames = argnames
        self.kwds = kwds
        self._update_maps()
    def _update_maps(self):
        # Rebuild the external<->internal name maps from the current leaf
        # signature; zip pairs names by position.
        argnames = [x for x, _ in self.computation.leaf_signature()]
        self.map_to_internal = {external_name:internal_name
            for external_name, internal_name in zip(self.argnames, argnames)}
        self.map_to_external = {internal_name:external_name
            for external_name, internal_name in zip(self.argnames, argnames)}
    def prepare(self, values):
        """Prepare the nested computation for the given argument values and
        record its leaf argument names translated to external terms."""
        args = [values[name] for name in self.argnames]
        self.computation.prepare_for(*args, **self.kwds)
        replace = lambda x: self.map_to_external.get(x, x)
        argnames = [x for x, _ in self.computation.leaf_signature()]
        self.leaf_argnames = [replace(name) for name in argnames]
    def __call__(self, *args):
        self.computation(*args)
    def connect(self, tr, array_arg, new_array_args, new_scalar_args=None):
        """Attach transformation *tr* to *array_arg* (an external name) and
        refresh the argument list to match the new leaf signature."""
        internal_array_arg = self.map_to_internal[array_arg]
        self.computation.connect(tr, internal_array_arg, new_array_args, new_scalar_args)
        new_signature = [x for x, _ in self.computation.leaf_signature()]
        new_argnames = []
        # Keep existing external names; adopt newly introduced array/scalar
        # argument names as-is.
        for internal_name in new_signature:
            if internal_name in self.map_to_external:
                new_argnames.append(self.map_to_external[internal_name])
            elif internal_name in new_array_args:
                new_argnames.append(internal_name)
            elif new_scalar_args is not None and internal_name in new_scalar_args:
                new_argnames.append(internal_name)
        self.argnames = new_argnames
        self._update_maps()
class KernelCall:
    """A single static-kernel invocation: holds the kernel source, the base
    argument names and the launch geometry; ``prepare`` compiles it."""

    def __init__(self, name, base_argnames, base_src, global_size,
            local_size=None):
        self.name = name
        self.src = base_src
        self.base_argnames = list(base_argnames)
        self.global_size = global_size
        self.local_size = local_size

    def prepare(self, ctx, tr_tree):
        """Prepend transformation code, compile, and record the leaf arg names."""
        prelude = tr_tree.transformations_for(self.base_argnames)
        self.full_src = prelude + self.src
        self.kernel = ctx.compile_static(
            self.full_src, self.name, self.global_size,
            local_size=self.local_size)
        self.leaf_argnames = [
            name for name, _ in tr_tree.leaf_signature(self.base_argnames)]

    def __call__(self, *args):
        self.kernel(*args)
|
import numpy as np
import sys
sys.path.append("game/")
import skimage
from skimage import transform, color, exposure
import keras
from keras.models import Sequential, Model, load_model
from keras.layers import Dense, Flatten, Activation, Input
from keras.layers.convolutional import Convolution2D
from keras.optimizers import RMSprop
import keras.backend as K
from keras.callbacks import LearningRateScheduler, History
import tensorflow as tf
import pygame
import wrapped_flappy_bird as game
import scipy.misc
import scipy.stats as st
import threading
import time
import math
import matplotlib.pyplot as plt
import pandas as pd
import random
GAMMA = 0.99  # discount value for returns
BETA = 0.01  # regularisation coefficient (entropy term, currently disabled in logloss)
IMAGE_ROWS = 40
IMAGE_COLS = 40
IMAGE_CHANNELS = 4  # number of consecutive grayscale frames stacked as net input
LEARNING_RATE = 7e-4
EPISODE = 0  # global update counter
THREADS = 8  # parallel actor threads, one game instance each
t_max = 5  # rollout length per thread between gradient updates
const = 1e-5  # small epsilon keeping log() finite in the losses
T = 0  # global frame counter shared by all threads
episode_r = []
episode_state = np.zeros((0, IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS))
episode_output = []
episode_critic = []
ACTIONS = 2  # action space: index 0 = no flap, 1 = flap
a_t = np.zeros(ACTIONS)
policyLoss = 0
criticLoss = 0
lrate = 0
# EPSILON (epsilon-greedy exploration schedule)
FINAL_EPSILON = 0.01 # final value of epsilon
INITIAL_EPSILON = 0.9 # starting value of epsilon
epsilon = INITIAL_EPSILON
EXPLORE = 50000.  # number of decay steps from INITIAL_EPSILON to FINAL_EPSILON
# Loss function for policy output
def logloss(y_true, y_pred):  # policy loss
    """Negative log-likelihood of the taken action under a Bernoulli policy."""
    likelihood = y_true * y_pred + (1 - y_true) * (1 - y_pred)
    return -K.sum(K.log(likelihood + const), axis=-1)
    # disabled entropy regularisation term:
    # BETA * K.sum(y_pred * K.log(y_pred + const) + (1-y_pred) * K.log(1-y_pred + const))
# Loss function for critic output
def sumofsquares(y_true, y_pred):  # critic loss
    """Sum of squared errors between critic prediction and target return."""
    err = y_pred - y_true
    return K.sum(K.square(err), axis=-1)
# Function buildmodel() to define the structure of the neural network in use
def buildmodel():
    """Build and compile the two-headed A3C network.

    Maps a stacked 4-frame grayscale observation (IMAGE_ROWS x IMAGE_COLS x
    IMAGE_CHANNELS) to two outputs: 'o_P' (sigmoid flap probability, policy
    head) and 'o_V' (linear state value, critic head).

    Removed dead code from the original: an unused ``Sequential()`` instance
    that was immediately overwritten, and a ``keras.initializers.RandomUniform``
    call whose result was discarded (the layers use the string initializer
    'random_uniform' instead).
    """
    print("Model building begins")
    S = Input(shape=(IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS,), name='Input')
    h0 = Convolution2D(16, kernel_size=(8, 8), strides=(4, 4), activation='relu',
                       kernel_initializer='random_uniform',
                       bias_initializer='random_uniform')(S)
    h1 = Convolution2D(32, kernel_size=(4, 4), strides=(2, 2), activation='relu',
                       kernel_initializer='random_uniform',
                       bias_initializer='random_uniform')(h0)
    h2 = Flatten()(h1)
    h3 = Dense(256, activation='relu', kernel_initializer='random_uniform',
               bias_initializer='random_uniform')(h2)
    # Policy head: probability of flapping.
    P = Dense(1, name='o_P', activation='sigmoid', kernel_initializer='random_uniform',
              bias_initializer='random_uniform')(h3)
    # Critic head: state value estimate (linear activation).
    V = Dense(1, name='o_V', kernel_initializer='random_uniform',
              bias_initializer='random_uniform')(h3)
    model = Model(inputs=S, outputs=[P, V])
    rms = RMSprop(lr=LEARNING_RATE, rho=0.99, epsilon=0.1)
    model.compile(loss={'o_P': logloss, 'o_V': sumofsquares},
                  loss_weights={'o_P': 1., 'o_V': 0.5}, optimizer=rms)
    return model
#Function to preprocess an image before giving as input to the neural network
def preprocess(image):
    """Convert a raw RGB game frame to the (1, IMAGE_ROWS, IMAGE_COLS, 1)
    grayscale tensor expected by the network."""
    image = skimage.color.rgb2gray(image)
    image = skimage.transform.resize(image, (IMAGE_ROWS, IMAGE_COLS), mode = 'constant')
    image = skimage.exposure.rescale_intensity(image, out_range=(0,255))
    # NOTE(review): rescaling with in_range=(1, 2) right after stretching to
    # 0..255 saturates almost every pixel — this looks like a deliberate
    # near-binarisation of the frame, but confirm; the out_range rescale is
    # then applied a second time.
    image = exposure.rescale_intensity(image, in_range=(1, 2))
    image = skimage.exposure.rescale_intensity(image, out_range=(0, 255))
    # Add batch and channel axes.
    image = image.reshape(1, image.shape[0], image.shape[1], 1)
    return image
# Initialize a new model using buildmodel() or use load_model to resume training an already trained model
model = buildmodel()
# Model = load_model("saved_models/model_updates3900", custom_objects={'logloss': logloss, 'sumofsquares': sumofsquares})
# Finalize the predict function so the model can be shared across threads,
# and capture the TF graph for use inside worker threads.
model._make_predict_function()
graph = tf.get_default_graph()
# Secondary model exposing only the policy head, used for logging.
intermediate_layer_model = Model(inputs=model.input, outputs=model.get_layer('o_P').output)
a_t[0] = 1  # Index 0 = no flap, 1 = flap
# Output of network represents probability of flap
# One independent game instance per actor thread.
game_state = []
for i in range(0, THREADS):
    game_state.append(game.GameState(30000))
# Per-step play log (thread 0 only) and its bookkeeping counters.
playLog = pd.DataFrame(columns=['score', 'random', 'predictedChoice', 'verySureMean'])
logCnt = 0
score = 0
randList = []   # rolling record of random-action flags
randMean = 0
verySureList = []  # rolling record of "network was confident" flags
verySureMean = 0
myCount = 0  # frame-dump counter (used only by the disabled SAVE FRAMES branch)
def runprocess(thread_id, s_t):
    """Run one rollout (up to t_max steps, or until death) for one actor thread.

    Returns (s_t, state_store, output_store, r_store, critic_store): the final
    stacked state plus per-step states, taken actions (0/1), discounted
    returns and critic value estimates. Mutates several module-level
    counters/logs (T, score, epsilon, randList, ...), hence all the globals.
    """
    global T
    global a_t
    global model
    global score
    global logCnt
    global trainingLog
    global epsilon
    global randList
    global verySureList
    global verySureMean
    global randMean
    global myCount
    t = 0
    t_start = t
    terminal = False
    r_t = 0
    r_store = []
    state_store = np.zeros((0, IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS))
    output_store = []
    critic_store = []
    # Add the batch axis expected by model.predict.
    s_t = s_t.reshape(1, s_t.shape[0], s_t.shape[1], s_t.shape[2])
    randomAct = 0
    verySure = 0
    choices = [[0, 1], [1, 0]]  # one-hot actions: [1,0] = no flap, [0,1] = flap
    while t - t_start < t_max and terminal == False:
        t += 1
        T += 1
        intermediate_output = 0
        # action_index = 0
        # choices = np.zeros([ACTIONS])
        # Predictions must run under the graph captured at startup because
        # this executes on a worker thread.
        with graph.as_default():
            predictedChoice = model.predict(s_t)[0]
            intermediate_output = intermediate_layer_model.predict(s_t)
        # EPSILON DRIVEN — the active action-selection strategy. The
        # `if 1 == 2:` blocks below are disabled alternatives kept for
        # experimentation.
        if 1 == 1:
            # IF VERY SURE THEN USE PREDICTION
            # NOTE(review): the 0.3..0.7 band is labelled "very sure" but is
            # actually the UNcertain band — confirm the intent.
            if 0.3 < predictedChoice and predictedChoice < 0.7:
                randomAct = 0
                verySure = 1
                if 0.5 < predictedChoice:
                    a_t = [0, 1]
                else:
                    a_t = [1, 0]
            # IF NOT SURE LETS TRY RANDOM SOMETIMES, BUT LESS AND LESS
            else:
                verySure = 0
                randomChoice = np.random.rand()
                if randomChoice < epsilon:
                    randomAct = 1
                    ActionIndex = np.random.randint(0, 2)
                    a_t = choices[ActionIndex]
                else:
                    randomAct = 0
                    a_t = [0, 1] if 0.5 < predictedChoice else [1, 0]
        # DETERMINISTIC (disabled)
        if 1 == 2:
            if 0.5 < predictedChoice:
                a_t = [0, 1]
            else:
                a_t = [1, 0]
        # SUCCESSFUL ORIGINAL REWRITTEN (disabled)
        if 1 == 2:
            randomChoice = np.random.rand()
            if randomChoice < predictedChoice:
                a_t = [0, 1]
            else:
                a_t = [1, 0]
            # Times when random choice overwrites prediction
            if 0.5 < predictedChoice and randomChoice > predictedChoice:
                randomAct = 1
            else:
                randomAct = 0
            if predictedChoice < 0.5 and randomChoice < predictedChoice:
                randomAct = 1
            else:
                randomAct = 0
        # SUCCESSFUL ORIGINAL (disabled)
        if 1 == 2:
            randomChoice = np.random.rand()
            a_t = [0, 1] if randomChoice < predictedChoice else [1, 0]  # stochastic action
        # x_t (next frame), r_t (0.1 if alive, +1.5 if it passes the pipe, -1 if it dies) and the input is a_t (action)
        x_t, r_t, terminal = game_state[thread_id].frame_step(a_t)
        x_t = preprocess(x_t)
        # SAVE FRAMES (disabled debugging branch)
        if 1 == 2:
            if thread_id == 0:
                mat = x_t[0, :, :, 0]
                myCount += 1
                print(mat)
                # SAVE TO CSV
                # fileName = "C:/Users/Treebeard/PycharmProjects/A3C_Keras_FlappyBird/spitout/img_" + str(myCount) + ".csv"
                # np.savetxt(fileName, mat, fmt='%2.0f', delimiter=",")
                # PLOT
                plt.imshow(mat, cmap='hot')
                #plt.show()
                fileName2 = "C:/Users/Treebeard/PycharmProjects/A3C_Keras_FlappyBird/spitout/img_" + str(myCount) + ".png"
                plt.savefig(fileName2)
        # LOG GAME STEP (thread 0 only): rolling means over the last 40 steps.
        if thread_id == 0:
            randList.append(randomAct)
            verySureList.append(verySure)
            if len(randList) > 41:
                randMean = np.mean(randList[-40::])
            else:
                randMean = 0.5
            if len(verySureList) > 41:
                verySureMean = np.mean(verySureList[-40::])
            else:
                verySureMean = 0.0
            score = score + r_t
            playLog.loc[logCnt] = score, randMean, predictedChoice, verySureMean
            logCnt += 1
            if logCnt % 100 == 0:
                playLog.to_csv("playLog.csv", index=True)
            if terminal == True:
                score = 0
        # Critic's value estimate for the state we just acted from.
        with graph.as_default():
            critic_reward = model.predict(s_t)[1]
        # y encodes the taken action as a scalar label for the policy loss.
        y = 0 if a_t[0] == 1 else 1
        r_store = np.append(r_store, r_t)
        state_store = np.append(state_store, s_t, axis=0)
        output_store = np.append(output_store, y)
        critic_store = np.append(critic_store, critic_reward)
        # Slide the frame stack: newest frame first, drop the oldest.
        s_t = np.append(x_t, s_t[:, :, :, :3], axis=3)
        print(
            "Frame = " + str(T) + ", Updates = " + str(EPISODE) + ", Thread = " + str(thread_id) + ", Output = " + str(
                intermediate_output))
    # Bootstrap the return from the critic if the rollout was truncated;
    # use the death penalty and reset the frame stack if the bird died.
    if terminal == False:
        r_store[len(r_store) - 1] = critic_store[len(r_store) - 1]
    else:
        r_store[len(r_store) - 1] = -1
        s_t = np.concatenate((x_t, x_t, x_t, x_t), axis=3)
    # Backward pass of discounted returns.
    for i in range(2, len(r_store) + 1):
        r_store[len(r_store) - i] = r_store[len(r_store) - i] + GAMMA * r_store[len(r_store) - i + 1]
    # LOWER EPSILON
    if epsilon > FINAL_EPSILON:
        epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE
    return s_t, state_store, output_store, r_store, critic_store
# function to decrease the learning rate after every epoch. In this manner, the learning rate reaches 0, by 20,000 epochs
def step_decay(epoch):
    """Linear learning-rate schedule; also mirrors the value into the
    module-level ``lrate`` for logging."""
    global lrate
    lrate = max(LEARNING_RATE - epoch * 3.2e-8, 0)
    return lrate
class actorthread(threading.Thread):
    """Worker thread: runs one rollout per ``run`` call and appends the
    results to the shared episode_* accumulators."""

    def __init__(self, thread_id, s_t):
        threading.Thread.__init__(self)
        self.thread_id = thread_id
        self.next_state = s_t  # stacked frame state carried across episodes

    def run(self):
        global episode_output
        global episode_r
        global episode_critic
        global episode_state
        # NOTE(review): the lock is held for the WHOLE rollout, so the
        # "threads" actually run their rollouts serially — confirm intent.
        threadLock.acquire()
        self.next_state, state_store, output_store, r_store, critic_store = runprocess(self.thread_id, self.next_state)
        # Drop the batch axis before handing the state back to the main loop.
        self.next_state = self.next_state.reshape(self.next_state.shape[1], self.next_state.shape[2],
                                                  self.next_state.shape[3])
        episode_r = np.append(episode_r, r_store)
        episode_output = np.append(episode_output, output_store)
        episode_state = np.append(episode_state, state_store, axis=0)
        episode_critic = np.append(episode_critic, critic_store)
        threadLock.release()
# Per-thread stacked states; refilled after every round of rollouts.
states = np.zeros((0, IMAGE_ROWS, IMAGE_COLS, 4))
# initializing state of each thread
for i in range(0, len(game_state)):
    image = game_state[i].getCurrentFrame()
    image = preprocess(image)
    state = np.concatenate((image, image, image, image), axis=3)
    states = np.append(states, state, axis=0)
cnt = 0
df = pd.DataFrame(columns=['reward_mean', "epsilon", "lrate", 'loss', 'policy_loss', 'critic_loss'])
# Main A3C training loop: fan out rollouts, then one gradient update.
while True:
    threadLock = threading.Lock()
    threads = []
    for i in range(0, THREADS):
        threads.append(actorthread(i, states[i]))
    states = np.zeros((0, IMAGE_ROWS, IMAGE_COLS, 4))
    for i in range(0, THREADS):
        threads[i].start()
    # thread.join() ensures that all threads fininsh execution before proceeding further
    for i in range(0, THREADS):
        threads[i].join()
    # Collect each thread's final state for the next round.
    for i in range(0, THREADS):
        state = threads[i].next_state
        state = state.reshape(1, state.shape[0], state.shape[1], state.shape[2])
        states = np.append(states, state, axis=0)
    e_mean = np.mean(episode_r)
    # advantage calculation for each action taken
    advantage = episode_r - episode_critic
    print("backpropagating")
    lrate = LearningRateScheduler(step_decay)
    callbacks_list = [lrate]
    # Policy head samples are weighted by the advantage (A3C policy gradient).
    weights = {'o_P': advantage, 'o_V': np.ones(len(advantage))}
    # backpropagation: one epoch over the collected rollout batch
    history = model.fit(episode_state, [episode_output, episode_r], epochs=EPISODE + 1, batch_size=len(episode_output),
                        callbacks=callbacks_list, sample_weight=weights, initial_epoch=EPISODE)
    # Reset the shared accumulators for the next round.
    episode_r = []
    episode_output = []
    episode_state = np.zeros((0, IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS))
    episode_critic = []
    # LOG SAVER
    df.loc[cnt] = e_mean, epsilon, lrate, history.history['loss'], history.history['o_P_loss'], history.history[
        'o_V_loss']
    cnt += 1
    if cnt % 100 == 0:
        df.to_csv("trainingLog.csv", index=True)
    # Checkpoint every 50 updates; stop after 15000.
    if EPISODE % 50 == 0:
        model.save("saved_models/model_updates" + str(EPISODE))
    EPISODE += 1
    if EPISODE > 15000:
        break
print("GOT OUT")
# --------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------
BETA = 0.01  # regularisation coefficient (re-declared for the standalone eval section)
const = 1e-5  # small epsilon keeping log() finite in the losses
# loss function for policy output
# NOTE(review): this redefines logloss from earlier in the file, identically —
# it only needs to exist so load_model can resolve the custom loss below.
def logloss(y_true, y_pred):  # policy loss
    """Negative log-likelihood of the taken action under a Bernoulli policy."""
    return -K.sum(K.log(y_true * y_pred + (1 - y_true) * (1 - y_pred) + const), axis=-1)
    # BETA * K.sum(y_pred * K.log(y_pred + const) + (1-y_pred) * K.log(1-y_pred + const)) #regularisation term
# loss function for critic output
# NOTE(review): identical redefinition of the earlier sumofsquares; needed by
# load_model's custom_objects below.
def sumofsquares(y_true, y_pred):  # critic loss
    """Sum of squared errors between critic prediction and target return."""
    return K.sum(K.square(y_pred - y_true), axis=-1)
# NOTE(review): redefinition of the earlier preprocess with the 40x40 size
# hard-coded instead of using IMAGE_ROWS/IMAGE_COLS.
def preprocess(image):
    """Convert a raw RGB game frame to the (1, 40, 40, 1) grayscale tensor."""
    image = skimage.color.rgb2gray(image)
    image = skimage.transform.resize(image, (40, 40), mode='constant')
    image = skimage.exposure.rescale_intensity(image, out_range=(0, 255))
    # NOTE(review): in_range=(1, 2) after a 0..255 stretch saturates almost
    # every pixel — looks like deliberate near-binarisation, confirm.
    image = exposure.rescale_intensity(image, in_range=(1, 2))
    image = skimage.exposure.rescale_intensity(image, out_range=(0, 255))
    image = image.reshape(1, image.shape[0], image.shape[1], 1)
    return image
# Fresh single game instance and episode bookkeeping for evaluation.
game_state = game.GameState(30)
currentScore = 0
topScore = 0
a_t = [1, 0]  # start with "no flap"
FIRST_FRAME = True
terminal = False
r_t = 0
myCount = 1
# -------------- code for checking performance of saved models by finding average scores for 10 runs------------------
evalGamer = pd.DataFrame(columns=['model','evalScore'])
logCnt = 0
models = [""]
fileName = "saved_models/model_updates"
modelName = 0
# Evaluate checkpoints model_updates50, 100, ... one game each.
for i in range(1, 300):
    modelName += 50
    fileName = "saved_models/model_updates" + str(modelName)
    model = load_model(fileName, custom_objects={'logloss': logloss, 'sumofsquares': sumofsquares})
    score = 0
    counter = 0
    # Play until the first death (r_t == -1 increments counter).
    while counter < 1:
        x_t, r_t, terminal = game_state.frame_step(a_t)
        score += 1  # score counts surviving frames
        if r_t == -1:
            counter += 1
        x_t = preprocess(x_t)
        if FIRST_FRAME:
            # Seed the 4-frame stack by repeating the first frame.
            s_t = np.concatenate((x_t, x_t, x_t, x_t), axis=3)
        else:
            s_t = np.append(x_t, s_t[:, :, :, :3], axis=3)
        y = model.predict(s_t)
        no = np.random.random()
        #print(y)
        if FIRST_FRAME:
            a_t = [0, 1]
            FIRST_FRAME = False
        else:
            # Stochastic action: flap with probability given by the policy head.
            no = np.random.rand()
            a_t = [0, 1] if no < y[0] else [1, 0]
            # a_t = [0,1] if 0.5 <y[0] else [1,0]
        if r_t == -1:
            FIRST_FRAME = True
        # Periodic progress dump while a run is going well.
        if score % 200 == 0:
            evalGamer.loc[logCnt] = modelName, score
            evalGamer.to_csv("evalGamer.csv", index=True)
        if terminal == True:
            print("DIED", "SCORE:", score, "Model:", modelName)
            logCnt += 1
    # Final score for this checkpoint.
    evalGamer.loc[logCnt] = modelName, score
    evalGamer.to_csv("evalGamer.csv", index=True)
|
from numpy.ctypeslib import as_ctypes as as_c
import numpy as np
import ctypes as C
from .ctype_helper import load_lib
lib = load_lib("libSigPyProcTim.so")
class TimeSeries(np.ndarray):
    """Class for handling pulsar data in time series.

    :param input_array: 1 dimensional array of shape (nsamples)
    :type input_array: :class:`numpy.ndarray`

    :param header: observational metadata
    :type header: :class:`~sigpyproc.Header.Header`

    Fixes relative to the original: Python-2-only ``raise ValueError,"..."``
    and ``print`` statement syntax replaced with the parenthesised forms
    (valid on both Python 2 and 3), a bare ``except:`` narrowed to
    ``except Exception:``, and the ``.dat`` output file opened in binary
    mode as required by :meth:`numpy.ndarray.tofile`.
    """

    def __new__(cls, input_array, header):
        # View rather than copy when the input is already float32.
        if getattr(input_array, "dtype", False) == np.dtype("float32"):
            obj = input_array.view(cls)
        else:
            obj = input_array.astype("float32").view(cls)
        obj.header = header
        return obj

    def __array_finalize__(self, obj):
        # Called by numpy on every view/slice: propagate the header.
        if obj is None:
            return
        if hasattr(obj, "header"):
            self.header = obj.header

    def fold(self, period, accel=0, nbins=50, nints=32):
        """Fold time series into discrete phase and subintegration bins.

        :param period: period in seconds to fold with
        :type period: float

        :param nbins: number of phase bins in output
        :type nbins: int

        :param nints: number of subintegrations in output
        :type nints: int

        :returns: data cube containing the folded data
        :rtype: :class:`~sigpyproc.FoldedData.FoldedData`
        """
        # Require at least ~10 samples per output bin.
        if self.size / (nbins * nints) < 10:
            raise ValueError("nbins x nints is too large for length of data")
        fold_ar = np.zeros(nbins * nints, dtype="float64")
        count_ar = np.zeros(nbins * nints, dtype="int32")
        lib.foldTim(as_c(self),
                    as_c(fold_ar),
                    as_c(count_ar),
                    C.c_double(self.header.tsamp),
                    C.c_double(period),
                    C.c_double(accel),
                    C.c_int(self.size),
                    C.c_int(nbins),
                    C.c_int(nints))
        # Normalise each bin by the number of samples that landed in it.
        fold_ar /= count_ar
        fold_ar = fold_ar.reshape(nints, 1, nbins)
        return FoldedData(fold_ar,
                          self.header.newHeader(),
                          period,
                          self.header.refdm,
                          accel)

    def rFFT(self):
        """Perform 1-D real to complex forward FFT.

        :return: output of FFTW3
        :rtype: :class:`~sigpyproc.FourierSeries.FourierSeries`
        """
        # FFTW real transforms want an even number of samples.
        if self.size % 2 == 0:
            fftsize = self.size
        else:
            fftsize = self.size - 1
        fft_ar = np.empty(fftsize + 2, dtype="float32")
        lib.rfft(as_c(self),
                 as_c(fft_ar),
                 fftsize)
        return FourierSeries(fft_ar, self.header.newHeader())

    def runningMean(self, window=10001):
        """Filter time series with a running mean.

        :param window: width in bins of running mean filter
        :type window: int

        :return: filtered time series
        :rtype: :class:`~sigpyproc.TimeSeries.TimeSeries`

        .. note::

           Window edges will be dealt with only at the start of the time series.
        """
        tim_ar = np.empty_like(self)
        lib.runningMean(as_c(self),
                        as_c(tim_ar),
                        C.c_int(window),
                        C.c_int(self.size))
        return tim_ar.view(TimeSeries)

    def runningMedian(self, window=10001):
        """Filter time series with a running median.

        :param window: width in bins of running median filter
        :type window: int

        :returns: filtered time series
        :rtype: :class:`~sigpyproc.TimeSeries.TimeSeries`

        .. note::

           Window edges will be dealt with only at the start of the time series.
        """
        tim_ar = np.empty_like(self)
        lib.runningMedian(as_c(self),
                          as_c(tim_ar),
                          C.c_int(window),
                          C.c_int(self.size))
        return tim_ar.view(TimeSeries)

    def applyBoxcar(self, width):
        """Apply a boxcar filter to the time series.

        :param width: width in bins of filter
        :type width: int

        :return: filtered time series
        :rtype: :class:`~sigpyproc.TimeSeries.TimeSeries`

        .. note::

           Time series returned is of size nsamples-width with width/2 removed from either end.
        """
        tim_ar = np.empty_like(self)
        lib.runBoxcar(as_c(self),
                      as_c(tim_ar),
                      C.c_int(width),
                      C.c_int(self.size))
        return tim_ar.view(TimeSeries)

    def downsample(self, factor):
        """Downsample the time series.

        :param factor: factor by which time series will be downsampled
        :type factor: int

        :return: downsampled time series
        :rtype: :class:`~sigpyproc.TimeSeries.TimeSeries`

        .. note::

           Returned time series is of size nsamples//factor
        """
        if factor == 1:
            return self
        newLen = self.size // factor
        tim_ar = np.zeros(newLen, dtype="float32")
        lib.downsampleTim(as_c(self),
                          as_c(tim_ar),
                          C.c_int(factor),
                          C.c_int(newLen))
        # The sampling time grows by the downsampling factor.
        return TimeSeries(tim_ar, self.header.newHeader({'tsamp': self.header.tsamp * factor}))

    def toDat(self, basename):
        """Write time series in presto ``.dat`` format.

        :param basename: file basename for output ``.dat`` and ``.inf`` files
        :type basename: string

        :return: ``.dat`` file name and ``.inf`` file name
        :rtype: :func:`tuple` of :func:`str`

        .. note::

           Method also writes a corresponding .inf file from the header data
        """
        self.header.makeInf(outfile="%s.inf" % (basename))
        # Binary mode is required: ndarray.tofile writes raw bytes and breaks
        # on text-mode handles under Python 3 (was opened "w+").
        datfile = open("%s.dat" % (basename), "wb+")
        # presto expects an even number of samples; drop the last if odd.
        if self.size % 2 != 0:
            self[:-1].tofile(datfile)
        else:
            self.tofile(datfile)
        return "%s.dat" % (basename), "%s.inf" % (basename)

    def toFile(self, filename):
        """Write time series in sigproc format.

        :param filename: output file name
        :type filename: str

        :return: output file name
        :rtype: :func:`str`
        """
        outfile = self.header.prepOutfile(filename)
        self.tofile(outfile)
        return outfile.name

    def pad(self, npad):
        """Pad a time series with mean valued data.

        :param npad: number of padding points
        :type npad: int

        :return: padded time series
        :rtype: :class:`~sigpyproc.TimeSeries.TimeSeries`
        """
        new_ar = np.hstack((self, self.mean() * np.ones(npad)))
        return TimeSeries(new_ar, self.header.newHeader())

    def resample(self, accel, jerk=0):
        """Perform time domain resampling to remove acceleration and jerk.

        :param accel: The acceleration to remove from the time series
        :type accel: float

        :param jerk: The jerk/jolt to remove from the time series
        :type jerk: float

        :return: resampled time series
        :rtype: :class:`~sigpyproc.TimeSeries.TimeSeries`
        """
        if accel > 0:
            new_size = self.size - 1
        else:
            new_size = self.size
        out_ar = np.zeros(new_size, dtype="float32")
        print(new_size)
        lib.resample(as_c(self),
                     as_c(out_ar),
                     C.c_int(new_size),
                     C.c_float(accel),
                     C.c_float(self.header.tsamp))
        new_header = self.header.newHeader({"nsamples": out_ar.size,
                                            "accel": accel})
        return TimeSeries(out_ar, new_header)

    def correlate(self, other):
        """Cross correlate with another time series of the same length.

        :param other: array to correlate with
        :type other: :class:`numpy.ndarray`

        :return: time series containing the correlation
        :rtype: :class:`sigpyproc.TimeSeries.TimeSeries`
        """
        if type(self) != type(other):
            try:
                other = TimeSeries(other, self.header.newHeader())
            except Exception:
                # narrowed from a bare `except:` so KeyboardInterrupt /
                # SystemExit are no longer swallowed
                raise Exception("Could not convert argument to TimeSeries instance")
        # Correlation via the convolution theorem.
        return (self.rFFT() * other.rFFT()).iFFT()
from sigpyproc.FoldedData import FoldedData
from sigpyproc.FourierSeries import FourierSeries
|
#!/usr/bin/env python
# This search CMIP5 data available on raijin that matches constraints passed on by user and return paths for all available versions.
"""
Copyright 2016 ARC Centre of Excellence for Climate Systems Science
author: Paola Petrelli <paola.petrelli@utas.edu.au>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from ARCCSSive import CMIP5
from ARCCSSive.CMIP5.other_functions import combine_constraints
import argparse
import sys
# check python version and then call main()
# Abort early: the rest of the script relies on 2.7+ features (argparse, dict comprehensions).
if sys.version_info < ( 2, 7):
    # python too old, kill the script
    sys.exit("This script requires Python 2.7 or newer!")
def parse_input():
    ''' Parse input arguments '''
    parser = argparse.ArgumentParser(description='''Checks all the CMIP5 ensembles
    available on raijin, matching the constraints passed as arguments.
    Accept one value for the required arguments: experiment, model, variable, mip.
    The others are optional and can be repeated:
    example to select two particular ensembles:
    -e r1i1p1 r2i1p1
    The script returns all the ensembles satifying the constraints
    var1 AND model1 AND exp1 AND mip1 AND other optional constraints
    If a constraint isn't specified for one of the fields automatically all values
    for that field will be selected.''', formatter_class=argparse.RawTextHelpFormatter)
    # Required single-valued constraints.
    for flag, name, help_text in (
            ('-e', '--experiment', 'CMIP5 experiment'),
            ('-m', '--model', 'CMIP5 model'),
            ('-v', '--variable', 'CMIP5 variable'),
            ('-t', '--mip', 'CMIP5 MIP table')):
        parser.add_argument(flag, name, type=str, nargs=1, help=help_text, required=True)
    # Optional constraints; ensemble/version may be repeated.
    for flag, name, nargs, help_text in (
            ('-en', '--ensemble', "*", 'CMIP5 ensemble'),
            ('-ve', '--version', "*", 'CMIP5 version'),
            ('-c', '--checksum', 1, 'checksum_type: md5 or sha256'),
            ('-o', '--output', 1, 'output file name')):
        parser.add_argument(flag, name, type=str, nargs=nargs, help=help_text, required=False)
    return vars(parser.parse_args())
def assign_constraints():
    ''' Assign default values and input to constraints

    Returns the parsed arguments with all unset (None) options removed.
    '''
    kwargs = parse_input()
    # Iterate over a snapshot: popping from a dict while iterating its
    # items() view raises RuntimeError on Python 3.
    for k, v in list(kwargs.items()):
        if v is None:
            kwargs.pop(k)
    return kwargs
# Calling parse_input() function to build kwargs from external arguments paased by user
kwargs=assign_constraints()
# open output file (default name if -o was not given; values arrive as 1-element lists)
outfile=kwargs.pop("output",["search_result.txt"])
fout=open(outfile[0],'w')
# if checksum_type has been passed add checksum to output
checksum=False
cks = kwargs.pop("checksum",["None"])
if cks[0] in ["md5","sha256"]:
    checksum=True
    cks_type=cks[0]
# open connection to local database and intiate SQLalchemy session
cmip5 = CMIP5.connect()
# get constraints combination
combs=combine_constraints(**kwargs)
# for each constraints combination
for constraints in combs:
    db_results=[]
    print(constraints)
    # search on local DB, return instance_ids
    outputs=cmip5.outputs(**constraints)
    # loop through returned Instance objects, preferring versions flagged latest
    db_results=[v for o in outputs for v in o.versions if v.is_latest]
    # fall back to each instance's computed latest() version when nothing is flagged
    if db_results==[]:
        db_results=[v for o in outputs for v in o.versions if v==o.latest()[0]]
    # write result to output file
    if db_results==[]:
        print("No local version exists for constraints:\n",constraints)
    else:
        for v in db_results:
            fout.write(v.version + ", checksum: " + cks[0] + "\n")
            vpath=v.path + "/"
            # one line per file, optionally with the requested checksum appended
            if checksum:
                fout.writelines(vpath + f.filename + ", " + str(f.__getattribute__(cks_type)) + "\n" for f in v.files)
            else:
                fout.writelines(vpath + f.filename + "\n" for f in v.files)
fout.close()
|
import tensorflow as tf

# Tiny TF1 graph demo: multiply two scalar placeholders.
a = tf.placeholder("float")
b = tf.placeholder("float")
x = tf.constant(2.0)
c = tf.multiply(a, b)

# Evaluate a*b for a = 0..10, feeding the constant tensor as b.
with tf.Session() as sess:
    for step in range(11):
        print(sess.run(c, feed_dict={a: step, b: x}))
|
import datetime
from dateutil.relativedelta import relativedelta
import time
import requests
import json
import random
import os
import os.path
import pandas as pd
import shutil
import zipfile
#for text cleaning
import string
import re
class RedditHandler:
'''
class responsible for extracting and processing reddit data and the creation of users' network
'''
    # NOTE(review): the list defaults below are mutable default arguments —
    # harmless here because they are only read, but fragile if ever mutated.
    def __init__(self, out_folder, extract_post, extract_comment, categories, start_date, end_date, n_months=1, post_attributes=['id','author', 'created_utc', 'num_comments', 'over_18', 'is_self', 'score', 'selftext', 'stickied', 'subreddit', 'subreddit_id', 'title'], comment_attributes=['id', 'author', 'created_utc', 'link_id', 'parent_id', 'subreddit', 'subreddit_id', 'body', 'score']):
        '''
        Parameters
        ----------
        out_folder : str
            path of the output folder (created on disk if it does not exist)
        extract_post: bool
            True if you want to extract Post data, False otherwise
        extract_comment : bool
            True if you want to extract Comment data, False otherwise
        categories : dict
            dict with category name as key and list of subreddits in that category as value
        start_date : str
            beginning date in format %d/%m/%Y
        end_date : str
            end date in format %d/%m/%Y
        n_months : int
            integer indicating the time period considered
        post_attributes : list, optional
            post's attributes to be selected. The default is ['id','author', 'created_utc', 'num_comments', 'over_18', 'is_self', 'score', 'selftext', 'stickied', 'subreddit', 'subreddit_id', 'title']
        comment_attributes : list, optional
            comment's attributes to be selected. The default is ['id', 'author', 'created_utc', 'link_id', 'parent_id', 'subreddit', 'subreddit_id', 'body', 'score']
        '''
        self.out_folder = out_folder
        # Side effect: create the output directory if missing.
        if not os.path.exists(self.out_folder):
            os.mkdir(self.out_folder)
        self.extract_post = extract_post
        self.extract_comment = extract_comment
        self.categories = categories
        # transforming date in a suitable format for folder name (category)
        self.pretty_start_date = start_date.replace('/','-')
        self.pretty_end_date = end_date.replace('/','-')
        self.real_start_date = start_date # TODO metti nome piu carino (give this a nicer name)
        self.real_end_date = end_date # TODO metti nome piu carino (give this a nicer name)
        # converting date from format %d/%m/%Y to UNIX timestamp as requested by API
        self.start_date = int(time.mktime(datetime.datetime.strptime(start_date, "%d/%m/%Y").timetuple()))
        self.end_date = int(time.mktime(datetime.datetime.strptime(end_date, "%d/%m/%Y").timetuple()))
        self.n_months = n_months
        self.post_attributes = post_attributes
        self.comment_attributes = comment_attributes
def _post_request_API(self, start_date, end_date, subreddit):
'''
API REQUEST to pushishift.io/reddit/submission
returns a list of 1000 dictionaries where each of them is a post
'''
url = 'https://api.pushshift.io/reddit/search/submission?&size=500&after='+str(start_date)+'&before='+str(end_date)+'&subreddit='+str(subreddit)
try:
r = requests.get(url) # Response Object
time.sleep(random.random()*0.02)
data = json.loads(r.text) # r.text is a JSON object, converted into dict
except (requests.exceptions.ConnectionError, json.decoder.JSONDecodeError):
return self._post_request_API(start_date, end_date, subreddit)
return data['data'] # data['data'] contains list of posts
def _comment_request_API(self, start_date, end_date, subreddit):
'''
API REQUEST to pushishift.io/reddit/comment
returns a list of 1000 dictionaries where each of them is a comment
'''
url = 'https://api.pushshift.io/reddit/search/comment?&size=500&after='+str(start_date)+'&before='+str(end_date)+'&subreddit='+str(subreddit)
try:
r = requests.get(url) # Response Object
time.sleep(random.random()*0.02)
data = json.loads(r.text) # r.text is a JSON object, converted into dict
except (requests.exceptions.ConnectionError, json.decoder.JSONDecodeError):
return self._comment_request_API(start_date, end_date, subreddit)
return data['data'] # data['data'] contains list of comments
def _clean_raw_text(self, text):
'''
Clean raw post/comment text with standard preprocessing pipeline
'''
# Lowercasing text
text = text.lower()
# Removing not printable characters
text = ''.join(filter(lambda x:x in string.printable, text))
# Removing XSLT tags
text = re.sub(r'</?[a-z]+>', '', text)
text = text.replace(r'&', 'and')
text = text.replace(r'>', '') # TODO: try another way to strip xslt tags
# Removing newline, tabs and special reddit words
text = text.replace('\n',' ')
text = text.replace('\t',' ')
text = text.replace('[deleted]','').replace('[removed]','')
# Removing URLs
text = re.sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '', text)
# Removing numbers
text = re.sub(r'\w*\d+\w*', '', text)
# Removing Punctuation
text = text.translate(str.maketrans('', '', string.punctuation))
# Removing extra spaces
text = re.sub(r'\s{2,}', " ", text)
# Stop words? Emoji?
return text
def extract_data(self):
    '''
    extract Reddit data from a list of subreddits in a specific time-period

    For every category, posts and/or comments are pulled from the pushshift
    API subreddit by subreddit, cleaned, grouped per user, and flushed to one
    JSON file per user for every n_months sub-period; each category folder is
    finally zipped and the unzipped copy removed.
    '''
    raw_data_folder = os.path.join(self.out_folder, 'Categories_raw_data')
    if not os.path.exists(raw_data_folder):
        os.mkdir(raw_data_folder)
    categories_keys = list(self.categories.keys())
    i = 0 #to iter over categories keys
    for category in self.categories.keys():
        print(f'Extracting category: {categories_keys[i]}')
        users = dict() #users with post & comment shared in different subreddit belonging to the same category
        for sub in self.categories[category]:
            print(f'Extracting subbredit: {sub}')
            current_date_post = self.start_date
            current_date_comment = self.start_date
            # handling time-period: n_months == 0 means one single window
            if self.n_months == 0:
                period_post = (datetime.datetime.strptime(self.real_start_date, "%d/%m/%Y"), datetime.datetime.strptime(self.real_end_date, "%d/%m/%Y"))
                period_comment = (datetime.datetime.strptime(self.real_start_date, "%d/%m/%Y"), datetime.datetime.strptime(self.real_end_date, "%d/%m/%Y"))
            else:
                end_period = datetime.datetime.strptime(self.real_start_date, "%d/%m/%Y") + relativedelta(months=+self.n_months)
                period_post = (datetime.datetime.strptime(self.real_start_date, "%d/%m/%Y"), end_period)
                period_comment = (datetime.datetime.strptime(self.real_start_date, "%d/%m/%Y"), end_period)
            # extracting posts
            if self.extract_post:
                posts = self._post_request_API(current_date_post, self.end_date, sub) #first call to API
                while len(posts) > 0: #collecting data until there are no more posts to extract in the time period considered
                    # TODO: check if sub exists!
                    for raw_post in posts:
                        # saving posts for each period: flush `users` to disk
                        # whenever this post crosses a period boundary
                        current_date = datetime.datetime.utcfromtimestamp(raw_post['created_utc']).strftime("%d/%m/%Y")
                        condition1_post = datetime.datetime.strptime(current_date, "%d/%m/%Y") >= period_post[1]
                        condition2_post = (datetime.datetime.strptime(current_date, "%d/%m/%Y") + relativedelta(days=+1)) >= datetime.datetime.strptime(self.real_end_date, "%d/%m/%Y")
                        if condition1_post or condition2_post:
                            # Saving data: for each category a folder
                            path_category = os.path.join(raw_data_folder, f'{categories_keys[i]}_{self.pretty_start_date}_{self.pretty_end_date}')
                            if not os.path.exists(path_category):
                                os.mkdir(path_category)
                            pretty_period0_post = period_post[0].strftime('%d/%m/%Y').replace('/','-')
                            pretty_period1_post = period_post[1].strftime('%d/%m/%Y').replace('/','-')
                            path_period_category = os.path.join(path_category, f'{categories_keys[i]}_{pretty_period0_post}_{pretty_period1_post}')
                            if not os.path.exists(path_period_category):
                                os.mkdir(path_period_category)
                            # for each user in a period category a json file
                            for user in users:
                                user_filename = os.path.join(path_period_category, f'{user}.json')
                                if os.path.exists(user_filename):
                                    # merge with data already on disk for this user/period
                                    with open(user_filename) as fp:
                                        data = json.loads(fp.read())
                                        data['posts'].extend(users[user]['posts'])
                                    with open(user_filename, 'w') as fp:
                                        json.dump(data, fp, sort_keys=True, indent=4)
                                else:
                                    with open(user_filename, 'w') as fp:
                                        json.dump(users[user], fp, sort_keys=True, indent=4)
                            users = dict()
                            if condition1_post:
                                # advance to the next n_months window
                                period_post = (period_post[1], period_post[1] + relativedelta(months=+self.n_months))
                                print('PERIOD_POST', period_post)
                            elif condition2_post:
                                break
                        # collecting posts
                        if raw_post['author'] not in ['[deleted]', 'AutoModerator']: # discarding data concerning removed users and moderators
                            user_id = raw_post['author']
                            post = dict() #dict to store posts
                            # adding field category
                            post['category'] = category
                            # adding field date in a readable format
                            post['date'] = datetime.datetime.utcfromtimestamp(raw_post['created_utc']).strftime("%d/%m/%Y")
                            # cleaning body field (title + selftext merged)
                            merged_text = raw_post['title']+' '+raw_post['selftext']
                            post['clean_text'] = self._clean_raw_text(merged_text)
                            # adding field time_period in a readable format
                            if self.n_months != 0:
                                post['time_period'] = (period_post[0].strftime('%d/%m/%Y'), period_post[1].strftime('%d/%m/%Y'))
                            else:
                                post['time_period'] = (datetime.datetime.utcfromtimestamp(self.start_date).strftime("%d/%m/%Y"),datetime.datetime.utcfromtimestamp(self.end_date).strftime("%d/%m/%Y"))
                            # selecting fields
                            for attr in self.post_attributes:
                                if attr not in raw_post.keys(): #handling missing values
                                    post[attr] = None
                                elif (attr != 'selftext') and (attr != 'title'): # saving only clean text
                                    post[attr] = raw_post[attr]
                            if len(post['clean_text']) > 2: # avoiding empty posts
                                if user_id not in users.keys():
                                    if self.extract_post and self.extract_comment:
                                        users[user_id] = {'posts':[], 'comments':[]}
                                    else:
                                        users[user_id] = {'posts':[]}
                                users[user_id]['posts'].append(post)
                    current_date_post = posts[-1]['created_utc'] # taking the UNIX timestamp date of the last record extracted
                    posts = self._post_request_API(current_date_post, self.end_date, sub)
                    pretty_current_date_post = datetime.datetime.utcfromtimestamp(current_date_post).strftime('%Y-%m-%d')
                    print(f'Extracted posts until date: {pretty_current_date_post}')
            # extracting comments (mirrors the post pipeline above)
            if self.extract_comment:
                comments = self._comment_request_API(current_date_comment, self.end_date, sub) # first call to API
                while len(comments) > 0: #collecting data until there are no more comments to extract in the time period considered
                    for raw_comment in comments:
                        # saving comments for each period
                        current_date = datetime.datetime.utcfromtimestamp(raw_comment['created_utc']).strftime("%d/%m/%Y")
                        condition1_comment = datetime.datetime.strptime(current_date, "%d/%m/%Y") >= period_comment[1]
                        condition2_comment = (datetime.datetime.strptime(current_date, "%d/%m/%Y") + relativedelta(days=+1)) >= datetime.datetime.strptime(self.real_end_date, "%d/%m/%Y")
                        if condition1_comment or condition2_comment: # saving comment for period
                            # Saving data: for each category a folder
                            path_category = os.path.join(raw_data_folder, f'{categories_keys[i]}_{self.pretty_start_date}_{self.pretty_end_date}')
                            if not os.path.exists(path_category):
                                os.mkdir(path_category)
                            pretty_period0_comment = period_comment[0].strftime('%d/%m/%Y').replace('/','-')
                            pretty_period1_comment = period_comment[1].strftime('%d/%m/%Y').replace('/','-')
                            path_period_category = os.path.join(path_category, f'{categories_keys[i]}_{pretty_period0_comment}_{pretty_period1_comment}')
                            if not os.path.exists(path_period_category):
                                os.mkdir(path_period_category)
                            # for each user in a period category a json file
                            for user in users:
                                user_filename = os.path.join(path_period_category, f'{user}.json')
                                if os.path.exists(user_filename):
                                    with open(user_filename) as fp:
                                        data = json.loads(fp.read())
                                        data['comments'].extend(users[user]['comments'])
                                    with open(user_filename, 'w') as fp:
                                        json.dump(data, fp, sort_keys=True, indent=4)
                                else:
                                    with open(user_filename, 'w') as fp:
                                        json.dump(users[user], fp, sort_keys=True, indent=4)
                            users = dict()
                            if condition1_comment:
                                period_comment = (period_comment[1], period_comment[1] + relativedelta(months=+self.n_months))
                                print('PERIOD_COMMENT', period_comment)
                            elif condition2_comment:
                                break
                        # collecting comments
                        if raw_comment['author'] not in ['[deleted]', 'AutoModerator']:
                            user_id = raw_comment['author']
                            comment = dict() # dict to store a comment
                            # adding field category
                            comment['category'] = category
                            # adding field date in a readable format
                            comment['date'] = datetime.datetime.utcfromtimestamp(raw_comment['created_utc']).strftime("%d/%m/%Y")
                            # cleaning body field
                            comment['clean_text'] = self._clean_raw_text(raw_comment['body'])
                            # adding time_period field in a readable format
                            if self.n_months != 0:
                                comment['time_period'] = (period_comment[0].strftime('%d/%m/%Y'), period_comment[1].strftime('%d/%m/%Y'))
                            else:
                                comment['time_period'] = (datetime.datetime.utcfromtimestamp(self.start_date).strftime("%d/%m/%Y"),datetime.datetime.utcfromtimestamp(self.end_date).strftime("%d/%m/%Y"))
                            # selecting fields
                            for attr in self.comment_attributes:
                                if attr not in raw_comment.keys(): #handling missing values
                                    comment[attr] = None
                                elif attr != 'body': # saving only clean text
                                    comment[attr] = raw_comment[attr]
                            if len(comment['clean_text']) > 2: # avoiding empty comments
                                if user_id not in users.keys():
                                    if self.extract_post and self.extract_comment:
                                        users[user_id] = {'posts':[], 'comments':[]}
                                    else:
                                        users[user_id] = {'comments':[]}
                                users[user_id]['comments'].append(comment)
                    current_date_comment = comments[-1]['created_utc'] # taking the UNIX timestamp date of the last record extracted
                    comments = self._comment_request_API(current_date_comment, self.end_date, sub)
                    pretty_current_date_comment = datetime.datetime.utcfromtimestamp(current_date_comment).strftime('%Y-%m-%d')
                    print(f'Extracted comments until date: {pretty_current_date_comment}')
            print(f'Finished data extraction for subreddit {sub}')
        # zip category folder and drop the unzipped copy
        shutil.make_archive(path_category, 'zip', path_category)
        shutil.rmtree(path_category)
        print('Done to extract data from category:', categories_keys[i])
        i+=1 #to iter over categories elements
def create_network(self):
    '''
    crate users' interaction network based on comments:
    type of network: directed and weighted by number of interactions
    self loops are not allowed

    Unzips the category archives produced by extract_data(), links each
    comment to the author of its parent post/comment, and writes one
    Source/Target/Weight edge-list CSV per (category, period).
    '''
    if not self.extract_comment or not self.extract_post: # if user wants to create users interactions networks it is necessary to extract both posts and comments in extract_data()
        raise ValueError('To create users interactions Networks you have to set self.extract_comment to True')
    # creating folder with network data
    user_network_folder = os.path.join(self.out_folder, 'Categories_networks')
    if not os.path.exists(user_network_folder):
        os.mkdir(user_network_folder)
    path = os.path.join(self.out_folder, 'Categories_raw_data')
    categories = os.listdir(path) # returns list
    # unzip files belonging to the configured categories/date range
    unzipped_categories = list()
    for category in categories:
        category_name = ''.join([i for i in category if i.isalpha()]).replace('zip','')
        if (category_name in list(self.categories.keys())) and (self.pretty_start_date in category) and (self.pretty_end_date in category):
            file_name = os.path.abspath(os.path.join(path,category))
            if not zipfile.is_zipfile(file_name):
                # already an unpacked folder
                unzipped_filename = os.path.basename(file_name)
                unzipped_categories.append(unzipped_filename)
            elif os.path.basename(file_name).replace('.zip','') not in categories:
                # zip with no unpacked twin: extract it next to the archive
                unzipped_filename = os.path.basename(file_name).replace('.zip','')
                extract_dir = file_name.replace('.zip','')
                shutil.unpack_archive(file_name, extract_dir, 'zip')
                unzipped_categories.append(unzipped_filename)
    print('unzipped:', unzipped_categories)
    # Saving data: for each category a folder
    for category in unzipped_categories:
        network_category = os.path.join(user_network_folder, f'{category}')
        if not os.path.exists(network_category):
            os.mkdir(network_category)
        path_category = os.path.join(path, category)
        category_periods = os.listdir(path_category) # for each category a list of all files (i.e., periods) in that category
        for period in category_periods:
            print('PERIOD:', period)
            path_period = os.path.join(path_category,period)
            users_list = os.listdir(path_period)
            users = dict()
            # index every user's post ids, comment ids and parent ids
            for user in users_list:
                user_filename = os.path.join(path_period, user)
                submission_ids = list()
                comment_ids = list()
                parent_ids = list()
                with open(user_filename, 'r') as f:
                    user_data = json.load(f)
                for comment in user_data['comments']:
                    comment_ids.append(comment['id'])
                    parent_ids.append(comment['parent_id'])
                for post in user_data['posts']:
                    submission_ids.append(post['id'])
                pretty_username = user.replace('.json','')
                if (len(submission_ids) > 0) or (len(comment_ids) > 0):
                    users[pretty_username] = {'post_ids': submission_ids, 'comment_ids': comment_ids, 'parent_ids':parent_ids}
            # edge (u, v): u commented on content authored by v
            interactions = dict()
            nodes = set()
            for user in users:
                for parent_id in users[user]['parent_ids']:
                    if "t3" in parent_id: # it is a comment to a post
                        for other_user in users:
                            if parent_id.replace("t3_","") in users[other_user]['post_ids']:
                                if user != other_user: # avoiding self loops
                                    nodes.add(other_user)
                                    nodes.add(user)
                                    if (user,other_user) not in interactions.keys():
                                        interactions[(user,other_user)] = 0
                                    interactions[(user,other_user)] +=1
                    elif "t1" in parent_id: # it is a comment to another comment
                        for other_user in users:
                            if parent_id.replace("t1_","") in users[other_user]['comment_ids']:
                                if user != other_user: # avoiding self loops
                                    nodes.add(other_user)
                                    nodes.add(user)
                                    if (user,other_user) not in interactions.keys():
                                        interactions[(user,other_user)] = 0
                                    interactions[(user,other_user)] +=1
            print('n_edges', len(interactions))
            print('n_nodes', len(nodes))
            # creating edge list csv file
            nodes_from = list()
            nodes_to = list()
            edges_weight = list()
            for interaction in interactions:
                nodes_from.append(interaction[0])
                nodes_to.append(interaction[1])
                edges_weight.append(interactions[interaction])
            tmp = {'Source':nodes_from,'Target':nodes_to,'Weight':edges_weight}
            edge_list = pd.DataFrame(tmp)
            # saving csv in category folder
            last_path = os.path.join(network_category, f'{period}.csv')
            edge_list.to_csv(last_path, index =False)
if __name__ == '__main__':
    # Demo run: extract posts + comments for r/guncontrol over a 3-month
    # window split into 1-month periods, then build the interaction network.
    # NOTE: the original computed an absolute path via os.getcwd() and then
    # immediately overwrote it with this relative path; the dead assignment
    # (and the unused `cwd`) were removed.
    out_folder = 'RedditHandler_Outputs'
    extract_post = True
    extract_comment = True
    category = {'gun':['guncontrol']}
    start_date = '13/12/2018'
    end_date = '13/03/2019'
    n_months = 1
    #default post attributes
    post_attributes = ['id','author', 'created_utc', 'num_comments', 'over_18', 'is_self', 'score', 'selftext', 'stickied', 'subreddit', 'subreddit_id', 'title']
    #default comment attributes
    comment_attributes = ['id', 'author', 'created_utc', 'link_id', 'parent_id', 'subreddit', 'subreddit_id', 'body', 'score']
    my_handler = RedditHandler(out_folder, extract_post, extract_comment, category, start_date, end_date, n_months=n_months, post_attributes=post_attributes, comment_attributes=comment_attributes)
    my_handler.extract_data()
    my_handler.create_network()
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 10 15:14:01 2019
@author: murillon2003_00
"""
# ATM cash inventory: banknote denomination -> number of slips available.
money_slips = {'20': 5, '50': 5, '100': 5}

# Demo account database keyed by account number.
accounts_list = {
    '0001-02': {'password': '123456', 'name': 'Fulano da Silva Sauro', 'value': 100, 'admin': False},
    '0002-02': {'password': '123456', 'name': 'Fulano da Silva', 'value': 50, 'admin': False},
    '1111-11': {'password': '123456', 'name': 'Admin da Silva', 'value': 50, 'admin': True},
}
# -*- coding: utf-8 -*-
# @Author: Fallen
# @Date: 2020-04-24 09:56:43
# @Last Modified by: Fallen
# @Last Modified time: 2020-04-24 17:25:18
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-04-22 10:48:58
# @Author : Fallen (xdd043@qq.com)
# @Link : https://github.com/fallencrasher/python-learning
# @Version : $Id$
import pickle
class User:
    """A single credential record persisted to the account file via pickle."""

    def __init__(self, name, pwd):
        # Plain-text storage, as in the original exercise (not secure).
        self.name = name
        self.pwd = pwd
class Account:
    """Console register/login backed by a pickle file of User objects."""

    def __init__(self):
        # Path of the pickle file holding one User object per record.
        self.user_list = 'userlist'

    def login(self):
        """Prompt for credentials and validate them against every stored user.

        Returns True on a match, False otherwise. BUG FIX: the original
        reported failure and returned as soon as the FIRST stored record did
        not match; now every record is scanned before giving up.
        """
        uid = input('username:')
        pwd = input('password:')
        for user in self.get_file_info():
            if uid == user.name and pwd == user.pwd:
                print('login successfully')
                return True
        print('login failed')
        return False

    def get_file_info(self):
        """Yield every pickled User record stored in ``self.user_list``."""
        import pickle
        with open(self.user_list, mode='rb') as f:
            while True:
                try:
                    yield pickle.load(f)
                except EOFError:
                    break  # reached the end of the record stream

    def register(self):
        """Register a new user, allowing at most 4 failed attempts.

        A User is pickled and appended to the account file when the two
        passwords match and the name is not already taken. BUG FIX: the
        original never reset its ``name_valid`` flag, so one duplicate name
        silently blocked every later (valid) attempt.
        """
        count = 0
        while count < 4:
            uid = input('username:')
            pwd = input('password:').strip()
            pwd2 = input('password again:').strip()
            if pwd != pwd2:
                count += 1
                print('the two passwords are not same.')
                continue
            existing = [user.name for user in self.get_file_info()]
            if uid in existing:  # name already taken: count as a failed try
                print('username is invalid')
                count += 1
                continue
            import pickle
            with open(self.user_list, mode='ab') as f2:
                pickle.dump(User(uid, pwd), f2)
            print('register successfully')
            break

    def run(self):
        """Main loop: ask for register/login until a valid choice is made."""
        while True:
            judge = input('1.register or 2.login?(please input the number!):')
            if judge.isdigit() and int(judge) in (1, 2):
                if int(judge) == 1:
                    self.register()
                elif int(judge) == 2:
                    self.login()
                break
if __name__ == '__main__':
    # Script entry point: start the interactive register/login loop.
    Account().run()
|
def replacewords(array):
    """Replace placeholder tokens in *array*, announcing each substitution.

    'COUNTRY' entries are rewritten with each country read from countries.txt
    (one per line, stripped and split); 'HOUSE' entries with each house
    synonym. The list is mutated in place; each round prints a marker line.

    Fixes vs. the original: ``house=[flat,apartment,]`` referenced undefined
    names (now string literals); results were written to the builtin ``input``
    instead of ``array``; ``x.strip() and x.split()`` returned the un-stripped
    split whenever the line was non-blank.
    """
    house_words = ['flat', 'apartment']
    with open("countries.txt") as f:
        content = [x.strip().split() for x in f.readlines()]
    for index, word in enumerate(array):
        if word == "COUNTRY":
            for country in content:
                array[index] = country
                print("SEND THIS TO FURTHER ANALYSIS")
    for index, word in enumerate(array):
        if word == "HOUSE":
            for synonym in house_words:
                array[index] = synonym
                print("SEND THIS TO FURTHER ANALYSIS")
|
# coding=utf8
""" 生成手机图片, 只作缩小. """
from PIL import Image, ImageEnhance
import os, sys
# File extensions that get converted.
EXTS = set(['.jpg', '.jpeg'])
# Target width in pixels.
WIDTH = 600
#WIDTH = 480
# JPEG quality
QUALITY = 87
# Output directory (Chinese: "model detail images - mobile").
THUMB_DIR = '模特细节图_手机'
# Whether to overwrite thumbnails that already exist.
OVERRIDE = False


def list_images(path='.'):
    """Yield the file names (not paths) in *path* whose extension is in EXTS.

    BUG FIX: the original tested ``os.path.isfile(f)`` with a bare file name,
    which resolves relative to the current working directory — so any *path*
    other than '.' yielded nothing; the name is now joined with *path*.
    """
    for name in os.listdir(path):
        if os.path.isfile(os.path.join(path, name)) and os.path.splitext(name)[1].lower() in EXTS:
            yield name
def process(f):
    """Create a mobile-sized copy of image file *f* inside THUMB_DIR.

    Skips files that already have a thumbnail unless OVERRIDE is set; images
    narrower than WIDTH keep their size, others are scaled to WIDTH while
    preserving the aspect ratio.
    """
    target = os.path.join(THUMB_DIR, f)
    if not OVERRIDE and os.path.exists(target):
        print('Skip')
        return
    image = Image.open(f)
    if image.size[0] <= WIDTH:
        size = image.size  # already narrow enough: keep original dimensions
    else:
        # scale height to preserve the aspect ratio at the target width
        size = (WIDTH, int(image.size[1] * WIDTH / image.size[0]))
    image.thumbnail(size, 1)
    # reuse `target` (the original re-joined the path a second time)
    image.save(target, quality=QUALITY)
def run():
    """Convert every matching image in the CWD, creating THUMB_DIR on demand."""
    if not os.path.exists(THUMB_DIR):
        os.mkdir(THUMB_DIR)
    for image_name in list_images():
        print(image_name)
        process(image_name)
    print('Done!')


if __name__ == '__main__':
    run()
|
import math


def main():
    """Simple four-function console calculator (Turkish prompts), run t times."""
    t = int(input("Program kac defa calısacak : "))
    if t <= 0:
        print("Döngü sıfırdan küçük olamaz")
    for _ in range(t):
        sayi1 = int(input("Birinci Sayıyı Giriniz: "))
        sayi2 = int(input("İkinci Sayıyı Giriniz: "))
        islem = input("Yapılmasını İstediğiniz İşlemi Giriniz: ")
        if islem == "+":
            print(sayi1 + sayi2)
        elif islem == "-":
            print(sayi1 - sayi2)
        elif islem == "*":
            print(sayi1 * sayi2)
        elif islem == "/":
            # BUG FIX: the zero check used to sit BEFORE the "/" branch, so any
            # other command entered with sayi2 == 0 (e.g. "cık") printed the
            # division-by-zero error instead of being handled.
            if sayi2 == 0:
                print("Bir sayıyı sıfıra bölemezsiniz")
            else:
                print(sayi1 / sayi2)
        elif islem == "cık":
            break
        else:
            print("Lütfen + - / * isaretlerinden birini giriniz.")


if __name__ == '__main__':
    main()
# Merge two dicts: start from a copy of `a`, then overlay `b`'s entries.
a = {'1': 'a', '2': 'b'}
b = {'3': 'c'}
dictMerged2 = dict(a)
dictMerged2.update(b)
print(dictMerged2)
import collections
class Solution:
    def findRestaurant(self, list1: list, list2: list) -> list:
        """Return the restaurants common to both lists with the least index sum.

        Fixes vs. the original: the ``List[str]`` annotations were used
        without importing ``typing.List``, raising NameError when the class
        was defined; and each ``s in listX`` membership test was O(n),
        making the whole pass O(n*m) — an index map gives O(n + m).
        """
        index_in_2 = {name: j for j, name in enumerate(list2)}
        index_sums = {}
        for i, name in enumerate(list1):
            j = index_in_2.get(name)
            if j is not None:
                index_sums[name] = i + j
        target = min(index_sums.values())
        return [name for name, total in index_sums.items() if total == target]
|
from datetime import datetime
from pythonping import ping
# The values sent by the master should eventually be received in the init.
def __init__(self, ip=None, tempsheure=None):
    """Bot-side state: target IP and attack datetime, as sent by the master.

    The original left both right-hand sides empty (a syntax error that made
    the whole module unparseable); they are now injectable parameters with
    None placeholders until the master's transport is implemented.
    """
    self.ip = ip  # TODO: receive the target ip from the master
    self.tempsheure = tempsheure  # TODO: receive the datetime from the master
# Launch the ping burst (lab/demo use only: 15 pings of 400 bytes).
def ddos(ip, tempsheure):
    """Ping *ip* once the current time (minute precision) matches *tempsheure*.

    If the scheduled minute is not yet reached, poll once per second until it
    is. BUG FIX: the original compared a timestamp computed ONCE before the
    wait loop, so the delayed branch spun forever without ever firing.
    """
    import time  # local import: the module import block is outside this view
    fmt = "%Y-%m-%d %H:%M"  # renamed from `format`, which shadowed the builtin
    now = datetime.strftime(datetime.now(), fmt)
    if now == tempsheure:
        # scheduled for right now: fire immediately
        ping(ip, verbose=True, size=400, count=15)
        print("ping okay")
    else:
        # delayed launch: block until the clock reaches the target minute.
        # NOTE(review): this still blocks the whole process while waiting; a
        # scheduler or threading.Timer would let other work proceed meanwhile.
        while datetime.strftime(datetime.now(), fmt) != tempsheure:
            time.sleep(1)
        ping(ip, verbose=True, size=400, count=15)
        print("ping ok")
|
# -*- coding: utf-8 -*-
# !/usr/bin/env python
"""
-------------------------------------------------
File Name: project.py
Description: 处理与项目(增减改)相关的工作
Author: Dexter Chen
Date:2017-09-04
-------------------------------------------------
Development Note:
1. 扫描所有项目文件夹,项目文件夹数可大于csv中登记数
2. 根据需要新建项目,生成必要文件(提醒重复);删除只需在csv中删除(保留文件夹)
3.
-------------------------------------------------
Change Log:
2018-08-31:
-------------------------------------------------
格式:
project: 名称,描述,创建时间
key_words(可以是表达式)
log: 信息,信息类型,生成时间
-------------------------------------------------
"""
import sys
import os
import utilities as ut
import mongodb_handler as mh
reload(sys)  # Python 2 idiom: needed before setdefaultencoding is visible
sys.setdefaultencoding('utf8')
projects = [] # cache of project.csv rows, populated by projects_read()
key_words = [] # key words of the current project, populated by key_words_read()
def projects_read():
    """Load the project registry into the module-level ``projects`` cache."""
    # BUG FIX: the original rebound a LOCAL `projects`, leaving the module
    # cache empty, and its first line read ``projects = mh.`` -- an unfinished
    # (syntax-error) switch to mongodb_handler. The csv path below is kept as
    # the working loader; TODO confirm the intended mh API call.
    global projects
    project_set = dh.csv_read("universal", "project")
    projects = map(lambda x: x.split(",|,"), project_set)
def key_words_read(project_name):
    """Load the key-word list for *project_name* into the module cache."""
    global key_words
    raw_text = dh.text_read(project_name, "key_words")
    key_words = raw_text.split("\n")
def project_add(project_name, project_description):
    """Create the project's folder/files and append it to the registry csv."""
    if not dh.check_folders(project_name, "folder"): # no folder for this project yet
        dh.new_project_files(project_name) # create the project's files
        project_set = dh.text_read("universal","project").split("\n")
        time.sleep(0.1) # make sure the file handle is closed after reading
        # NOTE(review): `time` is not imported in this module, and
        # `new_project` is a tuple appended to a list of strings -- both look
        # unfinished; confirm against dh.text_write's expected input format.
        new_project = project_name, project_description, ut.time_str("full")
        project_set.append(new_project)
        dh.text_write(project_set,"universal","project","w")
    else:
        op.output("Folder already exist", "warning", ut.time_str("full"))
def project_des_update(project_name, project_description):
    """Report whether *project_name* exists in the registry.

    NOTE(review): like the original, this never actually writes the new
    description back -- the registry row still needs rewriting (TODO), and
    ``project_set`` is not defined at module scope.
    """
    # BUG FIX: the original iterated ``for i in len(project_set)`` (TypeError
    # -- int is not iterable) and printed 'No project found' once for every
    # non-matching row instead of once at the end.
    found = False
    for row in project_set:
        if row.split(",|,")[0] == project_name:
            found = True
            op.output("Project description updated",
                      "notice", ut.time_str("full"))
    if not found:
        op.output('No project found', "notice", ut.time_str("full"))
def project_delete(project_name):
    """Remove *project_name* from the registry csv (its folder is kept)."""
    # BUG FIX: the original's ``del project`` only unbound the loop variable
    # (the list was never changed), and the else branch reported
    # 'No project found' once per non-matching row.
    remaining = [row for row in project_set if row[0] != project_name]
    if len(remaining) != len(project_set):
        dh.csv_write(remaining, "universal", "project", "wb")
        op.output("Project deleted", "notice", ut.time_str("full"))
    else:
        op.output('No project found', "notice", ut.time_str("full"))
def project_name():
    """Return the name (first field) of every registered project."""
    return [row[0] for row in project_set]
def project_key_words(project_name):
    """Return the key-word field of every registry row named *project_name*."""
    return [row[1] for row in project_set if row[0] == project_name]
if __name__ == '__main__':
    # smoke test: register a throwaway project
    project_add("dexter", "test project for Dexter")
import numpy as np
import pandas as pd
from integration import *
# Output spreadsheets used to inspect the matrices computed below.
filepathH = 'Hamiltonian.xls'
filepathVec='Eigenvectors.xlsx'
filepathVal='Eigenvalues.xlsx'
# quantum numbers read by integrand(); set per matrix element before calling
m=0
n=0
#length in meters
L=(5.0)*10.0**(-10)
#Length in angstroms
#L=5
# Experimental attempt to numerically evaluate the integral; doesn't work yet.
def integrand(x):
    """Integrand sin(m*pi*x/L) * (H sin(n*pi*x/L)) for the matrix element <m|H|n>.

    Reads the module-level quantum numbers ``m``, ``n`` and the well width
    ``L``; constants are in SI units (eV variants left commented out).
    """
    global m
    global n
    global L
    # electron mass in kg
    M = 9.1094 * 10 ** -31
    # potential strength in joules
    a = 10 * 1.602176634 * 10 ** (-19)
    # in eV:
    # a = 10
    # hbar in joule-seconds
    h = 1.054571817 * 10 ** (-34)
    # in eV-seconds:
    # h = 6.582119569 * 10 ** (-16)
    bra = np.sin((np.pi * m * x) / L)
    potential = a * x / L
    # NOTE(review): the squared prefactor below contains x, unlike the analytic
    # second derivative of sin(n*pi*x/L) -- presumably why this "doesn't work yet".
    second_deriv = -(((np.pi * n * x) / L) ** 2) * np.sin((np.pi * n * x) / L)
    h_on_ket = -((h ** 2) / (2 * M)) * second_deriv + potential * np.sin((np.pi * n * x) / L)
    return bra * h_on_ket
#mass in kg
M=9.1094*10**-31
#charge in joules
a=10*1.602176634*10**(-19)
#charge in eV
#a=10
#hbar joules per second
h=1.054571817*10**(-34)
#hbar eV per second
#h=6.582119569*10**(-16)
# size of matrix
N=10
# H matrix
Hamilt=np.empty([N,N])
'''
for row in range(0,N):
for col in range(0,N):
m=row+1
n=col+1
Hamilt[row][col]=swhole(100,0,L,integrand)[2]
'''
#fill H Matrix using the conditions found in part c
for row in range(0,N):
    for col in range(0,N):
        m=row+1
        n=col+1
        # diagonal element of the tilted infinite well
        if(m==n):
            Hamilt[row][col]=(((M*a*L**2)+((n**2)*(np.pi**2)*(h**2))))/(2*M*L**2)
        # opposite parity: non-zero coupling from the linear potential
        elif((m%2!=0 and n%2==0) or (m%2==0 and n%2!=0)):
            Hamilt[row][col]=((-8*a)/(np.pi**2))*((m*n)/(m**2 -n**2)**2)
        # same parity: the matrix element vanishes
        elif(m%2==0 and n%2==0) or (m%2!=0 and n%2!=0):
            Hamilt[row][col]=0
#get eigen values and eigen vectors of H matrix
evalues,evectors=np.linalg.eigh(Hamilt)
#get the ground state probability
# NOTE(review): np.linalg.eigh returns eigenvalues in ascending order, so the
# ground state is column 0; column 1 is the first excited state. Confirm that
# index 1 here (and the range starting at 1 below) is intentional.
ground=np.linalg.multi_dot([evectors[:,1],Hamilt,np.transpose(evectors[:,1])])
probsum=0
#sum over all states in the matrix (starts at 1, skipping column 0)
for i in range(1,N):
    probsum+=np.linalg.multi_dot([evectors[:,i],Hamilt,np.transpose(evectors[:,i])])
#outputs Matrices to an excel file for debugging
evalues=6.242*10**(18)*evalues  # convert joules to eV
dfH = pd.DataFrame (Hamilt)
dfevec=pd.DataFrame (evectors)
dfevalues=pd.DataFrame(evalues)
dfH.to_excel(filepathH, index=False)
dfevec.to_excel(filepathVec, index=False)
dfevalues.to_excel(filepathVal, index=False)
|
# 3. Elabore um programa recursivo em C que calcule o n-ésimo termo da
# sequência: 1, 2, 4, 8, 16, 32... .
# O termo deverá ser impresso na função main().
numero = 1  # running product; multiplica() doubles it n times


def multiplica(n):
    """Double the module-level ``numero`` *n* times (overall factor 2**n).

    The global side effect is kept for existing callers (the main block
    prints ``numero``), but the final value is now ALSO returned, so new
    callers no longer need to read the global. The original returned None.
    """
    global numero
    if n == 0:
        return numero
    numero = numero * 2
    return multiplica(n - 1)
if __name__ == '__main__':
    # print the 50th term of the sequence 1, 2, 4, 8, ... i.e. 2**50
    multiplica(50)
    print(numero)
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TensorFlow C++ logging
import sys
sys.path.append(f'{os.getcwd()}/SentEval')  # make the vendored SentEval importable
import tensorflow as tf
# Prevent TF from claiming all GPU memory so there is some left for pytorch.
gpus = tf.config.list_physical_devices('GPU')
if gpus:
    # Memory growth needs to be the same across GPUs.
    for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)
import tensorflow_hub as hub
import tensorflow_text
import senteval
import time
# https://huggingface.co/sentence-transformers/nli-bert-base
PATH_TO_DATA = f'data'
MODEL = 'https://tfhub.dev/google/universal-sentence-encoder-cmlm/en-base/1' #@param ['https://tfhub.dev/google/universal-sentence-encoder-cmlm/en-base/1', 'https://tfhub.dev/google/universal-sentence-encoder-cmlm/en-large/1']
PARAMS = 'rapid prototyping' #@param ['slower, best performance', 'rapid prototyping']
TASK = 'CR' #@param ['CR','MR', 'MPQA', 'MRPC', 'SICKEntailment', 'SNLI', 'SST2', 'SUBJ', 'TREC']
# SentEval settings: quick prototyping defaults vs. the paper-quality run
params_prototyping = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}
params_prototyping['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
                                    'tenacity': 3, 'epoch_size': 2}
params_best = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 10}
params_best['classifier'] = {'nhid': 0, 'optim': 'adam', 'batch_size': 16,
                             'tenacity': 5, 'epoch_size': 6}
params = params_best if PARAMS == 'slower, best performance' else params_prototyping
# BERT-style preprocessor feeding the CMLM universal sentence encoder
preprocessor = hub.KerasLayer(
    "https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3")
encoder = hub.KerasLayer(
    "https://tfhub.dev/google/universal-sentence-encoder-cmlm/en-base/1")
# NOTE(review): shape=tf.shape('') builds the input shape from the shape of an
# empty string tensor; presumably shape=() was intended -- confirm.
inputs = tf.keras.Input(shape=tf.shape(''), dtype=tf.string)
outputs = encoder(preprocessor(inputs))
model = tf.keras.Model(inputs=inputs, outputs=outputs)
def prepare(params, samples):
    """SentEval hook called before evaluation; nothing to precompute here."""
    return None
def batcher(_, batch):
    """Encode one SentEval batch (lists of tokens) into embedding vectors."""
    sentences = []
    for tokens in batch:
        # SentEval hands over tokenized sentences; empty ones become '.'
        sentences.append(' '.join(tokens) if tokens else '.')
    return model.predict(tf.constant(sentences))["default"]
# Wire the encoder into SentEval and time a single downstream task run.
se = senteval.engine.SE(params, batcher, prepare)
print("Evaluating task %s with %s parameters" % (TASK, PARAMS))
start = time.time()
results = se.eval(TASK)
end = time.time()
print('Time took on task %s : %.1f. seconds' % (TASK, end - start))
print(results)
|
from rest_framework.response import Response
from rest_framework import status
def syllable_required(syllable_id, syllable_type=None, is_get_request=False):
    """Decorator: reject the request with 400 unless *syllable_id* is present
    (and, optionally, exactly of *syllable_type*) in the query string or body.

    Args:
        syllable_id: key that must exist in request.GET / request.data.
        syllable_type: exact type the value must have (None = no type check).
        is_get_request: read request.GET instead of request.data.
    """
    def decorator(func):
        from functools import wraps

        @wraps(func)  # preserve the view's name/docstring for DRF and debugging
        def _wrapped_view(request, *args, **kwargs):
            data = request.GET if is_get_request else request.data
            value = data.get(syllable_id)
            if value is None:  # identity check (was `== None`)
                return Response(
                    {'detail': 'syllable {sid} is required'.format(sid=syllable_id)},
                    status=status.HTTP_400_BAD_REQUEST,
                )
            # exact type check kept on purpose (isinstance would accept subclasses)
            if syllable_type is not None and type(value) != syllable_type:
                return Response(
                    {'detail': 'syllable {sid} has wrong type'.format(sid=syllable_id)},
                    status=status.HTTP_400_BAD_REQUEST,
                )
            return func(request, *args, **kwargs)
        return _wrapped_view
    return decorator
def parameter_required(parameter_id):
    """Decorator: respond 404 unless *parameter_id* is among the view's URI kwargs."""
    def decorator(func):
        from functools import wraps

        @wraps(func)  # keep the wrapped view's identity
        def _wrapped_view(request, *args, **kwargs):
            if kwargs.get(parameter_id) is None:  # identity check (was `== None`)
                return Response(
                    {'detail': '{pid} in URI is required'.format(pid=parameter_id)},
                    status=status.HTTP_404_NOT_FOUND,
                )
            return func(request, *args, **kwargs)
        return _wrapped_view
    return decorator
def login_required():
    """Decorator factory: respond 401 unless request.user is authenticated."""
    def decorator(func):
        from functools import wraps

        @wraps(func)  # keep the wrapped view's identity
        def _wrapped_view(request, *args, **kwargs):
            if not request.user.is_authenticated:
                return Response(
                    {'detail': 'Login Required'}, status=status.HTTP_401_UNAUTHORIZED
                )
            return func(request, *args, **kwargs)
        return _wrapped_view
    return decorator
def parse_as_integer(sid):
    """Decorator: coerce query parameter *sid* to int before the view runs.

    Non-decimal values are dropped from request.GET entirely, so the view can
    rely on the key being either an int or absent.
    """
    def decorator(func):
        from functools import wraps

        @wraps(func)  # keep the wrapped view's identity
        def _wrapped_view(request, *args, **kwargs):
            data = request.GET.copy()  # QueryDicts are immutable; mutate a copy
            raw = data.get(sid)
            if raw is not None:
                if raw.isdecimal():
                    data[sid] = int(raw)
                else:
                    del data[sid]  # unparseable value: remove it
            request.GET = data
            return func(request, *args, **kwargs)
        return _wrapped_view
    return decorator
|
import numpy as np
from ensemble_boxes import *
import glob
import os
import argparse
# Root folder for fused detection outputs; "" means the current working directory.
SAVE_FOLDER = ""
class WBF_VTX(object):
    """Weighted-Boxes-Fusion of per-frame detection files from several models."""

    # class-name <-> integer-label maps required by ensemble_boxes
    name2idx = {"person": 0}
    idx2name = {0: "person"}

    def __init__(self, opts, model_weighted, iou_thr=0.6, skip_box_thr=0.0001):
        # opts: argparse namespace (img_size, frame_length, save_name, ...)
        # model_weighted: {model_name: {"weight": w, "path_prediction": folder}}
        self.opt = opts
        self.model_weighted = model_weighted
        self.iou_thr = iou_thr
        self.skip_box_thr = skip_box_thr
        self.img_size = self.opt.img_size # w, h
        self._check_condition_input()

    def _check_condition_input(self):
        """Ensure every model has one prediction file per frame (pad with empties)."""
        keys = list(self.model_weighted.keys())
        #print(keys)
        for i in range(0, len(keys)):
            pred_paths = glob.glob("{}/*".format(self.model_weighted[keys[i]]["path_prediction"]))
            num = len(pred_paths)
            if(num != self.opt.frame_length):
                tmp_path = "/".join(pred_paths[0].split("/")[:-1])
                self._fill_result_files(tmp_path)

    def _fill_result_files(self, folder):
        """Create empty frame_XXXXXX.txt files for frames with no detections."""
        max_num = 6 # frame_{max_num}  NOTE(review): unused; the 6 is hard-coded below
        exist_frame = os.listdir(folder)
        for idx in range(self.opt.frame_length):
            # zero-padded 6-digit frame name
            frame_name = "frame_" + "0" * (6 - len(str(idx))) + str(idx) + ".txt"
            if(frame_name not in exist_frame):
                print(frame_name)
                with open(os.path.join(folder, frame_name), "wt") as f:
                    f.write("")

    def _normalize_coords(self, box):
        """Pixel box -> [0,1] coordinates expected by weighted_boxes_fusion."""
        xmin, ymin, xmax, ymax = box
        nxmin, nymin, nxmax, nymax = xmin / self.img_size[0], ymin / self.img_size[1], xmax / self.img_size[0], ymax / self.img_size[1]
        return nxmin, nymin, nxmax, nymax

    def _scale_coords(self, box):
        """[0,1] coordinates back to pixels."""
        nxmin, nymin, nxmax, nymax = box
        xmin, ymin, xmax, ymax = nxmin * self.img_size[0], nymin * self.img_size[1], nxmax * self.img_size[0], nymax * self.img_size[1]
        return xmin, ymin, xmax, ymax

    def _process_data_frame(self, list_prediction_file):
        """Parse "<label> <score> <xmin> <ymin> <xmax> <ymax>" lines per model file."""
        boxes_list = []
        score_list = []
        labels_list = []
        for file_ in list_prediction_file:
            # NOTE(review): this handle is never closed; a `with` would be safer
            f = open(file_, "r")
            data = f.read().splitlines()
            boxes = []
            scores = []
            labels = []
            for d in data:
                tmp = d.split(" ")
                scores.append(float(tmp[1]))
                box = [float(_) for _ in tmp[2:]]
                boxes.append(self._normalize_coords(box))
                labels.append(WBF_VTX.name2idx[str(tmp[0])])
            boxes_list.append(boxes)
            score_list.append(scores)
            labels_list.append(labels)
        return boxes_list, score_list, labels_list

    def _get_frame_predict_file(self, frame_name, models):
        """Return each model's prediction-file path for *frame_name*."""
        list_prediction_file = []
        for model in models:
            #print(model)
            model_path = self.model_weighted[model]["path_prediction"]
            list_prediction_file.append(os.path.join(model_path, frame_name))
        return list_prediction_file

    def _write_results(self, frame_name, results):
        """Write the fused boxes for one frame, scaled back to pixel coordinates."""
        boxes, scores, labels = results
        save_path = os.path.join(SAVE_FOLDER, self.opt.save_name)
        # check folder exist
        if(not os.path.exists(save_path)):
            os.mkdir(save_path)
        with open(os.path.join(save_path, frame_name), "wt") as f:
            for idx in range(len(boxes)):
                boxes[idx] = self._scale_coords(boxes[idx])
                line = WBF_VTX.idx2name[labels[idx]] + " " + str(scores[idx]) + " " + " ".join(list(map(str, boxes[idx])))
                f.write(line + "\n")

    def _get_frame_wbf(self, frame_name, model_name, weights):
        """Fuse all models' boxes for one frame via weighted_boxes_fusion."""
        list_pred = self._get_frame_predict_file(frame_name, model_name)
        boxes_list, scores_list, labels_list = self._process_data_frame(list_pred)
        assert len(boxes_list) == len(scores_list), "Not matched"
        boxes, scores, labels = weighted_boxes_fusion(boxes_list, scores_list, labels_list, weights=weights, iou_thr=self.iou_thr, skip_box_thr=self.skip_box_thr)
        return boxes, scores, labels

    def run_wbf(self, path_output):
        """Fuse every frame found under the first model's prediction folder.

        NOTE(review): *path_output* is unused; output goes to SAVE_FOLDER/save_name.
        """
        keys = list(self.model_weighted.keys())
        weights = list(map(lambda x: self.model_weighted[x]["weight"], keys))
        frames = os.listdir(self.model_weighted[keys[0]]["path_prediction"])
        for frame_name in frames:
            #print("Process: ", frame_name)
            boxes, scores, labels = self._get_frame_wbf(frame_name, keys, weights)
            self._write_results(frame_name, [boxes, scores, labels])
            #print("Boxes: ", len(boxes_list))
            #print("Score: ", len(score_list))
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Change config for ensembles')
    parser.add_argument('--save_name', type=str, required=True,
                        help='name of result files')
    # Parse numeric arguments as int directly.  The original used type=str
    # with an int default and converted after parsing, so the attribute's
    # type depended on whether the flag was supplied.
    parser.add_argument('--frame_length', type=int, default=19200, help="num frame of videos")
    parser.add_argument('-n', '--path_list', nargs='+', default=[])
    parser.add_argument('--img_size', nargs='+', type=int, help="Image resolution (w, h)")
    args = parser.parse_args()
    print(args.path_list)
    print(args.img_size)
    # Per-model fusion weight and the directory holding that model's
    # per-frame prediction files.
    model_weighted = {
        "crowd": {
            "weight": 2,
            "path_prediction": args.path_list[0],
        },
        "baseline": {
            "weight": 3,
            "path_prediction": args.path_list[1],
        },
    }
    wbf = WBF_VTX(args, model_weighted)
    wbf.run_wbf("")
|
from lxml import html
import requests
import sqlite3
from time import sleep

base_url = 'http://stackoverflow.com'

conn = sqlite3.connect('morpheus11.db')
c = conn.cursor()
# Report how many rows post_tb currently holds.
for row in c.execute('SELECT count(*) FROM post_tb'):
    code = row[0]
    # Bug fix: the original used the Python 2 print statement ("print code"),
    # which is a SyntaxError on Python 3.
    print(code)
conn.close()
|
from __future__ import unicode_literals
from contextlib import contextmanager
import os
from tempfile import NamedTemporaryFile
@contextmanager
def example_file(json_for_file):
    """Yield the path of a temporary file containing *json_for_file* (bytes).

    The file is closed before its path is yielded, so it can be re-opened
    by the caller, and it is always removed when the with-block exits.
    """
    tmp = NamedTemporaryFile(delete=False)
    try:
        tmp.write(json_for_file)
        tmp.close()
        yield tmp.name
    finally:
        os.remove(tmp.name)
|
def mad_libs():
    """Prompt for three people, a place and an adjective, then print the story."""
    first = input("Enter the first person of the mad_libs story:")
    second = input("Enter the second person of the mad_libs story:")
    third = input("Enter the third person of the mad_libs story:")
    setting = input("Enter the setting of the story:")
    descriptor = input("Enter an adjective for the story:")
    print("Be kind to your", first, "-footed", second)
    print("For a duck may be somebody's", third, ",")
    print("Be kind to your girl in", setting)
    print("Where the weather is always", descriptor, ".")


mad_libs()
|
import re
# Three-letter amino-acid codes mapped to their one-letter IUPAC symbols.
aa_codes = {
    'ALA': 'A', 'ARG': 'R', 'ASN': 'N', 'ASP': 'D', 'CYS': 'C',
    'GLU': 'E', 'GLN': 'Q', 'GLY': 'G', 'HIS': 'H', 'ILE': 'I',
    'LEU': 'L', 'LYS': 'K', 'MET': 'M', 'PHE': 'F', 'PRO': 'P',
    'SER': 'S', 'THR': 'T', 'TRP': 'W', 'TYR': 'Y', 'VAL': 'V',
}
def parse_aa(gene: str,
             ref: str,
             alt: str,
             nt_pos: int,
             aa_pos: int,
             snpeff_aa: str) -> str:
    """Format a variant as "<ref><nt_pos><alt>", appending the amino-acid
    change "(<gene>:<ref_aa><aa_pos><alt_aa>)" when *snpeff_aa* carries a
    parsable HGVS protein annotation such as "p.Ala123Thr"; otherwise the
    bare nucleotide form is returned.
    """
    match = re.match(r'p\.([a-zA-Z]+)(\d+)([a-zA-Z]+)', snpeff_aa)
    if snpeff_aa == '.' or match is None:
        return f'{ref}{nt_pos}{alt}'
    ref_aa3, _, alt_aa3 = match.groups()
    return (f'{ref}{nt_pos}{alt}'
            f'({gene}:{get_aa(ref_aa3)}{aa_pos}{get_aa(alt_aa3)})')
def get_aa(s: str, codes: dict = None) -> str:
    """Translate concatenated three-letter amino-acid codes to one-letter symbols.

    E.g. "AlaArg" -> "AR".  Letter case of the input is ignored.

    Parameters
    ----------
    s : concatenated three-letter codes.
    codes : optional mapping of upper-case three-letter code -> one-letter
        symbol; defaults to the module-level ``aa_codes`` table (the new
        parameter is backward-compatible and makes the function reusable
        with alternative tables).

    Raises
    ------
    KeyError if a three-character chunk is not in the table.
    """
    if codes is None:
        codes = aa_codes
    # Walk the string three characters at a time; str.join avoids the
    # quadratic string concatenation of the original += loop.
    return ''.join(codes[s[i:i + 3].upper()] for i in range(0, len(s), 3))
|
import pygame
import logging
import os
cwd = os.getcwd()
class Sound:
    """Initialise the pygame mixer, then load and start the menu audio.

    NOTE(review): ``volume`` and ``log`` are deliberately module-level
    globals shared with the Music and Effects classes below.
    """

    def __init__(self, soundlog, settings):
        global volume, log
        pygame.mixer.init()
        self.channels = {}
        log = soundlog
        # Volume comes from settings['audio'] on a 0-10 scale; disabled -> 0.
        volume = float(settings['audio']['volume']) / 10 if settings['audio']['enabled'] else 0
        self.effects = Effects()
        self.music = Music()
        # A missing asset file must not crash start-up: log it and carry on.
        try:
            self.music.load("menuMusic", "menu.ogg")
            self.music.play("menuMusic")
            self.effects.load("boing", "boing.ogg")
        except Exception as err:
            logging.info(err, exc_info=True)
class Music:
    """Thin wrapper around pygame.mixer.music for streamed background songs."""

    def __init__(self):
        self.song = pygame.mixer.music

    def load(self, name, location):
        # Songs live under <cwd>/sound/music/.
        log.debug(f"Loading music song: {name}, from {location}")
        self.song.load(cwd + "/sound/music/" + location)

    def play(self, name):
        log.debug(f"Playing sound: {name}")
        self.song.set_volume(volume)
        self.song.play()

    def fadeout(self, time):
        log.debug("Fading out music song")
        self.song.fadeout(time)
class Effects(dict):
    """Dict of named pygame Sound effects, keyed by the name given to load()."""

    def load(self, name, location):
        # Effects live under <cwd>/sound/effects/.
        logging.debug(f"Loading sound: {name}, from {location}")
        effect = pygame.mixer.Sound(cwd + "/sound/effects/" + location)
        effect.set_volume(volume)
        self[name] = effect

    def play(self, name):
        logging.debug(f"Playing sound: {name}")
        self[name].play()
class TipoA:
    """Record type "Tipo A": two row indices converted from 0- to 1-based."""

    def __init__(self, f1, f2):
        self.nombre = "Tipo A"
        self.filaA, self.filaB = f1 + 1, f2 + 1
class TipoB:
    """Record type "Tipo B": one 1-based row index plus a constant."""

    def __init__(self, f1, c1):
        self.nombre = "Tipo B"
        self.filaC = f1 + 1  # 0-based -> 1-based
        self.constanteA = c1
class TipoC:
    """Record type "Tipo C": two 1-based row indices plus a constant."""

    def __init__(self, f1, f2, c1):
        self.nombre = "Tipo C"
        self.filaD, self.filaE = f1 + 1, f2 + 1
        self.constanteB = c1
import sys
import re
# This script generates the swb code list. This is to be looped through for other scripts.
# For example, if you have a batch - you can run 'for line in file' in bash or zsh.
# Where the 'file' is the output of this script.
filename = sys.argv[1]
# Extract the first run of five consecutive digits from the filename.
# Bug fix: the original chained .group(0) directly onto re.search, so a
# filename with no 5-digit code raised an opaque AttributeError on None.
match = re.search(r"[0-9]{5}", filename)
if match is None:
    sys.exit("No 5-digit code found in filename: %r" % filename)
# The swb code is the last four digits of the five-digit match.
final_code = match.group(0)[1:]
with open('./results/codes.txt', 'a') as codes:
    codes.write(final_code)
    codes.write("\n")
#!/usr/bin/env python
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# @author: Jorge de la Rosa #
# Date: 14/08/2021 #
# #
# Perceptron training using #
# GA to simulate AND gate #
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
import numpy as np
import random
def activation_function(arg):
    """Heaviside step: 1 for strictly positive input, else 0."""
    if arg > 0:
        return 1
    return 0


def perceptron(inputs, weights_bias):
    """Single neuron: step(w1*x1 + w2*x2 + bias).

    *weights_bias* is a 3-element sequence [w1, w2, bias].
    """
    weighted_sum = np.dot(weights_bias[:2], inputs) + weights_bias[2]
    return activation_function(weighted_sum.tolist())
def funcion_objetivo(bitstring):
    """Fitness to minimise: fraction of AND-gate cases the perceptron
    encoded by *bitstring* classifies wrongly (0.0 means perfect)."""
    # Truth table for the AND gate and its expected outputs.
    batch = [[0, 0],
             [0, 1],
             [1, 0],
             [1, 1]]
    result = [0, 0, 0, 1]
    hits = sum(1 for case, expected in zip(batch, result)
               if perceptron(case, bitstring) == expected)
    return 1 - hits / 4
def selection(population, scores, k=3):
    """Tournament selection: sample k indices (with replacement) and return
    the chromosome with the lowest (best) score among them."""
    contenders = [random.randint(0, len(population) - 1) for _ in range(k)]
    # min() keeps the first index on ties, matching a strict-< scan.
    winner = min(contenders, key=lambda idx: scores[idx])
    return population[winner]
def crossover(parent1, parent2, rate_cross=0.85):
    """One-point crossover with probability *rate_cross*; otherwise the
    children are plain copies of the parents."""
    if random.random() >= rate_cross:
        return parent1.copy(), parent2.copy()
    cut = random.randint(1, len(parent1) - 1)
    return parent1[:cut] + parent2[cut:], parent2[:cut] + parent1[cut:]
def mutation(bitstring, rate_mutation):
    """Mutate each gene in place with probability *rate_mutation*, replacing
    it by a fresh uniform value in [-2, 2]; returns the same list."""
    for idx, _ in enumerate(bitstring):
        if random.random() < rate_mutation:
            bitstring[idx] = random.uniform(-2, 2)
    return bitstring
def genetic_algorithm(objetive, n_bits, n_gen, n_spec, r_cross, r_mut):
    """Minimise *objetive* over real-valued chromosomes of length n_bits.

    Runs n_gen generations over a population of n_spec chromosomes using
    tournament selection, one-point crossover (rate r_cross) and uniform
    mutation (rate r_mut).  Returns (best_chromosome, best_score).

    NOTE(review): the pairing loop assumes n_spec is even — confirm callers.
    """
    population = [[random.uniform(-2, 2) for _ in range(n_bits)] for _ in range(n_spec)]
    best, best_eval = population[0], objetive(population[0])
    for generation in range(n_gen):
        scores = [objetive(chromosome) for chromosome in population]
        # Track the global best and report every improvement.
        for i in range(n_spec):
            if scores[i] < best_eval:
                best, best_eval = population[i], scores[i]
                print(f"<Best chromosome> ID:{population[i]} SCORE:{scores[i]} GENERATION:{generation}")
        parents = [selection(population, scores) for _ in range(n_spec)]
        offspring = []
        for i in range(0, n_spec, 2):
            for child in crossover(parents[i], parents[i + 1], r_cross):
                offspring.append(mutation(child, r_mut))
        population = offspring
    return best, best_eval
n_bits = 3   # two weights + one bias per chromosome
n_spec = 20  # population size (must be even for the GA's pairing loop)
n_gen = 10
r_mut = 1 / n_bits
r_cross = 0.85
# Bug fix: the original printed the undefined name `score` (NameError at
# runtime) after discarding the second return value as `_`.  Keep the best
# score returned by genetic_algorithm and print that instead.
(weight_1, weight_2, bias), best_score = genetic_algorithm(funcion_objetivo, n_bits, n_gen, n_spec, r_cross, r_mut)
print(weight_1, weight_2, bias, best_score)
|
from django.core.paginator import Paginator
from django.http import JsonResponse
from django.shortcuts import render
from django.views import View
from django.db.models import Q
from .models import funds
"""
time:2020-01-17
author:JZ
function:获取基金数据(分页)
"""
def get_funds(request):
    """Return one page (10 rows) of funds as JSON.

    GET params:
      content  - optional substring matched against funds_id / funds_name
      page_num - optional 1-based page number (defaults to 1)
    """
    print('----get_funds_data------')
    # Filter by the optional search term.
    content = request.GET.get('content', None)
    if content:
        fund_list = funds.objects.filter(
            Q(funds_id__icontains=content) | Q(funds_name__icontains=content)
        ).order_by("funds_id")
    else:
        fund_list = funds.objects.all().order_by("funds_id")
    # Bug fix: request.GET['page_num'] raised KeyError when the parameter was
    # absent, so the "default to page 1" branch below could never run; .get()
    # keeps that fallback working.
    page_num = request.GET.get('page_num', 1)
    if not page_num:
        page_num = 1
    pageinator = Paginator(fund_list, 10)
    pages = pageinator.page(page_num)
    data = []
    for obj in pages:
        data.append({
            'id': obj.funds_id,
            'page_no': (int(page_num) - 1) * 10,
            'name': obj.funds_name,
            'accu': obj.funds_accu,
            'day_val': obj.funds_day_val,
            'day_rate': obj.funds_day_rate,
            'year_rate': obj.funds_year_rate,
        })
    # NOTE(review): 'total' is the size of this page (<=10), not the overall
    # row count — the separate get_total endpoint reports that; confirm the
    # front end expects this.
    return JsonResponse({'code': 200, 'data': data, 'total': len(data)},
                        json_dumps_params={'ensure_ascii': False})
"""
time:2020-01-17
author:JZ
function:获取基金的数量
"""
def get_total(request):
    """Return the total number of funds matching the optional search term.

    GET params:
      content - optional substring matched against funds_id / funds_name
    """
    print('----get_funds------')
    content = request.GET.get('content', None)
    if content:
        fund_list = funds.objects.filter(Q(funds_id__icontains=content) | Q(funds_name__icontains=content))
    else:
        fund_list = funds.objects.all()
    # Performance: QuerySet.count() issues SELECT COUNT(*) instead of len(),
    # which fetched and materialised every row just to count them.
    return JsonResponse({'code': 200, 'total': fund_list.count()},
                        json_dumps_params={'ensure_ascii': False})
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.