class Solution:
    def isValid(self, s):
        # Map each opening bracket to its matching closing bracket.
        closing_for = {'{': '}', '(': ')', '[': ']'}
        stack = []
        for letter in s:
            if letter in closing_for:
                stack.append(letter)
            elif letter in closing_for.values():
                if not stack:
                    return False
                if letter == closing_for[stack[-1]]:
                    stack.pop()
                else:
                    return False
        # Any unmatched opening brackets left on the stack make the string invalid.
        return not stack
a = Solution()
print(a.isValid("[]"))
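# A few more illustrative checks (expected output: True, False, False):
print(a.isValid("{[()]}"))
print(a.isValid("([)]"))
print(a.isValid("("))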
|
__author__ = "arunrajms"
from rest_framework import serializers
from resource.models import Switch
from rest_framework.validators import UniqueValidator
import re
TIER_CHOICES = ['Spine', 'Leaf', 'Core', 'Host', 'Any', 'any']
class JSONSerializerField(serializers.Field):
    """Serializer for JSONField -- required to make the field writable."""
    def to_internal_value(self, data):
        return data
    def to_representation(self, value):
        return value
class SwitchSerializer(serializers.Serializer):
    id = serializers.IntegerField(read_only=True)
    name = serializers.CharField(max_length=100, required=True,
                                 validators=[UniqueValidator(queryset=Switch.objects.all())])
    model = serializers.CharField(max_length=100, required=True)
    image = serializers.CharField(max_length=100, required=True)
    line_cards = serializers.CharField(max_length=100, required=True)
    slots = serializers.IntegerField()
    tier = serializers.ChoiceField(TIER_CHOICES)
    def create(self, validated_data):
        '''
        Creates and returns a new Switch instance from the validated data.
        '''
        return Switch.objects.create(**validated_data)
class SwitchGetSerializer(serializers.Serializer):
    id = serializers.IntegerField(read_only=True)
    name = serializers.CharField(max_length=100, required=True)
    model = serializers.CharField(max_length=100, required=True)
    image = serializers.CharField(max_length=100, required=True)
    line_cards = serializers.CharField(max_length=100, required=True)
    slots = serializers.IntegerField()
    tier = serializers.CharField(max_length=100, required=True)
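# Illustrative usage (field values are made up; requires a configured Django app):
# serializer = SwitchSerializer(data={'name': 'leaf-01', 'model': 'QFX5100',
#                                     'image': 'jinstall.tgz', 'line_cards': '2',
#                                     'slots': 4, 'tier': 'Leaf'})
# if serializer.is_valid():
#     switch = serializer.save()  # dispatches to create() above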
|
import os
import zipfile
import glob
import shutil
from pathlib import Path
def duplicate(path):
    # Work on a copy so the original .pptx is left untouched.
    shutil.copy2(path, path + '_cpy')
    return path + '_cpy'
def modify_to_rar(path):
    # Rename the copied .pptx so it can be opened as a plain zip archive.
    if path.endswith('.pptx_cpy'):
        new_str = path[:-9] + '.zip'
        os.rename(path, new_str)
        return new_str
    else:
        print('File Type Error')
def decompress(path):
    # Extract the archive into a sibling "<name>_files" directory.
    zip_file = zipfile.ZipFile(path)
    tmp_decompression_path = path[:-4] + '_files'
    os.mkdir(tmp_decompression_path)
    zip_file.extractall(tmp_decompression_path)
    zip_file.close()
    return tmp_decompression_path
# extract audios to the parent directory of the given path
def extract_audio(path):
    files = glob.iglob(os.path.join(path, 'ppt', 'media', "*.m4a"))
    original_path = Path(path)
    savings_path = original_path.parent
    for file in files:
        if os.path.isfile(file):
            shutil.copy2(file, savings_path)
def rm_cache(path):
    # Remove the temporary zip copy or the extracted directory.
    if path.endswith('.zip'):
        os.remove(path)
    else:
        shutil.rmtree(path)
def extract_from(path):
    duplication_path = duplicate(path)
    modification_path = modify_to_rar(duplication_path)
    decompression_path = decompress(modification_path)
    extract_audio(decompression_path)
    rm_cache(modification_path)
    rm_cache(decompression_path)
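# Illustrative usage (the path below is made up); copies any .m4a audio tracks
# embedded in the deck into the directory containing the .pptx:
# extract_from('/path/to/slides.pptx')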
|
# This code belongs to the paper
#
# J. Hertrich and G. Steidl.
# Inertial Stochastic PALM (iSPALM) and Applications in Machine Learning.
# ArXiv preprint arXiv:2005.02204, 2020.
#
# Please cite the paper, if you use the code.
from palm_algs import *
import numpy.random
import numpy.matlib
import math
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import time
import pickle
import os
#load and normalize data
mnist=tf.keras.datasets.mnist
(x_train,y_train),(x_test,y_test)=mnist.load_data()
x_train=1.0*x_train
x_test=1.0*x_test
x_train_flat=[]
x_test_flat=[]
y_train_vec=[]
y_test_vec=[]
for i in range(0,len(x_train)):
x_train_flat.append(x_train[i,:,:].reshape((28*28)))
y_vec=np.zeros(10)
y_vec[y_train[i]]=1.0
y_train_vec.append(y_vec)
for i in range(0,len(x_test)):
x_test_flat.append(x_test[i,:,:].reshape((28*28)))
y_vec=np.zeros(10)
y_vec[y_test[i]]=1.0
y_test_vec.append(y_vec)
x_train=1.0*np.array(x_train_flat).astype(np.float32)
y_train=1.0*np.array(y_train_vec).astype(np.float32)
x_test=1.0*np.array(x_test_flat).astype(np.float32)
y_test=1.0*np.array(y_test_vec).astype(np.float32)
mean_x_train=1.0/len(x_train)*np.sum(x_train,axis=0)
x_train=x_train-np.matlib.repmat(mean_x_train,len(x_train),1)
x_test=x_test-np.matlib.repmat(mean_x_train,len(x_test),1)
max_x_train=np.max(np.abs(x_train))
x_train=x_train/max_x_train
x_test=x_test/max_x_train
print(np.prod(y_train.shape))
# parameters
n=x_train.shape[1]
sizes=[784,400,200]
my_activation=tf.keras.activations.elu
# required functions for PALM models
def H(X,batch):
# computes the loss function of the model on the data contained in batch
params=X[0]
Ts=[]
bs=[]
ind=0
for i in range(len(sizes)):
Ts.append(tf.reshape(params[ind:(ind+sizes[i]*n)],[n,sizes[i]]))
ind+=sizes[i]*n
bs.append(params[ind:(ind+sizes[i])])
ind+=sizes[i]
Ts.append(tf.reshape(params[ind:],[10,n]))
x=batch[:,:n]
y=batch[:,n:]
for i in range(len(sizes)):
x=tf.linalg.matvec(Ts[i],x,transpose_a=True)+bs[i]
x=my_activation(x)
x=tf.linalg.matvec(Ts[i],x)
x=tf.linalg.matvec(Ts[-1],x)
x=tf.keras.activations.sigmoid(x)
loss=tf.reduce_sum((x-y)**2)
return loss
def accuracy(X,batch):
# computes the accuracy of the model on the data contained in batch
params=X[0]
Ts=[]
bs=[]
ind=0
for i in range(len(sizes)):
Ts.append(tf.reshape(params[ind:(ind+sizes[i]*n)],[n,sizes[i]]))
ind+=sizes[i]*n
bs.append(params[ind:(ind+sizes[i])])
ind+=sizes[i]
Ts.append(tf.reshape(params[ind:],[10,n]))
x=batch[:,:n]
y=batch[:,n:]
for i in range(len(sizes)):
x=tf.linalg.matvec(Ts[i],x,transpose_a=True)+bs[i]
x=my_activation(x)
x=tf.linalg.matvec(Ts[i],x)
x=tf.linalg.matvec(Ts[-1],x)
x=tf.keras.activations.sigmoid(x)
x_max=tf.argmax(x,axis=1)
y_max=tf.argmax(y,axis=1)
correct=tf.reduce_sum(tf.cast(tf.equal(x_max,y_max),dtype=tf.float32))
accuracy=correct/y.shape[0]
return accuracy.numpy()
@tf.function
def proj_orth(X):
# Projects the matrix X onto the Stiefel manifold.
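# The loop below iterates Y <- 2*Y*(I + Y^T Y)^{-1}; for a full-rank X this fixed-point
# iteration should converge towards the orthogonal polar factor of X, i.e. (approximately)
# the closest matrix with orthonormal columns.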
num_iter=4
Y=X
for i in range(num_iter):
Y_inv=tf.eye(X.shape[1])+tf.matmul(Y,Y,transpose_a=True)
Y=2*tf.matmul(Y,tf.linalg.inv(Y_inv))
return Y
def prox_f(arg,lam):
# prox of the iota-function of the feasible set
out=[]
ind=0
for i in range(len(sizes)):
T=tf.reshape(arg[ind:(ind+sizes[i]*n)],[n,sizes[i]])
T=proj_orth(T)
out.append(tf.reshape(T,[-1]))
ind+=sizes[i]*n
out.append(arg[ind:(ind+sizes[i])])
ind+=sizes[i]
last_T=tf.maximum(tf.minimum(arg[ind:],100.),-100.)
out.append(last_T)
return tf.concat(out,0)
batch_size=1500
test_batch_size=1500
steps_per_epch=x_train.shape[0]//batch_size
epch=200
sarah_p=1000
ens_full=100
runs=10
springs=[]
ispalms=[]
palms=[]
ipalms=[]
sgds=[]
samples=np.concatenate([x_train,y_train],1)
samples_test=np.concatenate([x_test,y_test],1)
print(samples.shape)
# initialization
init_vals=[]
for i in range(len(sizes)):
T=tf.random.normal((n,sizes[i]))
q,r=tf.linalg.qr(T)
init_vals.append(tf.reshape(q,[-1]))
init_vals.append(tf.zeros(sizes[i]))
init_vals.append(0.05*tf.random.normal([10*n]))
init_vals=[tf.concat(init_vals,0).numpy()]
mydir='PNN_results'
if not os.path.isdir(mydir):
os.mkdir(mydir)
def record_grad(optimizer):
# computes after each epoch the norm of the Riemannian gradient
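# For the Stiefel-constrained weight blocks, the Euclidean gradient G is turned into a
# Riemannian gradient below via W_hat = G*T^T - 0.5*T*(T^T*G*T^T), W = W_hat - W_hat^T and
# rie_grad = W*T; this appears to be the usual skew-symmetric projection used for
# optimization on the Stiefel manifold. The bias and last-layer blocks are unconstrained,
# so their Euclidean gradient norms are added directly.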
record_ds=tf.data.Dataset.from_tensor_slices(samples).batch(batch_size)
optimizer.precompile()
grad_norms=[]
for epoch in range(epch):
optimizer.exec_epoch()
grad_norm=0.
grad=0.
for batch in record_ds:
grad+=optimizer.model.grad_batch(batch,0)
ind=0
params=optimizer.model.X[0]
for i in range(len(sizes)):
grad_T=tf.reshape(grad[ind:(ind+sizes[i]*n)],[n,sizes[i]])
T=tf.reshape(params[ind:(ind+sizes[i]*n)],[n,sizes[i]])
W_hat=tf.linalg.matmul(grad_T,tf.transpose(T))-0.5*tf.linalg.matmul(T,tf.linalg.matmul(tf.transpose(T),tf.linalg.matmul(grad_T,tf.transpose(T))))
W=W_hat-tf.transpose(W_hat)
rie_grad=tf.linalg.matmul(W,T)
grad_norm+=tf.reduce_sum(rie_grad**2)
ind+=sizes[i]*n
grad_norm+=tf.reduce_sum(grad[ind:(ind+sizes[i])]**2)
ind+=sizes[i]
grad_norm+=tf.reduce_sum(grad[ind:]**2)
grad_norm=grad_norm.numpy()
grad_norms.append(grad_norm)
print(grad_norm)
return optimizer.my_times,optimizer.test_vals,optimizer.train_vals,grad_norms
for run in range(0,runs):
np.random.seed(10+2*run)
tf.random.set_seed(11+2*run)
sarah_seq=tf.random.uniform(shape=[epch*steps_per_epch+100],minval=0,maxval=1,dtype=tf.float32)
# PALM_Model declaration
model=PALM_Model(init_vals,dtype='float32')
model.H=H
model.prox_funs=[prox_f]
model2=PALM_Model(init_vals,dtype='float32')
model3=PALM_Model(init_vals,dtype='float32')
model4=PALM_Model(init_vals,dtype='float32')
model2.H=H
model2.prox_funs=[prox_f]
model3.H=H
model3.prox_funs=[prox_f]
model4.H=H
model4.prox_funs=[prox_f]
# run algorithms
print('\n-------------------- RUN '+str(run+1)+' SPRING --------------------\n')
spring_optimizer=PALM_Optimizer(model,data=samples,batch_size=batch_size,method='SPRING-SARAH',step_size=0.7,steps_per_epoch=steps_per_epch,sarah_seq=sarah_seq,ensure_full=ens_full,backup_dir=None,sarah_p=sarah_p,test_data=samples_test,test_batch_size=test_batch_size)
spring=record_grad(spring_optimizer)
spring=spring+(accuracy(model.X,samples_test),)
with open(mydir+'/spring'+str(run)+'.pickle', 'wb') as f:
pickle.dump(spring,f)
springs.append(spring)
print('\n-------------------- RUN '+str(run+1)+' iSPALM --------------------\n')
ispalm_optimizer=PALM_Optimizer(model2,data=samples,batch_size=batch_size,method='iSPALM-SARAH',step_size=.7,inertial_step_size=.99,steps_per_epoch=steps_per_epch,sarah_seq=sarah_seq,ensure_full=ens_full,backup_dir=None,sarah_p=sarah_p,test_data=samples_test,test_batch_size=test_batch_size)
ispalm=record_grad(ispalm_optimizer)
ispalm=ispalm+(accuracy(model2.X,samples_test),)
with open(mydir+'/ispalm'+str(run)+'.pickle', 'wb') as f:
pickle.dump(ispalm,f)
ispalms.append(ispalm)
if run==0:
print('\n-------------------- RUN '+str(run+1)+' PALM --------------------\n')
palm_optimizer=PALM_Optimizer(model3,data=samples,batch_size=batch_size,method='PALM',step_size=1.,backup_dir=None,test_data=samples_test)
palm=record_grad(palm_optimizer)
palm=palm+(accuracy(model3.X,samples_test),)
with open(mydir+'/palm'+str(run)+'.pickle', 'wb') as f:
pickle.dump(palm,f)
palms.append(palm)
print('\n-------------------- RUN '+str(run+1)+' iPALM --------------------\n')
ipalm_optimizer=PALM_Optimizer(model4,data=samples,batch_size=batch_size,method='iPALM',step_size=.9,backup_dir=None,test_data=samples_test)
ipalm=record_grad(ipalm_optimizer)
ipalm=ipalm+(accuracy(model4.X,samples_test),)
with open(mydir+'/ipalm'+str(run)+'.pickle', 'wb') as f:
pickle.dump(ipalm,f)
ipalms.append(ipalm)
av_acc_palm=np.mean([p[4] for p in palms])
av_acc_ipalm=np.mean([p[4] for p in ipalms])
av_acc_spring=np.mean([p[4] for p in springs])
av_acc_ispalm=np.mean([p[4] for p in ispalms])
print('Average accuracy PALM: '+str(av_acc_palm))
print('Average accuracy iPALM: '+str(av_acc_ipalm))
print('Average accuracy SPRING: '+str(av_acc_spring))
print('Average accuracy iSPALM: '+str(av_acc_ispalm))
av_grad_palm=np.mean(np.stack([p[3] for p in palms]),axis=0)
av_grad_ipalm=np.mean(np.stack([p[3] for p in ipalms]),axis=0)
av_grad_spring=np.mean(np.stack([p[3] for p in springs]),axis=0)
av_grad_ispalm=np.mean(np.stack([p[3] for p in ispalms]),axis=0)
fig=plt.figure()
plt.plot(av_grad_palm,'-',c='red')
plt.plot(av_grad_ipalm,'--',c='green')
plt.plot(av_grad_spring,'-.',c='black')
plt.plot(av_grad_ispalm,':',c='blue')
plt.yscale("log")
plt.legend(['PALM','iPALM','SPRING-SARAH','iSPALM-SARAH'])
fig.savefig(mydir+'/PNNs_grads.png',dpi=1200)
plt.close(fig)
av_steps_palm=np.mean(np.stack([p[2] for p in palms]),axis=0)/np.prod(y_train.shape)
av_steps_ipalm=np.mean(np.stack([p[2] for p in ipalms]),axis=0)/np.prod(y_train.shape)
av_steps_spring=np.mean(np.stack([p[2] for p in springs]),axis=0)/np.prod(y_train.shape)
av_steps_ispalm=np.mean(np.stack([p[2] for p in ispalms]),axis=0)/np.prod(y_train.shape)
if runs>1:
std_steps_spring=np.sqrt(np.mean((np.stack([p[2]/np.prod(y_train.shape) for p in springs])-av_steps_spring)**2,axis=0))
std_steps_ispalm=np.sqrt(np.mean((np.stack([p[2]/np.prod(y_train.shape) for p in ispalms])-av_steps_ispalm)**2,axis=0))
av_steps_palm_test=np.mean(np.stack([p[1] for p in palms]),axis=0)/np.prod(y_test.shape)
av_steps_ipalm_test=np.mean(np.stack([p[1] for p in ipalms]),axis=0)/np.prod(y_test.shape)
av_steps_spring_test=np.mean(np.stack([p[1] for p in springs]),axis=0)/np.prod(y_test.shape)
av_steps_ispalm_test=np.mean(np.stack([p[1] for p in ispalms]),axis=0)/np.prod(y_test.shape)
# Plot results
fig=plt.figure()
plt.plot(av_steps_palm,'-',c='red')
plt.plot(av_steps_ipalm,'--',c='green')
plt.plot(av_steps_spring,'-.',c='black')
plt.plot(av_steps_ispalm,':',c='blue')
plt.yscale("log")
plt.legend(['PALM','iPALM','SPRING-SARAH','iSPALM-SARAH'])
fig.savefig(mydir+'/PNNs_train.png',dpi=1200)
plt.close(fig)
if runs>1:
fig=plt.figure()
plt.plot(std_steps_spring,'-.',c='black')
plt.plot(std_steps_ispalm,':',c='blue')
plt.legend(['SPRING-SARAH','iSPALM-SARAH'])
fig.savefig(mydir+'/PNNs_train_std.png',dpi=1200)
plt.close(fig)
fig=plt.figure()
plt.plot(av_steps_palm_test,'-',c='red')
plt.plot(av_steps_ipalm_test,'--',c='green')
plt.plot(av_steps_spring_test,'-.',c='black')
plt.plot(av_steps_ispalm_test,':',c='blue')
plt.yscale("log")
plt.legend(['PALM','iPALM','SPRING-SARAH','iSPALM-SARAH'])
fig.savefig(mydir+'/PNNs_test.png',dpi=1200)
plt.close(fig)
all_end_times=[p[0][-1] for p in palms+ipalms+springs+ispalms]
my_end_time=np.min(all_end_times)
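# average_times (defined below): given several runs, each a tuple whose first three entries
# are (times, test_vals, train_vals), merge all time stamps up to end_time, linearly
# interpolate every run's test/train objective onto that common grid (normalized by the
# number of test/train entries), and average the interpolated values over the runs.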
def average_times(data,end_time):
times=np.sort(np.concatenate([p[0] for p in data],0))
times=times[times<=end_time]
inds=[0]*len(data)
vals=[]
vals_train=[]
for t in times:
obj=0.
obj_train=0.
for i in range(len(data)):
p=data[i]
times_p=p[0]
while times_p[inds[i]+1]<t:
inds[i]+=1
t_low=times_p[inds[i]]
t_high=times_p[inds[i]+1]
coord=(t-t_low)/(t_high-t_low)
obj+=((1-coord)*p[1][inds[i]]+coord*p[1][inds[i]+1])/np.prod(x_test.shape)
obj_train+=((1-coord)*p[2][inds[i]]+coord*p[2][inds[i]+1])/np.prod(x_train.shape)
vals.append(obj/len(data))
vals_train.append(obj_train/len(data))
return times,vals,vals_train
t_palm,v_test_palm,v_palm=average_times(palms,my_end_time)
t_ipalm,v_test_ipalm,v_ipalm=average_times(ipalms,my_end_time)
t_spring,v_test_spring,v_spring=average_times(springs,my_end_time)
t_ispalm,v_test_ispalm,v_ispalm=average_times(ispalms,my_end_time)
fig=plt.figure()
plt.plot(t_palm,v_palm,'-',c='red')
plt.plot(t_ipalm,v_ipalm,'--',c='green')
plt.plot(t_spring,v_spring,'-.',c='black')
plt.plot(t_ispalm,v_ispalm,':',c='blue')
plt.yscale("log")
plt.legend(['PALM','iPALM','SPRING-SARAH','iSPALM-SARAH'])
fig.savefig(mydir+'/PNNs_train_times.png',dpi=1200)
plt.close(fig)
fig=plt.figure()
plt.plot(t_palm,v_test_palm,'-',c='red')
plt.plot(t_ipalm,v_test_ipalm,'--',c='green')
plt.plot(t_spring,v_test_spring,'-.',c='black')
plt.plot(t_ispalm,v_test_ispalm,':',c='blue')
plt.yscale("log")
plt.legend(['PALM','iPALM','SPRING-SARAH','iSPALM-SARAH'])
fig.savefig(mydir+'/PNNs_test_times.png',dpi=1200)
plt.close(fig)
|
# Copyright (c) 2016, Philippe Remy <github: philipperemy>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import print_function
import os
from argparse import ArgumentParser
from subprocess import Popen
from sys import argv
from sys import stderr
import nltk
stoplistlines = open("stopwords1.txt",'r').readlines()
stoplist = []
for i in stoplistlines:
stoplist.append(i.strip().lower())
JAVA_BIN_PATH = 'java'
DOT_BIN_PATH = 'dot'
STANFORD_IE_FOLDER = 'stanford-openie'
tmp_folder = '/tmp/openie/'
if not os.path.exists(tmp_folder):
os.makedirs(tmp_folder)
def arg_parse():
arg_p = ArgumentParser('Stanford IE Python Wrapper')
arg_p.add_argument('-f', '--filename', type=str, default=None)
arg_p.add_argument('-v', '--verbose', action='store_true')
arg_p.add_argument('-g', '--generate_graph', action='store_true')
return arg_p
def debug_print(log, verbose):
if verbose:
print(log)
def process_entity_relations(entity_relations_str, verbose=True):
# format is ollie.
entity_relations = list()
for s in entity_relations_str:
entity_relations.append(s[s.find("(") + 1:s.find(")")].split(';'))
return entity_relations
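# Example (illustrative): an ollie-format line such as
#   "0.855 (Obama; was born in; Hawaii)"
# is reduced to the substring between the first "(" and ")" and split on ";", giving
# ['Obama', ' was born in', ' Hawaii'] (leading spaces are kept as-is).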
def generate_graphviz_graph(entity_relations, verbose=True):
"""digraph G {
# a -> b [ label="a to b" ];
# b -> c [ label="another label"];
}"""
graph = list()
graph.append('digraph {')
for er in entity_relations:
graph.append('"{}" -> "{}" [ label="{}" ];'.format(er[0], er[2], er[1]))
graph.append('}')
out_dot = tmp_folder + 'out.dot'
with open(out_dot, 'w') as output_file:
output_file.writelines(graph)
out_png = tmp_folder + 'out.png'
command = '{} -Tpng {} -o {}'.format(DOT_BIN_PATH, out_dot, out_png)
debug_print('Executing command = {}'.format(command), verbose)
dot_process = Popen(command, stdout=stderr, shell=True)
dot_process.wait()
assert not dot_process.returncode, 'ERROR: Call to dot exited with a non-zero code status.'
print('Wrote graph to {} and {}'.format(out_dot, out_png))
def stanford_ie(input_filename, verbose=True, generate_graphviz=True):
out = tmp_folder + 'out.txt'
input_filename = input_filename.replace(',', ' ')
new_filename = ''
for filename in input_filename.split():
if filename.startswith('/'): # absolute path.
new_filename += '{} '.format(filename)
else:
new_filename += '../{} '.format(filename)
absolute_path_to_script = os.path.dirname(os.path.realpath(__file__)) + '/'
command = 'cd {};'.format(absolute_path_to_script)
command += 'cd {}; {} -mx4g -cp "stanford-openie.jar:stanford-openie-models.jar:lib/*" ' \
'edu.stanford.nlp.naturalli.OpenIE {} -format ollie > {}'. \
format(STANFORD_IE_FOLDER, JAVA_BIN_PATH, new_filename, out)
if verbose:
debug_print('Executing command = {}'.format(command), verbose)
java_process = Popen(command, stdout=stderr, shell=True)
else:
java_process = Popen(command, stdout=stderr, stderr=open(os.devnull, 'w'), shell=True)
java_process.wait()
assert not java_process.returncode, 'ERROR: Call to stanford_ie exited with a non-zero code status.'
with open(out, 'r') as output_file:
results_str = output_file.readlines()
os.remove(out)
results = process_entity_relations(results_str, verbose)
if generate_graphviz:
generate_graphviz_graph(results, verbose)
return results
args = argv[:]
arg_p = arg_parse().parse_args(args[1:])
filename = arg_p.filename
verbose = arg_p.verbose
generate_graphviz = arg_p.generate_graph
if filename is None:
print('please provide a text file containing your input. Program will exit.')
exit(1)
if verbose:
debug_print('filename = {}'.format(filename), verbose)
global entities_relations
entities_relations = stanford_ie(filename, verbose, generate_graphviz)
global relations
pronouns = ['WP', 'PRP', '$WP', '$PRP','PRP$']
proper_nouns = ['NNP','NN']
#female = ['girl']
#female_pronoun = ['she','her']
#Requires mapping for both gender pronouns to replace pronouns with apt proper nouns
'''
for i in range(len(entities_relations)):
a = nltk.pos_tag(nltk.word_tokenize(entities_relations[i][0]))
print (entities_relations[i][0],a[0][1])
if a[0][1] in pronouns and entities_relations[i][0] not in female_pronoun:
for j in range(i-1,-1,-1):
print (nltk.pos_tag(nltk.word_tokenize(entities_relations[j][0]))[0][1], entities_relations[j][0])
if nltk.pos_tag(nltk.word_tokenize(entities_relations[j][0]))[0][1] in proper_nouns:
if entities_relations[i][0] not in female_pronoun:
entities_relations[i][0] = entities_relations[j][0]
print (entities_relations[i][0], entities_relations[j][0])
break
if a[0][1] in pronouns and entities_relations[i][0] in female_pronoun:
for j in range(i-1,-1,-1):
for k in female:
if k in entities_relations[j][0]:
entities_relations[i][0] = entities_relations[j][0]
break
print (entities_relations[i][0], entities_relations[j][0])
'''
"""************
#Replaces pronoun with previous propernoun
for i in range(len(entities_relations)):
a = nltk.pos_tag(nltk.word_tokenize(entities_relations[i][0]))
if entities_relations[i][0] == 'his friendship':
print (a, a[0][0],a[0][1] )
#print (entities_relations[i][0],a[0][1])
if a[0][1] in pronouns: #**************don't predict the pronoun and proper noun to be in 0th index in both. find the index of the pronoun and propernoun
for j in range(i-1,-1,-1):
#print (nltk.pos_tag(nltk.word_tokenize(entities_relations[j][0]))[0][1], entities_relations[j][0])
if nltk.pos_tag(nltk.word_tokenize(entities_relations[j][0]))[0][1] in proper_nouns:
entities_relations[i][0] = entities_relations[i][0].replace(a[0][0],entities_relations[j][0])
#print (entities_relations[i][0], entities_relations[j][0])
break
***********"""
'''
for i in entities_relations:
if i[0] in relations:
relations[i[0]].append(i[2])
else:
relations[i[0]] = [i[2]]
#Open IE output
for i, v in relations.items():
v = [j.decode('utf-8') for j in v]
i = [i.decode('utf-8')]
'''
relations = entities_relations[:]
for i in relations:
print (i)
|
from pytest import raises
from webhelpers2.number import *
def eq(a, b, cutoff=0.001):
"""Assert that two floats are equal within 'cutoff' margin of error.
"""
assert abs(a - b) < cutoff
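# For example, eq(0.3333, 1.0 / 3) passes since |0.3333 - 1/3| is roughly 3.3e-5 < 0.001,
# while eq(0.33, 1.0 / 3) raises AssertionError (the difference is roughly 3.3e-3).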
class TestEQ(object):
def test_good(self):
eq(1.0001, 1.0002)
def test_bad(self):
with raises(AssertionError):
eq(1.0, 2.0)
def test_custom_cutoff(self):
eq(2.0, 4.0, cutoff=3.0)
class TestPercentOf(object):
def test1(self):
assert percent_of(5, 100) == 5.0
def test2(self):
assert percent_of(13, 26) == 50.0
class TestMean(object):
def test1(self):
assert mean([5, 10]) == 7.5
class TestMedian(object):
incomes = [18000, 24000, 32000, 44000, 67000, 9999999]
def test_median(self):
assert median(self.incomes) == 49500.0
def test_compare_to_mean(self):
eq(mean(self.incomes), 1697499.833333333)
class TestStandardDeviation(object):
temps_socal = [ # Temperatures in Southern California.
# Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec
70, 70, 70, 75, 80, 85, 90, 95, 90, 80, 75, 70]
temps_mt = [ # Temperatures in Montana.
# Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec
-32, -10, 20, 30, 60, 90, 100, 80, 60, 30, 10, -32]
def test1(self):
a = standard_deviation([0, 0, 14, 14])
eq(a, 8.082903768654761)
def test2(self):
a = standard_deviation([0, 6, 8, 14])
eq(a, 5.773502691896258)
def test3(self):
a = standard_deviation([6, 6, 8, 8])
eq(a, 1.1547005383792515)
def test4(self):
assert standard_deviation([0, 0, 14, 14], sample=False) == 7.0
def test5(self):
assert standard_deviation([0, 6, 8, 14], sample=False) == 5.0
def test6(self):
assert standard_deviation([6, 6, 8, 8], sample=False) == 1.0
def test_temperatures_southern_california(self):
a = standard_deviation(self.temps_socal)
eq(a, 9.00336637385)
def test_temperatures_southern_california2(self):
a = standard_deviation(self.temps_socal, sample=False)
eq(a, 8.620067027323)
def test_temperatures_montana(self):
a = standard_deviation(self.temps_mt)
eq(a, 45.1378360405574)
def test_temperatures_montana2(self):
a = standard_deviation(self.temps_mt, sample=False)
eq(a, 43.2161878106906)
class TestFormatDataSize(object):
def test_bytes(self):
assert format_byte_size(1) == "1 B"
def test_kibibytes(self):
assert format_byte_size(1000, binary=True) == "1000 B"
assert format_byte_size(1024, 0, True) == "1 KiB"
assert format_byte_size(1024, 2, True) == "1.00 KiB"
def test_kilobytes(self):
assert format_byte_size(1000) == "1.0 kB"
assert format_byte_size(1024, 0, False) == "1 kB"
assert format_byte_size(1024, 2, False) == "1.02 kB"
assert format_byte_size(1024, 0, False, True) == "1 kilobytes"
assert format_byte_size(1024, 2, False, True) == "1.02 kilobytes"
def test_kilobits(self):
assert format_bit_size(1024, 0, False, False) == "1 kb"
assert format_bit_size(1024, 2, False, False) == "1.02 kb"
assert format_bit_size(1024, 0, False, True) == "1 kilobits"
assert format_bit_size(1024, 2, False, True) == "1.02 kilobits"
def test_megabytes(self):
assert format_byte_size(12345678, 2, True) == "11.77 MiB"
assert format_byte_size(12345678, 2, False) == "12.35 MB"
def test_terabytes(self):
assert format_byte_size(12345678901234, 2, True) == "11.23 TiB"
assert format_byte_size(12345678901234, 2, False) == "12.35 TB"
def test_zettabytes(self):
assert format_byte_size(1234567890123456789012, 2, True) == "1.05 ZiB"
assert format_byte_size(1234567890123456789012, 2, False) == "1.23 ZB"
def test_yottabytes(self):
assert format_byte_size(123456789012345678901234567890, 2, True) == "102121.06 YiB"
assert format_byte_size(123456789012345678901234567890, 2, False) == "123456.79 YB"
|
"""This makes functions a subpackage(submodule) of combine"""
|
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestBitfieldIvars(TestBase):
mydir = TestBase.compute_mydir(__file__)
def test(self):
self.build()
lldbutil.run_to_source_breakpoint(self, "// break here", lldb.SBFileSpec("main.m"))
self.expect_expr("chb->hb->field1", result_type="unsigned int", result_value="0")
## This should happen second
self.expect_expr("chb->hb->field2", result_type="unsigned int", result_value="1")
self.expect_expr("hb2->field1", result_type="unsigned int", result_value="10")
self.expect_expr("hb2->field2", result_type="unsigned int", result_value="3")
self.expect_expr("hb2->field3", result_type="unsigned int", result_value="4")
self.expect("frame var *hb2", substrs = [ 'x =', '100',
'field1 =', '10',
'field2 =', '3',
'field3 =', '4'])
# This test is meant to be xfailed, but running the test triggers an ASan
# issue, so it must be skipped for now.
@skipIf
def testExprWholeObject(self):
self.build()
lldbutil.run_to_source_breakpoint(self, "// break here", lldb.SBFileSpec("main.m"))
## FIXME expression with individual bit-fields obtains correct values but not with the whole object
self.expect("expr *hb2", substrs = [ 'x =', '100',
'field1 =', '10',
'field2 =', '3',
'field3 =', '4'])
|
import os, time, sys
from colorama import Fore
os.system("clear")
print(Fore.RED + """
[ 1 ] { DMITRY }
[ 2 ] { WAFW00F }
[ 3 ] { THEHARVESTER }
[ 4 ] { NBTSCAN }
[ 5 ] { SNMP-CHECK }
CODED + BY + Furkan
""")
isc = input(Fore.BLUE + "["+ Fore.GREEN + " ENTER YOUR SELECTION " + Fore.BLUE + "] : ")
if isc == "1":
os.system("clear")
os.system("dmitry")
elif isc == "2":
os.system("clear")
os.system("wafw00f")
elif isc == "3":
os.system("clear")
os.system("theHarvester")
elif isc == "4":
os.system("clear")
os.system("nbtscan")
elif isc == "5":
os.system("clear")
os.system("snmp-check")
|
def add_native_methods(clazz):
def flushBuffer__long__int__java_lang_Runnable__(a0, a1, a2, a3):
raise NotImplementedError()
clazz.flushBuffer__long__int__java_lang_Runnable__ = flushBuffer__long__int__java_lang_Runnable__
|
from mmdet.apis import init_detector, inference_detector, show_result
import mmcv
import os
demopath = os.path.dirname(os.path.realpath(__file__))
# config_file = 'configs/faster_rcnn_r50_fpn_1x.py'
# checkpoint_file = 'checkpoints/faster_rcnn_r50_fpn_1x_20181010-3d1b3351.pth'
config_file = 'configs/mask_rcnn_r50_fpn_1x.py'
checkpoint_file = 'checkpoints/mask_rcnn_r50_fpn_1x_20181010-069fa190.pth'
# build the model from a config file and a checkpoint file
model = init_detector(config_file, checkpoint_file, device='cuda:0')
# test a single image and show the results
img = os.path.join(demopath, '000000125100.jpg') # or img = mmcv.imread(img), which will only load it once
result = inference_detector(model, img)
show_result(img, result, model.CLASSES)
# test a list of images and write the results to image files
# imgs = ['test1.jpg', 'test2.jpg']
# for i, result in enumerate(inference_detector(model, imgs)):
# show_result(imgs[i], result, model.CLASSES, out_file='result_{}.jpg'.format(i))
# test a video and show the results
# video = mmcv.VideoReader('video.mp4')
# for frame in video:
# result = inference_detector(model, frame)
# show_result(frame, result, model.CLASSES, wait_time=1)
|
from .alias import Alias
from .actor import Actor, HTTP_METHODS
from .execution import Execution, HTTP_METHODS
from .worker import Worker, HTTP_METHODS
from .nonce import Nonce
from .message import Message
|
"""Test cases describing different beliefs over quantities of interest of a linear
system."""
|
from __future__ import division
import mbuild as mb
import numpy as np
class SquarePattern(mb.Pattern):
"""A nanoparticle coating pattern where points are removed from two opposite poles on two axes.
Parameters
----------
chain_density : float
Density of chain coating on the nanoparticle (chains / nm^2)
radius : float
Radius of the nanoparticle (nm)
fractional_sa : float
Fraction of the nanoparticle surface area to exclude from the coating (dimensionless)
"""
def __init__(self, chain_density, radius, fractional_sa, **args):
pattern = mb.SpherePattern(int(chain_density * 4.0 * np.pi * radius**2.0))
pattern.scale(radius)
total_sa = 4.0 * np.pi * radius**2.0
patch_sa = total_sa * fractional_sa
cutoff = patch_sa / (8 * np.pi * radius)
points = np.array([xyz for xyz in pattern.points if xyz[2] < radius-cutoff
and xyz[2] > cutoff-radius and xyz[1] < radius-cutoff
and xyz[1] > cutoff-radius])
super(SquarePattern, self).__init__(points=points, orientations=None)
if __name__ == "__main__":
from save_pattern import save_pattern
square_pattern = SquarePattern(4.0, 2.0, 0.5)
save_pattern('test.xyz', square_pattern)
|
from functools import partial
from Lab4.settings import START, STOP
def is_even(x):
return x % 2 == 0
def is_odd(x):
return x % 2 == 1
def is_divisible_by_n(x, n):
return x % n == 0
def does_data_satisfy_concept(data, concept):
return all(concept(item) for item in data)
def concept_power(concept):
return len([i for i in range(START, STOP + 1) if concept(i)])
def concept_label(concept):
if isinstance(concept, partial):
return "{}, n = {}".format(concept.func.__name__, concept.keywords['n'])
else:
return concept.__name__
if __name__ == "__main__":
pass
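# Illustrative values (assuming START=1 and STOP=10 in Lab4.settings):
#   concept_power(is_even) == 5
#   concept_label(partial(is_divisible_by_n, n=3)) == 'is_divisible_by_n, n = 3'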
|
from .fixtures import foo_file
from libextract.core import parse_html, pipeline
def test_parse_html(foo_file):
etree = parse_html(foo_file, encoding='ascii')
divs = etree.xpath('//body/article/div')
for node in divs:
assert node.tag == 'div'
assert node.text == 'foo.'
assert len(divs) == 9
def test_pipeline():
functions = [
lambda x: x + [1],
lambda x: x + [2],
]
assert pipeline([], functions) == [1, 2]
assert pipeline([1], functions) == [1, 1, 2]
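# pipeline appears to thread the seed value through each function in order (a left fold),
# so pipeline([1], functions) evaluates as [1] -> [1, 1] -> [1, 1, 2].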
|
from typing import Optional, TYPE_CHECKING
import wx
if TYPE_CHECKING:
from gui.pane import FunctionPane
# noinspection PyPep8Naming
class ArrayControl(wx.ComboBox):
# noinspection PyShadowingBuiltins
def __init__(self, parent, id):
from functions import Function
choices = list(Function.get_all_vars().keys())
super().__init__(parent, id, choices=choices, value=choices[0])
def get_pane(self, window: wx.Window) -> "FunctionPane":
from gui.pane import FunctionPane
parent = window.GetParent()
if isinstance(parent, FunctionPane):
return parent
if parent is None:
raise ValueError("Could not find a FunctionPane parent for element")
return self.get_pane(parent)
def SetValue(self, value: Optional[str]):
if value is None:
self.SetSelection(0)
else:
super().SetValue(value)
def GetValue(self):
from gui.gui import MainFrame
frame: MainFrame = self.GetTopLevelParent()
return eval(super().GetValue(), frame.get_vars(self))
def GetCode(self):
return super().GetValue()
|
import os
def get_file_list(file_path):
    """Recursively collect the names of all files under file_path."""
    file_list = []
    for root, dirs, files in os.walk(file_path):
        print('# of files in {0}: {1}'.format(root, len(files)))  # can be removed
        file_list.extend(files)
    return file_list
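# Illustrative usage (hypothetical directory):
#   names = get_file_list('/tmp')
#   print(len(names))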
|
import logging
import sys
import os
sys.path.append(os.path.realpath(os.path.dirname(os.path.abspath(__file__)) + "/../../.."))
from scripts.extract import extract_from
from taipan.core import TaipanTarget
def execute(cursor):
logging.info('Reading guides from database')
guides_db = extract_from(cursor, 'target', conditions=[
('is_guide', True),
],
columns=['target_id', 'ra', 'dec', 'ux', 'uy', 'uz'])
return_objects = [TaipanTarget(
g['target_id'], g['ra'], g['dec'], guide=True,
ucposn=(g['ux'], g['uy'], g['uz']),
) for g in guides_db]
logging.info('Extracted %d guides from database' % guides_db.shape[0])
return return_objects
|
import datetime
import unittest
from openregister_client.util import Year, YearMonth, camel_case, markdown, utc
from openregister_client.util import parse_curie, parse_datetime, parse_item_hash, parse_text, parse_timestamp
class UtilTestCase(unittest.TestCase):
def test_datetime_parsing(self):
self.assertEqual(parse_datetime('2017-02-02'), datetime.date(2017, 2, 2))
self.assertEqual(parse_datetime('2017-02-02T12'), datetime.datetime(2017, 2, 2, 12, tzinfo=utc))
self.assertEqual(parse_datetime('2017-02-02T12:30'), datetime.datetime(2017, 2, 2, 12, 30, tzinfo=utc))
self.assertEqual(parse_datetime('2017-02-02T12:30:15'), datetime.datetime(2017, 2, 2, 12, 30, 15, tzinfo=utc))
self.assertEqual(parse_datetime('2017-02-02T12:30:15Z'), datetime.datetime(2017, 2, 2, 12, 30, 15, tzinfo=utc))
self.assertEqual(parse_datetime('2017'), Year(2017))
self.assertEqual(parse_datetime('2017-02'), YearMonth(2017, 2))
self.assertEqual(str(parse_datetime('2017-02')), '2017-02')
with self.assertRaises(ValueError):
parse_datetime(None)
with self.assertRaises(ValueError):
parse_datetime('2017-02-30')
with self.assertRaises(ValueError):
parse_datetime('2017-02-30T12:00:00')
def test_item_hash_parsing(self):
item = parse_item_hash('sha-256:61c1403c4493fd7dffcdd122c62e46e22cfb64ef68f057a0b5d7d753b9237689')
self.assertEqual(item, 'sha-256:61c1403c4493fd7dffcdd122c62e46e22cfb64ef68f057a0b5d7d753b9237689')
self.assertEqual(item.algorithm, 'sha-256')
self.assertEqual(item.value, '61c1403c4493fd7dffcdd122c62e46e22cfb64ef68f057a0b5d7d753b9237689')
with self.assertRaises(ValueError):
parse_item_hash(None)
with self.assertRaises(ValueError):
parse_item_hash('')
with self.assertRaises(ValueError):
parse_item_hash('sha-256')
with self.assertRaises(ValueError):
parse_item_hash('sha-256:61c1403c4493fd7dffcdd122c62e46e22cfb64ef68f057a0b5d7d753b923768')
with self.assertRaises(ValueError):
parse_item_hash('md5:0dbf4d1543bf511ef9a99a6e64d1325a')
def test_timestamp_parsing(self):
self.assertEqual(parse_timestamp('2017-02-02T12:30:15Z'), datetime.datetime(2017, 2, 2, 12, 30, 15, tzinfo=utc))
with self.assertRaises(ValueError):
parse_timestamp(None)
with self.assertRaises(ValueError):
parse_timestamp('2017-02-02T12:30:15')
with self.assertRaises(ValueError):
parse_timestamp('2017-02-02')
with self.assertRaises(ValueError):
parse_timestamp('2017')
@unittest.skipUnless(markdown, 'Markdown support is not installed')
def test_text_datatype_support(self):
self.assertEqual(parse_text(None).html, '')
self.assertEqual(parse_text('').html, '')
self.assertEqual(parse_text('Ministry of Justice').html, '<p>Ministry of Justice</p>')
self.assertEqual(parse_text('*Ministry* of Justice').html, '<p><em>Ministry</em> of Justice</p>')
def test_camel_case(self):
self.assertEqual(camel_case('register'), 'Register')
self.assertEqual(camel_case('country-register'), 'CountryRegister')
self.assertEqual(camel_case('COUNTRY-REGISTER'), 'CountryRegister')
self.assertEqual(camel_case('CountryRegister'), 'Countryregister')
def test_curie(self):
curie = parse_curie('country:GB')
self.assertEqual(curie.prefix, 'country')
self.assertEqual(curie.reference, 'GB')
self.assertEqual(curie.safe_format, '[country:GB]')
curie = parse_curie('[country:FR]')
self.assertEqual(curie.prefix, 'country')
self.assertEqual(curie.reference, 'FR')
self.assertEqual(str(curie), 'country:FR')
self.assertEqual(curie.safe_format, '[country:FR]')
with self.assertRaises(ValueError):
parse_curie(None)
with self.assertRaises(ValueError):
parse_curie('')
with self.assertRaises(ValueError):
parse_curie('country:GB:Wales')
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
"""
Created on Tue Mar 07 11:48:49 2017
@author:
"""
from .exceptions import *
from .gismo import GISMOdataManager
from .gismo import GISMOqcManager
from sharkpylib.file.file_handlers import SamplingTypeSettingsDirectory, MappingDirectory
import os
import json
import pickle
import shutil
# Setup logger
import logging
gismo_logger = logging.getLogger('gismo_session')
gismo_logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s\t%(levelname)s\t%(module)s (row=%(lineno)d)\t%(message)s')
logger_directory = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'log')
if not os.path.exists(logger_directory):
os.mkdir(logger_directory)
logger_file_path = os.path.join(logger_directory, 'gismo_session.log')
file_handler = logging.FileHandler(logger_file_path)
file_handler.setFormatter(formatter)
gismo_logger.addHandler(file_handler)
#==============================================================================
#==============================================================================
class FileInfo(dict):
"""
Created 20180628
Updated 20180713
Holds file information. Source file and pkl file.
"""
def __init__(self,
file_path='',
pkl_directory=''):
self.file_path = file_path
self.pkl_directory = pkl_directory
file_name = os.path.basename(file_path)
name, ending = os.path.splitext(file_name)
directory = os.path.dirname(file_path)
pkl_file_path = os.path.join(pkl_directory, '{}.pkl'.format(name))
self['file_id'] = name
self['directory'] = directory
self['file_path'] = file_path
self['pkl_file_path'] = pkl_file_path
def get_file_path(self):
return self['file_path']
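# Illustrative contents (paths are made up):
#   FileInfo(file_path='/data/ferrybox_2018.txt', pkl_directory='/users/default/pkl_files')
#   -> {'file_id': 'ferrybox_2018',
#       'directory': '/data',
#       'file_path': '/data/ferrybox_2018.txt',
#       'pkl_file_path': '/users/default/pkl_files/ferrybox_2018.pkl'}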
#==============================================================================
#==============================================================================
class UserInfo():
"""
Created 20180627
"""
#==========================================================================
def __init__(self,
user='',
user_directory=''):
"""
Created 20180627
Updated 20180627
Loads the json info file.
If the file does not exist, it is created.
If it exists, its fields are updated with "update_data".
The fields must be initiated here; no keys are created in
this or any other script.
The json info file holds the following information:
user name
loaded files and their origin
"""
assert all([user, user_directory])
self.user = user
self.user_directory = user_directory
self.pkl_directory = os.path.join(self.user_directory, 'pkl_files')
update_data = {'user': self.user,
'loaded_files': {}}
self.file_id_sampling_type_mapping = {}
if not os.path.exists(self.user_directory):
os.makedirs(self.user_directory)
if not os.path.exists(self.pkl_directory):
os.makedirs(self.pkl_directory)
self.info_file_path = os.path.join(self.user_directory, 'user_info.json')
self.content = {}
# Load info file if existing
if os.path.exists(self.info_file_path):
with open(self.info_file_path, "r") as fid:
self.content = json.load(fid)
# Update content with possibly new fields
self.content.update(update_data)
# Save info file
self._save_file()
#==========================================================================
def _save_file(self):
"""
Created 20180627
Save self to json file
"""
with open(self.info_file_path, "w") as fid:
json.dump(self.content, fid)
#==========================================================================
def add_file(self,
sampling_type='',
file_path='',
settings_file_path=''):
"""
Created 20180627
Updated 20180713
Information to be added when adding a file.
returns a "file name dict" containing information about data file and settings file:
directory
file_path
pkl_file_path
"""
assert all([sampling_type, file_path])
# file_name = os.path.basename(file_path)
# name, ending = os.path.splitext(file_name)
# directory = os.path.dirname(file_path)
# pkl_file_path = os.path.join(self.pkl_directory, '{}.pkl'.format(name))
#
# info_dict = {file_name: {'directory': directory,
# 'file_path': file_path},
# 'pkl_file_path': pkl_file_path}
self.content.setdefault('loaded_files', {}).setdefault(sampling_type, {})
info_data = FileInfo(file_path=file_path,
pkl_directory=self.pkl_directory)
info_settings = FileInfo(file_path=settings_file_path,
pkl_directory=self.pkl_directory)
file_id = info_data.get('file_id')
self.content['loaded_files'][sampling_type][file_id] = {}
self.content['loaded_files'][sampling_type][file_id]['file_id'] = file_id
self.content['loaded_files'][sampling_type][file_id]['data_file'] = info_data
# print('settings_file_path', settings_file_path)
self.content['loaded_files'][sampling_type][file_id]['settings_file'] = info_settings
self.file_id_sampling_type_mapping[file_id] = sampling_type
self._save_file()
return self.content['loaded_files'][sampling_type][file_id]
#==========================================================================
def delete_file(self,
sampling_type='',
file_id=''):
"""
Created 20180628
Updated 20180713
Deletes information about a file.
"""
files_dict = self.content['loaded_files'].get(sampling_type, {})
if file_id in files_dict:
files_dict.pop(file_id)
self.file_id_sampling_type_mapping.pop(file_id)
return True
#==========================================================================
def get_sampling_type_for_file_id(self, file_id):
"""
Created 20180713
"""
return self.file_id_sampling_type_mapping.get(file_id, None)
#==========================================================================
def get_file_id_list(self, sampling_type):
"""
Created 20180713
Updated
Returns a list of the loaded files (file_id) for the given sampling type.
"""
return sorted(self.content['loaded_files'].get(sampling_type, {}).keys())
def get_file_path(self, file_id):
sampling_type = self.get_sampling_type_for_file_id(file_id)
return self.content['loaded_files'][sampling_type][file_id]['data_file'].get_file_path()
#==============================================================================
#==============================================================================
class GISMOsession(object):
"""
Created 20180625
"""
#==========================================================================
def __init__(self,
root_directory='',
users_directory='',
log_directory='',
user='default',
sampling_types_factory=None,
qc_routines_factory=None,
**kwargs):
"""
Created 20180625
Updated 20181003
root_directory is optional but needs to be provided if "root" is in the settings files.
kwargs can include:
save_pkl
"""
gismo_logger.info('Start session')
#if not all([users_directory, user, sampling_types_factory]):
# raise GISMOExceptionMissingInputArgument
self.root_directory = root_directory
self.users_directory = users_directory
self.log_directory = log_directory
self.save_pkl = kwargs.get('save_pkl', False)
self.sampling_types_factory = sampling_types_factory
self.qc_routines_factory = qc_routines_factory
self.user = user
self.user_directory = os.path.join(self.users_directory, self.user)
self.data_manager = GISMOdataManager(factory=self.sampling_types_factory)
self.qc_manager = GISMOqcManager(factory=self.qc_routines_factory)
self.compare_objects = {}
self._load_attributes()
self._startup_session()
def _load_attributes(self):
# Settings files
self.settings_files = SamplingTypeSettingsDirectory()
# if not os.path.exists(self.settings_files_directory):
# os.makedirs(self.settings_files_directory)
# self.settings_files = {}
# for file_name in os.listdir(self.settings_files_directory):
# if not file_name.endswith('.json'):
# continue
# self.settings_files[file_name] = os.path.join(self.settings_files_directory, file_name)
# Mapping files
self.mapping_files = MappingDirectory()
# if not os.path.exists(self.mapping_files_directory):
# os.makedirs(self.mapping_files_directory)
# self.mapping_files = {}
# for file_name in os.listdir(self.mapping_files_directory):
# if not file_name.endswith('txt'):
# continue
# self.mapping_files[file_name] = os.path.join(self.mapping_files_directory, file_name)
# ==========================================================================
def _startup_session(self):
"""
Created 20180625
Updated 20180713
"""
# Create and load json info file
self.user_info = UserInfo(self.user,
self.user_directory)
# # Initate Boxen that will hold all data
# self.boxen = gtb_core.Boxen(controller=self,
# root_directory=self.root_directory)
def add_settings_directory(self, directory):
self.settings_files.add_directory(directory)
def add_mapping_directory(self, directory):
self.mapping_files.add_directory(directory)
def add_compare_object(self, main_file_id, compare_file_id, **kwargs):
pass
def get_valid_flags(self, file_id):
"""
Returns the valid flags of the gismo object with the given file_id.
:param file_id:
:return:
"""
return self.data_manager.get_valid_flags(file_id)
def flag_data(self, file_id, flag, *args, **kwargs):
"""
Method to manually flag data in given file.
:param file_id:
:param flag:
:param args:
:param kwargs:
:return: None
"""
self.data_manager.flag_data(file_id, flag, *args, **kwargs)
# ==========================================================================
def get_sampling_types(self):
return self.sampling_types_factory.get_list()
def get_station_list(self):
return self.data_manager.get_station_list()
def get_settings_files(self):
return self.settings_files.get_list()
# ==========================================================================
def get_qc_routines(self):
return self.qc_routines_factory.get_list()
def get_valid_qc_routines(self, file_id):
return self.data_manager.get_valid_qc_routines(file_id)
# ==========================================================================
def get_sampling_type_requirements(self, sampling_type):
return self.sampling_types_factory.get_requirements(sampling_type)
# ==========================================================================
def get_qc_routine_requirements(self, routine):
return self.qc_routines_factory.get_requirements(routine)
# ==========================================================================
def get_qc_routine_options(self, routine):
return self.qc_manager.get_qc_options(routine)
def get_file_path(self, file_id):
"""
Returns the file path for the given file_id
"""
gismo_object = self.get_gismo_object(file_id)
return os.path.abspath(gismo_object.file_path)
def get_filter_options(self, file_id, **kwargs):
"""
Created 20181004
:param file_id:
:param kwargs:
:return: list of filter options
"""
return self.data_manager.get_filter_options(file_id, **kwargs)
def get_flag_options(self, file_id, **kwargs):
"""
Created 20181005
:param file_id:
:param kwargs:
:return: list of flag options
"""
return self.data_manager.get_flag_options(file_id, **kwargs)
def get_flag_options_mandatory(self, file_id, **kwargs):
"""
Created 20191130
:param file_id:
:param kwargs:
:return: list of mandatory flag options
"""
return self.data_manager.get_flag_options_mandatory(file_id, **kwargs)
def get_filtered_file_id_list(self, **kwargs):
"""
Returns a list of the loaded file_id:s that matches the given criteria.
:param kwargs:
:return: list of file_id matching given filter
"""
return self.data_manager.get_filtered_file_id_list(**kwargs)
def get_mask_options(self, file_id, **kwargs):
"""
Created 20181005
:param file_id:
:param kwargs:
:return: list of mask options
"""
return self.data_manager.get_mask_options(file_id, **kwargs)
def get_save_data_options(self, file_id, **kwargs):
"""
Created 20181106
:param file_id:
:param kwargs:
:return: list of mask options
"""
return self.data_manager.get_save_data_options(file_id, **kwargs)
def get_data(self, file_id, *args, **kwargs):
"""
Created 20181004
:param file_id:
:param args:
:param kwargs:
:return: data as list/array (if one args) or list of lists/arrays (if several args)
"""
return self.data_manager.get_data(file_id, *args, **kwargs)
def get_match_data(self, main_file_id, match_file_id, *args, **kwargs):
return self.data_manager.get_match_data(main_file_id, match_file_id, *args, **kwargs)
def get_merge_data(self, main_file_id, match_file_id, *args, **kwargs):
return self.data_manager.get_merge_data(main_file_id, match_file_id, *args, **kwargs)
def get_match_object(self, main_file_id, match_file_id, *args, **kwargs):
return self.data_manager.get_match_object(main_file_id, match_file_id, *args, **kwargs)
def match_files(self, main_file_id, match_file_id, **kwargs):
self.data_manager.match_files(main_file_id, match_file_id, **kwargs)
def get_metadata_tree(self, file_id):
gismo_object = self.get_gismo_object(file_id)
return gismo_object.get_metadata_tree()
# ==========================================================================
def load_file(self,
sampling_type='',
data_file_path='',
settings_file='',
**kwargs):
"""
:param sampling_type:
:param data_file_path:
:param settings_file: must be found in self.settings_files_path
:param kwargs:
:return:
"""
"""
Created 20180628
Updated 20181004
If reload==True the original file is reloaded regardless of whether a pkl file exists.
sampling_type refers to SMTYP in SMHI codelist
kwargs can be:
file_encoding
"""
if sampling_type not in self.data_manager.sampling_type_list:
raise GISMOExceptionInvalidSamplingType(sampling_type)
# print('=', self.settings_files)
# print('-', settings_file)
# if not settings_file.endswith('.json'):
# settings_file = settings_file + '.json'
# settings_file_path = self.settings_files.get(settings_file, None)
settings_file_path = self.settings_files.get_path(settings_file)
if not settings_file_path:
raise GISMOExceptionMissingSettingsFile
kw = dict(data_file_path=data_file_path,
settings_file_path=settings_file_path,
# root_directory=self.root_directory,
# mapping_files_directory=self.mapping_files_directory,
)
kw.update(kwargs)
# Check sampling type requirements
sampling_type_requirements = self.get_sampling_type_requirements(sampling_type)
if not sampling_type_requirements:
raise GISMOExceptionMissingRequirements
for item in sampling_type_requirements:
if not kw.get(item):
raise GISMOExceptionMissingInputArgument(item)
return self.data_manager.load_file(data_file_path=data_file_path,
sampling_type=sampling_type,
settings_file_path=settings_file_path,
mapping_files=self.mapping_files)
# if not all([sampling_type, file_path, settings_file_path]):
# raise GISMOExceptionMissingInputArgument
# if not all([os.path.exists(file_path), os.path.exists(settings_file_path)]):
# raise GISMOExceptionInvalidPath
# Add file path to user info
file_path = os.path.abspath(kw.get('data_file_path'))
settings_file_path = os.path.abspath(kw.get('settings_file_path'))
file_paths = self.user_info.add_file(sampling_type=sampling_type,
file_path=file_path,
settings_file_path=settings_file_path)
# Get file paths
data_file_path = file_paths.get('data_file', {}).get('file_path', '')
data_file_path_pkl = file_paths.get('data_file', {}).get('pkl_file_path', '')
data_file_path_settings = file_paths.get('settings_file', {}).get('file_path', '')
# Get file_id
file_id = file_paths.get('data_file', {}).get('file_id', '')
if not file_id:
raise GISMOExceptionMissingKey
# print(data_file_path)
# print(data_file_path_settings)
# Check type of file and load
if kwargs.get('reload') or not os.path.exists(data_file_path_pkl):
# Load original file
self.data_manager.load_file(data_file_path=data_file_path,
sampling_type=sampling_type,
settings_file_path=data_file_path_settings,
# root_directory=self.root_directory, # Given in kwargs
save_pkl=self.save_pkl,
pkl_file_path=data_file_path_pkl,
mapping_files=self.mapping_files,
# mapping_files_directory=self.mapping_files_directory,
**kwargs)
else:
# Check if sampling_type is correct
# file_name = os.path.basename(file_path)
# expected_sampling_type = self.user_info.get_sampling_type_for_file_id(file_id)
# if expected_sampling_type != sampling_type:
# return False
# Load buffer pickle file
self.data_manager.load_file(sampling_type=sampling_type,
load_pkl=self.save_pkl,
pkl_file_path=data_file_path_pkl)
return file_id
def load_files(self,
sampling_type='',
data_file_paths=[],
settings_file='',
**kwargs):
"""
Load several files using the same settings_file. Calls self.load_file with every path in data_file_paths
:param sampling_type:
:param data_file_paths:
:param settings_file:
:param kwargs:
:return:
"""
file_id_list = []
for data_file_path in data_file_paths:
file_id = self.load_file(sampling_type=sampling_type,
data_file_path=data_file_path,
settings_file=settings_file,
**kwargs)
file_id_list.append(file_id)
return file_id_list
def has_file_id(self):
return self.data_manager.has_file_id()
def has_metadata(self, file_id):
return self.data_manager.has_metadata(file_id)
def remove_file(self, file_id):
"""
Removes the given file_id from the session.
:param file_id:
:return:
"""
self.data_manager.remove_file(file_id)
#==========================================================================
def _load_pickle_file(self, data_file_path_pkl):
"""
Created 20180828
Loads a pickle file that contains data and settings information.
Returns a gismo object.
"""
with open(data_file_path_pkl, "rb") as fid:
gismo_object = pickle.load(fid)
return gismo_object
def save_file(self, file_id, **kwargs):
"""
Created 20181106
:param file_id:
:param kwargs:
:return: None
"""
self.data_manager.save_file(file_id, **kwargs)
# ==========================================================================
def get_file_id_list(self, sampling_type=None):
"""
Created 20180713
Updated
Returns a list of the loaded files (file_id) for the given sampling type.
"""
return self.data_manager.get_file_id_list(sampling_type=sampling_type)
# ==========================================================================
def get_gismo_object(self, file_id=''):
"""
Created 20180713
Updated 20181022
Returns the gismo object marked with the given file_id
"""
if not file_id:
raise GISMOExceptionMissingInputArgument('file_id')
return self.data_manager.get_data_object(file_id)
# ==========================================================================
def get_parameter_list(self, file_id='', **kwargs):
if not file_id:
raise GISMOExceptionMissingInputArgument
return self.data_manager.get_parameter_list(file_id, **kwargs)
def get_position(self, file_id, **kwargs):
"""
:param file_id:
:param kwargs:
:return: List with position(s). Two options:
fixed position: [lat, lon]
trajectory: [[lat, lat, lat, ...], [lon, lon, lon, ...]]
"""
return self.data_manager.get_position(file_id, **kwargs)
def get_unit(self, file_id='', unit='', **kwargs):
if not file_id:
raise GISMOExceptionMissingInputArgument
return self.data_manager.get_unit(file_id, unit, **kwargs)
# ==========================================================================
def print_list_of_gismo_objects(self):
"""
Created 20180926
Updated 20180927
Prints a list of all loaded gismo objects, sorted by sampling_type.
"""
for st in sorted(self.gismo_objects):
print('Sampling type:', st)
for file_id in sorted(self.gismo_objects[st]):
print(' {}'.format(file_id))
def run_automatic_qc(self, file_id=None, qc_routine=None, **kwargs):
"""
Runs the given automatic qc routine on the given gismo object(s).
:param file_id: a single file_id or a list of file_ids
:param qc_routine:
:param kwargs:
:return: True if successful
"""
if not file_id:
raise GISMOExceptionMissingInputArgument
if type(file_id) != list:
file_id = [file_id]
gismo_objects = [self.get_gismo_object(f) for f in file_id]
# Check qc requirements
qc_requirements = self.get_qc_routine_requirements(qc_routine)
if qc_requirements is None:
raise GISMOExceptionMissingRequirements
for item in qc_requirements:
if not kwargs.get(item):
raise GISMOExceptionMissingInputArgument(item)
return self.qc_manager.run_automatic_qc(gismo_objects=gismo_objects, qc_routine=qc_routine, **kwargs)
if __name__ == '__main__':
from sharkpylib.gismo.session import GISMOsession
from sharkpylib.gismo import sampling_types
from sharkpylib.gismo import qc_routines
from sharkpylib.odv.create import SimpleODVfile
from sharkpylib.odv.spreadsheet import SpreadsheetFile
d = r'C:\mw\temp_odv'
sampling_types_factory = sampling_types.PluginFactory()
qc_routines_factory = qc_routines.PluginFactory()
session = GISMOsession(root_directory=d,
users_directory=d,
log_directory=d,
user='temp_user',
sampling_types_factory=sampling_types_factory,
qc_routines_factory=qc_routines_factory,
save_pkl=False)
# file_path = r'C:\mw\temp_odv/TransPaper_38003_20120601001612_20120630235947_OK.txt'
# session.load_file('Ferrybox CMEMS', file_path, 'cmems_ferrybox')
#
# g = session.data_manager.objects[session.get_file_id_list()[0]]
#
# s = SimpleODVfile.from_gismo_object(g)
#
# s.create_file(r'C:\mw\temp_odv/transpaper_odv.txt')
#
# file_path = r'C:\mw\temp_odv/data_from_asko_odv(5).txt'
#
# sp = SpreadsheetFile(file_path)
# data = sp.get_edited_flags(qf_prefix='8')
|
from typing import Sequence
from Bot import BotState, ReplyResult, ReplyUtils, telegrammer, Room, Message
def room_human(msg: Message.Message, _: BotState) -> ReplyResult:
    if 'Растеряться' in msg.replies: # Bill Gates
yield 'Растеряться'
telegrammer.get_message()
for i in range(6):
yield 'Загрузка'
telegrammer.get_message()
yield 'Загрузка'
    elif 'Зайти в дверь' in msg.replies: # yegorf1
yield 'Зайти в дверь'
telegrammer.get_message()
yield 'Идти дальше по коридору'
telegrammer.get_message()
yield 'Зайти в дверь слева'
elif 'Попросить лису' in msg.replies: # kiba
yield 'Уйти'
else:
for var in [
            '—А... э-э... как Вы здесь оказались?', # Hideo Kojima
            '—Как поживаете?' # Gabe Newell
]:
if var in msg.replies:
yield var
break
else:
yield 'Войти спиной вперёд'
telegrammer.get_message()
yield 'Я... э-э... (сбежать)'
def room_river(msg: Message.Message, __: BotState) -> ReplyResult:
for i in msg.replies:
if i.startswith('Вернуться'):
return i
rooms: Sequence[Room.Room] = [
Room.Room("Архимаг из прошлых эпох",
{'rooms/default/monster/easy/archemage.py': 'b857a8aca9235e4ee286a70592e5f11d0495f981'},
ReplyUtils.battle),
Room.Room("Армия Вьетнамцев",
{'rooms/vietnam/monster/vietnam_army.py': 'f9991f8651da6b33d3a0d7de90794dc84c25c682'},
ReplyUtils.battle),
Room.Room("Армия Вьетнамцев",
{'rooms/default/missions/caravan/army.py': '2c43669f2d24084b5b50f26b0f5cb1520e892d41',
'rooms/vietnam/missions/caravan/army.py': '6b5f8be0f777cb2d814b871dd8f204c8a66e8ed0'},
ReplyUtils.battle),
Room.Room("Банкиры Люцифера",
{'rooms/default/usual/lucifer_bank.py': 'af24e3fe59f333dd64d26d2f5e4f46171e82100f'},
ReplyUtils.reply('Придержать у себя')),
Room.Room("Библиотека",
{'rooms/default/usual/librarian.py': '5972fdd02b9b12ecee68ffbad71dbd6a96f4e667'},
ReplyUtils.reply('Поучиться')),
Room.Room("Билл Шифр",
{'rooms/default/special/bill_cypher.py': '06a376f6e563b679f925630a019d1ae94d8c018f',
'rooms/vietnam/special/bill_cypher.py': '06a376f6e563b679f925630a019d1ae94d8c018f'},
ReplyUtils.reply("Забирай, они мне не нужны")),
Room.Room("Василиск",
{'rooms/default/monster/hard/basilisk.py': '6d14e46a86d01d7dc820444496af9f359656794c'},
ReplyUtils.battle),
Room.Room("Веган",
{'rooms/default/usual/vegan.py': 'f1843a5c051e481796f96f55ccf5acaa76abbe0a'},
ReplyUtils.go_away),
Room.Room("Вейпер",
{'rooms/default/monster/hard/vaper.py': '3c505d676cc00a04103974e044a9997f58fdbc39'},
ReplyUtils.battle),
Room.Room(("Вода", "_Вода_"),
{'rooms/default/usual/water.py': 'd5d7257b0d06f309ffd6c5e77a85ee0378032ee2'},
ReplyUtils.concat(ReplyUtils.reply('Ты видишь сундук! Доплыть до него'), ReplyUtils.dice)),
Room.Room("Волк-оборотень",
{'rooms/default/monster/easy/werewolf.py': 'e882385ad4e6b92609e508dcdacd159f6c6a4c5d'},
ReplyUtils.battle),
Room.Room("Гидеон",
{'rooms/default/usual/gideon.py': '7b9dc0d904656557f55dc5af0460f1118fead42c'},
ReplyUtils.concat(
ReplyUtils.dice,
ReplyUtils.get_reply(
ReplyUtils.concat(
ReplyUtils.conditional(
lambda msg, _: (
'Что-о-о? Как ты это сделал?' in msg.text
or 'От шока Гидеон выронил дневник из рук.' in msg.text
),
ReplyUtils.set_item('Книга тайн №2', 1),
),
ReplyUtils.repeat_message,
ignore_func=None
)
),
ignore_func=None
)),
Room.Room("Гном в красной шапке",
{'rooms/default/usual/gnome.py': '1a0e14ee3cd86a91dc73d5aa3611d9a88841656d'},
ReplyUtils.reply("1")),
Room.Room("Горшок золота",
{'rooms/default/usual/lepricone.py': '3ec9ad6fb9326c5d70080bb22bf04306f5237f49'},
ReplyUtils.reply('Взять золото')),
Room.Room("Группа Бандитов",
{'rooms/default/monster/medium/bandits.py': '011e6d7de2fd3125249a83d9422e62c8d70e3e0b'},
ReplyUtils.reply('Блин, вы либо дольше держите, либо глубже опускайте!')),
Room.Room("Гуль",
{'rooms/default/monster/medium/ghoul.py': '5d917beef7bcd36b060a5650940c2479462e17ea'},
ReplyUtils.battle),
Room.Room("Дверь",
{'rooms/default/usual/door.py': 'd5c0a0a50209996df3cf45519848087066ae9565'},
ReplyUtils.reply('Открыть')),
Room.Room("Дети",
{'rooms/default/monster/kids.py': '27fb10298f2d3579742cb30ec6d0def7eab0d6a5'},
ReplyUtils.concat(ReplyUtils.reply('Гадость'), ReplyUtils.reply('➰ Использовать: ✨ Воображение'),
ReplyUtils.dice)),
Room.Room("Джейсон Вурхиз",
{'rooms/default/monster/hard/jayson.py': '918fe8cd08d11db9324ebdb7ca0322330965c724'},
ReplyUtils.battle),
Room.Room("Диппер",
{'rooms/default/monster/medium/dipper.py': 'e44301263f09e8d817fe41a43165d8b981a20de7'},
ReplyUtils.concat(
ReplyUtils.battle,
ReplyUtils.set_item("Книга тайн №3", 1),
ignore_func=None
)),
Room.Room("Длинноволосая брюнетка",
{'rooms/default/monster/medium/chubaka.py': 'ca6a6b6ccee1a333384331690a2c23a1cf41b893'},
ReplyUtils.battle),
Room.Room("Дозоры",
{'rooms/default/usual/watches.py': 'df973e10eb2e5cfedaa24792ff946bdf6002174f'},
ReplyUtils.go_away),
Room.Room("Доктор кто",
{'rooms/default/monster/doctor_who.py': '313a19a228ec23ea0858d5d909519eb8b1df049e'},
ReplyUtils.reply('Сдаться')),
Room.Room("Дракон",
{'rooms/default/monster/dragon.py': 'afca1a6f8cb97531b1d63818e6d4c55271423d35'},
ReplyUtils.battle),
Room.Room("Дух бесплотный",
{'rooms/default/monster/easy/spirit.py': '7783c513c54bd7ca97bbd6995554bd259cc2df47'},
ReplyUtils.battle),
Room.Room("Дядя Стенли",
{'rooms/default/usual/uncle_stanley.py': 'dcd192e46b40bac97ff8a1ee943233c0611f1b05'},
ReplyUtils.concat(
ReplyUtils.set_item('Книга тайн №1', 1),
ReplyUtils.go_away,
ignore_func=None
)),
Room.Room("Зеркало",
{'rooms/default/monster/medium/mirror.py': 'ecacdbf013032ba1fba1936a8b4a4a3b4bcc56f4',
'rooms/default/missions/main/third.py': '907941d7e85942acff6f2650a4270dd6b166f7fa',
'rooms/vietnam/missions/main/third.py': '907941d7e85942acff6f2650a4270dd6b166f7fa'},
ReplyUtils.conditional(
lambda msg, _: 'Разбить' in msg.replies,
ReplyUtils.reply("Разбить"),
ReplyUtils.battle
)),
Room.Room("Зомби",
{'rooms/default/monster/easy/zombie.py': '830b9ee2955577cf31d1eca398323d79ef4dc882'},
ReplyUtils.battle),
Room.Room("Зуллуинский магазин",
{'rooms/default/special/helloween_shop.py': '0b0ddee8deffefc09afcbb0b195ad6a8fd8a92d2'},
ReplyUtils.reply("Тыква")),
Room.Room("Интернет тролль",
{'rooms/default/monster/medium/internet_troll.py': '1be1574e599841f942f2265253e355b90a56326d'},
ReplyUtils.battle),
Room.Room("Кабинет директора",
{'rooms/default/usual/dumbledore_office.py': 'e7921f2b6dbda3efaa365a9e8b5f37af8800a058'},
ReplyUtils.go_away),
Room.Room('Казино "Марианская Впадина"',
{'rooms/default/usual/roulette.py': 'c70b95810d4fa8e366065b895ba88b1b9aabb8b6'},
ReplyUtils.go_away),
Room.Room("Какой-то игрок",
{'rooms/default/usual/some_player.py': '06f0b48bf7ba84dbac4bb7dc8e43e2b6a0e038c4'},
ReplyUtils.go_away),
Room.Room("Какой-то мужчина",
{'rooms/default/usual/witcher.py': 'c46e4f4898d122faf7239c50effc5cc5f6900fa1'},
ReplyUtils.reply('Ты носишь двуручник за спиной? Ты в каком фильме это видел?')),
Room.Room("Караван Дварфов",
{'rooms/default/missions/caravan/caravan.py': 'e17a35c25f06ab97e25b588b716fc46e017f63de',
'rooms/vietnam/missions/caravan/caravan.py': '5ece9df31858db029763ff0b0da048030544f5bd',
'rooms/default/missions/caravan/first.py': '4c93401ffa981c886437daa99e303ed576ae3b5c',
'rooms/vietnam/missions/caravan/first.py': 'fc22a1eefc1ad656965a9f581281fc60e9c8373c'},
ReplyUtils.go_away),
Room.Room("Катапульта",
{'rooms/default/special/stone_room.py': None,
'rooms/vietnam/special/stone_room.py': None},
ReplyUtils.concat(ReplyUtils.reply("@RogBotBot"), ReplyUtils.reply("Привет от бота!"))),
Room.Room("Книга судьбы",
{'rooms/default/usual/destiny_book.py': 'd495dab11801dd8bc2dfc0df8044257b51fde08c'},
ReplyUtils.go_away),
Room.Room("Колизей",
{'rooms/default/special/tornament.py': '19ed4328987cbe44591241b06e1c216841d7695a',
'rooms/vietnam/special/tornament.py': '6268e2a24bff30fb856b2aa06c49543d31aa1bda'},
ReplyUtils.battle),
Room.Room("Комиссар",
{'rooms/default/usual/comissar.py': 'e3ef680bc45f01b9247cab3f5f331eb0a4b7855e'},
ReplyUtils.concat(
ReplyUtils.reply('Сказать, что готовы'),
ReplyUtils.reply('Да'),
ReplyUtils.reply('Нет'),
ReplyUtils.reply('Нет'))),
Room.Room("Конец игры",
{'rooms/default/usual/exit.py': '2304732b666cd8ca101ad0472a5402f217328a6d'},
ReplyUtils.go_away),
Room.Room("Король лич",
{'rooms/default/boss/lich_king.py': '96d3cebbeb9179c68ee2bf19a44aea8f0b867a19'},
ReplyUtils.battle),
Room.Room("Красная Виверна",
{'rooms/default/boss/hellkite_dragon.py': '76fbfbddd7910186c48c0abfd7af04281c285904'},
ReplyUtils.battle),
Room.Room("Красотка",
{'rooms/default/usual/spanish_girl.py': '45f45ce23deaa2f0b272e9f01afbd4913ebecc4e'},
ReplyUtils.go_away),
Room.Room("Кролик",
{'rooms/default/monster/medium/rabbit.py': '3329e6e7f7f722b65216ae0f6144a998730f345f'},
ReplyUtils.battle),
Room.Room("Крыса",
{'rooms/default/monster/easy/rat.py': '1cea9b12acbf85237ffe7b2204db022304630a35'},
ReplyUtils.battle),
Room.Room("Крыса-летяга",
{'rooms/default/monster/easy/bat.py': '9eedc151cc8b46d9371aa6264ed9b8458f128276'},
ReplyUtils.battle),
Room.Room("Ктулху",
{'rooms/default/boss/cthulhu.py': 'f80b02bdd25c973690f7a3f5974071a1348e8da3'},
ReplyUtils.battle),
Room.Room("Куст",
{'rooms/vietnam/monster/bush_soldier.py': '47bd022aea513061881a0f47847e7652021c0b5e'},
ReplyUtils.battle),
Room.Room("Леприкон",
{'rooms/default/missions/lepricone/first.py': 'dd0a7834f88568b61b4d35ffb03a855c30fbb5f5',
'rooms/vietnam/missions/lepricone/first.py': '6fc7056e593c37dedf6a9cc8bace69821b609dcb'},
ReplyUtils.battle),
Room.Room(('Лес', 'Дорога', 'Дупло', 'Ничего', 'Мама'),
{'rooms/default/usual/nothing.py': '8f08acd5c154d757573cf285cd65f3fc4ee6dbdb'},
ReplyUtils.reply("Идти дальше")),
Room.Room("Листва",
{'rooms/vietnam/usual/trap.py': '81211364b6cf5af7b5c1b1715df6832f71e55ef8'},
ReplyUtils.reply('Выбираться')),
Room.Room("Лунная Бабочка",
{'rooms/default/boss/moonlight_butterfly.py': 'ad9d1a98624aaeffac00189a91197f3dbc008118'},
ReplyUtils.battle),
Room.Room("Лягушка",
{'rooms/default/usual/frog.py': '508f5632c762ae9267dbafc8c492b35fd290f6e2'},
ReplyUtils.reply('Смотреть и ждать')),
Room.Room("Медведь",
{'rooms/default/monster/medium/bear.py': '3f9d4f8a45d307c1ab49fca6dfd01a7bafa175a8'},
ReplyUtils.battle),
Room.Room("Механочерепаха",
{'rooms/default/monster/medium/turtle.py': '9510e5397addf0d1876414b4ec8f6f6bf231c5f5'},
ReplyUtils.battle),
Room.Room("Миньон",
{'rooms/default/monster/easy/minion.py': '9f4bc3a1c57154cdbe7c98bdc36ae9a24aa91930'},
ReplyUtils.battle),
Room.Room("Мишень",
{'rooms/default/usual/goal.py': '5d64ddafbd2fa4b64efac24603604353e63284e2'},
ReplyUtils.concat(ReplyUtils.reply('Попробовать попасть в нее'), ReplyUtils.dice)),
Room.Room("Мост",
{'rooms/default/usual/troll_bridge.py': '173d027493e3d63f62c4cff232c7dbe404937868'},
ReplyUtils.reply('Пройти мимо')),
Room.Room("Назгул",
{'rooms/default/monster/medium/nazgul.py': 'c0d8c4f054c249423a544c404372dc54d2ce3041'},
ReplyUtils.battle),
Room.Room("Неестественная семилапка",
{'rooms/default/monster/easy/quinquepede.py': '5c43b66b763ac9966218a89ee410df4982a7e92c'},
ReplyUtils.battle),
Room.Room("Нечто из глубин",
{'rooms/default/special/the_thing_from_below.py': 'e984a28680b7a5ed040ff2820aa02949788473ca',
'rooms/vietnam/special/the_thing_from_below.py': 'e984a28680b7a5ed040ff2820aa02949788473ca'},
ReplyUtils.battle),
Room.Room("Озеро из мороженного",
{'rooms/default/special/icecream.py': '2830343ede707107e4b81cadc7f5cc603c1b1794',
'rooms/vietnam/special/icecream.py': '2830343ede707107e4b81cadc7f5cc603c1b1794'},
ReplyUtils.reply("Набрать себе")),
Room.Room("Орк",
{'rooms/default/monster/hard/ork.py': 'd82a84068e87666268c975074d0b6e94c961b529'},
ReplyUtils.battle),
Room.Room("Останки",
{'rooms/default/special/remains.py': '1187c37030773a737422f96809149741d80f1f27',
'rooms/vietnam/special/remains.py': '8520df6937b2f7334aa5d2db3551ee1025db8a2b'},
ReplyUtils.go_away),
Room.Room("Отдел кадров",
{'rooms/default/usual/call_back.py': '1e2b8aeeb46bb4d1459dc421bc1559be0df82bb1'},
ReplyUtils.go_away),
Room.Room("Парикмахерская",
{'rooms/default/usual/haircutter.py': 'e755e86029f87391cec5564db06dd65a580528c7'},
ReplyUtils.reply('Подстричься за 10 золотых')),
Room.Room("Подсказка",
{'rooms/default/missions/tips/tips.py': '71467737094907733b57ad6572fe727e4a1ebb89',
'rooms/vietnam/missions/tips/tips.py': '78c316889771328066b869ea9af117418bc8389b'},
ReplyUtils.reply("Послушать и уйти")),
Room.Room("Портал",
{'rooms/default/usual/rick_and_morty.py': '990a07dceb8de522dcc1e792e66eafe4bd91631a'},
ReplyUtils.concat(ReplyUtils.reply('Подойти ближе'), ReplyUtils.reply('Поздороваться'),
ReplyUtils.no_pet)),
Room.Room("Потрёпанный мужик с дробовиком",
{'rooms/default/usual/no_sudden_movement.py': '4f0cd9a7ec553bf1690a2a832b16418ede4b0a49'},
ReplyUtils.reply('Уйти', delay=15 + 5)),
Room.Room("Промоутер",
{'rooms/default/monster/medium/promote.py': '174dbd42831002ad42f4fb8b6b163d23d61f7fff'},
ReplyUtils.battle),
Room.Room("Разверстый Дракон",
{'rooms/default/boss/naping_dragon.py': 'fda52fed1fabc7c4f1543941979681ea8c1158df'},
ReplyUtils.battle),
Room.Room("Река",
{'rooms/default/usual/river.py': '4c6d0bc2631d6646f58c659e89725c283b3d16f6',
'rooms/vietnam/usual/river.py': '93c8d1120f2ed888409fe310e10c6ed533bdf3b0'},
room_river),
Room.Room("Рыцарь",
{'rooms/default/monster/medium/knight.py': '6df97ddce6befc640de638cac690a3157791156e'},
ReplyUtils.battle),
Room.Room("Сайтама",
{'rooms/default/monster/expert/saitama.py': '2e02d1bb70fd4a7010abc1b75ade8adf32d2441d'},
ReplyUtils.battle),
Room.Room("Сильфида",
{'rooms/default/monster/easy/sylph.py': '6c0e2a4143d13428800bccb10d858b6ac7a70e06'},
ReplyUtils.battle),
Room.Room("Слизень",
{'rooms/default/monster/easy/slime.py': 'd8395df941471814c2ddfdf8c6654012c0377ff6'},
ReplyUtils.reply('Раздавить')),
Room.Room("Собачка",
{'rooms/default/usual/dog.py': '3a0a4cba9cff146851f21daf0c4604ee3e720d9a'},
ReplyUtils.no_pet),
Room.Room("Сокровищница",
{'rooms/default/special/rick_astley.py': '510b6f4e67409dbfd49c02b83677e2a179115bda',
'rooms/vietnam/special/rick_astley.py': '8c39b1858f6cdd4f5a29e83ff62590e2444f5ba7'},
ReplyUtils.concat(ReplyUtils.reply('NEVER GONNA GIVE YOU UP!'),
ReplyUtils.reply('NEVER GONNA RUN AROUND!'),
ReplyUtils.reply('NEVER GONNA MAKE YOU CRY!'))),
Room.Room("Спортзал",
{'rooms/default/usual/musclelot.py': '3106188b57cbde78fa94dcb45575b408a435f663'},
ReplyUtils.reply('Подкачаться')),
Room.Room("Старик",
{'rooms/default/missions/main/first.py': '99ebfeaaeb07e06b33a7a1f2a94451907fa7320e',
'rooms/vietnam/missions/main/first.py': '6f9128268ba12ea84a399b07c6b23c68f103bfb1'},
ReplyUtils.go_away),
Room.Room("Сундук",
{'rooms/default/monster/medium/mimic.py': '42c4cc9191bc6ce2ca8ddbc4511b04eebf6b784c',
'rooms/default/usual/chest.py': '3a9f0acb99e088b585c7d3e61bed9eabbb303cf1'},
ReplyUtils.go_away),
Room.Room("Сэр Качкалот",
{'rooms/default/missions/main/second.py': '4fc0fa4c2edbf7cef18caf83902ced379a2d35cb',
'rooms/vietnam/missions/main/second.py': '4fc0fa4c2edbf7cef18caf83902ced379a2d35cb'},
ReplyUtils.go_away),
Room.Room("Твиттер-блоггер",
{'rooms/default/monster/twi_monster.py': '41513b65caef2224f60de60c23e8827ca1aa7fa8'},
ReplyUtils.nothing),
Room.Room("Темная комната",
{'rooms/default/usual/fog_door.py': '38bc16d0b2e22d5e6b9f87ad3182db021b98ca45'},
ReplyUtils.go_away),
Room.Room("Темный лорд",
{'rooms/default/monster/hard/darklord.py': '95bb5e0a7633ccacdb5a93f0d5eb70d119af3ee5'},
ReplyUtils.nothing),
Room.Room("Торговец",
{'rooms/default/usual/orc_shop.py': '5f6780f2a4fad715c0b9e5a243fc84d14b75341f'},
ReplyUtils.go_away),
Room.Room("Трава",
{'rooms/default/monster/easy/grass.py': '11570f96ed1553213d9d9bba306d5895fe589a1b'},
ReplyUtils.go_away),
Room.Room("Туман",
{'rooms/default/usual/slender.py': 'e5d6ea2a7c31368b4636f2d541f6eedc639f3d97'},
ReplyUtils.go_away),
Room.Room("Удача",
{'rooms/default/usual/luck.py': '1b145e589e1ee9ea44ebc2087b8c05d51c7c018a'},
ReplyUtils.nothing),
Room.Room("Указатель",
{'rooms/default/special/sign.py': '259eee2f111e609113303b138d25d30b9249efb6',
'rooms/vietnam/special/sign.py': '259eee2f111e609113303b138d25d30b9249efb6'},
lambda _, replies, __: replies[0]),
Room.Room("Утка",
{'rooms/default/monster/easy/duck.py': '10d1ada297a0358ac641d2e65b7894cbc492addf'},
ReplyUtils.battle),
Room.Room("Уютная комната",
{'rooms/default/usual/devil.py': 'f5980453c5514e28d84758f78da38c814ad399eb'},
ReplyUtils.go_away),
Room.Room("Хиппи", {'rooms/vietnam/monster/hippie.py': 'b9f99e7b94fd0219b04e8204921058f26ce769bd'},
ReplyUtils.battle),
Room.Room("Цезарь",
{'rooms/default/usual/cesar.py': '4f0c99f28ff7b90a7904046971abe17c648a348d'},
ReplyUtils.go_away),
Room.Room("Человек",
{'rooms/default/special/bill_gates.py': '4a378fcbddeb670c84be02bd94b80ce787b313bc',
'rooms/vietnam/special/bill_gates.py': '4a378fcbddeb670c84be02bd94b80ce787b313bc',
'rooms/default/special/gabe.py': '2ae11911abde825a263dc815867d16baedf54658',
'rooms/default/special/kiba.py': 'ca162b633f299c74d68ef26ecb16ad8dd04fbabd',
'rooms/vietnam/special/kiba.py': 'ca162b633f299c74d68ef26ecb16ad8dd04fbabd',
'rooms/default/special/kodzima.py': '4f16e355fde3674e85ad8baf9243cbddfb22bd49',
'rooms/default/special/yegorf1.py': 'acfdc7af26b08cc6a92aed2395076bd799cd8d1d',
'rooms/vietnam/special/yegorf1.py': 'acfdc7af26b08cc6a92aed2395076bd799cd8d1d',
'rooms/default/usual/vladislav.py': '2bce14d125da9b7562afdf3c4706572792b44bc0',
'rooms/vietnam/special/gabe.py': 'd503ea4026ea8815bd8dab7bdfcf251cff310688',
'rooms/vietnam/special/kodzima.py': '40d31d5d9f9737e2d6745b8e2aae4cacfaf38ba5'
},
room_human),
Room.Room("Черный Рыцарь",
{'rooms/default/boss/black_knight.py': '0ba4107847e3d6336c171c1e3f892e32bae82061'},
ReplyUtils.battle),
Room.Room("Шаолиньский монастырь",
{'rooms/default/usual/shaolin_monastery.py': 'e34f3d53fb0d46439b46816113af196ad9202d80'},
ReplyUtils.reply('Потренироваться')),
Room.Room("Шар",
{'rooms/default/usual/clairvoyance.py': 'c5435cd9e02bbfec6da1c1ca936b970e9dfc42c8'},
ReplyUtils.reply('Идти на свет')),
Room.Room("Яблоня",
{'rooms/default/usual/apple_tree.py': '457eb7c7b559f17649a51e2560b5607ad98f1701'},
ReplyUtils.reply('Взять')),
Room.Room("先生",
{'rooms/default/usual/sensei.py': '26c3a434b8c0440799baf5c1637489aa81aa616b'},
ReplyUtils.reply('Сказать, что уже знаешь')),
]
|
def disp(*txt, p):
print(*txt)
ask = lambda p : p.b(input())
functions = {">>":disp,"?":ask}
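# Hypothetical usage (added note, not in the original): handlers are looked up by
# token and receive some context object `p`; `disp` ignores it, while `ask` feeds
# the typed input to p.b.
#   functions[">>"]("hello", "world", p=ctx)  # prints: hello world
#   reply = functions["?"](ctx)               # reads stdin, returns ctx.b(text)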
|
# imports
import matlab.engine
import sys
# get the path from the shell arguments
target_path = sys.argv[1]
# start the matlab engine
eng = matlab.engine.start_matlab()
# add paths to the matlab path with the required functions
# TODO: add these from a separate file
eng.addpath(r'D:\Code Repos\miniscope_processing')
eng.addpath(r'R:\Share\Simon\Drago_Volker_Simon\invivo_depth_GUI')
# run min1PIPE
path_out = eng.min1pipe_HPC_Python(target_path, nargout=1)
# show the generated path
print(path_out)
|
# we have if-else conditionals
# int and input are builtin functions
data = int(input('enter a number: '))
if data == 0:
print('it was zero')
elif data < 0:
print('negative')
else:
data = 'abc'
print(data)
print(type(data))
# while loop
# while input('enter x: ') != 'x':
# print('error, enter x')
# for loop
# in python, you always iterate over a sequence of some kind
for i in range(0, 5, 1): # or range(5), same thing here
print(i)
# range is a sequence data type in python
# that is 'lazy', it doesn't precompute the whole thing like a list,
# it calculates each value as needed
# usually you don't even write loops like that... you figure out a way to
# iterate over the collections of data that you'd be using that "i" to access
sequence = [1, 3, 7, 2, 5 ]
for item in sequence:
print(item)
# if you want to iterate over a list AND have an index as well, use enumerate
# (list comprehensions are another powerful pattern; a short example follows this loop)
for i, item in enumerate(sequence):
if i > 0:
print(item != sequence[i - 1])
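# a short illustration of list comprehension syntax (added example, not part of
# the original notes): build new lists from `sequence` in a single expression
doubled = [item * 2 for item in sequence]
print(doubled)  # [2, 6, 14, 4, 10]
evens = [item for item in sequence if item % 2 == 0]
print(evens)  # [2]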
|
#!/usr/bin/env python
import random
import sys
number = 100
if len(sys.argv) > 1:
number = int(sys.argv[1])
def accumulate(list):
acu = 0
out = []
for el in list:
acu += el
out.append(acu)
return out
numbers = [ '00' ]
numbers += [ str(x) for x in range(0,37) ]
from enum import Enum
COLORS = Enum('COLORS', 'green red black')
colors = [ COLORS.green, COLORS.green ]
# colors = [ COLORS.red, COLORS.black ]
colors += [ COLORS.red if x % 2 == 0 else COLORS.black for x in range(1,37) ]
from collections import namedtuple
pair = namedtuple('pair', 'number color')
wheel = [ pair(*x) for x in zip(numbers,colors) ]
def simulate(number):
    redblack = [ 1 if random.choice(wheel).color == COLORS.red else -1 for x in range(number) ]
    field = [ 36 if int(random.choice(wheel).number) == 17 else -1 for x in range(number) ]
acc_redblack = accumulate(redblack)
acc_field = accumulate(field)
return acc_redblack, acc_field
import matplotlib.pyplot as plt
acc_redblack, acc_field = simulate(number)
plt.ioff()
plt.clf()
# plt.plot(redblack)
plt.plot(acc_redblack)
# plt.plot(field)
plt.plot(acc_field)
plt.draw()
plt.show()
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import sys
import six
import logging
import os
import itertools
import json
import abc
import numpy as np
@six.add_metaclass(abc.ABCMeta)
class Model():
def __init__(self, config, mode):
"""
Args:
config (dict): hyper param
            mode (propeller.RunMode): will create `TRAIN` and `EVAL` models in propeller.train_and_eval
"""
self.mode = mode
@abc.abstractmethod
def forward(self, features):
"""
Args:
features (list of Tensor): depends on your Dataset.output_shapes
Returns:
return (Tensor):
"""
pass
@abc.abstractmethod
def loss(self, predictions, label):
"""
Args:
predictions (Tensor): result of `self.forward`
label (Tensor): depends on your Dataset.output_shapes
Returns:
return (paddle scalar): loss
"""
pass
@abc.abstractmethod
def backward(self, loss):
"""
Call in TRAIN mode
Args:
loss (Tensor): result of `self.loss`
Returns:
None
"""
pass
@abc.abstractmethod
def metrics(self, predictions, label):
"""
Call in EVAL mode
Args:
predictions (Tensor): result of `self.forward`
label (Tensor): depends on your Dataset.output_shapes
Returns:
(dict): k-v map like: {"metrics_name": propeller.Metrics }
"""
return {}
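# Minimal sketch (added illustration, not from the original project): a concrete
# Model only needs to fill in the abstract hooks above. The bodies below are
# placeholders, and 'hidden_size' is a made-up config key; a real subclass would
# use whatever tensor framework the surrounding propeller setup provides.
class _ExampleModel(Model):
    def __init__(self, config, mode):
        super(_ExampleModel, self).__init__(config, mode)
        self.hidden_size = config.get('hidden_size', 128)
    def forward(self, features):
        # features is a list of tensors; return the model's predictions
        return features[0]
    def loss(self, predictions, label):
        # return a scalar loss computed from predictions and label
        raise NotImplementedError('placeholder: compute the loss with your framework')
    def backward(self, loss):
        # called only in TRAIN mode; run the optimizer step here
        pass
    def metrics(self, predictions, label):
        # called only in EVAL mode
        return {}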
|
# -*- coding: utf-8 -*-
import urllib
import urllib.request
import urllib.parse
import os
import todoist
from datetime import timedelta, date, datetime
slack_icon_url = 'https://pbs.twimg.com/profile_images/644169103750512640/zWCOmZLI.png'
def get_today_items():
today_items = []
api = todoist.TodoistAPI(token=os.environ["TODOIST_TOKEN"])
response = api.sync(resource_types=['items'])
for item in response['Items']:
        if item['due_date'] is not None:
due_date = datetime.strptime(item['due_date'], '%a %d %b %Y %H:%M:%S +0000')
if date.today() == due_date.date():
today_items.append(item['content'])
# print(today_items)
return today_items
def get_uncompleted_items():
uncompleted_items = []
api = todoist.TodoistAPI(token=os.environ["TODOIST_TOKEN"])
response = api.sync(resource_types=['items'])
for item in response['Items']:
        if item['due_date'] is not None:
due_date = datetime.strptime(item['due_date'], '%a %d %b %Y %H:%M:%S +0000')
if date.today() != due_date.date():
uncompleted_items.append(item['content'])
# print(uncompleted_items)
return uncompleted_items
def get_yesterday_completed_items():
yesterday_completed_items = []
yesterday = date.today() - timedelta(days=1)
api = todoist.TodoistAPI(token=os.environ["TODOIST_TOKEN"])
response = api.get_all_completed_items(kwargs='')
for item in response['items']:
completed_date = datetime.strptime(item['completed_date'], '%a %d %b %Y %H:%M:%S +0000')
if yesterday == completed_date.date():
yesterday_completed_items.append(item['content'])
# print(yesterday_completed_items)
return yesterday_completed_items
def get_today_completed_items():
today_completed_items = []
today = date.today()
api = todoist.TodoistAPI(token=os.environ["TODOIST_TOKEN"])
response = api.get_all_completed_items(kwargs='')
for item in response['items']:
completed_date = datetime.strptime(item['completed_date'], '%a %d %b %Y %H:%M:%S +0000')
if today == completed_date.date():
today_completed_items.append(item['content'])
    # print(today_completed_items)
return today_completed_items
def generate_posts():
posts = ["\n:heavy_check_mark: *Yesterday's completed task*\n"]
yesterday_completed_items = get_yesterday_completed_items()
if len(yesterday_completed_items) != 0:
for item in yesterday_completed_items:
posts.append(':ballot_box_with_check: ' + item + '\n')
else:
posts.append('Nothing.:sob:\n')
posts.append("\n:heavy_check_mark: *Today's completed task*\n")
today_completed_items = get_today_completed_items()
if len(today_completed_items) != 0:
for item in today_completed_items:
posts.append(':ballot_box_with_check: ' + item + '\n')
else:
posts.append('Nothing.:sob:\n')
posts.append("\n\n:warning: *Today's uncompleted task*\n")
today_items = get_today_items()
if len(today_items) != 0:
for item in today_items:
posts.append(':white_medium_square: ' + item + '\n')
else:
posts.append('Nothing.:sleepy:\n')
posts.append("\n\n:warning: *Other uncompleted task*\n")
uncompleted_items = get_uncompleted_items()
if len(uncompleted_items) != 0:
for item in uncompleted_items:
posts.append(':white_medium_square: ' + item + '\n')
else:
posts.append(':white_flower:Nothing! Excellent!!\n')
slack_post(''.join(posts))
def slack_post(text):
url = "https://slack.com/api/chat.postMessage"
params = dict(token=os.environ["SLACK_TOKEN"],
channel=os.environ["SLACK_CHANNEL"],
username='Todoist',
icon_url=slack_icon_url,
text=text)
params = urllib.parse.urlencode(params)
params = params.encode('ascii')
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
request = urllib.request.Request(url, data=params, headers=headers)
with urllib.request.urlopen(request) as response:
response.read()
def main():
generate_posts()
if __name__ == '__main__':
main()
|
from modeltranslation.translator import translator, TranslationOptions
from .models import Person
class PersonTranslationOptions(TranslationOptions):
fields = ('name', 'surname')
translator.register(Person, PersonTranslationOptions)
|
# -*- coding: utf-8 -*-
""" Dataloading class for backtesting.
"""
from utils.common import *
from utils.timeseriessplit import TimeSeriesSplitCustom
from dataviewer import Viewer
class Loader:
""" Dataloading class.
"""
def __init__(self):
log_path = '.'.join(['backtest', __name__])
#self.logger = logging.getLogger(log_path)
trade_dir = os.path.dirname(os.path.realpath(__file__))
parent_dir = os.path.abspath(trade_dir + '/../')
self.data_path = os.path.join(parent_dir, 'datamining/data/')
self.price_path = os.path.join(self.data_path, 'exchanges/')
self.viewer = Viewer()
def get_prices(self, coin):
""" Gets price data (OHLCV) for COIN.
"""
return self.viewer.get_price_data(coin)
def get_y(self, df, ternary=False):
""" Generates Y DataFrame for training/test set.
This method generates labels when the Y is ternary:
Y {-1, 1}.
"""
return self.get_avg(df, col='close')
def get_avg(self, df, col=''):
""" Generates average for COL for DF. COL is expected to have
multiple values for multiple exchanges, ex. multiple Open
prices. Will only consider USD for now. May consider Volume
weighing in the future.
Args:
df (pd.DataFrame): DataFrame to average COLs.
col (str): Columns to average for each row.
Returns:
avg_df (pd.DataFrame): DataFrame with averaged values with
axis=1.
"""
avg_cols = [x for x in df.columns if 'USD' in x and col in x]
avg_df = df[avg_cols].mean(axis=1)
return avg_df
def split_dataset(self, df, split_all=True, split_size=24, train_splits=24):
""" Splits DF into training, and test sets.
Cross-validation will be done in a walk-forward fashion.
Args:
df (pd.DataFrame): DataFrame to split
split_size (int): # of ticks to distribute to each split.
split_all (bool): Determines whether to make each tick a split.
If False, each split will have 1+ ticks.
Returns:
split (iterable): df split into training, cv
test_i (np.array): array of range with test indices
"""
n_samples = len(df)
n_test = n_samples // 4
test_start = n_samples - n_test + 1
test_i = np.arange(test_start, n_samples - 1)
if split_all:
n_splits = n_samples - n_test
else:
n_splits = (n_samples - n_test) // split_size - 1
tscv = TimeSeriesSplitCustom(n_splits=n_splits)
split = tscv.split(df[:test_start], fixed_length=True,
train_splits=train_splits,
test_splits=1)
return split, test_i
def preprocess(self, df, stationarity=False, lag=False, n_lags=5):
if stationarity:
df = self.difference(df)
if lag:
df = self.lag(df, n_lags=n_lags)
return df
def difference(self, df):
""" Returns differentiated price data to insure stationarity
"""
diff_df = df.diff(periods=1)
diff_df = diff_df.dropna()
return diff_df
def invert_diff(self, diff_df, price_df):
""" Inverts difference to create price DataFrame.
"""
return price_df + diff_df
def lag(self, df, n_lags):
""" Adds lagged price data to DataFrame.
Args:
df (pd.DataFrame): DataFrame with price data.
n_lags (int): Number of lags to input as features into DF.
Returns:
df (pd.DataFrame): DataFrame with lags as features
"""
return_df = df
for i in range(1, n_lags+1):
lag_df = df.shift(i)
return_df = return_df.merge(lag_df, left_index=True,
right_index=True,
suffixes=('', '_lag{}'.format(i)))
return_df.dropna(axis=0, inplace=True)
return return_df
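# Illustrative sketch (added example, not part of the original class): what the
# difference() and lag() transforms do to a toy price series, using plain pandas.
# The column name below is made up for the example.
if __name__ == '__main__':
    import pandas as pd
    prices = pd.DataFrame({'USD_close': [100.0, 102.0, 101.0, 105.0]})
    diffed = prices.diff(periods=1).dropna()  # first differences, as in difference()
    lagged = prices.merge(prices.shift(1), left_index=True, right_index=True,
                          suffixes=('', '_lag1')).dropna()  # one lag, as in lag()
    print(diffed)
    print(lagged)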
|
# -*- mode: Python; tab-width: 4 -*-
import sys
import getopt
import os
import os.path
import subprocess
import configparser
# Example config
# [Global]
# ctags=c:\wsr\apps\ctags58\ctags.exe
# [Carmageddon]
# wildcards=*.cpp;*.c;*.h;*.inl
# tagpaths=c:\Dev\LoveBucket\Development\Beelzebub\SOURCE;c:\Dev\LoveBucket\Development\Build\Source
# flags=--c++-kinds=cfnstunedm;--c-kinds=cfnstunedm;--extra=+q
ConfigFile = "tagbank.conf"
def is_one_of(file_name, filemasks):
return "*"+os.path.splitext(file_name)[1] in filemasks
def index_filename(name):
return name + ".files"
def create_index(name, projectdirs, filetypes):
    # write one matching file path per line; the with-block guarantees the handle is closed
    with open(index_filename(name), "w") as index_file:
        for d in projectdirs:
            for root, dirs, files in os.walk(d):
                for file_name in files:
                    fullname = os.path.join(root, file_name)
                    if is_one_of(fullname, filetypes):
                        index_file.write(fullname + '\n')
def invoke_ctags(prog, flags):
args = [ prog ] + flags
subprocess.call(args)
if __name__ == "__main__":
config = configparser.ConfigParser()
config.read(ConfigFile)
ctags_exe = config.get("Global", "ctags")
for section in config.sections():
if (section == "Global"):
continue
tagpaths = config.get(section, "tagpaths").split(';')
wildcards = config.get(section, "wildcards").split(';')
flags = config.get(section, "flags").split(';')
create_index(section, tagpaths, wildcards)
invoke_ctags(ctags_exe, flags + [ "-o" ] + [ section + ".TAGS" ] + [ "-L"] + [ index_filename(section) ])
sys.exit(0)
|
import json
import time
from typing import List
from instagrapi.exceptions import (ClientLoginRequired, ClientNotFoundError,
LocationNotFound)
from instagrapi.extractors import extract_location
from instagrapi.types import Location, Media
class LocationMixin:
"""
Helper class to get location
"""
def location_search(self, lat: float, lng: float) -> List[Location]:
"""
Get locations using lat and long
Parameters
----------
lat: float
Latitude you want to search for
lng: float
Longitude you want to search for
Returns
-------
List[Location]
List of objects of Location
"""
params = {
"latitude": lat,
"longitude": lng,
# rankToken=c544eea5-726b-4091-a916-a71a35a76474 - self.uuid?
# fb_access_token=EAABwzLixnjYBABK2YBFkT...pKrjju4cijEGYtcbIyCSJ0j4ZD
}
result = self.private_request("location_search/", params=params)
locations = []
for venue in result["venues"]:
if "lat" not in venue:
venue["lat"] = lat
venue["lng"] = lng
locations.append(extract_location(venue))
return locations
def location_complete(self, location: Location) -> Location:
"""
Smart complete of location
Parameters
----------
location: Location
An object of location
Returns
-------
Location
An object of Location
"""
assert location and isinstance(
location, Location
), f'Location is wrong "{location}" ({type(location)})'
if location.pk and not location.lat:
# search lat and lng
info = self.location_info(location.pk)
location.lat = info.lat
location.lng = info.lng
if not location.external_id and location.lat:
            # search external_id and external_id_source
try:
venue = self.location_search(location.lat, location.lng)[0]
location.external_id = venue.external_id
location.external_id_source = venue.external_id_source
except IndexError:
pass
if not location.pk and location.external_id:
info = self.location_info(location.external_id)
if info.name == location.name or (
info.lat == location.lat and info.lng == location.lng
):
location.pk = location.external_id
return location
def location_build(self, location: Location) -> str:
"""
Build correct location data
Parameters
----------
location: Location
An object of location
Returns
-------
str
"""
if not location:
return "{}"
if not location.external_id and location.lat:
try:
location = self.location_search(location.lat, location.lng)[0]
except IndexError:
pass
data = {
"name": location.name,
"address": location.address,
"lat": location.lat,
"lng": location.lng,
"external_source": location.external_id_source,
"facebook_places_id": location.external_id,
}
return json.dumps(data, separators=(",", ":"))
def location_info_a1(self, location_pk: int) -> Location:
"""
Get a location using location pk
Parameters
----------
location_pk: int
Unique identifier for a location
Returns
-------
Location
An object of Location
"""
try:
data = self.public_a1_request(f"/explore/locations/{location_pk}/")
if not data.get("location"):
raise LocationNotFound(location_pk=location_pk, **data)
return extract_location(data["location"])
except ClientNotFoundError:
raise LocationNotFound(location_pk=location_pk)
def location_info(self, location_pk: int) -> Location:
"""
Get a location using location pk
Parameters
----------
location_pk: int
Unique identifier for a location
Returns
-------
Location
An object of Location
"""
return self.location_info_a1(location_pk)
def location_medias_a1(
self, location_pk: int, amount: int = 24, sleep: float = 0.5, tab_key: str = ""
) -> List[Media]:
"""
Get medias for a location
Parameters
----------
location_pk: int
Unique identifier for a location
amount: int, optional
Maximum number of media to return, default is 24
sleep: float, optional
Timeout between requests, default is 0.5
tab_key: str, optional
Tab Key, default value is ""
Returns
-------
List[Media]
List of objects of Media
"""
medias = []
end_cursor = None
while True:
data = self.public_a1_request(
f"/explore/locations/{location_pk}/",
params={"max_id": end_cursor} if end_cursor else {},
)["location"]
page_info = data["edge_location_to_media"]["page_info"]
end_cursor = page_info["end_cursor"]
edges = data[tab_key]["edges"]
for edge in edges:
if amount and len(medias) >= amount:
break
node = edge["node"]
medias.append(self.media_info_gql(node["id"]))
# time.sleep(sleep)
if not page_info["has_next_page"] or not end_cursor:
break
if amount and len(medias) >= amount:
break
time.sleep(sleep)
uniq_pks = set()
medias = [m for m in medias if not (m.pk in uniq_pks or uniq_pks.add(m.pk))]
if amount:
medias = medias[:amount]
return medias
def location_medias_top_a1(
self, location_pk: int, amount: int = 9, sleep: float = 0.5
) -> List[Media]:
"""
Get top medias for a location
Parameters
----------
location_pk: int
Unique identifier for a location
amount: int, optional
Maximum number of media to return, default is 9
sleep: float, optional
Timeout between requests, default is 0.5
Returns
-------
List[Media]
List of objects of Media
"""
return self.location_medias_a1(
location_pk, amount, sleep=sleep, tab_key="edge_location_to_top_posts"
)
def location_medias_top(
self, location_pk: int, amount: int = 9, sleep: float = 0.5
) -> List[Media]:
"""
Get top medias for a location
Parameters
----------
location_pk: int
Unique identifier for a location
amount: int, optional
Maximum number of media to return, default is 9
sleep: float, optional
Timeout between requests, default is 0.5
Returns
-------
List[Media]
List of objects of Media
"""
try:
return self.location_medias_top_a1(location_pk, amount, sleep)
except ClientLoginRequired as e:
if not self.inject_sessionid_to_public():
raise e
return self.location_medias_top_a1(location_pk, amount, sleep) # retry
def location_medias_recent_a1(
self, location_pk: int, amount: int = 24, sleep: float = 0.5
) -> List[Media]:
"""
Get recent medias for a location
Parameters
----------
location_pk: int
Unique identifier for a location
amount: int, optional
Maximum number of media to return, default is 24
sleep: float, optional
Timeout between requests, default is 0.5
Returns
-------
List[Media]
List of objects of Media
"""
return self.location_medias_a1(
location_pk, amount, sleep=sleep, tab_key="edge_location_to_media"
)
def location_medias_recent(
self, location_pk: int, amount: int = 24, sleep: float = 0.5
) -> List[Media]:
"""
Get recent medias for a location
Parameters
----------
location_pk: int
Unique identifier for a location
amount: int, optional
Maximum number of media to return, default is 24
sleep: float, optional
Timeout between requests, default is 0.5
Returns
-------
List[Media]
List of objects of Media
"""
try:
return self.location_medias_recent_a1(location_pk, amount, sleep)
except ClientLoginRequired as e:
if not self.inject_sessionid_to_public():
raise e
return self.location_medias_recent_a1(location_pk, amount, sleep) # retry
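# Illustrative usage (added note, not part of this module): these mixin methods are
# exposed through the package's main client, e.g.
#
#   from instagrapi import Client
#   cl = Client()
#   cl.login("USERNAME", "PASSWORD")                # credentials are placeholders
#   venues = cl.location_search(59.96, 30.29)
#   medias = cl.location_medias_recent(venues[0].pk, amount=5)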
|
import scrapy
import requests
from bs4 import BeautifulSoup
import urllib.parse as urlparse
from clien_crawl.items import Article
from pymongo import MongoClient
import datetime
import sys
import PyRSS2Gen
class MacSpider(scrapy.Spider):
name = 'mac'
allowed_domains = ['clien.net']
start_urls = ['https://www.clien.net/service/board/cm_mac']
base_url = 'https://www.clien.net'
request_header = {
"Accept": "*/*",
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome',
'Accept-Language': 'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4'
}
    def parse(self, response):
        meta = dict()
        for i in range(3, -1, -1):
            # page 0 is the board root; older pages use the &po= offset parameter
            url = self.start_urls[0] if i == 0 else self.start_urls[0] + '?&po=%d' % i
            if i == 0:
                print(url)
            try:
                r = requests.get(url)
                bs_obj = BeautifulSoup(r.text, 'lxml')
                links = bs_obj.findAll('div', {'class' : 'list_item symph_row'})
                for link in links:
                    article_url = link.find('a')['href']
                    article_title = link.a.find('span', {'data-role' : 'list-title-text'}).attrs['title']
                    meta['article_title'] = article_title
                    meta['article_url'] = urlparse.urljoin(self.base_url, article_url)
                    yield scrapy.Request(
                        urlparse.urljoin(self.base_url, article_url),
                        callback=self.parse_articles,
                        headers=self.request_header,
                        meta=meta
                    )
            except Exception:
                continue
def parse_articles(self, response):
meta = response.meta.copy()
item = Article()
bs_obj = BeautifulSoup(response.text, 'lxml')
client = MongoClient('localhost', 27017)
db = client.scrap_clien
item['title'] = meta['article_title']
item['body'] = bs_obj.find('div', {'class' : 'post_article fr-view'}).text # or .get_text()
item['url'] = meta['article_url']
collection = db.article
collection.insert_one({'title': item['title'], 'body': item['body'], 'url': item['url']})
yield item
|
import os
from dotenv import load_dotenv
import openai
load_dotenv()
openai.organization = "org-D2FBgBhwLFkKAOsgtSp86b4i"
openai.api_key = os.getenv("OPENAI_API_KEY")
# Creates a fine-tuned model for search
remote_files = openai.File.list()["data"]
fine_tuning_files = filter(lambda f: "fine_tune.jsonl" in f["filename"], remote_files)
validation_files = filter(lambda f: "fine_tune_validation.jsonl" in f["filename"], remote_files)
latest_fine_tuning_file = max(fine_tuning_files, key=lambda x: x["created_at"])
latest_validation_file = max(validation_files, key=lambda x: x["created_at"])
openai.FineTune.create(
training_file=latest_fine_tuning_file["id"],
validation_file=latest_validation_file["id"],
model="ada",
n_epochs=4,
batch_size=4,
learning_rate_multiplier=0.1,
prompt_loss_weight=0.1
)
|
'''
Created on Oct 18, 2018
@author: srwle
'''
def str2ascii(in_str):
ret=''
if isinstance(in_str, str):
for c in in_str:
ret += hex(ord(c))
return ret
else:
return None
def testmain():
print(str2ascii('1234'))
if __name__ == '__main__':
testmain()
|
from revoke import RevocationList
def test_set_check():
nl = RevocationList()
assert 16 * 1024 * 8 == nl.size()
assert not nl.is_revoked(54)
nl.revoke(54)
assert nl.is_revoked(54)
assert not nl.is_revoked(10320)
nl.revoke(10320)
assert nl.is_revoked(10320)
assert nl.is_revoked(54)
def test_lifecycle():
nl = RevocationList()
revoke_list = [2, 131000, 57]
for i in revoke_list:
assert not nl.is_revoked(i)
for i in revoke_list:
nl.revoke(i)
for i in revoke_list:
assert nl.is_revoked(i)
encoded = nl.encode()
back = RevocationList.decode(encoded)
for i in revoke_list:
assert back.is_revoked(i)
assert not back.is_revoked(1)
|
from model.contact import Contact
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.common.by import By
class ContactHelper:
def __init__(self, application):
self.app = application
def create(self, contact):
wd = self.app.wd
wd.find_element_by_link_text("add new").click()
self.fill_form(contact)
wd.find_element_by_xpath("(//input[@name='submit'])[2]").click()
self.app.navigation.back_to_home_page()
self.contacts_cache = None
def delete_first(self):
self.delete_by_index(0)
def delete_by_index(self, index):
wd = self.app.wd
self.app.navigation.open_home_page()
wd.find_elements_by_name("selected[]")[index].click()
wd.find_element_by_xpath("//input[@value='Delete']").click()
wd.switch_to.alert.accept()
WebDriverWait(wd, 10).until(ec.presence_of_element_located((By.ID, "maintable")))
self.contacts_cache = None
def select_to_edit_by_index(self, index):
wd = self.app.wd
self.app.navigation.open_home_page()
wd.find_elements_by_xpath("//*[@title='Edit']")[index].click()
def edit_first(self, contact):
self.edit_by_index(contact, 0)
def edit_by_index(self, contact, index):
wd = self.app.wd
self.select_to_edit_by_index(index)
self.fill_form(contact)
wd.find_element_by_name("update").click()
self.contacts_cache = None
def fill_form(self, contact):
self.enter_field("firstname", contact.first_name)
self.enter_field("lastname", contact.last_name)
self.enter_field("title", contact.title)
self.enter_field("company", contact.company)
self.enter_field("address", contact.primary_address)
self.enter_field("mobile", contact.mobile_number)
self.enter_field("home", contact.home_number)
self.enter_field("work", contact.work_number)
self.enter_field("email", contact.email)
self.enter_field("email2", contact.email2)
self.enter_field("email3", contact.email3)
def enter_field(self, by_name, value):
wd = self.app.wd
if value is not None:
wd.find_element_by_name(by_name).clear()
wd.find_element_by_name(by_name).send_keys(value)
def get_count(self):
wd = self.app.wd
self.app.navigation.open_home_page()
return len(wd.find_elements_by_name("selected[]"))
contacts_cache = None
def get_contacts(self):
if self.contacts_cache is None:
wd = self.app.wd
self.app.navigation.open_home_page()
self.contacts_cache = []
for element in wd.find_elements_by_name("entry"):
id = element.find_element_by_name("selected[]").get_attribute("value")
cols = element.find_elements_by_tag_name("td")
l_name = cols[1].text
f_name = cols[2].text
address = cols[3].text
emails = cols[4].text
phones = cols[5].text
self.contacts_cache.append(Contact(first_name=f_name, last_name=l_name, primary_address=address,
all_emails=emails, all_phones=phones, id=id))
return list(self.contacts_cache)
def get_field_value(self, by_name):
wd = self.app.wd
return wd.find_element_by_name(by_name).get_attribute("value")
def get_contact_from_edit_page(self, index):
self.select_to_edit_by_index(index)
first_name = self.get_field_value("firstname")
last_name = self.get_field_value("lastname")
title = self.get_field_value("title")
company = self.get_field_value("company")
address = self.get_field_value("address")
mobile_phone = self.get_field_value("mobile")
home_phone = self.get_field_value("home")
work_phone = self.get_field_value("work")
email = self.get_field_value("email")
email_2 = self.get_field_value("email2")
email_3 = self.get_field_value("email3")
return Contact(first_name=first_name, last_name=last_name, title=title, company=company,
primary_address=address, mobile_number=mobile_phone, email=email, email2=email_2,
email3=email_3, home_number=home_phone, work_number=work_phone)
|
from . import api
@api.route('/')
def index():
return "Hello, world!"
@api.route('/hello')
def hello():
return "<h1>Testing</h1>"
|
# this script helps to move issue from one youtrack instance to another
import sys
def main():
    try:
        source_url, source_login, source_password, source_issue_id, target_url, target_login = sys.argv[1:7]
        target_password, target_project_id = sys.argv[7:9]
    except ValueError:
        print("Usage : ")
        print("copyOneIssue source_url source_login source_password source_issue_id target_url target_login target_password target_project_id")
        return
    doCopy(source_url, source_login, source_password, source_issue_id, target_url, target_login, target_password, target_project_id)
def doCopy(source_url, source_login, source_password, source_issue_id, target_url, target_login, target_password, target_project_id):
    print("source_url : " + source_url)
    print("source_login : " + source_login)
    print("source_password : " + source_password)
    print("source_issue_id : " + source_issue_id)
    print("target_url : " + target_url)
    print("target_login : " + target_login)
    print("target_password : " + target_password)
    print("target_project_id : " + target_project_id)
if __name__ == "__main__":
main()
|
import openpyxl
import re
import sys
import datetime
import json
import requests
import threading
import time
import gatherData
import printData
import getItemIds
path = "Rs3 Investments.xlsx"
wb_obj = openpyxl.load_workbook(path.strip())
# from the active attribute
sheet_obj = wb_obj.active
max_column=sheet_obj.max_column
max_row=sheet_obj.max_row
items = []
alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
def get_Data(x,y):
lock = threading.Lock()
start_time = time.time()
total_items_added = 0
gatherData.load_urls()
for i in range(x, y):
url = 'http://services.runescape.com/m=itemdb_rs/api/catalogue/items.json?category='+ str(i) + '&alpha=a'
items_in_category = gatherData.get_items_in_category(url)
print('Items in this category: ' + str(items_in_category))
current_items = 0
for j in alphabet:
url = 'http://services.runescape.com/m=itemdb_rs/api/catalogue/items.json?category='+ str(i) + '&alpha=' + str(j) + '&page=1'
gatherData.run(url,1,lock)
current_items = current_items + gatherData.get_current_items()
print('Items added so far in category: ' + str(current_items) +'/'+str(items_in_category))
gatherData.reset_current_items()
if (current_items == items_in_category):
break
total_items_added = total_items_added + current_items
print('Total items added: ' + str(total_items_added))
    print('Total time: ' + str((time.time() - start_time)) + ' seconds.')
print("grabbing item IDs from spreadsheet")
for j in range(3, max_column+1):#column checking item IDs
item_id = sheet_obj.cell(row=2,column=j)
items.append(item_id.value)
items.reverse()
currentrow = 0
print("adding the new date row")
for j in range(14,max_row):#setting date/updating values
date = sheet_obj.cell(row=j,column=2)
if date.value is None:
current_time = datetime.date.today()
dttm = current_time.strftime('%m/%d/%Y')
date.value = dttm
currentrow = j
break
if currentrow == 0:
currentrow = max_row + 1
print("adding updated prices to new row")
item_url = "https://services.runescape.com/m=itemdb_rs/api/graph/X.json"
amtItems = len(items)
amtSamePrice = 0
for j in range(3, max_column+1):#updating value for gp for today
price = sheet_obj.cell(row=currentrow,column=j)
if price.value is None:#use items.pop() for the name to check in database for new GP value
item = items.pop()
request = item_url.replace("X", "{}".format(item))
response = requests.get(request)
        if response.status_code != 200:
print("HTTP Error. API Response Was Not 200: " + str(response.status_code))
else:
jsonResponse = response.json()
lastKey = list(jsonResponse['daily'].keys())[-1]
lastValue = jsonResponse['daily'][lastKey]
print(lastValue)
previousValue = sheet_obj.cell(row=currentrow-1,column=j)
price.value = lastValue
if(previousValue.value == lastValue or previousValue.value is None): #If amount matches the amount from the previous day, or if it is a newly added item to the sheet
amtSamePrice += 1
print("amt the same:" + str(amtSamePrice))
print("amt items:" + str(amtItems))
if amtSamePrice == amtItems:
print("GE isnt updated yet, all values the same. Task should be scheduled to restart this 4 hours from now.")
wb_obj.close()
exit(1)
wb_obj.save(path)
wb_obj.close()
#Update Database:
#for i in range(0,1):
#lower = 0 + (i*18)
#upper = (18+i) + (i*18)
lower = 0
upper = 37
mythread = threading.Thread(name = "Thread-{}".format(1),target = get_Data,kwargs={'x': lower,'y': upper})
mythread.start()
time.sleep(.1)
#get_Data(0,37)
|
from website.app import init_app
from website.models import Node, User
from framework import Q
from framework.analytics import piwik
app = init_app('website.settings', set_backends=True)
# NOTE: This is a naive implementation for migration, requiring a POST request
# for every user and every node. It is possible to bundle these together in a
# single request, but it would require duplication of logic and strict error
# checking of the result. Doing it this way is idempotent, and allows any
# exceptions raised to halt the process with a usable error message.
for user in User.find():
if user.piwik_token:
continue
piwik.create_user(user)
for node in Node.find(Q('is_public', 'eq', True) & Q('is_deleted', 'eq', False)):
if node.piwik_site_id:
continue
piwik._provision_node(node._id)
|
from torch import nn
from torch.nn import functional as F
from .ops import gram_matrix
class ContentLoss(nn.Module):
def __init__(self, target,):
super(ContentLoss, self).__init__()
# we 'detach' the target content from the tree used
# to dynamically compute the gradient: this is a stated value,
# not a variable. Otherwise the forward method of the criterion
# will throw an error.
self.target = target.detach()
def forward(self, input):
self.loss = F.mse_loss(input, self.target)
return input
class StyleLoss(nn.Module):
def __init__(self, target_feature):
super(StyleLoss, self).__init__()
self.target = gram_matrix(target_feature).detach()
def forward(self, input):
G = gram_matrix(input)
self.loss = F.mse_loss(G, self.target)
return input
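# Illustrative usage (added note, not part of the original module): both losses act
# as pass-through layers that stash their value during forward, e.g.
#
#   feat = torch.randn(1, 8, 4, 4)   # placeholder feature map
#   style = StyleLoss(feat)          # target gram matrix is detached here
#   style(feat)                      # forward returns its input unchanged
#   print(style.loss)                # ~0 here, since the input equals the target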
|
# import hashlib
# # from allauth.account.signals import user_signed_up, user_logged_in
# from django.dispatch.dispatcher import receiver
# from .models import Profile
# @receiver(user_signed_up)
# def social_login_fname_lname_profilepic(sociallogin, user, **kwargs):
# preferred_avatar_size_pixels=256
# picture_url = "http://www.gravatar.com/avatar/{0}?s={1}".format(
# hashlib.md5(user.email.encode('UTF-8')).hexdigest(),
# preferred_avatar_size_pixels
# )
# if sociallogin:
# # Extract first / last names from social nets and store on User record
# if sociallogin.account.provider == 'facebook':
# # f_name = sociallogin.account.extra_data['first_name']
# # l_name = sociallogin.account.extra_data['last_name']
# # if f_name:
# # user.first_name = f_name
# # if l_name:
# # user.last_name = l_name
# #verified = sociallogin.account.extra_data['verified']
# picture_url = "http://graph.facebook.com/{0}/picture?width={1}&height={1}".format(
# sociallogin.account.uid, preferred_avatar_size_pixels)
# if sociallogin.account.provider == 'google':
# # f_name = sociallogin.account.extra_data['given_name']
# # l_name = sociallogin.account.extra_data['family_name']
# # if f_name:
# # user.first_name = f_name
# # if l_name:
# # user.last_name = l_name
# #verified = sociallogin.account.extra_data['verified_email']
# picture_url = sociallogin.account.extra_data['picture']
# user.save()
# profile = Profile(user=user, social_img=picture_url)
# profile.save()
|
#!/usr/bin/env python3
import os, sys
import json
from urllib import request
from urllib.parse import urlencode
ENTRY = os.environ.get('GITLAB_URL')
TOKEN = os.environ.get('GITLAB_TOKEN')
DOING_LABEL = 'Doing'
GANTT_START = 'GanttStart:'
GANTT_END = 'GanttDue:'
# ToDo: get project by url
# ToDO: group by milestone
# ToDo: issue start date by /spend
class Gitlab:
def __init__(self, gitlab_url, gitlab_token):
self.gitlab_url, self.gitlab_token = gitlab_url, gitlab_token
def req_api(self, path, data=None):
headers = {
'PRIVATE-TOKEN': self.gitlab_token,
'Access-Control-Request-Headers': 'private-token',
'User-Agent': 'gitlab-issue-keeper',
}
if data is None:
r = request.Request(f'{self.gitlab_url}{path}', headers=headers)
else:
params = urlencode(data)
# stupid RESTful
r = request.Request(f'{self.gitlab_url}{path}?{params}', headers=headers, method='PUT')
return json.loads(request.urlopen(r, timeout=4).read())
def get_projects(self):
r = self.req_api('/api/v4/projects/?membership=true&per_page=100')
return dict((
p["path_with_namespace"], {
"path": p["path_with_namespace"],
"name": p["name"],
"namespace": p["namespace"].get("path"),
"last_activity_at": p["last_activity_at"],
"description": p["description"],
"url": p["web_url"],
"id": p["id"],
}
) for p in r if
p["archived"] is False and p["namespace"].get("kind") == "group")
@classmethod
def guess_progress(cls, issue):
total = issue["time_stats"].get("time_estimate")
if not total or total <= 0:
return
spent = issue["time_stats"].get("total_time_spent") or 0
return spent / total * 100
class GitlabProject:
def __init__(self, gitlab_obj, project_id):
self.project_id = project_id
self.gitlab_obj = gitlab_obj
@property
    def labels_map(self):
        labels = getattr(self, '_labels', None)
        # cache on self._labels so the getattr lookup above actually hits on the next call
        self._labels = labels or dict((x['name'], x['id']) for x in
            self.gitlab_obj.req_api(f'/api/v4/projects/{self.project_id}/labels') or []) # noqa
        return self._labels
def get_issue_notes(self, iid):
r = self.gitlab_obj.req_api(f'/api/v4/projects/{self.project_id}/issues/{iid}/notes')
return r
def get_doing_close_date(self, iid):
label_id = self.labels_map[DOING_LABEL]
issue_notes = list(self.get_issue_notes(iid))
starts = sorted([x for x in issue_notes if
x['system'] and x['body'] == f'added ~{label_id} label' # noqa
], key=lambda x: x['id']) or issue_notes[-2:]
if not starts:
return None, None
ends1 = sorted([x for x in issue_notes if
x['system'] and x['body'] == 'closed' # noqa
], key=lambda x: x['id'])
ends2 = sorted([x for x in issue_notes if
x['system'] and x['body'] == f'removed ~{label_id} label' # noqa
], key=lambda x: x['id'])
ends = ends1[-2:] + ends2[-2:]
return starts[0]['updated_at'], min(ends, key=lambda x: x['id'])['updated_at'] if ends else None
def list_issues(self):
r = self.gitlab_obj.req_api(f'/api/v4/projects/{self.project_id}/issues?page=1&per_page=100&state=all')
for issue in r:
start, end = self.get_doing_close_date(issue['iid'])
yield issue, start, end
def update_issue(self, issue, start, end):
"""issue = {iid: 1, description: "", project_id: 0}"""
gantt_str = ''
if start:
gantt_str = '%s\n%s%s' % (gantt_str, GANTT_START, start)
if end:
gantt_str = '%s\n%s%s' % (gantt_str, GANTT_END, end)
if start or end:
# remove old str
lines = []
inline_edit = False
            for l in issue['description'].splitlines():
                if l.startswith(GANTT_START) and start:
                    lines.append(f'{GANTT_START}{start}')
                    inline_edit = True
                elif l.startswith(GANTT_END) and end:
                    lines.append(f'{GANTT_END}{end}')
                    inline_edit = True
                else:
                    # keep every other description line instead of dropping it
                    lines.append(l)
            desc_back = '\n'.join(lines)
desc = '%s\n\n<!--\n下面是issue耗时跟踪不要删\n%s\n-->' % (desc_back, gantt_str)
r = self.gitlab_obj.req_api(f'/api/v4/projects/{issue["project_id"]}/issues/{issue["iid"]}', {"description": desc})
def output_json(data, padding=None):
if padding:
        # @ToDo: sanitize the padding value before interpolating it into the JSONP response
return '%s(%s)' % (padding, json.dumps(data))
else:
return json.dumps(data)
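# A hedged usage sketch (not part of the original script): how the classes above
# might be wired together. It assumes GITLAB_URL and GITLAB_TOKEN are set in the
# environment and that the token has API access to at least one group project;
# the project path below is a hypothetical placeholder.
def _example_sync(project_path='group/project'):
    gl = Gitlab(ENTRY, TOKEN)
    project = gl.get_projects().get(project_path)
    if project is None:
        return
    gp = GitlabProject(gl, project['id'])
    for issue, start, end in gp.list_issues():
        gp.update_issue(issue, start, end)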
if '__main__' == __name__:
exit(0)
|
'''This file contains the lengthy dictionaries used by the validation tool script'''
#list of field names
fieldNames = ["OID@","SHAPE@","STATEID","PARCELID","TAXPARCELID","PARCELDATE","TAXROLLYEAR",
"OWNERNME1","OWNERNME2","PSTLADRESS","SITEADRESS","ADDNUMPREFIX","ADDNUM","ADDNUMSUFFIX","PREFIX","STREETNAME",
"STREETTYPE","SUFFIX","LANDMARKNAME","UNITTYPE","UNITID","PLACENAME","ZIPCODE","ZIP4","STATE","SCHOOLDIST",
"SCHOOLDISTNO","IMPROVED","CNTASSDVALUE","LNDVALUE","IMPVALUE","FORESTVALUE","ESTFMKVALUE","NETPRPTA","GRSPRPTA",
"PROPCLASS","AUXCLASS","ASSDACRES","DEEDACRES","GISACRES","CONAME","LOADDATE","PARCELFIPS","PARCELSRC",
"SHAPE@LENGTH","SHAPE@AREA","SHAPE@XY","GeneralElementErrors","AddressElementErrors","TaxrollElementErrors","GeometricElementErrors"]
fieldListPass = ["OID","OID@","SHAPE","SHAPE@","SHAPE_LENGTH","SHAPE_AREA","SHAPE_XY","SHAPE@LENGTH","SHAPE@AREA","SHAPE@XY","LONGITUDE","LATITUDE","GENERALELEMENTERRORS","ADDRESSELEMENTERRORS","TAXROLLELEMENTERRORS","GEOMETRICELEMENTERRORS"]
#V3 schema requirements
schemaReq = {
'STATEID':[['String'],[100]],
'PARCELID':[['String'],[100]],
'TAXPARCELID':[['String'],[100]],
'PARCELDATE':[['String'],[25]],
'TAXROLLYEAR':[['String'],[10]],
'OWNERNME1':[['String'],[254]],
'OWNERNME2':[['String'],[254]],
'PSTLADRESS':[['String'],[200]],
'SITEADRESS':[['String'],[200]],
'ADDNUMPREFIX':[['String'],[50]],
'ADDNUM':[['String'],[50]],
'ADDNUMSUFFIX':[['String'],[50]],
'PREFIX':[['String'],[50]],
'STREETNAME':[['String'],[50]],
'STREETTYPE':[['String'],[50]],
'SUFFIX':[['String'],[50]],
'LANDMARKNAME':[['String'],[50]],
'UNITTYPE':[['String'],[50]],
'UNITID':[['String'],[50]],
'PLACENAME':[['String'],[100]],
'ZIPCODE':[['String'],[50]],
'ZIP4':[['String'],[50]],
'STATE':[['String'],[50]],
'SCHOOLDIST':[['String'],[50]],
'SCHOOLDISTNO':[['String'],[50]],
'IMPROVED':[['String'],[10]],
'CNTASSDVALUE':[['String','Double'],[50,8]],
'LNDVALUE':[['String','Double'],[50,8]],
'IMPVALUE':[['String','Double'],[50,8]],
'FORESTVALUE':[['String','Double'],[50,8]],
'ESTFMKVALUE':[['String','Double'],[50,8]],
'NETPRPTA':[['String','Double'],[50,8]],
'GRSPRPTA':[['String','Double'],[50,8]],
'PROPCLASS':[['String'],[150]],
'AUXCLASS':[['String'],[150]],
'ASSDACRES':[['String','Double'],[50,8]],
'DEEDACRES':[['String','Double'],[50,8]],
'GISACRES':[['String','Double'],[50,8]],
'CONAME':[['String'],[50]],
'LOADDATE':[['String'],[10]],
'PARCELFIPS':[['String'],[10]],
'PARCELSRC':[['String'],[50]],
}
#bad characters dictionary
fieldNamesBadChars = {
"PARCELID": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")"],
"TAXPARCELID": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")"],
"PARCELDATE": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")","\-"],
"TAXROLLYEAR": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")","\\",'/',"\-"],
"OWNERNME1": ["\n","\r"],
"OWNERNME2": ["\n","\r"],
"PSTLADRESS": ["\n","\r"],
"SITEADRESS": ["\n","\r"],
"ADDNUMPREFIX": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")","\\",'/',',','.',"\-"],
"ADDNUM": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")","\\",'/',',','.',"\-"],
"ADDNUMSUFFIX": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~",',','.'],
"PREFIX": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")","\\",'/',',','.',"\-"],
"STREETNAME": ["\n","\r","$","^","=","<",">","@","#","%","?","!","*","~","(",")"],
"STREETTYPE": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")","\\",'/',',','.',"\-"],
"SUFFIX": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")","\\",'/',',','.',"\-"],
"LANDMARKNAME": ["\n","\r"],
"UNITTYPE": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")","\\",'/',',','.',"\-"],
"UNITID": ["\n","\r","$","^","=","<",">","@","%","?","`","!","*","~","(",")",','],
"PLACENAME": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")","\\",'/',',',"\-"],
"ZIPCODE": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")","\\",'/',',','.',"\-"],
"ZIP4": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")","\\",'/',',','.',"\-"],
"STATE": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")","\\",'/',',','.',"\-"],
"SCHOOLDIST": ["\n","\r","$","^","=","<",">","@","#","%","&","?","!","*","~","(",")","\\",'/',','],
"SCHOOLDISTNO": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")","\\",'/',',','.',"\-"],
"IMPROVED": ["\n","\r","$","^","=","@","#","%","&","?","`","!","*","~","(",")",',','.',"\-"],
"CNTASSDVALUE": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")","\\",'/',',',"\-"],
"LNDVALUE": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")","\\",'/',',',"\-"],
"IMPVALUE": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")",',',"\-"],
"FORESTVALUE": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")","\\",'/',',',"\-"],
"ESTFMKVALUE": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")","\\",'/',',',"\-"],
"NETPRPTA": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")","\\",'/',',',"\-"],
"GRSPRPTA": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")","\\",'/',',',"\-"],
"PROPCLASS": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")","\\",'/','.',"\-"],
"AUXCLASS": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")","\\",'/','.',"\-"],
"ASSDACRES": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")","\\",'/',',',"\-"],
"DEEDACRES": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")","\\",'/',',',"\-"],
"GISACRES": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")","\\",'/',','],
"CONAME": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")","\\",'/',',',"\-"],
"LOADDATE": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")",',','.','\-'],
"PARCELFIPS": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")","\\",'/',',','.',"\-"],
"PARCELSRC": ["\n","\r","$","^","=","<",">","@","#","%","&","?","`","!","*","~","(",")","\\",'/',',','.',"\-"]
}
#acceptable COP domains
copDomains = ['1','2','3','4','5','6','7','5M','M']
#acceptable AUXCOP domains
auxDomins = ['W1','W2','W3','W4','W5','W6','W7','W8','W9','X1','X2','X3','X4','M']
#dictionary for V3 completeness collection
v3CompDict = {
'STATEID':0,
'PARCELID':0,
'TAXPARCELID':0,
'PARCELDATE':0,
'TAXROLLYEAR':0,
'OWNERNME1':0,
'OWNERNME2':0,
'PSTLADRESS':0,
'SITEADRESS':0,
'ADDNUMPREFIX':0,
'ADDNUM':0,
'ADDNUMSUFFIX':0,
'PREFIX':0,
'STREETNAME':0,
'STREETTYPE':0,
'SUFFIX':0,
'LANDMARKNAME':0,
'UNITTYPE':0,
'UNITID':0,
'PLACENAME':0,
'ZIPCODE':0,
'ZIP4':0,
'STATE':0,
'SCHOOLDIST':0,
'SCHOOLDISTNO':0,
'IMPROVED':0,
'CNTASSDVALUE':0,
'LNDVALUE':0,
'IMPVALUE':0,
'FORESTVALUE':0,
'ESTFMKVALUE':0,
'NETPRPTA':0,
'GRSPRPTA':0,
'PROPCLASS':0,
'AUXCLASS':0,
'ASSDACRES':0,
'DEEDACRES':0,
'GISACRES':0,
'CONAME':0,
'LOADDATE':0,
'PARCELFIPS':0,
'PARCELSRC':0,
}
|
# Reference : https://www.w3schools.com/python/python_file_handling.asp
# Modes
'''
By default "mode=r"
Both read & write: mode=r+
"r" - Read - Default value. Opens a file for reading, error if the file does not exist
"a" - Append - Opens a file for appending, creates the file if it does not exist
"w" - Write - Opens a file for writing, creates the file if it does not exist
"x" - Create - Creates the specified file, returns an error if the file exists
In addition you can specify if the file should be handled as binary or text mode
"t" - Text - Default value. Text mode
"b" - Binary - Binary mode (e.g. images) (Like mode = wb)
'''
f = open('test.txt')
print(f.read())
''' Output : Hey There
Its me
'''
'''Remember that read() returns the entire contents of the file. If you want
to re-read the file from the beginning, call f.seek(0) to move the cursor back
to the start.'''
f = open('test.txt')
print(f.readline()) # Reads a single line from the file
# Output : Hey There
f = open('test.txt')
print(f.readlines()) #Returns a lists which contains all the lines
# Output : ['Hey there \n', 'Its me']
# Standard Way To Open A File
with open('test.txt') as f :
print(f.readlines())
# Always close a file opened with a plain open() call; a file opened via "with" is closed automatically when the block exits
f.close()
#Another Example
with open('test.txt',mode='r+') as f:
text = f.write('hellooooo')
'''Output : hellooooo
Its me
'''
# It's better to perform file operations within a try/except block, as in the sketch below
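# A minimal, hedged sketch (not part of the original notes): wrapping file
# operations in try/except/finally so a missing file is handled and the
# handle is always closed. 'test.txt' is the same sample file used above.
def read_file_safely(path='test.txt'):
    f = None
    try:
        f = open(path)
        return f.read()
    except FileNotFoundError:
        print(f'{path} does not exist')
        return ''
    finally:
        if f is not None:
            f.close()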
|
# Code Sample from the tutorial at https://learncodeshare.net/2015/07/09/delete-crud-using-cx_oracle/
# section titled "Deleting records referenced by Foreign Keys" 1st example
# Using the base template, the example code executes a simple delete using named bind variables.
# When following the tutorial with default data this section intentionally throws an error
# to demonstrate foreign key functionality.
import cx_Oracle
import os
connectString = os.getenv('DB_CONNECT') # The environment variable for the connect string: DB_CONNECT=user/password@database
con = cx_Oracle.connect(connectString)
def get_all_rows(label, data_type='people'):
# Query all rows
cur = con.cursor()
if (data_type == 'pets'):
statement = 'select id, name, owner, type from lcs_pets order by owner, id'
else:
statement = 'select id, name, age, notes from lcs_people order by id'
cur.execute(statement)
res = cur.fetchall()
print(label + ': ')
    print(res)
print(' ')
cur.close()
get_all_rows('Original People Data', 'people')
get_all_rows('Original Pet Data', 'pets')
cur = con.cursor()
statement = 'delete from lcs_people where id = :id'
cur.execute(statement, {'id':1})
con.commit()
get_all_rows('New People Data', 'people')
get_all_rows('New Pet Data', 'pets')
|
from .learner import DBRMLearner
from .nn_structure import make_ope_networks
|
import skimage
import skimage.feature
import numpy as np
import llops as yp
import llops.operators as ops
import llops.filter as filters
import scipy as sp
# Feature-based registration imports
from skimage.feature import ORB, match_descriptors, plot_matches
from skimage.measure import ransac
from skimage.transform import EuclideanTransform
def _preprocessForRegistration(image0, image1, methods=['pad'], **kwargs):
# Ensure methods argument is a list
if type(methods) not in (list, tuple):
methods = [methods]
    # Perform padding, which by default surrounds the object with reflected copies of itself
if 'pad' in methods:
# Get pad factor
pad_factor = kwargs.get('pad_factor', 2)
# Get pad value
pad_type = kwargs.get('pad_type', 'reflect')
# Generate pad operator which pads the object to 2x it's size
pad_size = [sp.fftpack.next_fast_len(int(pad_factor * s)) for s in yp.shape(image0)]
# Perform padding
image0 = yp.pad(image0, pad_size, pad_value=pad_type, center=True)
image1 = yp.pad(image1, pad_size, pad_value=pad_type, center=True)
# Normalize to the range [0,1] (Always do if we're filtering)
if 'normalize' in methods:
image0 = filters._normalize(image0)
image1 = filters._normalize(image1)
# Sobel filtering
if 'sobel' in methods:
image0 = filters.sobel(image0)
image1 = filters.sobel(image1)
# Gaussian filtering
if 'gaussian' in methods:
image0 = filters.gaussian(image0, sigma=1)
image1 = filters.gaussian(image1, sigma=1)
# High-pass filtering (using gaussian)
if 'highpass' in methods:
image0 = filters.gaussian(image0, sigma=2) - filters.gaussian(image0, sigma=4)
image1 = filters.gaussian(image1, sigma=2) - filters.gaussian(image1, sigma=4)
# Roberts filtering
if 'roberts' in methods:
image0 = filters.roberts(image0)
image1 = filters.roberts(image1)
# Scharr filtering
if 'scharr' in methods:
image0 = filters.scharr(image0)
image1 = filters.scharr(image1)
# Prewitt filtering
if 'prewitt' in methods:
image0 = filters.prewitt(image0)
image1 = filters.prewitt(image1)
# Canny filtering
if 'canny' in methods:
image0 = filters.canny(image0, sigma=kwargs.get('sigma', 1), low_threshold=kwargs.get('low_threshold', 0.01), high_threshold=kwargs.get('high_threshold', 0.05))
image1 = filters.canny(image1, sigma=kwargs.get('sigma', 1), low_threshold=kwargs.get('low_threshold', 0.01), high_threshold=kwargs.get('high_threshold', 0.05))
return image0, image1
def registerImage(image0, image1, method='xc', axis=None,
preprocess_methods=['reflect'], debug=False, **kwargs):
# Perform preprocessing
if len(preprocess_methods) > 0:
image0, image1 = _preprocessForRegistration(image0, image1, preprocess_methods, **kwargs)
# Parameter on whether we can trust our registration
trust_ratio = 1.0
    if method in ('xc', 'cross_correlation'):
# Get energy ratio threshold
trust_threshold = kwargs.get('energy_ratio_threshold', 1.5)
# Pad arrays for optimal speed
pad_size = tuple([sp.fftpack.next_fast_len(s) for s in yp.shape(image0)])
# Perform padding
        if pad_size != yp.shape(image0):
image0 = yp.pad(image0, pad_size, pad_value='edge', center=True)
image1 = yp.pad(image1, pad_size, pad_value='edge', center=True)
# Take F.T. of measurements
src_freq, target_freq = yp.Ft(image0, axes=axis), yp.Ft(image1, axes=axis)
# Whole-pixel shift - Compute cross-correlation by an IFFT
image_product = src_freq * yp.conj(target_freq)
# image_product /= abs(src_freq * yp.conj(target_freq))
cross_correlation = yp.iFt(image_product, center=False, axes=axis)
# Take sum along axis if we're doing 1D
if axis is not None:
axis_to_sum = list(range(yp.ndim(image1)))
del axis_to_sum[axis]
cross_correlation = yp.sum(cross_correlation, axis=axis_to_sum)
# Locate maximum
shape = yp.shape(src_freq)
maxima = yp.argmax(yp.abs(cross_correlation))
midpoints = np.array([np.fix(axis_size / 2) for axis_size in shape])
shifts = np.array(maxima, dtype=np.float64)
shifts[shifts > midpoints] -= np.array(shape)[shifts > midpoints]
# If its only one row or column the shift along that dimension has no
# effect. We set to zero.
for dim in range(yp.ndim(src_freq)):
if shape[dim] == 1:
shifts[dim] = 0
# If energy ratio is too small, set all shifts to zero
trust_metric = yp.scalar(yp.max(yp.abs(cross_correlation) ** 2) / yp.mean(yp.abs(cross_correlation) ** 2))
        # Determine if this registration can be trusted
trust_ratio = trust_metric / trust_threshold
elif method == 'orb':
# Get user-defined mean_residual_threshold if given
trust_threshold = kwargs.get('mean_residual_threshold', 40.0)
# Get user-defined mean_residual_threshold if given
orb_feature_threshold = kwargs.get('orb_feature_threshold', 25)
match_count = 0
fast_threshold = 0.05
while match_count < orb_feature_threshold:
descriptor_extractor = ORB(n_keypoints=500, fast_n=9,
harris_k=0.1,
fast_threshold=fast_threshold)
# Extract keypoints from first frame
descriptor_extractor.detect_and_extract(np.asarray(image0).astype(np.double))
keypoints0 = descriptor_extractor.keypoints
descriptors0 = descriptor_extractor.descriptors
# Extract keypoints from second frame
descriptor_extractor.detect_and_extract(np.asarray(image1).astype(np.double))
keypoints1 = descriptor_extractor.keypoints
descriptors1 = descriptor_extractor.descriptors
# Set match count
match_count = min(len(keypoints0), len(keypoints1))
fast_threshold -= 0.01
            if fast_threshold <= 0:
raise RuntimeError('Could not find any keypoints (even after shrinking fast threshold).')
# Match descriptors
matches = match_descriptors(descriptors0, descriptors1, cross_check=True)
# Filter descriptors to axes (if provided)
if axis is not None:
matches_filtered = []
for (index_0, index_1) in matches:
point_0 = keypoints0[index_0, :]
point_1 = keypoints1[index_1, :]
unit_vec = point_0 - point_1
unit_vec /= np.linalg.norm(unit_vec)
if yp.abs(unit_vec[axis]) > 0.99:
matches_filtered.append((index_0, index_1))
matches_filtered = np.asarray(matches_filtered)
else:
matches_filtered = matches
# Robustly estimate affine transform model with RANSAC
model_robust, inliers = ransac((keypoints0[matches_filtered[:, 0]],
keypoints1[matches_filtered[:, 1]]),
EuclideanTransform, min_samples=3,
residual_threshold=2, max_trials=100)
# Note that model_robust has a translation property, but this doesn't
# seem to be as numerically stable as simply averaging the difference
# between the coordinates along the desired axis.
# Apply match filter
matches_filtered = matches_filtered[inliers, :]
# Process keypoints
if yp.shape(matches_filtered)[0] > 0:
# Compute shifts
difference = keypoints0[matches_filtered[:, 0]] - keypoints1[matches_filtered[:, 1]]
shifts = (yp.sum(difference, axis=0) / yp.shape(difference)[0])
shifts = np.round(shifts[0])
# Filter to axis mask
if axis is not None:
_shifts = [0, 0]
_shifts[axis] = shifts[axis]
shifts = _shifts
# Calculate residuals
residuals = yp.sqrt(yp.sum(yp.abs(keypoints0[matches_filtered[:, 0]] + np.asarray(shifts) - keypoints1[matches_filtered[:, 1]]) ** 2))
# Define a trust metric
trust_metric = residuals / yp.shape(keypoints0[matches_filtered[:, 0]])[0]
# Determine if this registration can be trusted
trust_ratio = 1 / (trust_metric / trust_threshold)
print('===')
print(trust_ratio)
print(trust_threshold)
print(trust_metric)
print(shifts)
else:
trust_metric = 1e10
trust_ratio = 0.0
shifts = np.asarray([0, 0])
elif method == 'optimize':
# Create Operators
L2 = ops.L2Norm(yp.shape(image0), dtype='complex64')
R = ops.PhaseRamp(yp.shape(image0), dtype='complex64')
REAL = ops.RealFilter((2, 1), dtype='complex64')
# Take Fourier Transforms of images
image0_f, image1_f = yp.astype(yp.Ft(image0), 'complex64'), yp.astype(yp.Ft(image1), 'complex64')
# Diagonalize one of the images
D = ops.Diagonalize(image0_f)
# Form objective
objective = L2 * (D * R * REAL - image1_f)
# Solve objective
solver = ops.solvers.GradientDescent(objective)
shifts = solver.solve(iteration_count=1000, step_size=1e-8)
# Convert to numpy array, take real part, and round.
shifts = yp.round(yp.real(yp.asbackend(shifts, 'numpy')))
# Flip shift axes (x,y to y, x)
shifts = np.fliplr(shifts)
        # TODO: proper trust metric and trust_threshold for this method
        trust_threshold = 1
        trust_metric = 1.0
        trust_ratio = 1.0
else:
raise ValueError('Invalid Registration Method %s' % method)
# Mark whether or not this measurement is of good quality
if not trust_ratio > 1:
if debug:
print('Ignoring shift with trust metric %g (threshold is %g)' % (trust_metric, trust_threshold))
shifts = yp.zeros_like(np.asarray(shifts)).tolist()
# Show debugging figures if requested
if debug:
import matplotlib.pyplot as plt
plt.figure(figsize=(6, 5))
plt.subplot(131)
plt.imshow(yp.abs(image0))
plt.axis('off')
plt.subplot(132)
plt.imshow(yp.abs(image1))
plt.title('Trust ratio: %g' % (trust_ratio))
plt.axis('off')
plt.subplot(133)
        if method in ('xc', 'cross_correlation'):
if axis is not None:
plt.plot(yp.abs(yp.squeeze(cross_correlation)))
else:
plt.imshow(yp.abs(yp.fftshift(cross_correlation)))
else:
plot_matches(plt.gca(), yp.real(image0), yp.real(image1), keypoints0, keypoints1, matches_filtered)
plt.title(str(shifts))
plt.axis('off')
# Return
return shifts, trust_ratio
def register_roi_list(measurement_list, roi_list, axis=None,
use_overlap_region=True, debug=False,
preprocess_methods=['highpass', 'normalize'],
use_mean_offset=False, replace_untrusted=False,
tolerance=(200, 200), force_2d=False,
energy_ratio_threshold=1.5, method='xc'):
"""
Register a list of overlapping ROIs
"""
# Loop over frame indicies
offsets = []
trust_mask = []
# Parse and set up axis definition
if axis is not None and force_2d:
_axis = None
else:
_axis = axis
# Loop over frames
rois_used = []
for frame_index in range(len(measurement_list)):
# Get ROIs
roi_current = roi_list[frame_index]
frame_current = measurement_list[frame_index]
# Determine which rois overlap
overlapping_rois = [(index, roi) for (index, roi) in enumerate(rois_used) if roi.overlaps(roi_current)]
# Loop over overlapping ROIs
if len(overlapping_rois) > 0:
local_offset_list = []
for index, overlap_roi in overlapping_rois:
# Get overlap regions
overlap_current, overlap_prev = yp.roi.getOverlapRegion((frame_current, measurement_list[index]),
(roi_current, roi_list[index]))
# Perform registration
_local_offset, _trust_metric = registerImage(overlap_current,
overlap_prev,
axis=_axis,
method=method,
preprocess_methods=preprocess_methods,
pad_factor=1.5,
pad_type=0,
energy_ratio_threshold=energy_ratio_threshold,
sigma=0.1,
debug=False)
# Deal with axis definitions
if axis is not None and force_2d:
local_offset = [0] * len(_local_offset)
local_offset[axis] = _local_offset[axis]
else:
local_offset = _local_offset
# Filter to tolerance
for ax in range(len(local_offset)):
if abs(local_offset[ax]) > tolerance[ax]:
local_offset[ax] = 0
# local_offset = np.asarray([int(min(local_offset[i], tolerance[i])) for i in range(len(local_offset))])
# local_offset = np.asarray([int(max(local_offset[i], -tolerance[i])) for i in range(len(local_offset))])
# Append offset to list
if _trust_metric > 1.0:
local_offset_list.append(local_offset)
if debug:
print('Registered with trust ratio %g' % _trust_metric)
else:
if debug:
print('Did not register with trust ratio %g' % _trust_metric)
# Append offset to list
if len(local_offset_list) > 0:
offsets.append(tuple((np.round(yp.mean(np.asarray(local_offset_list), axis=0)[0]).tolist())))
trust_mask.append(True)
else:
offsets.append((0, 0))
trust_mask.append(False)
else:
offsets.append((0, 0))
trust_mask.append(True)
        # Store this ROI in rois_used
rois_used.append(roi_current)
    # Convert offsets to an array and reverse direction
offsets = -1 * np.array(offsets)
if not any(trust_mask):
print('WARNING: Did not find any good registration values! Returning zero offset.')
offsets = [np.asarray([0, 0])] * len(offsets)
else:
# Take mean of offsets if desired
if use_mean_offset:
# This flag sets all measurements to the mean of trusted registration
offsets = np.asarray(offsets)
trust_mask = np.asarray(trust_mask)
offsets[:, 0] = np.mean(offsets[trust_mask, 0])
offsets[:, 1] = np.mean(offsets[trust_mask, 1])
offsets = offsets.tolist()
elif replace_untrusted:
# This flag replaces untrusted measurements with the mean of all trusted registrations
offsets = np.asarray(offsets)
trust_mask = np.asarray(trust_mask)
trust_mask_inv = np.invert(trust_mask)
offsets[trust_mask_inv, 0] = np.round(np.mean(offsets[trust_mask, 0]))
offsets[trust_mask_inv, 1] = np.round(np.mean(offsets[trust_mask, 1]))
offsets = offsets.tolist()
# Convert to numpy array
offsets = np.asarray(offsets)
    # Determine aggregate offsets
    aggregate_offsets = [offsets[0]]
    for offset_index in range(len(offsets) - 1):
        aggregate_offsets.append(sum(offsets[slice(0, offset_index + 2)]).astype(int).tolist())
    # Return the recovered offsets
    return aggregate_offsets
def register_translation(src_image, target_image, upsample_factor=1,
energy_ratio_threshold=2, space="real"):
"""
Efficient subpixel image translation registration by cross-correlation.
This code gives the same precision as the FFT upsampled cross-correlation
in a fraction of the computation time and with reduced memory requirements.
It obtains an initial estimate of the cross-correlation peak by an FFT and
then refines the shift estimation by upsampling the DFT only in a small
neighborhood of that estimate by means of a matrix-multiply DFT.
Parameters
----------
src_image : ndarray
Reference image.
target_image : ndarray
Image to register. Must be same dimensionality as ``src_image``.
upsample_factor : int, optional
Upsampling factor. Images will be registered to within
``1 / upsample_factor`` of a pixel. For example
``upsample_factor == 20`` means the images will be registered
within 1/20th of a pixel. Default is 1 (no upsampling)
space : string, one of "real" or "fourier", optional
Defines how the algorithm interprets input data. "real" means data
will be FFT'd to compute the correlation, while "fourier" data will
bypass FFT of input data. Case insensitive.
return_error : bool, optional
Returns error and phase difference if on,
otherwise only shifts are returned
Returns
-------
shifts : ndarray
Shift vector (in pixels) required to register ``target_image`` with
``src_image``. Axis ordering is consistent with numpy (e.g. Z, Y, X)
error : float
Translation invariant normalized RMS error between ``src_image`` and
``target_image``.
phasediff : float
Global phase difference between the two images (should be
zero if images are non-negative).
References
----------
.. [1] Manuel Guizar-Sicairos, Samuel T. Thurman, and James R. Fienup,
"Efficient subpixel image registration algorithms,"
Optics Letters 33, 156-158 (2008). :DOI:`10.1364/OL.33.000156`
.. [2] James R. Fienup, "Invariant error metrics for image reconstruction"
           Applied Optics 36, 8352-8357 (1997). :DOI:`10.1364/AO.36.008352`
"""
# images must be the same shape
if yp.shape(src_image) != yp.shape(target_image):
raise ValueError("Error: images must be same size for "
"register_translation")
    # subpixel registration is only implemented for 2D and 3D data
if yp.ndim(src_image) > 3 and upsample_factor > 1:
raise NotImplementedError("Error: register_translation only supports "
"subpixel registration for 2D and 3D images")
# assume complex data is already in Fourier space
if space.lower() == 'fourier':
src_freq = src_image
target_freq = target_image
# real data needs to be fft'd.
elif space.lower() == 'real':
src_freq = yp.Ft(src_image)
target_freq = yp.Ft(target_image)
else:
raise ValueError("Error: register_translation only knows the \"real\" "
"and \"fourier\" values for the ``space`` argument.")
# Whole-pixel shift - Compute cross-correlation by an IFFT
shape = yp.shape(src_freq)
image_product = src_freq * yp.conj(target_freq)
cross_correlation = yp.iFt(image_product, center=False)
# Locate maximum
maxima = yp.argmax(yp.abs(cross_correlation))
midpoints = np.array([np.fix(axis_size / 2) for axis_size in shape])
shifts = np.array(maxima, dtype=np.float64)
shifts[shifts > midpoints] -= np.array(shape)[shifts > midpoints]
# if upsample_factor > 1:
# # Initial shift estimate in upsampled grid
# shifts = np.round(shifts * upsample_factor) / upsample_factor
# upsampled_region_size = np.ceil(upsample_factor * 1.5)
# # Center of output array at dftshift + 1
# dftshift = np.fix(upsampled_region_size / 2.0)
# upsample_factor = np.array(upsample_factor, dtype=np.float64)
# normalization = (src_freq.size * upsample_factor ** 2)
# # Matrix multiply DFT around the current shift estimate
# sample_region_offset = dftshift - shifts*upsample_factor
# cross_correlation = _upsampled_dft(image_product.conj(),
# upsampled_region_size,
# upsample_factor,
# sample_region_offset).conj()
# cross_correlation /= normalization
# # Locate maximum and map back to original pixel grid
# maxima = np.array(np.unravel_index(
# np.argmax(np.abs(cross_correlation)),
# cross_correlation.shape),
# dtype=np.float64)
# maxima -= dftshift
#
# shifts = shifts + maxima / upsample_factor
# If its only one row or column the shift along that dimension has no
# effect. We set to zero.
for dim in range(yp.ndim(src_freq)):
if shape[dim] == 1:
shifts[dim] = 0
# If energy ratio is too small, set all shifts to zero
energy_ratio = yp.max(yp.abs(cross_correlation) ** 2) / yp.sum(yp.abs(cross_correlation) ** 2) * yp.prod(yp.shape(cross_correlation))
if energy_ratio < energy_ratio_threshold:
print('Ignoring shift with energy ratio %g (threshold is %g)' % (energy_ratio, energy_ratio_threshold))
shifts = yp.zeros_like(shifts)
return shifts
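# A minimal, hedged sketch (not part of the original module) of the whole-pixel
# cross-correlation step used above, written with plain numpy so it does not
# depend on llops. The peak of iFFT(F(a) * conj(F(b))) gives the integer shift
# that realigns b with a.
def _cross_correlation_shift_demo():
    rng = np.random.default_rng(0)
    a = rng.random((64, 64))
    b = np.roll(a, shift=(5, -3), axis=(0, 1))  # b is a shifted copy of a
    cross_correlation = np.fft.ifft2(np.fft.fft2(a) * np.conj(np.fft.fft2(b)))
    maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)), cross_correlation.shape)
    midpoints = np.array([np.fix(axis_size / 2) for axis_size in cross_correlation.shape])
    shifts = np.array(maxima, dtype=np.float64)
    shifts[shifts > midpoints] -= np.array(cross_correlation.shape)[shifts > midpoints]
    return shifts  # expected: [-5., 3.], the shift that moves b back onto a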
|
"""
Sample data:
{
"data": {
"id": "95.0",
"meta": {
"type": "attribute_change",
"entity_id": 758,
"new_value": "This is the newest description ever!!",
"old_value": "This is the old description!",
"entity_type": "Asset",
"attribute_name": "description",
"field_data_type": "text"
},
"user": {
"id": 113,
"type": "HumanUser"
},
"entity": {
"id": 758,
"type": "Asset"
},
"project": {
"id": 65,
"type": "Project"
},
"operation": "update",
"created_at": "2019-07-12 21:14:36.598835",
"event_type": "Shotgun_Asset_Change",
"session_uuid": "07473c00-a4ea-11e9-b3b8-0242ac110006",
"attribute_name": "description",
"event_log_entry_id": 248249
}
}
"""
import socket
import pika
from flask import Flask
from flask import request
from celery_worker import tasks
app = Flask(__name__)
@app.route('/')
def index():
return f'Hello World! {socket.gethostname()}'
@app.route('/event', methods=['POST'])
def event():
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue='hello')
channel.basic_publish(
exchange='',
routing_key='hello',
body=request.get_data())
connection.close()
return '200'
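# A hedged companion sketch (not part of the original service): a minimal
# consumer for the 'hello' queue that the /event route publishes to, assuming
# pika >= 1.0 and a RabbitMQ broker on localhost. In the original setup the
# consumer presumably lives in the celery_worker package.
def consume_events():
    connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
    channel = connection.channel()
    channel.queue_declare(queue='hello')

    def on_message(ch, method, properties, body):
        # body is the raw JSON event payload posted by ShotGrid/Shotgun
        print('Received event payload: %r' % body)

    channel.basic_consume(queue='hello', on_message_callback=on_message, auto_ack=True)
    channel.start_consuming()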
if __name__ == '__main__':
app.run(debug=True)
|
from django.db import models
from decimal import Decimal
# Create your models here.
class College(models.Model):
name = models.CharField(max_length=255, default='')
slug = models.CharField(max_length=255, default='')
acceptance = models.DecimalField(max_digits=3, decimal_places=2, default=0.00)
city = models.CharField(max_length=255, default='')
state = models.CharField(max_length=255, default='')
grad_rate = models.DecimalField(max_digits=4, decimal_places=2, default=0.00)
desirability = models.IntegerField(default=0)
influence = models.IntegerField(default=0)
overall_rank = models.IntegerField(default=0)
sat = models.IntegerField(default=0)
act = models.IntegerField(default=0)
undergrad_student_body = models.IntegerField(default=0)
tuition = models.IntegerField(default=0)
domain = models.CharField(max_length=255, default='')
def __str__(self):
return self.name
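# A hedged usage sketch (not part of the original models module), assuming the
# app is installed and migrations have been applied; the state value passed in
# is purely illustrative.
def top_colleges_in_state(state_name, limit=10):
    """Return the highest-ranked colleges in a state, best overall_rank first."""
    return College.objects.filter(state=state_name).order_by('overall_rank')[:limit]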
|
import unittest
from datetime import timedelta
from cachepot.expire import to_timedelta
class TestExpireSeconds(unittest.TestCase):
def test_to_timedelta(self) -> None:
# float
self.assertEqual(to_timedelta(1.0), timedelta(seconds=1.0))
# int
self.assertEqual(to_timedelta(3), timedelta(seconds=3))
# timedelta
        self.assertEqual(to_timedelta(timedelta(days=5)), timedelta(days=5))
|
from web3 import Web3
OWNER_ACCOUNT_NO = 0
ADDER_ACCOUNT_NO = 1
# these values need to match with the values
# defined in CounterControllerV1 and CounterControllerV2
VALUE_INITIALIZED = 100
VALUE_INITIALIZED_V2 = 42
|
from django.db import models
from django.core.validators import MaxValueValidator, MinValueValidator
from django.contrib.auth.models import User
# Create your models here.
class Location(models.Model):
title = models.CharField(max_length=200)
category = models.CharField(max_length=100)
location_logo = models.FileField()
description = models.TextField(null=True,blank=True)
    latitude = models.CharField(max_length=50,null=True,blank=True)
longitude = models.CharField(max_length=50,null=True,blank=True)
def __str__(self):
return self.title
class Myrating(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
location = models.ForeignKey(Location, on_delete=models.CASCADE)
rating = models.IntegerField(default=0, validators=[MaxValueValidator(5), MinValueValidator(0)])
class MyList(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
location = models.ForeignKey(Location, on_delete=models.CASCADE)
Visited = models.BooleanField(default=False)
|
def get_abi():
return [
{
"inputs": [],
"stateMutability": "nonpayable",
"type": "constructor"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"internalType": "address",
"name": "owner",
"type": "address"
},
{
"indexed": True,
"internalType": "address",
"name": "spender",
"type": "address"
},
{
"indexed": False,
"internalType": "uint256",
"name": "value",
"type": "uint256"
}
],
"name": "Approval",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"internalType": "address",
"name": "from",
"type": "address"
},
{
"indexed": True,
"internalType": "address",
"name": "to",
"type": "address"
},
{
"indexed": False,
"internalType": "uint256",
"name": "value",
"type": "uint256"
}
],
"name": "Transfer",
"type": "event"
},
{
"inputs": [],
"name": "DOMAIN_SEPARATOR",
"outputs": [
{
"internalType": "bytes32",
"name": "",
"type": "bytes32"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "PERMIT_TYPEHASH",
"outputs": [
{
"internalType": "bytes32",
"name": "",
"type": "bytes32"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_owner",
"type": "address"
},
{
"internalType": "address",
"name": "_spender",
"type": "address"
}
],
"name": "allowance",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_spender",
"type": "address"
},
{
"internalType": "uint256",
"name": "_value",
"type": "uint256"
}
],
"name": "approve",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_owner",
"type": "address"
}
],
"name": "balanceOf",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "decimals",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "name",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"name": "nonces",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "owner",
"type": "address"
},
{
"internalType": "address",
"name": "spender",
"type": "address"
},
{
"internalType": "uint256",
"name": "value",
"type": "uint256"
},
{
"internalType": "uint256",
"name": "deadline",
"type": "uint256"
},
{
"internalType": "uint8",
"name": "v",
"type": "uint8"
},
{
"internalType": "bytes32",
"name": "r",
"type": "bytes32"
},
{
"internalType": "bytes32",
"name": "s",
"type": "bytes32"
}
],
"name": "permit",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "symbol",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "totalSupply",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_to",
"type": "address"
},
{
"internalType": "uint256",
"name": "_value",
"type": "uint256"
}
],
"name": "transfer",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_from",
"type": "address"
},
{
"internalType": "address",
"name": "_to",
"type": "address"
},
{
"internalType": "uint256",
"name": "_value",
"type": "uint256"
}
],
"name": "transferFrom",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
}
]
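# A hedged usage sketch (not part of the original module): reading token
# metadata through this ABI with web3.py. The RPC endpoint and token address
# below are placeholders and must be replaced with real, checksummed values.
def read_token_metadata(rpc_url='http://127.0.0.1:8545',
                        token_address='0x0000000000000000000000000000000000000000'):
    from web3 import Web3
    w3 = Web3(Web3.HTTPProvider(rpc_url))
    token = w3.eth.contract(address=token_address, abi=get_abi())
    return {
        'name': token.functions.name().call(),
        'symbol': token.functions.symbol().call(),
        'decimals': token.functions.decimals().call(),
        'totalSupply': token.functions.totalSupply().call(),
    }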
|
from enum import Enum, auto
class JagEvent(Enum):
DISCOVERED = auto()
AWARENESS = auto()
PREPARING = auto()
ADDRESSING = auto()
COMPLETION = auto()
SUMMARY = auto()
def __lt__(self, other):
"""
USC
"""
if self.__class__ is other.__class__:
return self.value < other.value
return NotImplemented
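# A hedged usage note (not part of the original module): because __lt__ compares
# the auto() values, JagEvent members sort in their declaration order, which is
# useful when ordering a mixed list of lifecycle events.
def _sorted_events_demo():
    events = [JagEvent.SUMMARY, JagEvent.DISCOVERED, JagEvent.PREPARING]
    return sorted(events)  # [DISCOVERED, PREPARING, SUMMARY]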
|
ALLOWED_HOSTS = ['*']
DEBUG = True
LOCAL_SETTINGS = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'fusionti_amcm_db',
'USER': 'fusionti_amcm_user',
'PASSWORD': 'UZLs{R4s{Yr~',
'HOST': '',
'PORT': '',
}
}
|
import numpy as np
import glob
import tqdm
import cv2
import os
import math
import datetime
import seaborn as sns
import pandas as pd
import random
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
class DatasetRebalancing():
def __init__(self):
self.grid_info = {
"0": [0, 0.33, 0, 0.33], "1": [0.33, 0.67, 0, 0.33], "2": [0.67, 1.00, 0, 0.33],
"3": [0, 0.33, 0.33, 0.67], "4": [0.33, 0.67, 0.33, 0.67], "5": [0.67, 1.00, 0.33, 0.67],
"6": [0, 0.33, 0.67, 1.00], "7": [0.33, 0.67, 0.67, 1.00], "8": [0.67, 1.00, 0.67, 1.00],
}
self.vector_list = None
self.clustering_vector_list = None
self.candidate_list = None
self.train_list = None
self.valid_list = None
def color_extractor(self, img, topk=1):
kmeans = KMeans(n_clusters=topk)
kmeans = kmeans.fit(img.reshape((img.shape[1] * img.shape[0], 3)))
color = [int(kmeans.cluster_centers_[0][0]), int(kmeans.cluster_centers_[0][1]),
int(kmeans.cluster_centers_[0][2])]
return color
def img2vector(self, img, grid_info):
vector = []
for grid in grid_info.values():
st_x = int(grid[0] * img.shape[1])
end_x = int(grid[1] * img.shape[1])
st_y = int(grid[2] * img.shape[0])
end_y = int(grid[3] * img.shape[0])
crop = img[st_y:end_y, st_x:end_x]
vector.extend(self.color_extractor(crop))
return np.array(vector)
def save_vector_info(self, vector, file_path):
with open(file_path, 'a') as file:
for index in range(len(vector)):
item = vector[index]
file.write(str(item))
if (index != len(vector) - 1):
file.write(",")
file.write("\n")
def save_dataset(self, data, file_path):
with open(file_path, 'w') as file:
for index in range(len(data)):
jpg_path = data[index]
if index != (len(data) - 1):
file.write(jpg_path + "\n")
else:
file.write(jpg_path)
print(file_path + " is saved")
def read_vector_info(self, file_path):
vector_list = []
with open(file_path, 'r') as file:
lines = file.readlines()
print("== Start load vector ==")
print("load file path ->", file_path)
for line in tqdm.tqdm(lines):
line = line.replace("\n", "")
vector = []
for value in line.split(","):
vector.append(int(value))
vector_list.append(vector)
self.vector_list = vector_list
print("== Finish load vector ==")
def read_clustering_vector_info(self, file_path):
vector_list = []
with open(file_path, 'r') as file:
lines = file.readlines()
print("== Start load clustering vector ==")
print("load file path :", file_path)
for line in tqdm.tqdm(lines):
line = line.replace("\n", "")
vector = []
for value in line.split(","):
vector.append(int(value))
vector_list.append(vector)
self.clustering_vector_list = np.array(vector_list)
print("== Finish load clustering vector ==")
def read_dataset(self, file_path):
self.candidate_list = {}
with open(file_path, 'r') as file:
lines = file.readlines()
print("== Start load dataset ==")
print("target :", file_path)
for line in tqdm.tqdm(lines):
cls, img_path = line.split(',')
img_path = img_path.replace("\n", "")
cls = int(cls)
if cls in self.candidate_list:
lst = self.candidate_list[cls]
lst.append(img_path)
self.candidate_list[cls] = lst
else:
self.candidate_list[cls] = [img_path]
print("== Finish load dataset ==")
def vector_clustering(self, result_path, topk=1000, verbose=1):
vector_list = np.array(self.vector_list)
kmeans = KMeans(n_clusters=topk, verbose=verbose)
kmeans = kmeans.fit(vector_list)
result = []
for index in range(topk):
result.append(kmeans.cluster_centers_[index])
with open(result_path, 'w') as file:
for vector in result:
for index in range(len(vector)):
value = str(int(vector[index]))
file.write(value)
if index != len(vector) - 1:
file.write(",")
file.write("\n")
return result
def vector_extraction(self, dataset_path):
vector_list = []
now = datetime.datetime.now()
vector_filepath = now.strftime('%Y-%m-%d %H-%M-%S' + "_vector_info.txt")
for img_path in tqdm.tqdm(dataset_path):
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
vector = self.img2vector(img, self.grid_info)
self.save_vector_info(vector, vector_filepath)
vector_list.append(vector)
self.vector_list = vector_list
return vector_list
def mse(self, vector1, vector2):
error = np.mean(np.power(vector1 - vector2, 2), axis=1)
return error
def classification(self, result_path, dataset):
self.candidate_list = {}
if os.path.isfile(result_path):
with open(result_path, 'w') as file:
pass
for img_path in tqdm.tqdm(dataset):
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
vector = np.array(self.img2vector(img, self.grid_info))
cur_vector_tile = np.tile(vector, (len(self.clustering_vector_list), 1))
error_list = self.mse(cur_vector_tile, self.clustering_vector_list)
cls = np.argmin(error_list)
with open(result_path, 'a') as file:
file.write(str(cls) + "," + img_path + "\n")
if cls in self.candidate_list:
lst = self.candidate_list[cls]
lst.append(img_path)
self.candidate_list[cls] = lst
else:
self.candidate_list[cls] = [img_path]
def collection(self, train_lst_path="train.txt", valid_lst_path="valid.txt", valid_ratio=0.2, select_num=2,
verbose=True):
        '''
        :param train_lst_path: output path for the train list file
        :param valid_lst_path: output path for the validation list file
        :param valid_ratio: validation set size as a ratio of the train list size
        :param select_num: number of samples selected per cluster for training
        :param verbose: whether to show the class distribution plot
        :return:
        '''
self.candidate_list = sorted(self.candidate_list.items())
self.train_list = []
self.valid_list = []
cls_list = []
count_list = []
        for item in tqdm.tqdm(self.candidate_list):
            cls = item[0]
            count = len(item[1])
cls_list.append(cls)
count_list.append(count)
count_list = np.array(count_list)
min_cls = np.argmin(count_list)
max_cls = np.argmax(count_list)
min_count = count_list[min_cls]
max_count = count_list[max_cls]
mean_count = int(np.mean(count_list))
if verbose:
_fig = plt.figure(figsize=(20, 5))
colors = sns.color_palette('hls', len(cls_list))
plt.bar(cls_list, count_list, color=colors)
plt.xlabel("class")
plt.ylabel("count")
plt.show()
print("minimum_count = ", min_count)
print("maximum_count = ", max_count)
print("mean_count = ", mean_count)
train_thresh_count = select_num
        for item in tqdm.tqdm(self.candidate_list):
            file_list = item[1]
loop = len(file_list)
if loop > train_thresh_count:
loop = train_thresh_count
for index in range(loop):
self.train_list.append(file_list[index])
valid_thresh_count = int(len(self.train_list) * valid_ratio)
        for item in tqdm.tqdm(self.candidate_list):
            file_list = item[1]
loop = len(file_list)
if loop == train_thresh_count:
continue
for index, file_path in enumerate(file_list[train_thresh_count:]):
self.valid_list.append(file_path)
random.shuffle(self.valid_list)
self.valid_list = self.valid_list[:valid_thresh_count]
self.save_dataset(self.train_list, train_lst_path)
self.save_dataset(self.valid_list, valid_lst_path)
if __name__ == "__main__":
target_dataset = glob.glob("/home/fsai2/sangmin/ObjectDetection/dteg/dataset/negative/**/*.jpg", recursive=True)
dr = DatasetRebalancing()
clustering_data = dr.vector_extraction(target_dataset)
# dr.read_vector_info("./2021-10-25 13-01-51_vector_info.txt")
vector_list = dr.vector_clustering("./negative_kmeans_vector_info.txt", topk=5000, verbose=1)
dr.read_clustering_vector_info("./negative_kmeans_vector_info.txt")
dr.classification("./negative_dataset_list.txt", target_dataset)
dr.read_dataset("./negative_dataset_list.txt")
dr.collection("negative_train.txt", "negative_valid.txt", select_num=2, verbose=False)
|
from enum import Enum
from typing import List, Dict
import numpy as np
from nagi.constants import IZ_MEMBRANE_POTENTIAL_THRESHOLD, STDP_PARAMS, STDP_LEARNING_WINDOW, NEURON_WEIGHT_BUDGET, \
THRESHOLD_THETA_INCREMENT_RATE, THRESHOLD_THETA_DECAY_RATE, IZ_SPIKE_VOLTAGE
from nagi.neat import Genome, NeuralNodeGene, InputNodeGene, OutputNodeGene
from nagi.stdp import *
class StdpType(Enum):
input = 1
output = 2
class SpikingNeuron(object):
"""Class representing a single spiking neuron."""
def __init__(self, bias: float, a: float, b: float, c: float, d: float, inputs: List[int],
learning_rule: LearningRule, is_inhibitory: bool, stdp_parameters: Dict[str, float]):
"""
a, b, c, and d are the parameters of the Izhikevich model.
:param bias: The bias of the neuron.
:param a: The time-scale of the recovery variable.
:param b: The sensitivity of the recovery variable.
:param c: The after-spike reset value of the membrane potential.
:param d: The after-spike reset value of the recovery variable.
        :param inputs: A list of keys identifying incoming connections; their weights are randomly initialized.
"""
self.bias = bias
self.a = a
self.b = b
self.c = c
self.d = d
self.inputs = {key: np.random.normal(0.5, 0.1) for key in inputs}
self._normalize_weights()
self.learning_rule = learning_rule
self.is_inhibitory = is_inhibitory
self.stdp_parameters = stdp_parameters
self.membrane_potential = self.c
self.membrane_recovery = self.b * self.membrane_potential
self.fired = 0
self.current = self.bias
self.threshold_theta = 0
# Variables containing time elapsed since last input and output spikes.
self.output_spike_timing: float = 0
self.input_spike_timings: Dict[int, List[float]] = {key: [] for key in self.inputs.keys()}
self.has_fired = False
def advance(self, dt: float):
"""
Advances simulation time by the given time step in milliseconds.
Update of membrane potential "v" and membrane recovery "u" given by formulas:
v += dt * (0.04 * v^2 + 5v + 140 - u + I)
u += dt * a * (b * v - u)
Once membrane potential exceeds threshold:
v = c
u = u + d
:param dt: Time step in milliseconds.
"""
if self.fired:
self.membrane_potential = self.c
self.membrane_recovery += self.d
self.threshold_theta += THRESHOLD_THETA_INCREMENT_RATE
else:
v = self.membrane_potential
u = self.membrane_recovery
self.membrane_potential += dt * (0.04 * v ** 2 + 5 * v + 140 - u + self.current)
self.membrane_recovery += dt * self.a * (self.b * v - u)
self.fired = 0
self.output_spike_timing += dt
for key in self.input_spike_timings.keys():
# STDP update on received input spike.
if 0 in self.input_spike_timings[key] and self.has_fired:
                self.stdp_update(key, StdpType.input)
self.input_spike_timings[key] = [t + dt for t in self.input_spike_timings[key] if
t + dt < STDP_LEARNING_WINDOW]
if self.membrane_potential > IZ_MEMBRANE_POTENTIAL_THRESHOLD + self.threshold_theta:
self.fired = IZ_SPIKE_VOLTAGE if not self.is_inhibitory else -IZ_SPIKE_VOLTAGE
self.has_fired = True
self.output_spike_timing = 0
# STDP on output spike.
for key in self.input_spike_timings.keys():
                self.stdp_update(key, StdpType.output)
else:
self.threshold_theta -= THRESHOLD_THETA_DECAY_RATE * self.threshold_theta
def reset(self):
""" Resets all state variables."""
self.membrane_potential = self.c
self.membrane_recovery = self.b * self.membrane_potential
self.fired = 0
self.current = self.bias
self.output_spike_timing = 0
        self.input_spike_timings = {key: [] for key in self.inputs.keys()}
def apply_learning_rule(self, delta_t: float):
return get_learning_rule_function(self.learning_rule)(delta_t, **self.stdp_parameters)
    def stdp_update(self, key: int, stdp_type: StdpType):
        """
        Applies STDP to the weight with the supplied key.
        :param stdp_type: Whether the update was triggered by an input or an output spike.
        :param key: The key identifying the synapse weight to be updated.
        :return: void
        """
delta_weight = 0
weight = self.inputs[key]
sigma, w_min, w_max = STDP_PARAMS['sigma'], STDP_PARAMS['w_min'], STDP_PARAMS['w_max']
if stdp_type is StdpType.input:
delta_t = self.output_spike_timing - 0
if abs(delta_t) < STDP_LEARNING_WINDOW:
delta_weight = self.apply_learning_rule(delta_t)
elif stdp_type is StdpType.output:
for input_spike_timing in self.input_spike_timings[key]:
delta_t = self.output_spike_timing - input_spike_timing
if abs(delta_t) < STDP_LEARNING_WINDOW:
delta_weight += self.apply_learning_rule(delta_t)
if delta_weight > 0:
self.inputs[key] += sigma * delta_weight * (w_max - weight)
elif delta_weight < 0:
self.inputs[key] += sigma * delta_weight * (weight - abs(w_min))
self._normalize_weights()
def _normalize_weights(self):
sum_of_input_weights = sum(self.inputs.values())
if sum_of_input_weights > NEURON_WEIGHT_BUDGET:
self.inputs = {key: value * NEURON_WEIGHT_BUDGET / sum_of_input_weights for key, value in
self.inputs.items()}
class SpikingNeuralNetwork(object):
"""Class representing a spiking neural network."""
def __init__(self, neurons: Dict[int, SpikingNeuron], inputs: List[int], outputs: List[int]):
"""
:param neurons: Dictionary containing key/node pairs.
:param inputs: List of input node keys.
:param outputs: List of output node keys.
:var self.input_values: Dictionary containing input key/voltage pairs.
"""
self.neurons = neurons
self.inputs = inputs
self.outputs = outputs
self.input_values: Dict[int, float] = {}
self.number_of_hidden_neurons = len(self.neurons) - len(outputs)
def set_inputs(self, inputs: List[float]):
"""
Assigns voltages to the input nodes.
:param inputs: List of voltage values."""
assert len(inputs) == len(
self.inputs), f"Number of inputs {len(inputs)} does not match number of input nodes {len(self.inputs)} "
for key, voltage in zip(self.inputs, inputs):
self.input_values[key] = voltage
def advance(self, dt: float) -> List[float]:
"""
Advances the neural network with the given input values and neuron states. Iterates through each neuron, then
through each input of each neuron and evaluates the values to advance the network. The values can come from
either input nodes, or firing neurons in a previous layer.
        :param dt: Time step in milliseconds.
:return: List of the output values of the network after advance."""
for neuron in self.neurons.values():
neuron.current = neuron.bias
for key, weight in neuron.inputs.items():
in_neuron = self.neurons.get(key)
if in_neuron is not None:
in_value = in_neuron.fired
else:
in_value = self.input_values[key]
# Trigger STDP on received input spike.
if in_value:
neuron.input_spike_timings[key].append(0)
neuron.current += in_value * weight
for neuron in self.neurons.values():
neuron.advance(dt)
return [self.neurons[key].fired for key in self.outputs]
def reset(self):
"""Resets all state variables in all neurons in the entire neural network."""
for neuron in self.neurons.values():
neuron.reset()
def get_weights(self):
weights = {}
for destination_key, neuron in self.neurons.items():
for origin_key, weight in neuron.inputs.items():
weights[(origin_key, destination_key)] = weight
return weights
def get_membrane_potentials(self):
return {key: (neuron.membrane_potential, IZ_MEMBRANE_POTENTIAL_THRESHOLD + neuron.threshold_theta) for key, neuron
in self.neurons.items()}
@staticmethod
def create(genome: Genome, bias: float, a: float, b: float, c: float, d: float):
learning_nodes = {key: node for key, node in genome.nodes.items() if isinstance(node, NeuralNodeGene)}
node_inputs = {key: [] for key in learning_nodes.keys()}
input_keys = [node.key for node in genome.nodes.values() if isinstance(node, InputNodeGene)]
output_keys = [node.key for node in genome.nodes.values() if isinstance(node, OutputNodeGene)]
for connection_gene in genome.get_enabled_connections():
node_inputs[connection_gene.destination_node].append(connection_gene.origin_node)
neurons = {key: SpikingNeuron(bias, a, b, c, d, inputs, learning_nodes[key].learning_rule,
learning_nodes[key].is_inhibitory, learning_nodes[key].stdp_parameters)
for key, inputs in node_inputs.items()}
return SpikingNeuralNetwork(neurons, input_keys, output_keys)
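# A minimal, hedged sketch (not part of the original module) of the plain
# Izhikevich update described in SpikingNeuron.advance(), using the common
# regular-spiking parameters a=0.02, b=0.2, c=-65, d=8, an assumed 30 mV firing
# threshold, and a constant input current. It ignores STDP and the adaptive
# threshold used above.
def _izhikevich_demo(steps=1000, dt=0.1, current=10.0,
                     a=0.02, b=0.2, c=-65.0, d=8.0, threshold=30.0):
    v, u = c, b * c
    spike_times = []
    for step in range(steps):
        if v >= threshold:
            v, u = c, u + d  # after-spike reset
            spike_times.append(step * dt)
        else:
            dv = dt * (0.04 * v ** 2 + 5 * v + 140 - u + current)
            du = dt * a * (b * v - u)
            v, u = v + dv, u + du
    return spike_times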
|
# NOTICE
#
# This software was produced for the U. S. Government under Basic Contract No.
# W56KGU-19-D-0004, and is subject to the Rights in Noncommercial Computer
# Software and Noncommercial Computer Software Documentation Clause
# 252.227-7014 (FEB 2012)
#
# (c) 2020 The MITRE Corporation. Approved for Public Release. Distribution Unlimited. Case Number 20-2258
from __future__ import absolute_import
from __future__ import print_function
import logging
import os
import sys
import time
import uuid
import json
import re
from dxlclient.callbacks import RequestCallback, ResponseCallback
from dxlclient.client import DxlClient
from dxlclient.client_config import DxlClientConfig
from dxlclient.message import Message, Request, Response
from dxlclient.service import ServiceRegistrationInfo
from dxlclient.message import Event
from messages import InitiateAssessmentMessage, RequestAcknowledgementMessage, CancelAssessmentMessage, ReportResultsMessage, MessageType, QueryMessage, QueryResultMessage, CollectorRequestMessage
# Import common logging and configuration
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/..")
from common import *
# Configure local logger
logger = logging.getLogger(__name__)
# Topic that manager listens on for assessment requests
SERVICE_INITIATE_ASSESSMENT_TOPIC = "/scap/service/assessment/initiate"
# Topic that manager listens on for cancel requests TODO: May go away
SERVICE_CANCEL_ASSESSMENT_TOPIC = "/scap/service/assessment/cancel"
# Topic used to send collection requests to the collector
EVENT_COLLECTOR_REQUEST_TOPIC = "/scap/event/collector/request"
# Topic used to send queries to the repository
SERVICE_REPOSITORY_QUERY_TOPIC = "/scap/service/repository/query"
# Base topic for application assessment results
EVENT_ASSESSMENT_RESULTS_TOPIC = "/scap/event/assessment/results"
# Topic that repository listens for data to store
EVENT_STORE_DATA_TOPIC = "/scap/event/data/store"
# Create DXL configuration from file
config = DxlClientConfig.create_dxl_config_from_file(CONFIG_FILE)
# Stores initiate assessment requests for processing
assessment_requests = []
# Stores transactions of ongoing assessments for each application
transactions = {}
# Stores transactions associated with targets so we know where
# to send cancellation requests
transactions_targets = {}
# Create the client
with DxlClient(config) as client:
# Query the repository for certain information
def query_repository(query):
# Create query message and send it to the repository
req = Request(SERVICE_REPOSITORY_QUERY_TOPIC)
qm = QueryMessage(query)
req.payload = (qm.to_json()).encode()
res = client.sync_request(req)
# Parse and return the query results
qrm = QueryResultMessage()
qrm.parse(res.payload.decode())
return qrm
# Send collection results to the appropriate application
def send_collection_results_event(rrsm):
send_event(EVENT_ASSESSMENT_RESULTS_TOPIC+"/"+rrsm.requestor_id, rrsm.to_json())
# Store data in the repository
def store_data(m):
logger.info("Storing data in the repository: %s", m.to_s())
send_event(EVENT_STORE_DATA_TOPIC, m.to_json())
# Task specific collector with collection request
def task_collector(crm, collector):
logger.info("Tasking collector %s with request: %s", collector, crm.to_s())
send_event(EVENT_COLLECTOR_REQUEST_TOPIC+"/"+collector, crm.to_json())
# Send event to the specified topic
def send_event(topic, m):
event = Event(topic)
event.payload = m.encode()
client.send_event(event)
# Acknowledge incoming requests with an acknowledgement message
# and assign a transaction id
def acknowledge_request(request):
# Prepare response and assign a new transaction
# id
res = Response(request)
rm = RequestAcknowledgementMessage()
rm.transaction_id = str(uuid.uuid4())
res.payload = (rm.to_json()).encode()
# Send the request acknowledgement to the application
        # in response to the request and return the transaction
        # id
        logger.info("Sending request acknowledgement: %s", rm.to_s())
client.send_response(res)
return rm.transaction_id
# Parse the content and extract the check identifiers
# If there is content just extract imaginary identifiers
# 1, 2, and 3. Otherwise, it should remain the empty string
# because it represents a cancel assessment request
def get_ids(content):
ids = ""
if content == "inventory":
ids = "1,2,3"
elif content == "assess":
ids = "4,5,6"
elif content == "remaining_request":
ids = "7,8,9"
return ids
# Determine if the report request represents on-going monitoring
# or a point-in-time assessment. Only support point-in-time
# assessments for now.
def is_point_in_time_assessment(iam):
if iam.content == "assess" or iam.content == "inventory":
return True
else:
return False
# Build repository query based on targeting information and the
# type of information that is needed
def query_builder(targeting, option):
query = ""
        if re.search("windows", targeting, re.IGNORECASE):
            query = "windows"
        elif re.search("rhel", targeting, re.IGNORECASE):
            query = "rhel"
        elif re.search("solaris", targeting, re.IGNORECASE):
            query = "solaris"
        elif re.search("macos", targeting, re.IGNORECASE):
            query = "macos"
        elif re.search("ubuntu", targeting, re.IGNORECASE):
            query = "ubuntu"
        elif re.search(r"\*", targeting):
            query = "windows_rhel_solaris_macos_ubuntu"
else:
return None
return query+"_"+option
# Get previous assessment results from the repository
def get_previous_results(iam, targets):
logger.info("Searching for previous results")
query = query_builder(iam.targeting, "results")
# Query the repository and filter based on oldest results,
# result_format_filters, collection_method, and targeting
# properties of the report request message
qrm = query_repository(query)
if qrm.result == "":
logger.info("Found: "+str(None))
return None
else:
rrsm = ReportResultsMessage(iam.transaction_id, iam.requestor_id, "", "", qrm.result)
logger.info("Found: "+qrm.result)
return rrsm
# Get applicable/undetermined targets from the repository
def get_applicable_targets(iam):
logger.info("Searching for applicable/undetermined targets")
query = query_builder(iam.targeting, "targets")
# Query the repository for applicable and undetermined
# assets
qrm = query_repository(query)
logger.info("Found: "+str(qrm.result))
return qrm.result
# Get in scope collectors based on targets from the repository
def get_collectors(targets):
logger.info("Searching for in scope collectors")
query = "targets_"+json.dumps(targets)
# Query the repository for in scope collectors
# based on specified targets
qrm = query_repository(query)
logger.info("Found: "+str(qrm.result))
return qrm.result
# Task the collectors with the initiate assessment request
def task_collectors(iam, targets, collectors):
logger.info("Tasking the collectors %s with collection request", collectors)
# Extract the check identifiers from the content and create
# a collection request using the information from the initiate
# assessment request
ids = get_ids(iam.content)
crm = CollectorRequestMessage(ids, iam.targeting, iam.latest_return,
iam.collection_method, iam.result_format_filters,
iam.collection_parameters, iam.transaction_id,
iam.requestor_id, targets)
# Send collector request events to the appropriate
# collectors
for collector in collectors:
task_collector(crm, collector)
# Use the previously collected results and the initiate
# assessment request to determine which checks remain
# from the report request. Just pass the request through for now
def get_remaining_request(previous_results, iam):
# If there are no previous results, the full request still
# needs to be collected, so return it unchanged
if previous_results == None:
return iam
else:
iam.content = "remaining_request"
return iam
# Process incoming assessments from applications
class InitiateAssessmentCallback(RequestCallback):
def on_request(self, request):
# Acknowledge the initiate assessment request
# and get a new transaction id
transaction_id = acknowledge_request(request)
# Parse the initiate assessment request message
# and set the transaction id with the new
# transaction id
iam = InitiateAssessmentMessage()
iam.parse(request.payload.decode())
iam.transaction_id = transaction_id
logger.info("Manager recieved initiate assessment request: %s", iam.to_s())
# Add to the list of active assessment transactions
if iam.requestor_id in transactions.keys():
transactions[iam.requestor_id].append(transaction_id)
else:
transactions[iam.requestor_id] = [transaction_id]
# Append the initiate assessment request to the list
# of requests that need to be processed
assessment_requests.append(iam)
# Process incoming cancel assessment messages from the application
class CancelAssessmentCallback(RequestCallback):
def on_request(self, request):
# Parse cancel assessment message
cam = CancelAssessmentMessage()
cam.parse(request.payload.decode())
logger.info("Manager received cancel assessment request: %s", cam.to_s())
# Check to make sure it came from the application
# that originally requested the assessment. If it
# is not, just ignore the message
if cam.requestor_id in transactions.keys() and cam.transaction_id in transactions[cam.requestor_id]:
assessment_requests.append(cam)
# Cancel request didn't come from originating application so ignore
else:
logger.info("Ignoring cancel request "+cam.transaction_id+" for application "+cam.requestor_id)
# Send request acknowledgement message with the transaction
# id that was cancelled
res = Response(request)
ram = RequestAcknowledgementMessage()
ram.transaction_id = cam.transaction_id
res.payload = (ram.to_json()).encode()
client.send_response(res)
# Prepare service registration information
info = ServiceRegistrationInfo(client, "/scap/manager")
# Have manager provide assessment request, cancel assessment, and query services
info.add_topic(SERVICE_INITIATE_ASSESSMENT_TOPIC, InitiateAssessmentCallback())
info.add_topic(SERVICE_CANCEL_ASSESSMENT_TOPIC, CancelAssessmentCallback())
# Connect to the message fabric and register the service
client.connect()
client.register_service_sync(info, 10)
# Wait forever
while True:
# Process all initiate assessment requests that were received
while assessment_requests:
ar = assessment_requests.pop(0)
if ar.message_type == MessageType.CANCEL_ASSESSMENT.value:
iam = InitiateAssessmentMessage()
iam.transaction_id = ar.transaction_id
iam.requestor_id = ar.requestor_id
targets = transactions_targets[ar.transaction_id]
# Query the repository for in scope collectors
collectors = get_collectors(targets)
# Task the in scope collectors
task_collectors(iam, targets, collectors)
else:
# Store the initiate assessment request in the repository
store_data(ar)
# Query the repository for applicable targets
targets = get_applicable_targets(ar)
# Store targets associated with the transaction_id
transactions_targets[ar.transaction_id] = targets
# If point-in-time assessment, get any previous results from
# the database
previous_results = None
if is_point_in_time_assessment(ar):
previous_results = get_previous_results(ar, targets)
# If there are previous results, send the results to the
# application
if previous_results != None:
# Send results to the requesting application
send_collection_results_event(previous_results)
# Based on previous results determine what is left
# and task the collector
rr_ar = get_remaining_request(previous_results,ar)
# Query the repository for in scope collectors
collectors = get_collectors(targets)
# Task the in scope collectors
task_collectors(rr_ar, targets, collectors)
time.sleep(1)
|
class Solution:
res = []
# Generate the data for row number `row`
def search(self, row, all_rows):
if row == all_rows:
return
arr = [1] * (row + 1)
for i in range(1, row):
arr[i] = self.res[row - 1][i - 1] + self.res[row - 1][i]
self.res.append(arr)
self.search(row + 1, all_rows)
def generate(self, numRows):
self.res.clear()
self.search(0, numRows)
return self.res
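# Quick, hedged sanity check of the Pascal's-triangle generator above
# (illustrative only, not part of the original solution):
if __name__ == '__main__':
    print(Solution().generate(5))
    # expected: [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]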
|
from enum import Enum
from operator import attrgetter
from django.db import models
from django.db.models import sql
from django.db.models.deletion import Collector
from django.utils import six
from django_types.operations import CustomTypeOperation
from .fields import EnumField
"""
Use a symbol = value style as per Enum expectations.
Where a value is the human readable or sensible value, and the symbol is the
constant or programming flag to use.
For readability of the database values, the human readable values are used.
"""
class EnumState:
@classmethod
def values(cls):
return [em.value for em in cls]
@classmethod
def values_set(cls):
return set(cls.values())
def enum_state(values, name=None, app_label=None):
""" Create an EnumState representing the values or Enum """
if isinstance(values, type) and issubclass(values, Enum):
if not name:
name = values.__name__
values = (em.value for em in values)
elif not name:
name = 'Unnamed Enum'
e = Enum(name, [(v, v) for v in values], type=EnumState)
e.Meta = type('Meta', (object,), {})
e.Meta.app_label = app_label
return e
class SQLCollector(Collector):
""" Collector that generates the required deletion SQL instead of performing it """
def as_sql(self):
""" Generate SQL queries that perform related deletion """
# List of (sql, params) tuples to perform deletion
query_list = []
for model, instances in self.data.items():
self.data[model] = sorted(instances, key=attrgetter("pk"))
self.sort()
# Do not send pre_delete signals as in .delete()
# Fast deletes
for qs in self.fast_deletes:
# TODO Check for any potential caveats from complex queries - assume none are generated by Collector
# Clone queryset into DeleteQuery to use .as_sql()
query_list.append(qs.query.clone(klass=sql.DeleteQuery).get_compiler(self.using).as_sql())
# update fields
for model, instances_for_fieldvalues in six.iteritems(self.field_updates):
query = sql.UpdateQuery(model)
for (field, value), instances in six.iteritems(instances_for_fieldvalues):
query.add_update_values({field.name: value})
query.add_q(models.Q(pk__in=[obj.pk for obj in instances]))
query_list.append(query.get_compiler(using=self.using).as_sql())
# reverse instance collections
for instances in six.itervalues(self.data):
instances.reverse()
# delete instances
for model, instances in six.iteritems(self.data):
query = sql.DeleteQuery(model)
pk_list = [obj.pk for obj in instances]
query.where = query.where_class()
query.add_q(models.Q(pk__in=pk_list))
query_list.append(query.get_compiler(using=self.using).as_sql())
# Do not update instances as in .delete()
return query_list
class EnumOperation(CustomTypeOperation):
field_type = EnumField
class CreateEnum(EnumOperation):
def __init__(self, db_type, values):
# Values follow Enum functional API options to specify
self.db_type = db_type
self.values = values
def describe(self):
return 'Create enum type {db_type}'.format(db_type=self.db_type)
def state_forwards(self, app_label, state):
enum = enum_state(self.values, name=self.db_type, app_label=app_label)
state.add_type(self.db_type, enum)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
if schema_editor.connection.features.requires_enum_declaration:
enum = to_state.db_types[self.db_type]
sql = schema_editor.sql_create_enum % {
'enum_type': self.db_type,
'values': ', '.join(['%s'] * len(enum))}
schema_editor.execute(sql, enum.values())
def database_backwards(self, app_label, schema_editor, from_state, to_state):
if schema_editor.connection.features.requires_enum_declaration:
sql = schema_editor.sql_delete_enum % {
'enum_type': self.db_type}
schema_editor.execute(sql)
class RemoveEnum(EnumOperation):
def __init__(self, db_type):
self.db_type = db_type
def describe(self):
return 'Remove enum type {db_type}'.format(db_type=self.db_type)
def state_forwards(self, app_label, state):
# TODO Add dependency checking and cascades
state.remove_type(self.db_type)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
if schema_editor.connection.features.requires_enum_declaration:
sql = schema_editor.sql_delete_enum % {
'enum_type': self.db_type}
schema_editor.execute(sql)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
if schema_editor.connection.features.requires_enum_declaration:
enum = to_state.db_types[self.db_type]
sql = schema_editor.sql_create_enum % {
'enum_type': self.db_type,
'values': ', '.join(['%s'] * len(enum))}
schema_editor.execute(sql, enum.values())
class RenameEnum(EnumOperation):
def __init__(self, old_type, new_type):
self.old_db_type = old_type
self.db_type = new_type
def describe(self):
return 'Rename enum type {old} to {new}'.format(
old=self.old_db_type,
new=self.db_type)
def state_forwards(self, app_label, state):
old_enum = state.db_types[self.old_db_type]
enum = enum_state(old_enum, name=self.db_type, app_label=app_label)
state.remove_type(self.old_db_type)
state.add_type(self.db_type, enum)
# Update all fields using this enum
for info in self.get_fields(state, db_type=self.old_db_type):
changed_field = info.field.clone()
changed_field.type_name = self.db_type
info.model_state.fields[info.field_index] = (info.field_name, changed_field)
state.reload_model(info.model_app_label, info.model_name)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
if schema_editor.connection.features.requires_enum_declaration:
sql = schema_editor.sql_rename_enum % {
'old_type': self.old_db_type,
'enum_type': self.db_type}
schema_editor.execute(sql)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self.old_db_type, self.db_type = self.db_type, self.old_db_type
self.database_forwards(app_label, schema_editor, from_state, to_state)
self.old_db_type, self.db_type = self.db_type, self.old_db_type
class AlterEnum(EnumOperation):
temp_db_type = 'django_enum_temp'
transition_db_type = 'django_enum_transition'
def __init__(self, db_type, add_values=None, remove_values=None, on_delete=models.PROTECT):
self.db_type = db_type
self.add_values = set(add_values or ())
self.remove_values = set(remove_values or ())
self.on_delete = on_delete
def describe(self):
return 'Alter enum type {db_type},{added}{removed}'.format(
db_type=self.db_type,
added=' added {} value(s)'.format(len(self.add_values)) if self.add_values else '',
removed=' removed {} value(s)'.format(len(self.remove_values)) if self.remove_values else '')
def state_forwards(self, app_label, state):
from_enum = state.db_types[self.db_type]
to_enum = enum_state((from_enum.values_set() | self.add_values) - self.remove_values, name=self.db_type, app_label=app_label)
state.add_type(self.db_type, to_enum)
# Update all fields using this enum
for info in self.get_fields(state):
changed_field = info.field.clone()
changed_field.type_def = to_enum
info.model_state.fields[info.field_index] = (info.field_name, changed_field)
state.reload_model(info.model_app_label, info.model_name)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
# Compare from_state and to_state and generate the appropriate ALTER commands
pre_actions = []
post_actions = []
# Make sure ORM is ready for use
from_state.clear_delayed_apps_cache()
db_alias = schema_editor.connection.alias
# Get field/model list
fields = [
(from_model, to_model, from_field, self.on_delete or from_field.on_delete)
for info in self.get_fields(from_state)
for from_model in [from_state.apps.get_model(info.model_app_label, info.model_name)]
for from_field in [from_model._meta.get_field(info.field_name)]
for to_model in [to_state.apps.get_model(info.model_app_label, info.model_name)]
]
if self.remove_values:
# The first post delete actions are to finalise the field types
if schema_editor.connection.features.has_enum:
if schema_editor.connection.features.requires_enum_declaration:
sql_alter_column_type = getattr(
schema_editor,
'sql_alter_column_type_using',
schema_editor.sql_alter_column_type)
for (from_model, to_model, field, on_delete) in fields:
db_table = schema_editor.quote_name(from_model._meta.db_table)
db_field = schema_editor.quote_name(field.column)
sql = schema_editor.sql_alter_column % {
'table': db_table,
'changes': sql_alter_column_type % {
'column': db_field,
'type': self.temp_db_type,
'old_type': self.db_type}}
post_actions.append((sql, []))
else:
for (from_model, to_model, field, on_delete) in fields:
db_table = schema_editor.quote_name(from_model._meta.db_table)
db_field = schema_editor.quote_name(field.column)
new_field = to_model._meta.get_field(field.name)
db_type, params = new_field.db_type(schema_editor.connection).paramatized
sql = schema_editor.sql_alter_column % {
'table': db_table,
'changes': schema_editor.sql_alter_column_type % {
'column': db_field,
'type': db_type}}
post_actions.append((sql, params))
if self.add_values:
# If there's the possibility of inconsistent actions, use transition type
# ie, ADD VALUE 'new_val' and REMOVE VALUE 'rem_val' ON DELETE SET('new_val')
# On DB's without enum support this isn't necessary as they are always CHAR
transition_fields = [
(from_model, field)
for (from_model, to_model, field, on_delete) in fields
if hasattr(on_delete, 'deconstruct')
or (on_delete == models.SET_DEFAULT and field.get_default() in self.add_values)]
if transition_fields and schema_editor.connection.features.has_enum:
transition_values = to_state.db_types[self.db_type].values_set() | self.remove_values
transition_enum = enum_state(transition_values, 'transitional_enum')
if schema_editor.connection.features.requires_enum_declaration:
# Create transition type
sql = schema_editor.sql_create_enum % {
'enum_type': self.transition_db_type,
'values': ', '.join(['%s'] * len(transition_values))}
pre_actions.append((sql, list(transition_values)))
# Drop transition type after done
sql = schema_editor.sql_delete_enum % {
'enum_type': self.transition_db_type}
post_actions.append((sql, []))
# Set fields to transition type
for (model, field) in transition_fields:
db_table = schema_editor.quote_name(model._meta.db_table)
db_field = schema_editor.quote_name(field.column)
field.type_name = self.transition_db_type
field.type_def = transition_enum
db_type, params = field.db_type(schema_editor.connection).paramatized
sql = schema_editor.sql_alter_column % {
'table': db_table,
'changes': schema_editor.sql_alter_column_type % {
'column': db_field,
'type': db_type}}
pre_actions.append((sql, params))
if schema_editor.connection.features.requires_enum_declaration:
# Create new type with temporary name
to_enum = to_state.db_types[self.db_type]
sql = schema_editor.sql_create_enum % {
'enum_type': self.temp_db_type,
'values': ', '.join(['%s'] * len(to_enum))}
pre_actions.append((sql, to_enum.values()))
# Clean up original type and rename new one to replace it
sql = schema_editor.sql_delete_enum % {
'enum_type': self.db_type}
post_actions.append((sql, []))
sql = schema_editor.sql_rename_enum % {
'old_type': self.temp_db_type,
'enum_type': self.db_type}
post_actions.append((sql, []))
elif self.add_values:
# Just adding values? Directly modify types, no hassle!
if schema_editor.connection.features.requires_enum_declaration:
for value in self.add_values:
sql = schema_editor.sql_alter_enum % {
'enum_type': self.db_type,
'value': '%s'}
post_actions.append((sql, [value]))
elif schema_editor.connection.features.has_enum:
for (from_model, to_model, field, on_delete) in fields:
db_table = schema_editor.quote_name(from_model._meta.db_table)
db_field = schema_editor.quote_name(field.column)
new_field = to_model._meta.get_field(field.name)
db_type, params = new_field.db_type(schema_editor.connection).paramatized
sql = schema_editor.sql_alter_column % {
'table': db_table,
'changes': schema_editor.sql_alter_column_type % {
'column': db_field,
'type': db_type}}
post_actions.append((sql, params))
# Prepare database for data to be migrated
for sql, params in pre_actions:
schema_editor.execute(sql, params)
# Apply all on_delete actions making data consistent with to_state values
if self.remove_values:
# Cheap hack to allow on_delete to work
for (from_model, to_model, field, on_delete) in fields:
field.remote_field = self
# Records affected by on_delete action
on_delete_gen = ((
field,
from_model.objects.using(db_alias).filter(
models.Q(('{}__in'.format(field.name), self.remove_values))
).only('pk'),
on_delete)
for (from_model, to_model, field, on_delete) in fields)
# Validate on_delete constraints
collector = SQLCollector(using=db_alias)
for (field, qs, on_delete) in on_delete_gen:
if qs:
# Trigger the on_delete collection directly
on_delete(collector, field, qs, db_alias)
for sql, params in collector.as_sql():
# Use SQLCollector.as_sql() instead of directly executing
# Such that manage.py sqlmigration correctly reflects all actions
schema_editor.execute(sql, params)
# Apply final changes
for sql, params in post_actions:
schema_editor.execute(sql, params)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self.add_values, self.remove_values = self.remove_values, self.add_values
self.database_forwards(app_label, schema_editor, from_state, to_state)
self.add_values, self.remove_values = self.remove_values, self.add_values
|
from uvm.base import sv
from uvm.comps import UVMScoreboard
from uvm.macros import uvm_component_utils, uvm_info
from uvm.tlm1 import UVMAnalysisImp
class ram_scoreboard(UVMScoreboard):
"""
Compare signals from the DUT to a reference and give it a score
(PASS / FAIL)
"""
def __init__(self, name, parent):
UVMScoreboard.__init__(self, name, parent)
def build_phase(self, phase):
pass
def write(self, trans):
# TODO: compare the received transaction against the reference model
pass
|
"""Serializer of Category"""
from rest_framework import serializers
from apps.category.models import Category
class CategorySerializer(serializers.ModelSerializer):
"""Serializer Class of Category"""
class Meta:
"""Meta Class"""
model = Category
fields = "__all__"
|
"""trunk URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf.urls import include
from django.urls import path, re_path
from django.conf import settings
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from ncore.views import LoginView
schema_view = get_schema_view(
openapi.Info(
title="XXX API",
default_version='v1',
description="XXXX",
terms_of_service="http://www.polarwin.cn/contact",
contact=openapi.Contact(email="info@polarwin.cn"),
license=openapi.License(name="BSD License"),
),
public=True,
permission_classes=(permissions.AllowAny,),
)
urlpatterns = [
path('admin/', admin.site.urls),
path('login/', LoginView.as_view(), name='login'),
]
if settings.DEBUG:
urlpatterns += [
path(r'api-auth/', include('rest_framework.urls', namespace='rest_framework')),
re_path(r'swagger(?P<format>\.json|\.yaml)$',
schema_view.without_ui(cache_timeout=0), name='schema-json'),
path(r'swagger/', schema_view.with_ui('swagger',
cache_timeout=0), name='schema-swagger-ui'),
path(r'cached/swagger/', schema_view.with_ui('swagger',
cache_timeout=None), name='schema-swagger-ui-cached'),
path(r'redoc/', schema_view.with_ui('redoc',
cache_timeout=0), name='schema-redoc'),
]
if settings.DEBUG:
from rest_framework.routers import DefaultRouter
from ncore.views import UserView
from ncore.views import CustomObtainJSONWebTokenView, CustomRefreshJSONWebTokenView
from dingtalkapi.views import DingTalkLoginView
from entwechatapi.views import EntWeChatLoginView
from wechatminiprogramapi.views import WeChatMiniProgramCodeView, WeChatMiniProgramLoginView
router = DefaultRouter()
router.register(r'users', UserView, base_name='user')
urlpatterns += [
path(r'', include(router.urls)),
# path('dlogin/', DingTalkLoginView.as_view(), name='dlogin'),
# path('entwlogin/', EntWeChatLoginView.as_view(), name='entwlogin'),
# path('api-token-auth/', CustomObtainJSONWebTokenView.as_view(), name='jwt_obtain'),
# path('api-token-refresh/', CustomRefreshJSONWebTokenView.as_view(), name='jwt_refresh'),
# path('wechat-mini-program-code/', WeChatMiniProgramCodeView.as_view(), name='wechat_mini_program_code'),
# path('wechat-mini-program-login/', WeChatMiniProgramLoginView.as_view(), name='wechat_mini_program_login'),
]
# urlpatterns += [path(r'silk/', include('silk.urls', namespace='silk'))]
|
#!/usr/bin/env python3
print((int(input())//2)**2)
|
class Config():
def __init__(self):
self.is_training = True
# path
self.glove = 'data/glove.840B.300d.txt.filtered'
self.turian = 'data/turian.50d.txt'
self.train_path = "data/train.english.jsonlines"
self.dev_path = "data/dev.english.jsonlines"
self.test_path = "data/test.english.jsonlines"
self.char_path = "data/char_vocab.english.txt"
self.cuda = "0"
self.max_word = 1500
self.epoch = 200
# config
# self.use_glove = True
# self.use_turian = True #No
self.use_elmo = False
self.use_CNN = True
self.model_heads = True #Yes
self.use_width = True # Yes
self.use_distance = True #Yes
self.use_metadata = True #Yes
self.mention_ratio = 0.4
self.max_sentences = 50
self.span_width = 10
self.feature_size = 20  # size of the width-feature embedding
self.lr = 0.001
self.lr_decay = 1e-3
self.max_antecedents = 100  # this parameter is not used in mention detection
self.atten_hidden_size = 150
self.mention_hidden_size = 150
self.sa_hidden_size = 150
self.char_emb_size = 8
self.filter = [3,4,5]
# decay = 1e-5
# for printing
def __str__(self):
d = self.__dict__
out = 'config==============\n'
for i in list(d):
out += i+":"
out += str(d[i])+"\n"
out+="config==============\n"
return out
if __name__=="__main__":
config = Config()
print(config)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-30 11:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fpraktikum', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='course',
name='graduation',
field=models.CharField(blank=True, choices=[('BA', 'Bachelor'), ('MA', 'Master'), ('L', 'Lehramt')],
max_length=2),
),
migrations.AlterField(
model_name='fpregistration',
name='semester',
field=models.CharField(blank=True, default='WS17', max_length=4, verbose_name='semester'),
),
]
|
import os
import doodad as pd
import doodad.ssh as ssh
import doodad.mount as mount
from doodad.easy_sweep import hyper_sweep
import os.path as osp
import glob
instance_types = {
'c4.large': dict(instance_type='c4.large',spot_price=0.20),
'c4.xlarge': dict(instance_type='c4.xlarge',spot_price=0.20),
'c4.2xlarge': dict(instance_type='c4.2xlarge',spot_price=0.50),
'c4.4xlarge': dict(instance_type='c4.4xlarge',spot_price=0.50),
'c5.large': dict(instance_type='c5.large',spot_price=0.20),
'c5.xlarge': dict(instance_type='c5.xlarge',spot_price=0.20),
'c5.2xlarge': dict(instance_type='c5.2xlarge',spot_price=0.50),
'c5.4xlarge': dict(instance_type='c5.4xlarge',spot_price=0.50),
}
def launch(project_dir, method, params, mode='local', code_dependencies=list(), data_dependencies=dict(), instance_type='c4.xlarge', docker_image='justinfu/rlkit:0.4', s3_log_prefix='rlkit'):
"""
Arguments:
project_dir (str): The main directory containing all project files. Data will be saved to project_dir/data
This will additionally add all dependencies in project_dir/dependencies
method (fn): The function to call
params (dict): The set of hyperparameters to sweep over
mode (str): Choose between ['ec2', 'docker', 'local']
code_dependencies (list): List of code locations that need to be additionally mounted in the Docker image and added to the python path.
data_dependencies (dict): for each remote_location: data_dir entry in this dict, the directory `data_dir` will be accessible in the Docker image at `/tmp/data/{remote_location}`
instance_type (str / dict): if str, uses an instance type from `instance_types` above. If dict, need keys `instance_type` and `spot_price`
docker_image (str): Name of docker image
s3_log_prefix (str): Where data will be stored on s3
"""
PROJECT_DIR = osp.realpath(project_dir)
LOCAL_OUTPUT_DIR = osp.join(PROJECT_DIR,'data')
# Set up code and output directories
REMOTE_OUTPUT_DIR = '/tmp/outputs' # this is the directory visible to the target
REMOTE_DATA_DIR = '/tmp/data'
main_mount = mount.MountLocal(local_dir=PROJECT_DIR, pythonpath=True, filter_dir=('data','analysis','dependencies'))
all_code_dependencies = [
item
for item in glob.glob(osp.join(PROJECT_DIR, "dependencies/*")) + code_dependencies
if osp.isdir(item)
]
code_mounts = [main_mount] + [mount.MountLocal(local_dir=directory, pythonpath=True) for directory in all_code_dependencies]
# MuJoCo
code_mounts.append(mount.MountLocal(local_dir=osp.expanduser('~/.mujoco'), mount_point='/root/.mujoco', pythonpath=True))
code_mounts.append(mount.MountLocal(local_dir=osp.expanduser('~/projects/rank_collapse/doodad_old/'), pythonpath=True))
params['output_dir'] = [REMOTE_OUTPUT_DIR]
params['data_dir'] = [REMOTE_DATA_DIR]
if mode == 'local':
doodad_mode = pd.mode.Local()
params['output_dir'] = [LOCAL_OUTPUT_DIR]
elif mode == 'docker':
doodad_mode = pd.mode.LocalDocker(
image=docker_image
)
elif mode == 'ec2':
assert instance_type in instance_types
doodad_mode = pd.mode.EC2AutoconfigDocker(
image=docker_image,
image_id='ami-086ecbff428fa44a8',
region='us-west-1', # EC2 region
s3_log_prefix=s3_log_prefix, # Folder to store log files under
s3_log_name=s3_log_prefix,
terminate=True, # Whether to terminate on finishing job
**instance_types[instance_type]
)
data_mounts = [
mount.MountLocal(local_dir=osp.realpath(directory), mount_point=osp.join(REMOTE_DATA_DIR,remote_name))
for remote_name,directory in data_dependencies.items()
]
if mode == 'local':
output_mounts = []
elif mode == 'docker' or mode == 'ssh':
output_dir = osp.join(LOCAL_OUTPUT_DIR, 'docker/')
output_mounts= [mount.MountLocal(local_dir=output_dir, mount_point=REMOTE_OUTPUT_DIR,output=True)]
elif mode == 'ec2':
output_mounts = [mount.MountS3(s3_path='data',mount_point=REMOTE_OUTPUT_DIR,output=True)]
mounts = code_mounts + data_mounts + output_mounts
hyper_sweep.run_sweep_doodad(method, params, doodad_mode, mounts)
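# Hedged usage sketch of launch(); everything below is an assumption made for
# illustration (the function name, project path and hyperparameter names are
# not part of this module). Note that launch() injects 'output_dir' and
# 'data_dir' into params, so the target function should accept those arguments.
def _example_method(seed, output_dir, data_dir):
    print('running seed', seed, 'writing results to', output_dir)

if __name__ == '__main__':
    launch(project_dir='.',
           method=_example_method,
           params={'seed': [0, 1, 2]},
           mode='local')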
|
def test_enter(player, limo):
player.perform("enter limo")
assert player.saw("You enter a fancy limo")
assert player.saw("electric")
def test_enter_and_exit(player, limo):
player.perform("enter limo")
player.forget()
player.perform("go out")
assert player.saw("Antechamber")
|
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests for Affy module."""
import unittest
import struct
import os
import sys
try:
from numpy import array
import numpy.testing
except ImportError:
from Bio import MissingPythonDependencyError
raise MissingPythonDependencyError(
"Install NumPy if you want to use Bio.Affy.CelFile"
) from None
from Bio.Affy import CelFile
class AffyTest(unittest.TestCase):
def setUp(self):
self.affy3 = "Affy/affy_v3_example.CEL"
self.affy4 = "Affy/affy_v4_example.CEL"
self.affy4Bad = "Affy/affy_v4_bad_example.CEL"
with open(self.affy4Bad, "wb") as f:
self.writeExampleV4(f, bad=True)
def tearDown(self):
os.remove(self.affy4Bad)
# tests the Affymetrix v3 parser
def testAffy3(self):
with open(self.affy3) as f:
record = CelFile.read(f)
self.assertGreater(len(record.DatHeader), 0)
self.assertEqual(record.intensities.shape, (5, 5))
self.assertEqual(record.intensities.shape, record.stdevs.shape)
self.assertEqual(record.intensities.shape, record.npix.shape)
self.assertEqual(record.ncols, 5)
self.assertEqual(record.nrows, 5)
self.assertEqual(record.version, 3)
self.assertEqual(record.GridCornerUL, (206, 129))
self.assertEqual(record.GridCornerUR, (3570, 107))
self.assertEqual(record.GridCornerLR, (3597, 3470))
self.assertEqual(record.GridCornerLL, (234, 3492))
self.assertEqual(record.DatHeader["filename"], "1g_A9AF")
self.assertEqual(record.DatHeader["CLS"], 3684)
self.assertEqual(record.DatHeader["RWS"], 3684)
self.assertEqual(record.DatHeader["XIN"], 1)
self.assertEqual(record.DatHeader["YIN"], 1)
self.assertEqual(record.DatHeader["VE"], 30)
self.assertAlmostEqual(record.DatHeader["laser-power"], 2.0)
self.assertEqual(record.DatHeader["scan-date"], "08/23/07")
self.assertEqual(record.DatHeader["scan-time"], "11:23:24")
self.assertEqual(record.DatHeader["scanner-id"], "50205880")
self.assertEqual(record.DatHeader["scanner-type"], "M10")
self.assertEqual(record.DatHeader["array-type"], "Tgondii_SNP1.1sq")
self.assertEqual(record.DatHeader["image-orientation"], 6)
self.assertEqual(record.Algorithm, "Percentile")
self.assertEqual(len(record.AlgorithmParameters), 16)
self.assertEqual(record.AlgorithmParameters["Percentile"], 75)
self.assertEqual(record.AlgorithmParameters["CellMargin"], 2)
self.assertAlmostEqual(record.AlgorithmParameters["OutlierHigh"], 1.500)
self.assertAlmostEqual(record.AlgorithmParameters["OutlierLow"], 1.004)
self.assertEqual(record.AlgorithmParameters["AlgVersion"], "6.0")
self.assertEqual(
record.AlgorithmParameters["FixedCellSize"], True
) # noqa: A502
self.assertEqual(record.AlgorithmParameters["FullFeatureWidth"], 7)
self.assertEqual(record.AlgorithmParameters["FullFeatureHeight"], 7)
self.assertEqual(
record.AlgorithmParameters["IgnoreOutliersInShiftRows"], False
) # noqa: A502
self.assertEqual(
record.AlgorithmParameters["FeatureExtraction"], True
) # noqa: A502
self.assertEqual(record.AlgorithmParameters["PoolWidthExtenstion"], 2)
self.assertEqual(record.AlgorithmParameters["PoolHeightExtension"], 2)
self.assertEqual(
record.AlgorithmParameters["UseSubgrids"], False
) # noqa: A502
self.assertEqual(
record.AlgorithmParameters["RandomizePixels"], False
) # noqa: A502
self.assertEqual(record.AlgorithmParameters["ErrorBasis"], "StdvMean")
self.assertAlmostEqual(record.AlgorithmParameters["StdMult"], 1.0)
self.assertEqual(record.NumberCells, 25)
global message
try:
numpy.testing.assert_allclose(
record.intensities,
[
[234.0, 170.0, 22177.0, 164.0, 22104.0],
[188.0, 188.0, 21871.0, 168.0, 21883.0],
[188.0, 193.0, 21455.0, 198.0, 21300.0],
[188.0, 182.0, 21438.0, 188.0, 20945.0],
[193.0, 20370.0, 174.0, 20605.0, 168.0],
],
)
message = None
except AssertionError as err:
message = str(err)
if message is not None:
self.fail(message)
try:
numpy.testing.assert_allclose(
record.stdevs,
[
[24.0, 34.5, 2669.0, 19.7, 3661.2],
[29.8, 29.8, 2795.9, 67.9, 2792.4],
[29.8, 88.7, 2976.5, 62.0, 2914.5],
[29.8, 76.2, 2759.5, 49.2, 2762.0],
[38.8, 2611.8, 26.6, 2810.7, 24.1],
],
)
message = None
except AssertionError as err:
message = str(err)
if message is not None:
self.fail(message)
try:
numpy.testing.assert_array_equal(
record.npix,
[
[25, 25, 25, 25, 25],
[25, 25, 25, 25, 25],
[25, 25, 25, 25, 25],
[25, 25, 25, 25, 25],
[25, 25, 25, 25, 25],
],
)
message = None
except AssertionError as err:
message = str(err)
if message is not None:
self.fail(message)
self.assertEqual(record.nmask, 3)
try:
numpy.testing.assert_array_equal(
record.mask,
[
[False, False, False, False, False],
[False, False, False, True, True],
[False, False, False, False, True],
[False, False, False, False, False],
[False, False, False, False, False],
],
)
message = None
except AssertionError as err:
message = str(err)
if message is not None:
self.fail(message)
self.assertEqual(record.noutliers, 3)
try:
numpy.testing.assert_array_equal(
record.outliers,
[
[False, False, False, False, False],
[False, True, True, False, False],
[False, False, False, False, False],
[False, True, False, False, False],
[False, False, False, False, False],
],
)
message = None
except AssertionError as err:
message = str(err)
if message is not None:
self.fail(message)
self.assertEqual(record.nmodified, 3)
try:
numpy.testing.assert_allclose(
record.modified,
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 189.0, 220.0],
[0.0, 0.0, 0.0, 21775.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
)
message = None
except AssertionError as err:
message = str(err)
if message is not None:
self.fail(message)
def testAffy4(self):
with open(self.affy4, "rb") as f:
record = CelFile.read(f)
self.assertEqual(record.intensities.shape, (5, 5))
self.assertEqual(record.intensities.shape, record.stdevs.shape)
self.assertEqual(record.intensities.shape, record.npix.shape)
self.assertEqual(record.ncols, 5)
self.assertEqual(record.nrows, 5)
global message
try:
numpy.testing.assert_allclose(
record.intensities,
[
[0.0, 1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0, 9.0],
[10.0, 11.0, 12.0, 13.0, 14.0],
[15.0, 16.0, 17.0, 18.0, 19.0],
[20.0, 21.0, 22.0, 23.0, 24.0],
],
)
message = None
except AssertionError as err:
message = str(err)
if message is not None:
self.fail(message)
try:
numpy.testing.assert_allclose(
record.stdevs,
[
[0.0, -1.0, -2.0, -3.0, -4.0],
[-5.0, -6.0, -7.0, -8.0, -9.0],
[-10.0, -11.0, -12.0, -13.0, -14.0],
[-15.0, -16.0, -17.0, -18.0, -19.0],
[-20.0, -21.0, -22.0, -23.0, -24.0],
],
)
message = None
except AssertionError as err:
message = str(err)
if message is not None:
self.fail(message)
try:
numpy.testing.assert_allclose(
record.npix,
[
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9],
],
)
message = None
except AssertionError as err:
message = str(err)
if message is not None:
self.fail(message)
self.assertEqual(len(record.AlgorithmParameters), 329)
self.assertEqual(len(record.GridCornerUL), 7)
self.assertEqual(record.AlgorithmParameters[-3:], "169")
def testAffyBadHeader(self):
with self.assertRaises(CelFile.ParserError):
with open(self.affy4Bad, "rb") as f:
record = CelFile.read(f)
def testAffyWrongModeReadV3(self):
with self.assertRaises(ValueError):
with open(self.affy3, "rb") as f:
record = CelFile.read(f, version=3)
def testAffyWrongModeReadV4(self):
with self.assertRaises(ValueError):
with open(self.affy4) as f:
record = CelFile.read(f, version=4)
# Writes a small example Affymetrix V4 CEL File
def writeExampleV4(self, f, bad=False):
preHeaders = {
"cellNo": 25,
"columns": 5,
"headerLen": 752,
"magic": 64,
"rows": 5,
"version": 4,
}
goodH = {"Axis-invertX": b"0"}
badH = {"Axis-invertX": b"1"}
headers = {
"Algorithm": b"Percentile",
"AlgorithmParameters": b"Percentile:75;CellMargin:4;Outlie"
b"rHigh:1.500;OutlierLow:1.004;AlgVersion:6.0;FixedCellSize"
b":TRUE;FullFeatureWidth:7;FullFeatureHeight:7;IgnoreOutlie"
b"rsInShiftRows:FALSE;FeatureExtraction:TRUE;PoolWidthExten"
b"stion:1;PoolHeightExtension:1;UseSubgrids:FALSE;Randomize"
b"Pixels:FALSE;ErrorBasis:StdvMean;StdMult:1.000000;NumDATS"
b"ubgrids:169",
"AxisInvertY": b"0",
"Cols": b"5",
"DatHeader": b"[0..65534] 20_10N:CLS=19420RWS=19420XIN=0"
b" YIN=0 VE=30 2.0 05/25/05 23:19:07 50102310 M10 "
b" \x14 \x14 HuEx-1_0-st-v2.1sq \x14 \x14 \x14 \x14 "
b"\x14570 \x14 25540.671875 \x14 3.500000 \x14 0.7000 \x14"
b" 3",
"GridCornerLL": b"518 18668",
"GridCornerLR": b"18800 18825",
"GridCornerUL": b"659 469",
"GridCornerUR": b"18942 623",
"OffsetX": b"0",
"OffsetY": b"0",
"Rows": b"5",
"TotalX": b"2560",
"TotalY": b"2560",
"swapXY": b"0",
}
if not bad:
headers.update(goodH)
else:
headers.update(badH)
prePadding = b"this text doesn't matter and is ignored\x04"
preHeadersOrder = ["magic", "version", "columns", "rows", "cellNo", "headerLen"]
headersEncoded = struct.pack(
"<" + "i" * len(preHeadersOrder),
*(preHeaders[header] for header in preHeadersOrder),
)
def packData(intensity, sdev, pixel):
return struct.pack("< f f h", intensity, sdev, pixel)
f.write(headersEncoded)
for header in headers:
try:
f.write(
bytes(header, encoding="utf-8") + b"=" + headers[header] + b"\n"
)
except TypeError:
f.write(header + b"=" + headers[header] + b"\n")
f.write(prePadding)
f.write(b"\x00" * 15)
for i in range(25):
f.write(packData(float(i), float(-i), 9))
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=0)
unittest.main(testRunner=runner)
|
from bisect import bisect_left
from collections import defaultdict
M = 9999991# 100007
N = 51
class MaClass:
def __init__(self):
self.authors = [None for i in range(M)]
self.books = [None for i in range(M)]
def H(self,s):
h = 0
for c in s:
h = h * N + ord(c)
return h % M
def func(self,s):
res = ""
for i in range(len(s)):
res += s[i] + ', '
res = res[:len(res) - 2:]
return res
def addBook(self, author, title):
cur = self.H(author)
while self.authors[cur] is not None:
if self.authors[cur] == author:
num = bisect_left(self.books[cur], title)
if not (num < len(self.books[cur]) and self.books[cur][num] == title):
self.books[cur].insert(num, title)
return
cur = (cur + N) % M
self.authors[cur] = author
self.books[cur] = [title]
def find(self, author, title):
cur = self.H(author)
while self.authors[cur] is not None:
if self.authors[cur] == author:
num = bisect_left(self.books[cur], title)
return True if num < len(self.books[cur]) and title == self.books[cur][num] else False
cur = (cur + N) % M
return False
def findByAuthor(self, author):
cur = self.H(author)
while self.authors[cur] is not None:
if self.authors[cur] == author:
return self.books[cur]
cur = (cur + N) % M
return []
def findByAuthor_(self, author):
cur = self.H(author)
while self.authors[cur] is not None:
if self.authors[cur] == author:
return self.func(self.books[cur])
cur = (cur + N) % M
return []
n = 3
l_to_e = defaultdict(list)
obj1 = MaClass()
for i in range(n):
e_word, l_words = input().split(' - ')
l_trans = l_words.split(', ')
for l in l_trans:
obj1.addBook(l, e_word)
l_to_e[l].append(e_word)
print(len(l_to_e))
for wrd in sorted(l_to_e):
print(wrd, ' - ', obj1.findByAuthor_(wrd))
|
##########################################################################
#
# MRC FGU Computational Genomics Group
#
# $Id$
#
# Copyright (C) 2009 Andreas Heger
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
##########################################################################
'''
PamMatrices.py -
======================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Code
----
'''
import string
import re
# ------------------------------------------------------------------------
# data section
# translate pam matrix
new_labels = "ACDEFGHIKLMNPQRSTVWY"
old_labels = "ARNDCQEGHILKMFPSTWYV"
# note: pam-matrices are in the literature cited as Mij, Mij gives the probability that j mutates into i.
# Therefore rows and columns are exchanged here.
pam1 = """
Ala Arg Asn Asp Cys Gln Glu Gly His Ile Leu Lys Met Phe Pro Ser Thr Trp Tyr Val
A R N D C Q E G H I L K M F P S T W Y V
Ala A 9867 2 9 10 3 8 17 21 2 6 4 2 6 2 22 35 32 0 2 18
Arg R 1 9913 1 0 1 10 0 0 10 3 1 19 4 1 4 6 1 8 0 1
Asn N 4 1 9822 36 0 4 6 6 21 3 1 13 0 1 2 20 9 1 4 1
Asp D 6 0 42 9859 0 6 53 6 4 1 0 3 0 0 1 5 3 0 0 1
Cys C 1 1 0 0 9973 0 0 0 1 1 0 0 0 0 1 5 1 0 3 2
Gln Q 3 9 4 5 0 9876 27 1 23 1 3 6 4 0 6 2 2 0 0 1
Glu E 10 0 7 56 0 35 9865 4 2 3 1 4 1 0 3 4 2 0 1 2
Gly G 21 1 12 11 1 3 7 9935 1 0 1 2 1 1 3 21 3 0 0 5
His H 1 8 18 3 1 20 1 0 9912 0 1 1 0 2 3 1 1 1 4 1
Ile I 2 2 3 1 2 1 2 0 0 9872 9 2 12 7 0 1 7 0 1 33
Leu L 3 1 3 0 0 6 1 1 4 22 9947 2 45 13 3 1 3 4 2 15
Lys K 2 37 25 6 0 12 7 2 2 4 1 9926 20 0 3 8 11 0 1 1
Met M 1 1 0 0 0 2 0 0 0 5 8 4 9874 1 0 1 2 0 0 4
Phe F 1 1 1 0 0 0 0 1 2 8 6 0 4 9946 0 2 1 3 28 0
Pro P 13 5 2 1 1 8 3 2 5 1 2 2 1 1 9926 12 4 0 0 2
Ser S 28 11 34 7 11 4 6 16 2 2 1 7 4 3 17 9840 38 5 2 2
Thr T 22 2 13 4 1 3 2 2 1 11 2 8 6 1 5 32 9871 0 2 9
Trp W 0 2 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 9976 1 0
Tyr Y 1 0 3 0 3 0 1 0 4 1 1 0 0 21 0 1 1 2 9945 1
Val V 13 2 1 1 3 2 2 3 3 57 11 1 17 1 3 2 10 0 2 9901
"""
pam250 = """
Ala Arg Asn Asp Cys Gln Glu Gly His Ile Leu Lys Met Phe Pro Ser Thr Trp Tyr Val
A R N D C Q E G H I L K M F P S T W Y V
Ala A 13 6 9 9 5 8 9 12 6 8 6 7 7 4 11 11 11 2 4 9
Arg R 3 17 4 3 2 5 3 2 6 3 2 9 4 1 4 4 3 7 2 2
Asn N 4 4 6 7 2 5 6 4 6 3 2 5 3 2 4 5 4 2 3 3
Asp D 5 4 8 11 1 7 10 5 6 3 2 5 3 1 4 5 5 1 2 3
Cys C 2 1 1 1 52 1 1 2 2 2 1 1 1 1 2 3 2 1 4 2
Gln Q 3 5 5 6 1 10 7 3 7 2 3 5 3 1 4 3 3 1 2 3
Glu E 5 4 7 11 1 9 12 5 6 3 2 5 3 1 4 5 5 1 2 3
Gly G 12 5 10 10 4 7 9 27 5 5 4 6 5 3 8 11 9 2 3 7
His H 2 5 5 4 2 7 4 2 15 2 2 3 2 2 3 3 2 2 3 2
Ile I 3 2 2 2 2 2 2 2 2 10 6 2 6 5 2 3 4 1 3 9
Leu L 6 4 4 3 2 6 4 3 5 15 34 4 20 13 5 4 6 6 7 13
Lys K 6 18 10 8 2 10 8 5 8 5 4 24 9 2 6 8 8 4 3 5
Met M 1 1 1 1 0 1 1 1 1 2 3 2 6 2 1 1 1 1 1 2
Phe F 2 1 2 1 1 1 1 1 3 5 6 1 4 32 1 2 2 4 20 3
Pro P 7 5 5 4 3 5 4 5 5 3 3 4 3 2 20 6 5 1 2 4
Ser S 9 6 8 7 7 6 7 9 6 5 4 7 5 3 9 10 9 4 4 6
Thr T 8 5 6 6 4 5 5 6 4 6 4 6 5 3 6 8 11 2 3 6
Trp W 0 2 0 0 0 0 0 0 1 0 1 0 0 1 0 1 0 55 1 0
Tyr Y 1 1 2 1 3 1 1 1 3 2 2 1 2 15 1 2 2 3 31 2
Val V 7 4 4 4 4 4 4 4 5 4 15 10 4 10 5 5 5 72 4 17
"""
# ------------------------------------------------------------------------
def ExponentiateMatrix(matrix):
l = len(matrix)
new_matrix = []
for row in range(0, l):
new_matrix.append([0.0] * l)
for col in range(0, l):
v = 0
for x in range(0, l):
v = v + matrix[row][x] * matrix[x][col]
new_matrix[row][col] = v
return new_matrix
def NormalizeMatrix(matrix):
l = len(matrix)
# normalize matrix:
for row in range(0, l):
total = 0.0
for col in range(0, l):
total += matrix[row][col]
for col in range(0, l):
matrix[row][col] = float(matrix[row][col]) / total
def MultiplyMatrices(matrix_a, matrix_b):
l = len(matrix_b)
m = len(matrix_a[0])
new_matrix = []
for row in range(0, l):
new_matrix.append([0.0] * l)
for col in range(0, m):
v = 0
for x in range(0, m):
v = v + matrix_a[row][x] * matrix_b[x][col]
new_matrix[row][col] = v
return new_matrix
def PrintMatrix(matrix):
# print matrix
for row in range(0, len(matrix)):
print new_labels[row], string.join(map(lambda x: "%6.4f " % x, matrix[row]), "")
def CreateMatrix(matrix_string):
new_indices = {}
for x in range(0, len(new_labels)):
new_indices[new_labels[x]] = x
# create matrix
matrix = []
for i in range(0, len(new_labels)):
matrix.append([0.0] * len(new_labels))
rx = re.compile("\s+")
row = 0
for line in string.split(matrix_string, "\n")[3:]:
data = map(string.atoi, rx.split(line)[2:])
if len(data) != len(old_labels):
continue
for col in range(0, len(old_labels)):
# exchange row and col
matrix[new_indices[old_labels[col]]][
new_indices[old_labels[row]]] = data[col]
row = row + 1
return matrix
def GetIdentityMatrix():
matrix = []
for i in range(0, len(new_labels)):
matrix.append([0.0] * len(new_labels))
for i in range(0, len(new_labels)):
matrix[i][i] = 1.0
return matrix
def GetMatrix(matrix_number):
current_matrix = CreateMatrix(pam1)
NormalizeMatrix(current_matrix)
result_matrix = GetIdentityMatrix()
x = matrix_number
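# Binary exponentiation: current_matrix holds PAM1^(2^k) and is multiplied
# into result_matrix whenever the k-th bit of matrix_number is set, so the
# result is PAM1^matrix_number.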
while x > 0:
if x & 1:
result_matrix = MultiplyMatrices(result_matrix, current_matrix)
x = x >> 1
current_matrix = ExponentiateMatrix(current_matrix)
return result_matrix
# ------------------------------------------------------------------------
if __name__ == '__main__':
matrix = GetMatrix(100)
PrintMatrix(matrix)
|
URL_MAPPER = {
'COL:em-bonds':'https://www.trounceflow.com/api/v1/chart/granularbondholdingschart/non-resident-portfolio-flows-in-colombia-chart-in-colombian-peso.json',
'BRA:em-bonds':'https://www.trounceflow.com/api/v1/chart/granularbondholdingsflowchart/foreign-holdings-flow-in-brazil-chart.json',
'CHN:em-bonds':'https://www.trounceflow.com/api/v1/chart/granularbondholdingschart/non-resident-portfolio-flows-in-china-chart-in-chinese-yuan.json',
'ASIA:debt':'https://www.trounceflow.com/api/v1/chart/cumulativebyyearcategoryfundflowchart/cumulative-fund-flows-for-asia-debt-funds.json',
'GBI-EM:fundflow':'https://www.trounceflow.com/api/v1/chart/categoryfundflowchart/daily-em-debt-funds-flow-chart.json',
'IND:bbg':'https://www.trounceflow.com/api/v1/chart/indexchart/fiinnetusd-index-chart-in-usd.json',
'ZAF:bbg':'https://www.trounceflow.com/api/v1/chart/indexchart/sabo-index-chart-in-usd.json',
'THA:bbg':'https://www.trounceflow.com/api/v1/chart/indexchart/tbtvnetusd-index-chart-in-usd.json',
}
|
from pylangacq.dependency import DependencyGraph
_CHAT_GRAPH_DATA = [
("but", "CONJ", "but", (1, 3, "LINK")),
("I", "PRO:SUB", "I", (2, 3, "SUBJ")),
("thought", "V", "think&PAST", (3, 0, "ROOT")),
("you", "PRO", "you", (4, 3, "OBJ")),
("wanted", "V", "want-PAST", (5, 3, "JCT")),
("me", "PRO:OBJ", "me", (6, 5, "POBJ")),
("to", "INF", "to", (7, 8, "INF")),
("turn", "V", "turn", (8, 3, "XCOMP")),
("it", "PRO", "it", (9, 8, "OBJ")),
(".", ".", "", (10, 3, "PUNCT")),
]
def test_dep_graph_to_tikz():
graph = DependencyGraph(_CHAT_GRAPH_DATA)
assert (
graph.to_tikz()
== """
\\begin{dependency}[theme = simple]
\\begin{deptext}[column sep=1em]
but \\& I \\& thought \\& you \\& wanted \\& me \\& to \\& turn \\& it \\& . \\\\
\\end{deptext}
\\deproot{3}{ROOT}
\\depedge{1}{3}{LINK}
\\depedge{2}{3}{SUBJ}
\\depedge{3}{0}{ROOT}
\\depedge{4}{3}{OBJ}
\\depedge{5}{3}{JCT}
\\depedge{6}{5}{POBJ}
\\depedge{7}{8}{INF}
\\depedge{8}{3}{XCOMP}
\\depedge{9}{8}{OBJ}
\\depedge{10}{3}{PUNCT}
\\end{dependency}
""".strip() # noqa
)
def test_dep_graph_to_conll():
graph = DependencyGraph(_CHAT_GRAPH_DATA)
assert (
graph.to_conll()
== """
but CONJ 3 LINK
I PRO:SUB 3 SUBJ
thought V 0 ROOT
you PRO 3 OBJ
wanted V 3 JCT
me PRO:OBJ 5 POBJ
to INF 8 INF
turn V 3 XCOMP
it PRO 8 OBJ
. . 3 PUNCT
""".strip()
)
|
import curses
from typing import Dict
from typing import NamedTuple
from typing import Optional
from typing import Tuple
from babi import color_kd
from babi.color import Color
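# curses.init_color() expects RGB components in the range 0-1000, while Color
# stores 8-bit components (0-255); the 1000/255 factor below rescales them.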
def _color_to_curses(color: Color) -> Tuple[int, int, int]:
factor = 1000 / 255
return int(color.r * factor), int(color.g * factor), int(color.b * factor)
class ColorManager(NamedTuple):
colors: Dict[Color, int]
raw_pairs: Dict[Tuple[int, int], int]
def init_color(self, color: Color) -> None:
if curses.can_change_color():
n = min(self.colors.values(), default=256) - 1
self.colors[color] = n
curses.init_color(n, *_color_to_curses(color))
elif curses.COLORS >= 256:
self.colors[color] = color_kd.nearest(color, color_kd.make_256())
else:
self.colors[color] = -1
def color_pair(self, fg: Optional[Color], bg: Optional[Color]) -> int:
fg_i = self.colors[fg] if fg is not None else -1
bg_i = self.colors[bg] if bg is not None else -1
return self.raw_color_pair(fg_i, bg_i)
def raw_color_pair(self, fg: int, bg: int) -> int:
try:
return self.raw_pairs[(fg, bg)]
except KeyError:
pass
n = self.raw_pairs[(fg, bg)] = len(self.raw_pairs) + 1
curses.init_pair(n, fg, bg)
return n
@classmethod
def make(cls) -> 'ColorManager':
return cls({}, {})
|
"""Comment copied from Python/compile.c:
All about a_lnotab.
c_lnotab is an array of unsigned bytes disguised as a Python string.
It is used to map bytecode offsets to source code line #s (when needed
for tracebacks).
The array is conceptually a list of
(bytecode offset increment, line number increment)
pairs. The details are important and delicate, best illustrated by example:
byte code offset source code line number
0 1
6 2
50 7
350 307
361 308
The first trick is that these numbers aren't stored, only the increments
from one row to the next (this doesn't really work, but it's a start):
0, 1, 6, 1, 44, 5, 300, 300, 11, 1
The second trick is that an unsigned byte can't hold negative values, or
values larger than 255, so (a) there's a deep assumption that byte code
offsets and their corresponding line #s both increase monotonically, and (b)
if at least one column jumps by more than 255 from one row to the next, more
than one pair is written to the table. In case #b, there's no way to know
from looking at the table later how many were written. That's the delicate
part. A user of c_lnotab desiring to find the source line number
corresponding to a bytecode address A should do something like this
lineno = addr = 0
for addr_incr, line_incr in c_lnotab:
addr += addr_incr
if addr > A:
return lineno
lineno += line_incr
Note: this is no longer valid as of CPython 3.6. After CPython 3.6,
the line offset is signed, and this code should be used:
lineno = addr = 0
for addr_incr, line_incr in co_lnotab:
addr += addr_incr
if addr > A:
return lineno
if line_incr >= 0x80:
line_incr -= 0x100
lineno += line_incr
In order for this to work, when the addr field increments by more than 255,
the line # increment in each pair generated must be 0 until the remaining addr
increment is < 256. So, in the example above, assemble_lnotab (it used
to be called com_set_lineno) should not (as was actually done until 2.2)
expand 300, 300 to 255, 255, 45, 45,
but to 255, 0, 45, 255, 0, 45.
"""
def lnotab(pairs, first_lineno=0):
"""Yields byte integers representing the pairs of integers passed in."""
assert first_lineno <= pairs[0][1]
cur_byte, cur_line = 0, first_lineno
for byte_off, line_off in pairs:
byte_delta = byte_off - cur_byte
line_delta = line_off - cur_line
assert byte_delta >= 0
while byte_delta > 255:
yield 255 # byte
yield 0 # line
byte_delta -= 255
yield byte_delta
# The threshold of 0x80 is smaller than necessary on Python
# 3.4 and 3.5 (the value is treated as unsigned), but won't
# produce an incorrect lnotab. On Python 3.6+, 0x80 is the
# correct value.
while line_delta >= 0x80:
yield 0x7F # line
yield 0 # byte
line_delta -= 0x7F
while line_delta < -0x80:
yield 0x80 # line
yield 0 # byte
line_delta += 0x80
if line_delta < 0:
line_delta += 0x100
assert 0 <= line_delta <= 0xFF
yield line_delta
cur_byte, cur_line = byte_off, line_off
def lnotab_string(pairs, first_lineno=0):
return bytes(lnotab(pairs, first_lineno))
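# Tiny illustrative check (the offset/line pairs are the ones used as the
# example in the module docstring above, not taken from a real code object):
if __name__ == '__main__':
    example_pairs = [(0, 1), (6, 2), (50, 7), (350, 307), (361, 308)]
    print(list(lnotab_string(example_pairs, first_lineno=0)))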
|
import numpy as np
import pandas as pd
array = [1,2,np.nan,4,5,np.nan,np.nan,1,2,2,np.nan,np.nan,4]
df_array = pd.DataFrame(array)
print("Arranjo original")
print(df_array)
print("\n")
print("Determinando a quantidade de NaNs no arranjo")
n_nans = df_array.isnull().values.sum()
print(n_nans)
print("\n")
print("Substituindo os NaNs por 0")
df_array_new = df_array.fillna(0)
print("\n")
print("Arranjo modificado")
print(df_array_new)
print("\n")
print("Média arranjo original ",df_array.mean())
print("\n")
print("Média arranjo modificado ",df_array_new.mean())
|
from collections.abc import Iterable
from gaptrain.log import logger
import numpy as np
def soap(*args):
"""
Create a SOAP vector using dscribe (https://github.com/SINGROUP/dscribe)
for a set of configurations
soap(config) -> [[v0, v1, ..]]
soap(config1, config2) -> [[v0, v1, ..], [u0, u1, ..]]
soap(configset) -> [[v0, v1, ..], ..]
---------------------------------------------------------------------------
:param args: (gaptrain.configurations.Configuration) or
(gaptrain.configurations.ConfigurationSet)
:return: (np.ndarray) shape = (len(args), n) where n is the length of the
SOAP descriptor
"""
from dscribe.descriptors import SOAP
configurations = args
# If a configuration set is specified then use that as the list of configs
if len(args) == 1 and isinstance(args[0], Iterable):
configurations = args[0]
logger.info(f'Calculating SOAP descriptor for {len(configurations)}'
f' configurations')
unique_elements = list(set(atom.label for atom in configurations[0].atoms))
# Compute the average SOAP vector where the expansion coefficients are
# calculated over averages over each site
soap_desc = SOAP(species=unique_elements,
rcut=5, # Distance cutoff (Å)
nmax=6, # Maximum component of the radials
lmax=6, # Maximum component of the angular
average='inner')
soap_vec = soap_desc.create([conf.ase_atoms() for conf in configurations])
logger.info('SOAP calculation done')
return soap_vec
def soap_kernel_matrix(configs, zeta=4):
"""
Calculate the kernel matrix between a set of configurations where the
kernel is
K(p_a, p_b) = (p_a . p_b / (p_a.p_a x p_b.p_b)^1/2 )^ζ
:param configs: (gaptrain.configurations.ConfigurationSet)
:param zeta: (float) Power to raise the kernel matrix to
:return: (np.ndarray) shape = (len(configs), len(configs))
"""
soap_vecs = soap(configs)
n, _ = soap_vecs.shape
# Normalise each soap vector (row)
soap_vecs = soap_vecs / np.linalg.norm(soap_vecs, axis=1).reshape(n, 1)
k_mat = np.matmul(soap_vecs, soap_vecs.T)
k_mat = np.power(k_mat, zeta)
return k_mat
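# Quick numpy-only illustration of the kernel formula above using random
# stand-in descriptor vectors (the shapes and zeta=4 are arbitrary assumptions;
# no dscribe call is needed for this check):
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    p = rng.random((3, 10))
    p /= np.linalg.norm(p, axis=1).reshape(-1, 1)
    k = np.power(p @ p.T, 4)
    print(np.allclose(np.diag(k), 1.0))  # normalised vectors give K(p, p) = 1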
|
class Scenario:
STAGES = [
{
"name": "install",
"path": "build/{version}/com/se/vulns/javacard/vulns.new.cap",
"comment": "",
},
{
"name": "install",
"path": "build/{version}/com/se/applets/javacard/applets.cap",
},
{
"name": "send",
"comment": "NREAD_SHORT",
"payload": "0x80 0x10 0x01 0x02 0x06 0x00 0x00 0x00 0x00 0x00 0x00 0x7F",
"optional": False,
},
{
"name": "send",
"comment": "NWRITE_SHORT",
"payload": "0x80 0x11 0x01 0x02 0x08 0x00 0x00 0x00 0x00 0x00 0x00 0x11 0x22 0x7F",
"optional": False,
},
]
|
import numpy as np
from functions import priors, likelihood
from functions import BIGNEG
import emcee
from emcee.autocorr import AutocorrError
from emcee.moves import DESnookerMove
class Tuner(object):
"""Decide which stretch parameter is best to use.
Tracks previous trials and generates new trials
along with when to stop.
"""
def __init__(self, max_depth=3):
"""max_depth determines how granular a search for
the optimal parameter value is conducted.
"""
self.trial_queue = [0.5, 1.0, 1.5]
self.alphas = []
self.acceptances = []
self.good_alpha = None
self.depth = 0
self.max_depth = max_depth
def update(self, alpha, f_accept):
"""Update record of trials and results."""
self.alphas.append(alpha)
self.acceptances.append(f_accept)
if 0.2 < f_accept < 0.8:
self.good_alpha = alpha
self.acceptances = [x for _, x in sorted(zip(self.alphas,
self.acceptances))]
self.alphas = sorted(self.alphas)
def get_trial(self):
"""What parameter value to try next?
Returns: alpha, stopcode
alpha (float) value of parameter for next trial.
stopcode (boolean) whether to stop trials
"""
if self.good_alpha is not None:
return self.good_alpha, True
if self.depth >= self.max_depth:
return self.get_consolation(), True
if len(self.trial_queue) < 1:
self.update_queue()
tri = self.trial_queue.pop(0)
return tri, False
def update_queue(self):
"""Add further trials to the queue."""
alps, accs = self.alphas, self.acceptances
best = np.argmax(accs)
EDGE = False
if best == 0:
left = 0.
EDGE = True
else:
left = alps[best-1]
if best == len(accs)-1:
right = alps[-1] * 2
EDGE = True
else:
right = alps[best+1]
if not EDGE:
self.depth += 1
self.trial_queue.append((alps[best]+left)/2)
self.trial_queue.append((alps[best]+right)/2)
def get_consolation(self):
"""Get most value of most successful trial."""
best = np.argmax(self.acceptances)
return self.alphas[best]
def prob_func(x, b, mdls, berr, prior, PFOS, C8):
""" log-probability of posterior.
Takes current state vector and obs vector
along with additional information
and returns posterior log-probability of
sample based on defined likelihood and prior.
Arguments:
x (array) proposal
b (array) observations
mdls (array) observation MDLs
        berr (array) observation relative errors
prior (string) name of prior function to use
PFOS (tuple(float,float)) observed PFOS and PFOS MDL
C8 (Boolean) whether C8 was measured.
Returns:
(float) log-probability of proposal given observations
"""
# log-posterior, so sum prior and likelihood
lp = priors[prior](x, PFOS=PFOS, b=b)
if not np.isfinite(lp):
return BIGNEG
ll = likelihood(x, b, mdls, berr, C8)
if not np.isfinite(ll):
return BIGNEG
return ll + lp
def sample_measurement(b, mdls, berr, PFOS, prior='AFFF',
C8=False, nwalkers=32,
Nincrement=2000, TARGET_EFFECTIVE_STEPS=2500,
MAX_STEPS=100000, MAX_DEPTH=3):
"""For a given measurement b, sample the posterior.
    Some MCMC options are set here to do ensemble
    MCMC sampling over the state vector dimensions
    using 32 walkers doing Snooker moves.
Arguments:
b (array) observations
mdls (array) observation MDLs
berr (array) observation relative errors
PFOS (tuple(float,float)) observed PFOS and PFOS MDL
Keyword arguments:
prior (string; default 'AFFF') name of prior function to use
C8 (boolean; default False) whether C8 was measured
nwalkers (int; default 32) number of samplers in ensemble
        Nincrement (int; default 2000) iterations between status checks
TARGET_EFFECTIVE_STEPS (int; default 2500) effective sample size goal
        MAX_STEPS (int; default 100000) cap on number of iterations
MAX_DEPTH (int; default 3) level of granularity in
windowed meta-parameter search
Returns:
(emcee.EnsembleSampler) ensemble of samplers with results
"""
ndim = 9
WEGOOD = False
tuner = Tuner(max_depth=MAX_DEPTH)
print('-' * 50)
print(f'Number of walkers moving on each iteration: {nwalkers}')
print('Doing burn-in initialization and parameter tuning...')
while not WEGOOD:
alpha, WEGOOD = tuner.get_trial()
if WEGOOD:
print(f'alpha of {alpha} selected.')
sampler = emcee.EnsembleSampler(nwalkers,
ndim,
prob_func,
args=(b, mdls, berr, prior, PFOS, C8),
moves=[(DESnookerMove(alpha),
1.0)])
init = np.random.rand(nwalkers, ndim)
state = sampler.run_mcmc(init, Nincrement)
sampler.reset()
INIT = False
S = 1
while not INIT:
try:
state = sampler.run_mcmc(state, Nincrement)
INIT = True
except ValueError:
print('...')
                state = sampler.run_mcmc(init, int(Nincrement * S))
S *= 1.5
f_accept = np.mean(sampler.acceptance_fraction)
print(f'acceptance rate is {np.mean(f_accept):.2f} when alpha is {alpha}')
tuner.update(alpha, f_accept)
print(f'Sampling posterior in {Nincrement}-iteration increments.')
WEGOOD = False
count = 0
prev_Nindep = 0
Nindep = 1
sampler.reset()
while (not WEGOOD) and (count < MAX_STEPS):
state = sampler.run_mcmc(state, Nincrement)
f_accept = np.mean(sampler.acceptance_fraction)
count += Nincrement
try:
tac = sampler.get_autocorr_time()
mtac = np.nanmax(tac) # go by the slowest-sampling dim or mean??
if np.isnan(mtac):
WEGOOD = False
else:
WEGOOD = True
except AutocorrError:
mtac = 'unavailable'
WEGOOD = False
print(f'After {count} iterations, autocorr time: {mtac}')
WEGOOD = False
while (not WEGOOD) and (count < MAX_STEPS):
if Nindep < prev_Nindep:
print("WARNING: Number of independent samples decreasing!")
state = sampler.run_mcmc(state, Nincrement)
f_accept = np.mean(sampler.acceptance_fraction)
count += Nincrement
try:
tac = sampler.get_autocorr_time()
mtac = np.nanmax(tac)
except AutocorrError:
pass
prev_Nindep = Nindep
Nindep = count * nwalkers / mtac
        print(f'After {count} iterations, effective number of samples: {int(Nindep)}')
if Nindep > TARGET_EFFECTIVE_STEPS:
WEGOOD = True
if MAX_STEPS <= count:
print("WARNING: maximum number of iterations reached! Terminating.")
print('SAMPLE DONE')
return sampler
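# Illustrative sketch of the Tuner search loop in isolation, with a synthetic
# acceptance curve standing in for a real emcee run (the peak acceptance is
# assumed to sit near alpha = 1.2; no sampling happens here).
if __name__ == '__main__':
    tuner = Tuner(max_depth=3)
    done = False
    while not done:
        alpha, done = tuner.get_trial()
        if done:
            print('selected alpha:', alpha)
            break
        fake_accept = max(0.05, 0.85 - abs(alpha - 1.2))  # pretend acceptance fraction
        tuner.update(alpha, fake_accept)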
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019 Ryan L. Collins <rlcollins@g.harvard.edu>
# and the Talkowski Laboratory
# Distributed under terms of the MIT license.
"""
Collect lists of genes overlapped by a BED file of regions
"""
import pybedtools as pbt
import argparse
from sys import stdout
from os import path
import gzip
import csv
import pandas as pd
import re
import subprocess
def process_gtf(gtf_in):
"""
Read & process gtf
"""
gtfbt = pbt.BedTool(gtf_in)
# Build lists of eligible gene names and transcript IDs
genes, transcripts = [], []
for f in gtfbt:
if f.fields[2] == 'transcript':
gname = f.attrs['gene_name']
tname = f.attrs['transcript_id']
if gname not in genes:
genes.append(gname)
if tname not in transcripts:
transcripts.append(tname)
# Filter & clean records in gtf
def _filter_gtf(feature):
"""
Restrict GTF features to desired elements
"""
if feature.fields[2] == 'transcript' \
and feature.attrs['gene_name'] in genes \
and feature.attrs['transcript_id'] in transcripts:
return True
else:
return False
attrs_to_drop = 'gene_id gene_type gene_status transcript_type ' + \
'transcript_status transcript_name protein_id ' + \
'tag ccdsid havana_gene havana_transcript'
attrs_to_drop = attrs_to_drop.split()
def _clean_feature(feature):
"""
Clean unnecessary fields & info from GTF features
"""
for key in attrs_to_drop:
if key in feature.attrs.keys():
feature.attrs.pop(key)
return feature
gtfbt = gtfbt.filter(_filter_gtf).filter(_clean_feature).saveas()
return gtfbt, genes, transcripts
def annotate_regions(regions_path, gtfbt):
"""
Load regions and annotate with genes
"""
regions = pd.read_csv(regions_path, sep='\t')
# regions.rename(columns={regions.columns.tolist()[0] : \
# regions.columns.tolist()[0].replace('#', '')},
# inplace=True)
intervals_dict = {rid : i.split(';') for rid, i in regions.iloc[:, [3, -2]].values}
intervals_str = ''
for rid, ints in intervals_dict.items():
for i in ints:
intervals_str += '\t'.join([re.sub(':|-', '\t', i), rid]) + '\n'
intervals_bt = pbt.BedTool(intervals_str, from_string=True)
genes_dict = {x : [] for x in regions.iloc[:, 3].values}
for x in intervals_bt.intersect(gtfbt, wa=True, wb=True):
gene = str([f.split()[1].replace('"', '') for f in x[-1].split(';') \
if f.startswith('gene_name')][0])
genes_dict[x.name].append(gene)
genes_dict = {rid : sorted(list(set(g))) for rid, g in genes_dict.items()}
ngenes_dict = {rid : len(g) for rid, g in genes_dict.items()}
genes_str_dict = {rid : ';'.join(g) for rid, g in genes_dict.items()}
regions['n_genes'] = regions.iloc[:, 3].map(ngenes_dict)
regions['genes'] = regions.iloc[:, 3].map(genes_str_dict)
return regions.sort_values(by=regions.columns[0:3].tolist(), axis=0)
def main():
"""
Main block
"""
# Parse command line arguments and options
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('regions', help='BED file of final regions after refinement. ' +
'Fourth column must be unique row name and second-to-last ' +
'column must be a semicolon-delimited list of chr:start-end ' +
'intervals to compare vs gtf.')
parser.add_argument('gtf', help='GTF of genes to consider.')
parser.add_argument('-o', '--outbed', help='Path to output file. ' +
'[default: stdout]')
parser.add_argument('-z', '--bgzip', dest='bgzip', action='store_true',
help='Compress output BED with bgzip.')
args = parser.parse_args()
# Open connection to output file
if args.outbed is None \
or args.outbed in 'stdout -'.split():
outbed = stdout
else:
if path.splitext(args.outbed)[-1] in '.gz .bz .bgz .bgzip .gzip'.split():
outbed_path = path.splitext(args.outbed)[0]
else:
outbed_path = args.outbed
outbed = open(outbed_path, 'w')
# Extract canonical transcripts from input GTF
gtfbt, genes, transcripts = process_gtf(args.gtf)
# Load regions & annotate with genes
regions = annotate_regions(args.regions, gtfbt)
# Sort & write original bed out to file with extra columns for n_genes and genes
regions.to_csv(outbed, sep='\t', na_rep='NA', header=True, index=False)
outbed.close()
# Bgzip output, if optioned
if args.outbed is not None \
and args.outbed not in 'stdout -'.split() \
and args.bgzip:
subprocess.run(['bgzip', '-f', outbed_path])
if __name__ == '__main__':
main()
|
# -*- coding:utf-8 -*-
from bs4 import BeautifulSoup
import requests
class Article:
def __init__(self):
self.title=""
self.time=""
self.text=""
self.category=""
self.tag=[]
class GetHtml(object):
def __init__(self,Url,Blog):
'''
        Construct a GetHtml object
It will give you the html of the article and the text of the time and title
:param Url: Blog_index
:param Blog: Blog_type
'''
self.__url=Url
self.__cnt=0
self.__article_list=[]
while self.__url!='':
url_list=self.get_url_list(Blog)
for url in url_list:
self.get_article(url,Blog)
def get_html(self,url):
try:
r=requests.get(url,timeout=30)
r.raise_for_status()
r.encoding=r.apparent_encoding
return r.text
except:
print("获取链接"+url+"时失败")
def get_url_list(self,Blog):
'''
:param Blog: Judge different blogs
:return: The url of every article
'''
url_list=[]
html=self.get_html(self.__url)
soup=BeautifulSoup(html,'lxml')
lista=[]
lista=soup.find_all(Blog['index_url']['tag'],attrs={Blog['index_url']['key']:Blog['index_url']['valve']})
next_page=soup.find(Blog['next_page']['tag'],attrs={Blog['next_page']['key']:Blog['next_page']['valve'],},text=Blog['next_page']['text'])
self.__url=''
if not next_page is None:
self.__url = next_page['href']
for Url in lista:
try:
url_list.append(Url.a['href'])
self.__cnt=self.__cnt+1
except:
                print('Failed to get the link of a single blog post')
return url_list
def get_article(self,url,Blog):
'''
        Get the html of the article, the text of the time and the title
:param url: The url of the article
:param Blog: Judge different blogs
:return: None
'''
html=self.get_html(url)
soup=BeautifulSoup(html,'lxml')
article_html=soup.find(Blog['body']['tag'],attrs={Blog['body']['key']:Blog['body']['valve']})
title=soup.find(Blog['title']['tag'],attrs={Blog['title']['key']:Blog['title']['valve']}).text
time=soup.find(Blog['time']['tag'],attrs={Blog['time']['key']:Blog['time']['valve']}).text
tag=[]
category=''
if Blog['name']=='cnblogs':
BlogData_soup = soup.find_all('script', type='text/javascript')
BlogData = str(BlogData_soup[0].text + BlogData_soup[2].text)
currentBlogApp=self.get_string(BlogData,'currentBlogApp',4,"'")
cb_blogId=self.get_string(BlogData, 'cb_blogId', 1, ",")
cb_entryId=self.get_string(BlogData, 'cb_entryId', 1, ",")
category,tag=self.get_categoris_cnblogs(currentBlogApp,cb_blogId,cb_entryId)
else:
category_soup=soup.find(Blog['category']['tag'],attrs={Blog['category']['key']:Blog['category']['valve']})
if not category_soup is None:
category=category_soup.contents[0]
            tag_soup=soup.find(Blog['tag']['tag'],attrs={Blog['tag']['key']:Blog['tag']['valve']})
tag_list=[]
if not tag_soup is None:
tag_list=tag_soup.find_all('a')
for i in tag_list:
tag.append(i.text)
article=Article()
article.text=article_html
article.time=time
article.category=category
article.tag=tag
article.title=title
self.__article_list.append(article)
def get_string(self,text, string_to_find_tag, length, end):
tag_start = text.find(string_to_find_tag)
string_to_find_start = tag_start + len(string_to_find_tag) + length
string_to_find_back = text.find(end, string_to_find_start)
string_to_find = text[string_to_find_start:string_to_find_back]
return str(string_to_find)
def get_categoris_cnblogs(self,blogApp,blogId,postId):
get_data={'blogApp':blogApp,'blogId':blogId,'postId':postId}
url = "http://www.cnblogs.com/mvc/blog/CategoriesTags.aspx"
r=requests.get(url,params=get_data)
r_json=r.json()
category_soup=BeautifulSoup(r_json['Categories'],'lxml')
tags_soup=BeautifulSoup(r_json['Tags'],'lxml')
if category_soup.text!='':
category=category_soup.a.text
else:
category=""
tag_list=[]
if tags_soup.text!='':
tags=tags_soup.find_all('a')
for i in tags:
tag_list.append(i.text)
return category,tag_list
def get_article_list(self):
return self.__article_list
def get_cnt(self):
return self.__cnt
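# Illustrative sketch of the Blog configuration dict that GetHtml expects; every
# tag / key / valve ('value') entry below is a placeholder, not a real blog layout.
if __name__ == '__main__':
    example_blog = {
        'name': 'example',
        'index_url': {'tag': 'div', 'key': 'class', 'valve': 'post-title'},
        'next_page': {'tag': 'a', 'key': 'class', 'valve': 'next', 'text': 'Next'},
        'body': {'tag': 'div', 'key': 'id', 'valve': 'content'},
        'title': {'tag': 'h1', 'key': 'class', 'valve': 'title'},
        'time': {'tag': 'span', 'key': 'class', 'valve': 'date'},
        'category': {'tag': 'a', 'key': 'class', 'valve': 'category'},
        'tag': {'tag': 'div', 'key': 'class', 'valve': 'tags'},
    }
    # Crawling needs a live site, so the call is left commented out:
    # crawler = GetHtml('https://example.com/blog', example_blog)
    # print(crawler.get_cnt(), 'articles collected')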
|
#encoding: utf-8
from PIL import Image
import numpy as np
from config import config as cfg
from torch.utils.data import Dataset
from torchvision import transforms as T
from sklearn.preprocessing import MultiLabelBinarizer
from imgaug import augmenters as iaa
import pathlib
import cv2
import pandas as pd
from torch.utils.data import DataLoader
import os
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
class HumanDataset(Dataset):
def __init__(self, images_df,
base_path,
target_shape=(512, 512),
augument=True,
use_yellow=False,
mode="train"):
if not isinstance(base_path, pathlib.Path):
base_path = pathlib.Path(base_path)
self.images_df = images_df.copy()
self.augument = augument
self.images_df.Id = self.images_df.Id.apply(lambda x: base_path / x)
self.mlb = MultiLabelBinarizer(classes=np.arange(0, cfg.num_classes))
self.mlb.fit(np.arange(0, cfg.num_classes))
self.mode = mode
self.target_shape = target_shape
self.use_yellow = use_yellow
def __len__(self):
return len(self.images_df)
def __getitem__(self, index):
X = self.read_images(index)
if not self.mode == "test":
labels = np.array(list(map(int, self.images_df.iloc[index].Target.split(' '))))
            y = np.eye(cfg.num_classes, dtype=float)[labels].sum(axis=0)
else:
y = str(self.images_df.iloc[index].Id.absolute())
if self.augument:
X = self.augumentor(X)
X = T.Compose([T.ToPILImage(), T.ToTensor()])(X)
return X.float(), y
def read_images(self, index):
row = self.images_df.iloc[index]
filename = str(row.Id.absolute())
if 'ENSG' in filename:
filename = os.path.split(filename)[-1]
filename = os.path.join(cfg.extra_data, filename)
images = np.array(Image.open(filename + ".png"))
else:
r = np.array(Image.open(filename + "_red.png"))
g = np.array(Image.open(filename + "_green.png"))
b = np.array(Image.open(filename + "_blue.png"))
images = [r, g, b]
if self.use_yellow:
y = np.array(Image.open(filename + "_yellow.png"))
images.append(y)
images = np.stack(images, axis=-1)
images = images.astype(np.uint8)
if self.target_shape == (512, 512) and images.shape[:2] == (512, 512):
return images
else:
return cv2.resize(images, self.target_shape)
def augumentor(self, image):
sometimes = lambda aug: iaa.Sometimes(0.8, aug)
augment_img = iaa.Sequential([iaa.Fliplr(0.5),
iaa.Flipud(0.5),
iaa.BilateralBlur(),
iaa.Affine(rotate=90),
iaa.ContrastNormalization((0.8, 1.3)),
sometimes(iaa.Affine(scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
translate_percent={"x": (-0.1, 0.1), "y": (-0.1, 0.1)},
rotate=(-30, 30),
shear=(-5, 5)
))
],
random_order=True)
image_aug = augment_img.augment_image(image)
return image_aug
def get_dataloader(mix_up=False):
train_df = pd.read_csv(cfg.train_csv)
test_df = pd.read_csv(cfg.submission_csv)
train_data_list, val_data_list = train_test_split(train_df, test_size=cfg.split_ratio, random_state=42)
trainset = HumanDataset(train_data_list, cfg.train_data, mode="train")
train_loader = DataLoader(trainset, batch_size=cfg.batch_size, shuffle=True, pin_memory=True, num_workers=cfg.n_workers)
val_gen = HumanDataset(val_data_list, cfg.train_data, augument=False, mode="train")
val_loader = DataLoader(val_gen, batch_size=cfg.batch_size, shuffle=False, pin_memory=True, num_workers=cfg.n_workers)
test_gen = HumanDataset(test_df, cfg.test_data, augument=False, mode="test")
test_loader = DataLoader(test_gen, 1, shuffle=False, pin_memory=True, num_workers=cfg.n_workers)
if mix_up:
mix_loader = DataLoader(trainset, batch_size=cfg.batch_size, shuffle=False, pin_memory=True, num_workers=cfg.n_workers)
return train_loader, val_loader, test_loader, mix_loader
else:
return train_loader, val_loader, test_loader
def get_kfold_dataloader(k=5, n_select=0, use_extra=True, target_shape=(512, 512)):
kf = KFold(k, random_state=42)
train_df = pd.read_csv(cfg.train_csv)
if use_extra:
extra_df = pd.read_csv(cfg.extra_csv)
print('number of extra data is ', len(extra_df), '\tnumber of hpi data is ', len(train_df))
train_df = pd.concat([train_df, extra_df], axis=0)
print('after concat, number of train data is ', len(train_df))
test_df = pd.read_csv(cfg.submission_csv)
print('trainset split ', kf.get_n_splits(train_df), 'folds')
train_val_groups = []
for train_index, val_index in kf.split(train_df):
print('length of trainset is ', len(train_index), '\tlength of valset is ', len(val_index))
train_val_groups.append([train_df.iloc[train_index], train_df.iloc[val_index]])
train_data_list, val_data_list = train_val_groups[n_select]
trainset = HumanDataset(train_data_list, cfg.train_data, mode="train", target_shape=target_shape)
train_loader = DataLoader(trainset, batch_size=cfg.batch_size, shuffle=True, pin_memory=True, num_workers=cfg.n_workers)
val_gen = HumanDataset(val_data_list, cfg.train_data, augument=False, mode="train", target_shape=target_shape)
val_loader = DataLoader(val_gen, batch_size=cfg.batch_size, shuffle=False, pin_memory=True, num_workers=cfg.n_workers)
test_gen = HumanDataset(test_df, cfg.test_data, augument=False, mode="test", target_shape=target_shape)
test_loader = DataLoader(test_gen, 1, shuffle=False, pin_memory=True, num_workers=cfg.n_workers)
return train_loader, val_loader, test_loader
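# Illustrative sketch: pull a single batch from the k-fold loaders. This assumes
# the CSV files and image folders referenced in `config.config` exist locally,
# so it is a usage outline rather than a self-contained test.
if __name__ == '__main__':
    train_loader, val_loader, test_loader = get_kfold_dataloader(k=5, n_select=0, use_extra=False)
    images, targets = next(iter(train_loader))
    print(images.shape, targets.shape)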
|
"""
Django settings for Match4healthcare project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
from os import path
from django.contrib.messages import constants as messages
from django.utils.translation import ugettext_lazy as _
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# or better:
# add paths here and import: from django.conf import settings and use settings.XXX_DIR
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
RUN_DIR = os.path.join(BASE_DIR, "run")
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django_filters",
"widget_tweaks",
"crispy_forms",
"django_tables2",
"apps.mapview",
"apps.iamstudent",
"apps.ineedstudent",
"apps.accounts",
"apps.use_statistics",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django.middleware.locale.LocaleMiddleware",
]
ROOT_URLCONF = "match4healthcare.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
LOGIN_REDIRECT_URL = "/accounts/login_redirect"
CRISPY_TEMPLATE_PACK = "bootstrap4"
WSGI_APPLICATION = "match4healthcare.wsgi.application"
MAX_EMAILS_PER_HOSPITAL_PER_DAY = 200
NEWSLETTER_REQUIRED_APPROVERS = 2
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
AUTH_USER_MODEL = "accounts.User"
USE_L10N = True
USE_TZ = True
# Translations
# Provide a lists of languages which your site supports.
LANGUAGES = (
("en", _("English")),
("de", _("Deutsch")),
)
# Set the default language for your site.
LANGUAGE_CODE = "de"
# Tell Django where the project's translation files should be.
LOCALE_PATHS = (os.path.join(BASE_DIR, "locale"),)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
PROJECT_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
STATIC_URL = "/static/"
MEDIA_ROOT = os.path.join(RUN_DIR, "media")
MEDIA_URL = "/media/"
STATIC_ROOT = os.path.join(RUN_DIR, "static")
STATICFILES_DIRS = (os.path.join(BASE_DIR, "static"),)
MAPBOX_TOKEN = os.getenv("MAPBOX_TOKEN")
MESSAGE_TAGS = {
messages.DEBUG: "alert-info",
messages.INFO: "alert-info",
messages.SUCCESS: "alert-success",
messages.WARNING: "alert-warning",
messages.ERROR: "alert-danger",
}
# Configure Logging for all environments
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {
"require_debug_false": {"()": "django.utils.log.RequireDebugFalse",},
"require_debug_true": {"()": "django.utils.log.RequireDebugTrue",},
},
"formatters": {
"json": { # Made for Django Requests and General logging, will create parseable error logs
"class": "match4healthcare.logging.formatters.DjangoRequestJSONFormatter"
},
"text": {
"class": "match4healthcare.logging.formatters.OneLineExceptionFormatter",
"format": "%(asctime)s: %(name)-12s %(levelname)-8s |%(message)s|",
},
},
"handlers": {
"mail_admin": {
"class": "logging.NullHandler" # Make sure to disable Djangos default e-Mail Logger
},
"null": {"class": "logging.NullHandler",}, # Disable Django Default Server Logger
"console": {"class": "logging.StreamHandler", "formatter": "text",},
"errorlogfile": {
"class": "logging.handlers.RotatingFileHandler",
"formatter": "json",
"level": "ERROR",
"filename": path.join(RUN_DIR, "match4healthcare.json.error.log"),
"maxBytes": 1024 * 1024 * 15, # 15MB
"backupCount": 10,
},
"auditlogfile": {
"class": "logging.handlers.RotatingFileHandler",
"formatter": "json",
"level": "INFO",
"filename": path.join(RUN_DIR, "match4healthcare.json.audit.log"),
"maxBytes": 1024 * 1024 * 15, # 15MB
"backupCount": 10,
},
"slack": {
"level": "ERROR",
"()": "match4healthcare.logging.loggers.SlackMessageHandlerFactory",
"webhook_url": os.environ.get("SLACK_LOG_WEBHOOK", ""),
},
},
# Now put it all together
"loggers": {
"": { # Root Logger Configuration, should catch all remaining Warnings and Errors, that were not specifically handled below
"level": "WARNING",
"handlers": ["errorlogfile", "console", "slack"],
},
"apps": { # Logging Configuration for all Django apps, i.e. our software, matches any loggers under apps subdirectory using __name__
"level": "INFO",
"handlers": ["auditlogfile"],
"propagate": False,
},
"django.request": { # Main error logger and last line of defense for #500 Errors, will log all errors
"level": "WARNING",
"handlers": ["errorlogfile", "console", "slack"],
"propagate": False,
},
"django.server": { # Only for development server, all of these are mirrored on django.request anyway
"level": "ERROR",
"handlers": ["null"],
"propagate": False,
},
},
}
# ========== determine whether this is a forked version of m4h ==========#
IS_TRAVIS = "TRAVIS" in os.environ and bool(os.environ["TRAVIS"])
IS_CI = "CI" in os.environ and bool(os.environ["CI"])
IS_FORK = False
if IS_TRAVIS and os.environ.get("TRAVIS_PULL_REQUEST_SLUG", "") not in ("", "match4everyone/match4healthcare"):
    IS_FORK = True
|
import hashlib
import logging
from django import http
from django.core.cache import cache
from django.contrib import messages
from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.http import require_POST
from django.db.models import Q, Count
from funfactory.urlresolvers import reverse
from jsonview.decorators import json_view
from airmozilla.base.utils import paginate
from airmozilla.main.models import Event, VidlySubmission
from airmozilla.manage import forms
from airmozilla.manage import vidly
from .decorators import superuser_required
@superuser_required
def vidly_media(request):
events = Event.objects.filter(
Q(template__name__contains='Vid.ly')
|
Q(pk__in=VidlySubmission.objects.all()
.values_list('event_id', flat=True))
)
status = request.GET.get('status')
repeated = request.GET.get('repeated') == 'event'
repeats = {}
if status:
if status not in ('New', 'Processing', 'Finished', 'Error'):
return http.HttpResponseBadRequest("Invalid 'status' value")
# make a list of all tags -> events
_tags = {}
for event in events:
environment = event.template_environment or {}
if not environment.get('tag') or environment.get('tag') == 'None':
continue
_tags[environment['tag']] = event.id
event_ids = []
for tag in vidly.medialist(status):
try:
event_ids.append(_tags[tag])
except KeyError:
# it's on vid.ly but not in this database
logging.debug("Unknown event with tag=%r", tag)
events = events.filter(id__in=event_ids)
elif repeated:
repeats = dict(
(x['event_id'], x['event__id__count'])
for x in
VidlySubmission.objects
.values('event_id')
.annotate(Count('event__id'))
.filter(event__id__count__gt=1)
)
events = Event.objects.filter(id__in=repeats.keys())
def get_repeats(event):
return repeats[event.id]
events = events.order_by('-start_time')
events = events.select_related('template')
paged = paginate(events, request.GET.get('page'), 15)
vidly_resubmit_form = forms.VidlyResubmitForm()
context = {
'paginate': paged,
'status': status,
'vidly_resubmit_form': vidly_resubmit_form,
'repeated': repeated,
'get_repeats': get_repeats,
}
return render(request, 'manage/vidly_media.html', context)
@superuser_required
@json_view
def vidly_media_status(request):
if request.GET.get('tag'):
tag = request.GET.get('tag')
else:
if not request.GET.get('id'):
return http.HttpResponseBadRequest("No 'id'")
event = get_object_or_404(Event, pk=request.GET['id'])
environment = event.template_environment or {}
if not environment.get('tag') or environment.get('tag') == 'None':
# perhaps it has a VidlySubmission anyway
submissions = (
VidlySubmission.objects
.exclude(tag__isnull=True)
.filter(event=event).order_by('-submission_time')
)
for submission in submissions[:1]:
environment = {'tag': submission.tag}
break
else:
return {}
tag = environment['tag']
cache_key = 'vidly-query-{md5}'.format(
md5=hashlib.md5(tag.encode('utf8')).hexdigest().strip())
force = request.GET.get('refresh', False)
if force:
results = None # force a refresh
else:
results = cache.get(cache_key)
if not results:
results = vidly.query(tag).get(tag, {})
expires = 60
# if it's healthy we might as well cache a bit
# longer because this is potentially used a lot
if results.get('Status') == 'Finished':
expires = 60 * 60
if results:
cache.set(cache_key, results, expires)
_status = results.get('Status')
return {'status': _status}
@superuser_required
@json_view
def vidly_media_info(request):
def as_fields(result):
return [
{'key': a, 'value': b}
for (a, b)
in sorted(result.items())
]
if not request.GET.get('id'):
return http.HttpResponseBadRequest("No 'id'")
event = get_object_or_404(Event, pk=request.GET['id'])
environment = event.template_environment or {}
if not environment.get('tag') or environment.get('tag') == 'None':
# perhaps it has a VidlySubmission anyway
submissions = (
VidlySubmission.objects
.exclude(tag__isnull=True)
.filter(event=event).order_by('-submission_time')
)
for submission in submissions[:1]:
environment = {'tag': submission.tag}
break
if not environment.get('tag') or environment.get('tag') == 'None':
return {'fields': as_fields({
'*Note*': 'Not a valid tag in template',
'*Template contents*': unicode(environment),
})}
else:
tag = environment['tag']
cache_key = 'vidly-query-%s' % tag
force = request.GET.get('refresh', False)
if force:
results = None # force a refresh
else:
results = cache.get(cache_key)
if not results:
all_results = vidly.query(tag)
if tag not in all_results:
return {
'ERRORS': ['Tag (%s) not found in Vid.ly' % tag]
}
results = all_results[tag]
cache.set(cache_key, results, 60)
data = {'fields': as_fields(results)}
is_hd = results.get('IsHD', False)
if is_hd == 'false':
is_hd = False
data['past_submission'] = {
'url': results['SourceFile'],
'email': results['UserEmail'],
'hd': bool(is_hd),
'token_protection': event.privacy != Event.PRIVACY_PUBLIC,
}
if request.GET.get('past_submission_info'):
qs = (
VidlySubmission.objects
.filter(event=event)
.order_by('-submission_time')
)
for submission in qs[:1]:
if event.privacy != Event.PRIVACY_PUBLIC:
# forced
token_protection = True
else:
# whatever it was before
token_protection = submission.token_protection
data['past_submission'] = {
'url': submission.url,
'email': submission.email,
'hd': submission.hd,
'token_protection': token_protection,
}
return data
@require_POST
@superuser_required
def vidly_media_resubmit(request):
if request.POST.get('cancel'):
return redirect(reverse('manage:vidly_media') + '?status=Error')
form = forms.VidlyResubmitForm(data=request.POST)
if not form.is_valid():
return http.HttpResponse(str(form.errors))
event = get_object_or_404(Event, pk=form.cleaned_data['id'])
environment = event.template_environment or {}
if not environment.get('tag') or environment.get('tag') == 'None':
raise ValueError("Not a valid tag in template")
if event.privacy != Event.PRIVACY_PUBLIC:
token_protection = True # no choice
else:
token_protection = form.cleaned_data['token_protection']
old_tag = environment['tag']
shortcode, error = vidly.add_media(
url=form.cleaned_data['url'],
email=form.cleaned_data['email'],
hd=form.cleaned_data['hd'],
token_protection=token_protection
)
VidlySubmission.objects.create(
event=event,
url=form.cleaned_data['url'],
email=form.cleaned_data['email'],
token_protection=token_protection,
hd=form.cleaned_data['hd'],
tag=shortcode,
submission_error=error
)
if error:
messages.warning(
request,
"Media could not be re-submitted:\n<br>\n%s" % error
)
else:
messages.success(
request,
"Event re-submitted to use tag '%s'" % shortcode
)
vidly.delete_media(
old_tag,
email=form.cleaned_data['email']
)
event.template_environment['tag'] = shortcode
event.save()
cache_key = 'vidly-query-%s' % old_tag
cache.delete(cache_key)
return redirect(reverse('manage:vidly_media') + '?status=Error')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def print_stat(gltf):
"""
    Print model statistics
    :param gltf: glTF object
"""
vrm = gltf['extensions']['VRM']
print 'vrm materials:', len(vrm['materialProperties'])
print 'materials:', len(gltf['materials'])
print 'textures:', len(gltf['textures'])
print 'images:', len(gltf['images'])
meshes = gltf['meshes']
print 'meshes:', len(meshes)
print 'primitives:', sum([len(m['primitives']) for m in meshes])
for mesh in meshes:
print '\t', mesh['name'], ':', len(mesh['primitives'])
|
from google.protobuf.duration_pb2 import Duration
from feast import FeatureView, FileSource
driver_hourly_stats = FileSource(
path="driver_stats.parquet", # this parquet is not real and will not be read
)
driver_hourly_stats_view = FeatureView(
name="driver_hourly_stats", # Intentionally use the same FeatureView name
entities=["driver_id"],
online=False,
batch_source=driver_hourly_stats,
ttl=Duration(seconds=10),
tags={},
)
driver_hourly_stats_view_dup1 = FeatureView(
name="driver_hourly_stats", # Intentionally use the same FeatureView name
entities=["driver_id"],
online=False,
batch_source=driver_hourly_stats,
ttl=Duration(seconds=10),
tags={},
)
|
from binaryninja import *
from binjago import *
def find_func_symbol_refs(view):
bt = StdCallSearch(view)
bt.start()
def find_rop_gadgets(view):
rop_search = ROPSearch(view)
rop_search.start()
def find_prologues(view):
sig_search = PrologSearch(view)
sig_search.start()
PluginCommand.register(
"binjago: Find standard function references",
"Locate and annotate symbol references for standard API calls",
find_func_symbol_refs
)
PluginCommand.register(
"binjago: Find ROP gadgets",
"Search .text for ROP gadgets",
find_rop_gadgets
)
PluginCommand.register(
"binjago: Find function prologues",
"Search binary files for function prologues",
find_prologues
)
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.osconfig_v1alpha.types import config_common
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.osconfig.v1alpha",
manifest={
"InstanceOSPoliciesCompliance",
"GetInstanceOSPoliciesComplianceRequest",
"ListInstanceOSPoliciesCompliancesRequest",
"ListInstanceOSPoliciesCompliancesResponse",
},
)
class InstanceOSPoliciesCompliance(proto.Message):
r"""This API resource represents the OS policies compliance data for a
Compute Engine virtual machine (VM) instance at a given point in
time.
A Compute Engine VM can have multiple OS policy assignments, and
each assignment can have multiple OS policies. As a result, multiple
OS policies could be applied to a single VM.
You can use this API resource to determine both the compliance state
of your VM as well as the compliance state of an individual OS
policy.
For more information, see `View
compliance <https://cloud.google.com/compute/docs/os-configuration-management/view-compliance>`__.
Attributes:
name (str):
Output only. The ``InstanceOSPoliciesCompliance`` API
resource name.
Format:
``projects/{project_number}/locations/{location}/instanceOSPoliciesCompliances/{instance_id}``
instance (str):
Output only. The Compute Engine VM instance
name.
state (google.cloud.osconfig_v1alpha.types.OSPolicyComplianceState):
Output only. Compliance state of the VM.
detailed_state (str):
Output only. Detailed compliance state of the VM. This field
is populated only when compliance state is ``UNKNOWN``.
It may contain one of the following values:
- ``no-compliance-data``: Compliance data is not available
for this VM.
- ``no-agent-detected``: OS Config agent is not detected
for this VM.
- ``config-not-supported-by-agent``: The version of the OS
Config agent running on this VM does not support
configuration management.
- ``inactive``: VM is not running.
- ``internal-service-errors``: There were internal service
errors encountered while enforcing compliance.
- ``agent-errors``: OS config agent encountered errors
while enforcing compliance.
detailed_state_reason (str):
Output only. The reason for the ``detailed_state`` of the VM
(if any).
os_policy_compliances (Sequence[google.cloud.osconfig_v1alpha.types.InstanceOSPoliciesCompliance.OSPolicyCompliance]):
Output only. Compliance data for each ``OSPolicy`` that is
applied to the VM.
last_compliance_check_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Timestamp of the last compliance
check for the VM.
last_compliance_run_id (str):
Output only. Unique identifier for the last
compliance run. This id will be logged by the OS
config agent during a compliance run and can be
used for debugging and tracing purpose.
"""
class OSPolicyCompliance(proto.Message):
r"""Compliance data for an OS policy
Attributes:
os_policy_id (str):
The OS policy id
os_policy_assignment (str):
Reference to the ``OSPolicyAssignment`` API resource that
the ``OSPolicy`` belongs to.
Format:
``projects/{project_number}/locations/{location}/osPolicyAssignments/{os_policy_assignment_id@revision_id}``
state (google.cloud.osconfig_v1alpha.types.OSPolicyComplianceState):
Compliance state of the OS policy.
os_policy_resource_compliances (Sequence[google.cloud.osconfig_v1alpha.types.OSPolicyResourceCompliance]):
Compliance data for each ``OSPolicyResource`` that is
applied to the VM.
"""
os_policy_id = proto.Field(proto.STRING, number=1,)
os_policy_assignment = proto.Field(proto.STRING, number=2,)
state = proto.Field(
proto.ENUM, number=4, enum=config_common.OSPolicyComplianceState,
)
os_policy_resource_compliances = proto.RepeatedField(
proto.MESSAGE, number=5, message=config_common.OSPolicyResourceCompliance,
)
name = proto.Field(proto.STRING, number=1,)
instance = proto.Field(proto.STRING, number=2,)
state = proto.Field(
proto.ENUM, number=3, enum=config_common.OSPolicyComplianceState,
)
detailed_state = proto.Field(proto.STRING, number=4,)
detailed_state_reason = proto.Field(proto.STRING, number=5,)
os_policy_compliances = proto.RepeatedField(
proto.MESSAGE, number=6, message=OSPolicyCompliance,
)
last_compliance_check_time = proto.Field(
proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,
)
last_compliance_run_id = proto.Field(proto.STRING, number=8,)
class GetInstanceOSPoliciesComplianceRequest(proto.Message):
r"""A request message for getting OS policies compliance data for
the given Compute Engine VM instance.
Attributes:
name (str):
Required. API resource name for instance OS policies
compliance resource.
Format:
``projects/{project}/locations/{location}/instanceOSPoliciesCompliances/{instance}``
For ``{project}``, either Compute Engine project-number or
project-id can be provided. For ``{instance}``, either
Compute Engine VM instance-id or instance-name can be
provided.
"""
name = proto.Field(proto.STRING, number=1,)
class ListInstanceOSPoliciesCompliancesRequest(proto.Message):
r"""A request message for listing OS policies compliance data for
all Compute Engine VMs in the given location.
Attributes:
parent (str):
Required. The parent resource name.
Format: ``projects/{project}/locations/{location}``
For ``{project}``, either Compute Engine project-number or
project-id can be provided.
page_size (int):
The maximum number of results to return.
page_token (str):
A pagination token returned from a previous call to
``ListInstanceOSPoliciesCompliances`` that indicates where
this listing should continue from.
filter (str):
If provided, this field specifies the criteria that must be
met by a ``InstanceOSPoliciesCompliance`` API resource to be
included in the response.
"""
parent = proto.Field(proto.STRING, number=1,)
page_size = proto.Field(proto.INT32, number=2,)
page_token = proto.Field(proto.STRING, number=3,)
filter = proto.Field(proto.STRING, number=4,)
class ListInstanceOSPoliciesCompliancesResponse(proto.Message):
r"""A response message for listing OS policies compliance data
for all Compute Engine VMs in the given location.
Attributes:
instance_os_policies_compliances (Sequence[google.cloud.osconfig_v1alpha.types.InstanceOSPoliciesCompliance]):
List of instance OS policies compliance
objects.
next_page_token (str):
The pagination token to retrieve the next
page of instance OS policies compliance objects.
"""
@property
def raw_page(self):
return self
instance_os_policies_compliances = proto.RepeatedField(
proto.MESSAGE, number=1, message="InstanceOSPoliciesCompliance",
)
next_page_token = proto.Field(proto.STRING, number=2,)
__all__ = tuple(sorted(__protobuf__.manifest))
|
import pytest
from pathlib import Path
from modnet.utils import get_hash_of_file
_TEST_DATA_HASHES = {
"MP_2018.6_subset.zip": (
"d7d75e646dbde539645c8c0b065fd82cbe93f81d3500809655bd13d0acf2027c"
"1786091a73f53985b08868c5be431a3c700f7f1776002df28ebf3a12a79ab1a1"
),
"MP_2018.6_small.zip": (
"937a29dad32d18e47c84eb7c735ed8af09caede21d2339c379032fbd40c463d8"
"ca377d9e3a777710b5741295765f6c12fbd7ab56f9176cc0ca11c9120283d878"
),
}
def _load_moddata(filename):
"""Loads the pickled MODData from the test directory and checks it's hash."""
from modnet.preprocessing import MODData
data_file = Path(__file__).parent.joinpath(f"data/{filename}")
if filename not in _TEST_DATA_HASHES:
raise RuntimeError(
f"Cannot verify hash of {filename} as it was not provided, will not load pickle."
)
    # Loading pickles can be dangerous, so let's at least check that the hash
    # matches what it was when created
assert get_hash_of_file(data_file) == _TEST_DATA_HASHES[filename]
return MODData.load(data_file)
@pytest.fixture(scope="function")
def subset_moddata():
"""Loads the 100-structure featurized subset of MP.2018.6 for use
in other tests, checking only the hash.
"""
return _load_moddata("MP_2018.6_subset.zip")
@pytest.fixture(scope="function")
def small_moddata():
"""Loads the small 5-structure featurized subset of MP.2018.6 for use
in other tests, checking only the hash.
"""
return _load_moddata("MP_2018.6_small.zip")
@pytest.fixture(scope="module")
def tf_session():
"""This fixture can be used to sandbox tests that require tensorflow."""
import tensorflow
tensorflow.compat.v1.disable_eager_execution()
with tensorflow.device("/device:CPU:0") as session:
yield session
tensorflow.keras.backend.clear_session()
|
from test.util import TestCase
from unittest.mock import Mock
from OpenCast.infra.data.repo.context import Context
class ContextTest(TestCase):
def setUp(self):
self.repo = Mock()
self.context = Context(self.repo)
self.entity = "toto"
def test_add(self):
self.context.add(self.entity)
self.assertListEqual([self.entity], self.context.entities())
self.context.commit()
self.repo.create.assert_called_once_with(self.entity)
def test_update(self):
self.context.update(self.entity)
self.assertListEqual([self.entity], self.context.entities())
self.context.commit()
self.repo.update.assert_called_once_with(self.entity)
def test_delete(self):
self.context.delete(self.entity)
self.assertListEqual([self.entity], self.context.entities())
self.context.commit()
self.repo.delete.assert_called_once_with(self.entity)
|
import argparse
import asyncio
from aioconsole import ainput
from .setup_agent import setup_agent
async def message_processor(register_msg):
user_id = await ainput('Provide user id: ')
while True:
msg = await ainput(f'You ({user_id}): ')
msg = msg.strip()
if msg:
response = await register_msg(utterance=msg, user_external_id=user_id, user_device_type='cmd',
location='lab', channel_type='cmd_client',
deadline_timestamp=None, require_response=True)
print('Bot: ', response['dialog'].utterances[-1].text)
def run_cmd(pipeline_configs, debug):
agent, session, workers = setup_agent(pipeline_configs=pipeline_configs)
loop = asyncio.get_event_loop()
loop.set_debug(debug)
future = asyncio.ensure_future(message_processor(agent.register_msg))
for i in workers:
loop.create_task(i.call_service(agent.process))
try:
loop.run_until_complete(future)
except KeyboardInterrupt:
pass
except Exception as e:
raise e
finally:
future.cancel()
if session:
loop.run_until_complete(session.close())
loop.stop()
loop.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-pl', '--pipeline_configs', help='Pipeline config (overwrite value, defined in settings)',
type=str, action='append')
parser.add_argument('-d', '--debug', help='run in debug mode', action='store_true')
args = parser.parse_args()
run_cmd(args.pipeline_configs, args.debug)
|
from typing import Dict, Iterator, List, Union, Any, Type, cast
import numpy as np
import collections
import qulacs
from cirq import circuits, ops, protocols, schedules, study, value
from cirq.sim import SimulatesFinalState, SimulationTrialResult, wave_function
def _get_google_rotx(exponent : float) -> np.ndarray:
rot = exponent
g = np.exp(1.j*np.pi*rot/2)
c = np.cos(np.pi*rot/2)
s = np.sin(np.pi*rot/2)
mat = np.array([
[g*c, -1.j*g*s],
[-1.j*g*s, g*c]
])
return mat
def _get_google_rotz(exponent : float) -> np.ndarray:
return np.diag([1., np.exp(1.j*np.pi*exponent)])
class QulacsSimulator(SimulatesFinalState):
def __init__(self, *,
dtype: Type[np.number] = np.complex128,
seed: value.RANDOM_STATE_LIKE = None):
self._dtype = dtype
self._prng = value.parse_random_state(seed)
def _get_qulacs_state(self, num_qubits: int):
return qulacs.QuantumState(num_qubits)
def simulate_sweep(
self,
program: Union[circuits.Circuit, schedules.Schedule],
params: study.Sweepable,
qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
initial_state: Any = None,
) -> List['SimulationTrialResult']:
"""Simulates the supplied Circuit or Schedule with Qulacs
Args:
program: The circuit or schedule to simulate.
params: Parameters to run with the program.
qubit_order: Determines the canonical ordering of the qubits. This
is often used in specifying the initial state, i.e. the
ordering of the computational basis states.
initial_state: The initial state for the simulation. The form of
this state depends on the simulation implementation. See
documentation of the implementing class for details.
Returns:
List of SimulationTrialResults for this run, one for each
possible parameter resolver.
"""
trial_results = []
# sweep for each parameters
resolvers = study.to_resolvers(params)
for resolver in resolvers:
# result circuit
cirq_circuit = protocols.resolve_parameters(program, resolver)
qubits = ops.QubitOrder.as_qubit_order(qubit_order).order_for(cirq_circuit.all_qubits())
qubit_map = {q: i for i, q in enumerate(qubits)}
num_qubits = len(qubits)
# create state
qulacs_state = self._get_qulacs_state(num_qubits)
if initial_state is not None:
cirq_state = wave_function.to_valid_state_vector(initial_state,num_qubits)
qulacs_state.load(cirq_state)
del cirq_state
# create circuit
qulacs_circuit = qulacs.QuantumCircuit(num_qubits)
address_to_key = {}
register_address = 0
for moment in cirq_circuit:
operations = moment.operations
for op in operations:
indices = [num_qubits - 1 - qubit_map[qubit] for qubit in op.qubits]
result = self._try_append_gate(op, qulacs_circuit, indices)
if result:
continue
if isinstance(op.gate, ops.ResetChannel):
qulacs_circuit.update_quantum_state(qulacs_state)
qulacs_state.set_zero_state()
qulacs_circuit = qulacs.QuantumCircuit(num_qubits)
elif protocols.is_measurement(op):
for index in indices:
qulacs_circuit.add_gate(qulacs.gate.Measurement(index, register_address))
address_to_key[register_address] = protocols.measurement_key(op.gate)
register_address += 1
elif protocols.has_mixture(op):
indices.reverse()
qulacs_gates = []
gate = cast(ops.GateOperation, op).gate
channel = protocols.channel(gate)
for krauss in channel:
krauss = krauss.astype(np.complex128)
qulacs_gate = qulacs.gate.DenseMatrix(indices, krauss)
qulacs_gates.append(qulacs_gate)
qulacs_cptp_map = qulacs.gate.CPTP(qulacs_gates)
                        qulacs_circuit.add_gate(qulacs_cptp_map)
# perform simulation
qulacs_circuit.update_quantum_state(qulacs_state)
# fetch final state and measurement results
final_state = qulacs_state.get_vector()
measurements = collections.defaultdict(list)
for register_index in range(register_address):
key = address_to_key[register_index]
value = qulacs_state.get_classical_value(register_index)
measurements[key].append(value)
# create result for this parameter
result = SimulationTrialResult(
params = resolver,
measurements = measurements,
final_simulator_state = final_state
)
trial_results.append(result)
# release memory
del qulacs_state
del qulacs_circuit
return trial_results
def _try_append_gate(self, op : ops.GateOperation, qulacs_circuit : qulacs.QuantumCircuit, indices : np.array):
# One qubit gate
if isinstance(op.gate, ops.pauli_gates._PauliX):
qulacs_circuit.add_X_gate(indices[0])
elif isinstance(op.gate, ops.pauli_gates._PauliY):
qulacs_circuit.add_Y_gate(indices[0])
elif isinstance(op.gate, ops.pauli_gates._PauliZ):
qulacs_circuit.add_Z_gate(indices[0])
elif isinstance(op.gate, ops.common_gates.HPowGate):
qulacs_circuit.add_H_gate(indices[0])
elif isinstance(op.gate, ops.common_gates.XPowGate):
qulacs_circuit.add_RX_gate(indices[0], -np.pi*op.gate._exponent)
elif isinstance(op.gate, ops.common_gates.YPowGate):
qulacs_circuit.add_RY_gate(indices[0], -np.pi*op.gate._exponent)
elif isinstance(op.gate, ops.common_gates.ZPowGate):
qulacs_circuit.add_RZ_gate(indices[0], -np.pi*op.gate._exponent)
elif isinstance(op.gate, ops.SingleQubitMatrixGate):
mat = op.gate._matrix
qulacs_circuit.add_dense_matrix_gate(indices[0], mat)
elif isinstance(op.gate, circuits.qasm_output.QasmUGate):
lmda = op.gate.lmda
theta = op.gate.theta
phi = op.gate.phi
gate = qulacs.gate.U3(indices[0], theta*np.pi, phi*np.pi, lmda*np.pi)
qulacs_circuit.add_gate(gate)
# Two qubit gate
elif isinstance(op.gate, ops.common_gates.CNotPowGate):
if op.gate._exponent == 1.0:
qulacs_circuit.add_CNOT_gate(indices[0], indices[1])
else:
mat = _get_google_rotx(op.gate._exponent)
gate = qulacs.gate.DenseMatrix(indices[1], mat)
gate.add_control_qubit(indices[0],1)
qulacs_circuit.add_gate(gate)
elif isinstance(op.gate, ops.common_gates.CZPowGate):
if op.gate._exponent == 1.0:
qulacs_circuit.add_CZ_gate(indices[0], indices[1])
else:
mat = _get_google_rotz(op.gate._exponent)
gate = qulacs.gate.DenseMatrix(indices[1], mat)
gate.add_control_qubit(indices[0],1)
qulacs_circuit.add_gate(gate)
elif isinstance(op.gate, ops.common_gates.SwapPowGate):
if op.gate._exponent == 1.0:
qulacs_circuit.add_SWAP_gate(indices[0], indices[1])
else:
qulacs_circuit.add_dense_matrix_gate(indices,op._unitary_())
elif isinstance(op.gate, ops.parity_gates.XXPowGate):
qulacs_circuit.add_multi_Pauli_rotation_gate(indices, [1,1], -np.pi*op.gate._exponent)
elif isinstance(op.gate, ops.parity_gates.YYPowGate):
qulacs_circuit.add_multi_Pauli_rotation_gate(indices, [2,2], -np.pi*op.gate._exponent)
elif isinstance(op.gate, ops.parity_gates.ZZPowGate):
qulacs_circuit.add_multi_Pauli_rotation_gate(indices, [3,3], -np.pi*op.gate._exponent)
elif isinstance(op.gate, ops.TwoQubitMatrixGate):
indices.reverse()
mat = op.gate._matrix
qulacs_circuit.add_dense_matrix_gate(indices, mat)
        # Three qubit gate
        # deprecated because these functions cause errors in gpu
        # elif isinstance(op.gate, ops.three_qubit_gates.CCXPowGate):
        #     mat = _get_google_rotx(op.gate._exponent)
        #     gate = qulacs.gate.DenseMatrix(indices[2], mat)
        #     gate.add_control_qubit(indices[0],1)
        #     gate.add_control_qubit(indices[1],1)
        #     qulacs_circuit.add_gate(gate)
        # elif isinstance(op.gate, ops.three_qubit_gates.CCZPowGate):
        #     mat = _get_google_rotz(op.gate._exponent)
        #     gate = qulacs.gate.DenseMatrix(indices[2], mat)
        #     gate.add_control_qubit(indices[0],1)
        #     gate.add_control_qubit(indices[1],1)
        #     qulacs_circuit.add_gate(gate)
elif isinstance(op.gate, ops.three_qubit_gates.CSwapGate):
mat = np.zeros(shape=(4,4))
mat[0,0] = 1; mat[1,2] = 1; mat[2,1] = 1; mat[3,3] = 1
gate = qulacs.gate.DenseMatrix(indices[1:], mat)
gate.add_control_qubit(indices[0],1)
qulacs_circuit.add_gate(gate)
# Misc
elif protocols.has_unitary(op):
indices.reverse()
mat = op._unitary_()
qulacs_circuit.add_dense_matrix_gate(indices,mat)
# Not unitary
else:
return False
return True
class QulacsSimulatorGpu(QulacsSimulator):
def _get_qulacs_state(self, num_qubits : int):
try:
state = qulacs.QuantumStateGpu(num_qubits)
return state
except AttributeError:
raise Exception("GPU simulator is not installed")
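# Illustrative sketch (assumes cirq and qulacs are installed): simulate a
# two-qubit Bell circuit with the Qulacs backend defined above. The qubit
# names are arbitrary; the final state lives on the returned trial result.
if __name__ == '__main__':
    import cirq
    q0, q1 = cirq.LineQubit.range(2)
    bell = cirq.Circuit([cirq.H(q0), cirq.CNOT(q0, q1)])
    results = QulacsSimulator().simulate_sweep(bell, params=None)
    print(results[0])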
|
# File to tell Python that this directory contains a package
from alphadoc.docstring import get_docstring
from alphadoc.main import main
|
from tutorial3_nohup1 import *
class NoHUPStartSSH(NoHUPStart):
username = batch.Property()
password = batch.Property()
server = batch.Property()
terminal = batch.Controller(SSHTerminal, username, password, server)
|
class Topic:
topics = {}
def __init__(self):
pass
def add_topic(self, data):
username = data['username']
topicname = data['topicname']
users = data['users'].users
# permission check
if not username in users or users[username]['role'] != 'admin':
            print('Only admins can create topics')
            return
self.topics[topicname] = {}
print('Added new Topic')
def remove_topic(self, data):
username = data['username']
topicname = data['topicname']
users = data['users'].users
# permission check
if not username in users or users[username]['role'] != 'admin':
            print('Only admins can remove topics')
            return
del self.topics[topicname]
print('Removed Topic')
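# Illustrative sketch: `data['users']` is expected to expose a `.users` dict
# mapping usernames to roles, so a tiny stand-in class is used here purely for
# demonstration.
if __name__ == '__main__':
    class _Users:
        users = {'alice': {'role': 'admin'}, 'bob': {'role': 'member'}}
    topic = Topic()
    topic.add_topic({'username': 'alice', 'topicname': 'news', 'users': _Users()})
    topic.add_topic({'username': 'bob', 'topicname': 'spam', 'users': _Users()})  # denied
    print(topic.topics)  # -> {'news': {}}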
|
import math
class Node:
leftLength = 0
value = ""
isBalanced = True
def __init__(self, string, optimalLength=1000, minLength=500, maxLength=1500, isRoot=True):
halfWay = int(math.ceil(len(string) / 2))
self.isRoot = isRoot
# These are constants which are used to decide whether or not to re-build the tree
self.OPTIMAL_LENGTH = optimalLength
self.MIN_LENGTH = minLength
self.MAX_LENGTH = maxLength
# We build the rope by splitting the string in two until we get to the desired length
if halfWay > self.OPTIMAL_LENGTH:
self.left = Node(string[:halfWay], self.OPTIMAL_LENGTH, self.MIN_LENGTH, self.MAX_LENGTH, False)
self.right = Node(string[halfWay:], self.OPTIMAL_LENGTH, self.MIN_LENGTH, self.MAX_LENGTH, False)
self.leftLength = halfWay
else:
self.value = string
def __str__(self):
if self.value != "":
return self.value
else:
return str(self.left) + str(self.right)
def __len__(self):
if self.value != "":
return len(self.value)
else:
return self.leftLength + len(self.right)
def insert(self, offset, string):
if self.value != "":
value = self.value[:offset] + string + self.value[offset:]
self.value = value
length = len(value)
if not checkIfOptimalLength(length, self.MIN_LENGTH, self.MAX_LENGTH) and not self.isRoot:
self.isBalanced = False
return length
else:
if offset > self.leftLength:
self.right.insert(offset - self.leftLength, string)
else:
length = self.left.insert(offset, string)
self.leftLength = length
return self.leftLength
def delete(self, offset, length):
if self.value != "":
value = self.value[:offset] + self.value[length + offset:]
self.value = value
length = len(value)
if not checkIfOptimalLength(length, self.MIN_LENGTH, self.MAX_LENGTH) and not self.isRoot:
self.isBalanced = False
return length
else:
if offset > self.leftLength:
self.right.delete(offset - self.leftLength, length)
elif offset + length > self.leftLength:
leftLength = self.left.delete(offset, self.leftLength - offset)
self.right.delete(0, length - (self.leftLength - offset))
self.leftLength = leftLength
else:
leftLength = self.left.delete(offset, length)
self.leftLength = leftLength
return self.leftLength
def amIBalanced(self):
if self.value != "":
return self.isBalanced
else:
if self.left.amIBalanced() and self.right.amIBalanced():
return True
return False
# Checks to see if the length provided is within the desired range
def checkIfOptimalLength(length, minimum_length, maximum_length):
if length > minimum_length and length < maximum_length:
return True
return False
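# Illustrative sketch: build a small rope with toy length thresholds, then apply
# an insert and a delete. The thresholds are arbitrary values for demonstration.
if __name__ == "__main__":
    rope = Node("Hello world", optimalLength=4, minLength=2, maxLength=8)
    rope.insert(5, ",")
    print(str(rope))             # -> Hello, world
    rope.delete(5, 1)
    print(str(rope), len(rope))  # -> Hello world 11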
|
import json
from random import randint
from colorama import init, Fore
# initialize colours
init()
GREEN = Fore.GREEN
RED = Fore.RED
BLUE = Fore.BLUE
WHITE = Fore.WHITE
CYAN = Fore.CYAN
def intro():
print(f"""{CYAN}
______ __ __ ______ __ __ ______ ______ __ __
/\ ___\ /\ "-./ \ /\ __ \ /\ \ /\ \ /\ ___\ /\ ___\ /\ "-.\ \
\ \ __\ \ \ \-./\ \ \ \ __ \ \ \ \ \ \ \____ \ \ \__ \ \ \ __\ \ \ \-. \
\ \_____\ \ \_\ \ \_\ \ \_\ \_\ \ \_\ \ \_____\ \ \_____\ \ \_____\ \ \_\\"\_\
\/_____/ \/_/ \/_/ \/_/\/_/ \/_/ \/_____/ \/_____/ \/_____/ \/_/ \/_/
{WHITE}-- Made with ❤️ by YY
""")
# Reads and opens the correct json files
def read_json():
with open('names.json') as name:
names = json.load(name)
with open('surnames.json') as surname:
surnames = json.load(surname)
return names, surnames
# Collects all given data to craft the requested number of e-mails. Generated e-mails are collected in a list and returned.
def generate(names, surnames, max_email):
hosts = ["@gmail", "@hotmail", "@outlook", "@protonmail", "@aol", "@yahoo", "@zoho", "@gmx"]
generated = []
for i in range(0, max_email):
email = names[randint(0, len(names) - 1)] + "_" + surnames[randint(0, len(surnames) - 1)] + hosts[
randint(0, len(hosts) - 1)] + ".com"
generated.append(email)
return generated
# Save generated e-mails in 'generated.txt'.
def write_txt(generated, max_email):
with open('generated.txt', 'w') as file:
for email in generated:
file.write("%s\n" % email)
print(f"{GREEN}[ ✅ ]{WHITE} all {GREEN}{max_email}{WHITE} emails are saved in {GREEN}generated.txt{WHITE} ! ")
# Save generated e-mails in 'generated.csv'.
def write_csv(generated, max_email):
    with open('generated.csv', 'w') as file:
        for email in generated:
            file.write("%s\n" % email)
print(f"{GREEN}- ✅{WHITE} all {GREEN}{max_email}{WHITE} emails are saved in {GREEN}generated.csv{WHITE} ! ")
# Print all generated e-mails in command line.
def output(generated, max_email):
print(f"{GREEN}- ✅ {WHITE}generated {GREEN}{max_email}{WHITE} emails -> \n")
for email in generated:
print(email)
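# Illustrative sketch of how the helpers above could be wired together. It
# assumes names.json and surnames.json sit next to the script, and 10 is an
# arbitrary e-mail count.
if __name__ == "__main__":
    intro()
    names, surnames = read_json()
    emails = generate(names, surnames, 10)
    output(emails, 10)
    write_txt(emails, 10)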
|
from ...helpers import format_result
from .queries import (GQL_CREATE_ORGANIZATION, GQL_DELETE_ORGANIZATION,
GQL_UPDATE_ORGANIZATION)
def create_organization(client, name: str, address: str, zip_code: str, city: str, country: str):
variables = {
'name': name,
'address': address,
'zipCode': zip_code,
'city': city,
'country': country
}
result = client.execute(GQL_CREATE_ORGANIZATION, variables)
return format_result('data', result)
def update_organization(client, organization_id: str, name: str, address: str, zip_code: str, city: str, country: str):
variables = {
'organizationID': organization_id,
'name': name,
'address': address,
'zipCode': zip_code,
'city': city,
'country': country
}
result = client.execute(GQL_UPDATE_ORGANIZATION, variables)
return format_result('data', result)
def delete_organization(client, organization_id: str):
variables = {'organizationID': organization_id}
result = client.execute(GQL_DELETE_ORGANIZATION, variables)
return format_result('data', result)
|