text stringlengths 8 6.05M |
|---|
__author__='RodrigoMachado'
__license__ = "MIT"
__version__ = "1.0.1"
__status__ = "Production"
__copyright__ = "Copyright 2019"
__maintainer__ = "RodrigoMachado9"
__email__ = "rodrigo.machado3.14@hotmail.com"
__credits__ = ["Python is life", "Live the opensource world"]
from sql_alchemy import banco
from sqlalchemy import DateTime
from datetime import datetime
from models.aux import aux
# todo: model for the transport entity
class TransporteModel(banco.Model):
    """SQLAlchemy model for a transport (trip) record."""
    __tablename__ = 'transporte'

    transporte_id = banco.Column(banco.Integer, primary_key=True)
    frete = banco.Column(banco.String(40))  # todo; true or false
    incidente = banco.Column(banco.String(40))  # todo; true or false
    # BUGFIX: pass the callable, not its result. `datetime.utcnow()` would be
    # evaluated once at import time, giving every row the same timestamp.
    partida = banco.Column(DateTime(), default=datetime.utcnow)
    chegada = banco.Column(banco.String(40))
    carga = banco.relationship('CargaModel')
    local_carga_id = banco.Column(banco.Integer, banco.ForeignKey('local_carga.local_carga_id'))

    def __init__(self, frete, incidente, chegada, local_carga_id):
        self.frete = frete
        self.incidente = incidente
        self.chegada = chegada
        self.local_carga_id = local_carga_id

    def json(self):
        """Serialize this row (and its cargo children) into a plain dict."""
        return {
            'transporte_id': self.transporte_id,
            'frete': self.frete,
            'incidente': self.incidente,
            'chegada': self.chegada,
            'carga': [carga.json() for carga in self.carga]
        }

    @classmethod
    def find_transporte(cls, transporte_id):
        """Return the row with the given primary key, or None.

        Equivalent SQL: SELECT * FROM transporte WHERE transporte_id = ? LIMIT 1
        """
        # .first() already returns None on no match; no extra branch needed.
        return cls.query.filter_by(transporte_id=transporte_id).first()

    def save_transporte(self):
        """Persist (insert or update) this row and commit."""
        banco.session.add(self)
        banco.session.commit()

    def update_transporte(self, frete, incidente):
        """Update mutable fields in memory; caller must save to commit."""
        self.frete = frete
        self.incidente = incidente

    def delete_transporte(self):
        """Delete this row and commit."""
        banco.session.delete(self)
        banco.session.commit()
|
class ListNode(object):
    """Singly linked list node."""

    def __init__(self, x):
        self.val = x
        self.next = None

    def __str__(self):
        """Render the list from this node onward, e.g. '[1, 2, ]'.

        The trailing ', ' before ']' is the original format and is kept.
        """
        # Idiomatic identity test (`is not None`) and an O(n) join instead of
        # quadratic += string concatenation; the output is unchanged.
        parts = []
        cursor = self
        while cursor is not None:
            parts.append('{}, '.format(cursor.val))
            cursor = cursor.next
        return '[' + ''.join(parts) + ']'
def list_to_linked_list(lst):
    """Build a linked list from an iterable and return its head (or None)."""
    sentinel = ListNode(None)  # dummy head simplifies the append loop
    tail = sentinel
    for value in lst:
        tail.next = ListNode(value)
        tail = tail.next
    return sentinel.next
|
# Project Euler 81: minimum path sum through a matrix moving only right/down.
# NOTE: Python 2 source (bare `print` statement at the end).
mat = open('problem81.txt','r')
raw_path = mat.read().split('\n')
path = []
for y in raw_path:
    path.append(y.split(','))
for y in range(0,len(path)):
    for x in range(0,len(path[0])):
        path[y][x] = int(path[y][x])
mat.close()
## Gets the matrix into a readable form of integers
## First value represents vertical, second value represents horizontal
##path =[[131,673,234,103,18], # my approach: start at the bottom right
## [201,96,342,965,150], # add the smallest value possible from either the right
## [630,803,746,422,111], # or the bottom until you get to the top
## [537,699,497,121,956],
## [805,732,524,37,331]]
# Dynamic programming over anti-diagonals starting at the bottom-right corner:
# each cell accumulates itself plus the cheaper of its right/below neighbour,
# so path[0][0] ends up holding the minimal total.
# NOTE(review): assumes a square matrix (side = len(path)) -- confirm input.
side = len(path)
for diagonal in range(1,side*2-1):
    y = side-1
    x = side-1-diagonal
    if x < 0:
        y+=x
        x-=x # starting point for the diagonals
    while x <= side-1 and y >= 0:
        # runs along the diagonal
        if y==side-1:
            # bottom row: only the right neighbour exists
            path[y][x] += path[y][x+1]
        elif x ==side-1:
            # rightmost column: only the neighbour below exists
            path[y][x] +=path[y+1][x]
        else:
            if path[y+1][x] < path[y][x+1]:
                path[y][x]+=path[y+1][x]
            else:
                path[y][x] += path[y][x+1]
        x+=1
        y-=1
print path[0][0]
|
#!/usr/bin/env python3
import sys
def even(integers):
    """Yield only the even members of *integers*."""
    yield from (n for n in integers if n % 2 == 0)
def main(lines):
    """Print every even integer parsed from *lines*; stop quietly if the
    consumer closes the pipe (e.g. `... | head`)."""
    for value in even(map(int, lines)):
        try:
            print(value)
        except BrokenPipeError:
            break


if __name__ == '__main__':
    main(sys.stdin)
|
# Read two integers and evaluate F = 2*X + 2*Y^2 (factored form below).
X = int(input("Digite o valor de X: "))
Y = int(input("Digite o valor de Y: "))
F = 2 * (X + Y ** 2)
print(F)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This is an example on homography estimation using OpenCV
Created on Tue Sep 12 21:01:53 2017
@author: gholguin
"""
# Imports
import cv2
import numpy as np
# Variable global que se pueda compartir con el callback
# Module-level point store shared with the mouse callback.
puntos_click = list()

# ----------------------------------------------------------------------
def click_and_count(event, x, y, flags, param):
    """Mouse callback: record (x, y) on every left-button press."""
    global puntos_click
    if event != cv2.EVENT_LBUTTONDOWN:
        return
    puntos_click.append((x, y))
# =======================================================================
class MiHomografia():
"""Clase para solucionar problemas relacionados con homografias"""
# Atributos de la clase
reprojThresh = 0.01
def __init__(self):
"""Inicializador del objeto miembro de la clase"""
# Atributos del objeto
self.imagen_original = list()
self.rectificada = list()
self.pts_x = np.array(list())
self.pts_xp = np.array(list())
self.H = list()
def load_image(self, image_path):
"""Funcion para cargar una imagen desde el disco duro"""
self.imagen_original = cv2.imread(image_path)
def grab_four_points(self):
"""Capturar Puntos en la imagen"""
global puntos_click
# Clonar la imagen original para no modificarla
imagen_conpuntos = self.imagen_original.copy()
cv2.namedWindow("image")
cv2.setMouseCallback("image", click_and_count)
while True:
# Muestre la imagen hasta que se presione 'q'
cv2.imshow("image", imagen_conpuntos)
key = cv2.waitKey(1) & 0xFF
# Si se presiona 'r', resetear los puntos
if key == ord("r"):
imagen_conpuntos = self.imagen_original.copy()
puntos_click = list()
# Si se presiona 'q' termine
elif key == ord("q"):
break
# Mostrar los puntos en la imagen
if puntos_click:
for pt, coords in enumerate(puntos_click):
x, y = coords[0], coords[1]
cv2.circle(imagen_conpuntos, (x, y), 5, (0, 0, 255), 5, 2)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(imagen_conpuntos, str(pt+1), (x, y), font, 4, (0, 0, 255), 2, cv2.LINE_AA)
cv2.imshow("image", imagen_conpuntos)
def encontrar_h(self):
"""Calculo robusto de H"""
self.H, status = cv2.findHomography(self.pts_xp, self.pts_x, cv2.RANSAC, self.reprojThresh)
return self.H, status
def remover_proyectividad(self):
"""Basado en el H encontrado, remover la proyectividad"""
self.rectificada = cv2.warpPerspective(self.imagen_original, self.H,
(self.imagen_original.shape[1], self.imagen_original.shape[0]))
cv2.namedWindow("Rectificada")
cv2.imshow("Rectificada", self.rectificada)
cv2.waitKey(0)
if __name__ == '__main__':
    # Create a MiHomografia object
    hproblem = MiHomografia()
    # Load the working image from disk
    hproblem.load_image("capilla60.jpg")
    # Let the user click the four corners in the image
    hproblem.grab_four_points()
    print("\nEsquinas seleccionadas:")
    print(puntos_click)
    # Clicked points are the source; the hard-coded rectangle is the target
    hproblem.pts_xp = np.array(puntos_click)
    hproblem.pts_x = np.array([(200, 300), (700, 300), (700, 700), (200, 700)])
    H, status = hproblem.encontrar_h()
    print("\nMatriz H:")
    print(H)
    hproblem.remover_proyectividad()
|
#!/usr/bin/env python3
import numpy as np
from keras import Input, Model, Sequential
from keras.layers import BatchNormalization, Conv2DTranspose, LeakyReLU, Conv2D, Activation, Flatten, Dense, Reshape, \
Lambda
from keras import backend as K
def create_models():
    """Build the encoder, decoder and assembled VAE Keras models.

    Returns:
        (encoder, decoder, vae): the encoder maps a 64x64x4 image to
        (z_mean, z_log_var); the decoder maps a latent vector to a
        single-channel sigmoid map; the VAE chains them through a
        sampling layer and carries the KL term as an added loss.
    """
    n_channels = 3 + 1          # RGB + 1 mask channel
    image_shape = (64, 64, n_channels)
    n_encoder = 1024            # width of the dense layer before the latent heads
    latent_dim = 128
    decode_from_shape = (8, 8, 256)
    n_decoder = np.prod(decode_from_shape)
    leaky_relu_alpha = 0.2

    def conv_block(x, filters, leaky=True, transpose=False, name=''):
        # Strided (de)convolution + batch norm + activation. With x=None the
        # fresh layers are returned (for Sequential use); otherwise they are
        # applied to x and the output tensor is returned.
        conv = Conv2DTranspose if transpose else Conv2D
        activation = LeakyReLU(leaky_relu_alpha) if leaky else Activation('relu')
        layers = [
            conv(filters, 5, strides=2, padding='same', name=name + 'conv'),
            BatchNormalization(name=name + 'bn'),
            activation
        ]
        if x is None:
            return layers
        for layer in layers:
            x = layer(x)
        return x

    # Encoder
    def create_encoder():
        x = Input(shape=image_shape, name='enc_input')
        y = conv_block(x, 64, name='enc_blk_1_')
        y = conv_block(y, 128, name='enc_blk_2_')
        y = conv_block(y, 256, name='enc_blk_3_')
        y = Flatten()(y)
        y = Dense(n_encoder, name='enc_h_dense')(y)
        y = BatchNormalization(name='enc_h_bn')(y)
        y = LeakyReLU(leaky_relu_alpha)(y)
        z_mean = Dense(latent_dim, name='z_mean')(y)
        z_log_var = Dense(latent_dim, name='z_log_var')(y)
        return Model(x, [z_mean, z_log_var], name='encoder')

    # Decoder: latent vector -> 64x64x1 sigmoid map (the mask channel)
    decoder = Sequential([
        Dense(n_decoder, input_shape=(latent_dim,),
              name='dec_h_dense'),
        BatchNormalization(name='dec_h_bn'),
        LeakyReLU(leaky_relu_alpha),
        Reshape(decode_from_shape),
        *conv_block(None, 256, transpose=True, name='dec_blk_1_'),
        *conv_block(None, 128, transpose=True, name='dec_blk_2_'),
        *conv_block(None, 32, transpose=True, name='dec_blk_3_'),
        Conv2D(1, 5, activation='sigmoid', padding='same', name='dec_output')
    ], name='decoder')

    def _sampling(args):
        """Reparameterization trick by sampling from an isotropic unit Gaussian.
        Instead of sampling from Q(z|X), sample eps = N(0,I)
        # Arguments:
            args (tensor): mean and log of variance of Q(z|X)
        # Returns:
            z (tensor): sampled latent vector
        """
        z_mean, z_log_var = args
        batch = K.shape(z_mean)[0]
        dim = K.int_shape(z_mean)[1]
        # by default, random_normal has mean=0 and std=1.0
        epsilon = K.random_normal(shape=(batch, dim))
        return z_mean + K.exp(0.5 * z_log_var) * epsilon

    sampler = Lambda(_sampling, output_shape=(latent_dim,), name='sampler')
    encoder = create_encoder()

    # Build graph: image -> (z_mean, z_log_var) -> sampled z -> decoded map
    x = Input(shape=image_shape, name='input_image')
    z_mean, z_log_var = encoder(x)
    z = sampler([z_mean, z_log_var])
    y = decoder(z)
    vae = Model(x, y, name='vae')

    # KL divergence loss (the reconstruction loss is supplied at compile time)
    kl_loss = K.mean(-0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1))
    vae.add_loss(kl_loss)
    return encoder, decoder, vae
from PIL import Image
def loader(encoder_train=True, batch_size=64):
    """Infinite generator of synthetic training batches.

    Each 64x64x4 canvas is filled with uniform noise, its mask channel is
    cleared, and a randomly sized/positioned ball sprite is pasted in.
    Yields (images, mask) when encoder_train is True, otherwise
    (rgb_images, None).
    NOTE(review): assumes ball.png is RGBA so the paste also fills the
    mask channel -- confirm the asset.
    """
    ball_img = Image.open('ball.png', 'r')
    while True:
        images = np.random.uniform(size=(batch_size, 64, 64, 4))
        # Clear mask
        images[:, :, :, -1] = 0.
        for i in range(len(images)):
            s = np.random.randint(24, 48)     # sprite side length in pixels
            img = np.asarray(ball_img.resize((s, s), Image.BICUBIC)) / 255.
            x = np.random.randint(0, 64 - s)  # random top-left corner
            y = np.random.randint(0, 64 - s)
            images[i, y:y+s, x:x+s, :] = img
        mask = np.expand_dims(images[:, :, :, -1], -1)
        if encoder_train:
            yield images, mask
        else:
            yield images[:, :, :, :3], None
from keras.callbacks import ModelCheckpoint
def main():
    """Train the VAE on generated batches and save the encoder weights."""
    encoder, decoder, vae = create_models()
    datagen = loader()
    # Checkpoint the VAE weights after every epoch
    ck = ModelCheckpoint('encoder.{epoch:02d}.h5', save_weights_only=True)
    vae.compile('nadam', 'binary_crossentropy', ['acc'])
    # 1000 steps per epoch, 100 epochs
    vae.fit_generator(datagen, 1000, 100, callbacks=[ck])
    encoder.save_weights('encoder-trained.h5')


if __name__ == '__main__':
    main()
|
# coding: utf-8
import xadmin
from .models import UserComments, UserMessage, UserLearn
class UserCommentsAdmin(object):
    """xadmin options for UserComments (list-view columns only)."""
    list_display = ['user', 'comment_id', 'comment_type', 'comments', 'add_time']
class UserMessageAdmin(object):
    """xadmin options for UserMessage: columns, searchable and filterable fields."""
    list_display = ["email", "message", "has_read", "add_time"]
    search_fields = ["email", "message", "has_read"]
    list_filter = ["email", "message", "has_read", "add_time"]
class UserLearnAdmin(object):
    """xadmin options for UserLearn: columns, searchable and filterable fields."""
    list_display = ["user", "learn_id", "learn_type", "add_time"]
    # BUGFIX: search_fields referenced "message"/"has_read" -- UserMessage
    # fields copy-pasted from UserMessageAdmin, not fields of UserLearn.
    search_fields = ["user", "learn_id", "learn_type"]
    list_filter = ["user", "learn_id", "learn_type", "add_time"]
# Attach the option classes above to the xadmin site.
xadmin.site.register(UserComments, UserCommentsAdmin)
xadmin.site.register(UserMessage, UserMessageAdmin)
xadmin.site.register(UserLearn, UserLearnAdmin)
|
#!/usr/bin/env python
from __future__ import print_function, unicode_literals
# ConnectHandler
from netmiko import Netmiko
from getpass import getpass
# Open the SSH session (password prompted interactively via getpass) and
# print the device prompt as a quick connectivity check.
net_conn = Netmiko(host="cisco1.twb-tech.com", username='pyclass', password=getpass(), device_type='cisco_ios')
print(net_conn.find_prompt())
|
# feature selection
# NOTE(review): this fragment relies on names defined elsewhere
# (LogisticRegression, RFE, sm, X_sm, Y_sm, X_selected, X_train, y_train,
# X_test, y_test, df_enroll, cols) -- presumably earlier notebook cells.
logreg = LogisticRegression()
# Recursive feature elimination down to the 15 strongest predictors
rfe = RFE(logreg, n_features_to_select=15)
rfe = rfe.fit(X_sm, Y_sm)
print(rfe.support_)   # boolean mask of kept features
print(rfe.ranking_)   # 1 = selected; larger = eliminated earlier
# building
# statsmodels logit for coefficient p-values / model summary
logit_model=sm.Logit(Y_sm,X_selected)
result=logit_model.fit()
print(result.summary2())
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
print('Accuracy of logistic regression classifier on test set: {:.2f}'.format(logreg.score(X_test, y_test)))
# result of cols
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))
# predict enrolled patients
pred = df_enroll[cols]
logreg.predict(pred)
# give probability of each class
logreg.predict_proba(pred)
|
import datetime
import re
class Field:
    """Descriptor for one record field: reads the raw value from the
    instance's ``_values`` dict and converts it through ``to_python``.

    The owning class must provide a class-level ``fields`` dict; each
    Field registers itself there under its attribute name.
    """

    name: str = None

    def __init__(self, fixed_length=None, validators=None, trim=None):
        """
        :param fixed_length: exact required value length, or None.
        :param validators: optional iterable of validator callables.
        :param trim: strip surrounding whitespace; defaults to True for
            fixed-length (padded) fields.
        """
        self.fixed_length = fixed_length
        # BUGFIX: validators was accepted but silently discarded; keep it so
        # owners/subclasses can run the callables.
        self.validators = list(validators) if validators else []
        if self.fixed_length and trim is None:
            self.trim = True
        else:
            self.trim = trim

    def __set_name__(self, owner, name):
        # Register on the owner; `owner.fields` must exist on the class.
        self.name = name
        owner.fields[name] = self

    def __get__(self, instance, owner):
        # Class-level access returns None; instance access converts the raw value.
        if instance is not None:
            return self.to_python(instance._values.get(self.name))

    def to_python(self, value):
        """Convert the raw value: length-check, '' -> None, optional strip.

        :raises ValueError: if a non-missing value has the wrong length.
        """
        # BUGFIX: guard the length check -- a missing value (None) used to
        # raise TypeError from len(None).
        if self.fixed_length and value is not None and len(value) != self.fixed_length:
            raise ValueError('Invalid field value length')
        if value == '':
            value = None
        if isinstance(value, str) and self.trim:
            return value.strip()
        return value
class StringField(Field):
    """Plain text field; inherits Field's trimming/length behaviour unchanged."""
    pass
class DateField(Field):
    """Field parsed into a datetime using ``input_format`` (date-only here)."""

    input_format = '%Y-%m-%d'

    def to_python(self, value):
        # Empty/None values fall back to the generic conversion.
        if not value:
            return super().to_python(value)
        return datetime.datetime.strptime(value, self.input_format)
class DateTimeField(DateField):
    """DateField variant that also parses the time component."""
    input_format = '%Y-%m-%d %H:%M:%S'
class IntegerField(Field):
    """Field converted to int; empty values defer to the base conversion."""

    def to_python(self, value):
        return int(value) if value else super().to_python(value)
|
from time import sleep
import numpy as np
import random
import copy
import os
def formatmap(num):
    """Reset board *num*: place the player, lay wall rows on even rows with
    one random hole each, and close the left/right borders.

    NOTE: rebinds the global x/y position lists, so calling this for any
    board resets ALL players to (3, 9).
    """
    global x, y
    x = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3]
    y = [9, 9, 9, 9, 9, 9, 9, 9, 9, 9]
    map1[num][y[num]][x[num]] = 2                    # 2 marks the player
    for i in range(10):
        if i % 2 == 0:
            for j in range(7):
                # BUGFIX: the cell was assigned 1 twice in a row; once suffices.
                map1[num][i][j] = 1                  # 1 marks a wall
            map1[num][i][random.randrange(1, 6)] = 0  # punch one random hole
    for i in range(10):
        map1[num][i][0] = 1                          # left border
        map1[num][i][6] = 1                          # right border
def findhole(num):
    """Return the column of the hole in the wall row just above player *num*.

    Returns 99 when that row has more than one open cell, and 0 when it
    has none (matching the original default).
    """
    open_cols = [col for col in range(7) if map1[num][y[num] - 1][col] == 0]
    if len(open_cols) > 1:
        return 99
    return open_cols[-1] if open_cols else 0
def runchromo(num, i):
    """Return gene *i* of chromosome *num* (0 = up, 1 = left, 2 = right).

    Any other stored value yields None, exactly as the original
    if/elif chain did.
    """
    global chromo
    gene_value = chromo[num][i]
    return gene_value if gene_value in (0, 1, 2) else None
def showmap():
    """Clear the console and render generation info plus all 10 boards
    side by side (walls, players, open cells)."""
    global gene, chromo, select
    os.system("cls")
    print(f"GEN : {gene}")
    print(chromo)
    print(select)
    symbols = {1: "■", 2: "★"}  # wall / player; anything else is open
    for row in range(10):
        for board in range(10):
            for col in range(7):
                print(symbols.get(map1[board][row][col], "□"), end=" ")
            print(" ", end=" ")
        print("\n")
def left(num):
    """Move player *num* one cell left when that cell is open."""
    global x, y
    try:
        if map1[num][y[num]][x[num] - 1] != 0:
            return
        map1[num][y[num]][x[num]] = 0
        x[num] -= 1
        map1[num][y[num]][x[num]] = 2
    except IndexError:
        # Off-board moves are simply ignored.
        pass
def right(num):
    """Move player *num* one cell right when that cell is open."""
    global x, y
    try:
        if map1[num][y[num]][x[num] + 1] != 0:
            return
        map1[num][y[num]][x[num]] = 0
        x[num] += 1
        map1[num][y[num]][x[num]] = 2
    except IndexError:
        # Off-board moves are simply ignored.
        pass
def up(num):
    """Move player *num* one cell up when that cell is open."""
    global x, y
    try:
        if map1[num][y[num] - 1][x[num]] != 0:
            return
        map1[num][y[num]][x[num]] = 0
        y[num] -= 1
        map1[num][y[num]][x[num]] = 2
    except IndexError:
        # Off-board moves are simply ignored.
        pass
def down(num):
    """Move player *num* one cell down when that cell is open."""
    global x, y
    try:
        if map1[num][y[num] + 1][x[num]] != 0:
            return
        map1[num][y[num]][x[num]] = 0
        y[num] += 1
        map1[num][y[num]][x[num]] = 2
    except IndexError:
        # Off-board moves are simply ignored.
        pass
# Chromosomes: per agent, 3 genes (0 = up, 1 = left, 2 = right) plus a 4th
# slot that later stores the agent's final y coordinate (its fitness).
chromo = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0],
          [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]  # 0 = up, 1 = left, 2= right
new_chromo = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0],
              [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]  # last slot holds the y coordinate
# Random initial population
for i in range(10):
    for j in range(3):
        chromo[i][j] = random.randint(0, 2)
count = 0
num = 0
hole = 0
mutation = 0.1  # probability that a gene mutates
select = [0, 0]  # indices of the selected dominant chromosomes
gene = 0  # generation counter
rank = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]  # fitness: final y per agent (lower is better)
x = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3]
y = [9, 9, 9, 9, 9, 9, 9, 9, 9, 9]
# NOTE(review): x_1/y_1 alias the very same list objects as x/y, so the
# "no movement" comparison below is always True -- probably meant list(x).
x_1 = x
y_1 = y
map1 = np.zeros([10, 10, 7])  # 10 boards of 10 rows x 7 columns
while True:
    map1 = np.zeros([10, 10, 7])
    for i in range(10):
        formatmap(i)  # rebuild every board for this generation
    showmap()
    for start in range(27):
        # One step per agent, driven by its chromosome: gene 0 when at the
        # hole column (or no unique hole), gene 1 when right of it,
        # gene 2 when left of it.
        for num in range(10):
            hole = findhole(num)
            if y[num] == 0:
                continue  # already at the top
            elif x[num] == hole:
                if runchromo(num, 0) == 0:
                    up(num)
                elif runchromo(num, 0) == 1:
                    left(num)
                elif runchromo(num, 0) == 2:
                    right(num)
            elif hole == 99:
                if runchromo(num, 0) == 0:
                    up(num)
                elif runchromo(num, 0) == 1:
                    left(num)
                elif runchromo(num, 0) == 2:
                    right(num)
            elif x[num] > hole:
                if runchromo(num, 1) == 0:
                    up(num)
                elif runchromo(num, 1) == 1:
                    left(num)
                elif runchromo(num, 1) == 2:
                    right(num)
            elif x[num] < hole:
                if runchromo(num, 2) == 0:
                    up(num)
                elif runchromo(num, 2) == 1:
                    left(num)
                elif runchromo(num, 2) == 2:
                    right(num)
            chromo[num][3] = y[num]  # record fitness (current height)
            rank[num] = chromo[num][3]
        showmap()
        sleep(0.2)
        if x[:] == x_1[:] and y[:] == y_1[:]:
            break  # intended: stop stepping when nobody moved (see NOTE above)
        x_1[:] = x[:]
        y_1[:] = y[:]
    if sum(rank) <= 1:
        # Essentially everyone reached the top row -- finished.
        showmap()
        print("끝")
        break
    # Selection: the two agents with the smallest final y win
    for i in range(2):
        select[i] = np.argsort(rank)[i]
    # Crossover from the winners, with per-gene mutation
    for i in range(10):
        for j in range(3):
            if random.random() < mutation:
                new_chromo[i][j] = random.randint(0, 2)
            else:
                new_chromo[i][j] = chromo[select[random.randint(0, 1)]][j]
    chromo = copy.deepcopy(new_chromo)
    gene += 1
    if gene > 100:
        break  # safety cap on generations
showmap()
print("끝")
|
# Shared state for the candidate-key search: memo storage and the number
# of columns in the relation (set by solution()).
cache = []
MAXLEN = 0


def checkUni():
    """Stub: intended uniqueness check for a column combination (not implemented)."""
    pass
def bfs(n):
    """Recursively sum over every index chain starting after *n* up to the
    global MAXLEN; returns 0 at the final index (base case)."""
    global MAXLEN
    if n == MAXLEN - 1:  # termination: reached the last index
        return 0
    return sum(bfs(nxt) for nxt in range(n + 1, MAXLEN))
def solution(relation):
    """Entry point: record the column count in the global MAXLEN.

    The candidate-key counting itself is not implemented, so the answer
    is always 0 (unchanged behaviour).
    """
    global MAXLEN
    MAXLEN = len(relation[0])
    answer = 0
    return answer
#!/usr/bin/env python
"""
_Test_
Component that can parse a cvs log
and generate a file for generating test that map
to developers responsible for the test.
"""
from __future__ import print_function
import os
import unittest
import WMCore.WMInit
try:
from commands import getstatusoutput
except ImportError:
# python3
from subprocess import getstatusoutput
class Test:
    """
    _Test_

    Runs a suite of (testCase, developer) pairs, parses a cvs log to map
    test files to the developers who touched them, and generates a test
    script plus per-level failure reports.
    """

    def __init__(self, tests=None, logFile='failures3.log'):
        """
        :param tests: list of (testCase, developerName) pairs to run.
        :param logFile: destination file for the level-3 (test run) report.
        """
        # BUGFIX: `tests=[]` was a shared mutable default argument.
        self.tests = [] if tests is None else tests
        self.failures = {}        # developer -> [(testName, message), ...]
        self.errors = {}
        self.totalTests = 0
        self.totalErrors = 0
        self.totalFailures = 0
        self.logFile = logFile
        # files used for testing: test file path -> {author: vote count}
        self.testFile = {}
        self.cvsLog = None
        self.moduleCut = None

    def run(self):
        """
        Run over all the tests added to this suite and aggregate results.
        """
        for test in self.tests:
            print('*********************Test: '+test[0].__class__.__name__+ ' Developer: '+test[1])
            try:
                testSuite = unittest.TestSuite()
                test[0].developer = test[1]
                testSuite.addTest(test[0])
                testResult = unittest.TestResult()
                self.testResult = testSuite.run(testResult)
                self.summarizeTest()
            except Exception as ex:
                # The test did not plug into the framework properly; record a
                # synthetic failure against its developer.
                customReport = {}
                customReport['developer'] = test[1]
                customReport['class'] = test[0].__class__.__name__
                msg = """
Test framework error! Did you use the proper test classes? """
                customReport['msg'] = msg + ' '+str(ex)
                self.summarizeTest(customReport)
        # call the script we use for cleaning the backends:
        # FIXME: need to add something for oracle too.
        print('Cleaning database backends')
        command = os.path.join(WMCore.WMInit.getWMBASE(), '/standards/./cleanup_mysql.sh')
        result = getstatusoutput(command)
        for entry in result:
            print(str(entry))

    def parseCVS(self, cvsLog, pathCut, moduleCut, maxVotes):
        """
        Parses a cvs log to information to generate a style quality test.

        :param pathCut: number of leading path components to strip.
        :param moduleCut: extra components that count as the module prefix.
        :param maxVotes: cap on how many authors may vote per file.
        """
        self.cvsLog = cvsLog
        # pathCut cuts the path from
        # e.g. cvmsserver/repositories/CMSSW/WMCore/src/python/WMCore)
        # to src/python/WMCore
        # ensures that non relevant modules are not incorporated. e.g.
        # src/python/WMCore becomes WMCore
        self.moduleCut = moduleCut
        # maxVotes: maximum number of authors for voting.
        logFile = open(self.cvsLog, 'r')
        nl = logFile.readline()
        state = 'file'
        # reset the vote
        vote = 0
        # a list on who we should vote
        curFile = ''
        firstAuthor = True
        while nl:
            # locate the cvs files
            first = 0
            if nl.find('RCS file:') == 0:
                firstAuthor = True
                # reset our vote structure
                vote = 0
                # filter the file path:
                path = nl.split(' ')[2].split(',')[0]
                # check if it is a test (.py) file
                if path.endswith('_t.py'):
                    # split it and start building modules
                    parts = path.split('/')
                    moduleName = ''
                    # do not include the actual file for style testing
                    # (only modules)
                    # BUGFIX: xrange -> range (Python 3; file targets py3 via
                    # __future__ / subprocess.getstatusoutput fallback).
                    for index in range(0, len(parts)):
                        # we cut off part of the path
                        if index > pathCut:
                            moduleName = os.path.join(moduleName, parts[index])
                            if moduleName not in self.testFile and \
                               index > pathCut + self.moduleCut and\
                               index == (len(parts)-1):
                                self.testFile[moduleName] = {}
                                curFile = moduleName
                    # now we need to find authors and let them vote.
                    state = 'authors'
                    # reset the vote
                    vote = 0
            if nl.find('date:') == 0 and state == 'authors':
                author = nl.split(' ')[6].split(';')[0]
                # if this is the first author, check the file was not removed.
                if firstAuthor:
                    firstAuthor = False
                    modState = nl.split(' ')[9].split(';')[0]
                    # if removed in cvs, drop it here too.
                    if modState == 'dead':
                        del self.testFile[curFile]
                        state = 'file'
                if state != 'file':
                    # start voting:
                    if author not in self.testFile[curFile]:
                        self.testFile[curFile][author] = 0
                    self.testFile[curFile][author] += 1
                    # we voted
                    vote += 1
                    # once we reach maxVotes we are done with this file
                    # NOTE(review): the original guard was `vote < maxVotes`,
                    # which stopped after the FIRST vote whenever maxVotes > 1;
                    # it looked inverted and is corrected here.
                    if vote >= maxVotes:
                        state = 'file'
                        vote = 0
            nl = logFile.readline()
        # BUGFIX: close the log file (was leaked).
        logFile.close()
        # we are done voting

    def missingTests(self, filename):
        """
        Parses the cvs log and finds what modules have missing
        tests. (Not implemented.)
        """
        pass

    def generate(self, filename):
        """
        Generates a python file that uses the result of parsing
        and this class to generate a script that does suite tests.
        """
        import subprocess  # local import: os.popen3 was removed in Python 3
        testsFile = open(filename, 'w')
        head = """#!/usr/bin/env python
from WMQuality.Test import Test
"""
        testsFile.writelines(head)
        testsFile.writelines('\n')
        # winners are successful imports
        winners = {}
        # losers are unsuccessful imports which are reported (level 1)
        losers = {}
        losersCum = {}
        # make the import statements
        for testFile in self.testFile.keys():
            # find the author with the most votes per module:
            votes = 0
            winner = ''
            for voter in self.testFile[testFile].keys():
                if self.testFile[testFile][voter] > votes:
                    votes = self.testFile[testFile][voter]
                    winner = voter
            # make the import:
            parts = testFile.split('/')
            importStmt = 'from '
            # BUGFIX: xrange -> range for Python 3.
            for part in range(0, len(parts)-1):
                if part > self.moduleCut:
                    importStmt += parts[part]+"."
            importStmt += parts[-1].split('.')[0]
            importStmt += ' import '
            testObject = parts[-1].split('_t.py')[0] + 'Test'
            importStmt += testObject
            # test if the import works. If it does not work we report it.
            # BUGFIX: os.popen3 no longer exists; use subprocess with
            # text-mode pipes instead.
            pythonCmd = "python -c '"+importStmt+"'"
            proc = subprocess.Popen(pythonCmd, shell=True,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    universal_newlines=True)
            stderr = proc.stderr
            errorLine = stderr.readline()
            # if no error register it
            if not errorLine:
                winners[testObject] = winner
                testsFile.writelines(importStmt+'\n')
            # if error report it
            else:
                errorMsg = errorLine
                while True:
                    errorLine = stderr.readline()
                    if not errorLine:
                        break
                    errorMsg += errorLine
                if winner not in losers:
                    losers[winner] = []
                    losersCum[winner] = 0
                losers[winner].append([testFile, importStmt, errorMsg])
                losersCum[winner] += 1
            proc.communicate()  # drain pipes and reap the child process
        # make the object instantiations.
        # it is done with try/except clauses to test instantiation (level 2)
        # NOTE(review): the generated script uses Python 2 syntax
        # ('except Exception,ex:', dict.has_key) -- it only runs under py2.
        testsFile.writelines('\nerrors = {}\n')
        testsFile.writelines('tests = []\n')
        testsFile.writelines('\n\n')
        for testObject in winners:
            testsFile.writelines('try:\n')
            testsFile.writelines(' x='+testObject+'()\n')
            testsFile.writelines(' tests.append((x,"'+\
                winners[testObject]+'"))\n')
            testsFile.writelines('except Exception,ex:\n')
            testsFile.writelines(' if not errors.has_key("'+\
                winners[testObject]+'"):\n')
            testsFile.writelines(' errors["'+\
                winners[testObject]+'"] = []\n')
            testsFile.writelines(' errors["'+ \
                str(winners[testObject])+'"].append(("'+\
                str(testObject)+'",str(ex)))\n')
            testsFile.writelines('\n')
        tail = """
print('Writing level 2 failures to file: failures2.log ')
failures = open('failures2.log','w')
failures.writelines('Failed instantiation summary (level 2): \\n')
for author in errors.keys():
    failures.writelines('\\n*****Author: '+author+'********\\n')
    for errorInstance, errorMsg in errors[author]:
        failures.writelines('Test: '+errorInstance)
        failures.writelines(errorMsg)
        failures.writelines('\\n\\n')
failures.close()
test = Test(tests,'failures3.log')
test.run()
test.summaryText()
"""
        testsFile.writelines(tail)
        testsFile.close()
        # we generated the test file, now generate the report of failed
        # imports.
        print('Writing level 1 failures to file: failures1.log ')
        failures = open('failures1.log','w')
        failures.writelines('Failed import summary (level 1):\n\n')
        for winner in losersCum.keys():
            msg = 'Author: '+winner
            msg += ' Failures: '+str(losersCum[winner])
            failures.writelines(msg+'\n')
        failures.writelines('\nFailed import details:\n\n')
        for winner in losers.keys():
            failures.writelines('************Author: '+winner+'***********\n\n')
            for failed in losers[winner]:
                failures.writelines('File: '+failed[0]+'\n\n')
                failures.writelines('Failed import: '+failed[1]+'\n\n')
                failures.writelines('Error message: \n'+failed[2]+'\n\n')
        # BUGFIX: close the report file (was leaked).
        failures.close()

    def summaryText(self):
        """
        Write the level-3 (test run) summary to self.logFile.
        """
        print('Writing level 3 failures to file: '+self.logFile)
        failures = open(self.logFile,'w')
        failures.writelines('Following tests where run\n\n')
        for test in self.tests:
            failures.writelines(test[0].__class__.__name__+'-->'+test[1]+'\n')
        failures.writelines('\n\n')
        failures.writelines('Failed tests (level 3):\n\n')
        for author in self.failures.keys():
            failures.writelines(author+ \
                ':'+str(len(self.failures[author]))+' failures\n')
        for author in self.errors.keys():
            failures.writelines(author+ \
                ':'+str(len(self.errors[author]))+' errors\n')
        failures.writelines('Failures (level 3):\n\n')
        for author in self.failures.keys():
            failures.writelines('Author: '+author+'\n\n')
            for failure in self.failures[author]:
                failures.writelines('Test: '+failure[0]+'\n\n')
                failures.writelines('Failure: '+failure[1]+'\n\n')
        for author in self.errors.keys():
            failures.writelines('Author: '+author+'\n\n')
            for failure in self.errors[author]:
                failures.writelines('Test: '+failure[0]+'\n\n')
                failures.writelines('Error: '+failure[1]+'\n\n')
        failures.close()

    def summarizeTest(self, customReport=None):
        """
        Aggregates a summary of the test. If the test framework failed
        (e.g. the developer did not inherit from the test object), we need
        to send a custom report.
        """
        # BUGFIX: mutable default argument {} replaced with None; truthiness
        # check is equivalent to the old `!= {}` comparison for dict/None.
        if customReport:
            self.totalFailures += 1
            if customReport['developer'] not in self.failures:
                self.failures[customReport['developer']] = []
            self.failures[customReport['developer']].append(
                [customReport['class'], customReport['msg']])
        else:
            for obj, msg in self.testResult.failures:
                self.totalFailures += 1
                if obj.developer not in self.failures:
                    self.failures[obj.developer] = []
                self.failures[obj.developer].append([obj.__class__.__name__, msg])
            for obj, msg in self.testResult.errors:
                self.totalErrors += 1
                if obj.developer not in self.errors:
                    self.errors[obj.developer] = []
                self.errors[obj.developer].append([obj.__class__.__name__, msg])
|
# %%
# This script will recompute epochs and restore them in memory dir (see deploy.py).
# It uses multiprocessing to operate, and it requires a long time to complete.
# You can run the script as following.
# python recompute_epochs.py >> running.log
# %%
import multiprocessing
from tools.data_manager import DataManager
# %%
# Amplitude rejection thresholds: epochs exceeding any of these are dropped.
reject_criteria = dict(mag=4000e-15, # 4000 fT
                       grad=4000e-13, # 4000 fT/cm
                       eeg=150e-6, # 150 µV
                       eog=250e-6) # 250 µV
# Epoching/filtering parameters for the MEG sessions (magnetometers only).
parameters_meg = dict(picks='mag',
                      stim_channel='UPPT001',
                      l_freq=0.1,
                      h_freq=15.0,
                      tmin=-0.2,
                      tmax=1.2,
                      decim=12,
                      detrend=1,
                      reject=dict(mag=4000e-15),
                      baseline=None)
# Epoching/filtering parameters for the EEG sessions.
parameters_eeg = dict(picks='eeg',
                      stim_channel='from_annotations',
                      l_freq=0.1,
                      h_freq=15.0,
                      tmin=-0.2,
                      tmax=1.2,
                      decim=10,
                      detrend=1,
                      reject=dict(eeg=150e-6),
                      baseline=None)
# %%
def run_subject(name, parameters, recompute=True):
    """Load (and optionally recompute) the epochs for one subject."""
    DataManager(name, parameters=parameters).load_epochs(recompute=recompute)
    print(f'Done {name}.')
# %%
# # pool = []
# Recompute all 10 EEG subjects one after another; the multiprocessing
# variant is kept below, commented out.
for idx in range(1, 11):
    name = f'EEG_S{idx:02d}'
    # Run in sequence
    run_subject(name, parameters_eeg)
    # Run in parallel
    # p = multiprocessing.Process(target=run_subject,
    #                             args=(name, parameters_eeg))
    # p.start()
# %%
# idx = 3
# name = f'MEG_S{idx:02d}'
# loader = FileLoader(name, parameters=parameters_meg)
# loader.load_epochs(recompute=False)
# print(loader.epochs_list)
# t = 5
# includes = [e for e in range(len(loader.epochs_list)) if not e == t]
# excludes = [t]
# a, b = loader.leave_one_session_out(includes, excludes)
# print(a, b)
# %%
# for eid in a.event_id:
# print(eid)
# a[eid].average().plot_joint()
# b[eid].average().plot_joint()
# %%
|
# Primality test by trial division.
# BUGFIX: the original used "a==2" (a no-op comparison, leaving a == 0) which
# made "n % a" raise ZeroDivisionError, and it looped forever once a divisor
# was found (the 'No' branch neither broke nor advanced `a`).
n = int(input())
a = 2
while a <= n:
    if a < n and n % a == 0:
        print('No')    # found a proper divisor: not prime
        break
    if a == n:
        print('YES')   # no divisor up to n-1: prime
        break
    a += 1
# drop_char.py
#
# a function that uses a List Comprehension to take a string and
#returns a list of all lower case strings you can obtain by removing
# a single character.
# Usage:
# % python drop_char.py
#
# Himanshu Mohan, Nov 11, 2019
from typing import List
def drop_char(word: str) -> List[str]:
    "Return all lower case strings obtained by dropping one char"
    lowered = word.lower()
    return [lowered[:i] + lowered[i + 1:] for i in range(len(lowered))]


drop_char('Total')
from rest_framework.response import Response
from rest_framework import status
from rest_framework.decorators import api_view
from packages.models import PackageSettings
from packages.serializers import PackageSettingsSerializer
@api_view(["get", "put", "delete"])
def PackageSettingsView(request):
    """Read/update the calling user's PackageSettings.

    GET returns the settings (404 when absent), PUT creates or updates
    them, DELETE is deliberately rejected with 403.
    """

    def save_or_error_response(save_object):
        # Validate then persist; translate either failure into a 400.
        if not save_object.is_valid():
            return Response(
                {"detail": "not a valid settings"}, status=status.HTTP_400_BAD_REQUEST
            )
        if not save_object.save():
            return Response(
                {"detail": "unable to save the request data"},
                status=status.HTTP_400_BAD_REQUEST,
            )
        return Response(save_object.data)

    def create_or_update_entry(custom_request_data, update=None):
        # update=None -> serializer creates a row; otherwise it updates `update`.
        serializers = PackageSettingsSerializer(update, data=custom_request_data)
        return save_or_error_response(serializers)

    if request.method == "GET":
        queryset = PackageSettings.objects.filter(user__id=request.user.id)
        values = queryset.values()
        # BUGFIX: values()[0] raised IndexError when the user had no settings
        # row yet; report 404 instead of crashing.
        if not values:
            return Response(
                {"detail": "setting not found"}, status=status.HTTP_404_NOT_FOUND
            )
        data = values[0]
        data["user"] = data.pop("user_id")
        serializers = PackageSettingsSerializer(data=data)
        if not serializers.is_valid():
            return Response(
                {"detail": "setting not found"}, status=status.HTTP_404_NOT_FOUND
            )
        return Response(serializers.data, status=status.HTTP_200_OK)

    if request.method == "PUT":
        # Force the row to belong to the authenticated user.
        request.data.update({"user": request.user.id})
        queryset = PackageSettings.objects.filter(user__id=request.user.id)
        return create_or_update_entry(request.data, queryset.first())

    if request.method == "DELETE":
        return Response(
            {"detail": "method is not allowed"}, status=status.HTTP_403_FORBIDDEN
        )

    return Response(status=status.HTTP_204_NO_CONTENT)
|
#!/usr/bin/env python
from setuptools import Command, find_packages, setup
version = '0.8.10'
class PyTest(Command):
    """setuptools command that delegates `python setup.py test` to pytest."""

    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        import subprocess
        import sys
        try:
            from py import test as pytest  # only verifies pytest is installed
        except ImportError:
            raise Exception('Running tests requires pytest.')
        # Propagate the pytest exit status to the caller.
        raise SystemExit(subprocess.call([sys.executable, '-m', 'py.test']))
# Distribution metadata; `python setup.py test` is wired to the PyTest command.
setup(
    name = 'pystachio',
    version = version,
    description = 'type-checked dictionary templating library',
    url = 'http://github.com/wickman/pystachio',
    author = 'Brian Wickman',
    author_email = 'wickman@gmail.com',
    license = 'MIT',
    packages = find_packages(),
    py_modules = ['pystachio'],
    zip_safe = True,
    cmdclass = {
        'test': PyTest
    },
    scripts = [
        'bin/pystachio_repl'
    ],
    classifiers = [
        'Programming Language :: Python :: 3',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
    ],
)
|
import numpy as np
from ..psychopy.psychopy_line import psychopy_line
from .zollner_parameters import _zollner_parameters
def _zollner_psychopy(window, parameters=None, **kwargs):
    """Draw the Zollner illusion: hatched black distractors over/under two
    red target lines. Builds default parameters when none are given."""
    if parameters is None:
        parameters = _zollner_parameters(**kwargs)

    # Black distractor hatches, top then bottom for each index (original order)
    for i in range(parameters["Distractors_n"]):
        for pos in ["_Top_", "_Bottom_"]:
            psychopy_line(window,
                          x1=parameters["Distractors" + pos + "x1"][i],
                          y1=parameters["Distractors" + pos + "y1"][i],
                          x2=parameters["Distractors" + pos + "x2"][i],
                          y2=parameters["Distractors" + pos + "y2"][i],
                          adjust_height=True,
                          color="black", size=5)

    # Red target lines, bottom first (original order)
    for pos in ["Bottom", "Top"]:
        psychopy_line(window,
                      x1=parameters[pos + "_x1"],
                      y1=parameters[pos + "_y1"],
                      x2=parameters[pos + "_x2"],
                      y2=parameters[pos + "_y2"],
                      adjust_height=True,
                      color="red", size=5)
|
# @Title: 同构字符串 (Isomorphic Strings)
# @Author: 2464512446@qq.com
# @Date: 2020-12-28 16:12:46
# @Runtime: 48 ms
# @Memory: 17.1 MB
class Solution:
    def isIsomorphic(self, s: str, t: str) -> bool:
        """Return True if s and t are isomorphic (a bijection between the
        characters of s and the characters of t maps s onto t).

        The original implementation relied on ``defaultdict`` without
        importing it (NameError at runtime); this version needs no imports
        and checks the mapping in both directions in a single pass.
        """
        if len(s) != len(t):
            # Different lengths can never be isomorphic (the original
            # index-signature comparison also returned False here).
            return False
        forward = {}   # char of s -> char of t
        backward = {}  # char of t -> char of s
        for a, b in zip(s, t):
            # setdefault records the first pairing; any later conflict in
            # either direction breaks the bijection.
            if forward.setdefault(a, b) != b or backward.setdefault(b, a) != a:
                return False
        return True
|
import uuid
from http import HTTPStatus
from flask_restful import Resource, reqparse, fields, marshal
from flask_jwt_extended import (
create_access_token,
create_refresh_token,
jwt_required,
jwt_refresh_token_required,
get_jwt_identity,
get_raw_jwt
)
from flask import current_app, request, make_response, jsonify
from app.models import Comment as CommentModel
parser = reqparse.RequestParser()
parser.add_argument('content', help='Required', required=True)
permitted = {
'id': fields.String,
'content': fields.String,
}
class Comment(Resource):
    """CRUD operations for comments; every handler requires a valid JWT.

    The original class decorated three methods with ``@api_bp.route`` — a
    name never imported in this module (NameError at import time) and a
    pattern that does not apply to flask-restful Resource methods.  The
    decorators are removed and ``comment_id`` (which was undefined in those
    bodies) is now an explicit argument.
    """
    method_decorators = [jwt_required]

    def post(self):
        """Create a comment from the request's required 'content' field."""
        data = parser.parse_args()
        name = data['content']
        try:
            new_comment = CommentModel(
                name=name,
            )
            new_comment.save()
            return {
                'comment': marshal(new_comment, permitted)
            }, HTTPStatus.CREATED
        except Exception as error:
            return {
                'message': str(error)
            }, HTTPStatus.INTERNAL_SERVER_ERROR

    def update_comment(self, comment_id):
        """Update an existing comment's content."""
        data = parser.parse_args()
        comment_model = CommentModel.query.get(comment_id)
        comment_model.update(
            # The parser only defines 'content'; the original read
            # data['title'], which always raised KeyError.
            content=data['content']
        )
        return make_response(
            jsonify({
                'message': {
                    'title': 'Successfully changed'
                }
            }),
            HTTPStatus.OK  # 201 CREATED is for resource creation, not updates
        )

    def delete_comment(self, comment_id):
        """Delete a comment by id."""
        comment_model = CommentModel.query.get(comment_id)
        comment_model.delete()
        return make_response(
            jsonify({
                'message': {
                    'comment': 'Successfully deleted'
                }
            }),
            # HTTPStatus.DELETED does not exist; the original raised
            # AttributeError here.
            HTTPStatus.OK
        )

    def get_comment(self, comment_id):
        """Return a single comment by id."""
        comment_model = CommentModel.query.get(comment_id)
        return jsonify(json_comment=comment_model)

    def get(self):
        """Return all comments."""
        comments = CommentModel.query.all()
        return jsonify(json_comment=comments)
if __name__ == "__main__":
T = 0
while True:
T += 1
s = input()
if s == "END":
break
sections = s.split("*")
if s[0] == "*":
sections = sections[1:]
if s[-1] == "*":
sections = sections[:-1]
if sections:
target = len(sections[0])
is_even = True
for section in sections:
if len(section) != target:
is_even = False
break
if is_even:
print(f"{T} EVEN")
else:
print(f"{T} NOT EVEN")
|
"""
Make a plot of the UVIS cosmic ray rate as a function of spacecraft
suborbital position. Shows that there is an enhancement in the CR
rate at longitudes near the magnetic poles, not just in the SAA.
"""
import numpy as np
from astropy.coordinates import Angle
import ephem
def archived_tle():
    """
    Return (epochs_mjd, tle_strings) for the HST TLEs from space-track.

    https://www.space-track.org/basicspacedata/query/class/tle/EPOCH/2015-01-01--2016-12-31/NORAD_CAT_ID/20580/orderby/TLE_LINE1%20ASC/format/tle
    HST_TLE.txt:
    https://www.space-track.org/basicspacedata/query/class/tle/EPOCH/2009-08-01--2016-12-31/NORAD_CAT_ID/20580/orderby/TLE_LINE1%20ASC/format/tle
    """
    from skyfield.api import load
    from skyfield.constants import AU_KM
    import astropy.time

    ts = load.timescale()
    eph = load('de421.bsp')
    earth = eph['earth']

    with open('HST_TLE_WFC3.txt') as fp:
        tle_lines = [line.strip() for line in fp]

    n_entries = len(tle_lines) // 2  # two TLE lines per record
    epochs = []
    tle_texts = []
    for idx in range(n_entries):
        print(idx, n_entries)
        text = 'HST\n' + '\n'.join(tle_lines[idx * 2:idx * 2 + 2])
        sat = earth.satellite(text)
        # Epoch of each TLE, converted to MJD for later interpolation.
        epoch = astropy.time.Time(sat.epoch.utc_datetime())
        epochs.append(epoch.mjd)
        tle_texts.append(text)
    return np.array(epochs), np.array(tle_texts)
def go():
    """Parse the TLE archive once and cache it as .npy for faster I/O."""
    epochs, texts = archived_tle()
    np.save('HST_TLE_WFC3.npy', [epochs, texts])
def get_hst_positions(header=None, interval=None, dt=10, verbose=False):
    """
    Get sublat/sublon of HST based on EXPSTART/STOP keywords in a header.

    Parameters
    ----------
    header : FITS header with 'EXPSTART'/'EXPEND' (MJD); used when
        ``interval`` is None.
    interval : (t0, t1) pair of MJDs overriding the header.
    dt : time step in seconds.
    verbose : print per-step progress.

    Returns
    -------
    times : ndarray of MJD sample times.
    pos : (N, 2) ndarray of (sublat, sublng) in degrees; longitudes are
        unwrapped past 360 so the track is monotonic across the seam.
    """
    import ephem
    from astropy.coordinates import Angle
    import astropy.time
    import astropy.units as u
    #tle_times, tle_strings = np.load('2015-16_TLEs.npy')
    # allow_pickle is required to read back the object array of TLE strings
    # (np.load defaults to allow_pickle=False since NumPy 1.16.3).
    tle_times, tle_strings = np.load('HST_TLE_WFC3.npy', allow_pickle=True)
    if interval is None:
        t0 = header['EXPSTART']
        t1 = header['EXPEND']
    else:
        t0, t1 = interval
    times = np.arange(t0, t1, dt/86400.)
    pos = np.zeros((len(times), 2))
    for i, t in enumerate(times):
        if verbose:
            print(i, t, len(times))
        # Use the first TLE whose epoch is after t.  (np.cast was removed in
        # NumPy 2.0; astype is the supported spelling.)
        tle_str = tle_strings[tle_times.astype(float) > t][0].split('\n')
        hst = ephem.readtle(tle_str[0], tle_str[1], tle_str[2])
        tx = astropy.time.Time(t, format='mjd')
        hst.compute(tx.datetime)
        pos_i = np.array([Angle(hst.sublat*180/np.pi*u.deg).value, Angle(hst.sublong*180/np.pi*u.deg).wrap_at(360*u.deg).value])
        pos[i,:] = pos_i
    # Unwrap longitudes so a track crossing 360 deg stays monotonic.
    if pos[-1,1] < pos[0,1]:
        pos[pos[:,1] < pos[0,1], 1] += 360
    return times, pos
# t0x = astropy.time.Time(header['EXPSTART'], format='mjd')
# hst.compute(t0x.datetime)
# ll0 = np.array([Angle(hst.sublat*180/np.pi*u.deg).value, Angle(hst.sublong*180/np.pi*u.deg).wrap_at(360*u.deg).value])
#
# t1x = astropy.time.Time(header['EXPEND'], format='mjd')
# hst.compute(t1x.datetime)
# ll1 = np.array([Angle(hst.sublat*180/np.pi*u.deg).value, Angle(hst.sublong*180/np.pi*u.deg).wrap_at(360*u.deg).value])
#
# start_stop = np.array([ll0, ll1])
# return start_stop
def for_pmcc():
    """
    Compute telescope sub-earth positions every minute over the WFC3 era
    and save them as FITS/ASCII tables.

    NOTE(review): the docstring originally said "since WFC3 SMOV (May 24,
    2009)" but t0 below is 2009-05-14 — confirm which date is intended.
    """
    import astropy.time
    import astropy.table
    # Full-mission sweep at one sample per dt seconds.
    t0 = astropy.time.Time('2009-05-14')
    t1 = astropy.time.Time('2016-12-20')
    dt = 60#*60*24
    times, coo = get_hst_positions(header=None, interval=[t0.mjd, t1.mjd], dt=dt, verbose=True)
    # coo columns are (sublat, sublng); write as (mjd, sublng, sublat).
    tab = astropy.table.Table([times, coo[:,1], coo[:,0]], names=('mjd', 'sublng', 'sublat'))
    tab.write('HST_SubEarth.fits')
    ### Compute at fixed times
    # Second pass: a +/-30 minute window around each requested MJD in pmcc.dat.
    dt = 60.
    dat = astropy.table.Table.read('pmcc.dat', format='ascii.commented_header')
    mjd = []
    lat = []
    lng = []
    N = len(dat)
    for i, t0 in enumerate(dat['mjd']):
        print('\nNext', i, N, t0)
        dt0 = 1./86400.*60*30  # 30 minutes expressed in days
        times, coo = get_hst_positions(header=None, interval=[t0-dt0, t0+dt0], dt=dt, verbose=True)
        mjd = np.append(mjd, times)
        lat = np.append(lat, coo[:,0])
        lng = np.append(lng, coo[:,1])
    tab = astropy.table.Table([mjd, lat, lng], names=('mjd', 'sublat', 'sublng'))
    tab.write('HST_SubEarth_pmcc.fits')
    # Fixed-width column formats for the ASCII export.
    tab['mjd'].format = '16.5f'
    tab['sublat'].format = '-13.2f'
    tab['sublng'].format = '-13.2f'
    tab.write('HST_SubEarth_pmcc.dat', format='ascii.commented_header')
def test():
    """
    Scratch/debug helper: overplot the (Longitude, Latitude) track from a
    jitter file against the track computed by get_hst_positions().

    NOTE(review): relies on ``pyfits``, ``glob``, ``plt`` and
    ``astropy.table`` being available in the calling namespace — none are
    imported at module level — and on hard-coded local paths, so this only
    runs in the author's interactive environment.
    """
    jit = pyfits.open('/Users/brammer/Research/HST/UDS_Arc/LymanALpha/RAW/id8w04010_jit.fits')
    jit = pyfits.open('/Users/brammer/Research/HST/UDS_Arc/LymanALpha/RAW/id8w01010_jit.fits')
    jit = pyfits.open('/Users/brammer/Research/HST/UDS_Arc/LymanALpha/RAW/id8w02010_jit.fits')
    # Only the last-opened jitter file is actually used below.
    jit = pyfits.open('/Users/brammer/Research/HST/UDS_Arc/LymanALpha/RAW/id8w03010_jit.fits')
    jit = pyfits.open('/Users/brammer/3DHST/Spectra/Work/Grizli/WISP/RAW/id1kf0010_jit.fits')
    ix = 1
    # Matching FLC/FLT exposure for the selected jitter extension.
    flc = pyfits.open(glob.glob('{0}/{1}q_fl*.fits*'.format(os.path.dirname(jit.filename()), jit[ix].header['EXPNAME'][:-1]))[0])
    tab = astropy.table.Table.read(jit[ix])
    pl = plt.plot(tab['Longitude'], tab['Latitude'], linewidth=3, alpha=0.5)
    times, coo = get_hst_positions(flc[0].header)
    plt.plot(coo[:,1], coo[:,0], color=pl[0].get_color())
def show_darks():
    """
    Plot the cosmic-ray fraction measured in UVIS dark frames against the
    HST sub-earth track, with SAA model contours overlaid.
    """
    import glob  # not imported at module level; needed for file discovery
    import costools
    import scipy.ndimage as nd
    import shapely
    from shapely.geometry import Polygon
    from descartes import PolygonPatch
    import astropy.io.fits as pyfits
    import matplotlib.pyplot as plt  # not imported at module level
    import matplotlib.colors
    import matplotlib.cm

    files = glob.glob("/grp/hst/wfc3s/bourque/blvs/ctecorr/*tmp.fits")
    files = glob.glob('*tmp.fits')

    ### Measure CR fraction
    files.sort()
    cr_fraction = np.zeros(len(files))
    headers = []
    for i, file in enumerate(files):
        im = pyfits.open(file)
        RN = 3.5  # read noise threshold reference, e-
        # CR-flagged pixels (DQ bit 8192) above 2x RN; minimum_filter shrinks
        # the flagged regions by requiring a 2-pixel neighborhood.
        dq = ((nd.minimum_filter(im['DQ',1].data, size=2) & 8192) > 0) & (im['SCI',1].data > 2*RN)
        # Fraction of flagged pixels, normalized to a 1200 s exposure.
        cr_fraction[i] = dq.sum()*1./dq.size/im[0].header['EXPTIME']*1200
        headers.append(im[0].header)
        print(i, file, cr_fraction[i])

    plt.set_cmap('cubehelix')
    vm = [2, 5.4]  # color scale limits, CR fraction in percent
    cm = plt.get_cmap()
    cnorm = matplotlib.colors.Normalize(vmin=vm[0], vmax=vm[1])

    fig = plt.figure(figsize=[8,4])
    import matplotlib.gridspec
    gs = matplotlib.gridspec.GridSpec(1,2, width_ratios=[1,2])

    # Left panel: histogram of CR fractions.
    ax = fig.add_subplot(gs[0])
    ax.hist(cr_fraction[:len(headers)]*100, range=[0, 7], bins=50, alpha=0.5, color='k')
    ax.set_xlabel('UVIS CR fraction\n% pixels in 1200 s')
    ax.set_ylabel('N')

    # Right panel: sub-earth tracks colored by CR fraction.
    ax = fig.add_subplot(gs[1])
    for i, file in enumerate(files):
        print(i, file)
        times, coo = get_hst_positions(headers[i])
        N = coo.shape[0]//2
        c_i = cm(cnorm(cr_fraction[i]*100))
        # Plot each track twice, offset by 360 deg, to fill the plot window.
        ax.plot(coo[:,1], coo[:,0], color=c_i, alpha=0.5, zorder=2)
        ax.plot(coo[:,1]+360, coo[:,0], color=c_i, alpha=0.5, zorder=2)

    # Overlay all SAA model contours.
    for i in range(33):
        saa = np.array(costools.saamodel.saaModel(i))
        saa[:,1][saa[:,1] < 50] += 360
        poly = Polygon(saa[:,::-1])
        patch = PolygonPatch(poly, color='k', alpha=0.05, zorder=-1)
        ax.add_patch(patch)

    ax.set_xlim(180, 540)
    ax.set_ylim(-35, 35)
    ax.set_xlabel('SubLng')
    ax.set_ylabel('SubLat')
    ax.text(320, -25, 'SAA', ha='center', va='center', color='w')
    xarr = np.array(360+np.array([-150,-100,-50,0,50,100,150]))
    ax.set_xticks(xarr)
    ax.set_xticklabels(['150W', '100', '50', 'PM', '50E', '100', '150'])
    # magnetic poles
    ax.scatter([360-72.62, 360+107.38], [32, -32], marker='+', s=80, color='k')
    ax.grid()

    # 'sc' was previously only defined by commented-out scatter() calls, so
    # plt.colorbar(sc) raised NameError.  Build an explicit mappable instead.
    sc = matplotlib.cm.ScalarMappable(norm=cnorm, cmap=cm)
    sc.set_array([])
    cb = plt.colorbar(sc, ax=ax)
    cb.set_label('CR fraction')
    gs.tight_layout(fig, pad=0.1)
|
from abc import abstractmethod
from Domain.FacesCollection import FacesCollection
class FaceHeuristic:
    """Base class for face-filtering heuristics."""

    def __init__(self, type: str):
        self.type = type

    @abstractmethod
    def filterFaces(self, faceCollection: FacesCollection) -> FacesCollection:
        """Return a (possibly reduced) collection of faces."""
        pass
class NonHeuristic(FaceHeuristic):
    """Pass-through heuristic: keeps every detected face."""

    def __init__(self, type: str):
        super().__init__(type)

    def filterFaces(self, faceCollection: FacesCollection) -> FacesCollection:
        return faceCollection
class DimensionBasedHeuristic(FaceHeuristic):
    """Keeps only faces that pass size and confidence thresholds."""

    def __init__(self, type: str):
        super().__init__(type)
        # Minimal |height - posY| for a face to count — presumably pixels;
        # TODO confirm the unit against the detector output.
        self.thresholdHeight: int = 50
        # Minimal detector confidence score.
        self.thresholdScore: float = 0.80

    def filterFaces(self, faceCollection: FacesCollection) -> FacesCollection:
        if faceCollection.isEmpty(): return faceCollection
        kept = [
            face for face in faceCollection.facesCollection
            if abs(face.height - face.posY) > self.thresholdHeight
            and face.score > self.thresholdScore
        ]
        filtered = FacesCollection()
        filtered.addFaces(kept)
        return filtered
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import copy
TYPE_A = 1
TYPE_B = 2
def entropy(prob):
    """Shannon entropy (in nats) of a 1-D probability vector.

    Probabilities are squeezed into (1e-10, 1 - 1e-10) so the log never
    sees an exact 0 or 1.
    """
    safe = prob * (1 - 2e-10) + 1e-10
    return -torch.dot(safe, torch.log(safe))
class DcpConfig():
    """Bundle of decomposition settings shared by the dynamic Conv2d/Linear
    layers: segment parameter, split strategy, and an optional gate tensor
    to reuse across layers."""

    def __init__(self, n_param=1, split_type=TYPE_A, reuse_gate=None):
        self.n_param = n_param
        self.split_type = split_type
        self.reuse_gate = reuse_gate

    def copy(self, reuse_gate=None):
        """Shallow-copy this config, swapping in a different reuse_gate."""
        duplicate = copy.copy(self)
        duplicate.reuse_gate = reuse_gate
        return duplicate

    def __str__(self):
        return '{0}; {1}; {2}'.format(self.n_param, self.split_type, self.reuse_gate)
def assign_gate(n_seg, reuse_gate=None, state='uniform'):
    """Return a trainable gate-logit vector of length ``n_seg``.

    A supplied ``reuse_gate`` is returned untouched.  The 'uniform' state
    yields all-zero logits (uniform softmax); any other state puts all the
    softmax mass on the last segment via a large final logit.
    """
    if reuse_gate is not None:
        return reuse_gate
    if state == 'uniform':
        return torch.zeros([n_seg], requires_grad=True)
    return torch.tensor([0] * (n_seg - 1) + [1000], dtype=torch.float,
                        requires_grad=True)
class Conv2d(nn.Module):
    """Conv2d wrapper whose effective output width is selected differentiably.

    Output channels are grouped into ``n_seg`` nested segments; a learned
    ``gate`` (softmaxed, optionally with Gumbel noise) weights binary masks
    that keep the first k segments, giving a soft, trainable channel count.
    The forward pass also returns an expected-FLOPs estimate under the
    current gate distribution.
    """
    def __init__(self, in_planes, out_planes, kernel_size, stride, padding, bias, dcfg=None):
        # done: share mask is not enough,
        # done: consider wrap the additional parameters.
        # todo: degenerate to common Conv2d while dcfg is None or abnormal
        # todo: actually, dcfg == None is not allowed
        super(Conv2d, self).__init__()
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                              stride=stride, padding=padding, bias=bias)
        self.in_planes = in_planes
        self.out_planes = out_planes
        self.kernel_size = kernel_size
        if dcfg is None:
            # Degenerates to a plain convolution: no mask/gate machinery.
            return
        self.dcfg = dcfg.copy()
        if dcfg.split_type not in ['fix_seg_size', 'fix_gp_number', TYPE_A, TYPE_B]:
            raise ValueError('Only \'fix_seg_size\' and \'fix_gp_number\' are supported.')
        # TYPE_B / 'fix_seg_size': n_param is the segment size, the number of
        # segments follows; TYPE_A / 'fix_gp_number': n_param is the number
        # of segments, the segment size follows.
        if dcfg.split_type == 'fix_seg_size' or dcfg.split_type == TYPE_B:
            in_seg_sz = dcfg.n_param
            in_n_seg = int(np.ceil(in_planes / dcfg.n_param))
            self.seg_sz = dcfg.n_param
            self.n_seg = int(np.ceil(out_planes / dcfg.n_param))
        else:
            in_n_seg = dcfg.n_param
            in_seg_sz = int(np.ceil(in_planes / dcfg.n_param))
            self.n_seg = dcfg.n_param
            self.seg_sz = int(np.ceil(out_planes / dcfg.n_param))
        assert self.out_planes >= self.n_seg
        if in_n_seg <= self.in_planes:
            # Cumulative channel counts of the input segments, used to weight
            # the expected-FLOPs computation by the upstream distribution.
            self.in_plane_list = torch.Tensor(self.__calc_seg_list(self.in_planes, in_n_seg, in_seg_sz)).cuda()
        self.out_plane_list = self.__calc_seg_list(self.out_planes, self.n_seg, self.seg_sz)
        self.mask = self.__init_mask()
        self.gate = self.__init_gate(dcfg.reuse_gate)
        self.out_plane_list = torch.Tensor(self.out_plane_list).cuda()

    def __calc_seg_list(self, planes, n_seg, seg_sz):
        """Split ``planes`` channels into ``n_seg`` segments of size seg_sz
        or seg_sz-1 and return the cumulative (tail) channel counts."""
        seg_sz_num = planes + n_seg - n_seg * seg_sz
        seg_sub_sz_num = n_seg - seg_sz_num
        seg_list = [seg_sz] * seg_sz_num + [seg_sz - 1] * seg_sub_sz_num
        seg_tail_list = [sum(seg_list[:i + 1]) for i in range(n_seg)]
        return seg_tail_list

    def __init_mask(self):
        # Column k keeps the first out_plane_list[k] output channels; the
        # mask is frozen — only the gate logits are learned.
        mask = torch.zeros(self.out_planes, self.n_seg)
        for col in range(self.n_seg):
            mask[:self.out_plane_list[col], col] = 1
        return nn.Parameter(mask, requires_grad=False)

    def __init_gate(self, reuse_gate=None):
        # A reused gate is shared as-is (not wrapped as a new Parameter).
        if reuse_gate is None:
            return nn.Parameter(assign_gate(self.n_seg, reuse_gate=reuse_gate))
        return reuse_gate

    def __cnt_flops(self, p_in, p_out, out_size):
        """Expected FLOPs given input/output segment distributions."""
        h, w = out_size
        def average(p, vals):
            return torch.dot(p, vals)
        cin_avg = self.in_planes if p_in is None else average(p_in, self.in_plane_list)
        cout_avg = average(p_out, self.out_plane_list)
        return cin_avg*cout_avg*h*w*self.kernel_size*self.kernel_size

    def __gumbel_softmax(self, tau=1, noise=False):
        # Standard Gumbel-softmax relaxation over the gate logits.
        if noise:
            uniform_noise = torch.rand(self.n_seg).cuda()
            gumbel_noise = -torch.log(-torch.log(uniform_noise))
            return F.softmax((self.gate+gumbel_noise) / tau, dim=0)
        return F.softmax((self.gate)/tau, dim=0)

    def forward(self, x, tau=1, noise=False, reuse_prob=None, p_in=None):
        """Return (conv output, soft channel mask, segment probs, exp. FLOPs).

        NOTE: the mask is returned, not applied — see the commented-out
        original line; callers apply it via ``weighted_feature``.
        """
        y = self.conv(x)
        prob = self.__gumbel_softmax(tau, noise) if reuse_prob is None else reuse_prob
        rmask = torch.sum(self.mask * prob, dim=1)
        flops = self.__cnt_flops(p_in, prob, y.shape[2:])
        # todo: original implementation
        # return y * rmask.view(1, len(rmask), 1, 1), prob, flops
        return y, rmask, prob, flops
def weighted_feature(x, rmask):
    """Scale each channel of an NCHW tensor by its entry in the 1-D mask."""
    channel_scale = rmask.view(1, rmask.numel(), 1, 1)
    return x * channel_scale
class Linear(nn.Module):
    """Linear layer counterpart of the dynamic Conv2d above.

    Builds the same segment mask/gate structures from ``dcfg`` but the
    forward pass does not apply or return the mask — it only reports an
    expected-FLOPs estimate weighted by the upstream distribution ``p_in``.
    """
    def __init__(self, in_features, out_features, bias=True, dcfg=None):
        super(Linear, self).__init__()
        self.linear = nn.Linear(in_features, out_features, bias)
        self.in_features = in_features
        self.out_features = out_features
        if dcfg is None:
            # Degenerates to a plain linear layer.
            print('dcfg is None')
            return
        self.dcfg = dcfg.copy()
        if dcfg.split_type not in ['fix_seg_size', 'fix_gp_number', TYPE_A, TYPE_B]:
            raise ValueError('Only \'fix_seg_size\' and \'fix_gp_number\' are supported.')
        # Same split semantics as Conv2d: TYPE_B fixes segment size,
        # TYPE_A fixes segment count.
        if dcfg.split_type == 'fix_seg_size' or dcfg.split_type == TYPE_B:
            in_seg_sz = dcfg.n_param
            in_n_seg = int(np.ceil(in_features / dcfg.n_param))
            self.seg_sz = dcfg.n_param
            self.n_seg = int(np.ceil(out_features / dcfg.n_param))
        else:
            in_n_seg = dcfg.n_param
            in_seg_sz = int(np.ceil(in_features / dcfg.n_param))
            self.n_seg = dcfg.n_param
            self.seg_sz = int(np.ceil(out_features / dcfg.n_param))
        assert self.out_features >= self.n_seg
        if in_n_seg <= self.in_features:
            # Cumulative channel counts of the input segments (for FLOPs).
            self.in_plane_list = torch.Tensor(self.__calc_seg_list(self.in_features, in_n_seg, in_seg_sz)).cuda()
        self.out_plane_list = self.__calc_seg_list(self.out_features, self.n_seg, self.seg_sz)
        self.mask = self.__init_mask()
        self.gate = self.__init_gate(dcfg.reuse_gate)

    def __calc_seg_list(self, planes, n_seg, seg_sz):
        """Cumulative (tail) feature counts of the n_seg segments."""
        seg_sz_num = planes + n_seg - n_seg * seg_sz
        seg_sub_sz_num = n_seg - seg_sz_num
        seg_list = [seg_sz] * seg_sz_num + [seg_sz - 1] * seg_sub_sz_num
        seg_tail_list = [sum(seg_list[:i + 1]) for i in range(n_seg)]
        return seg_tail_list

    def __init_mask(self):
        # Column k keeps the first out_plane_list[k] output features.
        mask = torch.zeros(self.out_features, self.n_seg)
        for col in range(self.n_seg):
            mask[:self.out_plane_list[col], col] = 1
        return nn.Parameter(mask, requires_grad=False) # todo: determine whether to register mask

    def __init_gate(self, reuse_gate=None):
        # A reused gate is shared as-is (not wrapped as a new Parameter).
        if reuse_gate is None:
            return nn.Parameter(assign_gate(self.n_seg, reuse_gate=reuse_gate))
        return reuse_gate

    def __cnt_flops(self, p_in):
        # Expected input width under p_in times the (full) output width.
        cin_avg = self.in_features if p_in is None else torch.dot(p_in, self.in_plane_list)
        return cin_avg*self.out_features

    def forward(self, x, p_in=None):
        """Return (linear output, expected FLOPs under p_in)."""
        y = self.linear(x)
        flops = self.__cnt_flops(p_in)
        return y, flops
if __name__ == '__main__':
    # gate = torch.Tensor([0, 0, 0, 1000])
    # dcfg = DcpConfig(n_param=4, split_type=TYPE_A, reuse_gate=gate)
    # conv = Conv2d(3, 4, kernel_size=3, stride=1, padding=0, bias=False, dcfg=dcfg)
    # print(conv.mask)
    # print(conv.gate)
    #
    # x = torch.ones([1, 3, 4, 4])
    # y, gate, flops = conv(x)
    # print(y)
    # print(gate)
    # print(flops)
    #
    # for k, v in conv.named_parameters():
    #     print(k)
    # Sanity check: entropy of a uniform 4-way softmax should equal ln(4).
    logits = torch.ones(4)
    print(logits)
    loss_fn = nn.Softmax()
    prob = loss_fn(logits)
    print(prob)
    print(entropy(prob))
# todo: to be deleted
# you should be ashamed for this awkward loop-based implementation
# def __cnt_flops(self, in_size, p_in, out_size, p_out):
# c_in, h, w = in_size
# assert c_in == self.in_planes
# if p_in is None:
# return self.__cnt_flops_mean(in_size, out_size, p_out)
# flops_list = []
# for i_chn in self.in_plane_list:
# flops_fixed_in = self.__cnt_flops_mean([i_chn, h, w], out_size, p_out)
# flops_list.append(flops_fixed_in)
# return torch.dot(torch.cuda.FloatTensor(flops_list), p_in)
#
# def __cnt_flops_mean(self, in_size, out_size, p_out):
# c_out, h_out, w_out = out_size
# assert c_out == self.out_planes
# flops_list = [self.__cnt_flops_common(in_size, torch.cuda.FloatTensor([o_chn, h_out, w_out]))
# for o_chn in self.out_plane_list]
# return torch.dot(torch.cuda.FloatTensor(flops_list), p_out)
#
# def __cnt_flops_common(self, in_size, out_size):
# c_in, h_in, w_in = in_size
# c_out, h_out, w_out = out_size
# return self.kernel_size * self.kernel_size * c_in * c_out * h_out * w_out
# def __cnt_flops(self, p_in):
# if p_in is None:
# return self.__cnt_flops_(self.in_features, self.out_features)
# flops_list = [self.__cnt_flops_common(i_chn, self.out_features) for i_chn in self.in_plane_list]
# return torch.dot(torch.Tensor(flops_list).cuda(), p_in)
#
# def __cnt_flops_common(self, in_size, out_size):
# return in_size * out_size |
from abc import ABCMeta, abstractmethod, abstractproperty
__all__ = ['literalvalidator', 'complexvalidator']
class MODE(object):
    """Validation strictness levels, ordered from weakest to strongest.

    NONE: always true
    SIMPLE: mimeType check
    STRICT: can be opened using standard library (e.g. GDAL)
    VERYSTRICT: Schema passes
    """
    NONE = 0
    SIMPLE = 1
    STRICT = 2
    VERYSTRICT = 3
class ValidatorAbstract(object, metaclass=ABCMeta):
    """Data validator abstract class.

    Note: the original set ``__metaclass__ = ABCMeta``, which is the Python 2
    spelling and is ignored by Python 3, so the class was not actually
    abstract.  Declaring the metaclass in the class header restores the
    intended contract (instantiating a subclass without ``validate`` raises
    TypeError).
    """
    @abstractmethod
    def validate(self, input, level=MODE.VERYSTRICT):
        """Perform input validation.

        Return True when *input* satisfies the requested *level*.  (The
        original body was a bare ``True`` expression statement with no
        effect; the return was clearly intended.)
        """
        return True
|
import scrapy
import time
from os import walk
import json
import datetime
class PropertySpider(scrapy.Spider):
    """Spider that fetches property pages and appends their raw bodies to a
    JSON-lines file."""
    name = "property"

    def start_requests(self):
        url_list = []  # populated manually before a crawl
        for target in url_list:
            time.sleep(1.5)  # crude politeness delay between requests
            yield scrapy.Request(url=target, callback=self.parse_property)

    def parse_property(self, response):
        record = {
            "raw_data": response.css('body')
        }
        with open('./files/properties/properties.jsonl', 'a') as out:
            json.dump(record, out)
            out.write('\n')
|
from decocare import lib
from decocare import commands
import logging
import time
log = logging.getLogger( ).getChild(__name__)
"""
0x8d == 141 == ReadPumpModel
0000000: 0000 0028 5101 3636 3534 3535 0000 0000 ...(Q.665455....
0000010: 0000 0000 0000 1221 0500 0000 0000 0000 .......!........
0000020: 0700 0000 30a7 6654 558d 001d 0000 0000 ....0.fTU.......
0000030: 0000 0000 0000 0000 0000 0000 0000 0000 ................
"""
"""
0:22.933.038 IN 64
0000000: 4142 433c 5101 3636 3534 3535 0000 0000 ABC<Q.665455....
0000010: 0000 0000 0000 1221 0500 0000 0000 0000 .......!........
0000020: 4000 0000 a503 3532 3200 0000 0000 0000 @.....522.......
0000030: 0000 0000 0000 0000 0000 0000 0000 0000 ................
0:23.203.076 IN 64
0000000: 4142 4325 0000 0000 0000 0000 0000 0000 ABC%............
0000010: 0000 0000 0000 0000 0000 0000 0000 0000 ................
0000020: 0000 0000 0000 0000 0000 0000 0000 0000 ................
0000030: 0000 0000 0000 0000 0000 0000 0000 0000 ................
"""
class Framer (object):
    # Reassembles a multi-frame pump reply.  Python 2 code (print statements).
    # Declared total payload length; -1 until the first frame is parsed.
    packet_length = -1
    def __init__ (self):
        self._done = False
        self.frames = [ ]
        self.data = bytearray( )
    def frame (self, data):
        """Consume one raw frame and append its payload to self.data.

        First-frame layout (see hexdumps above): [0:3] 'ABC' magic, [3] size,
        [4:4+size] load, [22:26] meta, [32] total packet length, payload at
        32 + meta[2].  Continuation frames carry payload directly in load.
        """
        head = data[0:3]
        size = data[3]
        load = data[4:4+size]
        payload = bytearray( )
        print "HEAD", head
        if str(head) == 'ABC':
            print "LOAD", load[0]
        last = dict(head=head, size=size, load=load)
        if len(self.frames) < 1:
            # First frame: unpack the envelope fields.
            query = load[0]
            num = load[1]
            serial = str(load[2:8])
            meta = data[22:26]
            packet_length = data[32]
            self.packet_length = packet_length
            offset = meta[2]
            payload = data[32+offset:]
            if size < 20 or load[0] < 0x20:
                # Malformed first frame; drop it without recording anything.
                log.error("UNEXPECTED load")
                return
        else:
            # Continuation frame: the whole load is payload.
            payload = load
        self.data.extend(payload)
        self.frames.append(last)
        if len(self.data) == self.packet_length:
            self._done = True
        if len(self.data) > self.packet_length:
            # More bytes arrived than the first frame declared.
            print "WARNING"
            log.error("self.data bigger than packet_length")
    def done (self):
        # True once the accumulated payload matches the declared length.
        return self._done
class Remote (object):
    # Command wrapper around a serial link to the pump (Python 2 code).
    def __init__ (self, link, serial=None):
        self.link = link
        self.serial = serial
    def execute (self, msg, **kwds):
        """Send msg over the link, reassemble the framed reply with Framer,
        and let msg parse the payload via msg.respond()."""
        msg.serial = self.serial
        message = fmt_command(msg, serial=self.serial, **kwds)
        self.link.write(message)
        framer = Framer( )
        # Keep reading frames until the declared packet length is reached.
        while not framer.done( ):
            data = self.link.read( )
            framer.frame(data)
        print "HYPOTHETICAL PUMP RESULT", framer, len(framer.frames), len(framer.data)
        print lib.hexdump(framer.data)
        msg.respond(framer.data)
        return msg
    def query (self, Msg, **kwds):
        """Instantiate the command class Msg and execute it."""
        msg = Msg(serial=self.serial, **kwds)
        return self.execute(msg, **kwds)
def fmt_command (msg, serial=None, **kwds):
    """Build the raw byte command for msg (Python 2 code).

    Layout mirrors the hexdump at the top of the file: a 3-byte zero prefix,
    a 1-byte total length, the 'Q' envelope carrying the ASCII serial, fixed
    filler, the hex-decoded serial, the opcode, and a trailing CRC8 computed
    over the bytes from offset 33 of the envelope onward.
    """
    prefix = bytearray([ 0x00, 0x00, 0x00 ])
    op = bytearray([ 'Q', 0x01 ]) + bytearray(serial) + \
         bytearray([ 0x00, 0x00, 0x00, 0x00 ]) + \
         bytearray([ 0x00, 0x00, 0x00, 0x00 ]) + \
         bytearray([ 0x00, 0x00 ]) + \
         bytearray([ 0x12, 0x21 ]) + \
         bytearray([ 0x05, 0x00 ]) + \
         bytearray([ 0x00, 0x00 ]) + \
         bytearray([ 0x00, 0x00, 0x00, 0x00 ]) + \
         bytearray([ 0x07, 0x00, 0x00, 0x00 ]) + \
         bytearray([ 0x30, 0xa7 ]) + \
         bytearray(str(serial).decode('hex')) + \
         bytearray([ msg.code, 0x00 ])
    # CRC covers only the tail of the envelope (from offset 33).
    crc = lib.CRC8.compute(op[33:])
    length = bytearray([len(op) + 1])
    return prefix + length + op + bytearray([crc])
if __name__ == '__main__':
    # Manual smoke test (Python 2): format a ReadPumpModel command, open the
    # serial link, enter remote command mode, and query the pump model.
    import sys, os
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    from link import Link
    from modem import Modem
    # Pump serial number comes from the environment.
    serial = os.environ.get('SERIAL')
    msg = fmt_command(commands.ReadPumpModel(serial=serial), serial=serial)
    print lib.hexdump(msg)
    # sys.exit(0)
    link = Link.Make( )
    # print link.device
    with link:
        modem = Modem(link)
        print modem
        modem.init_modem( )
        with modem.remote( ) as control:
            # now in remote command mode.
            remote = Remote(link, serial)
            model = remote.query(commands.ReadPumpModel)
            print model
            print "MODEL", model.getData( )
            # end remote command mode
|
#coding:utf-8
from service.service import Service
from dao.database import Database
from repository.task import TaskRepository
from repository.project import ProjectRepository
class TopDisplayService(Service):
    """Service that assembles the data shown on the top page."""

    def __init__(self):
        pass

    def execute(self):
        """Return every task and every project fetched from the database."""
        database = Database()
        return {
            "task_list": TaskRepository(database).fetch_all(),
            "project_list": ProjectRepository(database).fetch_all(),
        }
|
from google.appengine.ext import db
from ragendja.auth.google_models import User as BaseUser
class User(BaseUser):
    """Represents a user in the system."""
    # Pickled OAuth/auth tokens stored as an opaque blob — presumably a
    # pickled token dict; TODO confirm the producer before unpickling.
    pickled_tokens = db.BlobProperty()
class Document(db.Model):
    """Represents a Document in the system."""
    # Owning user (required reference).
    user = db.ReferenceProperty(User,required=True)
    # URL of the document.
    link = db.StringProperty(required=True)
    # Email address of the document's author.
    author_email = db.StringProperty(required=True)
    # External document identifier.
    doc_id = db.StringProperty(required=True)
    title = db.StringProperty(required=True)
    last_updated = db.DateTimeProperty(required=True)
    # List of notification targets — presumably email addresses; confirm
    # against the notification sender.
    notify = db.StringListProperty()
import discord
from .utils import checks
from discord.ext import commands
from cogs.utils.dataIO import dataIO
import os
from datetime import datetime as dt
import random
import asyncio
class ChannelDraw:
    """Draws a random message from a set of channel messages (old
    discord.py 0.16-style cog)."""

    __author__ = "mikeshardmind"
    __version__ = "2.2a"

    def __init__(self, bot):
        self.bot = bot
        # User ids with a drawing in progress.
        self.users = []
        # channel id -> list of candidate messages still in the draw.
        self.queues = {}
        self.settings = dataIO.load_json('data/channeldraw/settings.json')
        self.utilities = self.bot.get_cog("SinbadUtilities")

    def save_json(self):
        """Persist self.settings to disk."""
        dataIO.save_json("data/channeldraw/settings.json", self.settings)

    @checks.admin_or_permissions(Manage_channels=True)
    @commands.group(pass_context=True, name='draw', no_pm=True)
    async def draw(self, ctx):
        """Need I say more?"""
        if ctx.invoked_subcommand is None:
            await self.bot.send_cmd_help(ctx)

    @draw.command(pass_context=True, name='bymessageids', aliases=["bmids"])
    async def by_msgs(self, ctx, first: str, last: str):
        """Draw from the messages between two message ids (inclusive)."""
        if ctx.message.author.id in self.users:
            return await self.bot.say("You already have a drawing in progress")
        a = await self.get_msg(first)
        b = await self.get_msg(last)
        if a is None or b is None:
            return await self.bot.say("I could not find one or both of those")
        if a.channel.id != b.channel.id:
            return await self.bot.say("Those messages are in seperate rooms")
        if a.channel.id in self.queues:
            return await self.bot.say("That channel has a drawing in progress")
        if a.timestamp == b.timestamp:  # Because QA
            return await self.bot.say("Those message(s) are at the same time")
        if a.timestamp > b.timestamp:
            a, b = b, a  # Because I can't trust people to use things correctly
        self.initialize(a.channel.id)
        await self.mkqueue(a.timestamp, b.timestamp, b.channel)
        # logs_from(after=..., before=...) is exclusive, so the two endpoint
        # messages are added back explicitly (same channel for a and b).
        self.queues[a.channel.id].append(a)
        self.queues[b.channel.id].append(b)
        self.users.append(ctx.message.author.id)
        await self.validate(b.channel, ctx.message.author)

    @draw.command(pass_context=True, name='bytimes')
    async def by_times(self, ctx, *, times):
        """gets messages from the channel it was called from between 2 times.\n
        Format should be \nYYYY-MM-DDHH:mm\n
        In chronological order, with a space inbetween them"""
        try:
            t = str(times)
            start, end = t.split(' ')
            start = ''.join(c for c in start if c.isdigit())
            end = ''.join(c for c in end if c.isdigit())
            a = dt.strptime(start, "%Y%m%d%H%M")
            b = dt.strptime(end, "%Y%m%d%H%M")
        except ValueError:
            return await self.bot.send_cmd_help(ctx)
        if a >= b:
            a, b = b, a
        if b > ctx.message.timestamp:
            b = ctx.message.timestamp
        if a >= ctx.message.timestamp:
            return await self.bot.say("I can't read the future.")
        # NOTE(review): old discord.py Context has no .channel attribute —
        # this likely needs ctx.message.channel.id; confirm against the
        # discord.py version in use.
        if ctx.channel.id in self.queues:
            return await self.bot.say("That channel has a drawing in progress")
        if ctx.message.author.id in self.users:
            return await self.bot.say("You already have a drawing in progress")
        # NOTE(review): 'a' is a datetime here, so a.channel.id will raise
        # AttributeError — presumably ctx.message.channel.id was intended.
        self.initialize(a.channel.id)
        await self.mkqueue(a, b, ctx.message.channel)
        self.users.append(ctx.message.author.id)
        await self.validate(ctx.message.channel, ctx.message.author)

    @draw.command(pass_context=True, name='fromdate')
    async def by_interval(self, ctx, *, time):
        """gets messages from the channel it was called from
        between now and a time (UTC).\n
        Format should be \n\`YYYY-MM-DDHH:mm\`\n
        """
        try:
            t = str(time)
            t = ''.join(c for c in t if c.isdigit())
            a = dt.strptime(t, "%Y%m%d%H%M")
            b = dt.utcnow()
            pass
        except ValueError:
            return await self.bot.send_cmd_help(ctx)
        if a >= b:
            return await self.bot.send_cmd_help(ctx)
        if ctx.message.author.id in self.users:
            return await self.bot.say("You already have a drawing in progress")
        if ctx.message.channel.id in self.queues:
            return await self.bot.say("That channel has a drawing in progress")
        self.initialize(ctx.message.channel.id)
        await self.mkqueue(a, b, ctx.message.channel)
        self.users.append(ctx.message.author.id)
        await self.validate(ctx.message.channel, ctx.message.author)

    @draw.command(name="auto", pass_context=True)
    async def autodraw(self, ctx):
        """only works if there is a prior draw on record"""
        self.initialize(ctx.message.channel.id)
        if self.settings['latest'][ctx.message.channel.id] == 0:
            return await self.bot.send_cmd_help(ctx)
        if ctx.message.author.id in self.users:
            return await self.bot.say("You already have a drawing in progress")
        if ctx.message.channel.id in self.queues:
            return await self.bot.say("That channel has a drawing in progress")
        # Resume from the timestamp recorded after the last winning draw.
        a = dt.strptime(str(self.settings['latest'][ctx.message.channel.id]),
                        "%Y%m%d%H%M")
        b = ctx.message.timestamp
        await self.mkqueue(a, b, ctx.message.channel)
        self.users.append(ctx.message.author.id)
        await self.validate(ctx.message.channel, ctx.message.author)

    def initialize(self, chan_id: str):
        """Ensure the channel has a 'latest' record in settings."""
        if chan_id not in self.settings['latest']:
            self.settings['latest'][chan_id] = 0
            self.save_json()

    async def validate(self, channel, author):
        """DM the drawing owner shuffled entries until one is accepted.

        NOTE(review): the winning ('y') branch removes the user but never
        pops self.queues[channel.id]; only the quit path does, so the
        channel stays marked as in-progress after a completed draw.
        """
        if len(self.queues[channel.id]) == 0:
            self.users.remove(author.id)
            return await self.bot.send_message(author, "No new messages.")
        # Remember the newest timestamp to advance the 'latest' marker later.
        latest = self.queues[channel.id][-1].timestamp.strftime("%Y%m%d%H%M")
        fail_count = 0
        random.seed()
        random.shuffle(self.queues[channel.id])
        while author.id in self.users:
            # Escalating nags on repeated invalid/missing replies.
            if fail_count == 1:
                await asyncio.sleep(1)
                await self.bot.send_message(author, "Quit wasting my time.")
            if fail_count == 2:
                await asyncio.sleep(1)
                await self.bot.send_message(author, "Next one either quit "
                                            "or do it correctly")
            if fail_count == 3:
                await asyncio.sleep(1)
                await self.bot.send_message(author, "We are done here.")
                self.users.remove(author.id)
                break
            if len(self.queues[channel.id]) == 0:
                await asyncio.sleep(1)
                await self.bot.send_message(author, "That's all folks")
                self.users.remove(author.id)
                break
            entry = self.queues[channel.id].pop()
            em = self.qform(entry)
            await self.bot.send_message(author, embed=em)
            await asyncio.sleep(1)
            dm = await self.bot.send_message(author,
                                             "Is this a valid entry?"
                                             "(yes/no/quit)")
            message = await self.bot.wait_for_message(
                channel=dm.channel,
                author=author, timeout=60)
            if message is None:
                fail_count += 1
                continue
            reply = message.clean_content.lower()
            if reply[0] == 'y':
                # Winner: announce in the source channel and advance the
                # per-channel 'latest' marker past this draw.
                await self.bot.send_message(channel,
                                            "{} won the drawing with "
                                            "the following entry"
                                            "".format(entry.author.mention))
                await self.bot.send_message(channel, embed=em)
                self.settings['latest'][channel.id] = int(latest) + 1
                self.users.remove(author.id)
                self.save_json()
            if reply[0] == 'n':
                await self.bot.send_message(author, "Ok then...")
            if reply[0] == 'q':
                await self.bot.send_message(author,
                                            "I guess we're done here")
                self.users.remove(author.id)
                self.queues.pop(channel.id, None)

    async def mkqueue(self, a, b, channel):
        """Fill the channel's queue with messages between times a and b."""
        self.queues[channel.id] = []
        async for message in \
                self.bot.logs_from(channel, limit=1000000,
                                   after=a, before=b, reverse=True):
            self.queues[channel.id].append(message)

    async def get_msg(self, message_id: str, server=None):
        """Find a message by id, searching one server or all of them."""
        if server is not None:
            for channel in server.channels:
                try:
                    msg = await self.bot.get_message(channel, message_id)
                    if msg:
                        return msg
                except Exception:
                    # get_message raises for channels without the message or
                    # without permission; keep scanning.
                    pass
            return None
        for server in self.bot.servers:
            for channel in server.channels:
                try:
                    msg = await self.bot.get_message(channel, message_id)
                    if msg:
                        return msg
                except Exception:
                    pass

    def qform(self, message):
        """Build an embed presenting a candidate message."""
        channel = message.channel
        server = channel.server
        content = message.content
        author = message.author
        sname = server.name
        cname = channel.name
        avatar = author.avatar_url if author.avatar \
            else author.default_avatar_url
        footer = 'Said in {} #{}'.format(sname, cname)
        em = discord.Embed(description=content, color=author.color,
                           timestamp=message.timestamp)
        em.set_author(name='{}'.format(author.name), icon_url=avatar)
        em.set_footer(text=footer)
        if message.attachments:
            # Inline image attachments; link everything else.
            a = message.attachments[0]
            fname = a['filename']
            url = a['url']
            if fname.split('.')[-1] in ['png', 'jpg', 'gif', 'jpeg']:
                em.set_image(url=url)
            else:
                em.add_field(name='Message has an attachment',
                             value='[{}]({})'.format(fname, url),
                             inline=True)
        return em
def check_folder():
    """Ensure the cog's data directory exists."""
    # exist_ok removes the check-then-create race of the previous
    # os.path.exists() guard (another process could create the folder
    # between the check and the makedirs call).
    os.makedirs('data/channeldraw', exist_ok=True)
def check_file():
    """Create the default settings file when it is missing or invalid."""
    settings_path = 'data/channeldraw/settings.json'
    if dataIO.is_valid_json(settings_path) is False:
        dataIO.save_json(settings_path, {'latest': {}})
def setup(bot):
    """Cog entry point: prepare the data files, then register the cog."""
    check_folder()
    check_file()
    bot.add_cog(ChannelDraw(bot))
|
#!/usr/bin/env python
"""
_GetCompletedFilesByRun_
Oracle implementation of Subscription.GetCompletedFilesByRun
"""
from WMCore.WMBS.MySQL.Subscriptions.GetCompletedFilesByRun import \
GetCompletedFilesByRun as GetCompletedFilesByRunMySQL
class GetCompletedFilesByRun(GetCompletedFilesByRunMySQL):
    """Oracle variant of Subscription.GetCompletedFilesByRun.

    The MySQL implementation is inherited unchanged — nothing is
    overridden here.
    """
    pass
|
#!/usr/bin/env python3
# Names: Sophia Trump, Eunsoo Jang, Maria Vivanco, Emily Lobel
# File: makeCleanDictionary.py
# Description: Takes 3 dictionary files and cleans them, saving the mega combined cleaned dictionary
# into a file called "cleanedDictionary.txt".
# Run in the cmd with python3 makeCleanDictionary.py <path to dictionary file>
import sys
import re
def makeCleanDictionary():
    """Merge two word lists into one deduplicated dictionary file.

    Reads "unix-words.txt" and "masterdict.txt" (newline-delimited),
    takes the union of the two word sets, and writes the result one
    word per line to "cleanedDictionary.txt".
    """
    # Context managers close the input handles — the previous code
    # leaked all three file objects it opened.
    with open("unix-words.txt") as unix_file:
        listUnixDictionaryWords = unix_file.read().split('\n')
    with open("masterdict.txt") as scrabble_file:
        listScrabbleWords = scrabble_file.read().split('\n')
    print("Checking", len(listUnixDictionaryWords), "words from dictionary 1...")
    print("Checking", len(listScrabbleWords), "words from dictionary 2...")
    # Set union removes repeats across both dictionaries.
    dictionaryWordsNoRepeats = list(set(listUnixDictionaryWords) | set(listScrabbleWords))
    with open("cleanedDictionary.txt", "w+") as out:
        out.writelines(word + '\n' for word in dictionaryWordsNoRepeats)
def main():
    """Entry point: build and write the cleaned dictionary file."""
    makeCleanDictionary()
# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|
# General Imports
from django.core.mail import send_mail
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
import secrets as python_secrets
import json
# Model Imports
from apps.Espn import models as espn_models
def create_new_access_token(profile: espn_models.Profile) -> str:
    """Generate a fresh URL-safe token, persist it on the profile,
    and return it."""
    token = python_secrets.token_urlsafe(nbytes=256)
    profile.access_token = token
    profile.save()
    return token
def user_password_was_incorrect() -> JsonResponse:
    """JSON error response for a failed password check."""
    payload = {
        'ok': False,
        'description': "Password was Incorrect",
    }
    return JsonResponse(data=payload)
def token_was_not_found() -> JsonResponse:
    """JSON error response for an unrecognised access token."""
    payload = {
        'ok': False,
        'description': "Incorrect Access Token",
    }
    return JsonResponse(data=payload)
def user_profile_not_found() -> JsonResponse:
    """JSON error response when no matching Profile exists."""
    payload = {
        'ok': False,
        'description': "User Profile Was Not Found",
    }
    return JsonResponse(data=payload)
def api_method_not_found() -> JsonResponse:
    """JSON error response for requests using an unsupported HTTP method."""
    payload = {
        'ok': False,
        'description': "Method Not Found - For Security Reasons API Methods are only available through POST method",
    }
    return JsonResponse(data=payload)
def get_profile(token) -> espn_models.Profile:
    """Return the Profile whose access_token exactly matches `token`.

    Raises the ORM's lookup exception (e.g. Profile.DoesNotExist, an
    Exception subclass, so existing `except Exception` callers still
    work).  The previous `except Exception: raise Exception` wrapper
    discarded the original exception type, message, and traceback
    while adding nothing — it has been removed.
    """
    return espn_models.Profile.objects.get(access_token__exact=token)
def find_profile_decorator(funct: callable):
    """Decorator: resolve the request body's JSON `token` to a Profile
    and pass it to the wrapped view.

    Any failure along the way (unparsable body, missing 'token' key,
    unknown token) produces the generic "profile not found" response.
    """
    @method_decorator(csrf_exempt, name='dispatch')
    def wrapper(request, *args, **kwargs):
        try:
            data = json.loads(request.body)
            profile = get_profile(data['token'])
            # NOTE(review): `request` is forwarded as a keyword here,
            # while find_profile_if_exists_decorator passes it
            # positionally — confirm both styles match the views.
            return funct(*args, request=request, profile=profile, **kwargs)
        except Exception:
            return user_profile_not_found()
    return wrapper
def find_profile_if_exists_decorator(funct: callable):
    """Decorator: like find_profile_decorator, but the view always runs.

    On success the view receives the resolved, active profile and
    logged_in=True; on any failure (bad body, unknown token, inactive
    profile) it receives profile=None and logged_in=False instead of an
    error response.
    """
    @method_decorator(csrf_exempt, name='dispatch')
    def wrapper(request, *args, **kwargs):
        try:
            data = json.loads(request.body)
            profile = get_profile(data['token'])
            if not profile.active:
                # Treat inactive accounts as not logged in.
                raise Exception
            return funct(request, *args, profile=profile, logged_in=True, **kwargs)
        except Exception:
            return funct(request, *args, profile=None, logged_in=False, **kwargs)
    return wrapper
def create_forget_password_token(email: str):
    """Generate and store a password-reset token for the given email.

    Returns (token, profile) on success and (False, None) when the
    lookup or save fails.  NOTE(review): the first element is a str on
    success but the bool False on failure — callers must only rely on
    its truthiness.
    """
    try:
        profile = espn_models.Profile.objects.get(user__email=email)
        profile.forget_password_access_token = python_secrets.token_urlsafe(nbytes=256)
        profile.save()
    except Exception:
        return False, None
    return profile.forget_password_access_token, profile
def send_forget_password_email(profile: espn_models.Profile, token: str):
    """Email the password-reset token to the profile's user."""
    send_mail(
        'Your Forget Password Token',
        token,
        'noreply@aeonem.xyz',
        [profile.user.email],
        fail_silently=False,
    )
def send_new_account_activation_email(profile: espn_models.Profile):
    """Create a fresh access token and email it as the activation code."""
    activation_token = create_new_access_token(profile)
    send_mail(
        'Account Activation Token',
        activation_token,
        'noreply@aeonem.xyz',
        [profile.user.email],
        fail_silently=False,
    )
|
from django.db import models
from swgraph.models import DateTimeModel
from people.models import People
class Transport(DateTimeModel):
    """Base model for a Star Wars craft; Starship and Vehicle extend it.

    Numeric-looking specs are stored as CharFields — presumably because
    the source data mixes numbers with values like "unknown"; confirm
    against the importer.
    """
    name = models.CharField(max_length=40)
    model = models.CharField(max_length=40)
    manufacturer = models.CharField(max_length=80)
    cost_in_credits = models.CharField(max_length=40)
    length = models.CharField(max_length=40)
    max_atmosphering_speed = models.CharField(max_length=40)
    crew = models.CharField(max_length=40)
    passengers = models.CharField(max_length=40)
    cargo_capacity = models.CharField(max_length=40)
    consumables = models.CharField(max_length=40)

    def __str__(self):
        # Human-readable identity for admin/listing output.
        return self.name
class Starship(Transport):
    """A starship is a transport with a hyperdrive."""
    hyperdrive_rating = models.CharField(max_length=40)
    MGLT = models.CharField(max_length=40)
    starship_class = models.CharField(max_length=40)
    # Many-to-many: a pilot can fly many starships and vice versa.
    pilots = models.ManyToManyField(
        People,
        related_name="starships",
        blank=True
    )
class Vehicle(Transport):
    """A vehicle is anything without hyperdrive capability."""
    vehicle_class = models.CharField(max_length=40)
    # Many-to-many: a pilot can drive many vehicles and vice versa.
    pilots = models.ManyToManyField(
        People,
        related_name="vehicles",
        blank=True
    )
|
from django.shortcuts import render
from django.views.generic import ListView
from app.models import News
import logging
logger = logging.getLogger(__name__)
# Create your views here.
class NewsList(ListView):
    """List view over News rows with their region eagerly loaded."""
    model = News

    def get_queryset(self):
        """Return all News with region pre-joined; log and re-raise on
        DB errors, warn when the list comes back empty."""
        logger.debug(f"Request to NewsList from user: {self.request.user.id} with params: {self.request.GET}")
        logger.info("NewsList.get_queryset called")
        try:
            # select_related avoids one extra query per row for region.
            news = News.objects.select_related("region").all()
        except Exception as exc:
            logger.error(str(exc))
            raise
        if not news:
            logger.warning("News list is empty")
            # Removed leftover debug probe `import q; q(news); q(news[0])`:
            # q is a debug-only dependency, and q(news[0]) raised
            # IndexError on exactly the empty queryset this branch handles.
        return news
from tkinter import *
import base64
# Initialize window
root = Tk()
root.geometry("590x540")
root.title("Encode & Decode Messages")
root.config(bg="light blue")
# Label
Label(root, text="Encode & Decode Messages", font="aerial 25 italic", bg="light blue").pack()
# Define variables
Text = StringVar()
private_key = StringVar()
Mode = StringVar()
Result = StringVar()
# Function to encode
def encode(key, message):
    """Shift each character of `message` by the repeating `key`
    (mod 256), then return the URL-safe base64 of the shifted text's
    UTF-8 bytes."""
    shifted = [
        chr((ord(ch) + ord(key[i % len(key)])) % 256)
        for i, ch in enumerate(message)
    ]
    return base64.urlsafe_b64encode("".join(shifted).encode()).decode()
# Function to decode
def decode(key, message):
    """Inverse of encode(): base64-decode `message`, then shift each
    character back by the repeating `key` (mod 256)."""
    plain = base64.urlsafe_b64decode(message).decode()
    return "".join(
        chr((256 + ord(ch) - ord(key[i % len(key)])) % 256)
        for i, ch in enumerate(plain)
    )
# Function to set mode
def mode():
    """Run encode or decode based on the Mode field and publish the
    result to the output entry."""
    selected = Mode.get()
    if selected == "e":
        Result.set(encode(private_key.get(), Text.get()))
    elif selected == "d":
        Result.set(decode(private_key.get(), Text.get()))
    else:
        Result.set("Invalid Mode")
# Function to exit
def close():
    """Destroy the root window, ending the application."""
    root.destroy()
# Function to reset
def reset():
    """Clear every input and output field."""
    for field in (Text, private_key, Mode, Result):
        field.set("")
# Labels & Buttons
Label(root, font="aerial 15 bold", bg="light blue", text="MESSAGE").place(x=15, y=75)
Entry(root, font="aerial 18", textvariable=Text, bg="snow").place(x=75, y=105)
Label(root, font="aerial 15 bold", bg="light blue", text="ENTER KEY").place(x=15, y=165)
Entry(root, font="aerial 18", textvariable=private_key, bg="snow").place(x=75, y=195)
Label(root, font="aerial 15 bold", bg="light blue", text="MODE (e-encode, d-decode)").place(x=15, y=275)
Entry(root, font="aerial 18", textvariable=Mode, bg="snow").place(x=75, y=305)
Entry(root, font="aerial 18", textvariable=Result, bg="snow").place(x=195, y=390)
Button(root, font="aerial 15 bold", text="RESULT", padx=2, width=6, bg="lime green",
bd=5, command=mode).place(x=35, y=390)
Button(root, font="aerial 12 bold", text="RESET", padx=2, width=6, bg="orange2",
bd=5, command=reset).place(x=0, y=495)
Button(root, font="aerial 12 bold", text="EXIT", padx=2, width=6, bg="red",
bd=5, command=close).place(x=500, y=495)
root.mainloop()
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
sys.path.append("../..")
import json
import logging
from flask import Flask, request
from keras_bert_ner.utils.predict import build_trained_model, get_model_inputs
class Args(object):
    """Plain attribute holder for model-loading configuration.

    Each known key is copied from `configs`; missing keys become None.
    """

    def __init__(self, configs):
        for key in ("model_path", "model_name", "bert_vocab",
                    "device_map", "max_len"):
            # dict.get yields None for absent keys, matching the
            # attribute-per-key assignments this replaces.
            setattr(self, key, configs.get(key))
# Module-level model configuration; the trained model and decoding
# helpers are built once at import time and reused by parse().
configs = {
    "model_path": "../models",
    "model_name": "ALBERT-IDCNN-CRF.h5",
    "bert_vocab": "/home1/liushaoweihua/pretrained_lm/albert_tiny_250k/vocab.txt",
    "device_map": "cpu",
    "max_len": 512
}
args = Args(configs)
tokenizer, id2tag, viterbi_decoder = build_trained_model(args=args)
def parse(text):
    """Run NER over `text` and return its entities joined with '|'."""
    tokens, segs = get_model_inputs(tokenizer, [text], max_len=args.max_len)
    decoded = viterbi_decoder.decode([tokens, segs])
    # Drop padding/"X" tags before entity extraction.
    tags = [id2tag[idx] for idx in decoded[0] if id2tag[idx] != "X"]
    return "|".join(get_entities([list(text), tags]))
def get_entities(inputs):
    """Collect entity strings from parallel (characters, tags) lists.

    `inputs` is [text_chars, tags] where tags use "B" (begin entity),
    "I" (inside entity) and anything else as outside.  Returns the
    entities in order of appearance.

    Fixes two defects in the original: a "B" tag now closes any entity
    in progress (adjacent entities were silently merged into one), and
    an entity still open when the sequence ends is flushed (it was
    dropped entirely).
    """
    text, tags = inputs
    entities = []
    entity = ""
    for text_item, tag_item in zip(text, tags):
        if tag_item == "B":
            if entity:
                entities.append(entity)
            entity = text_item
        elif tag_item == "I":
            # An "I" with no open entity is ignored, as before.
            if entity:
                entity += text_item
        else:
            if entity:
                entities.append(entity)
                entity = ""
    if entity:
        # Flush the final entity when the sequence ends mid-entity.
        entities.append(entity)
    return entities
def create_app():
    """Build the Flask app: GET / runs NER over query parameter `s`."""
    app = Flask(__name__)

    @app.route("/", methods=["GET"])
    def callback():
        # Fall back to a sentinel when no query text is supplied.
        query = request.args.get("s") or "EOF"
        app.logger.info("[RECEIVE]: {}".format(query))
        entities = parse(query)
        app.logger.info("[SEND]: {}".format(entities))
        return json.dumps({"text": query, "entities": entities},
                          ensure_ascii=False, indent=4)

    return app
app = create_app()
# When served by gunicorn the module is imported (not run as __main__):
# adopt gunicorn's handlers and level so app logs reach its error log.
if __name__ != '__main__':
    gunicorn_logger = logging.getLogger('gunicorn.error')
    app.logger.handlers = gunicorn_logger.handlers
    app.logger.setLevel(gunicorn_logger.level)
app.logger.info("Initializing complete!")
import os
import io
import time
import subprocess
import sys
from threading import Thread
import datetime
import tarfile
import ctypes
import shutil
import webbrowser
from enum import Enum
from system_hotkey import SystemHotkey
import yaml
import win32api
import win32gui
import win32process
import win32con
import keyboard
import psutil
import wx
from wx.lib.newevent import NewEvent
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
import locale
from mem_resources.resources import *
# Default application settings; readConfig() overlays values from
# config.yaml on top of these, and writeConfig() persists them back.
config = {
    # Noita's save directory under the user profile.
    'saveFolderPath' : os.path.expanduser('~') + '\\AppData\\LocalLow\\Nolla_Games_Noita',
    # Global hotkey chords.
    'hotkey_save': ('control', 'alt', 'f5'),
    'hotkey_saveQuick': ('control', 'shift', 'f5'),
    'hotkey_load': ('control', 'alt', 'f9'),
    'hotkey_loadQuick': ('control', 'shift', 'f9'),
    'autoclose': True,        # close the running game before save/load
    'launch_on_load': False,  # relaunch the game after loading
    'executable_path': '',    # filled in by findNoita()
    '7z_path': '',            # filled in via find7Zip()/selectArchiveTool()
    'steam_launch': '',       # filled in by findSteam()
    'use_steam_launch': True,
    'launch_arguments': ['-no_logo_splashes', '-gamemode 4294967295', '-save_slot 0'],
    'display_save_status' : True
}
# UI palette shared by every widget in the app.
colors = {
    'border': wx.Colour(240, 207, 116),
    'border-light': wx.Colour(107, 101, 96),
    'background': wx.Colour( 37, 33, 30),
    'content': wx.Colour( 24, 23, 21),
    'save-item': wx.Colour( 18, 17, 14),
    'hover-red': wx.Colour(165, 45, 52),
    'hover-light': wx.Colour( 63, 53, 39),
    'main-text': wx.Colour(255, 252, 241),
    'secondary-text': wx.Colour(156, 155, 145),
    'text-input': wx.Colour( 11, 10, 6),
    'button': wx.Colour(214, 214, 207),
    'button-hover': wx.Colour(218, 217, 152),
    'button-red': wx.Colour( 83, 27, 27),
    'progress-pending': wx.Colour( 51, 45, 18),
    'progress': wx.Colour(205, 196, 72)
}
# Windows keyboard scan code -> key name, used by toAscii(). Keys with
# two physical positions (shift, alt, super) map both codes to one name.
scanCodes = {
    0x02: '1',
    0x03: '2',
    0x04: '3',
    0x05: '4',
    0x06: '5',
    0x07: '6',
    0x08: '7',
    0x09: '8',
    0x0a: '9',
    0x0b: '0',
    0x10: 'q',
    0x11: 'w',
    0x12: 'e',
    0x13: 'r',
    0x14: 't',
    0x15: 'y',
    0x16: 'u',
    0x17: 'i',
    0x18: 'o',
    0x19: 'p',
    0x1e: 'a',
    0x1f: 's',
    0x20: 'd',
    0x21: 'f',
    0x22: 'g',
    0x23: 'h',
    0x24: 'j',
    0x25: 'k',
    0x26: 'l',
    0x2c: 'z',
    0x2d: 'x',
    0x2e: 'c',
    0x2f: 'v',
    0x30: 'b',
    0x31: 'n',
    0x32: 'm',
    0x3b: 'f1',
    0x3c: 'f2',
    0x3d: 'f3',
    0x3e: 'f4',
    0x3f: 'f5',
    0x40: 'f6',
    0x41: 'f7',
    0x42: 'f8',
    0x43: 'f9',
    0x44: 'f10',
    0x57: 'f11',
    0x58: 'f12',
    0x1d: 'control',
    0x2a: 'shift', 0x36: 'shift',
    0x38: 'alt', 0xe038: 'alt',
    0x5b: 'super', 0x5c: 'super',
}
class Action(Enum):
    """Operations the app can perform on a save slot."""
    delete = 0
    load = 1
    save = 2
# Known save archives — populated elsewhere in the app (TODO confirm keys).
saveFiles = {}
# Latest in-game state parsed from player.xml by updatePlayerSaveStatus().
saveStatus = {
    'player_pos': None
}
def toAscii(code):
    """Map a keyboard scan code to its key name, or None if unmapped."""
    # dict.get replaces the try/bare-except pattern, which also hid
    # unrelated errors besides the expected KeyError.
    return scanCodes.get(code)
def readConfig():
    """Overlay config.yaml (when present) onto the in-memory defaults.

    Keys missing from the file and empty-string values are ignored so
    the defaults survive.
    """
    os.chdir(working_dir)
    if not os.path.exists('./config.yaml'):
        return
    global config
    with open('./config.yaml', 'r') as fh:
        loaded = yaml.load(fh, Loader=yaml.FullLoader)
    if not loaded:
        return
    for key in config:
        value = loaded.get(key, '')
        if value != '':
            config[key] = value
def writeConfig():
    """Persist the current config dict to config.yaml in the app folder."""
    os.chdir(working_dir)
    with open("./config.yaml", "w") as fh:
        yaml.dump(config, fh)
def focusWindow():
    """Restore the main window if minimized and bring it to the front."""
    if window.IsIconized():
        window.Restore()
    win32gui.SetForegroundWindow(window.GetHandle())
def hitTest(rect, point):
    """Return True when `point` (x, y) lies strictly inside
    `rect` (x, y, width, height)."""
    dx = point[0] - rect[0]
    dy = point[1] - rect[1]
    return 0 < dx < rect[2] and 0 < dy < rect[3]
def get_hwnds_for_pid(pid):
    """Return the visible, enabled top-level window handles owned by `pid`."""
    matches = []

    def enum_handler(hwnd, acc):
        if win32gui.IsWindowVisible(hwnd) and win32gui.IsWindowEnabled(hwnd):
            _, owner_pid = win32process.GetWindowThreadProcessId(hwnd)
            if owner_pid == pid:
                acc.append(hwnd)
        # Returning True keeps the enumeration going.
        return True

    win32gui.EnumWindows(enum_handler, matches)
    return matches
def findProcess(procName):
    """Return the first running process whose lowercased name equals
    `procName`, or None when no such process exists."""
    return next(
        (proc for proc in psutil.process_iter()
         if proc.name().lower() == procName),
        None,
    )
def waitForNoitaTermination(action):
    """If Noita is running, optionally ask it to close and block until
    it has exited.

    Returns a 2-tuple whose second element is always True here; the
    first says whether the game should be launched afterwards —
    presumably consumed by the save/load flow (TODO confirm caller).
    """
    proc = findProcess('noita.exe')
    if proc:
        # Remember where the game lives for later launches.
        config['executable_path'] = proc.exe()
        if config['autoclose']:
            hwnd = get_hwnds_for_pid(proc.pid)
            if len(hwnd) > 0:
                hwnd = hwnd[0]
                try:
                    # Ask the game window to close gracefully.
                    win32gui.PostMessage(hwnd, win32con.WM_CLOSE, 0, 0)
                except:
                    pass
        # Poll until the PID disappears or gets reused by another image.
        while psutil.pid_exists(proc.pid):
            try:
                if psutil.Process(proc.pid).name().lower() != 'noita.exe':
                    break
            except:
                pass
            time.sleep(0.1)
        return config['autoclose'], True
    return config['launch_on_load'] if action == Action.load else False, True
def findExecutable(binary_loc, prefix_prog_files, prefix_independent):
    """Probe common install locations for `binary_loc`.

    Checks both Program Files roots (joined with prefix_prog_files) and
    the root of every logical drive (joined with prefix_independent).
    Returns the first existing path, or '' when none is found.
    """
    roots = [
        os.path.expandvars('%programfiles(x86)%') + prefix_prog_files,
        os.path.expandvars('%programfiles%') + prefix_prog_files,
    ]
    # GetLogicalDriveStrings yields 'C:\\\0D:\\\0...\0' — split and drop
    # the trailing empty entry.
    drive_list = win32api.GetLogicalDriveStrings().split('\0')[:-1]
    roots.extend(drive + prefix_independent for drive in drive_list)
    for root in roots:
        candidate = root + binary_loc
        if os.path.exists(candidate):
            return candidate
    return ''
def findNoita():
    """Locate Noita.exe and cache its path in config['executable_path'].

    Order: keep a still-valid cached path, else take the path of a
    running process, else probe the usual Steam install locations.
    """
    global config
    if os.path.exists(config.get('executable_path')):
        return
    proc = findProcess('noita.exe')
    if proc:
        config['executable_path'] = proc.exe()
        return
    located = findExecutable('\\steamapps\\common\\Noita\\Noita.exe',
                             '\\Steam', '\\SteamLibrary')
    if located != '':
        config['executable_path'] = located
def findSteam():
    """Locate steam.exe and cache its path in config['steam_launch'].

    Order: keep a still-valid cached path, else take the path of a
    running Steam process, else probe standard install locations.
    """
    global config
    if os.path.exists(config.get('steam_launch')):
        return
    proc = findProcess('steam.exe')
    if proc:
        config['steam_launch'] = proc.exe()
        return
    located = findExecutable('\\Steam\\steam.exe', '', '')
    if located != '':
        config['steam_launch'] = located
def find7Zip():
    """Return a path to 7z.exe from PATH or a standard install dir,
    or '' when 7-Zip cannot be found."""
    # shutil.which returns None when absent, so `or` falls through to
    # the install-directory probe exactly as the original branching did.
    return shutil.which('7z.exe') or findExecutable('\\7-Zip\\7z.exe', '', '')
def stylizeBorder(element):
    """Draw a 2px frame inside `element` from four wx.Panel strips,
    replacing any frame a previous call created (stored on
    element.styleBorder)."""
    if hasattr(element, 'styleBorder') and element.styleBorder:
        for border in element.styleBorder:
            border.Destroy()
    element.styleBorder = []
    element.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
    size = element.GetSize()
    w = size.GetWidth()
    h = size.GetHeight()
    strips = [
        ((2, 0), (w - 4, 2)),      # top
        ((2, h - 2), (w - 4, 2)),  # bottom
        ((0, 2), (2, h - 4)),      # left
        ((w - 2, 2), (2, h - 4)),  # right
    ]
    for strip_pos, strip_size in strips:
        panel = wx.Panel(element, pos = strip_pos, size = strip_size)
        element.styleBorder.append(panel)
        panel.SetBackgroundColour(element.GetBackgroundColour())
def selectArchiveTool():
    """Pick the save-archive format: '.7z' when 7-Zip is available,
    '.tar' otherwise. Refreshes config['7z_path'] when stale."""
    if not os.path.exists(config.get('7z_path')):
        config['7z_path'] = find7Zip()
    return '.7z' if config['7z_path'] != '' else '.tar'
def getTextInQuotes(source, start):
    """Return the text between a double quote at `source[start]` and the
    next double quote.

    Returns None when `start` does not sit on a quote, the quotes are
    empty, or the quote is unterminated — the original scanned past the
    end of the string and raised IndexError on unterminated input.
    """
    if start >= len(source) or source[start] != '"':
        return None
    end = source.find('"', start + 1)
    if end == -1:
        return None
    return source[start + 1:end] or None
def updatePlayerSaveStatus():
    """Re-read save00/player.xml and publish the player position.

    Runs on a worker thread; the initial sleep presumably lets the game
    finish writing the file after a change notification — TODO confirm.
    """
    time.sleep(2)
    with open(config['saveFolderPath'] + '\\save00\\player.xml', 'r') as file:
        content = file.read()
    posXStr = 'position.x='
    posYStr = 'position.y='
    try:
        xValue = getTextInQuotes(content, content.index(posXStr) + len(posXStr))
        yValue = getTextInQuotes(content, content.index(posYStr) + len(posYStr))
    except:
        # Position attributes missing/malformed — keep previous status.
        return
    if xValue and yValue:
        try:
            saveStatus['player_pos'] = (int(float(xValue)), int(float(yValue)))
            window.saveStatusChanged()
        except:
            pass
def saveDirChangeHandler(event):
    """Watchdog callback: refresh player status when player.xml changes."""
    if event.src_path.endswith('\\player.xml'):
        Thread(target = updatePlayerSaveStatus).start()
def watchSaveDirectory():
    """Kick off an initial player-status read and watch the save folder
    for further changes. Returns the running Observer."""
    Thread(target = updatePlayerSaveStatus).start()
    handler = PatternMatchingEventHandler(['*'], None, False, True)
    handler.on_created = saveDirChangeHandler
    handler.on_modified = saveDirChangeHandler
    observer = Observer()
    observer.schedule(handler, config['saveFolderPath'], recursive = True)
    observer.start()
    return observer
class ScaledBitmap (wx.Bitmap):
    """wx.Bitmap built from raw image bytes, scaled to fit inside
    width x height while preserving aspect ratio."""
    def __init__(self, data, width, height, quality = wx.IMAGE_QUALITY_HIGH):
        # Load from an in-memory stream of the raw bytes.
        image = wx.Image(io.BytesIO(data), type = wx.BITMAP_TYPE_ANY, index = -1)
        # Uniform scale factor that fits the image in the target box.
        ratio = min(
            float(width) / float(image.GetWidth()),
            float(height) / float(image.GetHeight())
        )
        image = image.Scale(
            round(float(image.GetWidth()) * ratio),
            round(float(image.GetHeight()) * ratio),
            quality
        )
        wx.Bitmap.__init__(self, image.ConvertToBitmap())
class ActionButton (wx.Button):
    """Button base with hover-colour feedback.

    Subclasses set `passiveColor`/`hoverColor` before construction and
    implement PerformAction() for the click behaviour.
    """
    def __init__(self, parent, **kwargs):
        wx.Button.__init__(self, parent, **kwargs)
        self.Initialize()
    def Initialize(self):
        # Colours come from the subclass; None means "leave default".
        if self.passiveColor != None:
            self.SetBackgroundColour(self.passiveColor)
        self.Bind(wx.EVT_ENTER_WINDOW, self.onMouseEnter)
        self.Bind(wx.EVT_LEAVE_WINDOW, self.onMouseLeave)
        self.Bind(wx.EVT_BUTTON, self.onClick)
    def onMouseEnter(self, event):
        if self.hoverColor != None:
            self.SetBackgroundColour(self.hoverColor)
    def onMouseLeave(self, event):
        if self.passiveColor != None:
            self.SetBackgroundColour(self.passiveColor)
    def onClick(self, event):
        self.PerformAction()
    def PerformAction(self):
        # Abstract hook: concrete buttons implement their action here.
        raise NotImplementedError('Method \'PerformAction\' is not implemented')
class ActionBitmapButton (wx.BitmapButton, ActionButton):
    """ActionButton behaviour on a bitmap-faced button."""
    def __init__(self, parent, **kwargs):
        wx.BitmapButton.__init__(self, parent, **kwargs)
        self.Initialize()
class TitleImage (wx.StaticBitmap):
    """Logo bitmap in the custom title bar; forwards mouse events to the
    parent panel so the window can be dragged by the logo too."""
    def __init__(self, parent):
        logo = ScaledBitmap(resources_logo_png, 587, 24)
        wx.StaticBitmap.__init__(
            self,
            parent,
            size = wx.Size(logo.GetWidth(), logo.GetHeight()),
            bitmap = logo,
            pos = (2, 0)
        )
        self.Bind(wx.EVT_LEFT_UP, parent.OnLeftUp)
        self.Bind(wx.EVT_MOTION, parent.OnMouseMove)
        self.Bind(wx.EVT_LEFT_DOWN, parent.OnLeftDown)
class CloseButton (ActionBitmapButton):
    """Title-bar close button.

    Closes immediately when the window reports ReadyToClose; otherwise
    asks the user to confirm first.
    """
    def __init__(self, parent):
        self.passiveColor = colors['background']
        self.hoverColor = colors['hover-red']
        ActionBitmapButton.__init__(
            self,
            parent,
            bitmap = ScaledBitmap(resources_close_png, 42, 24, wx.IMAGE_QUALITY_NEAREST),
            size = wx.Size(42, 24),
            style = wx.BORDER_NONE,
            pos = (654, 0)
        )
    def PerformAction(self):
        if (window.ReadyToClose):
            window.Close()
        else:
            answer = wx.MessageBox(
                "Are you sure you want to stop the installation?",
                "Exit Installation",
                wx.YES_NO | wx.ICON_EXCLAMATION,
                window
            )
            if answer == wx.YES:
                window.Close()
class MinimizeButton (ActionBitmapButton):
    """Title-bar button that minimizes the main window."""
    def __init__(self, parent):
        self.passiveColor = colors['background']
        self.hoverColor = colors['hover-light']
        ActionBitmapButton.__init__(
            self,
            parent,
            bitmap = ScaledBitmap(resources_minimize_png, 42, 24, wx.IMAGE_QUALITY_NEAREST),
            size = wx.Size(42, 24),
            style = wx.BORDER_NONE,
            pos = (612, 0)
        )
    def PerformAction(self):
        window.Iconize()
class TitlePanel (wx.Panel):
    """Custom title bar for the borderless frame: hosts the logo and
    window buttons and implements click-and-drag window moving."""
    def __init__(self, parent):
        wx.Panel.__init__(self, parent, size = wx.Size(696, 24), pos = (2, 2))
        # Offset between the cursor and the window origin during a drag.
        self.delta = None
        self.SetBackgroundColour(colors['background'])
        self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
        self.Bind(wx.EVT_MOTION, self.OnMouseMove)
        self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
        self.addPanelControls()
    def addPanelControls(self):
        TitleImage(self)
        CloseButton(self)
        MinimizeButton(self)
    def OnLeftDown(self, event):
        self.CaptureMouse()
        pos = window.ClientToScreen(event.GetPosition())
        origin = window.GetPosition()
        self.delta = wx.Point(pos.x - origin.x, pos.y - origin.y)
    def OnMouseMove(self, event):
        # Move the whole window so the grab point stays under the cursor.
        if event.Dragging() and event.LeftIsDown() and self.delta != None:
            pos = window.ClientToScreen(event.GetPosition())
            newPos = (pos.x - self.delta.x, pos.y - self.delta.y)
            window.Move(newPos)
    def OnLeftUp(self, event):
        if self.HasCapture():
            self.ReleaseMouse()
class ScrollIndicator (wx.Button):
    """Draggable thumb of the custom scroll bar.

    Dragging is clamped to [0, maxY] and forwarded to the parent
    scroll bar, which moves the list content accordingly.
    """
    def __init__(self, parent, size, pos):
        self.passiveColor = colors['button']
        self.hoverColor = colors['button-hover']
        self.delta = None  # cursor-to-thumb offset during a drag
        self.maxY = 0      # lowest allowed thumb position
        self.scrollBar = parent
        wx.Button.__init__(
            self,
            parent,
            size = size,
            style = wx.BORDER_NONE,
            pos = pos,
            label='',
        )
        self.SetBackgroundColour(self.passiveColor)
        self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
        self.Bind(wx.EVT_MOTION, self.OnMouseMove)
        self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
    def OnLeftDown(self, event):
        self.CaptureMouse()
        pos = self.ClientToScreen(event.GetPosition())
        origin = self.GetPosition()
        self.delta = wx.Point(pos.x - origin.x, pos.y - origin.y)
    def OnMouseMove(self, event):
        if event.Dragging() and event.LeftIsDown() and self.delta != None:
            pos = self.ClientToScreen(event.GetPosition())
            posY = pos.y - self.delta.y
            # Clamp the thumb to the scroll bar's track.
            if posY < 0:
                posY = 0
            if posY > self.maxY:
                posY = self.maxY
            self.SetPosition((0, posY))
            self.SetBackgroundColour(self.hoverColor)
            self.scrollBar.scrollContentByIndicator(posY)
    def OnLeftUp(self, event):
        if self.HasCapture():
            self.ReleaseMouse()
            self.SetBackgroundColour(self.passiveColor)
class ListScrollBar (wx.Panel):
    """Custom scroll bar pairing a ScrollIndicator thumb with a
    ScrollableList; translates thumb/wheel movement into content offsets."""
    def __init__(self, parent, pos):
        wx.Panel.__init__(self, parent, size = (12, parent.ContentHeight), pos = pos)
        self.SetBackgroundColour(colors['text-input'])
        self.list = parent
        # 48 is the fixed thumb height, so the track is ContentHeight-48.
        self.scrollIndicator = ScrollIndicator(self, size = (12, 48), pos = (0, 0))
        self.scrollIndicator.maxY = parent.ContentHeight - 48
    def setScrollableHeight(self, height):
        # scrollMultiplier maps thumb pixels to content pixels.
        if not height:
            self.scrollableHeight = 0
            self.scrollMultiplier = 0
        else:
            self.scrollableHeight = height
            self.scrollMultiplier = float(height) / float(self.list.ContentHeight - 48)
    def scrollContentByIndicator(self, height):
        posY = int(float(height) * self.scrollMultiplier)
        self.list.container.SetPosition((0, -posY))
    def scrollContentByWheel(self, up):
        # 35px per wheel notch, clamped to the scrollable range; the
        # thumb is repositioned to mirror the content offset.
        if self.scrollMultiplier > 0.0:
            posY = 0
            if up:
                posY = self.list.container.GetPosition().y + 35
                if posY > 0:
                    posY = 0
            else:
                posY = self.list.container.GetPosition().y - 35
                if (posY * -1) > self.scrollableHeight:
                    posY = self.scrollableHeight * -1
            self.list.container.SetPosition((0, posY))
            posY = int(float(-posY) / self.scrollMultiplier)
            self.scrollIndicator.SetPosition((0, posY))
class ListMenuButton (ActionBitmapButton):
    """Icon button shown on a save-list row.

    Forwards hover events to the row's mouseMoveHandler so the row's
    highlight stays in sync, and rounds its corners with small panels.
    """
    def __init__(self, parent, pos, name, path, data, mouseMoveHandler):
        self.parent = parent
        self.name = name
        self.path = path
        # Row-level hover handler doubles as this button's move handler.
        self.onMouseMove = mouseMoveHandler
        ActionBitmapButton.__init__(
            self,
            parent,
            bitmap = ScaledBitmap(data, 36, 36, wx.IMAGE_QUALITY_NEAREST),
            size = wx.Size(36, 36),
            style = wx.BORDER_NONE,
            pos = pos
        )
        self.boderElement = []
        self.SetBackgroundColour(self.passiveColor)
    def setUpBorder(self):
        # Cover the four corners with 2x2 panels in the parent's colour,
        # replacing any panels from a previous call.
        if len(self.boderElement) > 0:
            for element in self.boderElement:
                element.Destroy()
            self.boderElement = []
        size = self.GetSize()
        positions = [
            (0, 0),
            (size.GetWidth() - 2, 0),
            (0, size.GetHeight() - 2),
            (size.GetWidth() - 2, size.GetHeight() - 2)
        ]
        for pos in positions:
            self.boderElement.append(wx.Panel(self, pos = pos, size = (2, 2)))
            self.boderElement[-1].SetBackgroundColour(self.parent.GetBackgroundColour())
    def onMouseEnter(self, event):
        self.SetBackgroundColour(self.hoverColor)
        self.onMouseMove(event)
    def onMouseLeave(self, event):
        self.SetBackgroundColour(self.passiveColor)
        self.onMouseMove(event)
class LoadSaveButton (ListMenuButton):
    """Row button that loads the save archive at `path`."""
    def __init__(self, parent, pos, name, path, mouseMoveHandler):
        self.passiveColor = colors['background']
        self.hoverColor = colors['hover-light']
        ListMenuButton.__init__(self, parent, pos, name, path, resources_load_png, mouseMoveHandler)
        self.setUpBorder()
    def PerformAction(self):
        window.makeLoad(self.path)
class DeleteSaveButton (ListMenuButton):
    """Row button that opens the delete-confirmation menu for `path`."""
    def __init__(self, parent, pos, name, path, mouseMoveHandler):
        self.passiveColor = colors['background']
        self.hoverColor = colors['hover-red']
        ListMenuButton.__init__(self, parent, pos, name, path, resources_close_png, mouseMoveHandler)
        self.setUpBorder()
    def PerformAction(self):
        window.openDeleteMenu(self.path)
class ScrollableList (wx.Panel):
    """Bordered panel hosting a vertically scrollable content container
    plus a custom ListScrollBar; supports wheel scrolling and bulk
    enable/disable of its children."""
    def __init__(self, parent, size, pos):
        wx.Panel.__init__(self, parent, size = size, pos = pos)
        self.SetBackgroundColour(colors['border-light'])
        self.disabled = False
        # Interior area excluding the 2px frame and the 12px scroll bar.
        self.ContentWidth = size[0] - 18
        self.ContentHeight = size[1] - 4
        # Decorative corner/edge panels around the scroll bar.
        wx.Panel(self, pos = (self.ContentWidth + 2, 0), size = (2, 2)).SetBackgroundColour(colors['content'])
        wx.Panel(self, pos = (self.ContentWidth + 2, self.ContentHeight + 2), size = (2, 2)).SetBackgroundColour(colors['content'])
        self.containerWrapper = wx.Panel(self, size = (self.ContentWidth, self.ContentHeight), pos = (2, 2))
        self.containerWrapper.SetBackgroundColour(colors['text-input'])
        wx.Panel(self, pos = (self.ContentWidth + 2, 2), size = (2, self.ContentHeight)).SetBackgroundColour(colors['border-light'])
        self.scrollBar = ListScrollBar(self, pos = (self.ContentWidth + 4, 2))
        self.container, self.contentSizer = self.CreateContainer()
        self.container.Show()
    def CreateContainer(self):
        """Build a hidden content panel with a vertical sizer; the panel
        is taller than the viewport and moved to scroll."""
        container = wx.Panel(self.containerWrapper, size = (self.ContentWidth, self.ContentHeight), pos = (0, 0))
        container.Hide()
        container.SetBackgroundColour(colors['text-input'])
        itemIndex = 0
        contentSizer = wx.BoxSizer(orient=wx.VERTICAL)
        container.SetSizer(contentSizer)
        container.Bind(wx.EVT_MOUSEWHEEL, self.OnWheel)
        return container, contentSizer
    def OnWheel(self, event):
        if not self.disabled:
            amt = event.GetWheelRotation()
            if amt > 0:
                self.scrollBar.scrollContentByWheel(True)
            elif amt < 0:
                self.scrollBar.scrollContentByWheel(False)
    def SetHeight(self, height):
        # Content can never be shorter than the viewport.
        if height < self.ContentHeight:
            height = self.ContentHeight
        self.container.SetSize((self.container.GetSize().GetWidth(), height))
        self.scrollBar.setScrollableHeight(height - self.ContentHeight)
    def DestroyContainer(self):
        self.container.Destroy()
    def ShowLoadingStatus(self):
        """Center a 'searching' placeholder message in the list."""
        self.contentSizer.AddStretchSpacer()
        font = wx.Font(14, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_LIGHT, faceName = 'GamePixies')
        text = wx.StaticText(self.container, size = wx.Size(-1, -1), label = "Searching for save files ...")
        text.SetForegroundColour(colors['secondary-text'])
        text.SetFont(font)
        stringSizer = wx.BoxSizer(orient=wx.HORIZONTAL)
        stringSizer.AddSpacer(130)
        stringSizer.Add(text)
        self.contentSizer.Add(stringSizer)
        self.contentSizer.AddStretchSpacer()
        self.container.Layout()
    def disable(self):
        # Children are expected to expose disable()/enable() too.
        self.disabled = True
        self.scrollBar.Disable()
        for child in self.container.GetChildren():
            child.disable()
    def enable(self):
        self.disabled = False
        self.scrollBar.Enable()
        for child in self.container.GetChildren():
            child.enable()
class ContentPanel (wx.Panel):
    """Base panel for the main content area: draws the app title line,
    then delegates page-specific widgets to CreateContent()."""
    def __init__(self, parent):
        wx.Panel.__init__(self, parent, size = wx.Size(532, 670), pos = (166, 28))
        self.SetBackgroundColour(colors['content'])
        self.CreateContent()
        # (text, weight, size, style, line height, colour) per fragment.
        linePieces = [
            ('Noita Save Scummer', wx.FONTWEIGHT_LIGHT, 20, wx.FONTSTYLE_NORMAL, 40, colors['main-text']),
            (' ' + versionNumber, wx.FONTWEIGHT_LIGHT, 16, wx.FONTSTYLE_NORMAL, 35, colors['main-text'])
        ]
        contentSizer = wx.BoxSizer(wx.VERTICAL)
        contentSizer.AddSpacer(10)
        contentSizer.Add(self.CreateFormatedString(20, linePieces))
        self.SetSizer(contentSizer)
        self.Layout()
    def CreateContent(self):
        # Abstract hook: concrete pages add their widgets here.
        raise NotImplementedError('Method \'CreateContent\' is not implemented')
    def CreateFormatedString(self, padding, pieces, container = None):
        """Lay out mixed-style text fragments on one baseline and return
        the horizontal sizer holding them."""
        if (not container):
            container = self
        stringSizer = wx.BoxSizer(orient=wx.HORIZONTAL)
        stringSizer.AddSpacer(padding)
        for text, fontWeight, fontSize, fontStyle, height, color in pieces:
            font = wx.Font(fontSize, wx.FONTFAMILY_DEFAULT, fontStyle, fontWeight, faceName = 'GamePixies')
            text = wx.StaticText(container, size = wx.Size(-1, height), label = text)
            text.SetForegroundColour(color)
            text.SetFont(font)
            stringSizer.Add(text, 0, wx.ALIGN_BOTTOM)
        return stringSizer
class SaveInstance (wx.Panel):
def __init__(self, parent, sizeX, info, parentRect):
wx.Panel.__init__(self, parent = parent, size = (sizeX, 70))
self.saveName = info[0]
self.savePath = info[1]
self.enabled = True
self.parentRect = parentRect
self.SetBackgroundColour(colors['text-input'])
self.interactivePanel = wx.Panel(self, size = (sizeX - 30, 70), pos = (15, 0))
self.hoverColor = colors['border']
self.passiveColor = colors['border-light']
self.interactivePanel.SetBackgroundColour(self.passiveColor)
stylizeBorder(self.interactivePanel)
panel = wx.Panel(self.interactivePanel, size = (sizeX - 34, 66), pos = (2,2))
panel.SetBackgroundColour(colors['save-item'])
sizerVertical = wx.BoxSizer(orient=wx.VERTICAL)
mainTextSizer = wx.BoxSizer(orient=wx.HORIZONTAL)
secondaryTextSizer = wx.BoxSizer(orient=wx.HORIZONTAL)
sizerVertical.AddStretchSpacer(3)
sizerVertical.Add(mainTextSizer)
sizerVertical.AddSpacer(3)
sizerVertical.Add(secondaryTextSizer)
sizerVertical.AddStretchSpacer(2)
panel.SetSizer(sizerVertical)
mainTextSizer.AddSpacer(25)
font = wx.Font(14, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_LIGHT, faceName = 'GamePixies')
text = wx.StaticText(panel, size = wx.Size(-1, -1), label = self.saveName)
text.SetForegroundColour(colors['main-text'])
text.SetFont(font)
mainTextSizer.Add(text)
time_str = datetime.datetime.fromtimestamp(
os.path.getmtime(self.savePath)).strftime('%H : %M : %S %d %b %Y')
secondaryTextSizer.AddSpacer(25)
font = wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_LIGHT, faceName = 'GamePixies')
text = wx.StaticText(panel, size = wx.Size(-1, -1), label = time_str)
text.SetForegroundColour(colors['secondary-text'])
text.SetFont(font)
secondaryTextSizer.Add(text)
panel.Layout()
elements = [self.interactivePanel]
elements += self.interactivePanel.GetChildren()
for child in self.interactivePanel.GetChildren():
elements += child.GetChildren()
for element in elements:
element.Bind(wx.EVT_ENTER_WINDOW, self.onMouseMove)
element.Bind(wx.EVT_LEAVE_WINDOW, self.onMouseMove)
self.loadButton = LoadSaveButton(panel, (panel.GetSize().GetWidth() - 102, 15), self.saveName, self.savePath, self.onMouseMove)
self.deleteButton = DeleteSaveButton(panel, (panel.GetSize().GetWidth() - 51, 15), self.saveName, self.savePath, self.onMouseMove)
def onMouseMove(self, event):
if self.enabled:
rect = self.interactivePanel.GetScreenRect()
pos = wx.GetMousePosition()
topLeft = (rect[0], rect[1])
bottomLeft = (rect[0], rect[1] + rect[3])
if hitTest(self.parentRect, topLeft) and (rect[1] + rect[3]) > (self.parentRect[1] + self.parentRect[3]):
rect[3] = (self.parentRect[1] + self.parentRect[3]) - rect[1]
if hitTest(self.parentRect, bottomLeft) and rect[1] < self.parentRect[1]:
rect[3] = (rect[1] + rect[3]) - self.parentRect[1]
rect[1] = self.parentRect[1]
if hitTest(rect, pos):
self.interactivePanel.SetBackgroundColour(self.hoverColor)
else:
self.interactivePanel.SetBackgroundColour(self.passiveColor)
self.Refresh()
def disable(self):
    """Grey out both row buttons and stop hover handling for this row."""
    for button in (self.loadButton, self.deleteButton):
        button.Disable()
    self.enabled = False
def enable(self):
    """Re-enable both row buttons and resume hover handling."""
    for button in (self.loadButton, self.deleteButton):
        button.Enable()
    self.enabled = True
class ContentButton (wx.Button):
    """Flat, borderless themed button sitting inside a 2px 'border' panel.

    The parent panel acts as the border and is recoloured on hover
    together with the button itself.  Subclasses implement onClick.
    """
    def __init__(self, parent, label, size, pos):
        super().__init__(parent, label = label, size = size, style = wx.BORDER_NONE, pos = pos)
        self.border = parent
        self.passiveColor = colors['background']
        self.hoverColor = colors['hover-light']
        self.SetForegroundColour(colors['main-text'])
        self.SetBackgroundColour(self.passiveColor)
        stylizeBorder(parent)
        buttonFont = wx.Font(18, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, faceName = 'GamePixies')
        self.SetFont(buttonFont)
        for eventType, handler in (
            (wx.EVT_ENTER_WINDOW, self.onMouseEnter),
            (wx.EVT_LEAVE_WINDOW, self.onMouseLeave),
            (wx.EVT_BUTTON, self.onClick),
        ):
            self.Bind(eventType, handler)
    def onMouseEnter(self, event):
        """Highlight the button and its surrounding border panel."""
        self.SetBackgroundColour(self.hoverColor)
        self.border.SetBackgroundColour(colors['button-hover'])
        self.border.Refresh()
    def onMouseLeave(self, event):
        """Restore the passive colours of button and border."""
        self.SetBackgroundColour(self.passiveColor)
        self.border.SetBackgroundColour(colors['button'])
        self.border.Refresh()
class FolderButton (ContentButton):
    """Button that opens the configured save folder in Windows Explorer."""
    def __init__(self, parent):
        super().__init__(parent, 'Save Dir', (150, 32), (2, 2))
    def onClick(self, event):
        subprocess.run(['explorer', config['saveFolderPath']])
class OptionsButton (ContentButton):
    """Button that opens the options popup on the main window."""
    def __init__(self, parent):
        super().__init__(parent, 'Options', (150, 32), (2, 2))
    def onClick(self, event):
        window.openOptionsMenu()
class ClosePopupButton (ContentButton):
    """Button that dismisses whatever popup is currently shown."""
    def __init__(self, parent):
        super().__init__(parent, 'Close', (150, 36), (2, 2))
    def onClick(self, event):
        window.removePopup()
class SaveOptionsButton (ContentButton):
    """'Save' button of the options popup: commits the edited config
    and closes the popup."""
    def __init__(self, parent, optionsMenu):
        super().__init__(parent, 'Save', (150, 36), (2, 2))
        self.optionsMenu = optionsMenu
    def onClick(self, event):
        self.optionsMenu.saveConfig()
        window.removePopup()
class OptionChangePanel (wx.Panel):
    """Base row of the options menu: a right-aligned description label plus
    a clickable, bordered value area.

    Subclasses override onClick/setValue and may replace self.value with a
    different widget.  `config` is the OptionsMenu's pending copy of the
    global config — edits only take effect when the user hits Save.
    """
    def __init__(self, parent, label, pos, align, config):
        wx.Panel.__init__(self, parent, pos = pos, size = (516, 50))
        self.config = config
        self.disabled = False
        self.font = wx.Font(12, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_LIGHT, faceName = 'GamePixies')
        description = wx.StaticText(self, size = wx.Size(160, 20), pos = (0, 16), style = wx.ST_ELLIPSIZE_START | wx.ALIGN_RIGHT | wx.ST_NO_AUTORESIZE)
        description.SetForegroundColour(colors['main-text'])
        description.SetFont(self.font)
        description.SetLabel(label)
        # wrapper = 2px border, textPanel = inner background, value = text.
        self.wrapper = wx.Panel(self, pos = (170, 6), size = (346, 38))
        self.wrapper.SetBackgroundColour(colors['button'])
        stylizeBorder(self.wrapper)
        self.textPanel = wx.Panel(self.wrapper, pos = (2, 2), size = (342, 34))
        self.textPanel.SetBackgroundColour(colors['text-input'])
        self.value = wx.StaticText(self.textPanel, pos = (10, 8), size = (322, 20), style = wx.ST_ELLIPSIZE_START | align | wx.ST_NO_AUTORESIZE)
        self.value.SetForegroundColour(colors['main-text'])
        self.value.SetFont(self.font)
        # All three stacked widgets must forward hover/click: the topmost
        # child under the cursor is the one that receives mouse events.
        for element in [self.wrapper, self.textPanel, self.value]:
            element.Bind(wx.EVT_ENTER_WINDOW, self.onMouseMove)
            element.Bind(wx.EVT_LEAVE_WINDOW, self.onMouseMove)
            element.Bind(wx.EVT_LEFT_UP, self.onClick)
    def onMouseMove(self, event):
        # Hover highlight based on the actual mouse position rather than on
        # which stacked child fired the enter/leave event.
        if not self.disabled:
            rect = self.wrapper.GetScreenRect()
            pos = wx.GetMousePosition()
            if hitTest(rect, pos):
                self.wrapper.SetBackgroundColour(colors['button-hover'])
            else:
                self.wrapper.SetBackgroundColour(colors['button'])
            self.wrapper.Refresh()
    def disable(self):
        # Ignore clicks/hover, e.g. while a key-binding capture is running.
        self.disabled = True
    def enable(self):
        self.disabled = False
class OptionCheckbox (OptionChangePanel):
    """Boolean option row: the value area is shrunk to a small square that
    is filled while the option is on and background-coloured while off."""
    def __init__(self, parent, pos, optionsMenu, config, label, option):
        OptionChangePanel.__init__(self, parent, label = label, pos = pos, align = wx.ALIGN_RIGHT, config = config)
        # Replace the base class's StaticText value with a colour-swatch
        # panel of checkbox proportions.
        self.value.Destroy()
        self.wrapper.SetSize((38, 38))
        self.textPanel.SetSize((34, 34))
        self.value = wx.Panel(self.textPanel, pos = (8, 8), size = (18, 18))
        # Re-bind the events the destroyed StaticText used to forward.
        self.value.Bind(wx.EVT_ENTER_WINDOW, self.onMouseMove)
        self.value.Bind(wx.EVT_LEAVE_WINDOW, self.onMouseMove)
        self.value.Bind(wx.EVT_LEFT_UP, self.onClick)
        self.option = option  # key in self.config this checkbox toggles
        stylizeBorder(self.wrapper)
        self.setValue()
    def setValue(self):
        # Filled square = True, background-coloured square = False.
        if self.config[self.option]:
            self.value.SetBackgroundColour(colors['button'])
        else:
            self.value.SetBackgroundColour(colors['text-input'])
        self.wrapper.Refresh()
    def onClick(self, event):
        if not self.disabled:
            self.config[self.option] = not self.config[self.option]
            self.setValue()
class OptionAutoclose (OptionCheckbox):
    """Checkbox row bound to the 'autoclose' config flag."""
    def __init__(self, parent, pos, optionsMenu, config):
        super().__init__(parent, pos, optionsMenu, config, 'Autoclose Noita :', 'autoclose')
class OptionLoadLaunch (OptionCheckbox):
    """Checkbox row bound to the 'launch_on_load' config flag."""
    def __init__(self, parent, pos, optionsMenu, config):
        super().__init__(parent, pos, optionsMenu, config, 'Launch Noita on load :', 'launch_on_load')
        self.Raise()
class OptionUseSteamLaunch (OptionCheckbox):
    """Checkbox row bound to the 'use_steam_launch' config flag."""
    def __init__(self, parent, pos, optionsMenu, config):
        super().__init__(parent, pos, optionsMenu, config, 'Use Steam launch :', 'use_steam_launch')
class OptionDisplayStatus (OptionCheckbox):
    """Checkbox row bound to the 'display_save_status' config flag."""
    def __init__(self, parent, pos, optionsMenu, config):
        super().__init__(parent, pos, optionsMenu, config, 'Show save info :', 'display_save_status')
        self.Raise()
class FolderSelectSetting (OptionChangePanel):
    """Option row that picks the folder where save archives are stored."""
    def __init__(self, parent, pos, optionsMenu, config):
        OptionChangePanel.__init__(self, parent, label = "Path to save folder :", pos = pos, align = wx.ALIGN_LEFT, config = config)
        self.optionsMenu = optionsMenu
        self.setValue()
    def setValue(self):
        # Mirror the pending (unsaved) config value into the display.
        self.value.SetLabel(self.config['saveFolderPath'])
        self.wrapper.Refresh()
    def onClick(self, event):
        """Open a directory picker and store the chosen path."""
        if not self.disabled:
            dialog = wx.DirDialog(
                self,
                message='Select Folder',
                defaultPath=self.config['saveFolderPath'],
                style=wx.DD_DEFAULT_STYLE,
                pos=wx.DefaultPosition,
                size=wx.DefaultSize
            )
            # Bug fix: only take the new path when the user confirmed the
            # dialog. Previously a cancelled dialog overwrote the configured
            # folder with GetPath()'s empty string. This also matches
            # OptionExecutableSelect.onClick's handling.
            if dialog.ShowModal() != wx.ID_CANCEL:
                self.config['saveFolderPath'] = dialog.GetPath()
                self.setValue()
class OptionExecutableSelect (OptionChangePanel):
    """Option row that stores the path of an executable chosen through a
    file dialog (base for the Noita / Steam path settings)."""
    def __init__(self, parent, pos, optionsMenu, config, label, option, hint):
        OptionChangePanel.__init__(self, parent, label = label, pos = pos, align = wx.ALIGN_LEFT, config = config)
        self.optionsMenu = optionsMenu
        self.option = option  # config key holding the executable path
        self.hint = hint      # file-dialog title shown to the user
        self.setValue()
    def setValue(self):
        # Mirror the pending config value into the display.
        self.value.SetLabel(self.config[self.option])
        self.wrapper.Refresh()
    def onClick(self, event):
        # Start browsing from the current path, or the home dir when unset.
        defaultPath = self.config[self.option]
        if defaultPath == '':
            defaultPath = os.path.expanduser('~')
        if not self.disabled:
            dialog = wx.FileDialog(
                self,
                message = self.hint,
                defaultDir = os.path.dirname(defaultPath),
                style = wx.FD_DEFAULT_STYLE | wx.FD_FILE_MUST_EXIST,
                pos = wx.DefaultPosition,
                size = wx.DefaultSize,
                wildcard = 'Executable files (*.exe)|*.exe'
            )
            # Keep the old path untouched when the user cancels the dialog.
            if not dialog.ShowModal() == wx.ID_CANCEL:
                self.config[self.option] = dialog.GetPath()
                self.setValue()
class NoitaExeSelectSetting (OptionExecutableSelect):
    """Executable picker bound to the 'executable_path' config entry."""
    def __init__(self, parent, pos, optionsMenu, config):
        super().__init__(parent, pos, optionsMenu, config,
                         'Path to executable :',
                         'executable_path',
                         'Select Noita executable')
class SteamExeSelectSetting (OptionExecutableSelect):
    """Executable picker bound to the 'steam_launch' config entry."""
    def __init__(self, parent, pos, optionsMenu, config):
        super().__init__(parent, pos, optionsMenu, config,
                         'Path to steam :',
                         'steam_launch',
                         'Select Steam executable')
class BindingSetting (OptionChangePanel):
    """Option row that captures a global hotkey combination.

    Clicking the row clears the binding and starts a background thread with
    a low-level keyboard hook; pressed key codes are posted back to the GUI
    thread, filtered into a canonical (modifiers-first) tuple and stored in
    the pending config.  Releasing all keys ends the capture.
    """
    def __init__(self, parent, pos, optionsMenu, binding, label, config):
        OptionChangePanel.__init__(self, parent, label = label, pos = pos, align = wx.ALIGN_LEFT, config = config)
        self.optionsMenu = optionsMenu
        self.binding = binding  # config key, e.g. 'hotkey_save'
        self.setValue()
        # Custom event used to marshal key presses from the hook thread
        # onto the GUI thread.
        self.keysPressedEvent, EVT_RESULT = NewEvent()
        self.Bind(EVT_RESULT, self.onKeysPressed)
    def setValue(self):
        # Render the stored tuple as 'mod + mod + key' (strip last ' + ').
        value = ''
        if self.config[self.binding]:
            for key in self.config[self.binding]:
                value += key + ' + '
            value = value[:-3]
        self.value.SetLabel(value)
        self.wrapper.Refresh()
    def filterKeys(self):
        # Convert the collected key codes to names, deduplicate, and order
        # modifiers (alt/shift/control/super) before regular keys.
        keys = []
        for code in self.bound_keys:
            char = toAscii(code)
            if char:
                keys.append(char)
        keys = set(keys)
        sorted_keys = []
        for special in ['alt', 'shift', 'control', 'super']:
            for key in keys:
                if special == key:
                    sorted_keys.insert(0, key)
        for key in keys:
            # Accept single characters and short 'f...' names (f-keys).
            if len(key) == 1 or (len(key) < 4 and key[0] == 'f'):
                sorted_keys.append(key)
        return sorted_keys
    def onKeysPressed(self, event):
        # GUI thread. An empty key set means everything was released:
        # stop capturing. Otherwise accumulate the newly seen codes and
        # refresh the stored binding.
        if len(event.data) == 0:
            self.removeHook()
            self.listening = False
            self.onMouseMove(event)
            self.optionsMenu.enable()
        else:
            for key in event.data:
                if key not in self.bound_keys:
                    self.bound_keys.append(key)
            self.config[self.binding] = tuple(self.filterKeys())
            self.setValue()
    def keyPressHandler(self):
        # Hook callback (runs on the hook thread): snapshot the currently
        # pressed keys and post them to the GUI thread.
        keys = []
        for e in keyboard._pressed_events:
            keys.append(e)
        wx.PostEvent(self, self.keysPressedEvent(data = keys))
    def listenToKeys(self):
        # Worker thread body: install the hook, then idle until
        # onKeysPressed clears self.listening.
        self.listening = True
        self.removeHook = keyboard.hook(lambda e: self.keyPressHandler())
        while self.listening:
            time.sleep(0.1)
    def onClick(self, event):
        # Begin a new capture: clear the binding, lock the options menu and
        # start the listener thread.
        if not self.disabled:
            self.bound_keys = []
            self.config[self.binding] = None
            self.setValue()
            self.optionsMenu.disable()
            thread = Thread(target = self.listenToKeys)
            thread.start()
class OptionsMenu (wx.Panel):
    """Popup panel holding all settings rows.

    Edits go to a copy of the global config; only saveConfig (triggered by
    the Save button) writes them back, so Close discards unsaved changes.
    """
    def __init__(self, parent):
        wx.Panel.__init__(self, parent, size = (550, 680), pos = (75, 10))
        self.Raise()
        self.SetBackgroundColour(colors['border'])
        stylizeBorder(self)
        global config
        # Work on a copy so cancelling the popup discards edits.
        self.config = dict(config)
        contentPanel = wx.Panel(self, size = (546, 676), pos = (2,2))
        contentPanel.SetBackgroundColour(colors['content'])
        panel = wx.Panel(contentPanel, pos = (45, 621), size = (154, 40))
        panel.SetBackgroundColour(colors['button'])
        self.closeButton = ClosePopupButton(panel)
        panel = wx.Panel(contentPanel, pos = (347, 621), size = (154, 40))
        panel.SetBackgroundColour(colors['button'])
        self.saveButton = SaveOptionsButton(panel, self)
        # All option rows, kept so they can be bulk enabled/disabled while
        # a BindingSetting captures keys.
        self.options = []
        self.options.append(
            FolderSelectSetting(contentPanel, (15, 15), self , self.config)
        )
        self.options.append(
            BindingSetting(contentPanel, (15, 80), self, 'hotkey_save', 'Save hotkey :', self.config)
        )
        self.options.append(
            BindingSetting(contentPanel, (15, 145), self, 'hotkey_saveQuick', 'Quick save hotkey :', self.config)
        )
        self.options.append(
            BindingSetting(contentPanel, (15, 210), self, 'hotkey_load', 'Load hotkey :', self.config)
        )
        self.options.append(
            BindingSetting(contentPanel, (15, 275), self, 'hotkey_loadQuick', 'Quick load hotkey :', self.config)
        )
        self.options.append(
            OptionAutoclose(contentPanel, (15, 340), self, self.config)
        )
        self.options.append(
            OptionLoadLaunch(contentPanel, (270, 340), self, self.config)
        )
        self.options.append(
            OptionUseSteamLaunch(contentPanel, (15, 405), self, self.config)
        )
        self.options.append(
            OptionDisplayStatus(contentPanel, (270, 405), self, self.config)
        )
        self.options.append(
            NoitaExeSelectSetting(contentPanel, (15, 470), self , self.config)
        )
        self.options.append(
            SteamExeSelectSetting(contentPanel, (15, 535), self, self.config)
        )
    def disable(self):
        # Lock every control (used while a key-binding capture runs).
        for option in self.options:
            option.disable()
        self.closeButton.Disable()
        self.saveButton.Disable()
    def enable(self):
        for option in self.options:
            option.enable()
        self.closeButton.Enable()
        self.saveButton.Enable()
    def saveConfig(self):
        """Commit the edited copy to the global config and persist it."""
        global config
        config = dict(self.config)
        writeConfig()
        # Apply GUI-affecting settings immediately.
        window.statusInfo.Show(config['display_save_status'])
class SaveButton (ContentButton):
    """Button that opens the new-save dialog."""
    def __init__(self, parent):
        super().__init__(parent, 'Save', (150, 32), (2, 2))
    def onClick(self, event):
        window.openNewSaveMenu()
class NewSaveButton (ContentButton):
    """'Save' button of the new-save dialog.

    If the chosen name already exists, the button turns red, relabels to
    'Overwrite' and disables itself for half a second so a double-click
    cannot overwrite a save by accident; the next deliberate click then
    proceeds with the overwrite.
    """
    def __init__(self, parent, saveMenu):
        ContentButton.__init__(self, parent, 'Save', (150, 36), (2, 2))
        self.saveMenu = saveMenu
        # Custom event lets the cooldown thread re-enable the button on the
        # GUI thread.
        self.overwriteCooldownEvent, EVT_RESULT = NewEvent()
        self.Bind(EVT_RESULT, self.enable)
        self.overwrite = False  # True once the user has been warned
    def manualHoverDetect(self):
        # Re-run hover styling from the actual mouse position; Disable(),
        # Enable() and the label swap do not emit enter/leave events.
        rect = self.GetScreenRect()
        pos = wx.GetMousePosition()
        if hitTest(rect, pos):
            self.onMouseEnter(None)
        else:
            self.onMouseLeave(None)
        self.Refresh()
    def enable(self, event):
        self.Enable()
        self.manualHoverDetect()
    def overwriteCooldown(self):
        # Worker thread: keep the button disabled briefly, then arm the
        # overwrite and ask the GUI thread to re-enable the button.
        time.sleep(0.5)
        self.overwrite = True
        wx.PostEvent(self, self.overwriteCooldownEvent())
    def onClick(self, event):
        global saveFiles
        name = self.saveMenu.textInput.GetValue()
        if not self.overwrite and name in saveFiles:
            # First click on an existing name: warn instead of saving.
            self.hoverColor = colors['button-red']
            self.SetLabel('Overwrite')
            self.manualHoverDetect()
            self.Disable()
            thread = Thread(target = self.overwriteCooldown)
            thread.start()
            return
        window.makeSave(name)
class PlaceholderPanel (wx.Panel):
    """Popup with a single centered message, shown while a background
    operation runs."""
    def __init__(self, parent, label):
        wx.Panel.__init__(self, parent, size = (550, 230), pos = (75, 235))
        self.Raise()
        self.SetBackgroundColour(colors['border'])
        stylizeBorder(self)
        self.label = label
        self.contentPanel = wx.Panel(self, size = (546, 226), pos = (2,2))
        self.contentPanel.SetBackgroundColour(colors['content'])
        messageFont = wx.Font(18, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_LIGHT, faceName = 'GamePixies')
        self.text = wx.StaticText(self.contentPanel, size = wx.Size(-1, -1), label = self.label)
        self.text.SetForegroundColour(colors['secondary-text'])
        self.text.SetFont(messageFont)
        # Two nested stretch sizers center the label both ways.
        rowSizer = wx.BoxSizer(orient=wx.HORIZONTAL)
        rowSizer.Add(self.text, 1, wx.CENTER)
        columnSizer = wx.BoxSizer(orient=wx.VERTICAL)
        columnSizer.Add(rowSizer, 1, wx.CENTER)
        self.contentPanel.SetSizer(columnSizer)
        self.contentPanel.Layout()
        self.setText(self.label)
    def setText(self, text):
        """Replace the displayed message and recenter it."""
        self.text.SetLabel(text)
        self.contentPanel.Layout()
class WaitingPlaceholder (PlaceholderPanel):
    """Placeholder that first shows a 'waiting for Noita' message and can
    later be switched to its real operation label."""
    def __init__(self, parent, label):
        super().__init__(parent, label)
        self.setText('Waiting for Noita to close ...')
    def setActionLabel(self):
        """Switch from the waiting message to the operation message."""
        self.setText(self.label)
class SavingPlaceholder (WaitingPlaceholder):
    """Placeholder shown while a save backup is being created."""
    def __init__(self, parent):
        super().__init__(parent, 'Saving, please wait ...')
class LoadingPlaceholder (WaitingPlaceholder):
    """Placeholder shown while a save is being restored."""
    def __init__(self, parent):
        super().__init__(parent, 'Loading, please wait ...')
class DeletingPlaceholder (PlaceholderPanel):
    """Placeholder shown while a save archive is being deleted."""
    def __init__(self, parent):
        super().__init__(parent, 'Deleting, please wait ...')
class NewSaveMenu (wx.Panel):
    """Popup for naming and creating a new save.

    The text input is pre-filled with a timestamped default name; the Save
    button (NewSaveButton) reads it via self.textInput.
    """
    def __init__(self, parent):
        wx.Panel.__init__(self, parent, size = (550, 230), pos = (75, 235))
        self.Raise()
        self.SetBackgroundColour(colors['border'])
        stylizeBorder(self)
        contentPanel = wx.Panel(self, size = (546, 226), pos = (2,2))
        contentPanel.SetBackgroundColour(colors['content'])
        font = wx.Font(14, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_LIGHT, faceName = 'GamePixies')
        text = wx.StaticText(contentPanel, size = wx.Size(-1, -1), label = 'Save game as:', pos = (30, 30))
        text.SetForegroundColour(colors['main-text'])
        text.SetFont(font)
        # Bordered wrapper around the text input.
        panel = wx.Panel(contentPanel, size = (486, 50), pos = (30, 70))
        panel.SetBackgroundColour(colors['button'])
        stylizeBorder(panel)
        panel = wx.Panel(panel, size = (482, 46), pos = (2, 2))
        panel.SetBackgroundColour(colors['text-input'])
        # Default name encodes the current timestamp.
        saveName = 'save-{date:%Y_%m_%d_%H_%M_%S}'.format(date=datetime.datetime.now())
        self.textInput = wx.TextCtrl(panel, value=saveName, pos=(10, 11), size=(462, 30), style=wx.NO_BORDER)
        self.textInput.SetBackgroundColour(colors['text-input'])
        self.textInput.SetForegroundColour(colors['main-text'])
        self.textInput.SetFont(font)
        panel = wx.Panel(contentPanel, size = (154, 40), pos = (30, 160))
        panel.SetBackgroundColour(colors['button'])
        NewSaveButton(panel, self)
        panel = wx.Panel(contentPanel, size = (154, 40), pos = (362, 160))
        panel.SetBackgroundColour(colors['button'])
        ClosePopupButton(panel)
class SaveFileListPanel (ContentPanel):
    """Main content area: scrollable list of saves plus the bottom buttons.

    Save discovery runs on a worker thread; the refreshed list is rendered
    on the GUI thread through a custom wx event.
    """
    def __init__(self, parent):
        ContentPanel.__init__(self, parent)
        self.displayEvent, EVT_RESULT = NewEvent()
        self.Bind(EVT_RESULT, self.updateDisplay)
    def updateDisplay(self, event):
        # Build the new list container before destroying the old one, then
        # swap it in, so the list never appears empty mid-refresh.
        newContainer, newContentSizer = self.saveList.CreateContainer()
        height = 15
        newContentSizer.AddSpacer(15)
        global saveFiles
        contentRect = self.saveList.containerWrapper.GetScreenRect()
        # One SaveInstance row per (name, path) entry, 15px apart.
        for saveFile in saveFiles:
            saveInstance = SaveInstance(newContainer, self.saveList.ContentWidth, (saveFile, saveFiles[saveFile]), contentRect)
            newContentSizer.Add(saveInstance)
            newContentSizer.AddSpacer(15)
            height += saveInstance.GetSize().GetHeight() + 15
        self.saveList.DestroyContainer()
        self.saveList.container = newContainer
        self.saveList.contentSizer = newContentSizer
        self.saveList.container.Layout()
        self.saveList.SetHeight(height)
        self.saveList.container.Show()
    def findSaveFiles(self, initInProcess = False):
        # Worker-thread entry: rescan the save folder, then ask the GUI
        # thread to redraw. The short sleep on startup lets the initial
        # window paint before the first list swap.
        saveMng.findSaveFiles()
        if initInProcess:
            time.sleep(0.25)
        wx.PostEvent(self, self.displayEvent())
    def CreateContent(self):
        # Called by ContentPanel: build the list widget and the bottom
        # action buttons, then populate the list asynchronously.
        self.saveList = ScrollableList(self, size = (502, 555), pos = (15, 50))
        stylizeBorder(self.saveList)
        self.saveList.ShowLoadingStatus()
        panel = wx.Panel(self, pos = (15, 619), size = (154, 36))
        panel.SetBackgroundColour(colors['button'])
        self.saveButton = SaveButton(panel)
        panel = wx.Panel(self, pos = (189, 619), size = (154, 36))
        panel.SetBackgroundColour(colors['button'])
        self.optionsButton = OptionsButton(panel)
        panel = wx.Panel(self, pos = (363, 619), size = (154, 36))
        panel.SetBackgroundColour(colors['button'])
        self.folderButton = FolderButton(panel)
        thread = Thread(target = self.findSaveFiles, args=(True,))
        thread.start()
    def disable(self):
        # Lock the list and all buttons while a popup is open.
        self.saveList.disable()
        self.saveButton.Disable()
        self.optionsButton.Disable()
        self.folderButton.Disable()
    def enable(self):
        self.saveList.enable()
        self.saveButton.Enable()
        self.optionsButton.Enable()
        self.folderButton.Enable()
class DeleteSavePopupButton (ContentButton):
    """Red confirmation button that actually deletes the save archive."""
    def __init__(self, parent, savePath):
        super().__init__(parent, 'Delete', (150, 36), (2, 2))
        self.hoverColor = colors['button-red']
        self.savePath = savePath
    def onClick(self, event):
        window.makeDelete(self.savePath)
class DeleteSavePopup (wx.Panel):
    """Confirmation dialog shown before deleting a save archive."""
    def __init__(self, parent, savePath):
        wx.Panel.__init__(self, parent, size = (550, 160), pos = (75, 270))
        self.Raise()
        self.SetBackgroundColour(colors['border'])
        contentPanel = wx.Panel(self, size = (546, 156), pos = (2,2))
        contentPanel.SetBackgroundColour(colors['content'])
        font = wx.Font(14, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_LIGHT, faceName = 'GamePixies')
        text = wx.StaticText(contentPanel, size = wx.Size(-1, -1), label = 'Do you really want to delete this save?', pos = (30, 30))
        text.SetForegroundColour(colors['main-text'])
        text.SetFont(font)
        # 'Close' (right) cancels; the red delete button (left) confirms.
        panel = wx.Panel(contentPanel, size = (154, 40), pos = (362, 86))
        panel.SetBackgroundColour(colors['button'])
        ClosePopupButton(panel)
        panel = wx.Panel(contentPanel, size = (154, 40), pos = (30, 86))
        panel.SetBackgroundColour(colors['button'])
        DeleteSavePopupButton(panel, savePath)
class SaveStatusInfo (wx.Panel):
    """Small sidebar box showing the player's x/y position read from the
    global saveStatus dict."""
    def __init__(self, parent):
        wx.Panel.__init__(self, parent, size = (132, 55), pos = (17, 435), style=wx.TRANSPARENT_WINDOW)
        self.Raise()
        self.SetBackgroundColour(colors['border-light'])
        stylizeBorder(self)
        content = wx.Panel(self, pos = (2, 2), size = (128, 51))
        content.SetBackgroundColour(colors['text-input'])
        font = wx.Font(13, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_LIGHT, faceName = 'GamePixies')
        # Placeholder '???' values until updateStatus runs with real data.
        self.playerX = wx.StaticText(content, size = wx.Size(-1, -1), label = 'x : ???', pos = (15, 5))
        self.playerX.SetForegroundColour(colors['secondary-text'])
        self.playerX.SetFont(font)
        self.playerY = wx.StaticText(content, size = wx.Size(-1, -1), label = 'y : ???', pos = (15, 23))
        self.playerY.SetForegroundColour(colors['secondary-text'])
        self.playerY.SetFont(font)
    def updateStatus(self):
        # saveStatus is filled elsewhere; 'player_pos' may be falsy until a
        # save has been inspected, in which case the labels stay unchanged.
        if saveStatus['player_pos']:
            self.playerX.SetLabel('x : ' + str(saveStatus['player_pos'][0]))
            self.playerY.SetLabel('y : ' + str(saveStatus['player_pos'][1]))
class MainWindow (wx.Frame):
    """Borderless main application window.

    Owns the save list, the popup stack (options / new-save / placeholder
    dialogs) and runs save/load/delete operations on worker threads,
    marshalling results back to the GUI thread via custom wx events.
    """
    def __init__(self, parent):
        wx.Frame.__init__(
            self,
            parent,
            id = wx.ID_ANY,
            title = wx.EmptyString,
            pos = wx.DefaultPosition,
            size = wx.Size(700, 700),
            style = wx.NO_BORDER | wx.SIMPLE_BORDER
        )
        self.SetDoubleBuffered(True)
        self.ReadyToClose = True
        self.popup = None  # the currently shown popup panel, if any
        self.SetBackgroundColour(colors['border'])
        self.Centre(wx.BOTH)
        stylizeBorder(self)
        # Custom events let worker threads trigger GUI updates safely.
        self.hotkeyEvent, EVT_RESULT = NewEvent()
        self.Bind(EVT_RESULT, self.hotkeyEventHandler)
        self.processCompletedEvent, EVT_RESULT = NewEvent()
        self.Bind(EVT_RESULT, self.processComplete)
        self.noitaWaitEndEvent, EVT_RESULT = NewEvent()
        self.Bind(EVT_RESULT, self.waitForNoitaEneded)
        self.saveStatusChangedEvent, EVT_RESULT = NewEvent()
        self.Bind(EVT_RESULT, self.updateSaveStatus)
        TitlePanel(self)
        self.contentPanel = SaveFileListPanel(self)
        # Decorative divider lines and sidebar background art.
        wx.Panel(self, pos = (164, 28), size = (3, 670)).SetBackgroundColour(colors['border-light'])
        line = wx.Panel(self, pos = (2, 26), size = (696, 2))
        line.SetBackgroundColour(colors['border-light'])
        wx.StaticBitmap(self, bitmap=ScaledBitmap(resources_background_png, 696, 670), pos = (2, 28), size = (162, 670))
        wx.Panel(line, pos = (162, 0), size = (2, 2)).SetBackgroundColour(colors['background'])
        self.statusInfo = SaveStatusInfo(self)
        self.statusInfo.Show(config['display_save_status'])
    def __del__( self ):
        pass
    def updateSaveStatus(self, event):
        self.statusInfo.updateStatus()
    def saveStatusChanged(self):
        """Thread-safe notification that the save status changed."""
        wx.PostEvent(self, self.saveStatusChangedEvent())
    def deleteThread(self, path):
        # Worker thread: deleting never relaunches the game.
        self.needToLaunch = False
        saveMng.deleteSave(path)
        wx.PostEvent(self, self.processCompletedEvent())
    def waitForNoitaThenAction(self, callback, action):
        # Worker thread: wait for Noita to terminate (or be closed), then
        # run the actual save/load callback and notify the GUI thread.
        self.needToLaunch, canDoAction = waitForNoitaTermination(action)
        wx.PostEvent(self, self.noitaWaitEndEvent())
        if canDoAction:
            callback()
        wx.PostEvent(self, self.processCompletedEvent())
    def saveThread(self, name):
        self.waitForNoitaThenAction(lambda: saveMng.backupSave(name), Action.save)
    def loadThread(self, path):
        self.waitForNoitaThenAction(lambda: saveMng.loadSave(path), Action.load)
    def makeSave(self, name):
        """Start backing up the current run under the given name."""
        self.removePopup()
        self.showSavePlaceholder()
        thread = Thread(target = self.saveThread, args=(name,))
        thread.start()
    def makeLoad(self, path):
        """Start restoring the save archive at path."""
        self.removePopup()
        self.showLoadPlaceholder()
        thread = Thread(target = self.loadThread, args=(path,))
        thread.start()
    def makeDelete(self, path):
        """Start deleting the save archive at path."""
        self.removePopup()
        self.showDeletePlaceholder()
        thread = Thread(target = self.deleteThread, args=(path,))
        thread.start()
    def waitForNoitaEneded(self, event):
        # [sic] method name kept as-is; bound above. The game has exited:
        # switch the placeholder from 'waiting' to the operation label.
        self.popup.setActionLabel()
    def processComplete(self, event):
        # Back on the GUI thread after a save/load/delete finished:
        # refresh the list, drop the placeholder, optionally relaunch.
        self.contentPanel.findSaveFiles()
        self.removePopup()
        if self.needToLaunch:
            # Bug fix: 'launch' was previously unbound when neither a Steam
            # path nor an executable path is configured, raising a NameError
            # at 'if launch:'.
            launch = None
            if config['steam_launch'] != '' and config['use_steam_launch']:
                launch = [config['steam_launch'], '-applaunch', '881100']
            elif config['executable_path'] != '':
                os.chdir(os.path.dirname(config['executable_path']))
                launch = [config['executable_path']]
            if launch:
                # Append the user's extra launch arguments word by word.
                for argument in config['launch_arguments']:
                    for word in argument.split():
                        if word != '':
                            launch.append(word)
                subprocess.Popen(launch, close_fds=True, creationflags=subprocess.DETACHED_PROCESS)
    def openOptionsMenu(self):
        hkm.unregisterAll()
        self.contentPanel.disable()
        self.popup = OptionsMenu(self)
    def openNewSaveMenu(self):
        hkm.unregisterAll()
        self.contentPanel.disable()
        self.popup = NewSaveMenu(self)
    def openDeleteMenu(self, savePath):
        hkm.unregisterAll()
        self.contentPanel.disable()
        self.popup = DeleteSavePopup(self, savePath)
    def removePopup(self):
        """Destroy the current popup (if any) and restore normal input."""
        if self.popup:
            self.popup.Destroy()
            self.popup = None
            self.contentPanel.enable()
            hkm.registerAll()
    def showSavePlaceholder(self):
        hkm.unregisterAll()
        self.contentPanel.disable()
        self.popup = SavingPlaceholder(self)
    def showLoadPlaceholder(self):
        hkm.unregisterAll()
        self.contentPanel.disable()
        self.popup = LoadingPlaceholder(self)
    def showDeletePlaceholder(self):
        hkm.unregisterAll()
        self.contentPanel.disable()
        self.popup = DeletingPlaceholder(self)
    def hotkeyEventHandler(self, event):
        # Quicksaves rotate through three slots: '!!quicksave~1'..'~3'.
        # 'load' event has no special behavior
        if event.data == 'save-quick':
            saveNumber = saveMng.getQuicksaveNumber()
            self.makeSave('!!quicksave~' + str(1 if saveNumber == 3 else saveNumber + 1))
        elif event.data == 'save':
            self.openNewSaveMenu()
        if event.data == 'load-quick':
            saveName = '!!quicksave~' + str(saveMng.getQuicksaveNumber())
            if saveName in saveFiles:
                self.makeLoad(saveFiles[saveName])
        self.Refresh()
class HotkeyManager():
    """Registers/unregisters global hotkeys and forwards presses to the
    main window as custom wx events."""

    # (config key, registration-flag attribute, handler method name),
    # in the same order the original register/unregister sequence used.
    _BINDINGS = (
        ('hotkey_save', 'reg_backup', 'backupSave'),
        ('hotkey_saveQuick', 'reg_backupQuick', 'backupQuick'),
        ('hotkey_load', 'reg_load', 'loadSave'),
        ('hotkey_loadQuick', 'reg_loadQuick', 'loadQuick'),
    )

    def loadSave(self, *args):
        focusWindow()
        wx.PostEvent(window, window.hotkeyEvent(data = 'load'))
    def loadQuick(self, *args):
        wx.PostEvent(window, window.hotkeyEvent(data = 'load-quick'))
    def backupSave(self, *args):
        focusWindow()
        wx.PostEvent(window, window.hotkeyEvent(data = 'save'))
    def backupQuick(self, *args):
        wx.PostEvent(window, window.hotkeyEvent(data = 'save-quick'))
    def __init__(self):
        for _key, flag, _handler in self._BINDINGS:
            setattr(self, flag, False)
        self.hk = SystemHotkey()
        self.registerAll()
    def unregisterAll(self):
        """Release every currently registered hotkey."""
        for key, flag, _handler in self._BINDINGS:
            if getattr(self, flag):
                self.hk.unregister(config[key])
                setattr(self, flag, False)
    def registerAll(self):
        """Register every configured, not-yet-registered hotkey."""
        for key, flag, handler in self._BINDINGS:
            if config[key] and not getattr(self, flag):
                self.hk.register(config[key], callback = getattr(self, handler))
                setattr(self, flag, True)
class SaveManager():
    """Creates, restores and deletes save archives.

    Archives are .tar written natively, or .7z written through an external
    7-Zip executable configured as config['7z_path'].
    """
    def __init__(self, extension):
        # Archive format used for new backups ('.tar' or '.7z').
        self.extension = extension
    def deleteSaveFolder(self):
        """Remove Noita's live 'save00' folder, if present."""
        saveFolderPath = config['saveFolderPath'] + '\\save00'
        if not os.path.exists(saveFolderPath):
            return
        shutil.rmtree(saveFolderPath)
    def findSaveFiles(self):
        """Rebuild the global saveFiles map {name: path}, newest first."""
        global saveFiles
        saveFiles = {}
        if not os.path.exists(config['saveFolderPath']):
            os.makedirs(config['saveFolderPath'])
        files = os.listdir(config['saveFolderPath'])
        for file in files:
            name, extension = os.path.splitext(file)
            if extension in supportedExtensions:
                saveFiles[name] = config['saveFolderPath'] + '\\' + file
        saveFiles = dict(sorted(saveFiles.items(), key=lambda item: os.path.getmtime(item[1]), reverse=True))
    def getQuicksaveNumber(self):
        """Return the slot number (1-3) of the most recent quicksave.

        Falls back to 1 when no quicksave exists or its name is malformed.
        A legacy unnumbered '!!quicksave' archive is migrated to slot 1.
        """
        global saveFiles
        latestSave = None
        # saveFiles is sorted newest-first, so the first match is latest.
        # (Removed an unused 'saveTime' local from the original.)
        for saveFile in saveFiles:
            if saveFile.startswith('!!quicksave'):
                latestSave = saveFile
                break
        if latestSave == '!!quicksave':
            try:
                name, extension = os.path.splitext(saveFiles[latestSave])
                os.rename(saveFiles[latestSave], name + '~1' + extension)
            except OSError:
                # Best effort: rename may fail if the file is locked/gone.
                pass
            return 1
        else:
            try:
                return int(latestSave[-1])
            except (TypeError, ValueError):
                # TypeError: no quicksave found (latestSave is None);
                # ValueError: suffix is not a digit.
                return 1
    def backupSave(self, saveName):
        """Archive the live save00 folder as saveName + self.extension,
        replacing any existing archive with the same name."""
        global saveFiles
        for saveFile in saveFiles:
            if saveFile == saveName:
                self.deleteSave(saveFiles[saveName])
        os.chdir(config['saveFolderPath'])
        if self.extension == '.tar':
            with tarfile.open(saveName + self.extension, 'w') as tar:
                tar.add('save00', arcname = 'save00')
        elif self.extension == '.7z':
            # Hide the 7z console window.
            si = subprocess.STARTUPINFO()
            si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            subprocess.call('"' + config['7z_path'] + '" a ' + saveName + self.extension + ' save00/* -mmt4 -mx0 -t7z', startupinfo=si)
    def loadSave(self, savePath):
        """Replace the live save00 folder with the archive's contents."""
        os.chdir(config['saveFolderPath'])
        if not os.path.exists(savePath):
            return
        self.deleteSaveFolder()
        extension = os.path.splitext(savePath)[1]
        if extension == '.tar':
            with tarfile.open(savePath, 'r') as tar:
                tar.extractall(path = config['saveFolderPath'])
        elif extension == '.7z':
            si = subprocess.STARTUPINFO()
            si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            subprocess.call('"' + config['7z_path'] + '" x ' + savePath + ' -y -mmt4', startupinfo=si)
    def deleteSave(self, savePath):
        """Best-effort removal of a save archive; missing files are a no-op."""
        if not os.path.exists(savePath):
            return
        try:
            os.remove(savePath)
        except OSError:
            # The file may be locked or already gone; findSaveFiles will
            # reconcile the list either way. (Was a bare except.)
            pass
class NoitaSaveScummer(wx.App):
    """wx application object: creates and shows the main window."""
    def OnInit(self):
        # Expose the frame via the module-level 'window' global, which the
        # button/hotkey classes reference directly.
        global window
        window = MainWindow(None)
        window.Show()
        return True
    def InitLocale(self):
        # Override wx.App's locale handling with the plain 'C' locale.
        locale.setlocale(locale.LC_ALL, 'C')
versionNumber = 'v0.5.6'
locale.setlocale(locale.LC_ALL, 'C')
working_dir = os.getcwd()
# Register the bundled pixel font from memory so that
# wx.Font(faceName = 'GamePixies') resolves (Windows GDI call).
num = ctypes.c_uint32()
data = (ctypes.c_char * len(resources_Gamepixies_8MO6n_ttf))(*resources_Gamepixies_8MO6n_ttf)
ctypes.windll.gdi32.AddFontMemResourceEx(data, len(data), 0, ctypes.byref(num))
readConfig()
findNoita()
findSteam()
hkm = HotkeyManager()
saveMng = SaveManager(selectArchiveTool())
# .7z archives are only recognised when a 7-Zip path is configured.
supportedExtensions = ['.tar']
if config['7z_path'] != '':
    supportedExtensions.append('.7z')
window = None
app = NoitaSaveScummer()
saveDirObserver = watchSaveDirectory()
app.MainLoop()
# Exit: stop the save-dir watcher, release hotkeys, persist the config.
saveDirObserver.stop()
saveDirObserver.join()
hkm.unregisterAll()
writeConfig()
# ---------------------------------------------------------------------
def longest_palindromic_substring(s):
    """Return the longest palindromic substring of s.

    Expand-around-center: every index (odd-length centers) and every gap
    between adjacent indices (even-length centers) is expanded outward
    while the characters match.  O(n^2) time, O(1) extra space.

    Bug fix: an empty input now returns '' instead of the int 0, keeping
    the return type consistent.  The py2-era single-element-list trick is
    replaced with `nonlocal` (the file already relies on Python 3).
    Odd- and even-length palindromes can never tie in length (different
    parity), so interleaving the two expansions per center preserves the
    original first-longest-wins result exactly.
    """
    if not s:
        return ''
    max_start = 0
    max_length = 1
    def expand(l, r, initial_length):
        # Grow the palindrome outward while s[l] == s[r], then record it
        # if it beats the best seen so far.
        nonlocal max_start, max_length
        length = initial_length
        while l >= 0 and r < len(s) and s[l] == s[r]:
            length += 2
            l -= 1
            r += 1
        if length > max_length:
            max_length = length
            max_start = l + 1
    for center in range(len(s)):
        expand(center - 1, center + 1, 1)  # odd length, centered on a char
        expand(center - 1, center, 0)      # even length, centered on a gap
    return s[max_start:max_start + max_length]
# ---------------------------------------------------------------------
# Demonstrations of Python's three loop-control statements. Each statement
# is shown once in a for loop and once in a while loop; the printed output
# is unchanged.

# break: leave the loop entirely at the first match.
print("Example of break:\n")
for letter in 'Python':
    if letter == 'h':
        break
    print('Current Letter :\t', letter)

var = 10
while var > 0:
    var -= 1
    if var == 5:
        break
    print('Current variable value :\t', var)

# continue: skip the rest of this iteration only.
print("\nExample of continue:\n")
for letter in 'Python':
    if letter == 'h':
        continue
    print('Current Letter :\t', letter)

var = 10
while var > 0:
    var -= 1
    if var == 5:
        continue
    print('Current variable value :\t', var)

# pass: a no-op placeholder — every iteration still prints.
print("\nExample of pass:\n")
for letter in 'Python':
    if letter == 'h':
        pass
    print('Current Letter :\t', letter)

var = 10
while var > 0:
    var -= 1
    if var == 5:
        pass
    print('Current variable value :\t', var)
# ---------------------------------------------------------------------
from django.shortcuts import render,get_object_or_404,render_to_response
from blog.models import Blog,Post,Tag, DocumentForm, Document
from django.contrib.auth import authenticate, login,logout
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.models import User
import re,copy
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.template import RequestContext
from django.core.urlresolvers import reverse
def index(request):
    """Front page: every post, paginated five per page."""
    paginator = Paginator(Post.objects.all(), 5)
    requested_page = request.GET.get('page')
    try:
        post_page = paginator.page(requested_page)
    except PageNotAnInteger:
        # Missing or non-numeric ?page= falls back to the first page.
        post_page = paginator.page(1)
    except EmptyPage:
        # Out-of-range ?page= falls back to the last page.
        post_page = paginator.page(paginator.num_pages)
    return render(request, 'blog/index.html', {
        'blogs': Blog.objects.all(),
        'posts': post_page,
        'tags': Tag.objects.all(),
    })
def logout_page(request):
    """Log the user out, then show the front page (ten newest posts)."""
    logout(request)
    return render(request, 'blog/index.html', {
        'blogs': Blog.objects.all(),
        'posts': Post.objects.all()[:10],
        'tags': Tag.objects.all(),
    })
def login_page(request):
    """Show the login form, or process a submitted login attempt."""
    if not request.POST:
        # Plain GET: render an empty form.
        return render(request, 'blog/login.html', {'error_message': ''})
    username = request.POST.get('username')
    password = request.POST.get('password')
    user = authenticate(username = username, password = password)
    if user is not None and user.is_active:
        login(request, user)
        return render(request, 'blog/user_home.html', {'user': user})
    # Distinguish 'unknown user' from 'wrong password' in the error text.
    if not User.objects.filter(username=username.strip()):
        return render(request, 'blog/login.html', {
            'error_message': "ERROR: user doesn't exist!"
        })
    return render(request, 'blog/login.html', {
        'error_message': "ERROR: password and username didn't match!"
    })
def short_body(posts):
    """Return a deep copy of posts with each over-long body truncated.

    Bodies longer than 500 characters are cut to 500 and a reader-facing
    hint is appended.  The originals are left untouched — hence the
    deepcopy.  (Typo fix: 'origianl' -> 'original' in the hint text.)
    """
    previews = copy.deepcopy(posts)
    for post in previews:
        if len(post.body) > 500:
            post.body = post.body[:500] + "......Please click the title of post to read original contents"
    return previews
def blog_posts(request, slug):
    """Render one blog's posts, paginated five per page (404 on bad slug)."""
    blog = get_object_or_404(Blog, slug=slug)
    paginator = Paginator(blog.post_set.all(), 5)
    page_number = request.GET.get('page')
    try:
        post_page = paginator.page(page_number)
    except PageNotAnInteger:
        post_page = paginator.page(1)
    except EmptyPage:
        post_page = paginator.page(paginator.num_pages)
    return render(request, 'blog/blog_posts.html', {
        'blog': blog,
        'posts': post_page,
        'user': blog.user,
    })
def create_blog(request, username):
    """Create a blog for *username* from a submitted title.

    Anonymous visitors are bounced back to the user's home page with an
    error message; a blank title re-renders the creation form.
    """
    _user = get_object_or_404(User, username=username)
    if not request.user.is_authenticated():
        return render(request, 'blog/user_home.html', {
            'user': _user,
            'error_message': "it's not your space, you can't create a blog",
        })
    if not request.POST:
        # First visit: show the empty creation form.
        return render(request, 'blog/create_blog.html', {'user': _user})
    _title = request.POST.get('title')
    if _title.strip() == '':
        return render(request, 'blog/create_blog.html', {
            'user': _user,
            'error_message': "You have to write title for a blog."
        })
    new_blog = Blog(user=_user, title=_title.strip())
    new_blog.create_slug()
    new_blog.save()
    return render(request, 'blog/user_home.html', {'user': _user})
def valid_username(s):
    r"""Return True when *s* (after stripping) is one or more word characters.

    ``\w`` already covers digits, letters and the underscore, so the original
    character class ``[\d\w_]`` collapses to ``\w``; the match-then-branch
    boilerplate collapses to a single boolean expression.
    """
    return re.match(r'^\w+$', s.strip()) is not None
def sign_up(request):
    """Register a new account and log the new user straight in.

    Validates the username format and uniqueness and that both password
    fields agree; each failure re-renders the form with an error message.
    """
    if not request.POST:
        return render(request, 'blog/sign_up.html', {})
    _username = request.POST.get('username')
    if not valid_username(_username):
        return render(request, 'blog/sign_up.html', {
            'error_message': "username can only have digital, letter and '_'."
        })
    if User.objects.filter(username=_username.strip()):
        return render(request, 'blog/sign_up.html', {
            'error_message': ("the username: '%s' has been registered." % _username)
        })
    _pw = request.POST.get('password')
    _pw2 = request.POST.get('password_re')
    if _pw != _pw2:
        return render(request, 'blog/sign_up.html', {
            'error_message': "Two passwords didn't match,please confirm they are the same."
        })
    User.objects.create_user(_username.strip(), '', _pw)
    user = authenticate(username=_username, password=_pw)
    if user is not None and user.is_active:
        login(request, user)
        return render(request, 'blog/user_home.html', {'user': user})
    return render(request, 'blog/sign_up.html', {
        'error_message': "ERROR: sign up failed."
    })
def post(request, slug):
    """Render a single post page (404 on unknown slug)."""
    target = get_object_or_404(Post, slug=slug)
    return render(request, 'blog/post.html', {
        'post': target,
        'user': target.blog.user,
    })
def edit_post(request, slug):
    """Edit an existing post identified by *slug*.

    Only the owner of the post's blog may edit; other authenticated users
    and anonymous visitors are sent back to the post page with an error.
    """
    if request.user.is_authenticated():
        cur_user = request.user
        post = get_object_or_404(Post, slug=slug)
        blog = post.blog
        if cur_user != blog.user:
            # Authenticated, but not the owner of this post's blog.
            return render(request, 'blog/post.html', {
                'post': post,
                'error_message': "It's not your post, you can't edit",
                'user': blog.user,
            })
        else:
            # submitted by user, return to post page
            if request.POST:
                _title = request.POST.get('title')
                if _title.strip() == '':
                    return render(request, 'blog/edit_post.html', {
                        'post': post,
                        'error_message': "Title can't be blank",
                    })
                _body = request.POST.get('body')
                _tags = request.POST.get('tags')
                _img = request.POST.get('image')
                post.title = _title
                post.body = _body
                post.add_image(_img)
                post.add_tags(_tags)
                post.save()
                return render(request, 'blog/post.html', {'post': post})
            else:
                # didn't submit: show the edit form pre-filled with the post
                return render(request, 'blog/edit_post.html', {'post': post})
    else:
        # anonymous users may view but not edit
        post = get_object_or_404(Post, slug=slug)
        user = post.blog.user
        return render(request, 'blog/post.html', {
            'post': post,
            'user': user,
            'error_message': "You're not authenticated user, you can't edit",
        })
def create_post(request, slug):
    """Create a new post under the blog identified by *slug*.

    Only the blog's owner may post; a blank title re-renders the form with
    an error message.
    """
    _blog = get_object_or_404(Blog, slug=slug)
    blog_user = _blog.user
    if blog_user != request.user:
        return render(request, 'blog/blog_posts.html', {
            'blog': _blog,
            'posts': _blog.post_set.all(),
            'user': blog_user,
            'error_message': "It's not your blog, you can't create a post here.",
        })
    if not request.POST:
        # No submission yet: show the empty creation form.
        return render(request, 'blog/create_post.html', {'blog': _blog})
    _title = request.POST.get('title')
    if _title.strip() == '':
        return render(request, 'blog/create_post.html', {
            'blog': _blog,
            'error_message': "Title can't be blank",
        })
    new_post = Post(title=_title, body=request.POST.get('body'), blog=_blog)
    new_post.create_slug()
    new_post.add_tags(request.POST.get('tags'))
    new_post.add_image(request.POST.get('image'))
    new_post.save()
    return render(request, 'blog/post.html', {'post': new_post, 'user': request.user})
def tag(request, slug):
    """Render all posts carrying the given tag, paginated five per page."""
    current_tag = get_object_or_404(Tag, slug=slug)
    paginator = Paginator(current_tag.post_set.all(), 5)
    page_number = request.GET.get('page')
    try:
        post_page = paginator.page(page_number)
    except PageNotAnInteger:
        post_page = paginator.page(1)
    except EmptyPage:
        post_page = paginator.page(paginator.num_pages)
    return render(request, 'blog/tag.html', {
        'tag': current_tag,
        'posts': post_page,
        'tags': Tag.objects.all(),
    })
def upload_file(request):
    """Show the upload form and store a submitted document.

    Anonymous users see the same page with an error message instead of
    being allowed to upload.
    """
    # Handle file upload
    if request.user.is_authenticated() == False:
        error_message = "You're not authenticated user, you can't upload_file"
        return render_to_response('blog/upload_file.html',
                                  {'documents': Document.objects.all(),
                                   'form': DocumentForm(),
                                   'error_message': error_message},
                                  context_instance=RequestContext(request)
                                  )
    else:
        error_message = ''
        if request.method == 'POST':
            form = DocumentForm(request.POST, request.FILES)
            if form.is_valid():
                newdoc = Document(docfile=request.FILES['docfile'])
                newdoc.save()
                # Redirect to the document list after POST
                return HttpResponseRedirect(reverse('blog:upload_file'))
            # An invalid form falls through and is re-rendered bound below.
        else:
            form = DocumentForm()  # A empty, unbound form
        # Load documents for the list page
        documents = Document.objects.all()
        # Render list page with the documents and the form
        return render_to_response('blog/upload_file.html',
                                  {'documents': documents, 'form': form, 'error_message': error_message},
                                  context_instance=RequestContext(request)
                                  )
def about_me(request):
    """Static placeholder page; returns the literal text "about_me"."""
    placeholder = "about_me"
    return HttpResponse(placeholder)
def user_home(request, username):
    """Render the personal home page for *username* (404 if unknown)."""
    owner = get_object_or_404(User, username=username)
    return render(request, 'blog/user_home.html', {'user': owner})
def photoes(request):
    """Render the photo gallery page backed by every uploaded document."""
    gallery_context = {'documents': Document.objects.all()}
    return render(request, 'blog/photoes.html', gallery_context)
#!/opt/conda/bin/python
import pandas
from keras.layers import concatenate, Dropout
import numpy
from math import sqrt
from keras.callbacks import EarlyStopping
import statsmodels.api as sm
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
import keras
from keras import layers
from sklearn.metrics import mean_squared_error
import datetime
import numpy as np
from utils import is_file
from slack import send_slack
from get_data import get_sp500, get_raw_data
# Column layout of the raw data CSVs (date plus per-ticker price columns).
COLUMNS = ('DATE', 'vfx', 'vbx', 'vmt', 'rwm', 'dog', 'psh', 'spx')
# Column layout of the running result log written by predictModel.save_result().
RES_COLUMNS = ('DATE', 'REAL_DATE', 'LAST_REAL', 'PRED', 'REAL', 'UP_BOND', 'LOW_BOND', 'STD', 'MEAN', 'PRED_RATE', 'REAL_RATE', 'PRED_RES', 'REAL_RES')
# Tickers fetched by the get_data helpers; order matches the price columns above.
STOCKS = ['VFINX', 'VBMFX', 'VMOT', 'RWM', 'DOG', 'SH', '^SP500TR']
FOLDER = "/nfs/Workspace"       # working directory for all CSV artifacts
TRAIN_FILE = "LSTM_TRAIN.csv"   # training series (one row per week)
TRAIN_START_DATE = '2017-5-5'
TRAIN_END_DATE = '2020-8-28'
PRED_FILE = "LSTM_PRED.csv"     # latest data point used as prediction input
PRED_START_DATE = '2020-9-4'
RES_FILE = "LSTM_RES.csv"       # running prediction/actual result log
def get_today():
    """Return today's date formatted as YYYY-MM-DD."""
    today = datetime.date.today()
    return "{:%Y-%m-%d}".format(today)
def read_csv(filename, folder):
    """Load *folder*/*filename* as a DataFrame, decoding as Latin-1."""
    full_path = "/".join((folder, filename))
    return pandas.read_csv(full_path, encoding='ISO-8859-1')
def Standard_MinMax(data):
    """Scale *data* (flattened to a single column) into the [0, 1] range."""
    column = data.reshape(-1, 1)
    scaler = MinMaxScaler(feature_range=(0, 1))
    return scaler.fit_transform(column)
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    """Frame a (multivariate) time series as a supervised-learning table.

    Produces lag columns var*(t-n_in)..var*(t-1) followed by the current and
    forecast columns var*(t)..var*(t+n_out-1), one row per usable time step.
    Rows made incomplete by the shifting are dropped when *dropnan* is True.
    """
    n_vars = 1 if type(data) is list else data.shape[1]
    df = pandas.DataFrame(data)
    frames, headers = [], []
    # Input (lag) columns: t-n_in ... t-1.
    for lag in range(n_in, 0, -1):
        frames.append(df.shift(lag))
        headers.extend('var%d(t-%d)' % (v + 1, lag) for v in range(n_vars))
    # Output (forecast) columns: t ... t+n_out-1.
    for step in range(n_out):
        frames.append(df.shift(-step))
        if step == 0:
            headers.extend('var%d(t)' % (v + 1) for v in range(n_vars))
        else:
            headers.extend('var%d(t+%d)' % (v + 1, step) for v in range(n_vars))
    # put it all together
    combined = pandas.concat(frames, axis=1)
    combined.columns = headers
    # drop rows with NaN values
    if dropnan:
        combined.dropna(inplace=True)
    return combined
class predictModel(object):
    """Weekly VFINX ('vfx') price predictor.

    Maintains three CSV artifacts under FOLDER (training series, latest
    prediction input, running result log), trains a small LSTM on the weekly
    series, predicts next week's price, classifies the move against a +/-1
    standard-deviation band of recent returns, and posts the result to Slack.
    """

    def __init__(self):
        self.folder = FOLDER
        self.train_filename = TRAIN_FILE
        self.data_columns = ('DATE', 'vfx', 'vbx', 'vmt', 'rwm', 'dog', 'psh', 'spx')
        self.pred_filename = PRED_FILE
        self.res_filename = RES_FILE
        self.train_path = '/'.join([FOLDER, self.train_filename])
        self.pred_path = '/'.join([FOLDER, self.pred_filename])
        self.res_path = '/'.join([FOLDER, self.res_filename])
        self.init_train_data()
        self.init_pred_data()
        # Date of the last logged prediction; filled by get_real_data()/pred_data().
        self.res_date = None

    def init_train_data(self):
        """Create the training CSV on first run, then load it into memory."""
        if not is_file(self.train_path):
            print('[INFO] TRAIN data not fond, init one')
            get_raw_train_data()
        self.train_data = read_csv(filename=self.train_filename, folder=self.folder)

    def init_pred_data(self):
        """Create the prediction-input CSV on first run."""
        if not is_file(self.pred_path):
            print('[INFO] PERD data not fond, init one')
            get_pred_data()

    def get_train_data(self):
        """Scale the training series and split it 60/40 into train/test tensors."""
        data = self.train_data
        data.columns = self.data_columns
        data = data.fillna(method='ffill')
        data.head()
        #folder="/Users/wenyongjing/Downloads/第二章"
        #data=read_csv(filename="WEN",folder=folder)
        #data.columns = ( 'DATE','vfx','vix' ,'vbx', 'vmt','rwm','dog','psh', 'spx')
        del data['DATE']
        scaler = MinMaxScaler(feature_range=(0, 1))
        scaled = scaler.fit_transform(data)
        data.head()
        reframed = series_to_supervised(scaled, 1, 1)
        reframed.head()
        # Column 7 of the reframed table is vfx at time t (the target).
        pred = {'vfx': 7}
        reframed = pandas.concat([reframed.iloc[:, 0:7], reframed.iloc[:, pred['vfx']]], axis=1)
        reframed.head()
        train_num = round(reframed.shape[0] * 0.6)
        print(train_num)
        train = reframed.values[:train_num, :]
        test = reframed.values[train_num:, :]
        train_X, self.train_y = train[:, :-1], train[:, -1]
        test_X, self.test_y = test[:, :-1], test[:, -1]
        #train_X.shape, train_y.shape, test_X.shape, test_y.shape
        # LSTM expects (samples, timesteps, features); one timestep per sample.
        self.train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
        self.test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
        self.data = data

    def train_model(self):
        """Fit a single-layer LSTM (8 units -> Dense(1)) with MSE loss."""
        model = keras.models.Sequential()
        model.add(layers.LSTM(8, input_shape=(self.train_X.shape[1], self.train_X.shape[2])))
        model.add(layers.Dense(1))
        #model.add(TimeDistributed(Dense(1,activation='softmax')))
        #model.add(Dropout(0.5))
        #model.compile(loss='categorical_crossentropy', optimizer='adam')
        model.compile(loss='mse', optimizer='adam')
        model.summary()
        history = model.fit(self.train_X, self.train_y, epochs=80,
                            batch_size=9, validation_data=(self.test_X, self.test_y),
                            verbose=1, shuffle=False)
        self.model = model

    def pred_data(self):
        """Predict next week's vfx price from the latest input row.

        Appends a synthetic row dated one week ahead (copying the last real
        row) so the supervised reframing yields an input for the forecast.
        Sets self.last_real and self.pred_real.
        """
        pred_data = read_csv(filename=self.pred_filename, folder=self.folder)
        pred_data.columns = self.data_columns
        #pred_data.isnull().sum()
        pred_data = pred_data.fillna(method='ffill')
        # Copy Last date to tomorrow
        tmp = pred_data[-1:].values.tolist()
        print(tmp)
        if not self.res_date:
            self.res_date = tmp[0][0]
        tomorrow = transfer_date(self.res_date) + datetime.timedelta(days=7)
        self.tomorrow = tomorrow.strftime("%Y-%m-%d")
        tmp[0][0] = self.tomorrow
        pred_data.loc[len(pred_data)] = tmp[0]
        pred_data_bkp = np.array(pred_data['vfx']);
        print(pred_data)
        print(pred_data_bkp)
        del pred_data['DATE']
        scaler = MinMaxScaler(feature_range=(0, 1))
        pred_scaled = scaler.fit_transform(pred_data)
        pred_data.head()
        pred_reframed = series_to_supervised(pred_scaled, 1, 1)
        pred_reframed.head()
        pred = {'vfx': 7}
        pred_reframed = pandas.concat([pred_reframed.iloc[:, 0:7], pred_reframed.iloc[:, pred['vfx']]], axis=1)
        pred_reframed.head()
        pred_test = pred_reframed.values[:, :]
        pred_test_X, pred_test_y = pred_test[:, :-1], pred_test[:, -1]
        pred_test_X.shape, pred_test_y.shape
        pred_test_X = pred_test_X.reshape((pred_test_X.shape[0], 1, pred_test_X.shape[1]))
        pred_test_X.shape, pred_test_y.shape
        pred_yhat = self.model.predict(pred_test_X)
        pred_test_X = pred_test_X.reshape((pred_test_X.shape[0], pred_test_X.shape[2]))
        pred_yhat.shape, pred_test_X.shape
        pred = {'vfx': 0}
        # Undo the min-max scaling by rebuilding a full-width row around the prediction.
        pred_inv_yhat = concatenate((pred_yhat, numpy.delete(pred_test_X, pred['vfx'], axis=1)), axis=1)
        pred_inv_yhat = scaler.inverse_transform(pred_inv_yhat)
        pred_inv_yhat = pred_inv_yhat[:, 0]
        pred_inv_yhat.shape, pred_inv_yhat
        real = pred_test_y.reshape((len(pred_test_y), 1))
        inv_y = concatenate((real, numpy.delete(pred_test_X, pred['vfx'], axis=1)), axis=1)
        inv_y = scaler.inverse_transform(inv_y)
        inv_y = inv_y[:, 0]
        inv_y
        print(inv_y)
        print(pred_inv_yhat)
        rmse = sqrt(mean_squared_error(inv_y, pred_inv_yhat))
        print('Test RMSE: %.3f' % rmse)
        self.last_real = pred_data_bkp[-2]
        self.pred_real = pred_inv_yhat[0]

    def get_data_std_mean(self):
        """Compute mean/std of the last 30 weekly vfx returns and the +/-1-std band."""
        samples = np.array(self.data['vfx'][-30:])
        arr1 = []
        for i in range(len(samples) - 1):
            arr1.append((samples[i + 1] - samples[i]) / samples[i])
        arr2 = np.array(arr1)
        self.std = np.std(arr2, ddof=1)
        self.mean = np.mean(arr2)
        self.up_bond = self.mean + self.std
        self.low_bond = self.mean - self.std
        print(self.std, self.mean)

    def _get_result(self, last_real, value, up_bond, low_bond):
        """Return (rate, class): relative change and 1/-1/0 vs the band."""
        #print(self.pred_data_bkp[-2:])
        #print(self.pred_inv_yhat)
        value_rate = (value - last_real) / last_real
        #pred_last2 = pred_inv_yhat[len(pred_inv_yhat)-1]
        #newReturn = (pred_last1/pred_last2)/pred_last1
        if value_rate >= up_bond:
            res = 1
        elif value_rate <= low_bond:
            res = -1
        else:
            res = 0
        return value_rate, res

    def _get_real_data(self, pred_date):
        """Fetch the first available real quote on/after *pred_date*.

        Retries day by day (markets are closed some days) until today;
        raises when no data could be fetched at all.
        """
        _today = transfer_date(get_today())
        _pred_date = transfer_date(pred_date)
        delta = datetime.timedelta(days=1)
        real_data = None
        while _pred_date <= _today:
            try:
                print(_pred_date, _today)
                real_data = get_raw_data(_pred_date.strftime("%Y-%m-%d"), _pred_date.strftime("%Y-%m-%d"), stock_list=STOCKS, columns=STOCKS)
                return real_data
            except Exception as e:
                _pred_date = _pred_date + delta
                print('[ERROR] ERROR: {0}'.format(str(e)))
                pass
        raise Exception('Cannot get real data: {0}'.format(pred_date))

    def get_real_data(self):
        """Back-fill the REAL_* columns of the last result row once data exists.

        Also appends the fetched real row to the training CSV and replaces
        the prediction-input CSV with it.
        """
        if not is_file(self.res_path):
            print('[WARN] Resulat file not exists!!')
            return
        res_data = read_csv(self.res_filename, folder=self.folder)
        if np.isnan(res_data.loc[res_data.index[-1], 'REAL_DATE']):
            # Get old data
            up_bond = res_data.loc[res_data.index[-1], 'UP_BOND']
            low_bond = res_data.loc[res_data.index[-1], 'LOW_BOND']
            last_real = res_data.loc[res_data.index[-1], 'LAST_REAL']
            pred_date = res_data.loc[res_data.index[-1], 'DATE']
            print(up_bond, low_bond, last_real, pred_date)
            print(res_data)
            real_data = self._get_real_data(pred_date)
            print(real_data)
            # Update TRAIN
            real_data.to_csv(self.train_path, mode='a', header=False)
            # Update PRED
            real_data.to_csv(self.pred_path)
            # Update Result
            real_date = real_data.index[-1].strftime("%Y-%m-%d")
            real = real_data.loc[real_date, 'VFINX']
            real_rate, real_res = self._get_result(last_real, real, up_bond, low_bond)
            res_data.loc[res_data.index[-1], 'REAL_DATE'] = real_date
            res_data.loc[res_data.index[-1], 'REAL'] = real
            res_data.loc[res_data.index[-1], 'REAL_RATE'] = real_rate
            res_data.loc[res_data.index[-1], 'REAL_RES'] = real_res
            res_data.to_csv(self.res_path, index=False)
            print(res_data)
        self.res_date = res_data.loc[res_data.index[-1], 'DATE']

    def save_result(self):
        """Append (or create) the result-log row for this week's prediction."""
        print('[INFO] Save Result')
        self.pred_rate, self.pred_res = self._get_result(self.last_real, self.pred_real, self.up_bond, self.low_bond)
        print(self.pred_real)
        print(self.pred_res)
        print(self.tomorrow)
        print(self.up_bond)
        print(self.low_bond)
        print(self.last_real)
        print(self.pred_rate)
        print(self.pred_real)
        new_res_data = pandas.DataFrame(np.array([[self.tomorrow, None, self.last_real, self.pred_real, None, self.up_bond, self.low_bond, self.std, self.mean, self.pred_rate, None, self.pred_res, None]]), columns=RES_COLUMNS)
        if is_file(self.res_path):
            res_data = read_csv(self.res_filename, folder=self.folder)
            #res_data.append(new_res_data, ignore_index=True)
            last_date = res_data.loc[res_data.index[-1], 'DATE']
            # Avoid writing a duplicate row for the same prediction date.
            if last_date != self.tomorrow:
                new_res_data.to_csv(self.res_path, mode='a', header=False, index=False)
        else:
            #res_data = new_res_data
            new_res_data.to_csv(self.res_path, index=False)

    def send_slack(self):
        """Post the prediction summary message to Slack."""
        msg = "Team 03 predict {0} price {1} Classification: {2}".format(self.tomorrow, self.pred_real, self.pred_res)
        send_slack(msg)

    def run(self):
        """Full weekly pipeline: back-fill, retrain, predict, log, notify."""
        self.get_real_data()
        self.get_train_data()
        self.train_model()
        self.pred_data()
        self.get_data_std_mean()
        self.save_result()
        self.send_slack()
def transfer_date(date):
    """Parse a 'YYYY-MM-DD' string into a datetime.datetime."""
    iso_format = '%Y-%m-%d'
    return datetime.datetime.strptime(date, iso_format)
def get_raw_train_data():
    """Download the raw price data and thin it to one row per 7-day window.

    Fetches TRAIN_START_DATE..TRAIN_END_DATE into RAW_<TRAIN_FILE>, keeps only
    the first row of each 7-day window, and writes the result to
    FOLDER/TRAIN_FILE.
    """
    raw_filename = 'RAW_' + TRAIN_FILE
    get_sp500(TRAIN_START_DATE, TRAIN_END_DATE, raw_filename, FOLDER, stock_list=STOCKS)
    raw_data = read_csv(raw_filename, FOLDER)
    raw_data_dict = raw_data.to_dict()
    remove_index = []
    start_date = transfer_date(raw_data_dict['Date'][0])
    next_date = start_date + datetime.timedelta(days=7)
    # Mark every row that falls before the next 7-day boundary for removal.
    for cur_index in range(1, len(raw_data_dict['Date'])):
        cur_date = transfer_date(raw_data_dict['Date'][cur_index])
        if cur_date < next_date:
            remove_index.append(cur_index)
        else:
            start_date = next_date
            next_date = start_date + datetime.timedelta(days=7)
    # Drop the marked row indices from every column's dict.
    for key in raw_data_dict.keys():
        for index in remove_index:
            del raw_data_dict[key][index]
    res_data = pandas.DataFrame.from_dict(raw_data_dict)
    res_data = res_data.reset_index(drop=True)
    res_data.to_csv('/'.join([FOLDER, TRAIN_FILE]), index=False)
def get_pred_data():
    """Fetch the single data point (PRED_START_DATE) used as prediction input."""
    get_sp500(PRED_START_DATE, PRED_START_DATE, PRED_FILE, FOLDER, stock_list=STOCKS)
def save_pred_data():
    """Not implemented; placeholder kept for interface symmetry."""
    pass
if __name__ == '__main__':
    # Run the full weekly pipeline (see predictModel.run).
    tm = predictModel()
    tm.run()
|
import json
import os
import praw
import pdb
import re
import copy
from functools import wraps
def set_default(obj):
    """JSON ``default`` hook: serialize sets as lists, reject anything else."""
    if not isinstance(obj, set):
        raise TypeError
    return list(obj)
def options_sub(options, keys):
    """Return the sub-dict of *options* restricted to *keys* (KeyError if absent)."""
    return {key: options[key] for key in keys}
class Reddit(object):
    """Python 2 wrapper around a PRAW OAuth session.

    Holds a dict-backed Config (persisted to an auth-session file), an
    online praw.Reddit or an offline FakeReddit stub, and helpers for
    session management, voting and cached saved-post retrieval.
    """
    # Config/Options stuff
    class Config:
        """Dict-backed configuration with dunder-routed get/set.

        NOTE(review): __getattr__/__setattr__ delegate to get()/set(), so
        plain attribute assignments after __init__ land in config_dict —
        __init__ therefore writes through self.__dict__ directly.
        """
        def __init__(self, config_dict):
            setDict = self.__dict__
            setDict["config_dict"] = config_dict
            setDict["orig_config"] = copy.deepcopy(config_dict)
            print "Original config: ", setDict["orig_config"]
            # Ensure the auth-session file exists before opening it "r+".
            if not(os.path.isfile(config_dict["auth_session_path"])):
                file(config_dict["auth_session_path"], "w").close()
            setDict["auth_session_file"] = open(config_dict["auth_session_path"], "r+")
            # Parses "posts"/"subreddits" optionally followed by "-id=<guid>".
            setDict["state_re"] = re.compile(r"^(posts|subreddits)(?:\-id=)?([A-Za-z0-9_]{3,9})?")
            self.__dict__ = setDict

        def get(self, key):
            """Look up one key (lists become tuples; "state" is synthesized
            from page+guid) or, given a tuple/list, a sub-dict of options."""
            try:
                if isinstance(key, basestring):
                    if not(key == "state"):
                        ret_val = self.config_dict[key]
                        if type(ret_val) is list:
                            return tuple(ret_val)
                        return ret_val
                    else:
                        page = self.config_dict["page"]
                        guid = self.config_dict["guid"]
                        if guid is not None:
                            return page + "-id=" + guid
                        else:
                            return page
                elif type(key) is tuple or type(key) is list:
                    return self.get_options(key)
                else:
                    raise ValueError
            except AttributeError as e:
                print e

        def set(self, key, val):
            """Store one key or several; "state" is parsed back into
            page/guid, falling back to the original config on a bad value."""
            if key == "state":
                state_match = self.state_re.match(val)
                if state_match is not None:
                    if len(state_match.groups()) == 2:
                        page_guid = state_match.groups()
                        self.config_dict["page"] = page_guid[0]
                        self.config_dict["guid"] = page_guid[1]
                        self.config_dict["state"] = val
                        return
                page = self.orig_config["page"]
                guid = self.orig_config["guid"]
                self.config_dict["page"] = page
                self.config_dict["guid"] = guid
                self.config_dict["state"] = page
                print "No page or guid able to be parsed, defaulting to orig_config parameters."
                print "Page: % Guid: % State: %" % (page, guid, page)
                return
            if isinstance(key, basestring):
                if isinstance(val, list):
                    self.config_dict[str(key)] = tuple(val)
                else:
                    self.config_dict[str(key)] = val
            elif type(key) is tuple or type(key) is list:
                for i, k in enumerate(key):
                    self.config_dict[k] = val[i]
            else:
                raise ValueError

        def __getitem__(self, index):
            return self.get(index)

        def __setitem__(self, key, val):
            self.set(key, val)

        def __getattr__(self, key):
            return self.get(key)

        def __setattr__(self, key, item):
            self.set(key, item)

        def __str__(self):
            return "\n".join(["self = reddit.Reddit.Config"] + [str("self[" + key + "] = " + str(val)) for key, val in self.config_dict.iteritems()])

        def load_session_file(self):
            """Read the auth-session JSON into the config; returns the parsed
            dict, an error string on corrupt JSON, or None when empty."""
            self.auth_session_file = open(self.auth_session_path, "r+")
            access_info = None
            auth_session_data = self.auth_session_file.read()
            if(len(auth_session_data) > 0):
                try:
                    access_info = json.loads(auth_session_data)
                    self.update(access_info)
                    return access_info
                except ValueError:
                    print "Auth file is corrupted"
                    return "Corrupted session file"

        def delete_session_file(self):
            """Truncate the auth-session file and reopen it."""
            open(self.auth_session_path, "w").close()
            self.auth_session_file = open(self.auth_session_path, "r+")

        def update(self, new_items):
            """Merge *new_items* into the config and persist the session keys."""
            for key, value in new_items.iteritems():
                self.config_dict[key] = value
            self.delete_session_file()
            self.auth_session_file.seek(0)
            json.dump(self[self["session_keys"]], self.auth_session_file, default=set_default)
            self.auth_session_file.flush()

        def delete(self, key=None):
            """Delete one key or several; with no key, clear the OAuth tokens
            and truncate the session file."""
            if key is not None:
                if type(key) is str:
                    del self.config_dict[key]
                elif type(key) is tuple or type(key) is list:
                    for i, k in enumerate(key):
                        if k in self.config_dict:
                            del self.config_dict[k]
            else:
                self.delete(("access_token", "refresh_token"))
                open(self.auth_session_path, "w").close() # clear out file by closing it on "w" write mode
                self.auth_session_file = open(self.auth_session_path, "r+")

        def get_options(self, keys):
            """Return {key: value} for the keys that exist in the config."""
            inter_keys = set(keys) & set(self.config_dict.keys())
            inter_dict = dict([(key, self[key]) for key in inter_keys])
            return inter_dict

    class FakeReddit:
        """Offline stand-in for praw.Reddit used when config["offline"] is set."""
        class FakeUser:
            def __init__(self):
                self.name = "ibly31"
                self.json_dict = {"name": "ibly31"}

        def __init__(self):
            self.user = self.FakeUser()

    def __init__(self, app=None):
        with open("main/config.json", "r") as config_file:
            self.config = self.Config(json.load(config_file))
        print "======= Initializing new Reddit instance ======="
        if not(self.config["offline"]):
            self.r = praw.Reddit(**self.config[("user_agent", "log_requests", "store_json_result")])
            self.r.set_oauth_app_info(**self.config[("client_id", "client_secret", "redirect_uri")])
        else:
            self.r = self.FakeReddit()
        self.load_session_file()

    def save_session(self, session_dict):
        """Persist the session; a missing refresh token clears it instead."""
        if session_dict["refresh_token"] is None:
            self.config.delete()
            return False
        else:
            self.config.update(session_dict)
            return True

    def delete_session(self):
        self.config.delete()

    def refresh_session(self):
        """Exchange the refresh token for new credentials; False on failure."""
        try:
            access_info = self.r.refresh_access_information(self.config["refresh_token"])
            return self.save_session(access_info)
        except praw.errors.OAuthException:
            self.delete_session()
            return False

    def is_valid_session(self, scope="identity"):
        """True when the PRAW session carries all configured OAuth scopes."""
        scope_list = [sco for sco in self.config.orig_config["scope"].split(" ")]
        #print(self.config["offline"] and True)
        return self.r.has_scope(scope_list) # this calls is_oauth_session() under the hood

    def get_session(self, code_and_state):
        """Trade an OAuth authorization code for a session; True on success."""
        try:
            access_info = self.r.get_access_information(code_and_state["code"])
            if type(access_info) is dict:
                access_info["state"] = code_and_state["state"]
                if self.save_session(access_info):
                    return True
            self.config.delete()
            return False
        except praw.errors.OAuthException:
            return False

    def load_session_file(self):
        """Restore credentials from disk, refreshing or clearing as needed."""
        access_info = self.config.load_session_file()
        if type(access_info) is dict:
            try:
                try:
                    self.r.set_access_credentials(**self.config[self.config["access_keys"]])
                except TypeError:
                    pdb.set_trace()
                if(not(self.is_valid_session())):
                    return self.refresh_session()
            except praw.errors.OAuthException:
                self.delete_session()
        else:
            print "Error loading access info: ", access_info
            self.delete_session()

    def get_auth_url(self, state="user"):
        """Build the reddit OAuth authorization URL carrying *state*."""
        authParams = self.config[("scope", "refreshable")]
        authParams["state"] = state
        return self.r.get_authorize_url(**authParams)

    def get_me(self):
        return self.r.get_me()

    def vote_on_post(self, sub_id, dir):
        """Cast a vote (*dir*) on the submission with id *sub_id*."""
        post = self.r.get_submission(submission_id=sub_id)
        post.vote(dir)

    def get_saved_posts(self, *args, **kwargs):
        """Fetch the user's saved posts (optionally filtered by subreddit),
        cache them with their query params, and return the list."""
        # after=None, sort=None, time=None, count=None, subreddit=None
        content_params = self.config["content_params"]
        content_params.update(kwargs)
        saved = self.r.user.get_saved(**content_params)
        saved_jsons = [post.json_dict for post in iter(saved)]
        saved_posts = list()
        if content_params["subreddit"] is not None:
            for post in saved_jsons:
                if post["subreddit"].lower() == content_params["subreddit"].lower():
                    saved_posts.append(post)
        else:
            saved_posts = saved_jsons
        print "Saved posts #: ", len(saved_posts)
        # Element 0 records the query params so the cache can be validated.
        saved_posts.insert(0, content_params)
        self.save_cached_results(saved_posts)
        return saved_posts

    def save_cached_results(self, results):
        """Serialize *results* to the cache file; False on I/O or JSON error."""
        try:
            results_json = json.dumps(results)
            cached_results_file = open(self.config["cache_session_path"], "w")
            cached_results_file.seek(0)
            cached_results_file.write(results_json)
            cached_results_file.close()
            print "Finished writing cache file"
            return True
        except (IOError, ValueError):
            print "Error serializing cached results json"
            return False

    def get_cached_results(self, *args, **kwargs):
        """Return cached posts when the stored query params match, else None
        (invalidating a stale or unreadable cache)."""
        content_params = self.config["content_params"]
        content_params.update(kwargs)
        if os.path.isfile(self.config["cache_session_path"]):
            try:
                results = open(self.config["cache_session_path"], "r+")
                results_json = json.load(results)
                if len(results_json) > 0:
                    params = results_json[0]
                    print "Params: ", params
                    if params == content_params:
                        results_json.pop(0)
                        return results_json
                    else:
                        self.delete_cached_results()
            except (IOError, ValueError):
                self.delete_cached_results()
        return None

    def delete_cached_results(self):
        """Truncate the cache file."""
        open(self.config["cache_session_path"], "w").close()
from django.urls import path
from . import views
# Named URL routes mapping account/profile/message paths to app views.
urlpatterns = [
    path('signup/', views.sign_up, name='signup'),
    path('profile/<str:username>/', views.profile, name='profile'),
    path('profile/update/<int:pk>/', views.update_profile, name='update_profile'),
    path('profile/picture/<int:pk>/', views.update_image, name='profile_picture'),
    path('profile/delete/<int:pk>/', views.delete_account, name='delete_account'),
    path('message/delete/<int:pk>/', views.delete_message, name='delete_message'),
    path('message/read/<int:pk>/', views.read_message, name='read_message'),
]
|
import pandas as pd
import time
def get_event_sv():
    """Interactively select a game, then an event within it, from the console.

    Returns (game_id, sv_event, event_id, event_description, shot_time);
    all five are None when the user enters '0' at an invalid-input prompt.
    """
    # Ask the user what game they want to select, then ask which event of that list
    # Need to add checks of when there is no event in game or when user does not select valid event !!!
    game_map = pd.read_csv('../data/game_map.csv')  # Obtain the map between game and game_id
    game_map = game_map[['Game', 'ID']]
    print('Return a game_id from the following list')
    time.sleep(0)
    print(game_map.to_string())  # to_string used so that whole df can be printed
    game_ids = list(game_map['ID'])
    print(' ')
    while True:
        game_id = input()  # ask the user to choose the input
        if int(game_id) in game_ids:
            break
        else:
            print("Please choose an ID from the list above or enter '0' to exit")
            if game_id == '0':
                return None, None, None, None, None
    game_id = '00' + str(game_id)  # add 00s in the beginning
    # game_id = 21500492 # Remove later
    # game_id = '00' + str(game_id) # add 00s in the beginning
    pbp_path = '../data/build_feat/pbp/features_added/shots_marked/clean_time/clean_distance/'
    pbp = pd.read_csv(pbp_path + game_id + '.csv')
    events = pbp[['EVENTNUM', 'HOMEDESCRIPTION']]
    events.columns = ['Event_ID', 'Event_Description']
    events = events[['Event_Description', 'Event_ID']]
    event_ids = list(events['Event_ID'])
    # Blank lines to visually separate the two selection menus.
    for i in range(10):
        print(" ")
    print('Return an event_id from the following list')
    time.sleep(0)
    print(events.to_string())
    print(' ')
    while True:
        event_id = int(input())  # ask the user to choose the input
        if event_id in event_ids:
            break
        else:
            print("Please choose an ID from the list above or enter 0 to exit")
            if str(event_id) == '0':
                return None, None, None, None, None
    # event_id = 6 # Remove later
    # NOTE(review): columns 5 and 13 are assumed to be the description and
    # shot-time fields of the play-by-play CSV — TODO confirm against data.
    event_description = pbp[pbp['EVENTNUM'] == event_id].iloc[0, 5]
    shot_time = pbp[pbp['EVENTNUM'] == event_id].iloc[0, 13]
    sv_path = '../data/shots/csv/shooter/eliminate_non_ball/eliminate_shooter_subbed/distances/'
    sv = pd.read_csv(sv_path + game_id + '.csv')
    sv_event = sv[sv['event_id'] == event_id]
    return game_id, sv_event, event_id, event_description, shot_time
|
#coding:utf-8
# Walks a local directory tree and prints its contents; the commented-out
# lines are leftovers from Python 2 encoding experiments.
import os
import sys;
# reload(sys);
# sys.setdefaultencoding("utf8")
# import sys
# sys.setdefaultencoding('utf-8')
# Hard-coded local directory to scan — TODO confirm/parameterize.
path = '/Users/zy/Downloads/pdf/'
generatorWalk = os.walk(path)  # NOTE(review): unused; os.walk is called again below
fileLists = []  # NOTE(review): never filled in this snippet
for root, dirs, files in os.walk(path):
    # (translated) `files` is an in-memory object; printing it directly is
    # unaffected by any transcoding directives.
    print("Root=", root, "dirs = ", dirs, "files = ", str(files))
    # decode('gbk').encode('utf-8'))
    # a = eval("u"+root)
    # b = eval("u"+dirs)
    # c = eval("u"+files)
    ## root.decode('gb2312').encode('utf-8')
|
if __name__ == "__main__":
n = int(input())
CURRENT = 0
MIN = 0
for _ in range(n):
CURRENT += int(input())
MIN = min(MIN, CURRENT)
print(abs(MIN))
|
# ==================================================================================================
# Copyright 2012 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from functools import partial
import pytest
from twitter.common.collections import (
maybe_list,
OrderedDict,
OrderedSet)
def test_default_maybe_list():
    """maybe_list with the default expected_type: scalars wrap into a
    one-item list, iterables become lists, empties become [], and
    non-conforming elements raise ValueError unless expected_type allows
    them."""
    HELLO_WORLD = ['hello', 'world']
    assert maybe_list('hello') == ['hello']
    assert maybe_list(('hello', 'world')) == HELLO_WORLD
    assert maybe_list(['hello', 'world']) == HELLO_WORLD
    # OrderedSet preserves first-insertion order and drops the duplicate.
    assert maybe_list(OrderedSet(['hello', 'world', 'hello'])) == HELLO_WORLD
    assert maybe_list(s for s in ('hello', 'world')) == HELLO_WORLD
    od = OrderedDict(hello=1)
    od.update(world=2)
    # Iterating an OrderedDict yields its keys in insertion order.
    assert maybe_list(od) == HELLO_WORLD
    assert maybe_list([]) == []
    assert maybe_list(()) == []
    assert maybe_list(set()) == []
    with pytest.raises(ValueError):
        maybe_list(123)
    with pytest.raises(ValueError):
        maybe_list(['hello', 123])
    assert maybe_list(['hello', 123], expected_type=(str, int)) == ['hello', 123]
    assert maybe_list(['hello', 123], expected_type=(int, str)) == ['hello', 123]
def test_maybe_list_types():
    """maybe_list with expected_type=int accepts ints in any iterable shape
    and rejects non-int elements with ValueError."""
    iml = partial(maybe_list, expected_type=int)
    # BUG FIX: these comparisons were bare expressions whose results were
    # discarded, so the test silently verified nothing. Assert them.
    assert iml(1) == [1]
    assert iml([1, 2]) == [1, 2]
    assert iml((1, 2)) == [1, 2]
    assert iml(OrderedSet([1, 2, 1])) == [1, 2]
    assert iml(k for k in range(3)) == [0, 1, 2]
    assert iml([]) == []
    assert iml(()) == []
    assert iml(set()) == []
    with pytest.raises(ValueError):
        iml('hello')
    with pytest.raises(ValueError):
        iml([123, 'hello'])
    with pytest.raises(ValueError):
        iml(['hello', 123])
|
from utility import Global
class Data:
    """Aggregated timing/counter statistics parsed from one sample-result CSV."""
    def __init__(self, t=0, ns=500, r=1, k=1, nw=1, prefix=None, f_TYPE_TEST=None, f_nwlen=None, f_mds=None, relativePath=None, fp=None):
        # Either derive the result-file path from the experiment parameters
        # via Global.getSampleResultPath, or take an explicit path *fp*.
        if fp==None:
            self.prefix = prefix
            self.t = t
            self.ns = ns
            self.r = r
            self.k = k
            self.nw = nw
            self.filePath = Global.getSampleResultPath(t, ns, r, k, nw, prefix=prefix, f_TYPE_TEST=f_TYPE_TEST, f_nwlen=f_nwlen, f_mds=f_mds, relativePath=relativePath)
        else: self.filePath=fp
        # Accumulators; getData() fills them and divides by the row count.
        self.timeSemantic = 0
        self.timeOther = 0
        self.timeTotal = 0
        self.numTQSP = 0
        self.numAccessedRTreeNode = 0

    # (translated) Compute the averages of the required fields.
    @staticmethod
    #testSampleResultFile.t=0.ns=500.r=1.k=10.nw=1
    def getData(t=0, ns=500, r=1, k=1, nw=1, prefix=None, f_TYPE_TEST=None, f_nwlen=None, f_mds=None, relativePath=None, fp=None):
        """Parse the result CSV and return a Data with per-row averages.

        NOTE(review): columns 25/27/28/32 are assumed to hold the node-access
        count, TQSP count, semantic time and total time — confirm against the
        file format.
        """
        if fp==None: data = Data(t, ns, r, k, nw, prefix=prefix, f_TYPE_TEST=f_TYPE_TEST, f_nwlen=f_nwlen, f_mds=f_mds, relativePath=relativePath)
        else: data = Data(fp=fp)
        index = 0
        with open(data.filePath) as f:
            f.readline()  # skip the header line
            while True:
                line = f.readline()
                if line=='': break
                index += 1
                # print(index, end=' ')
                # print(line, end='')
                # print(index, end=' ')
                strArr = line.split(',')
                data.numAccessedRTreeNode += int(strArr[25])
                data.numTQSP += int(strArr[27])
                data.timeSemantic += int(strArr[28])
                data.timeTotal += int(strArr[32])
                data.timeOther += (int(strArr[32]) - int(strArr[28]))
        # print(self)
        # print(str(self.timeSemantic) + ' ' + str(self.timeOther) + ' ' + str(self.timeTotal))
        # Convert sums to per-row averages.
        data.numAccessedRTreeNode /= index
        data.numTQSP /= index
        data.timeSemantic /= index
        data.timeTotal /= index
        data.timeOther /= index
        return data

    def get_info(self):
        """Return the path of the backing result file."""
        return self.filePath

    def __str__(self):
        strs = ''
        strs = strs + self.filePath + '\n'
        strs += 'numAccessedRTreeNode numTQSP timeSemantic timeOther timeTotal\n';
        strs += "%-7.0d%-7.0d%-7.0d%-7.0d%-7.0d"%(self.numAccessedRTreeNode, self.numTQSP, self.timeSemantic, self.timeOther, self.timeTotal) + '\n'
        return strs
# data = Data.getData(0, 500, 1, 1, 5)
# print(data)
# print(Data.getData(0, 500, 3, 10, 5, prefix='nwlen=50'))
# print(Data.getData(0, 500, 3, 10, 5, prefix='nwlen=5000'))
# print(Data.getData(0, 500, 3, 10, 5, prefix='nwlen=-1'))
# print(Data.getData(0, 500, 3, 5, 10))
# print(Data.getData(0, 200, 3, 5, 10))
# print(Data.getData(0, 100, 3, 5, 10))
# print(Data.getData(1, 500, 3, 10, 5, prefix='nwlen=5000'))
# print(Data.getData(1, 500, 3, 10, 5, prefix='nwlen=-1'))
|
import os
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from sklearn.model_selection import train_test_split, StratifiedKFold
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, Dropout, BatchNormalization, Activation
from tensorflow.keras.layers import GlobalAveragePooling2D, ZeroPadding2D, Add
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.optimizers import Adam
# 1. Data loading
train_data = pd.read_csv("./dacon2/data/train.csv", index_col=0, header=0)
print(train_data)
'''
# 그림 확인
idx = 999
img = train_data.loc[idx, '0':].values.reshape(28, 28).astype(int)
digit = train_data.loc[idx, 'digit']
letter = train_data.loc[idx, 'letter']
plt.title('Index: %i, Digit: %s, Letter: %s'%(idx, digit, letter))
plt.imshow(img)
plt.show()
'''
train_digit = train_data['digit'].values
train_letter = train_data['letter'].values
# Pixel columns -> (N, 28, 28, 1) images scaled to [0, 1].
x_train = train_data.drop(['digit', 'letter'], axis=1).values
x_train = x_train.reshape(-1, 28, 28, 1)
x_train = x_train/255
# One-hot encode the digit labels by hand.
y = train_data['digit']
y_train = np.zeros((len(y), len(y.unique())))
for i, digit in enumerate(y):
    y_train[i, digit] = 1
print(x_train.shape, y_train.shape) # (2048, 28, 28, 1) (2048, 10)
# x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.1, stratify=y_train)
# Light augmentation: shift images by at most one pixel in each direction.
datagen = ImageDataGenerator(
    width_shift_range=(-1,1),
    height_shift_range=(-1,1))
# Identity generator, used only for batching at prediction time.
datagen2 = ImageDataGenerator()
steps = 40
skfold = StratifiedKFold(n_splits=steps, random_state=42, shuffle=True)
# 2. Model
# number of classes
K = 10
input_tensor = Input(shape=x_train.shape[1:])
def conv1_layer(x):
    """ResNet-50 stem: 7x7/2 convolution with batch norm and ReLU."""
    t = ZeroPadding2D(padding=(3, 3))(x)
    t = Conv2D(64, (7, 7), strides=(2, 2))(t)
    t = BatchNormalization()(t)
    t = Activation('relu')(t)
    # pad once more so the following max-pool keeps the expected size
    return ZeroPadding2D(padding=(1, 1))(t)
def conv2_layer(x):
    """ResNet-50 stage 2: three 64-64-256 bottleneck blocks after max-pooling."""
    x = MaxPooling2D((3, 3), 2)(x)
    skip = x
    for block_index in range(3):
        if block_index == 0:
            # First block: the skip connection is projected to 256 channels.
            x = Conv2D(64, (1, 1), strides=(1, 1), padding='valid')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Conv2D(64, (3, 3), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Conv2D(256, (1, 1), strides=(1, 1), padding='valid')(x)
            skip = Conv2D(256, (1, 1), strides=(1, 1), padding='valid')(skip)
            x = BatchNormalization()(x)
            skip = BatchNormalization()(skip)
            x = Add()([x, skip])
            x = Activation('relu')(x)
            skip = x
        else:
            # Identity blocks: reuse the running skip tensor unchanged.
            x = Conv2D(64, (1, 1), strides=(1, 1), padding='valid')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Conv2D(64, (3, 3), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Conv2D(256, (1, 1), strides=(1, 1), padding='valid')(x)
            x = BatchNormalization()(x)
            x = Add()([x, skip])
            x = Activation('relu')(x)
            skip = x
    return x
def conv3_layer(x):
    """ResNet-50 stage 3: four 128-128-512 bottleneck blocks, downsampling by 2."""
    skip = x
    for block_index in range(4):
        if block_index == 0:
            # First block downsamples (stride 2) and projects the skip path.
            x = Conv2D(128, (1, 1), strides=(2, 2), padding='valid')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Conv2D(128, (3, 3), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Conv2D(512, (1, 1), strides=(1, 1), padding='valid')(x)
            skip = Conv2D(512, (1, 1), strides=(2, 2), padding='valid')(skip)
            x = BatchNormalization()(x)
            skip = BatchNormalization()(skip)
            x = Add()([x, skip])
            x = Activation('relu')(x)
            skip = x
        else:
            # Identity blocks: reuse the running skip tensor unchanged.
            x = Conv2D(128, (1, 1), strides=(1, 1), padding='valid')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Conv2D(128, (3, 3), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Conv2D(512, (1, 1), strides=(1, 1), padding='valid')(x)
            x = BatchNormalization()(x)
            x = Add()([x, skip])
            x = Activation('relu')(x)
            skip = x
    return x
def conv4_layer(x):
    """ResNet-50 stage 4: six 256-256-1024 bottleneck blocks, downsampling by 2."""
    skip = x
    for block_index in range(6):
        if block_index == 0:
            # First block downsamples (stride 2) and projects the skip path.
            x = Conv2D(256, (1, 1), strides=(2, 2), padding='valid')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Conv2D(256, (3, 3), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Conv2D(1024, (1, 1), strides=(1, 1), padding='valid')(x)
            skip = Conv2D(1024, (1, 1), strides=(2, 2), padding='valid')(skip)
            x = BatchNormalization()(x)
            skip = BatchNormalization()(skip)
            x = Add()([x, skip])
            x = Activation('relu')(x)
            skip = x
        else:
            # Identity blocks: reuse the running skip tensor unchanged.
            x = Conv2D(256, (1, 1), strides=(1, 1), padding='valid')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Conv2D(256, (3, 3), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Conv2D(1024, (1, 1), strides=(1, 1), padding='valid')(x)
            x = BatchNormalization()(x)
            x = Add()([x, skip])
            x = Activation('relu')(x)
            skip = x
    return x
def conv5_layer(x):
    """ResNet-50 stage 5: three 512-512-2048 bottleneck blocks, downsampling by 2."""
    skip = x
    for block_index in range(3):
        if block_index == 0:
            # First block downsamples (stride 2) and projects the skip path.
            x = Conv2D(512, (1, 1), strides=(2, 2), padding='valid')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Conv2D(512, (3, 3), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Conv2D(2048, (1, 1), strides=(1, 1), padding='valid')(x)
            skip = Conv2D(2048, (1, 1), strides=(2, 2), padding='valid')(skip)
            x = BatchNormalization()(x)
            skip = BatchNormalization()(skip)
            x = Add()([x, skip])
            x = Activation('relu')(x)
            skip = x
        else:
            # Identity blocks: reuse the running skip tensor unchanged.
            x = Conv2D(512, (1, 1), strides=(1, 1), padding='valid')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Conv2D(512, (3, 3), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Conv2D(2048, (1, 1), strides=(1, 1), padding='valid')(x)
            x = BatchNormalization()(x)
            x = Add()([x, skip])
            x = Activation('relu')(x)
            skip = x
    return x
# Assemble the full ResNet-50 graph from the stage builders above.
x = conv1_layer(input_tensor)
x = conv2_layer(x)
x = conv3_layer(x)
x = conv4_layer(x)
x = conv5_layer(x)
# Head: global average pooling before the softmax classifier
# (original comment: "error 잡기" — added to avoid a shape error).
x = GlobalAveragePooling2D()(x)
output_tensor = Dense(K, activation='softmax')(x)
resnet50 = Model(inputs=input_tensor, outputs=output_tensor)
resnet50.summary()
val_acc = []
# BUG FIX: the original did `model = resnet50(x_train)`, which *calls* the
# model on the training data and returns a tensor — tensors have no
# `.compile`/`.fit`, so the loop crashed. Clone the architecture instead so
# each fold trains a model with freshly initialised weights.
from tensorflow.keras.models import clone_model
for i, (train_idx, val_idx) in enumerate(skfold.split(x_train, y_train.argmax(1))):
    x_train_, x_val_ = x_train[train_idx], x_train[val_idx]
    y_train_, y_val_ = y_train[train_idx], y_train[val_idx]
    model = clone_model(resnet50)
    # checkpoint the best model of this fold to its own file
    filepath = './dacon2/data/vision_model_{}.hdf5'.format(i)
    es = EarlyStopping(monitor='val_loss', patience=160, mode='auto')
    cp = ModelCheckpoint(filepath=filepath, monitor='val_loss', save_best_only=True, mode='auto')
    lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=100)
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.002, epsilon=None), metrics=['accuracy'])
    hist = model.fit_generator(datagen.flow(x_train_, y_train_, batch_size=32), epochs=2000,
                    validation_data=(datagen.flow(x_val_, y_val_)), verbose=2, callbacks=[es, cp, lr])
    val_acc.append(max(hist.history['val_accuracy']))
    print('{}\'s CV End'.format(i+1))
# 3. Prediction
test_data = pd.read_csv('./dacon2/data/test.csv', index_col=0, header=0)
x_test = test_data.drop(['letter'], axis=1).values
x_test = x_test.reshape(-1, 28, 28, 1)
x_test = x_test/255
# best model select
print(val_acc)
i_max = np.argmax(val_acc)
print('Best Model is {}\'s'.format(i_max))
model = load_model('./dacon2/data/vision_model_{}.hdf5'.format(i_max))
submission = pd.read_csv('./dacon2/data/submission.csv', index_col=0, header=0)
submission['digit'] = np.argmax(model.predict(x_test), axis=1)
print(submission)
submission.to_csv('./dacon2/data/submission_model_best.csv')
# Ensemble: average the predictions of all KFold models.
submission2 = pd.read_csv('./dacon2/data/submission.csv', index_col=0, header=0)
result = 0
for i in range(steps):
    model = load_model('./dacon2/data/vision_model_{}.hdf5'.format(i))
    # NOTE(review): predict_generator is deprecated in newer TF — confirm
    # the installed version still provides it.
    result += model.predict_generator(datagen2.flow(x_test, shuffle=False)) / steps
submission2['digit'] = result.argmax(1)
print(submission2)
submission2.to_csv('./dacon2/data/submission_model_mean.csv')
|
import numpy as np
from numpy import pi
from numpy import cos
from numpy import sin
from numpy import exp
from numpy import sqrt
def cross_in_tray_function(params):
    """Evaluate the Cross-in-Tray benchmark at (params[0], params[1])."""
    x, y = params[0], params[1]
    inner = sin(x) * sin(y) * exp(abs(100 - sqrt(x ** 2 + y ** 2) / pi))
    return -0.0001 * (abs(inner) + 1) ** 0.1
def himmelblau_function(params):
    """Evaluate Himmelblau's function at (params[0], params[1])."""
    x, y = params[0], params[1]
    term_a = (x ** 2 + y - 11) ** 2
    term_b = (x + y ** 2 - 7) ** 2
    return term_a + term_b
def booth_function(params):
    """Evaluate the Booth benchmark at (params[0], params[1])."""
    x, y = params[0], params[1]
    first = (x + 2 * y - 7) ** 2
    second = (2 * x + y - 5) ** 2
    return first + second
def rastrigin_function(params):
    """Evaluate the Rastrigin benchmark; `params` is a numpy array of any length."""
    amplitude = 10
    dims = len(params)
    oscillation = np.sum(params ** 2 - amplitude * cos(2 * pi * params), axis=0)
    return amplitude * dims + oscillation
def beale_function(params):
    """Evaluate the Beale benchmark at (params[0], params[1])."""
    x, y = params[0], params[1]
    t1 = (1.5 - x + x * y) ** 2
    t2 = (2.25 - x + x * (y ** 2)) ** 2
    t3 = (2.625 - x + x * (y ** 3)) ** 2
    return t1 + t2 + t3
def happy_cat_function(params, alpha=0.125):
    """Evaluate the Happy Cat benchmark for an n-dimensional point.

    f(x) = ((||x||^2 - n)^2)^alpha + (0.5*||x||^2 + sum(x))/n + 0.5
    Global minimum is 0 at x = (-1, ..., -1).

    This was an unimplemented stub (returned None); implemented to match the
    sibling benchmark functions in this module.
    """
    params = np.asarray(params, dtype=float)
    n = len(params)
    norm_sq = np.sum(params ** 2, axis=0)
    return ((norm_sq - n) ** 2) ** alpha + (0.5 * norm_sq + np.sum(params, axis=0)) / n + 0.5
class FuncInf:
    """Descriptor for one benchmark objective function.

    Bundles the callable with its metadata (name, dimensionality, search
    domain and known global optimum) for use by optimisation drivers.
    """
    NAME = ""
    MULTI_DIM_PARAMS = False
    NUM_PARAMS = 2
    GLOBAL_OPTIMA = (0, 0)
    DOMAIN = (0, 0)
    F_FUNC = None

    def __init__(self, name, num_params, multi_dim_params, domain, global_optima, func):
        """Store the function handle and all of its metadata."""
        self.NAME = name
        self.NUM_PARAMS = num_params
        self.MULTI_DIM_PARAMS = multi_dim_params
        self.DOMAIN = domain
        self.GLOBAL_OPTIMA = global_optima
        self.F_FUNC = func
import argparse
import logging
import os
import sys
import systemstat
class SystemStatTool(systemstat.SystemStat):
    """Command-line front end for systemstat.SystemStat.

    Adds argparse option handling plus file/stdout logging on top of the
    polling behaviour inherited from SystemStat.  Note that the superclass
    is initialised in parse_options() rather than here, because its
    sleep/wait values come from the command line.
    """
    def __init__(self,logfile='systemstat.log', **kwargs):
        # Parsed options namespace; populated by parse_options().
        self.options = None
        self.logger = logging.getLogger(__name__)
        self.command_parser = argparse.ArgumentParser()
        self.command_parser.add_argument(
            "--sleep",
            help="seconds to sleep between polling",
            action="store",
            dest="sleep",
            default=1.0,
            type=float)
        self.command_parser.add_argument(
            "--wait",
            help="seconds to wait for the system to launch",
            action="store",
            dest="wait",
            default=120.0,
            type=float)
        self.command_parser.add_argument(
            "--logfile",
            help="name of the logfile",
            action="store",
            dest="logfile",
            default=logfile,
            type=str)
        self.command_parser.add_argument(
            "--logformat",
            help="logging format",
            action="store",
            dest="logformat",
            default='%(asctime)s %(message)s',
            type=str)
        # Each -v increments verbosity starting from 3; see start_logging()
        # for how the count maps onto a logging level.
        self.command_parser.add_argument(
            "--verbose", "-v",
            help="level of logging verbosity",
            dest="verbose",
            default=3,
            action="count")
        self.command_parser.add_argument(
            "--stdout",
            help="print logs to stdout",
            dest="stdout",
            default=False,
            action="store_true")
    def parse_options(self, args=None, namespace=None):
        """Parse command-line options and initialise the SystemStat base.

        Unrecognised arguments are not an error: they are collected into
        ``options.remainder`` for the caller to inspect.
        """
        # parse command line options
        cl_options, cl_unknown = self.command_parser.parse_known_args(
            args, namespace)
        self.options = cl_options
        self.options.__dict__['remainder'] = cl_unknown
        # Deferred base-class construction: needs the parsed sleep/wait.
        super(SystemStatTool,self).__init__(
            sleep=self.options.sleep, wait=self.options.wait)
    def start_logging(self):
        """Attach file (and optionally stdout) handlers and log startup info.

        Must be called after parse_options().
        """
        # setup a log file
        self.options.logfile = os.path.abspath(
            os.path.expanduser(
                os.path.expandvars(
                    self.options.logfile)))
        # verbose=3 (default) -> 30 (WARNING); each extra -v lowers the
        # threshold by 10, i.e. more verbose output.
        loglevel = int((6-self.options.verbose)*10)
        file_hdlr = logging.FileHandler(self.options.logfile)
        file_hdlr.setFormatter(logging.Formatter(self.options.logformat))
        file_hdlr.setLevel(loglevel)
        self.logger.addHandler(file_hdlr)
        # check if we should print the log to stdout as well
        if self.options.stdout is True:
            out_hdlr = logging.StreamHandler(sys.stdout)
            out_hdlr.setFormatter(logging.Formatter(self.options.logformat))
            out_hdlr.setLevel(loglevel)
            self.logger.addHandler(out_hdlr)
        self.logger.setLevel(loglevel)
        self.logger.info("starting %s" % sys.argv[0])
        self.logger.info("command line options: %s" % sys.argv[1:])
        # print out the parsed options
        self.logger.debug('opts = {}'.format(self.options))
if __name__ == '__main__' :
    # Build the tool, parse CLI flags, then start logging before polling.
    tool = SystemStatTool()
    tool.parse_options()
    tool.start_logging()
    # Block until the monitored system responds (inherited behaviour;
    # implementation lives in systemstat.SystemStat).
    system_ready = tool.wait_until_ready()
    # Exit status: 0 when the system became ready, 1 otherwise.
    if system_ready:
        status = 0
    else:
        status = 1
    tool.logger.debug('exiting')
    sys.exit(status)
|
from flask import Flask,jsonify,request
from flask_pymongo import PyMongo
from bson.json_util import dumps
from flask_cors import CORS
from numpy.core.numeric import NaN
from model.plagiarism import cosine_distance_countvectorizer_method
import json
# Flask application with permissive CORS (any origin, every route).
app =Flask(__name__)
CORS(app)
cors = CORS(app,resources = {
    r"/*":{
        "origins":"*"
    }
})
# SECURITY(review): database credentials are hard-coded in source and
# committed to version control — move them to an environment variable.
app.config['MONGO_URI']="mongodb+srv://nihal:panasonic@cluster0.fwayb.mongodb.net/records?retryWrites=true&w=majority"
mongo = PyMongo(app)
@app.route('/<string:uId>/<string:testId>',methods=['GET','PUT','DELETE'])
def testscrud(uId,testId):
    """Read or delete one test document from the user's collection.

    GET returns the single matching document; PUT returns the matching
    cursor contents; DELETE removes the test from both the user's
    collection and the global 'tests' index.
    """
    method = request.method
    if method == "GET":
        return dumps(mongo.db[uId].find_one({"_id": testId}))
    if method == "PUT":
        # NOTE(review): PUT currently only reads (find) — it performs no
        # update; confirm whether that is intentional.
        return dumps(mongo.db[uId].find({"_id": testId}))
    # DELETE: remove from both collections.
    mongo.db[uId].delete_one({"_id": testId})
    mongo.db.tests.delete_one({"_id": testId})
    return jsonify ({"message":"success"})
@app.route('/<string:userId>',methods=['GET','POST'])
def home(userId):
    """List a user's tests (GET) or create a new test document (POST)."""
    if request.method == 'POST':
        document = request.get_json()
        # Use the client-supplied testId as the Mongo _id.
        test_id = document["testId"]
        document['_id'] = test_id
        mongo.db[userId].insert_one(document)
        # Register the test in the global 'tests' index so it can be looked
        # up later without knowing which user owns it.
        index_entry = {'_id': test_id, 'userId': userId}
        mongo.db.tests.insert_one(index_entry)
        return jsonify(document)
    # GET: dump every test stored in this user's collection.
    return dumps(mongo.db[userId].find())
@app.route('/tests/<string:testId>',methods=['GET','POST','PUT'])
def getNewTest(testId):
    """Fetch (GET) or update-and-score (PUT) a test document by global id.

    GET: resolve the owning user via the global 'tests' index, then return
    the full document from that user's collection.
    PUT: upsert the submitted document, cross-compare every pair of student
    answers to the same question for plagiarism, store each response's
    maximum similarity score, then upsert again.
    """
    import math  # local import: used only for the NaN guard below

    if request.method == "GET":
        query = {"_id": testId}
        response = mongo.db.tests.find_one(query)
        if response is None:
            return jsonify({'message': "Error"})
        userId = response['userId']
        document = mongo.db[userId].find_one(query)
        return dumps(document)
    elif request.method == "PUT":
        requestBody = request.get_json()
        query = {"_id": testId}
        # Fail fast (KeyError -> 500) when the payload has no 'students'
        # section, before any database write happens.
        requestBody["students"]
        response = mongo.db.tests.find_one(query)
        if response is None:
            return jsonify({'message': "Error"})
        userId = response['userId']
        # First upsert: persist the raw submission before scoring.
        # NOTE(review): Collection.update() is deprecated in modern pymongo;
        # replace_one(..., upsert=True) is the current API.
        document = mongo.db[userId].update(query, requestBody, True)
        json_data = requestBody["students"]
        # Compare every response against every *other* student's response to
        # the same question id, keeping the highest similarity score seen.
        for key1 in json_data:
            for response1 in json_data[key1]["responses"]:
                for key2 in requestBody["students"]:
                    for response2 in requestBody["students"][key2]["responses"]:
                        if key1 != key2 and response1["id"] == response2["id"]:
                            score = cosine_distance_countvectorizer_method(response1["Answer"], response2["Answer"])
                            try:
                                if int(response2["plagiarism"]) < score:
                                    response2["plagiarism"] = score
                            except (KeyError, TypeError, ValueError):
                                # Missing or non-numeric previous score.
                                response2["plagiarism"] = score
                            # BUG FIX: the original compared `== NaN`, which
                            # is always False (NaN != NaN by definition);
                            # use math.isnan to detect and zero it out.
                            plag = response2["plagiarism"]
                            if isinstance(plag, float) and math.isnan(plag):
                                response2["plagiarism"] = 0
        # Second upsert: persist the computed plagiarism scores.
        document = mongo.db[userId].update(query, requestBody, True)
        return jsonify(dumps(document))
    else:
        requestBody = request.get_json()
        print("Evaluating score for - ")
        print(requestBody)
        return jsonify({"message":"undefined request"})
if __name__ == "__main__":
    # Development entry point: serve locally on port 3001 with the debugger.
    app.run(port=3001, debug=True)
import logging
# Network communication default port
DEFAULT_PORT = 7777
# Current logging level
LOGGING_LEVEL = logging.DEBUG
# Max connections queue
MAX_CONNECTIONS = 5
# Max message length (in bytes)
MAX_PACKAGE_LENGTH = 1024
# Project encoding
ENCODING = 'utf-8'
# Protocol keys (field names used in every JSON message)
ACTION = 'action'
TIME = 'time'
USER = 'user'
PUBLIC_KEY = 'public_key'
DATA = 'bin'
SENDER = 'sender'
DESTINATION = 'destination'
ACCOUNT_NAME = 'account_name'
MESSAGE_TEXT = 'mess_text'
# Protocol actions (values of the ACTION field)
ACTION_PRESENCE = 'presence'
ACTION_EXIT = 'exit'
ACTION_MESSAGE = 'message'
ACTION_ADD_CONTACT = 'add'
ACTION_REMOVE_CONTACT = 'remove'
ACTION_USERS_REQUEST = 'users'
ACTION_CONTACTS_REQUEST = 'contacts'
ACTION_PUBLIC_KEY_REQUEST = 'public_key'
# Generic protocol keys
RESPONSE = 'response'
ERROR = 'error'
DATA_LIST = 'data_list'
# Responses
# NOTE(review): these dicts are shared templates — callers that fill in the
# ERROR/DATA/DATA_LIST fields should copy them first (e.g. dict(RESPONSE_400))
# to avoid state leaking between messages through the shared object.
RESPONSE_400 = {
    RESPONSE: 400,
    ERROR: None
}
RESPONSE_200 = {
    RESPONSE: 200
}
RESPONSE_202 = {
    RESPONSE: 202,
    DATA_LIST: None
}
RESPONSE_205 = {
    RESPONSE: 205
}
RESPONSE_511 = {
    RESPONSE: 511,
    DATA: None
}
SERVER_CONFIG_FILENAME = 'server.ini'
CLIENT_DB_PATH = ''
|
from py2neo import Graph, Node, Relationship
from bs4 import BeautifulSoup
import requests, json, sys
# SECURITY(review): the database password is hard-coded and committed with
# the source — move it to an environment variable or a config file.
graph = Graph(password="fhdiGEN82&sk@PLD")
trans = graph.begin()
# For each movie, get the image src property
# NOTE(review): nodes are saved with graph.push() inside the loop while the
# transaction `trans` is only committed on exit paths — the two mechanisms
# appear redundant; confirm which one is actually intended.
for node in graph.nodes.match("Person"):
    # Skip nodes already resolved: '' marks a previous 404, any other
    # non-None value is a cached image URL.
    if node['image'] == '' : continue
    elif node['image'] is not None : continue
    try:
        url = f"http://imdb.com{node['url']}"
        req = requests.get(url)
        if req.status_code == 404:
            # Remember the miss so this node is not retried on the next run.
            node['image'] = ''
            graph.push(node)
            print(f"Could not find IMDB page for {node['name']}")
            continue
        else:
            # Scrape the poster image URL from the person's IMDB page.
            html = req.content
            soup = BeautifulSoup(html, 'html.parser')
            img = soup.select('#name-poster')[0]['src']
            node['image'] = img
            graph.push(node)
            print(f"Updated {node['name']}")
    except (KeyboardInterrupt):
        # Allow a clean manual stop with Ctrl-C.
        print("Exiting manually")
        trans.commit()
        sys.exit()
    except Exception as e:
        # Any scraping/parsing failure: mark the node as having no image.
        print(e)
        node['image'] = ''
        graph.push(node)
        print(f"Skipping {node['name']}")
trans.commit()
|
from django.test import TestCase, Client
from django.urls import reverse
from django.contrib.auth import get_user_model
from .models import Profile
class ProfileModelTest(TestCase):
    """Checks Profile.readable_height, the inches -> (feet, inches) split."""

    def test_height_normal(self):
        # 74 inches is 6 ft 2 in.
        profile = create_user().profile
        profile.height = 74
        self.assertTupleEqual((6, 2), profile.readable_height)

    def test_height_negative(self):
        # A nonsensical negative height collapses to (0, 0).
        profile = create_user().profile
        profile.height = -12
        self.assertTupleEqual((0, 0), profile.readable_height)

    def test_height_boundary(self):
        # An exact multiple of 12 leaves no remainder inches.
        profile = create_user().profile
        profile.height = 72
        self.assertTupleEqual((6, 0), profile.readable_height)

    def test_height_boundary2(self):
        # One inch below a full-foot boundary.
        profile = create_user().profile
        profile.height = 71
        self.assertTupleEqual((5, 11), profile.readable_height)
class ProfileCreationTest(TestCase):
    """Creating a user must automatically create the associated Profile."""

    def test_create_profile(self):
        fresh_user = create_user(first_name='creator')
        self.assertIsInstance(fresh_user.profile, Profile)
        fresh_user.save()
def create_user(username="Tester", first_name="John", last_name="Doe", email="jd@example.com"):
    """Create and return a user through the project's configured user model."""
    user_model = get_user_model()
    return user_model.objects.create(
        username=username,
        first_name=first_name,
        last_name=last_name,
        email=email,
    )
class ProfilePageTest(TestCase):
    """Tests rendering and updating of a user's own profile page."""
    # Basic test, ensure that user's characteristics and the edit profile button shows up on their own profile page.
    def test_new_profile(self):
        new_user = create_user(first_name = 'new_prof_test')
        new_user.profile.weight = 1234
        new_user.profile.points = 9999
        new_user.profile.bio = "unit test bio"
        new_user.profile.save()
        # View the page as the owner of the profile.
        self.client.force_login(new_user)
        response = self.client.get(reverse('profile'))
        # Each saved attribute must appear somewhere on the rendered page.
        self.assertContains(response, '1234')
        self.assertContains(response, 9999)
        self.assertContains(response, 'unit test bio')
        self.assertContains(response, 'Edit')
    # Test to ensure that updating the profile with a POST actually changes the profile page.
    def test_profile_change(self):
        new_user = create_user(first_name = 'update_test')
        new_user.profile.weight = 74
        new_user.profile.bio = "I should be changed"
        new_user.profile.save()
        self.client.force_login(new_user)
        self.client.post(reverse('edit_profile'), {
            'bio': 'New bio',
            'height': 72,
            'weight': 75,
            'points': 0,
        })
        response = self.client.get(reverse('profile'))
        # Old bio and old height must be gone; the new bio must be present.
        self.assertNotContains(response,'I should be changed')
        self.assertNotContains(response, '6 ft 2 in')
        self.assertContains(response, 'New bio')
    def test_nonexistent_profile_id(self):
        # Requesting a profile id that does not exist should 404.
        response = self.client.get(reverse('profile') + '/999')
        self.assertEqual(response.status_code, 404)
class FriendsTest(TestCase):
    """Adding and removing friendships between two profiles."""

    def test_friend_basic(self):
        # A POST to add_friend should link the target into friends.
        user_a = create_user(username="friender")
        user_b = create_user(username="friendtarget")
        self.client.force_login(user_a)
        self.client.post(reverse('add_friend'), {'new_friend_pk': user_b.profile.pk})
        self.assertIn(user_b.profile, user_a.profile.friends.all())

    def test_friend_remove(self):
        # Removing a friend must undo a previous add.
        user_a = create_user(username="friender")
        user_b = create_user(username="friendtarget")
        self.client.force_login(user_a)
        self.client.post(reverse('add_friend'), {'new_friend_pk': user_b.profile.pk})
        self.client.post(reverse('remove_friend'), {'remove_friend_pk': user_b.profile.pk})
        self.assertNotIn(user_b.profile, user_a.profile.friends.all())
#!/usr/bin/python
# coding: utf-8
# Screensaver for imagizer
from __future__ import division, print_function, with_statement
__author__ = "Jérôme Kieffer"
__date__ = "06/01/2016"
__copyright__ = "Jerome Kieffer"
__license__ = "GPLv3+"
__contact__ = "Jerome.Kieffer@terre-adelie.org"
import sys
import os
import gc
import logging
import random
import uuid
from argparse import ArgumentParser
# Configure logging before the imagizer modules are imported; one log file
# per run (uuid1 keeps concurrent screensaver instances from clobbering
# each other's log).
logging.basicConfig(level=logging.INFO , filename="/tmp/imagizer_screensaver_%s.log" % uuid.uuid1())
logger = logging.getLogger("imagizer.screensaver")
import numpy
from imagizer import qt
from imagizer.qt import flush
from imagizer.photo import Photo
from imagizer.config import config
from imagizer.imagizer import range_tout
from PyQt5 import QtX11Extras
class FullScreenWidget(qt.QWidget):
    """Borderless full-screen widget showing a single centred image.

    Any key press closes the widget (screensaver behaviour).  Relies on
    the module-level QApplication ``app`` in make_fullscreen().
    """
    def __init__(self, parent=None):
        super(FullScreenWidget, self).__init__(parent=parent)
        # if wid is not None:
        #     self.embedInto(wid)
        self.setWindowState(qt.Qt.WindowFullScreen | qt.Qt.WindowActive)
        self.setSizePolicy(qt.QSizePolicy.Maximum, qt.QSizePolicy.Maximum)
        # Black background behind the (possibly smaller) image.
        pal = qt.QPalette()
        pal.setColor(qt.QPalette.Window, qt.Qt.black);
        self.image = qt.QLabel(self)
        self.image.setSizePolicy(qt.QSizePolicy.Maximum, qt.QSizePolicy.Maximum)
        self.image.setAlignment(qt.Qt.AlignHCenter | qt.Qt.AlignVCenter)
        self.setAutoFillBackground(True)
        self.setPalette(pal)
        # Single-cell layout with no margins so the image fills the screen.
        self.layout = qt.QVBoxLayout(self)
        self.layout.setContentsMargins(0, 0, 0, 0)
        self.layout.setAlignment(qt.Qt.AlignHCenter | qt.Qt.AlignVCenter)
        self.layout.addWidget(self.image)
        self.setLayout(self.layout)
    def keyPressEvent(self, e):
        # Any key closes the screensaver window.
        self.close()
    def make_fullscreen(self):
        """
        Put an empty rectangle for a couple of seconds to actually record
        the size of the screen
        """
        pix = qt.QPixmap (1920, 1080)
        self.image.setPixmap(pix)
        # Uses the module-global `app` created in __main__.
        app.processEvents()
        desktop = app.desktop()
        screen = desktop.screenNumber(self)
        rec = desktop.screenGeometry(screen)
        width, height = rec.width(), rec.height()
        rec2 = qt.QRect(0, 0, width, height)
        self.setGeometry(rec2)
        self.image.setGeometry(rec2)
        print(width, height)
        app.processEvents()
app.processEvents()
class ScreeenSaver(object):
    """Random slideshow of the imagizer repository, driven by a QTimer.

    Relies on the module-level QApplication ``app`` created in __main__.
    """
    def __init__(self):
        self.window = None        # FullScreenWidget, created lazily
        self.photo = None         # Photo currently loaded
        self.args = None          # parsed command-line namespace
        self.current = None       # path of the image currently displayed
        self.all_jpg = []         # every image found in the repository
        self.random_list = []     # shuffled indices not yet displayed
        self.filename = os.path.join(os.environ.get("HOME"), ".screensaver.imagizer")
        self.timer = None         # QTimer firing on_tick
        self.desktop = app.desktop()
        self.wid = None           # X11 window id when embedded by xscreensaver
        self.parent = None        # QWidget wrapping self.wid, if found

    def init_window(self):
        """initialize the window, if needed"""
        if self.window:
            return
        for i in os.environ:
            if "XSCREENSAVER" in i:
                logger.debug("%s: %s" % (i, os.environ[i]))
        # The window id may come from the command line or from the
        # environment, either as a decimal or a hexadecimal string.
        if self.args.window_id:
            try:
                self.wid = int(self.args.window_id)
            except ValueError:
                self.wid = int(self.args.window_id, 16)
        elif "XSCREENSAVER_WINDOW" in os.environ:
            try:
                self.wid = int(os.environ["XSCREENSAVER_WINDOW"])
            except ValueError:
                self.wid = int(os.environ["XSCREENSAVER_WINDOW"], 16)
        logger.debug("Working on wid %s screen %s/%s" % (self.wid, self.desktop.screenNumber(self.window), self.desktop.screenCount()))
        if self.wid:
            self.parent = qt.QWidget.find(self.wid)
            if self.parent:
                self.parent.show()
                # BUG FIX: the original format string had four placeholders
                # but five arguments (and swapped width/height), so this
                # debug line reported a logging formatting error.
                logger.debug("Parent place: %sx%s size: %sx%s",
                             self.parent.x(), self.parent.y(),
                             self.parent.width(), self.parent.height())
            else:
                logger.debug("parent: %s wid: %s", self.parent, self.wid)
        self.window = FullScreenWidget(parent=self.parent)
        self.window.show()
        flush()

    @staticmethod
    def scan():
        """
        Scan the repository for all valid files
        """
        all_jpg = range_tout(repository=config.DefaultRepository, bUseX=False, fast=True, updated=None, finished=None)[0]
        logger.debug("Scanned directory %s and found %i images: %s", config.DefaultRepository, len(all_jpg), all_jpg[-5:])
        return all_jpg

    def parse(self):
        """
        Parse command line argument: old style single "-"
        """
        parser = ArgumentParser(description='Imagizer screen-saver')
        parser.add_argument('-display', dest='display', help='host:display.screen')
        parser.add_argument('-window', dest='window', help="Draw on a newly-created window. This is the default.", action="store_true", default=False)
        parser.add_argument('-root', dest='root', action="store_true", default=False, help="Draw on the root window.")
        parser.add_argument('-delay', dest="delay", default=20, type=int, help="Per-frame delay, in seconds. Default: 20 seconds.")
        parser.add_argument("-window-id", dest="window_id", help="Identifier of the window to display.")
        parser.add_argument("-landscape", dest="landscape", action="store_true", default=False, help="Restrict slideshow to landscape images")
        parser.add_argument("-verbose", dest="debug", action="store_true", default=False, help="print debugging informations")
        self.args = parser.parse_args()
        if self.args.debug:
            logging.root.setLevel(logging.DEBUG)
            logger.setLevel(logging.DEBUG)
        logger.debug("args: %s" % self.args)

    def show(self):
        """Display the current image in the Qt label, scaled to the screen."""
        if not self.current:
            return
        if self.photo is None:
            self.photo = Photo(self.current, True)
        if self.wid:
            screen_geometry = self.desktop.screenGeometry(self.window)
            width, height = screen_geometry.width(), screen_geometry.height()
        else:
            width, height = self.window.width(), self.window.height()
        logger.debug("Showing image %s in size %sx%s on screen %s/%s" % (self.current, width, height, self.desktop.screenNumber(self.window), self.desktop.screenCount()))
        pixbuf = self.photo.get_pixbuf(width, height)
        self.window.image.setPixmap(pixbuf)
        # Drop the pixmap reference before forcing a collection pass.
        del pixbuf
        gc.collect()
        flush()
        logger.debug("image widget has size %sx%s", self.window.image.width(), self.window.image.height())

    def next(self):
        """
        Finds the next image to display, biased towards highly-rated ones.
        """
        if not self.all_jpg:
            self.all_jpg = self.scan()
        found = False
        while not found:
            if not self.random_list:
                # BUG FIX: random.shuffle needs a mutable sequence; under
                # Python 3 shuffling a `range` object raises TypeError.
                self.random_list = list(range(len(self.all_jpg)))
                random.shuffle(self.random_list)
            index = self.random_list.pop()
            self.photo = Photo(self.all_jpg[index], True)
            data = self.photo.readExif()
            rate = data.get("rate", 0)
            # Rejection sampling: 0 for rate=5, 0.5 for rate=3, 0.99 for
            # rate=0 — better-rated images are accepted more often.
            threshold = -0.01733333 * rate * rate + -0.11133333 * rate + 0.99
            found = (random.random() >= threshold)
            if found and self.args.landscape:
                found = found and (self.photo.pixelsX > self.photo.pixelsY)
        return self.all_jpg[index]

    def __repr__(self):
        return "screensaver instance"

    def save(self):
        """
        saves the current image path for next restart
        """
        logger.debug("Saving filename")
        with open(self.filename, "w") as f:
            f.write(self.current)

    def load(self):
        """
        Load the last image from a file
        """
        if os.path.isfile(self.filename):
            # FIX: context manager closes the file handle promptly.
            with open(self.filename, "r") as fd:
                current = fd.read().strip()
            # Relative paths are resolved against the repository root.
            if current.startswith(os.sep):
                candidate = current
            else:
                candidate = os.path.join(config.DefaultRepository, current)
            if os.path.exists(candidate):
                self.current = current

    def on_tick(self):
        """
        Skip to another picture.
        """
        self.current = self.next()
        self.save()
        self.show()

    def start(self):
        """
        Actually start the slideshow timer.
        """
        if not self.args:
            self.parse()
        if not self.timer:
            # In debug mode run 10x faster to ease testing.
            if self.args.debug:
                delay = int(self.args.delay * 100)
            else:
                delay = int(self.args.delay * 1000)
            self.timer = qt.QTimer(self.window)
            self.timer.setInterval(delay)
            self.timer.timeout.connect(self.on_tick)
            self.timer.start(delay)
        else:
            logger.warning("timer already started")
if __name__ == "__main__":
app = qt.QApplication([])
scs = ScreeenSaver()
scs.parse()
scs.load()
scs.init_window()
scs.show()
scs.start()
res = app.exec_()
sys.exit(0)
|
import psycopg2
import pandas as pd
import numpy as np
import datetime
from sklearn.metrics.pairwise import cosine_similarity
connection = psycopg2.connect("host='localhost' dbname='movies_db_demo' user='postgres' password='tiendat148'")
mycursor = connection.cursor()
class User_User_CF(object):
def __init__(self, userId, rated_movies):
"""Constructor of class User_User_CF
Parameters:
+ ratings: is the ratings dataframe (type 'Dataframe').
+ userId: id of user that you want to recommend movies to (type 'int').
+ rated_movies: the movies which user 'userId' voted for"""
self.userId = userId
# find the others users who also rate movies in 'rated_movies'
self.similarity_ratings = None
self.list_items_id = rated_movies
# list_users_id contains users who rate movies in 'rated_movies'
self.list_users_id = []
# number of users
self.n_users = 0
# number of items
self.n_items = len(self.list_items_id)
# Matrix user_item with column is item, row is user and value is rating
self.Ybar_data = None
# scores_users_sim contains users that are similar to 'userId'
self.scores_users_sim = None
def process_attributes(self):
"""Process the attributes if it is not processed in the contructor"""
query_string = """select user_id
from ratings
where movie_id in ("""+str(rated_movies).replace('[','').replace(']','')+""")
group by user_id
having count(user_id) > """ + str(int(0.5*len(rated_movies)))
mycursor.execute(query_string)
query_result = mycursor.fetchall()
# add userId to list_users_id after filtering
self.list_users_id = [ele[0] for ele in query_result]
self.n_users = len(self.list_users_id)
# similarity_ratings contains ratings of users in 'list_users_id'
query_string = "select * from ratings where user_id in ("+str(self.list_users_id).replace('[','').replace(']','')+") and movie_id in ("+str(self.list_items_id).replace('[','').replace(']','')+")"
mycursor.execute(query_string)
query_result = mycursor.fetchall()
self.similarity_ratings = pd.DataFrame(query_result, columns=['userId','movieId','rating','timestamp'])
mean = {}
for user in self.list_users_id:
mean[user] = (self.similarity_ratings[self.similarity_ratings.userId==user].rating).mean()
# Generate Ybar_data which has 'n_users' rows and 'n_items' columns
self.Ybar_data = pd.DataFrame(data=np.zeros((self.n_users,self.n_items)),index=self.list_users_id,columns=self.list_items_id)
#set ratings value in Ybar_data
for row in self.similarity_ratings.itertuples():
self.Ybar_data.loc[row.userId,row.movieId] = row.rating - mean[row.userId]
def calculate_similarity(self):
"""Calculate the similarity
between 'userId' and other users"""
# get data about rating of 'userId' from Ybar_data and convert to numpy array
row_user = [(self.Ybar_data.loc[self.userId,:]).to_numpy()]
# convert Ybar_data to numpy array
Ybar = self.Ybar_data.to_numpy()
# Calculate the similarity
cosine_sim = cosine_similarity(row_user,Ybar)
# Sort users in descending order of similarity with 'userId'
temp = cosine_sim[0].tolist()
similar_users = list(zip(self.list_users_id,temp))
similar_users = sorted(similar_users, key=lambda x: x[1], reverse=True)[1:6]
# assign the result to 'scores_users_sim' as a dict
self.scores_users_sim = dict(similar_users)
# edit 'similarity_ratings' only contains rating of users in 'scores_users_sim'
query_string = "select * from ratings where user_id in ("+str(list(self.scores_users_sim.keys())).replace('[','').replace(']','')+")"
mycursor.execute(query_string)
query_result = mycursor.fetchall()
self.similarity_ratings = pd.DataFrame(query_result, columns=['userId','movieId','rating','timestamp'])
def run(self):
"""Handles attributes and calculates similarities between found users"""
self.process_attributes()
self.calculate_similarity()
def get_recommend_movies(self):
    """Collect candidate movies for recommendation.

    **Return: list of movieId — movies the similar users rated above zero
    and that the target user has not rated yet.
    """
    rated_positively = self.similarity_ratings.copy().query('rating > 0')
    candidates = rated_positively['movieId'].unique().tolist()
    return [movie_id for movie_id in candidates
            if movie_id not in self.list_items_id]
def pred(self, item):
    """Predict the rating of the target user for *item*.

    **Return: a float — the similarity-weighted average of the top
    users' ratings for the item.
    """
    item_ratings = self.similarity_ratings[self.similarity_ratings['movieId'] == item].copy()
    denominator = sum(self.scores_users_sim.values())
    numerator = 0
    for record in item_ratings.itertuples():
        numerator += self.scores_users_sim[record.userId] * record.rating
    return numerator / denominator
def recommend(self):
    """Recommend movies to the target user.

    **Return: up to 200 (movieId, predicted_rating) pairs, sorted in
    descending order by predicted rating.
    """
    scored = [(movie_id, self.pred(movie_id))
              for movie_id in self.get_recommend_movies()]
    return sorted(scored, key=lambda pair: pair[1], reverse=True)[:200]
def info(self):
    """Print a short diagnostic summary of this recommender's state."""
    print(f'userId: {self.userId}')
    print(f'n_users: {self.n_users}')
    print(f'n_items: {self.n_items}')
    print(f'similarity_ratings shape: {self.similarity_ratings.shape}')
    print(self.Ybar_data.head())
# x = datetime.datetime.now()
# print(x)
# Users to generate recommendations for.
userIds = [8, 9, 11, 12, 15, 16, 20]
list_recommend_for_userIds = {}
for userId in userIds:
    # All movies this user has already rated.
    mycursor.execute("select movie_id from ratings where user_id = "+str(userId))
    query_result = mycursor.fetchall()
    rated_movies = [ele[0] for ele in query_result]
    # train = rated_movies[:int(0.8*len(rated_movies))].copy()
    # test = [ele for ele in rated_movies if ele not in train]
    # Build and run the user-user collaborative-filtering model.
    user_user_cf = User_User_CF(userId,rated_movies)
    user_user_cf.run()
    sort_recommend_movies = user_user_cf.recommend()
    # Keep only the movie ids; drop the predicted scores.
    list_recommend_for_userIds[userId] = [ele[0] for ele in sort_recommend_movies]
# Persist every user's recommendation list as a stringified dict.
recommend_file = open('./data/recommend.txt', 'w')
recommend_file.write(str(list_recommend_for_userIds))
recommend_file.close()
# y = datetime.datetime.now()
# true_recommend = []
# for i in sort_recommend_movies:
#     if i[0] in test:
#         true_recommend.append(i)
# print('test:',test)
# print('so recommend:',len(sort_recommend_movies))
# print('so phim dung:',len(true_recommend))
# print('ti le 1:',len(true_recommend)/len(test))
# print('ti le 2:',len(true_recommend)/len(sort_recommend_movies))
# print('id dung:',true_recommend)
# print(y)
# print('thoi gian chay:', y-x)
mycursor.close()
connection.close()
import csv

# Collect the distinct 3-character prefixes of all ICD-10 'S' codes
# (injury chapter) from the 2020 code list.
prefixes = set()
with open("/home/basar/Downloads/icd10cm_codes_2020.txt") as handle:
    for record in handle:
        fields = record.split(None, 1)
        if fields[0].startswith('S'):
            prefixes.add(fields[0][:3])
print(list(prefixes))
print(len(prefixes))
|
from bibliopixel.animation import BaseStripAnim
import bibliopixel.colors as colors
import time
class RGBClock(BaseStripAnim):
    """RGB Clock done with RGB LED strip(s).

    Three strip segments show hour, minute and second, each rendered as a
    hue on the rainbow colour wheel.
    """
    def __init__(self, led, hStart, hEnd, mStart, mEnd, sStart, sEnd):
        # start=0, end=-1: animate over the whole strip, forever.
        super(RGBClock, self).__init__(led, 0, -1)
        # Guarantee each segment is at least one pixel wide.
        if hEnd < hStart:
            hEnd = hStart + 1
        if mEnd < mStart:
            mEnd = mStart + 1
        if sEnd < sStart:
            sEnd = sStart + 1
        self._hStart = hStart
        self._hEnd = hEnd
        self._mStart = mStart
        self._mEnd = mEnd
        self._sStart = sStart
        self._sEnd = sEnd

    def step(self, amt = 1):
        """Render the current local time onto the three segments."""
        t = time.localtime()
        # Map each time component onto the 0-255 hue range.
        # NOTE(review): under Python 3, 256/24 and 256/60 are floats, so the
        # hue argument is a float — confirm hue2rgb_rainbow accepts that
        # (under Python 2 this was integer division).
        r, g, b = colors.hue2rgb_rainbow(t.tm_hour * (256/24))
        self._led.fillRGB(r,g,b,self._hStart,self._hEnd)
        r, g, b = colors.hue2rgb_rainbow(t.tm_min * (256/60))
        self._led.fillRGB(r,g,b,self._mStart,self._mEnd)
        r, g, b = colors.hue2rgb_rainbow(t.tm_sec * (256/60))
        self._led.fillRGB(r,g,b,self._sStart,self._sEnd)
        # Pin the animation's internal step counter so it never advances.
        self._step = 0
# BiblioPixel animation manifest: registers RGBClock with the strip
# controller UI, declaring the six segment-boundary parameters (all ints,
# no defaults).
MANIFEST = [
    {
        "class": RGBClock,
        "controller": "strip",
        "desc": "Color Clock",
        "display": "RGBClock",
        "id": "RGBClock",
        "params": [
            {
                "default": None,
                "help": "",
                "id": "hStart",
                "label": "Hour Start Pixel",
                "type" : "int"
            },
            {
                "default": None,
                "help": "",
                "id": "hEnd",
                "label": "Hour End Pixel",
                "type" : "int"
            },
            {
                "default": None,
                "help": "",
                "id": "mStart",
                "label": "Min Start Pixel",
                "type" : "int"
            },
            {
                "default": None,
                "help": "",
                "id": "mEnd",
                "label": "Minute End Pixel",
                "type" : "int"
            },
            {
                "default": None,
                "help": "",
                "id": "sStart",
                "label": "Sec Start Pixel",
                "type" : "int"
            },
            {
                "default": None,
                "help": "",
                "id": "sEnd",
                "label": "Sec End Pixel",
                "type" : "int"
            }
        ],
        "type": "animation"
    }
]
|
def insertion_sort(arr):
    """Sort *arr* in place using insertion sort and return it.

    Args:
        arr: mutable sequence of mutually comparable items.

    Returns:
        The same list object, sorted in ascending order.
    """
    for i in range(1, len(arr)):
        current = arr[i]
        # The `i > 0` guard must come first: the original checked
        # `arr[i-1] > current` before `i > 0`, silently reading arr[-1]
        # (the last element) via negative indexing once i reached 0.
        while i > 0 and arr[i - 1] > current:
            arr[i], arr[i - 1] = arr[i - 1], arr[i]
            i -= 1
    return arr
|
import collections
from django.core.management.base import BaseCommand
from pizzeria.order import models
def add_sizes():
    """Seed the four pizza sizes; idempotent thanks to get_or_create."""
    sizes = (
        ('Extra Large', 16, 13.95, 2.25),
        ('Large', 14, 11.95, 1.85),
        ('Medium', 12, 9.95, 1.50),
        ('Small', 10, 7.95, 1.25),
    )
    for label, inches, base_cost, topping_cost in sizes:
        models.Size.objects.get_or_create(
            label=label,
            inches=inches,
            base_cost=base_cost,
            topping_cost=topping_cost,
        )
def add_toppings():
    """Seed the topping catalogue; idempotent thanks to get_or_create.

    NOTE(review): only four entries are flagged vegan (BBQ Sauce, Classic
    Italian Sauce, Peanut Sauce, Pesto Sauce) while several plant toppings
    (e.g. Basil, Black Olives, Spinach) are marked False — confirm the
    flags are intentional.
    """
    for label, is_vegan in (
        ('Alfredo Sauce', False),
        ('Anchovies', False),
        ('Artichoke Hearts', False),
        ('Bacon', False),
        ('Banana Peppers', False),
        ('Basil', False),
        ('BBQ Sauce', True),
        ('Beef', False),
        ('Black Olives', False),
        ('Bleu Cheese', False),
        ('Brats', False),
        ('Breakfast Sausage', False),
        ('Broccoli', False),
        ('Canadian Bacon', False),
        ('Cheddar', False),
        ('Chicken', False),
        ('Chicken Strips', False),
        ('Chorizo', False),
        ('Chunky Marinara Sauce', False),
        ('Cilantro', False),
        ('Classic Italian Sauce', True),
        ('Corn', False),
        ('Cucumbers', False),
        ('Diced Pickles', False),
        ('Dijon Kraut', False),
        ('Extra Cheese', False),
        ('Extra Spices', False),
        ('Feta', False),
        ('Garlic', False),
        ('Grated Parmesan', False),
        ('Green Olives', False),
        ('Green Peppers', False),
        ('Ham', False),
        ('Hashbrown', False),
        ('Honey Mustard', False),
        ('Hot Marinara Sauce', False),
        ('House Blend Cheese', False),
        ('Hummus', False),
        ('Jalapeno', False),
        ('Meatballs', False),
        ('Mozzarella', False),
        ('Mushroom', False),
        ('Peanut Sauce', True),
        ('Pepperoncini', False),
        ('Pepperoni', False),
        ('Pesto Sauce', True),
        ('Pineapple', False),
        ('Provolone', False),
        ('Ranch', False),
        ('Red Onion', False),
        ('Red Peppers', False),
        ('Ricotta Sauce', False),
        ('Ricotta Topping', False),
        ('Romano', False),
        ('Salami', False),
        ('Salsa', False),
        ('Sauerkraut', False),
        ('Sausage', False),
        ('Sauteed Yellow Onions', False),
        ('Spices', False),
        ('Spinach', False),
        ('Sprouts', False),
        ('Swiss', False),
        ('Tex-Mex', False),
        ('Tomatoes', False),
        ('Turkey', False),
        ('Walnuts', False),
        ('Wedge Fries', False),
        ('Yellow Onion', False)
    ):
        models.Topping.objects.get_or_create(label=label, is_vegan=is_vegan)
class Command(BaseCommand):
    """Management command: `manage.py <name>` seeds test pizza data."""
    help = 'Populates test pizza data into the database.'

    def handle(self, *args, **options):
        # Both helpers use get_or_create, so re-running is safe.
        add_sizes()
        add_toppings()
|
class SlidingWindow:
    """Minimum-size subarray sum (LeetCode 209)."""

    def minSubArrayLen(self, s, nums):
        """Return the length of the shortest contiguous subarray of *nums*
        whose sum is >= *s*, or 0 if no such subarray exists.

        Canonical two-pointer sliding window: extend on the right, shrink
        from the left while the window sum stays >= s. O(n) time, O(1)
        extra space. (The original's `len(nums) == 0` test was redundant
        with `not nums` and its manual pointer bookkeeping is simplified.)
        """
        if not nums:
            return 0
        best = len(nums) + 1  # sentinel: longer than any real window
        window_sum = 0
        left = 0
        for right, value in enumerate(nums):
            window_sum += value
            while window_sum >= s:
                best = min(best, right - left + 1)
                window_sum -= nums[left]
                left += 1
        return best if best <= len(nums) else 0
if __name__ == "__main__":
    # Smoke test: expect 2 (the subarray [4, 3]).
    cases = [
        (7, [2, 3, 1, 2, 4, 3])
    ]
    for target, values in cases:
        print(SlidingWindow().minSubArrayLen(target, values))
import re
class Solution:
    def addBinary(self, a: str, b: str) -> str:
        """Add two binary-digit strings and return the sum as a string.

        The shorter operand is padded with leading zeros, so the result has
        at least max(len(a), len(b)) digits (plus one on final carry) —
        matching the original implementation's width semantics.
        """
        width = max(len(a), len(b))
        a = a.zfill(width)
        b = b.zfill(width)
        carry = 0
        digits = []
        # Walk from the least significant digit, tracking the carry
        # arithmetically instead of the original six-branch if/elif ladder.
        for i in range(width - 1, -1, -1):
            total = int(a[i]) + int(b[i]) + carry
            digits.append(str(total % 2))
            carry = total // 2
        if carry:
            digits.append('1')
        return ''.join(reversed(digits))
# Smoke test: 0 + 0 in binary.
result = Solution().addBinary("0", "0")
print(result)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
from PIL import Image
import numpy
frame = 10
def get_image(video_path, image_path):
    """Extract frames from *video_path* into numbered PNGs under *image_path*.

    Invokes the external `ffmpeg` binary at the module-level `frame` rate;
    output files are named 00001.png, 00002.png, ...
    """
    try:
        # NOTE(review): paths are interpolated into a shell command without
        # quoting — prefer subprocess.run([...]) if they may contain spaces
        # or shell metacharacters.
        os.system('ffmpeg -i {0} -r {1} -f image2 {2}\%05d.png'.format(video_path, frame, image_path))
    except Exception:
        # The original bare `except:` would also swallow KeyboardInterrupt
        # and SystemExit; catch Exception only.
        print('ERROR !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
# %%
import os
import sys
import pickle
import multiprocessing
import mne
from mne.decoding import Vectorizer
from sklearn import svm
from sklearn import metrics
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) # noqa
import deploy
from local_tools import FileLoader, Enhancer
import numpy as np
import matplotlib.pyplot as plt
# %%
# Gabor kernel: a 0.2 s cosine windowed by a Gaussian envelope, sampled at
# 100 Hz (21 points centred on t = 0).
# `np.int` was removed in NumPy 1.24 — the builtin int is the replacement.
times = np.linspace(-0.10, 0.10, int(100 * 0.2) + 1)
times
s2 = 0.2 ** 2
cos = np.cos(2 * np.pi / 0.2 * times)
exp = np.exp(- times ** 2 / s2)
GABOR_KERNEL = cos * exp
# Output folder for the per-subject pickled predictions.
# makedirs(exist_ok=True) replaces the try/mkdir/except-pass: same effect,
# but real failures (e.g. permissions) are no longer silently swallowed.
results_dir = os.path.join('.', 'SVM_xdawn')
os.makedirs(results_dir, exist_ok=True)
assert(os.path.isdir(results_dir))
def get_X_y(epochs):
    """Split an epochs object into a data matrix X and label vector y.

    X is whatever epochs.get_data() yields; y is the event-id column
    (last column) of the events array.
    """
    return epochs.get_data(), epochs.events[:, -1]
def mvpa(name):
    """Run leave-one-session-out MVPA decoding for subject *name*.

    For each fold: fit Xdawn spatial filters on the other sessions, crop
    and baseline-correct, train an RBF SVM on the enhanced training epochs
    and predict the held-out session. The accumulated per-fold results are
    pickled to '<results_dir>/<name>.pkl' (rewritten after every fold, so a
    partial file survives interruption).
    """
    # Setting: CROP = (tmin, tmax, baseline_end); events of interest.
    CROP = (0, 0.8, 0.2)
    EVENTS = ['1', '2', '4']
    # Load epochs
    loader = FileLoader(name)
    loader.load_epochs(recompute=False)
    print(loader.epochs_list)
    # Prepare [predicts] for results
    predicts = []
    # Cross validation: each session is the test set exactly once.
    num_epochs = len(loader.epochs_list)
    for exclude in range(num_epochs):
        # Start on separate training and testing dataset
        print(f'---- {name}: {exclude} | {num_epochs} ----------------------')
        includes = [e for e in range(
            len(loader.epochs_list)) if not e == exclude]
        excludes = [exclude]
        train_epochs, test_epochs = loader.leave_one_session_out(includes,
                                                                 excludes)
        print(train_epochs, test_epochs)
        print('Xdawn --------------------------------')
        enhancer = Enhancer(train_epochs=train_epochs,
                            test_epochs=test_epochs)
        train_epochs, test_epochs = enhancer.fit_apply()
        print('Baseline and Crop --------------------')
        train_epochs = train_epochs.crop(CROP[0], CROP[1])
        train_epochs.apply_baseline((CROP[0], CROP[2]))
        test_epochs = test_epochs.crop(CROP[0], CROP[1])
        test_epochs.apply_baseline((CROP[0], CROP[2]))
        print('Get data -----------------------------')
        X_train, y_train = get_X_y(train_epochs[EVENTS])
        X_test, y_test = get_X_y(test_epochs[EVENTS])
        print('Training -----------------------------')
        # Flatten epochs, standardise features, then an RBF SVM with
        # balanced class weights (the three event classes are unbalanced).
        clf = svm.SVC(gamma='scale', kernel='rbf', class_weight='balanced')
        pipeline = make_pipeline(Vectorizer(), StandardScaler(), clf)
        pipeline.fit(X_train, y_train)
        y_pred = pipeline.predict(X_test)
        print('Saving ------------------------------')
        # Fall back to 0 when the epochs object exposes no .times.
        try:
            times = train_epochs.times
        except:
            times = 0
        predicts.append(dict(y_pred=y_pred,
                             y_test=y_test,
                             X_test=X_test,
                             y_train=y_train,
                             X_train=X_train,
                             times=times))
        with open(os.path.join(results_dir,
                               f'{name}.pkl'), 'wb') as f:
            pickle.dump(predicts, f)
    pass
# %%
# Launch one MVPA worker process per subject (MEG_S01 .. MEG_S10).
# NOTE(review): all ten processes start concurrently and are never joined;
# confirm the machine has memory for ten simultaneous decoding runs.
for idx in range(1, 11):
    name = f'MEG_S{idx:02d}'
    # mvpa(name)
    p = multiprocessing.Process(target=mvpa, args=(name,))
    p.start()
# %%
# name = 'MEG_S05'
# # Perform MVPA
# # Setting
# BASELINE = (None, 0)
# CROP = (0, 0.8)
# EVENTS = ['1', '2', '4']
# # Load epochs
# loader = FileLoader(name)
# loader.load_epochs(recompute=False)
# print(loader.epochs_list)
# # Prepare [predicts] for results
# predicts = []
# # Cross validation
# num_epochs = len(loader.epochs_list)
# for exclude in range(num_epochs):
# # Start on separate training and testing dataset
# print(f'---- {name}: {exclude} | {num_epochs} ----------------------')
# includes = [e for e in range(
# len(loader.epochs_list)) if not e == exclude]
# excludes = [exclude]
# train_epochs, test_epochs = loader.leave_one_session_out(includes,
# excludes)
# print(train_epochs, test_epochs)
# print('Xdawn --------------------------------')
# enhancer = Enhancer(train_epochs=train_epochs,
# test_epochs=test_epochs)
# train_epochs, test_epochs = enhancer.fit_apply()
# train_epochs.apply_baseline((None, 0))
# test_epochs.apply_baseline((None, 0))
# X_train, y_train = get_X_y(train_epochs[EVENTS])
# X_test, y_test = get_X_y(test_epochs[EVENTS])
# print('Got data ----------------------------')
# break
# # %%
# X_test.shape
# # %%
# fig, axes = plt.subplots(3, 1)
# for j, eid in enumerate([1, 2, 4]):
# X = X_test[y_test == eid]
# axes[j].plot(np.mean(X, axis=0).transpose())
# # %%
# epochs = mne.BaseEpochs(info=train_epochs[EVENTS].info,
# events=train_epochs[EVENTS].events,
# data=X_train,
# tmin=train_epochs.times[0],
# tmax=train_epochs.times[-1])
# # %%
# for eid in ['1', '2', '4']:
# epochs[eid].average().plot_joint(title=f'New {eid}')
# train_epochs[eid].average().plot_joint(title=f'Old {eid}')
# # %%
# # %%
|
import fitsio
import numpy as np
import numpy.random as npr
from scipy.optimize import minimize
from scipy import interpolate
from funkyyak import grad, numpy_wrapper as np
from redshift_utils import load_data_clean_split, project_to_bands, fit_weights_given_basis
from slicesample import slicesample
import matplotlib.pyplot as plt
import seaborn as sns
import sys, os
from quasar_infer_photometry import pixel_likelihood
sns.set_style("white")
current_palette = sns.color_palette()
npr.seed(42)
if __name__=="__main__":
    # Python 2 script: MAP estimation of quasar redshifts from photometry.
    out_dir = "/Users/acm/Dropbox/Proj/astro/DESIMCMC/tex/quasar_z/figs/"
    ## load a handful of quasar spectra
    lam_obs, qtrain, qtest = \
        load_data_clean_split(spec_fits_file = 'quasar_data.fits',
                              Ntrain = 400)
    ## load in basis
    basis_string = "1364" # size of lam0
    file_contents = np.load('cache/basis_fit_K-4_V-%s.npz'%basis_string)
    lam0 = file_contents['lam0']
    lam0_delta = file_contents['lam0_delta']
    th = file_contents['th']
    # NOTE(review): `ParamParser` and `N` are not defined anywhere in this
    # file; as written the next three lines raise NameError — confirm the
    # missing import/definition.
    parser = ParamParser()
    omegas = th[:,:N]
    betas = th[:, N:]
    W = np.exp(omegas)
    # Basis rows normalised to sum to one.
    B = np.exp(betas)
    B = B / B.sum(axis=1, keepdims=True)
    ## map inference for each quasar
    Nqso = qtest['spectra'].shape[0]
    z_maps = np.zeros(Nqso)
    for n in range(Nqso):
        ## fit w's and red-shift w/ MCMC
        print " ... map %d of %d "%(n, Nqso)
        spec_n = qtest['spectra'][n, :]
        spec_n[spec_n < 0] = 0  # clamp negative flux to zero
        spec_ivar_n = qtest['spectra_ivar'][n, :]
        z_n = qtest['Z'][n]
        w_n = fit_weights_given_basis(B, lam0, spec_n, spec_ivar_n, z_n, lam_obs)
        #mu_n = project_to_bands(np.atleast_2d(spec_n), lam_obs)
        mu_n = project_to_bands(np.atleast_2d(w_n.dot(B)), lam0)
        # Synthetic photon counts drawn around the projected fluxes.
        x_n = npr.poisson(mu_n).ravel()
        #w_n = np.random.rand(len(w_n))
        if False:
            plt.plot(lam_obs, spec_ivar_n, alpha=.5, color='grey')
            plt.plot(lam_obs, spec_n, label="noisy obs spec")
            plt.plot(lam0*(1+z_n), w_n.dot(B), label="fit mean")
            plt.legend()
            plt.show()
        ## do maximum likelihood using numerical differences
        # Negative log-likelihood over th = (weights..., redshift).
        lnfun = lambda th: -pixel_likelihood(th[-1], th[:-1], x_n, lam0, B)
        def lnjac(th):
            # Central finite-difference gradient of lnfun (unused below).
            dth = np.zeros(len(th))
            for i in range(len(th)):
                de = np.zeros(len(th))
                de[i] = 1e-5
                dth[i] = (lnfun(th + de) - lnfun(th - de)) / (2*1e-5)
            return dth
        #th0 = np.concatenate((w_n, [z_n]))
        # Random non-negative initialisation: 4 weights plus z=0.5.
        th0 = np.concatenate([.0001*np.random.rand(4), [.5]])
        res = minimize(x0 = th0,
                       fun = lnfun,
                       #jac = lnjac,
                       method = 'L-BFGS-B',
                       #method = 'TNC',
                       #method = 'SLSQP',
                       bounds = [(0, None)]*len(th0))
        z_maps[n] = res.x[-1]
        print res.x
        print " true = %2.4f, pred = %2.4f"%(qtest['Z'][n], z_maps[n])
        print " result less than GT: ", lnfun(res.x) < lnfun(np.concatenate((w_n, [z_n])))
        # NOTE(review): `weights_to_bands` is not defined/imported here —
        # this line raises NameError if reached.
        print weights_to_bands(res.x[-1], res.x[:-1], x_n, lam0, B)
        print mu_n
        print lnfun(res.x)
        print lnfun(np.concatenate((w_n, [z_n])))
        # now
        if False:
            w_n_hat = res.x[:-1]
            z_n_hat = res.x[-1]
            spec_n_hat = w_n.dot(B) # rest frame
            lam_n_hat = lam0 * (1 + z_n_hat)
            plt.plot(lam_obs, qtest['spectra'][n,:], label = "observed ($z = %2.2f$)"%z_n)
            plt.plot(lam_n_hat, spec_n_hat, label = "photo_spec ($\hat z = %2.2f$)"%z_n_hat)
            plt.legend()
            plt.show()
    # Scatter plot: spectroscopic z vs photometric MAP estimate.
    fig = plt.figure(figsize=(6, 6))
    max_z = max(qtest['Z'].max(), z_maps.max()) + .2
    min_z = -.2
    plt.scatter(qtest['Z'], z_maps, color=current_palette[2])
    plt.plot([min_z, max_z], [min_z, max_z], linewidth=2, alpha=.5)
    #for n in range(len(z_pred)):
    #    plt.plot([qtest['Z'][n], qtest['Z'][n]], [z_lo[n], z_hi[n]], alpha = .5, color = 'grey')
    plt.xlim(min_z, max_z)
    plt.ylim(min_z, max_z)
    plt.xlabel("full spec measurment, $z_{spec}$", fontsize=14)
    plt.ylabel("photometric measurement, $z_{photo}$", fontsize=14)
    plt.title("Posterior expectation model predictions", fontsize=14)
    plt.savefig(out_dir + "map_z_preds_K-%d_V-%d.pdf"%B.shape, bbox_inches='tight')
|
# Module-level annotation demo: `s` is declared, `n` bound with a default.
s: str
n: int = 10
s = 'a'


def is_equal(n1: int, n2: int) -> bool:
    """Return True when the two integers are equal."""
    return n2 == n1


print(is_equal(3, 3))
from datetime import timedelta
from sedate import utcnow
from uuid import uuid4
from pytest import mark
@mark.flaky(reruns=3)
def test_audit_for_course(client, scenario):
    """
    Story:
    For a course with refresh interval,
    an admin checks the list of attendees.
    This list contains when the attendee last visited this course during an
    event.
    The editor might do the same. He can see all attendees he is permitted
    to by attendee.permissions which match attendee.organisation
    """
    # Test what happens if no course is publicly available
    client.login_admin()
    url = '/fsi/audit'
    page = client.get(url)
    options = [opt[2] for opt in page.form['course_id'].options]
    assert options == ['Keine']
    # Course options should be hidden but there
    org_options = [opt[2] for opt in page.form['organisations'].options]
    assert org_options == ['Keine']
    # Test pdf: no export link while there is nothing to audit
    assert 'PDF' not in page
    # Adds data: one mandatory-refresh course with two events 30 days apart
    scenario.add_course(refresh_interval=2, mandatory_refresh=True)
    scenario.commit()
    scenario.refresh()
    course = scenario.latest_course
    now = utcnow()
    for ix in range(2):
        scenario.add_course_event(
            course,
            start=now + timedelta(days=30 * ix)
        )
    scenario.commit()
    scenario.refresh()
    # Test that course input form vanishes if only one course is not hidden
    page = client.get(url)
    options = [opt[2] for opt in page.form['course_id'].options]
    assert options == [scenario.latest_course.name]
    # Test if a course was chosen automatically
    assert page.form['course_id'].value == str(scenario.latest_course.id)
    # PDF export must not raise now that a course exists
    page.click('PDF')
    # add 21 attendee members (7 per organisation)
    for org in ('ZZW', 'ZHW', 'UBV'):
        for i in range(7):
            scenario.add_attendee(
                role='member',
                organisation=org,
                username=f'{uuid4().hex}.manila@quine.ch'
            )
    scenario.commit()
    scenario.refresh()
    # Grant the editor visibility of two of the three organisations
    editor = scenario.first_user('editor')
    editor.permissions = ['UBV', 'ZHW']
    scenario.commit()
    scenario.refresh()
    page = client.get(url)
    assert page.form['organisations'].value is None
    # test next subscription: one attendee signs up for the second event
    scenario.add_subscription(
        scenario.course_events[1], scenario.attendees[0]
    )
    scenario.commit()
    scenario.refresh()
    page = client.get(url)
    assert 'class="next-subscription"' in page
    # Test pagination
    assert page.pyquery('ul.pagination > li.current > a')[0].text == "1"
    page = page.click('2', index=-2)
    assert page.pyquery('ul.pagination > li.current > a')[0].text == "2"
    assert page.form['organisations'].value is None
    # Test reset of pagination upon choosing letter
    page = page.click('M', index=-1)
    # Test reset of page when filtering with last name
    assert page.pyquery('ul.pagination > li.current > a')[0].text == "1"
    # Test pdf export still works with the filtered listing
    page.click('PDF')
|
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, Session
# SQLALCHEMY_DATABASE_URL = "sqlite:///./sql_app.db"
# NOTE(review): database credentials are hard-coded here; consider moving
# them to configuration/environment variables.
SQLALCHEMY_DATABASE_URL = "postgresql://api:secret@localhost/fastapidb"
engine = create_engine(SQLALCHEMY_DATABASE_URL)
# Session factory: explicit commit and flush only (no autocommit/autoflush).
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
# Declarative base class all ORM models derive from.
Base = declarative_base()
def session_factory():
    """Yield a database session and guarantee it is closed afterwards.

    Suitable as a FastAPI dependency (``Depends(session_factory)``).
    """
    session = SessionLocal()
    try:
        yield session
    finally:
        session.close()
class BaseRepository:
    """Generic CRUD helper bound to one SQLAlchemy session and one model."""

    def __init__(self, session: Session, model: Base):
        self.session = session
        self.model = model

    @property
    def query(self):
        """A fresh Query over this repository's model."""
        return self.session.query(self.model)

    def get_all(self, skip: int = 0, limit: int = 100):
        """Return a page of rows: skip *skip*, cap the result at *limit*."""
        return self.query.offset(skip).limit(limit).all()

    def get(self, id):
        """Fetch a single row by primary key (None when absent)."""
        return self.query.get(id)

    def create(self, **kwargs):
        """Insert a row built from *kwargs*; return the refreshed instance."""
        instance = self.model(**kwargs)
        self.session.add(instance)
        self.session.commit()
        self.session.refresh(instance)
        return instance
# Squares of 1..10. The original list contained the float `49.64`, a typo
# for the two squares `49, 64` (a misplaced decimal point for a comma).
a = [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
# Keep only the even squares.
b = [x for x in a if x % 2 == 0]
print(b)
|
from Loan import Loan
def main():
    """Prompt for loan terms, build a Loan, and print its summary.

    float()/int() replace the original eval() calls: eval executes
    arbitrary user input (a code-injection hole) and accepts non-numeric
    expressions; the numeric constructors fail fast with ValueError.
    """
    annualInterestRate = float(input("Enter yearly interest rate, for example, 7.25: "))
    numberOfYears = int(input("Enter number of years as an integer: "))
    loanAmount = float(input("Enter loan amount, for example, 120000.95: "))
    borrower = input("Enter a borrower's name: ")
    loan = Loan(annualInterestRate, numberOfYears, loanAmount, borrower)
    # The original called loan.__str__() and discarded the result, showing
    # nothing; print(loan) actually displays the summary.
    print(loan)


if __name__ == '__main__':
    main()
|
#!/usr/bin/python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import re
from collections import namedtuple
import parse_gc_graph
import argparse
# get an overview of what is in the heap
# Command line arguments
parser = argparse.ArgumentParser(description='Overview of what is in the GC heap.')
# NOTE(review): the help text says "cycle collector graph" while the
# description says GC heap — confirm which graph format is expected.
parser.add_argument('file_name',
                    help='cycle collector graph file name')
args = parser.parse_args()
def loadGraph(fname):
    """Parse a GC edge file and return (graph, graph attributes).

    Progress is reported on stderr.
    """
    sys.stderr.write ('Parsing {0}. '.format(fname))
    sys.stderr.flush()
    (g, ga) = parse_gc_graph.parseGCEdgeFile(fname)
    # Collapse multi-edges so each (src, dst) pair appears at most once.
    g = parse_gc_graph.toSinglegraph(g)
    sys.stderr.write('Done loading graph.')
    return (g, ga)
####################
# Matches hex addresses with an optional 0x prefix (currently unused below).
addrPatt = re.compile ('(?:0x)?[a-fA-F0-9]+')

(g, ga) = loadGraph (args.file_name)

# Histogram of node labels: label -> number of nodes carrying that label.
# NOTE: dict.iteritems and print-statement syntax make this Python 2 only.
counter = {}
for src, dst in ga.nodeLabels.iteritems():
    counter[dst] = counter.get(dst, 0) + 1
for name, count in counter.iteritems():
    print '%(num)8d %(label)s' % {'num':count, 'label':name}
|
import urllib
from bs4 import BeautifulSoup
from xlwt import *
import random
# Interactive novel scraper for biquyun.com / biquge.com.tw.
book_name=""
book_num=""
# Prompt: "enter a book title" (the site search expects the query
# gb2312-encoded and URL-quoted).
book_name = input("请输入书名:")
book_name1 = urllib.parse.quote(book_name.encode('gb2312'))
url = "http://www.biquyun.com/modules/article/soshu.php?searchkey="+book_name1
# Chapters are appended to '<book name>.txt' in the working directory.
fileHandle = open ( book_name+'.txt', 'a',encoding='utf-8' )
print(url)
# Browser-like request headers to avoid being blocked by the site.
headers = {'Accept': '*/*',
           'Accept-Language': 'en-US,en;q=0.8',
           'Cache-Control': 'max-age=0',
           'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36',
           'Connection': 'keep-alive',
           'Referer': 'http://www.biquge.com.tw/'
           }
# proxy = {'http':''}
# proxy_support = urllib.request.ProxyHandler(proxy)
# opener = urllib.request.build_opener(proxy_support)
# urllib.request.install_opener(opener)
def getCatalogueList(book_url):
    """Fetch a book's index page and return the list of chapter hrefs.

    The page is decoded as GBK (invalid bytes ignored); chapter links live
    inside the <div id="list"> element.
    """
    request = urllib.request.Request(book_url, None, headers)
    response = urllib.request.urlopen(request, None)
    html = response.read().decode('gbk', 'ignore')
    soup = BeautifulSoup(html, "html.parser")
    anchors = soup.find(name='div', attrs={"id": 'list'}).find_all('a')
    return [anchor.get('href') for anchor in anchors]
def write_novel(text_url):
    """Download one chapter page and append its title + text to fileHandle.

    Each network step is retried exactly once on failure (the nested
    try/except blocks re-issue the request/read by hand). Error messages
    are printed in Chinese: "page read failed, retrying" / "connection
    failed, reconnecting".
    """
    try:
        text_request = urllib.request.Request(text_url,None,headers)
        text_response = urllib.request.urlopen(text_request,None,timeout=10)
        try:
            text_html = text_response.read().decode('gbk','ignore')
        except Exception as e:
            print("读取网页失败,正在尝试重新读取!")
            # NOTE(review): re-reading the same exhausted response object
            # will usually yield empty data — a fresh request is likely
            # what was intended.
            text_html = text_response.read().decode('gbk','ignore')
    except Exception as e:
        print("连接请求失败,正在尝试重新连接!")
        text_request = urllib.request.Request(text_url,None,headers)
        text_response = urllib.request.urlopen(text_request,None,timeout=10)
        try:
            text_html = text_response.read().decode('gbk','ignore')
        except Exception as e:
            print("读取网页失败,正在尝试重新读取!")
            text_html = text_response.read().decode('gbk','ignore')
    # The chapter body lives in <div id="content">; <h1> holds the title.
    text_soup = BeautifulSoup(text_html, "html.parser")
    if text_soup!=None:
        text_context = text_soup.find(name='div',attrs={"id":'content'}).get_text('\n','br/')
        if text_context!=None:
            text_title = text_soup.h1.get_text()
            if text_title!=None:
                # "writing chapter: <title>"
                print("正在写入: "+ text_title)
                fileHandle.write (text_title+'\n\n'+text_context+'\n\n')
    #fileHandle.close()
if __name__ == '__main__':
    # Search for the book; single-retry logic mirrors write_novel's.
    catalogue_url_list=[]
    try:
        request = urllib.request.Request(url,None,headers)
        response = urllib.request.urlopen(request,None,timeout=10)
        try:
            html = response.read()
        except Exception as e:
            print("读取网页失败,正在尝试重新读取!")
            html = response.read()
    except Exception as e:
        print("连接请求失败,正在尝试重新连接!")
        request = urllib.request.Request(url,None,headers)
        response = urllib.request.urlopen(request,None,timeout=10)
        try:
            html = response.read()
        except Exception as e:
            print("读取网页失败,正在尝试重新读取!")
            html = response.read()
    soup = BeautifulSoup(html, "html.parser")
    # Two possible result layouts:
    # - no <td class="odd"> cells: the search redirected straight to the
    #   book page (chapter list in <div id="list">);
    # - otherwise a result table of (title, author, ...) triples.
    td_soup = soup.find_all(name='td',attrs={"class":'odd'})
    if td_soup==[]:
        div_soup = soup.find(name='div',attrs={"class":'box_con'})
        if div_soup!=None:
            catalogue_list = soup.find(name='div',attrs={"id":'list'}).find_all('a')
            for item in catalogue_list:
                catalogue_url_list.append(item.get('href'))
            for item in catalogue_url_list:
                item_url = 'http://www.biquge.com.tw'+item
                write_novel(item_url)
            fileHandle.close()
        else:
            print(div_soup)
    else:
        # Result table: every third cell is a title, the next an author.
        s_book_name = []
        s_book_writer = []
        no =[]
        for x in range(0,len(td_soup),3):
            s_book_name.append(td_soup[x].string)
            s_book_writer.append(td_soup[x+1].string)
        print(s_book_name)
        print(s_book_writer)
        # Show "<index>. title: ... author: ..." and ask the user to pick.
        for x in range(0,len(s_book_name)):
            no.append(x)
            print(str(x)+'.书名:'+s_book_name[x]+' 作者:'+s_book_writer[x])
        print(no)
        book_no = input("请输入小说序号:")
        while int(book_no) not in no:
            print('请输入正确的序号')
            book_no = input("请输入小说序号:")
        # Resolve the chosen row back to its catalogue link and download.
        book_url = td_soup[int(book_no)*3].find('a').get('href')
        catalogue_url_list = getCatalogueList(book_url)
        for item in catalogue_url_list:
            item_url = 'http://www.biquge.com.tw'+item
            write_novel(item_url)
        fileHandle.close()
|
"""
Owner: Noctsol
Contributors: N/A
Date Created: 2021-10-24
Summary:
Holds all the custom Exceptions for this project
"""
############################ EXCEPTIONS ############################
class EnvVarNotSet(Exception):
    '''Raised when the environment was never loaded via Environment.load_env().'''
    def __init__(self):
        message = "Environment not set - please call Environment.load_env()"
        self.message = message
        super().__init__(message)
class EnvVarNotExistError(Exception):
    '''Raised when a nonexistent environment variable key is requested.'''
    def __init__(self, env_key):
        message = f"Environment variable key '{env_key}' not found"
        self.message = message
        super().__init__(message)
class EnvVarEmptyError(Exception):
    '''Raised when an environment variable exists but is empty or null.'''
    def __init__(self, env_key):
        message = f"Environment variable key '{env_key}' has an empty or null value"
        self.message = message
        super().__init__(message)
class EnvFileNotExist(Exception):
    '''Raised when no .env file could be located in any searched path.'''
    def __init__(self, path_list):
        searched = ", ".join(path_list)
        message = f"Create an env file please. Unable to find .env file in any of the following paths: {searched}"
        self.message = message
        super().__init__(message)
|
__author__ = 'Stuart'
from flask import jsonify
from app.exceptions import ValidationError
from . import api
def bad_request(message):
    """Build a 400 Bad Request JSON response carrying *message*."""
    payload = jsonify({'error': 'bad request', 'message': message})
    payload.status_code = 400
    return payload
def unauthorized(message):
    """Build a 401 Unauthorized JSON response carrying *message*.

    Flask-HTTPAuth already returns 401 for invalid credentials on its own;
    customizing it here keeps the body consistent with the other JSON
    errors this API returns.
    """
    payload = jsonify({'error': 'unauthorized', 'message': message})
    payload.status_code = 401
    return payload
def forbidden(message):
    """Build a 403 Forbidden JSON response carrying *message*."""
    payload = jsonify({'error': 'forbidden', 'message': message})
    payload.status_code = 403
    return payload
@api.errorhandler(ValidationError)
def validation_error(e):
    """
    Provides response to client. To avoid having to add exception catching code in view functions, a global exception
    handler can be installed. This is ValidationError's exception handler.
    Same @xxx.errorhandler used to register handlers for HTTP status codes, but in this usage takes an exception
    class as argument. Decorated funct will be invoked any time an exception of given class is raised.
    Decorator is obtained from API blueprint, so this handler only will be invoked when exception raised while on a
    route from within the blueprint is being processed.
    :param e:
    :return: a 400 JSON response whose message is the exception's first arg
    """
    # e.args[0] is the message ValidationError was raised with.
    return bad_request(e.args[0])
from __future__ import print_function
from rosetta import *
# from random import randint, random
# import sys
# import time
import lasagne
import numpy as np
import theano
import theano.tensor as ten
# import prep_v031 as prep
from predict_type2p import build_network
from toolbox import get_secstruct
from toolbox import generate_resfile_from_pose
# noinspection PyShadowingNames
def load_evolutron(filename):
# Prepare Theano variables for inputs and targets as well as index to minibatch
inputs = ten.tensor3('inputs')
# Create neural network model (parametrization based command line inputs or else defaults)
print("Building model and compiling functions...")
with np.load(filename) as f:
[filters, filter_size] = f['arr_0'][2:]
param_values = [f['arr_%d' % i] for i in range(1, len(f.files))]
network = build_network(inputs, 700, filters, filter_size)
lasagne.layers.set_all_param_values(network, param_values)
# Create a loss expression for training, i.e., a scalar objective we want to minimize
test_prediction = lasagne.layers.get_output(network, deterministic=True)
test_pred = theano.function([inputs], test_prediction)
return test_pred
# # set up mover
def perturb_bb(pose):
    """Randomly perturb the phi/psi angles of one residue (14..N) by up to
    +/- 25 degrees. Mutates *pose* in place and returns it.
    """
    # The top-of-file `from random import randint, random` is commented
    # out, so bind the names locally to avoid a NameError at call time.
    from random import randint, random

    resnum = randint(14, pose.total_residue())
    pose.set_phi(resnum, pose.phi(resnum) - 25 + random() * 50)
    pose.set_psi(resnum, pose.psi(resnum) - 25 + random() * 50)
    return pose
def main():
    """Monte-Carlo refinement demo combining Rosetta scoring with a
    pre-trained Evolutron network.

    Loads a structure, repacks side chains from a generated resfile, then
    runs a Metropolis Monte-Carlo search over random backbone
    perturbations, keeping the lowest-energy pose.
    """
    # Initialize rosetta
    init()
    # Rosetta all-atom scoring function.
    score_ros = get_fa_scorefxn()
    # Evolutron scoring function (loaded; not yet wired into the search).
    filename = 'networks/o_smodel_500_150_30_30.npz'
    score_cnn = load_evolutron(filename)
    # Load initial protein and view in PyMol
    pose = Pose()
    pose_from_file(pose, 'structures/eco_dna.pdb')
    pymol = PyMOL_Mover()
    pymol.apply(pose)
    # Metropolis temperature for the MonteCarlo object.
    kT = 1.0
    mc = MonteCarlo(pose, score_ros, kT)
    # Pack Rotamers Mover driven by a resfile generated from the pose.
    generate_resfile_from_pose(pose, "1eri.resfile")
    task_design = TaskFactory.create_packer_task(pose)
    parse_resfile(pose, task_design, "1eri.resfile")
    # BUG FIX: the original passed the undefined name `packer_task`
    # (NameError at runtime); the packer task built above is `task_design`.
    pack_mover = PackRotamersMover(score_ros, task_design)
    pack_mover.apply(pose)
    # Monte-Carlo search
    mc.reset(pose)  # assumes we have the lowest energy structure
    for i in range(1, 60000):
        perturb_bb(pose)  # make a change (Search Operator, Mover in Rosetta)
        # Metropolis criterion: accept the move with probability exp(-dE/kT).
        mc.boltzmann(pose)
        # Periodically restart the search from the best pose found so far.
        if i % 1000 == 0:
            mc.recover_low(pose)
    # output lowest-energy structure
    mc.recover_low(pose)
    # GA Search (planned, not implemented):
    # - generation 0: mutate angles of the initial pose to build a population
    # - score every member (fitness), keep the best X individuals
    # - later generations: mutate/crossover the survivors and repeat,
    #   with X chosen based on available running time
from asyncio import coroutine
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.web import asynchronous
import tornado.websocket
import json
from core.player import Player
from core.server import Server
import time
from multiprocessing.managers import BaseManager
class ListManager(BaseManager):
    """Client-side proxy manager for the shared client list served on port 50000."""
    pass


# Expose the remote 'get_client_list' callable through the proxy class.
ListManager.register('get_client_list')
# BUG FIX: multiprocessing authkeys must be bytes.  Under Python 3 the int
# 123 was silently turned into 123 NUL bytes (bytes(123)), so authentication
# with a server using key b'123' could never succeed.
m = ListManager(address=('localhost', 50000), authkey=b'123')
class ServerHandler(tornado.websocket.WebSocketHandler):
    """WebSocket handler that dispatches JSON lobby requests (connect,
    list rooms, create room) to the matching method and writes JSON
    responses back through the wrapped handler.

    NOTE(review): __init__ does not call the tornado base constructor --
    confirm instances are created manually around an existing handler
    rather than by tornado's routing machinery.
    """

    def __init__(self, handler):
        # Underlying handler used for writing messages back to the client.
        self.handler = handler

    @asynchronous
    def on_message(self, message):
        #print("Server Handler from {0} recived".format(self.handler.request.remote_ip))
        #print(json.dumps(message, indent=4))
        self.handle_request(message)

    @asynchronous
    def handle_request(self, request):
        # Fallback response for unrecognised message names.
        response = {'message': "I don't understand you",
                    'data': {}}
        # NOTE(review): 'request' is indexed like a dict here -- confirm the
        # websocket payload is decoded from JSON before reaching this point.
        method_name = request['message']
        data = request['data']
        if method_name == "connectToServer":
            self.connect_to_server(data)
        elif method_name == "getRoomsList":
            self.get_rooms_list()
        elif method_name == "createNewRoom":
            self.create_new_room(data)
        else:
            self.write_response(response)

    def write_response(self, response):
        # Echo to stdout for debugging, then push the message to the client.
        print(response)
        self.handler.write_message(response)

    @asynchronous
    def connect_to_server(self, player_name):
        player_id = self.add_new_user(player_name, callback=self.write_response)
        # response = {'message': "connectSuccess",
        #             'data': {
        #                 'player_id' : player_id
        #             }
        #            }
        # self.write_response(response)

    @gen.coroutine
    @asynchronous
    def add_new_user(self, player_name, callback):
        # BUG(review): every line that builds the user (and the 'response'
        # it should produce) is commented out, so 'response' below is an
        # undefined name and this method raises NameError when called.
        #m.connect()
        #player = Player(self, player_name)
        #user_list = m.get_client_list()
        #new_user = Player(self, "Vacek")
        #user_list.append(new_user)
        #user_list.append("XD")
        #player_id = new_user.id
        callback(response)

    def get_rooms_list(self):
        """Send the server's current room list to the client."""
        server = Server.get_from_memcached()
        rooms = []
        rooms = server.get_rooms_list()
        # Sample room rows kept around for manual testing.
        ant = [3, 'AntekTworca', 0, 8, 'Labirynt - zaprszam']
        adam = [1, 'AdamTworca', 5, 8, 'Gramy w mafie, polecam']
        raf = [2, 'RafalTworca', 0, 8, 'sssa']
        #rooms.append(ant)
        #rooms.append(adam)
        #rooms.append(raf)
        response = {'message': "rooms",
                    'data': {
                        'rooms': rooms
                    }
                    }
        self.write_response(response)

    def create_new_room(self, data):
        """Create a room named data['room_name'] owned by data['creator_id']."""
        room_name = data['room_name']
        creator_id = data['creator_id']
        server = Server.get_from_memcached()
        server.create_room(creator_id, room_name)
        # NOTE(review): the reply's 'room_id' field carries the room *name*,
        # not an id -- confirm the client expects this.
        response = {'message': "createRoomSuccess",
                    'data': {
                        'room_id' : room_name
                    }
                    }
        self.write_response(response)
import os

# Sample text used by all of the write examples below.
poem = '''There was a young lady named Bright,
Whose speed was far faster than light;
She started one day
In a relative way,
And returned on the previous night.'''
lines1 = ["One", "Two", "Three", "Four", "Five"]
lines2 = ["One\n", "Two\n", "Three\n", "Four\n", "Five\n"]

# Make sure the output directory exists before writing into it.
os.makedirs('./ch08', exist_ok=True)

# write(): dump the whole poem in one call.
# (All files are opened via 'with' so they are closed even if a write fails.)
with open('./ch08/relativity1.txt', 'wt') as fout:
    fout.write(poem)

# print() with empty sep/end writes the poem verbatim (no trailing newline).
with open('./ch08/relativity2.txt', 'wt') as fout:
    print(poem, file=fout, sep='', end='')

# print() a list: writes its repr plus a newline.
with open('./ch08/relativity3.txt', 'wt') as fout:
    print(lines1, file=fout)

# Append with custom separator and terminator.
with open('./ch08/relativity3.txt', 'a') as fout:
    print(lines1, 'hello', file=fout, sep='%%', end='$$$')

with open('./ch08/relativity3.txt', 'a') as fout:
    print(lines2, file=fout)

# writelines(): concatenates the strings without adding newlines.
with open('./ch08/relativity4.txt', 'wt') as fout:
    fout.writelines(lines1)

# lines2 already carry their own '\n' terminators.
with open('./ch08/relativity4.txt', 'a') as fout:
    fout.writelines(lines2)

# Chunked writing: emit the poem in fixed-size slices.
with open('./ch08/relativity5.txt', 'wt') as fout:
    size = len(poem)
    offset = 0
    chunk = 100
    while offset <= size:
        fout.write(poem[offset:offset + chunk])
        offset += chunk

with open('./ch08/relativity6.txt', 'wt') as fout:
    print(poem, file=fout, sep='', end='')
|
# import pygame
from network import Network
import msvcrt as m
run = True
# Network wraps the socket connection to the chat server.
n = Network()
# startPos = read_pos(n.getPos())
# p = Player(startPos[0],startPos[1],100,100,(0,255,0))
# p2 = Player(0,0,100,100,(255,0,0))
# Simple console chat loop.
# NOTE(review): msvcrt.getch() blocks until a key is pressed and input()
# then reads the full line, so the first keypress only acts as a trigger.
while run:
    message=""
    if m.getch():
        message=str(input("Type your message :>"))
        # Send the message and print whatever the server sends back.
        message_recieved = n.send(message)
        print("\t\t\t\t\t:>",message_recieved)
|
from flask import Flask, request, redirect, render_template
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# Enable auto-reload and in-browser tracebacks; disable for production.
app.config['DEBUG'] = True
# Local MySQL connection for the blog database (dev credentials).
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://build-a-blog:helloworld@localhost:8889/build-a-blog'
# Log every generated SQL statement.
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
def is_blog_valid(user_input):
    """Return True when the submitted field is non-empty.

    Used to validate both the title and the body of a new blog post.
    """
    # The comparison already yields the boolean we need; no if/else required.
    return user_input != ""
class Blog(db.Model):
    """A single blog post persisted through Flask-SQLAlchemy."""
    # Auto-incrementing primary key, assigned by the database on commit.
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(120))
    body = db.Column(db.String(720))

    def __init__(self, title, body):
        self.title = title
        self.body = body
@app.route('/newpost', methods=['GET','POST'])
def newpost():
    """Render the new-post form (GET) or validate and save a post (POST).

    On a valid POST the entry is stored and the client is redirected to
    its individual page; on invalid input the form is re-rendered with
    error messages and the submitted values preserved.
    """
    if request.method == 'POST':
        entry_title = request.form['title']
        entry_body = request.form['body']
        title_error = ""
        body_error = ""
        if not is_blog_valid(entry_title):
            title_error = "Enter blog title"
        if not is_blog_valid(entry_body):
            body_error = "Enter blog body"
        if not title_error and not body_error:
            new_entry = Blog(entry_title, entry_body)
            db.session.add(new_entry)
            db.session.commit()
            # The id is only assigned by the database during commit.
            new_entry_id = str(new_entry.id)
            new_entry_URL = '/individual?id=' + new_entry_id
            return redirect(new_entry_URL)
        else:
            # Re-render the form with the errors and the user's input.
            return render_template('newpost.html',
                entry_title=entry_title,entry_body=entry_body,
                title_error=title_error,body_error=body_error)
    else:
        entries = Blog.query.all()
        return render_template('newpost.html', title="Add a Blog Post!", entries=entries)
@app.route('/blog', methods=['GET','POST'])
def blog():
    """List every blog post."""
    entries = Blog.query.all()
    return render_template('blog.html', title="List of Blog Posts", entries=entries)
@app.route('/individual', methods=['GET', 'POST'])
def individual():
    """Show a single blog post selected by the ?id= query parameter."""
    # NOTE(review): a missing or unknown id makes .get() return None; the
    # template receives entry=None in that case.
    id = request.args.get('id')
    entry = Blog.query.get(id)
    return render_template('individual.html', title="Blog", entry=entry)
# Run the development server only when executed directly (not on import).
if __name__ == '__main__':
    app.run()
import math
import time
import logging
import itertools
import paramiko
def download_file_from_pepper(config, remote_path, local_path):
    """Download a file via SFTP from the pepper to a local file system.

    The remote file is removed only after a successful transfer.

    Args:
        config: object exposing Ip, Username and Password attributes.
        remote_path: path of the file on the robot.
        local_path: destination path on the local file system.
    """
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(
        config.Ip,
        username=config.Username,
        password=config.Password
    )
    # FIX: close the SFTP session and SSH connection even when the
    # transfer raises; previously both leaked on any exception.
    try:
        sftp = ssh.open_sftp()
        try:
            sftp.get(remote_path, local_path)
            # Delete the remote copy only once the download succeeded.
            sftp.remove(remote_path)
        finally:
            sftp.close()
    finally:
        ssh.close()
def upload_file_to_pepper(config, local_path, remote_path):
    """Upload a file to the pepper.

    Args:
        config: object exposing Ip, Username and Password attributes.
        local_path: path of the file on the local file system.
        remote_path: destination path on the robot.
    """
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(
        config.Ip,
        username=config.Username,
        password=config.Password
    )
    # FIX: guarantee the SFTP session and SSH connection are closed even
    # when the transfer raises; previously both leaked on any exception.
    try:
        sftp = ssh.open_sftp()
        try:
            sftp.put(local_path, remote_path)
        finally:
            sftp.close()
    finally:
        ssh.close()
def rotate_head_until(robot, predicate):
    """Sweep the robot's head yaw until *predicate* returns a truthy value.

    The head pitch is centred first, then the yaw alternates left/right
    with increasing amplitude, cycling forever until the predicate
    succeeds.  Returns whatever truthy value the predicate produced.

    NOTE(review): loops indefinitely if the predicate never succeeds.
    """
    # Yaw positions in degrees, alternating sides with widening sweep.
    ANGLES = [15, -15, 30, -30, 45, -45, 60, -60, 75, -75, 90, -90]
    # move head pitch to middle position
    robot.ALMotion.setAngles("HeadPitch", 0, 1)
    robot.ALMotion.setAngles("HeadYaw", 0, 0.1)
    angles_cycle = itertools.cycle(ANGLES)
    while True:
        result = predicate()
        if result:
            return result
        angle = next(angles_cycle)
        logging.info("Rotating head angle to %f", angle)
        # setAngles expects radians; 0.2 is the fraction of maximum speed.
        robot.ALMotion.setAngles("HeadYaw", math.radians(angle), 0.2)
        # Give the head time to move before re-checking.
        time.sleep(0.5)
|
"""
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/oj_custom"
# docs_base_url = "https://[org_name].github.io/oj_custom"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
    """Inject the website brand label into the docs rendering context."""
    setattr(context, "brand_html", "OJ Custom")
|
import array as arr
# Type code 'i' means signed int; use 'f' for float, etc.
a = arr.array ('i',[2,5,8,4,5,1,2])
print(a)
#import array as arr
#to declare array use arrayname = arr.array
#array length
print(len(a))
#add one element at the end of the array
a.append(7)
print(a)
#extend: append several elements at once
a.extend([4,5])
print(a)
#insert value 5 at index 3
a.insert(3,5)
print(a)
#pop removes (and returns) the last element
a.pop()
print(a)
#remove deletes the first occurrence of the value 5
a.remove(5)
#concatenation works only between arrays of the same type code
#where i is type code and it refers to integer type for float we can use f and for every data type using type code in python
a = arr.array ('i',[2,5,8,4,5,1,2])
b = arr.array ('i',[6,4,3,2,7,1,8])
print(a)
print(b)
d = a+b
print(d)
#slicing: the first three elements
print(a[0:3])
#reverse of array
'''print(a[::-1])'''
#looping in an array
# for loop iterates over the values directly
for z in a:
    print(z)
#while loop iterates by index, starting at index 2
print(a)
temp = 2
while temp < len(a):
    print(a[temp])
    temp+=1
|
# coding:utf-8
import json
from django.shortcuts import render, reverse
from django.http import HttpResponseRedirect, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import authenticate, login, logout
def LoginCheck(request):
    """Redirect already-authenticated users back to the referring page;
    otherwise render the login form.

    NOTE(review): request.user may be an AnonymousUser whose is_active is
    False, so anonymous visitors fall through to the login page.
    """
    user = request.user
    if user.is_active:
        return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
    return render(request, 'login.html')
@csrf_exempt
def login_views(request):
    """Authenticate a user from a JSON body {"username": ..., "password": ...}.

    Redirects back to the referring page on success; otherwise returns a
    JSON payload {"success": false}.
    """
    data = json.loads(request.body)
    username = data["username"]
    password = data["password"]
    user = authenticate(username=username, password=password)
    if user is not None and user.is_active:
        login(request, user)
        return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
    # SECURITY FIX: the previous version echoed the submitted credentials
    # (including the plaintext password) back to the client, and inactive
    # users received a response without any failure flag.
    return JsonResponse({"success": False})
@csrf_exempt
def logout_views(request):
    """Log the current user out and return to the referring page ('/' fallback)."""
    logout(request)
    return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
|
# Lookup tables mapping each hex digit to its 4-bit binary string.
# NOTE(review): these names shadow the built-ins hex() and bin().
hex = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']
bin = ['0000', '0001', '0010', '0011', '0100', '0101', '0110', '0111', '1000', '1001', '1010', '1011', '1100', '1101', '1110', '1111']
# Read a (possibly negative) hex number and print its binary / two's
# complement representation.
s = input()
result = ''
negativeFlag = False
# NOTE(review): this is a *string* comparison, not a numeric magnitude
# check (e.g. '80' >= '7f' is False) -- confirm the intended input range.
if s >= '7f':
    print("that's not my work :)")
    exit(0)
if s[0] == '-':
    negativeFlag = True
    s = s[1:]
# Translate each hex digit into its 4-bit group.
result = ''.join(map(lambda x: bin[hex.index(x)], s))
if negativeFlag:
    # Two's complement step 1: invert every bit.
    result = ''.join(map(lambda x: '0' if int(x) else '1', result))
    print(result)
    # Step 2: add one.  The addition is done in *decimal* on the digit
    # string (prefixed with '1' to keep leading zeros), so a binary digit
    # can temporarily become '2'.
    result = str(int('1' + result) + 1)
    result = result[1:]
    k = 0
    # Propagate the carry: a trailing '2' and the run of '1's before it
    # collapse into '1' followed by zeros.
    # NOTE(review): this hand-rolled carry only handles a single trailing
    # '2' -- verify against inputs whose carry ripples through many bits.
    if result[-1] == '2':
        for i in range(len(result) - 2, 1, -1):
            if result[i] == '1':
                k += 1
            else:
                break
        result = result[:len(result) - k - 2] + '1' + '0' * (k + 1)
print(result)
|
# Generated by Django 2.0.7 on 2018-07-06 23:07
from os.path import abspath, dirname, join
from django.db import migrations
# Directory holding the raw SQL shipped next to this migrations package.
BASE = join(abspath(dirname(__file__)), '..', 'sql')
# Read at import time; the statements are executed by RunSQL below.
with open(join(BASE, 'audit_0002.sql'), 'r') as f:
    sql = f.read()
def update_triggers(apps, schema_editor):
    """(Re)install audit triggers on every model flagged with Meta.audit_trigger.

    Existing row/statement triggers are dropped first; a new trigger is
    only created when the audit.logged_actions table exists.
    """
    for Model in apps.get_models():
        # Only models that opt in via a custom Meta attribute are audited.
        if not getattr(Model._meta, 'audit_trigger', False):
            continue
        table = Model._meta.db_table
        schema_editor.execute(
            'DROP TRIGGER IF EXISTS audit_trigger_row ON {}'.format(table),
        )
        schema_editor.execute(
            'DROP TRIGGER IF EXISTS audit_trigger_stm ON {}'.format(table),
        )
        # to_regclass returns NULL when the audit table does not exist.
        with schema_editor.connection.cursor() as cursor:
            cursor.execute('SELECT to_regclass(\'audit.logged_actions\')')
            has_audit = cursor.fetchone()[0]
            if has_audit:
                schema_editor.execute(
                    'SELECT audit.audit_table(\'{}\')'.format(table),
                )
class Migration(migrations.Migration):
    """Apply audit_0002.sql, then refresh audit triggers on opted-in models."""

    dependencies = [
        ('postgres_audit_triggers', '0001_initial'),
    ]

    operations = [
        migrations.RunSQL(sql),
        migrations.RunPython(update_triggers),
    ]
|
names=["laoliu",100,3.14,"laowang"]  # lists suit many items of the same kind
# add
names.append("老杨")  # append one element at the end
names.insert(0,"八戒")  # insert at index 0
names2=["葫芦娃","猴子"]
print(names+names2)  # concatenation prints a merged copy; names is unchanged
print(names.extend(names2))  # extend merges in place and returns None (this prints None)
# remove
names.pop()  # drop the last element (stack behaviour: last in, first out)
names.remove("老杨")  # remove the first matching value from the left
del names[1]  # delete the second element
# del names[2:5]  # delete the 3rd through 5th elements
# del names[-1]   # delete the last element
# del names[2:]   # delete from the 3rd element to the end
# modify
names[0]="猪八戒"
# query
print("猪八戒" in names)
print("猪八戒" not in names)
# sorting
# NOTE(review): names still mixes strings and numbers here, so sort()
# raises TypeError on Python 3 -- this only worked on Python 2.
names.sort()  # ascending sort
names.sort(reverse=True)  # descending sort
names.reverse()  # reverse the current order
infors=[{"name":"laowang","age":"18"},{"name":"xiaoli","age":"20"}]
infors.sort(key=lambda x:x['name'])  # sort the dicts by their 'name' value
def mySqrt(self, x):
    """
    Return the floor of the square root of x.

    :type x: int
    :rtype: int
    """
    # Small / non-positive inputs: floor-sqrt is the value itself (0 or 1);
    # negative inputs yield 0, matching the original linear-scan behaviour.
    if x < 2:
        return max(x, 0)
    # Binary search over [1, x]: O(log x) instead of the original
    # O(sqrt(x)) linear scan.
    lo, hi = 1, x
    while lo <= hi:
        mid = (lo + hi) // 2
        if mid * mid <= x:
            lo = mid + 1
        else:
            hi = mid - 1
    return hi
import cv2
from darkflow.net.build import TFNet
import matplotlib.pyplot as plt
# Darkflow/YOLOv2 configuration: model graph, weights, detection
# confidence threshold, and full GPU utilisation.
options = {
    'model': 'cfg/yolo.cfg',
    'load': 'bin/yolov2.weights',
    'threshold': 0.3,
    'gpu': 1.0
}
tfnet = TFNet(options)
# Capture from the default webcam (device 0).
vidcap = cv2.VideoCapture(0)
def getFrame(sec):
    """Seek the capture to *sec* seconds and dump that frame to 'sec.jpg'.

    Returns True when a frame was read (and written), False otherwise.
    """
    vidcap.set(cv2.CAP_PROP_POS_MSEC,sec*1000)
    hasFrames,image = vidcap.read()
    if hasFrames:
        cv2.imwrite("sec.jpg", image) # save frame as JPG file
    return hasFrames
sec = 0
counter = 1
frameRate = 0.5
#it will capture image in each 0.5 second
success = getFrame(sec)
while success:
    sec = sec + frameRate
    sec = round(sec, 2)
    # Re-read the frame written by getFrame and convert BGR -> RGB,
    # which is the channel order the detector expects.
    img = cv2.imread('sec.jpg', cv2.IMREAD_COLOR)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # use YOLO to predict the image
    result = tfnet.return_predict(img)
    numberofvehicles = 0
    labels = [li['label'] for li in result]
    # Count only vehicle classes among the detections.
    for i in labels:
        if i == 'car' or i == 'bicycle' or i == 'motorcycle' or i == 'bus' or i == 'truck':
            numberofvehicles = numberofvehicles + 1
    print('No. of Vehicles =', numberofvehicles)
    counter=counter+1
    # Grab the next frame; the loop ends when the capture runs dry.
    success = getFrame(sec)
|
import sys, os, importlib, json, multiprocessing, time
import rasterio, pycountry
import reverse_geocode
import geopandas as gpd
import pandas as pd
from urllib.request import urlopen
from shapely.geometry import Point
from shapely.ops import nearest_points
from shapely import wkt
# Import GOST libraries; sys.path.append will be unnecessary if libraries are already installed
sys.path.append("../../../../gostrocks/src")
sys.path.append("../../../../GOST_Urban/src")
import GOSTRocks.rasterMisc as rMisc
from GOSTRocks.misc import tPrint
import GOST_Urban.UrbanRaster as urban
def get_nearest_date(x, selCables):
    """Return the RFS year of the nearest cable already in service for row x.

    Only cables whose ready-for-service (RFS) date is on or before the
    row's survey year are considered; returns -1 when none qualify.
    """
    selCables = selCables.loc[selCables['RFS'] <= x['d2_l1_year_perf_indicators']]
    if selCables.shape[0] > 0:
        # Pick the cable geometry closest to the row, then its earliest RFS.
        xx = selCables.loc[selCables.geometry == nearest_points(x['geometry'], selCables.unary_union)[1]]
        return(xx.sort_values(['RFS'])['RFS'].iloc[0])
    else:
        return(-1)
def get_nearest(x, selCables):
    """Return the distance from row x to the nearest qualifying cable,
    or -1 when no cable was in service by the row's survey year."""
    selCables = selCables.loc[selCables['RFS'] <= x['d2_l1_year_perf_indicators']]
    if selCables.shape[0] == 0:
        return(-1)
    return(x['geometry'].distance(selCables.unary_union))
def calculate_country(iso3, curD, curB, curN, out_file, selCol, selIXP, inCables, inCell, epsg='epsg:6933', debug=False):
    ''' Calculate ICT distances per country.

    Args:
        iso3: ISO3 code of the country being processed
        curD: geopandas data frame of WBES survey locations
        curB: geopandas data frame of country bounds
        curN: geopandas data frame of neighbouring countries boundaries
        out_file: string of the path for the output file; is read in if it doesn't exist
        selCol: geopandas data frame of colocation centers
        selIXP: geopandas data frame of IXPs
        inCables: geopandas data frame cable landing spots
        inCell: geopandas data frame of cell towers
        epsg: projected CRS used for metric distance calculations
        debug: when True, print extra progress information

    Returns:
        geopandas data frame of survey locations with distance columns
        added; the result is also written to out_file as CSV.
    '''
    start_time = time.time()
    tPrint("Starting %s" % iso3)
    # GSMA mobile-coverage rasters (2G/3G/4G).
    cell_coverage_folder = '/home/public/Data/GLOBAL/INFRA/GSMA/2019/MCE/Data_MCE/Global'
    cell_files = ['MCE_Global2G_2020.tif', 'MCE_Global3G_2020.tif', 'MCE_Global4G_2020.tif']
    gsma2g_R = rasterio.open(os.path.join(cell_coverage_folder, cell_files[0]))
    gsma3g_R = rasterio.open(os.path.join(cell_coverage_folder, cell_files[1]))
    gsma4g_R = rasterio.open(os.path.join(cell_coverage_folder, cell_files[2]))
    # NOTE(review): this resume-from-file branch is disabled (if False) and
    # references distD before it is defined; re-enable only after fixing it.
    if False:  # os.path.exists(out_file):
        curD = pd.read_csv(out_file, index_col=0)
        curD = pd.merge(distD, curD.loc[:, ['idstd', 'd2_l1_year_perf_indicators']], on='idstd')
        curD_geom = curD['geometry'].apply(wkt.loads)
        distD = gpd.GeoDataFrame(curD, geometry=curD_geom, crs=epsg)
        # Remove columns that need to be re-calculated
        distD = distD.loc[:, [not "ngh" in x for x in distD.columns]]
        distD = distD.loc[:, [not "gsma" in x for x in distD.columns]]
        distD = distD.loc[:, [not "cables_dist" in x for x in distD.columns]]
    else:
        distD = curD.to_crs(epsg)
    total_bound = curB.unary_union
    if curB.shape[0] > 0:
        # Distance to co-location centres.
        if not 'col_dist' in distD.columns:
            if selCol.shape[0] > 0:
                selCol = selCol.to_crs(epsg)
                distD['col_dist'] = distD.distance(selCol.unary_union)
            else:
                distD['col_dist'] = -1
        # Distance to internet exchange points.
        if not "ixp_dist" in distD.columns:
            if selIXP.shape[0] > 0:
                selIXP = selIXP.to_crs(epsg)
                distD['ixp_dist'] = distD.distance(selIXP.unary_union)
            else:
                distD['ixp_dist'] = -1
        # Submarine cable landing points in this country.
        if not 'firstCable' in distD.columns:
            selCables = inCables.loc[inCables['ISO3'] == iso3]
            if selCables.shape[0] > 0:
                selCables = selCables.to_crs(epsg)
                # Calculate distance and date of first cable landing point
                first_date = selCables['RFS'].sort_values().iloc[0]
                first_points = selCables.loc[selCables['RFS'] == first_date]
                distD['firstCable'] = first_date
                distD['firstCable_dist'] = distD.distance(first_points.unary_union)
                # Calculate distance and date of closest cable landing point
                distD['closestCable'] = distD.apply(lambda x: get_nearest_date(x, selCables), axis=1)
                distD['closestCable_dist'] = distD.apply(lambda x: get_nearest(x, selCables), axis=1)
            else:
                distD['firstCable'] = ''
                distD['firstCable_dist'] = -1
                # Calculate distance and date of closest cable landing point
                distD['closestCable'] = ''
                distD['closestCable_dist'] = -1
        # Calculate distance to nearest neighbouring country
        if not "ngh1_dist" in distD.columns:
            cnt = 1
            for idx, row in curN.iterrows():
                distD['ngh%s' % cnt] = row['ISO3']
                distD['ngh%s_dist' % cnt] = distD.distance(row['geometry'])
                # Calculate distance to neighbouring submarine cables
                selCables = inCables.loc[inCables['ISO3'] == row['ISO3']]
                if debug:
                    tPrint(f'Neighbour cable distance {row["ISO3"]}: {selCables.shape[0]}')
                if selCables.shape[0] > 0:
                    #distD['ngh%s_cbl_dist' % cnt] = distD.distance(selCables.unary_union)
                    distD['ngh%s_cbl_dist' % cnt] = distD.apply(lambda x: get_nearest(x, selCables), axis=1)
                    distD['ngh%s_cbl' % cnt] = distD.apply(lambda x: get_nearest_date(x, selCables), axis=1)
                else:
                    distD['ngh%s_cbl_dist' % cnt] = -1
                    distD['ngh%s_cbl' % cnt] = -1
                cnt = cnt + 1
        # Distance to the nearest cell tower intersecting the country bounds.
        if not False:  # 'cell_dist' in distD.columns:
            cell_sindex = inCell.sindex
            # NOTE(review): sindex.intersection yields positional indices --
            # confirm .loc (not .iloc) is correct for this frame's index.
            potential_matches = inCell.loc[list(cell_sindex.intersection(total_bound.bounds))]
            selCell = potential_matches.loc[potential_matches.intersects(total_bound)]
            selCell = selCell.to_crs(epsg)
            distD['cell_dist'] = distD.distance(selCell.unary_union)
        # Sample the GSMA coverage rasters at each survey location.
        if not "gsma2g" in distD.columns:
            coordsD = distD.to_crs(gsma2g_R.crs)
            coords = [[x.x, x.y] for x in coordsD['geometry']]
            distD['gsma2g'] = [x[0] for x in list(gsma2g_R.sample(coords))]
            distD['gsma3g'] = [x[0] for x in list(gsma3g_R.sample(coords))]
            distD['gsma4g'] = [x[0] for x in list(gsma4g_R.sample(coords))]
    pd.DataFrame(distD).to_csv(out_file)
    # BUG FIX: the timing report used to sit *after* the return statement
    # below and was therefore unreachable dead code.
    end_time = time.time()
    tPrint(f"Completed {iso3}: {round((end_time-start_time)/60)}")
    return(distD)
import numpy as np
import matplotlib.pyplot as plt
from error_functions import mean_square_error
from math import log
def closed_form_lin_reg(X, y, query, ridge_regression=False, lambda_reg=2):
    """Solve linear regression in closed form: w = (X^T X + lambda*I)^-1 X^T y.

    Args:
        X: design matrix (n_samples, n_features); bias column included.
        y: target vector (n_samples,).
        query: single feature vector to predict for; empty for no prediction.
        ridge_regression: when True, add L2 (ridge) regularisation.
        lambda_reg: ridge strength lambda.

    Returns:
        (pred, weights, error): prediction for query (0 when query is
        empty), learned weight vector, and mean-square training error.
    """
    X_t = np.transpose(X)
    inv_body = np.dot(X_t, X)
    if ridge_regression:
        # BUG FIX: the identity matrix must match the number of features;
        # it was hard-coded to 4 and broke for any other design matrix.
        inv_body += lambda_reg * np.identity(X.shape[1])
    X_t_X_inv = np.linalg.inv(inv_body)
    weights = np.dot(np.dot(X_t_X_inv, X_t), y)
    error = mean_square_error(np.dot(X, weights), y)
    if len(query) > 0:
        pred = np.dot(weights, query)
    else:
        pred = 0
    print("pred: {}, error: {}, weights: {}".format(pred, error, weights))
    return pred, weights, error
# remember to add the bias as first element in the x data, as done below!
x = np.array([[1, 1, 1], [1, 2, 1], [1, 1, 3], [1, 3, 3]])
t = np.array([1.4, 0.5, 2, 2.5])
pred, weights, error = closed_form_lin_reg(x, t, query=np.array([1, 2, 3]))
# Two-feature example (bias + one input).
x = np.array([[1, -2], [1, -1], [1, 0], [1, 2]])
t = np.array([1, 0, 0, 0])
pred, weights, error = closed_form_lin_reg(x, t, query=np.array([1, -0.3]))
# Cubic polynomial features: columns are 1, z, z**2, z**3.
x = np.array(
    [
        [1.0, 0.8, 0.64, 0.512],
        [1.0, 1.0, 1.0, 1.0],
        [1.0, 1.2, 1.44, 1.728],
        [1.0, 1.4, 1.96, 2.744],
        [1.0, 1.6, 2.56, 4.096],
    ]
)
t = np.array([24, 20, 10, 13, 12])
# Fit without a query point (prediction defaults to 0)...
pred, weights, error = closed_form_lin_reg(x, t, query=[])
# ...and again with ridge regularisation.
pred, weights, error = closed_form_lin_reg(
    x, t, query=[], ridge_regression=True, lambda_reg=2
)
|
#!/usr/bin/env python
import datetime as dt
import matplotlib.pyplot as plt
from matplotlib import style
import pandas as pd
import pandas_datareader.data as web
# Show full (unwrapped) data frames when printing.
pd.set_option('display.expand_frame_repr', False)
style.use('ggplot')
start = dt.datetime(2017, 1, 1)
end = dt.datetime.now()
# Fetch TSLA daily prices from the Morningstar feed.
# NOTE(review): the 'morningstar' reader was removed from recent
# pandas-datareader releases -- confirm this data source still works.
df = web.DataReader('TSLA', 'morningstar', start, end)
#print (df.head())
# Re-index on the Date column and drop the redundant Symbol column.
df.reset_index(inplace=True)
df.set_index("Date", inplace=True)
df = df.drop("Symbol", axis=1)
# Round-trip through CSV (also leaves a local copy of the data).
df.to_csv('TSLA.csv')
df = pd.read_csv('TSLA.csv', parse_dates=True, index_col=0)
# Create 100-day moving average
df['100ma'] = df['Close'].rolling(window=100, min_periods=0).mean()
# ...and a 50-day moving average.
df['50ma'] = df['Close'].rolling(window=50, min_periods=0).mean()
#df.dropna(inplace=True)
print (df.tail())
# Price panel (top 5/6 of the figure) with a volume bar panel below it,
# both sharing the x axis.
ax1 = plt.subplot2grid((6,1), (0,0), rowspan=5, colspan=1)
ax2 = plt.subplot2grid((6,1), (5,0), rowspan=1, colspan=1,sharex=ax1)
ax1.plot(df.index, df['Close'])
ax1.plot(df.index, df['100ma'])
ax1.plot(df.index, df['50ma'])
ax2.bar(df.index, df['Volume'])
plt.show()
#plt.savefig('first.eps', format='eps', dpi=300)
|
# Swap the two halves of the input string: print the second half first,
# then the first half.  The left half gets the extra character when the
# length is odd.
a = input()
x = (len(a)+1)//2  # size of the left half
b = (a[x:len(a)])  # right half
c = (a[0:x])       # left half
print(b, end='')
print(c)
def make_sandwich(bread_name, *toppings):
    """Print an order summary: the bread line followed by one line per topping."""
    summary = ["Making a " + bread_name + " with: "]
    summary.extend("- " + item for item in toppings)
    print("\n".join(summary))


make_sandwich("Italian herb and cheeses", "Tomato", "Lettuce", "Chicken")
make_sandwich("Brown bread", "Meatball", "Paprika")
make_sandwich("Tosti bread", "Cheese", "Ham", "Mayo", "Curry")
|
def bin_to_int(binary):
    """
    Convert a binary number (an int or a str of 0/1 digits) to an integer.

    Runs O(log n).
    """
    # Delegate to the built-in base-2 parser instead of summing powers of
    # two by hand (the old loop also shadowed the builtin 'sum').
    # Behaviour is identical for valid 0/1 inputs; inputs containing other
    # digits now raise ValueError instead of being silently misread.
    return int(str(binary), 2)
def test_cases():
    """Self-checks for bin_to_int.

    BUG FIX: literals such as 0001 and 010010101110 are SyntaxErrors in
    Python 3 (leading zeros are not allowed); they are now passed as
    strings.  The final expectation was also wrong: 0b010010101110 equals
    1198, not 512.
    """
    assert bin_to_int(1001) == 9
    assert bin_to_int("0001") == 1
    assert bin_to_int(1110) == 14
    assert bin_to_int("010010101110") == 1198
def main():
    """Entry point: run the self-checks (raises AssertionError on failure)."""
    test_cases()

if __name__ == '__main__':
    main()
|
#Write a function called count_capital_consonants. This
#function should take as input a string, and return as output
#a single integer. The number the function returns should be
#the count of characters from the string that were capital
#consonants. For this problem, consider Y a consonant.
#
#For example:
#
# count_capital_consonants("Georgia Tech") -> 2
# count_capital_consonants("GEORGIA TECH") -> 6
# count_capital_consonants("gEOrgIA tEch") -> 0
def count_capital_consonants(string):
    """Return how many characters of *string* are capital ASCII consonants.

    Y counts as a consonant, so every capital letter except A/E/I/O/U
    contributes to the count.
    """
    # Capital letters occupy the contiguous range 'A'..'Z' (ords 65-90).
    return sum(1 for ch in string if "A" <= ch <= "Z" and ch not in "AEIOU")

print(count_capital_consonants("Georgia Tech"))
print(count_capital_consonants("GEORGIA TECH"))
print(count_capital_consonants("gEOrgIA tEch"))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Jeffrey.Sun
import functools

import numpy as np
import tensorflow as tf
PAD_ID = 0
def variable_scope(name):
    """Decorator factory: run the wrapped function inside a reusable
    tf.variable_scope named *name* (reuse=tf.AUTO_REUSE)."""
    def decorator(func):
        # FIX: preserve the wrapped function's name/docstring; without
        # functools.wraps every decorated method reported as 'wrapper'.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
                return func(*args, **kwargs)
        return wrapper
    return decorator
def get_shape_list(tensor):
    """Return the shape of *tensor*; any dynamic (None) dimension is
    replaced by the corresponding runtime tf scalar."""
    # Plain Python / numpy inputs have a fully static shape.
    if isinstance(tensor, (np.ndarray, list)):
        return np.array(tensor).shape
    static_shape = tensor.shape.as_list()
    dynamic_axes = [i for i, dim in enumerate(static_shape) if dim is None]
    if not dynamic_axes:
        return static_shape
    runtime_shape = tf.shape(tensor)
    for i in dynamic_axes:
        static_shape[i] = runtime_shape[i]
    return static_shape
def create_initializer(stddev=0.02):
    """Truncated-normal weight initializer with the given standard deviation."""
    return tf.truncated_normal_initializer(stddev=stddev)
class Transformer(object):
    """GPT-style causal text Transformer with a final linear projection
    into a shared embedding space (CLIP-like text tower, TF1 graph API)."""

    def __init__(self, num_layers, vocab_size, max_seq_len, hidden_size, num_heads, unit_hidden_size):
        self.num_layers = num_layers    # number of encoder blocks
        self.vocab_size = vocab_size    # token vocabulary size
        self.max_seq_len = max_seq_len  # maximum input sequence length
        self.hidden_size = hidden_size  # model width
        self.num_heads = num_heads      # attention heads per block
        # Per-head projection size; assumes hidden_size % num_heads == 0.
        self.att_proj_size = hidden_size // num_heads
        self.unit_hidden_size = unit_hidden_size  # shared embedding width

    @variable_scope("layer_norm")
    def layer_norm(self, hidden_states, epsilon=1e-5):
        """Layer normalisation over the last axis with learned scale/bias."""
        ln_weight = tf.get_variable("weight", [self.hidden_size], initializer=tf.ones_initializer())
        # NOTE(review): biases are usually zero-initialised; ones here looks
        # unusual -- confirm it matches the reference checkpoint.
        ln_bias = tf.get_variable("bias", [self.hidden_size], initializer=tf.ones_initializer())
        mean = tf.reduce_mean(hidden_states, axis=-1, keepdims=True)
        variance = tf.reduce_mean(tf.squared_difference(hidden_states, mean), axis=-1, keepdims=True)
        normed_tensor = (hidden_states - mean) * tf.rsqrt(variance + epsilon)
        return normed_tensor * ln_weight + ln_bias

    @variable_scope("attention")
    def multihead_attention(self, hidden_states, attn_mask, batch_size, hidden_size, num_heads, att_proj_size):
        """Pre-norm multi-head self-attention with a residual connection.

        attn_mask, when given, is an additive bias broadcast onto the
        attention scores (large negative values disable positions).
        """
        # init weights
        in_proj_weight = tf.get_variable(
            name='in_proj_weight',
            shape=[hidden_size, 3 * hidden_size],
            initializer=create_initializer(), dtype=tf.float32)
        in_proj_bias = tf.get_variable(
            name='in_proj_bias',
            shape=[3 * hidden_size],
            initializer=create_initializer(), dtype=tf.float32)
        out_proj_weight = tf.get_variable(
            name='out_proj_weight',
            shape=[hidden_size, hidden_size],
            initializer=create_initializer(), dtype=tf.float32)
        out_proj_bias = tf.get_variable(
            name='out_proj_bias',
            shape=[hidden_size],
            initializer=create_initializer(), dtype=tf.float32)
        # Pre-norm, then a single fused Q/K/V projection split three ways.
        _hidden_states = self.layer_norm(hidden_states)
        qkv = tf.matmul(_hidden_states, in_proj_weight, transpose_b=False) + in_proj_bias
        qw, kw, vw = tf.split(qkv, 3, axis=-1)

        def _reshape(tensor):
            # [B, L, H] -> [B, num_heads, L, size_per_head]
            tensor = tf.reshape(tensor, shape=[batch_size, -1, num_heads, att_proj_size])
            return tf.transpose(tensor, [0, 2, 1, 3])
        qw = _reshape(qw)  # [B, num_heads, qL, size_per_head]
        kw = _reshape(kw)  # [B, num_heads, kL, size_per_head]
        vw = _reshape(vw)  # [B, num_heads, kL, size_per_head]
        attn_scores = tf.einsum("bnqd,bnkd->bnqk", qw, kw)  # (B, num_heads, qL, kL)
        # Scaled dot-product attention: divide by sqrt(size_per_head).
        attn_scores *= att_proj_size ** -0.5
        if attn_mask is not None:
            attn_scores += attn_mask
        attn_weights = tf.nn.softmax(attn_scores, axis=-1)  # (B, n_heads, qL, kL)
        attn_output = tf.matmul(attn_weights, vw)  # [B, n_heads, qL, size_per_head]
        # Merge heads back into the model width.
        attn_output = tf.reshape(tf.transpose(attn_output, perm=(0, 2, 1, 3)), (batch_size, -1, hidden_size))
        # Residual connection around the attention sub-layer.
        return hidden_states + tf.matmul(attn_output, out_proj_weight, transpose_b=False) + out_proj_bias

    @variable_scope("mlp")
    def dense_gelu_dense(self, hidden_states, hidden_size):
        """Pre-norm feed-forward block (4x expansion, sigmoid-approximated
        GELU) with a residual connection."""
        in_weight = tf.get_variable(
            name='in_weight',
            shape=[hidden_size, 4 * hidden_size],
            initializer=create_initializer(), dtype=tf.float32)
        in_bias = tf.get_variable(
            name='in_bias',
            shape=[4 * hidden_size],
            initializer=create_initializer(), dtype=tf.float32)
        out_weight = tf.get_variable(
            name='out_weight',
            shape=[4 * hidden_size, hidden_size],
            initializer=create_initializer(), dtype=tf.float32)
        out_bias = tf.get_variable(
            name='out_bias',
            shape=[hidden_size],
            initializer=create_initializer(), dtype=tf.float32)
        _hidden_states = self.layer_norm(hidden_states)
        _hidden_states = tf.matmul(_hidden_states, in_weight, transpose_b=False) + in_bias
        # GELU approximation: x * sigmoid(1.702 * x).
        _hidden_states *= tf.sigmoid(1.702 * _hidden_states)
        _hidden_states = tf.matmul(_hidden_states, out_weight, transpose_b=False) + out_bias
        return hidden_states + _hidden_states

    def _encoder(self, hidden_states, attn_mask, batch_size, hidden_size, num_heads, att_proj_size):
        """One encoder block: self-attention followed by the MLP."""
        hidden_states = self.multihead_attention(
            hidden_states=hidden_states,
            attn_mask=attn_mask,
            batch_size=batch_size,
            hidden_size=hidden_size,
            num_heads=num_heads,
            att_proj_size=att_proj_size)
        hidden_states = self.dense_gelu_dense(
            hidden_states=hidden_states,
            hidden_size=hidden_size)
        return hidden_states

    def body(self, hidden_states, attn_mask, batch_size):
        """Run all encoder layers and apply the final layer norm.

        Side effect: stores each layer's first-token embedding in
        self.intermediate_embeds, shaped [B, num_layers, H].
        """
        intermediate_embeds = []
        for idx in range(self.num_layers):
            with tf.variable_scope(f"layer_{idx}"):
                hidden_states = self._encoder(
                    hidden_states=hidden_states,
                    attn_mask=attn_mask,
                    batch_size=batch_size,
                    hidden_size=self.hidden_size,
                    num_heads=self.num_heads,
                    att_proj_size=self.att_proj_size)
                # Keep the position-0 embedding of every layer.
                intermediate_embeds.append(hidden_states[:,0,:])
        self.intermediate_embeds = tf.stack(intermediate_embeds, axis=1)
        return self.layer_norm(hidden_states)

    @variable_scope("token_embedding")
    def token_embedding(self, input_ids, seq_len):
        """Token plus learned absolute position embeddings for [B, L] ids."""
        token_embed_table = tf.get_variable(
            name='token_embed_table',
            shape=[self.vocab_size, self.hidden_size],
            initializer=create_initializer(), dtype=tf.float32)
        position_embed_table = tf.get_variable(
            name='position_embed_table',
            shape=[self.max_seq_len, self.hidden_size],
            initializer=create_initializer(), dtype=tf.float32)
        # One-hot matmul gather of the token embeddings.
        onehot_input_ids = tf.one_hot(input_ids, depth=self.vocab_size, axis=-1)
        output = tf.matmul(onehot_input_ids, token_embed_table)
        return output + tf.expand_dims(position_embed_table[:seq_len, :], axis=0)

    def text_projection(self, txt_features):
        # Linear map from model width into the shared embedding space.
        txt_proj_weight = tf.get_variable(
            name='txt_proj_weight',
            shape=[self.hidden_size, self.unit_hidden_size],
            initializer=create_initializer(), dtype=tf.float32)
        return tf.matmul(txt_features, txt_proj_weight, transpose_b=False)

    @variable_scope("transformer")
    def build(self, features):
        """Encode features['txt_ids'] ([B, L] int ids) and return the
        projected embedding of each sequence's last non-PAD token."""
        batch_size, seq_len = get_shape_list(features['txt_ids'])
        seq_ids = tf.range(seq_len)
        # Causal mask: query position q may attend to keys k <= q;
        # disallowed positions get a large negative additive bias.
        attn_mask = tf.less_equal(tf.tile(seq_ids[None, None, :], (batch_size, seq_len, 1)), seq_ids[None, :, None])
        attn_mask = (1.0 - tf.cast(attn_mask, dtype=tf.float32)[:, None, :, :]) * -1e9
        txt_embeds = self.token_embedding(features['txt_ids'], seq_len)
        outputs = self.body(txt_embeds, attn_mask, batch_size)
        # slice outputs
        # The count of non-zero ids gives each sequence's length; assumes
        # PAD tokens are id 0 (see PAD_ID above) -- TODO confirm.
        last_token_idx = tf.reduce_sum(tf.sign(features['txt_ids']), reduction_indices=1) - 1
        gather_idx = tf.stack([tf.range(batch_size), tf.cast(last_token_idx, dtype=tf.int32)], axis=-1)
        outputs = tf.gather_nd(outputs, gather_idx)  # [B, H]
        return self.text_projection(outputs)
class VisualTransformer(Transformer):
    """ViT-style image tower reusing the Transformer trunk: the image is
    split into patches by a strided convolution, prefixed with a learned
    class token, and the class-token output is projected into the shared
    embedding space."""

    def __init__(self, num_layers, vocab_size, max_seq_len, hidden_size, num_heads,
                 patch_size, input_resolution, unit_hidden_size):
        super(VisualTransformer, self).__init__(num_layers, vocab_size, max_seq_len, hidden_size, num_heads, unit_hidden_size)
        self.patch_size = patch_size              # side length of one square patch
        self.input_resolution = input_resolution  # input image side in pixels
        # grid**2 patch tokens plus one class token.
        self.max_img_len = (input_resolution // patch_size) ** 2 + 1
        self.unit_hidden_size = unit_hidden_size

    @variable_scope("conv")
    def conv_2d(self, input_ids):
        """
        expected shape of 'img_ids' is [B, H, W, C]
        ( and [B, C, W, H] in clip.torch )
        return [B, grid, grid, hidden_size]
        ( and [B, hidden_size, grid, grid] in clip.torch )
        """
        conv_weight = tf.get_variable(
            name='conv_weight',
            shape=[self.patch_size, self.patch_size, 3, self.hidden_size],
            initializer=create_initializer(), dtype=tf.float32)
        # Stride equals the kernel size, so the convolution tiles the image
        # into non-overlapping patches and embeds each one.
        return tf.nn.conv2d(input_ids, conv_weight, strides=[1, self.patch_size, self.patch_size, 1], padding='SAME')

    @variable_scope("image_embedding")
    def image_embedding(self, input_ids, batch_size):
        """Flatten patch embeddings, prepend the class token, add positions,
        and layer-normalise the result."""
        class_embedding = tf.get_variable(
            name='class_embed_table',
            shape=[self.hidden_size],
            initializer=create_initializer(), dtype=tf.float32)
        position_embedding = tf.get_variable(
            name='position_embed_table',
            shape=[self.max_img_len, self.hidden_size],
            initializer=create_initializer(), dtype=tf.float32)
        # [B, grid, grid, H] -> [B, H, grid, grid] -> [B, H, grid**2] -> [B, grid**2, H]
        input_ids = tf.transpose(input_ids, [0, 3, 2, 1])
        input_ids = tf.reshape(input_ids, [batch_size, self.hidden_size, -1])
        input_ids = tf.transpose(input_ids, [0, 2, 1])  # [B, grid ** 2, H]
        # Broadcast the class token across the batch and prepend it.
        embeds = tf.concat([class_embedding + tf.zeros([batch_size, 1, self.hidden_size], dtype=tf.float32), input_ids], axis=1)
        embeds += position_embedding
        return self.layer_norm(embeds)

    def image_projection(self, img_features):
        # Linear map from model width into the shared embedding space.
        img_proj_weight = tf.get_variable(
            name='img_proj_weight',
            shape=[self.hidden_size, self.unit_hidden_size],
            initializer=create_initializer(), dtype=tf.float32)
        return tf.matmul(img_features, img_proj_weight, transpose_b=False)

    @variable_scope("visual_transformer")
    def build(self, features):
        """Encode features['img_ids'] and return the projected class-token
        embedding ([B, unit_hidden_size])."""
        batch_size = get_shape_list(features['img_ids'])[0]
        img_ids = self.conv_2d(features['img_ids'])
        img_embeds = self.image_embedding(img_ids, batch_size=batch_size)
        # No attention mask: image tokens attend bidirectionally.
        outputs = self.body(hidden_states=img_embeds, attn_mask=None, batch_size=batch_size)
        return self.image_projection(outputs[:, 0, :])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.