text stringlengths 8 6.05M |
|---|
#!/usr/bin/env python3
#fonction Counter() pour compter les élements dans un tableau
from collections import Counter
def p_pair(b_array):
    """Prefix each byte of *b_array* with an even-parity bit.

    For every byte, a parity bit is prepended so that the total number of
    1-bits (parity bit included) is even.

    BUG FIX: the original appended to an undefined name ``res`` (NameError)
    and returned an always-empty bytearray.  The result is now a list of
    ints, because the 9-bit values (e.g. 0b111111110 == 510 for input 254)
    do not fit in a bytearray.

    @param b_array: bytes-like sequence of input bytes
    @return: list of ints, each the parity-extended value of one byte
    """
    result = []
    for byte in b_array:
        # Binary representation without the '0b' prefix.
        octet = bin(byte)[2:]
        # Even count of 1s -> parity bit '0'; odd count -> '1'.
        parity = '0' if octet.count('1') % 2 == 0 else '1'
        result.append(int(parity + octet, 2))
    return result
def p_impair(b_array):
    """Prefix each byte of *b_array* with an odd-parity bit.

    For every byte, a parity bit is prepended so that the total number of
    1-bits (parity bit included) is odd.

    BUG FIX: the original appended to an undefined name ``res`` (NameError)
    and returned an always-empty bytearray.  The result is now a list of
    ints, because the 9-bit values can exceed 255.

    @param b_array: bytes-like sequence of input bytes
    @return: list of ints, each the parity-extended value of one byte
    """
    result = []
    for byte in b_array:
        # Binary representation without the '0b' prefix.
        octet = bin(byte)[2:]
        # Even count of 1s -> parity bit '1'; odd count -> '0'.
        parity = '1' if octet.count('1') % 2 == 0 else '0'
        result.append(int(parity + octet, 2))
    return result
|
import os
# Server bind address and port, read from the environment at import time
# (both are None when the variables are unset).
HOST = os.getenv('HOST')
PORT = os.getenv('PORT')
# MongoDB connection string.
MONGODB = os.getenv('MONGODB')
# Database and collection names used by the API.
PANINI_DB = 'panini'
SYNOPSIS_COLLECTION = 'synopsis'
REPOSITORY = 'https://github.com/jallysson/PaniniApi' |
#!/usr/bin/python3
# Instantiate the actuator driver in "COOL" mode with a setpoint of 1.5.
from actuator import Actuator

Actuator("COOL", 1.5)
|
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Nombre: visorImagenes.py
# Autor: Miguel Andres Garcia Niño
# Creado: 15 de Noviembre 2018
# Modificado: 15 de Noviembre 2018
# Copyright: (c) 2018 by Miguel Andres Garcia Niño, 2018
# License: Apache License 2.0
# ----------------------------------------------------------------------------
__nombre__ = "Vima"
__versión__ = "1.0"
"""
El módulo *visorImagenes* permite seleccionar una imagen (png, jpg, ico, bmp) y
visualizarla, e igualmente visualizar las demás imágenes que se encuentren en la
carpeta de la imagen seleccionada.
"""
# Versión Python: 3.6.0
# Versión PyQt5: 5.11.3
from random import randint
from PyQt5.QtGui import QIcon, QFont, QPalette, QImage, QPixmap
from PyQt5.QtCore import (Qt, QDir, QFile, QFileInfo, QPropertyAnimation, QRect,
QAbstractAnimation, QTranslator, QLocale, QLibraryInfo)
from PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget, QPushButton, QMessageBox,
QFrame, QLabel, QFileDialog)
# ========================= CLASE Widgets ==========================
class Widgets(QWidget):
    """Central widget of the image viewer.

    Two QLabels are stacked inside a fixed 640x480 frame; images slide in and
    out through QPropertyAnimation on the labels' geometry.  The class also
    owns the four buttons (load, delete, previous, next) and the navigation
    state over the current folder.
    """
    def __init__(self, parent=None):
        super(Widgets, self).__init__(parent)
        # Reference to the main window, used to reach its status bar.
        self.parent = parent
        self.initUI()
    def initUI(self):
        """Build the frames, image labels and buttons, and wire the signals."""
        # ======================== WIDGETS ===========================
        framePrincipal = QFrame(self)
        framePrincipal.setFrameShape(QFrame.Box)
        framePrincipal.setFrameShadow(QFrame.Sunken)
        framePrincipal.setAutoFillBackground(True)
        framePrincipal.setBackgroundRole(QPalette.Light)
        framePrincipal.setFixedSize(662, 503)
        framePrincipal.move(10, 10)
        frame = QFrame(framePrincipal)
        frame.setFixedSize(640, 480)
        frame.move(10, 10)
        # Label that shows the current image.
        self.labelImagen = QLabel(frame)
        self.labelImagen.setAlignment(Qt.AlignCenter)
        self.labelImagen.setGeometry(0, 0, 640, 480)
        # self.labelImagen.setScaledContents(True)
        # Second label, parked off-screen at x = -650; used so one image can
        # slide out while the next slides in.
        self.labelImagenUno = QLabel(frame)
        self.labelImagenUno.setAlignment(Qt.AlignCenter)
        self.labelImagenUno.setGeometry(-650, 0, 640, 480)
        # =================== BUTTONS (QPUSHBUTTON) ==================
        self.buttonCargar = QPushButton("Cargar imagen", self)
        self.buttonCargar.setCursor(Qt.PointingHandCursor)
        self.buttonCargar.setFixedSize(325, 30)
        self.buttonCargar.move(10, 519)
        self.buttonEliminar = QPushButton("Eliminar imagen", self)
        self.buttonEliminar.setCursor(Qt.PointingHandCursor)
        self.buttonEliminar.setFixedSize(255, 30)
        self.buttonEliminar.move(345, 519)
        self.buttonAnterior = QPushButton("<", self)
        self.buttonAnterior.setObjectName("Anterior")
        self.buttonAnterior.setToolTip("Imagen anterior")
        self.buttonAnterior.setCursor(Qt.PointingHandCursor)
        self.buttonAnterior.setFixedSize(30, 30)
        self.buttonAnterior.move(607, 519)
        self.buttonSiguiente = QPushButton(">", self)
        self.buttonSiguiente.setObjectName("Siguiente")
        self.buttonSiguiente.setToolTip("Imagen siguiente")
        self.buttonSiguiente.setCursor(Qt.PointingHandCursor)
        self.buttonSiguiente.setFixedSize(30, 30)
        self.buttonSiguiente.move(642, 519)
        # ===================== CONNECT SIGNALS ======================
        self.buttonCargar.clicked.connect(self.Cargar)
        self.buttonEliminar.clicked.connect(self.Eliminar)
        self.buttonAnterior.clicked.connect(self.anteriorSiguiente)
        self.buttonSiguiente.clicked.connect(self.anteriorSiguiente)
        # Default navigation state.
        # NOTE(review): the type ``int`` itself is used as a "no position"
        # sentinel; None would be more conventional — confirm before changing.
        self.posicion = int
        self.estadoAnterior, self.estadoSiguiente = False, False
        self.carpetaActual = QDir()
        self.imagenesCarpeta = []
    # ======================= FUNCTIONS ==============================
    def bloquearBotones(self, bool):
        """Enable (True) or disable (False) the four buttons.

        NOTE(review): the parameter name shadows the builtin ``bool``.
        """
        self.buttonCargar.setEnabled(bool)
        self.buttonEliminar.setEnabled(bool)
        self.buttonAnterior.setEnabled(bool)
        self.buttonSiguiente.setEnabled(bool)
    def Mostrar (self, label, imagen, nombre, posicionX=650):
        """Show *imagen* in *label*, sliding it in from x = *posicionX*.

        When the animation finishes, the file name is written to the status
        bar and the buttons are re-enabled.
        """
        imagen = QPixmap.fromImage(imagen)
        # Scale down to fit 640x480 when either dimension exceeds the frame.
        if imagen.width() > 640 or imagen.height() > 480:
            imagen = imagen.scaled(640, 480, Qt.KeepAspectRatio, Qt.SmoothTransformation)
        # Show the image.
        label.setPixmap(imagen)
        # Slide the label from (posicionX, 0) to (0, 0); on finish, publish the
        # image name in the status bar and unlock the buttons.
        self.animacionMostar = QPropertyAnimation(label, b"geometry")
        self.animacionMostar.finished.connect(lambda: (self.parent.statusBar.showMessage(nombre),
                                                       self.bloquearBotones(True)))
        self.animacionMostar.setDuration(200)
        self.animacionMostar.setStartValue(QRect(posicionX, 0, 640, 480))
        self.animacionMostar.setEndValue(QRect(0, 0, 640, 480))
        self.animacionMostar.start(QAbstractAnimation.DeleteWhenStopped)
    def Limpiar(self, labelConImagen, labelMostrarImagen, imagen, nombre,
                posicionInternaX, posicionX=None):
        """Slide the image out of *labelConImagen* towards *posicionInternaX*,
        and show *imagen* in *labelMostrarImagen* as soon as the out-animation
        starts (via Mostrar)."""
        def Continuar(estado):
            # Fired on animation state changes; truthy once it starts running.
            if estado:
                if posicionX:
                    self.Mostrar(labelMostrarImagen, imagen, nombre, posicionX)
                else:
                    self.Mostrar(labelMostrarImagen, imagen, nombre)
        self.animacionLimpiar = QPropertyAnimation(labelConImagen, b"geometry")
        self.animacionLimpiar.finished.connect(lambda: labelConImagen.clear())
        self.animacionLimpiar.setDuration(200)
        # self.animacionLimpiar.valueChanged.connect(lambda x: print(x))
        self.animacionLimpiar.stateChanged.connect(Continuar)
        self.animacionLimpiar.setStartValue(QRect(0, 0, 640, 480))
        self.animacionLimpiar.setEndValue(QRect(posicionInternaX, 0, 640, 480))
        self.animacionLimpiar.start(QAbstractAnimation.DeleteWhenStopped)
    def Cargar(self):
        """Open a file dialog, load the chosen image, and index its folder for
        previous/next navigation."""
        nombreImagen, _ = QFileDialog.getOpenFileName(self, "Seleccionar imagen",
                                                      QDir.currentPath(),
                                                      "Archivos de imagen (*.jpg *.png *.ico *.bmp)")
        if nombreImagen:
            # Find which label currently holds an image (if any).
            labelConImagen = ""
            if self.labelImagen.pixmap():
                labelConImagen = self.labelImagen
            elif self.labelImagenUno.pixmap():
                labelConImagen = self.labelImagenUno
            imagen = QImage(nombreImagen)
            if imagen.isNull():
                if labelConImagen:
                    self.Eliminar()
                QMessageBox.information(self, "Visor de imágenes",
                                        "No se puede cargar %s." % nombreImagen)
                return
            # Folder that contains the selected image.
            self.carpetaActual = QDir(QFileInfo(nombreImagen).absoluteDir().path())
            # Absolute paths of every image file living in that folder.
            imagenes = self.carpetaActual.entryInfoList(["*.jpg", "*.png", "*.ico", "*.bmp"],
                                                        QDir.Files, QDir.Name)
            self.imagenesCarpeta = [imagen.absoluteFilePath() for imagen in imagenes]
            self.posicion = self.imagenesCarpeta.index(nombreImagen)
            # Flags marking whether we are at the first / last image.
            self.estadoAnterior = True if self.posicion == 0 else False
            self.estadoSiguiente = True if self.posicion == len(self.imagenesCarpeta)-1 else False
            # Lock the buttons while the animation runs.
            self.bloquearBotones(False)
            # File name (with extension) for the status bar.
            nombre = QFileInfo(nombreImagen).fileName()
            if labelConImagen:
                posicionInternaX = -650
                labelMostrarImagen = self.labelImagen if self.labelImagenUno.pixmap() else self.labelImagenUno
                self.Limpiar(labelConImagen, labelMostrarImagen, imagen, nombre, posicionInternaX)
            else:
                self.Mostrar(self.labelImagen, imagen, nombre)
    def Eliminar(self):
        """Slide the current image out and reset the navigation state."""
        def establecerValores():
            # Runs when the out-animation finishes: clear the label and
            # restore all defaults.
            labelConImagen.clear()
            labelConImagen.move(0, 0)
            # Clear the status bar.
            self.parent.statusBar.clearMessage()
            # Default navigation state (see initUI).
            self.posicion = int
            self.estadoAnterior, self.estadoSiguiente = False, False
            self.carpetaActual = QDir()
            self.imagenesCarpeta.clear()
            self.bloquearBotones(True)
        # Find which label currently holds an image (if any).
        labelConImagen = ""
        if self.labelImagen.pixmap():
            labelConImagen = self.labelImagen
        elif self.labelImagenUno.pixmap():
            labelConImagen = self.labelImagenUno
        if labelConImagen:
            self.bloquearBotones(False)
            self.animacionEliminar = QPropertyAnimation(labelConImagen, b"geometry")
            self.animacionEliminar.finished.connect(establecerValores)
            self.animacionEliminar.setDuration(200)
            self.animacionEliminar.setStartValue(QRect(0, 0, 640, 480))
            self.animacionEliminar.setEndValue(QRect(-650, 0, 640, 480))
            self.animacionEliminar.start(QAbstractAnimation.DeleteWhenStopped)
    def anteriorSiguiente(self):
        """Show the previous or next image, depending on which button sent
        the signal (object names "Anterior" / "Siguiente")."""
        if self.imagenesCarpeta:
            widget = self.sender().objectName()
            if widget == "Anterior":
                self.estadoAnterior = True if self.posicion == 0 else False
                self.estadoSiguiente = False
                self.posicion -= 1 if self.posicion > 0 else 0
                posicionInternaX, posicionX = 650, -650
            else:
                self.estadoSiguiente = True if self.posicion == len(self.imagenesCarpeta)-1 else False
                self.estadoAnterior = False
                self.posicion += 1 if self.posicion < len(self.imagenesCarpeta)-1 else 0
                posicionInternaX, posicionX = -650, 650
            if self.estadoAnterior or self.estadoSiguiente:
                # Already at the first / last image: nothing to do.
                return
            else:
                imagen = self.imagenesCarpeta[self.posicion]
                # The folder may have disappeared since it was indexed.
                if not QDir(self.carpetaActual).exists():
                    self.Eliminar()
                    return
                elif not QFile.exists(imagen):
                    # The file vanished: re-index the folder...
                    imagenes = self.carpetaActual.entryInfoList(["*.jpg", "*.png", "*.ico", "*.bmp"],
                                                                QDir.Files, QDir.Name)
                    if not imagenes:
                        self.Eliminar()
                        return
                    self.imagenesCarpeta = [imagen.absoluteFilePath() for imagen in imagenes]
                    # ...and jump to a random valid position.
                    self.posicion = randint(0, len(self.imagenesCarpeta)-1)
                    self.estadoAnterior = True if self.posicion == 0 else False
                    self.estadoSiguiente = True if self.posicion == len(self.imagenesCarpeta)-1 else False
                elif QImage(imagen).isNull():
                    # Unreadable file: drop it and pick a random valid position.
                    del self.imagenesCarpeta[self.posicion]
                    if not self.imagenesCarpeta:
                        self.Eliminar()
                        return
                    self.posicion = randint(0, len(self.imagenesCarpeta)-1)
                    self.estadoAnterior = True if self.posicion == 0 else False
                    self.estadoSiguiente = True if self.posicion == len(self.imagenesCarpeta)-1 else False
                imagen = self.imagenesCarpeta[self.posicion]
                if self.labelImagen.pixmap():
                    labelConImagen = self.labelImagen
                elif self.labelImagenUno.pixmap():
                    labelConImagen = self.labelImagenUno
                # Lock the buttons while the animation runs.
                self.bloquearBotones(False)
                # File name (with extension) for the status bar.
                nombre = QFileInfo(imagen).fileName()
                # Label that will receive the incoming image.
                labelMostrarImagen = self.labelImagen if self.labelImagenUno.pixmap() else self.labelImagenUno
                # Slide the current image out and the new one in.
                self.Limpiar(labelConImagen, labelMostrarImagen, QImage(imagen),
                             nombre, posicionInternaX, posicionX)
# ====================== CLASE visorImagenes =======================
class visorImagenes(QMainWindow):
    """Main window: fixed-size frame hosting the Widgets central widget and a
    status bar with a permanent version label."""
    def __init__(self, parent=None):
        super(visorImagenes, self).__init__(parent)
        # Window chrome: icon, title, fixed-size dialog frame.
        self.setWindowIcon(QIcon("Qt.png"))
        self.setWindowTitle("Visor de imagenes en PyQt5 por: ANDRES NIÑO")
        self.setWindowFlags(Qt.MSWindowsFixedSizeDialogHint)
        self.setFixedSize(682, 573)
        self.initUI()
    def initUI(self):
        """Install the central widget and build the status bar."""
        central = Widgets(self)
        self.setCentralWidget(central)
        # Permanent version label on the right of the status bar.
        version_label = QLabel(self)
        version_label.setText(" Vima versión beta: 1.0 ")
        self.statusBar = self.statusBar()
        self.statusBar.addPermanentWidget(version_label, 0)
# ==================================================================
if __name__ == '__main__':
    import sys
    app = QApplication(sys.argv)
    # Install the Qt base translation matching the system locale.
    translator = QTranslator(app)
    locale_name = QLocale.system().name()
    translations_dir = QLibraryInfo.location(QLibraryInfo.TranslationsPath)
    translator.load("qtbase_%s" % locale_name, translations_dir)
    app.installTranslator(translator)
    # Application-wide 10pt font.
    font = QFont()
    font.setPointSize(10)
    app.setFont(font)
    window = visorImagenes()
    window.show()
    sys.exit(app.exec_())
|
import os
import pandas as pd
import numpy as np
from pickle import loads
from src.trainModel import trainModel
from sklearn.impute import SimpleImputer
class evalModel:
    """Apply the trained classification/regression models to the CSV files
    under ``../data/input`` and append per-trait scores to
    ``../data/predict/output.txt``.
    """
    def __init__(self):
        self.path = "../data/input/"
        self.root = "../data/predict/"
        # Trait names, filled in by getModel().
        self.name = list()
        # Pickled classification model per trait.
        self.label_model = dict()
        # Pickled regression model per trait, "Yes" category.
        self.modelYes = dict()
        # Pickled regression model per trait, "No" category.
        self.modelNo = dict()
    def getData(self):
        """Return the names of the .csv files found under self.path."""
        docs = []
        for r, d, f in os.walk(self.path):
            for files in f:
                if files.endswith(".csv"):
                    docs.append(files)
        return docs
    def getModel(self):
        """Train (via trainModel) and cache the best model for each trait."""
        model = trainModel()
        self.name = model.name
        # Best classification model per trait.
        self.label_model = model.trainModelLabel()
        model.saveModel()
        # Best regression models for the "yes" category of each trait.
        self.modelYes = model.modelYes
        # BUG FIX (comment only): this dict holds the "no" category models,
        # the old comment was a copy-paste of the "yes" line.
        self.modelNo = model.modelNo
    def getTrainedData(self, user):
        """Classify one user's CSV.

        @param user: file name relative to self.path
        @return: DataFrame of the 10 input features plus one predicted
                 column per trait.
        """
        path = self.path + user
        df = pd.read_csv(path)
        # Impute missing values with the column median.
        imputer = SimpleImputer(strategy='median')
        imputer.fit(df)
        df = pd.DataFrame(imputer.transform(df)).astype('int64')
        prediction = list()
        # One prediction vector per trait (the index was unused before).
        for item in self.name:
            pred = loads(self.label_model[item]).predict(df)
            prediction.append(pred)
        matrix = pd.concat([pd.DataFrame(df), pd.DataFrame(prediction).T], axis=1)
        matrix.columns = ['anger', 'anticipation', 'disgust', 'fear', 'joy', 'negative', 'positive', 'sadness', 'surprise', 'trust', 'ext', 'neu', 'agr', 'con', 'opn']
        return matrix
    def getRegressed(self, mat, trait, status):
        """Run the regression model on the rows whose *trait* column equals
        *status* (1 = "yes" model, 0 = "no" model).

        @return: list of predictions, or [0] when no row matches.
        """
        sample = mat[mat[trait] == status]
        if sample.empty:
            return [0]
        # Only the first ten (emotion) columns are regression features.
        sample = sample.iloc[:, 0:10]
        if status == 1:
            return loads(self.modelYes[trait]).predict(sample).tolist()
        return loads(self.modelNo[trait]).predict(sample).tolist()
    def getRated(self):
        """Score every input file and append per-trait results to
        ``../data/predict/output.txt``."""
        self.getModel()
        data = self.getData()
        # BUG FIX: the file handle was never closed; the with-block guarantees
        # the buffered output reaches disk.
        with open('../data/predict/output.txt', 'a') as f:
            for file in data:
                f.write("Processing classification validation user data:%s\n" % file)
                print("Processing classification validation user data:", file)
                matrix = self.getTrainedData(file)
                # Best score per trait.
                score_dict = dict()
                f.write("Processing regression validation user data:%s\n" % (file))
                print("Processing regression validation user data: ", file)
                for each in self.name:
                    pred1 = self.getRegressed(matrix, each, 1)
                    pred2 = self.getRegressed(matrix, each, 0)
                    # Mean of the "yes" and "no" predictions, weighted by the
                    # number of rows in each category.
                    score = (np.nanmean(pred1)*len(pred1)+np.nanmean(pred2)*len(pred2))/(len(pred1) + len(pred2))
                    score_dict[each] = score
                for k, v in score_dict.items():
                    f.write('%s: %s\n' % (k, v))
                    print(k, ":", v)
# Runs the full evaluation at import time.
# NOTE(review): consider guarding with ``if __name__ == "__main__":`` so that
# importing this module does not trigger training — confirm no caller relies
# on the import-time side effect first.
x = evalModel()
x.getRated()
|
#!/usr/bin/python
from cStringIO import StringIO
from boto.s3.connection import S3Connection
from boto.s3.connection import OrdinaryCallingFormat
from boto.s3.key import Key as S3Key
from utils.linda import *
import os
import zipfile
import sys
import json
import base64
import time
import threading
import random
def cache_update(model_name, s3_path):
print "cache update for model {} and path {}".format(model_name, s3_path)
# Get idata for s3 connection
s3_host = None
s3_service = linda_rd('S3', categ='catalog/service')
if isinstance(s3_service, list) and len(s3_service) > 0:
s3_host = s3_service[0]['Address']
s3_port = s3_service[0]['ServicePort']
s3_key = linda_rd('s3/admin/access-key-id')
s3_secret = linda_rd('s3/admin/secret-access-key')
# Get CSAR file for the model
if s3_host is not None:
print "s3_hot ok"
conn = S3Connection(s3_key, s3_secret, host=s3_host, port=s3_port, calling_format=OrdinaryCallingFormat(), is_secure=False)
if conn is not None:
print "conn OK"
model_bucket = conn.get_bucket(model_name)
s3key_name = '{}.csar.zip'.format(model_name)
if model_bucket is not None:
print "bucket ok"
for key in model_bucket.list():
print "{name}\t{size}".format(name = key.name, size = key.size)
csar_key = model_bucket.get_key(s3key_name)
print "csar_key_name = {}".format(s3key_name)
if csar_key is not None:
csar_zip_path = '/tmp/{}'.format(s3key_name)
res = csar_key.get_contents_to_filename(csar_zip_path)
print "res = {}".format(res)
zip_ref = zipfile.ZipFile(csar_zip_path, 'r')
csardir="/var/local/{}".format(model_name)
zip_ref.extractall(csardir)
else:
print "Key {} does not exist in the bucket {}".format(model_name, s3_keyname)
else:
print "Bucket {} does not exist".format(model_name)
else:
print "Connection to S3 failed"
def main(args=None):
    """Entry point for the watch handler: reads exactly one line of event
    data from stdin and spawns one cache_update thread per event."""
    watch_input = sys.stdin.readlines()
    # Exactly one line of input is expected; anything else is ignored.
    if type(watch_input) is not list or len(watch_input) != 1:
        return
    try:
        print "\n>>>>>>>>>>>>>>>>>> exec_cache_csar: {}".format(watch_input)
        # SECURITY: eval() on external input can execute arbitrary code;
        # json.loads() would be the safe replacement — confirm the feed
        # format before changing.
        list_input = eval(watch_input[0])
    except:
        return
    if type(list_input) is not list:
        print "Format error for the event sent to oet : {}".format(watch_input)
        return
    for event_data in list_input:
        key = event_data.get('Key')
        val = event_data.get('Value')
        if key is not None:
            # Key layout: <prefix>/<model_name>/... — the model name is the
            # second path component.
            key_args = key.split('/')
            model_name = key_args[1]
            print "model_name = {}".format(model_name)
            if val is not None:
                # Values arrive base64-encoded.
                s3_path = base64.b64decode(val)
                print "s3_path = {}".format(s3_path)
                # One background worker per event.
                t = threading.Thread( target=cache_update, args =(model_name, s3_path) )
                t.start()
if __name__ == '__main__':
    main()
|
'''
Data source: Eastmoney quotes centre
http://quote.eastmoney.com/center
'''
import requests
import re
import json
#用get方法访问服务器并提取页面数据
def get_stocks(page):
    """Fetch one page of the stock list from the eastmoney JSONP API and
    append "code,name" rows to the module-global CSV handle ``f``.

    @param page: 1-based page number passed as the ``pn`` query parameter
    """
    url = "http://42.push2.eastmoney.com/api/qt/clist/get?cb=jQuery11240574199433107409_1590886830210&pn={0}&pz=20&po=1&np=1&ut=bd1d9ddb04089700cf9c27f6f7426281&fltt=2&invt=2&fid=f3&fs=m:0+t:6,m:0+t:13,m:0+t:80,m:1+t:2,m:1+t:23&fields=f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152&_=1590886830256".format(page)
    r = requests.get(url)
    # Strip the JSONP callback wrapper so the payload parses as plain JSON.
    content = r.text.replace("jQuery11240574199433107409_1590886830210", "")
    content = content.replace(";", "")
    content = content[1:-1]
    content_dict = json.loads(content)
    print(content_dict)
    try:
        diff = content_dict.get('data').get('diff')
        # BUG FIX: the original looped ``range(size + 1)``, overran the list
        # by one on every page and relied on a bare except to bail out.
        for entry in diff:
            code = entry.get('f12')   # stock code
            name = entry.get('f14')   # stock name
            print(code, name)
            f.write("{0},{1}\n".format(code, name))
    except (AttributeError, TypeError):
        # 'data' is null when the page index is out of range.
        pass
if __name__ == "__main__":
    # BUG FIX: the CSV handle was never closed; the with-block guarantees the
    # buffered rows are flushed to disk.  ``f`` stays module-global so that
    # get_stocks() can write to it.
    with open("data.csv", 'w') as f:
        for page in range(200+1):
            get_stocks(page)
|
import MySQLdb
# Open database connection (host, user, password, database).
# NOTE(review): credentials are hard-coded; consider reading them from the
# environment.
db = MySQLdb.connect("localhost","root","toor","CARRENTALDB" )
# prepare a cursor object using cursor() method
cursor = db.cursor()
# Drop table if it already exist using execute() method.
#cursor.execute("DROP TABLE IF EXISTS CUSTOMER")
# Create the RENT table: one row per rental, keyed by RENT_ID and referencing
# a customer (CUST_ID) and a vehicle (REG_NO).
sql = """CREATE TABLE RENT(
RENT_ID varchar(20)NOT NULL,
CUST_ID varchar(20),
REG_NO varchar(20),
RENT_DATE varchar(20),
RETURN_DATE varchar(20),
PRIMARY KEY (RENT_ID)
)"""
cursor.execute(sql)
# disconnect from server
db.close()
|
import time
import random
from MSExploit import MSExploit
class MSExploit_Buffer_Overflow(MSExploit):
def __init__(self, name):
MSExploit.__init__(self, name)
#Create
def Create(self):
print "Assessing overflow type...\n"
time.sleep(0.75)
print "Stack buffer overflow detected.\nApproach: NOP-Sled.\n"
time.sleep(1.25)
print "Crafting malicious shellcode..."
time.sleep(0.5)
shellcode = self.Shellcode()
for i in range(len(shellcode)):
print shellcode[i],
time.sleep(0.1)
print "\nDone!\n"
time.sleep(1)
self.OptionalRootkit()
print "Compiling...\n"
time.sleep(2)
print "Buffer overflow Exploit successfully created and awaiting deployment!\n"
#Deploy
def Deploy(self, t, port):
service = ""
try:
service = self.t.portsAndServices[port]
except:
print "That is not a valid port number.\n"
return
print "Gathering necessary files..."
time.sleep(2)
print "Deploying %s to %s %s service on port %d" % (self.name, self.t.ipAddress, service, port)
time.sleep(3)
print "Success!\n"
time.sleep(2)
self.t.deployedExploit = self
self.Results()
#Results
def Results(self):
print "Corrupting stack woth No-Op machine instructions..."
time.sleep(3)
print "Attempting to locate return address..."
time.sleep(1.5)
print "Location found! Present at memory location: %s" % (self.MemoryLocation())
time.sleep(0.5)
print "Setting relative jump location to before return address: %s" % (self.MemoryLocation())
time.sleep(0.75)
print "Setting relative jump location pointing to payload: %s" % (self.MemoryLocation())
time.sleep(1)
print "Payload address: %s\n" % (self.MemoryLocation())
print "Running..."
time.sleep(4)
print "Executing payload..."
time.sleep(3)
print "Deleting key program files..."
time.sleep(2)
print "\nComplete!\n"
#Memory Location
def MemoryLocation(self):
size = 8
location = ""
start = "0x"
hexadecimal = ['0','1','2','3','4','5','6','7','8','9','0','a','b','c','d','e','f']
for i in range(0, size):
nextNum = random.randrange(0, len(hexadecimal))
if i == 0:
location = start + hexadecimal[nextNum]
else:
location += hexadecimal[nextNum]
return location
#Shellcode
def Shellcode(self):
hexCode = ['0', '1', '2', '3', '4','5','6','7','8','9','0','a','b','c','d','e','f']
code = [None] * 30
breakChar = '\\x'
for i in range(len(code)):
hex1 = random.randrange(0, len(hexCode))
hex2 = random.randrange(0, len(hexCode))
if i == len(code) - 1:
code[i] = breakChar + hexCode[hex1] + hexCode[hex2] + "\\"
else:
code[i] = breakChar + hexCode[hex1] + hexCode[hex2]
return code |
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import statsmodels.api as am
# Read the data set into a pandas DataFrame.
wine = pd.read_csv('winequality-both.csv',sep=',',header=0)
# Normalise column names: spaces -> underscores.
wine.columns = wine.columns.str.replace(' ','_')
# Display descriptive statistics for quality by wine type.
print("<와인 종류에 따른 기술 통계를 출력하기>")
w = wine.groupby('type')
# BUG FIX: reuse the groupby object instead of recomputing it (``w`` was
# previously assigned but never used).
print(w.describe())
|
from ruamel.yaml import YAML
import os
import json
def load_yaml(yaml_in:str)->dict:
    """Parse the YAML file at *yaml_in* and return its contents as a
    dictionary (safe loader)."""
    parser = YAML(typ='safe')
    parser.preserve_quotes = True
    with open(yaml_in) as stream:
        data = parser.load(stream)
    return data
def write_yaml(filename:str, dictionary:dict):
    """Serialise *dictionary* to *filename* as YAML.

    @param filename: destination path
    @param dictionary: mapping to dump
    @raise TypeError: when *dictionary* is not a dict (TypeError subclasses
        Exception, so callers catching the old generic Exception still work)
    """
    # Guard clause first: fail before touching the filesystem.
    if not isinstance(dictionary, dict):
        raise TypeError('write_yaml() expects a dict, got %s' % type(dictionary).__name__)
    yml = YAML()
    yml.explicit_start = True
    yml.default_flow_style = False
    yml.encoding = "utf-8"     # default when using YAML() or YAML(typ="rt")
    yml.allow_unicode = True   # always default in the new API
    yml.errors = "strict"
    yml.indent(sequence=4, offset=2)
    yml.explicit_end = True
    with open(filename, 'w') as outfile:
        print(filename)
        yml.dump(dictionary, outfile)
|
#!/usr/bin/env python
# coding: utf-8
#
#******************************************************************************\
# *
# * Copyright (C) 2006 - 2014, Jérôme Kieffer <imagizer@terre-adelie.org>
# * Conception : Jérôme KIEFFER, Mickael Profeta & Isabelle Letard
# * Licence GPL v2
# *
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2 of the License, or
# * (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
# *
#*****************************************************************************/
from __future__ import with_statement, division, print_function, absolute_import
"""
Config is a class containing all the configuration of the imagizer suite.
Technically it is a Borg (design Pattern) so every instance of Config has exactly the same contents.
"""
__author__ = "Jérôme Kieffer"
__contact = "imagizer@terre-adelie.org"
__date__ = "20141129"
__license__ = "GPL"
import os, locale, logging, ConfigParser
installdir = os.path.dirname(os.path.abspath(__file__))
logger = logging.getLogger("imagizer.config")
try:
    import resource
except ImportError:
    # "resource" is Unix-only; without it the RLIMIT_NOFILE-based clamp of
    # the image cache (see Config.load) is skipped.
    resource = None
# Candidate configuration files per platform: system-wide first, then the
# per-user file.
if os.name == 'nt': # sys.platform == 'win32':
    listConfigurationFiles = [os.path.join(os.getenv("ALLUSERSPROFILE"), "imagizer.conf"), os.path.join(os.getenv("USERPROFILE"), "imagizer.conf")]
elif os.name == 'posix':
    listConfigurationFiles = ["/etc/imagizer.conf", os.path.join(os.getenv("HOME"), ".imagizer")]
def float_or_None(inp):
    """Convert *inp* to float, returning None when conversion is impossible.

    @param inp: any value accepted (or not) by float()
    @return: float(inp), or None on failure
    """
    try:
        out = float(inp)
    except (TypeError, ValueError):
        # Narrowed from a bare except: only conversion failures mean None.
        out = None
    return out
################################################################################################
############### Class Config for storing the cofiguratio in a Borg ############################
################################################################################################
class Config(object):
"""this class is a Borg : always returns the same values regardless to the instance of the object"""
__shared_state = {}
    def __init__(self, configuration_files=None):
        """
        This is a Borg, so the constructor is more or less empty

        @param configuration_files: optional list of configuration file
            paths to load on top of the defaults
        """
        # Borg pattern: every instance shares the same attribute dict.
        self.__dict__ = self.__shared_state
        # Populate the defaults only on first construction (shared state empty).
        if len(self.__dict__) < 5:
            logging.debug("Config: initialization of the class")
            self.ScreenSize = 600
            self.NbrPerPage = 20
            self.PagePrefix = "page"
            self.TrashDirectory = "Trash"
            self.SelectedDirectory = "Selected"
            self.Selected_save = ".selected-photos"
            self.Extensions = [".jpg", ".jpeg", ".jpe", ".jfif"]
            self.RawExtensions = [".cr2", ".arw", ".mrw", ".dng", ".pef", ".nef", ".raf"]
            self.AutoRotate = False
            self.DefaultMode = "664"
            try:
                self.DefaultRepository = os.getcwd()
            except OSError:
                # The working directory may have been deleted under us.
                self.DefaultRepository = ""
            self.CommentFile = "index.desc"
            self.Interpolation = 1
            self.DefaultFileMode = int(self.DefaultMode, 8)
            self.DefaultDirMode = self.DefaultFileMode + 3145 # 73 = +111 in octal ... 3145 adds +s, octal mode
            self.Filigrane = False
            self.FiligraneSource = os.path.join(installdir, "signature.png")
            self.FiligranePosition = 5
            self.FiligraneQuality = 75
            self.FiligraneOptimize = False
            self.FiligraneProgressive = False
            self.ContrastMaskGaussianSize = 11.77
            self.WebDirIndexStyle = "list"
            self.MediaSize = 680
            self.Burn = "grave-rep $Selected"
            self.WebServer = "cp -r $Selected/* $WebRepository && generator"
            self.WebRepository = "/var/www/imagizer"
            self.Locale, self.Coding = locale.getdefaultlocale()
            self.ExportSingleDir = False
            self.GraphicMode = "Normal"
            self.WebPageAnchor = "end"
            self.SlideShowDelay = 5.0
            self.SlideShowType = "chronological"
            self.SlideShowMinRating = 3
            self.SynchronizeRep = "user@host:/mnt/photo"
            self.SynchronizeType = "Newer"
            self.ImageCache = 100
            self.ImageWidth = None
            self.ImageHeight = None
            self.DEBUG = None
            # External tool commands.
            self.Gimp = "gimp"
            self.Rawtherapee = "rawtherapee"
            self.Dcraw = "dcraw -w -c"
            self.DefaultRatingSelectedImage = 3
            self.SelectedFilter = "ContrastMask"
            # Per-size image generation settings (see Config.load).
            self.Thumbnails = {
                "Size":160,
                "Suffix": "thumb",
                "Interpolation":1,
                "Progressive":False,
                "Optimize":False,
                "ExifExtraction":True,
                "Quality": 75
            }
            self.ScaledImages = {
                "Size":800,
                "Suffix": "scaled",
                "Interpolation":1,
                "Progressive":False,
                "Optimize":False,
                "ExifExtraction":False,
                "Quality": 75
            }
            # Video default options
            self.ScratchDir = "/tmp"
            self.VideoBitRate = 600
            self.AudioBitRatePerChannel = 64
            self.X264Options = "subq=7:nr=100:me=umh:partitions=all:direct_pred=auto:bframes=3:frameref=5"
            self.FramesPerSecond = None
            self.MPlayer = "/usr/bin/mplayer"
            self.MEncoder = "/usr/bin/mencoder"
            self.Sox = "/usr/bin/sox"
            self.Convert = "/usr/bin/convert"
            self.AviMerge = "/usr/bin/avimerge"
            self.VideoExtensions = [".avi", ".mpeg", ".mpg", ".mp4", ".divx", ".mov", ".webm", ".mkv", ".m2ts"]
            self.ThumbnailExtensions = [".thm", ".jpg"]
            self.BatchScriptExecutor = "/usr/bin/batch"
            self.BatchUsesPipe = True
        if configuration_files is not None:
            self.load(configuration_files)
def load(self, filenames):
"""retrieves the the default options, if the filenames does not exist, uses the default instead
@param filenames: list of filename
@type filenames: list of strings or unicode
"""
logging.debug("Config.load")
configparser = ConfigParser.ConfigParser()
files = []
for i in filenames:
if os.path.isfile(i):files.append(i)
if len(files) == 0:
logging.warning("No configuration file found. Falling back on defaults")
return
configparser.read(files)
for i in configparser.items("Selector"):
j = i[0]
if j == "ScreenSize".lower(): self.ScreenSize = int(i[1])
elif j == "Interpolation".lower(): self.Interpolation = int(i[1])
elif j == "PagePrefix".lower(): self.PagePrefix = i[1]
elif j == "NbrPerPage".lower(): self.NbrPerPage = int(i[1])
elif j == "TrashDirectory".lower(): self.TrashDirectory = i[1]
elif j == "SelectedDirectory".lower():self.SelectedDirectory = i[1]
elif j == "Selected_save".lower(): self.Selected_save = i[1]
elif j == "AutoRotate".lower(): self.AutoRotate = configparser.getboolean("Selector", "AutoRotate")
elif j == "Filigrane".lower(): self.Filigrane = configparser.getboolean("Selector", "Filigrane")
elif j == "FiligraneSource".lower(): self.FiligraneSource = i[1]
elif j == "FiligranePosition".lower():self.FiligranePosition = int(i[1])
elif j == "FiligraneQuality".lower(): self.FiligraneQuality = int(i[1])
elif j == "FiligraneOptimize".lower():self.FiligraneOptimize = configparser.getboolean("Selector", "FiligraneOptimize")
elif j == "FiligraneProgressive".lower():self.FiligraneProgressive = configparser.getboolean("Selector", "FiligraneProgressive")
elif j == "CommentFile".lower(): self.CommentFile = i[1]
elif j == "WebDirIndexStyle".lower(): self.WebDirIndexStyle = i[1]
elif j == "DefaultFileMode".lower():
self.DefaultFileMode = int(i[1], 8)
self.DefaultDirMode = self.DefaultFileMode + 3145 # 73 = +111 en octal ... 3145 +s mode octal
elif j == "RawExtensions".lower(): self.RawExtensions = i[1].split()
elif j == "Extensions".lower(): self.Extensions = i[1].split()
elif j == "DefaultRepository".lower(): self.DefaultRepository = i[1]
elif j == "MediaSize".lower(): self.MediaSize = float(i[1])
elif j == "Burn".lower(): self.Burn = i[1]
elif j == "WebServer".lower(): self.WebServer = i[1]
elif j == "WebRepository".lower(): self.WebRepository = i[1]
elif j == "Locale".lower():
self.Locale = i[1]
try:
locale.setlocale(locale.LC_ALL, self.Locale)
except locale.Error:
self.Locale, _ = locale.getdefaultlocale()
logger.warning("Unsupported locale %s, reverting to %s" % (i[1], self.Locale))
elif j == "Coding".lower(): self.Coding = i[1]
elif j == "ExportSingleDir".lower(): self.ExportSingleDir = configparser.getboolean("Selector", "ExportSingleDir")
elif j == "WebPageAnchor".lower(): self.WebPageAnchor = i[1]
elif j == "SlideShowDelay".lower(): self.SlideShowDelay = float(i[1])
elif j == "SlideShowMinRating".lower(): self.SlideShowMinRating = min(5, max(0, int(i[1])))
elif j == "SlideShowType".lower(): self.SlideShowType = i[1]
elif j == "SynchronizeRep".lower(): self.SynchronizeRep = i[1]
elif j == "SynchronizeType".lower(): self.SynchronizeType = i[1]
elif j == "ImageCache".lower(): self.ImageCache = int(i[1])
elif j == "ImageWidth".lower(): self.ImageWidth = int(i[1])
elif j == "ImageHeight".lower(): self.ImageHeight = int(i[1])
elif j == "gimp".lower(): self.Gimp = i[1]
elif j == "dcraw".lower(): self.Dcraw = i[1]
elif j == "SelectedFilter".lower(): self.SelectedFilter = i[1]
else: logging.warning(str("Config.load: unknown key %s" % j))
for k in ["ScaledImages", "Thumbnails"]:
try:
dico = eval(k)
except:
dico = {}
for i in configparser.items(k):
j = i[0]
if j == "Size".lower():dico["Size"] = int(i[1])
elif j == "Suffix".lower():dico["Suffix"] = i[1]
elif j == "Interpolation".lower():dico["Interpolation"] = int(i[1])
elif j == "Progressive".lower():dico["Progressive"] = configparser.getboolean(k, "Progressive")
elif j == "Optimize".lower():dico["Optimize"] = configparser.getboolean(k, "Optimize")
elif j == "ExifExtraction".lower():dico["ExifExtraction"] = configparser.getboolean(k, "ExifExtraction")
elif j == "Quality".lower():dico["Quality"] = int(i[1])
self.__setattr__(k, dico)
# exec("self.%s=dico" % k)
# Read Video options
try:
for i in configparser.items("Video"):
j = i[0]
if j == "ScratchDir".lower(): self.ScratchDir = os.path.abspath(i[1])
elif j == "VideoBitRate".lower(): self.VideoBitRate = int(i[1])
elif j == "AudioBitRatePerChannel".lower(): self.AudioBitRatePerChannel = int(i[1])
elif j == "X264Options".lower(): self.X264Options = i[1]
elif j == "FramesPerSecond".lower(): self.FramesPerSecond = float_or_None(i[1])
elif j == "MPlayer".lower(): self.MPlayer = os.path.abspath(i[1])
elif j == "MEncoder".lower(): self.MEncoder = os.path.abspath(i[1])
elif j == "Sox".lower(): self.Sox = os.path.abspath(i[1])
elif j == "Convert".lower(): self.Convert = os.path.abspath(i[1])
elif j == "AviMerge".lower(): self.AviMerge = os.path.abspath(i[1])
elif j == "VideoExtensions".lower(): self.VideoExtensions = i[1].split()
elif j == "ThumbnailExtensions".lower(): self.ThumbnailExtensions = i[1].split()
elif j == "BatchScriptExecutor".lower(): self.BatchScriptExecutor = os.path.abspath(i[1])
elif j == "BatchUsesPipe".lower(): self.BatchUsesPipe = configparser.getboolean("Video", "BatchUsesPipe")
else: logging.warning(str("Config.load: unknown key %s" % j))
except ConfigParser.NoSectionError:
logging.warning("No Video section in configuration file !")
if resource:
max_files = resource.getrlimit(resource.RLIMIT_NOFILE)[0] - 15
if max_files < self.ImageCache:
self.ImageCache = max_files
if self.Interpolation > 1:
self.Interpolation = 1
if self.Interpolation < 0:
self.Interpolation = 0
def __repr__(self):
logging.debug("Config.__repr__")
listtxt = ["",
"Size on the images on the Screen: %s pixels in the largest dimension" % self.ScreenSize,
"Page prefix:\t\t\t %s" % self.PagePrefix,
"Number of images per page:\t %s" % self.NbrPerPage,
"Use Exif for Auto-Rotate:\t %s" % self.AutoRotate,
"Default mode for files (octal):\t %o" % self.DefaultFileMode,
"JPEG extensions:\t\t %s" % self.Extensions,
"Default photo repository:\t %s" % self.DefaultRepository,
"Add signature for exported images:%s" % self.Filigrane,
"Backup media size (CD,DVD):\t %s MByte" % self.MediaSize,
"Scaled imagesSize:\t\t %s pixels in the largest dimension" % self.ScaledImages["Size"],
"Thumbnail Size:\t\t\t %s pixels in the largest dimension" % self.Thumbnails["Size"],
"Caching of %s images " % self.ImageCache
]
return os.linesep.join(listtxt)
def printConfig(self):
"""
Print out the configuration
"""
logging.debug("Config.printConfig")
logging.info(self.__repr__())
    def saveConfig(self, filename):
        """Backward-compatible alias that simply delegates to save().

        @param filename: path of the file to write the configuration to
        """
        self.save(filename)
def save(self, filename):
"""
Saves the default options to file
@param filename: name of the file to save the configuration to
@type filename: string or unicode
"""
logging.debug("Config.save")
lsttxt = ["[Selector]",
"#Size of the image on the Screen, by default", "ScreenSize: %s" % self.ScreenSize, "",
"#Downsampling quality [0=nearest, 1=bilinear]", "Interpolation: %s" % self.Interpolation, "",
"#Page prefix (used when there are too many images per day to fit on one web page)", "PagePrefix: %s" % self.PagePrefix, "",
"#Maximum number of images per web page", "NbrPerPage: %s" % self.NbrPerPage, "",
"#Trash sub-directory", "TrashDirectory: %s" % self.TrashDirectory, "",
"#Selected/processed images sub-directory", "SelectedDirectory: %s" % self.SelectedDirectory, "",
"#File containing the list of selected but unprocessed images", "Selected_save: %s" % self.Selected_save, "",
"#Use Exif data for auto-rotation of the images (canon cameras mainly)", "AutoRotate: %s" % self.AutoRotate, "",
"#Default mode for files (in octal)", "DefaultFileMode: %o" % self.DefaultFileMode, "",
"#Default JPEG extensions", "Extensions: " + " ".join(self.Extensions), "",
"#Default Raw images extensions", "RawExtensions: " + " ".join(self.RawExtensions), "",
"#Default photo repository", "DefaultRepository: %s" % self.DefaultRepository, "",
"#Size of the backup media (in MegaByte)", "MediaSize: %s" % self.MediaSize, "",
"#Add signature to web published images", "Filigrane: %s" % self.Filigrane, "",
"#File containing the image of the signature for the filigrane", "FiligraneSource: %s" % self.FiligraneSource, "",
"#Position of the filigrane : 0=center 12=top center 1=upper-right 3=center-right...", "FiligranePosition: %s" % self.FiligranePosition, "",
"#Quality of the saved image in filigrane mode (JPEG quality)", "FiligraneQuality: %s" % self.FiligraneQuality, "",
"#Optimize the filigraned image (2 pass JPEG encoding)", "FiligraneOptimize: %s" % self.FiligraneOptimize, "",
"#Progressive JPEG for saving filigraned images", "FiligraneProgressive: %s" % self.FiligraneProgressive, "",
"#File containing the description of the day in each directory", "CommentFile: %s" % self.CommentFile, "",
"#Style of the dirindex web pages, either <<list>> or <<table>>, the latest includes thumbnail photos", "WebDirIndexStyle: %s" % self.WebDirIndexStyle, "",
"#System command to use to burn a CD or a DVD", "# $Selected will be replaced by the directory where the files are", "Burn: %s" % self.Burn, "",
"#System command to copy the selection to the server", "# $Selected will be replaced by the directory where the files are", "# $WebRepository will be replaced by the directory of the root of generator", "WebServer: %s" % self.WebServer, "",
"#The location of the root of generator", "WebRepository: %s" % self.WebRepository, "",
"#The localization code, fr_FR is suggested for unix or FR for win32", "Locale: %s" % self.Locale, "",
"#Default encoding for text files, latin-1 is suggested,UTF-8 should be possible", "Coding: %s" % self.Coding, "",
"#All selected photos should be exported in a single directory", "ExportSingleDir: %s" % self.ExportSingleDir, "",
"#Where should the dirindex page start-up ? [begin/end] ", "WebPageAnchor: %s" % self.WebPageAnchor, "",
"#Delay between imges in the slideshow? ", "SlideShowDelay: %s" % self.SlideShowDelay, "",
"#Type of slideshow : chronological, anti-chronological or random ?", "SlideShowType: %s" % self.SlideShowType, "",
"#Minimum rating of an image to appear in the slidesho [0-5]", "SlideShowMinRating: %i" % self.SlideShowMinRating, "",
"#Remote repository to synchronize with (rsync like)", "SynchronizeRep: %s" % self.SynchronizeRep, "",
"#Synchronization type, acceptable values are Newer, Older, Selected and All", "SynchronizeType: %s" % self.SynchronizeType, "",
"#Allow the creation of a Cache of images with the given size in number of images", "ImageCache: %s" % self.ImageCache, "",
"#Gnu Image Manipulation Program (GIMP) path to executable", "Gimp: %s" % self.Gimp, "",
"#Digital Camera Raw (dcraw) extraction program and option (-w -c is suggested)", "Dcraw: %s" % self.Dcraw, "",
"#Filter selected by default for image processing: ContrastMask, AutoWB, ...", "SelectedFilter: %s" % self.SelectedFilter, ""]
if self.ImageWidth is not None:
lsttxt += ["#Width of the last image displayed ... should not be modified", "ImageWidth:%s" % self.ImageWidth, ""]
if self.ImageHeight is not None:
lsttxt += ["#Height of the last image displayed ... should not be modified", "ImageHeight:%s" % self.ImageHeight, ""]
for i in ["ScaledImages", "Thumbnails"]:
lsttxt += ["[%s]" % i, ""]
j = eval("self.%s" % i)
lsttxt += ["#%s size" % i, "Size: %s" % j["Size"], ""]
lsttxt += ["#%s suffix" % i, "Suffix: %s" % j["Suffix"], ""]
lsttxt += ["#%s downsampling quality [0=nearest, 1=antialias 2=bilinear, 3=bicubic]" % i, "Interpolation: %s" % j["Interpolation"], ""]
lsttxt += ["#%s progressive JPEG files" % i, "Progressive: %s" % j["Progressive"], ""]
lsttxt += ["#%s optimized JPEG (2 pass encoding)" % i, "Optimize: %s" % j["Optimize"], ""]
lsttxt += ["#%s quality (in percent)" % i, "Quality: %s" % j["Quality"], ""]
lsttxt += ["#%s image can be obtained by Exif extraction ?" % i, "ExifExtraction: %s" % j["ExifExtraction"], ""]
lsttxt += ["[Video]",
"#Directory where you want PBS to work? (/tmp)", "ScratchDir: %s" % self.ScratchDir, "",
"#Video bit rate Higher is better but bigger (600)", "VideoBitRate: %s" % self.VideoBitRate, "",
"#audio bit rate per ausio channel (x2 for stereo), default=64", "AudioBitRatePerChannel: %s" % self.AudioBitRatePerChannel, "",
"#Options to be used for the X264 encoder (man mencoder)", "X264Options: %s" % self.X264Options, "",
"#Number of Frames per secondes in the video (25):", "FramesPerSecond: %s" % self.FramesPerSecond, "",
"#Path to the mplayer (mplayer package) executable", "MPlayer: %s" % self.MPlayer, "",
"#Path to the mencoder (mplayer package) executable", "MEncoder: %s" % self.MEncoder, "",
"#Path to the sox (Sound processing) executable", "Sox: %s" % self.Sox, "",
"#Path to the convert (imagemagick package) executable", "Convert: %s" % self.Convert, "",
"#Path to the avimerge (transcode package) executable", "AviMerge: %s" % self.AviMerge, "",
"#List of video extensions", "VideoExtensions: %s" % " ".join(self.VideoExtensions), "",
"#list of thumbnail extension related to videos", "ThumbnailExtensions: %s" % " ".join(self.ThumbnailExtensions), "",
"#Batch queueing system launcher (/bin/sh if none present)", "BatchScriptExecutor: %s" % self.BatchScriptExecutor, "",
"#Batch queuing needs a pipe (like batch) or not (like PBS)", "BatchUsesPipe: %s" % self.BatchUsesPipe, "",
]
w = open(filename, "w")
w.write(os.linesep.join(lsttxt))
w.close()
if self.DEBUG:
logging.info(str("Configuration saved to file %s" % filename))
config = Config(listConfigurationFiles)
|
"""
Tests for the module firecrown.parameters.
"""
import pytest
import numpy as np
from firecrown.parameters import RequiredParameters, parameter_get_full_name, ParamsMap
from firecrown.parameters import (
DerivedParameterScalar,
DerivedParameterCollection,
create,
InternalParameter,
SamplerParameter,
)
def test_create_with_no_arg():
    """parameters.create() without arguments yields a SamplerParameter."""
    param = create()
    assert isinstance(param, SamplerParameter)


def test_create_with_float_arg():
    """parameters.create() with a float yields an InternalParameter holding it."""
    param = create(1.5)
    assert isinstance(param, InternalParameter)
    assert param.value == 1.5


def test_create_with_wrong_arg():
    """parameters.create() rejects values that are neither float nor None."""
    with pytest.raises(TypeError):
        _ = create("cow")  # type: ignore
def test_get_params_names_does_not_allow_mutation():
    """The caller of RequiredParameters.get_params_names should not be able to modify
    the state of the object on which the call was made."""
    orig = RequiredParameters(["a", "b"])
    names = set(orig.get_params_names())
    # Fix: the same equality was asserted twice ({"a","b"} == {"b","a"});
    # sets are unordered so the duplicate added no coverage.
    assert names == {"a", "b"}
    # Mutating the returned collection must not leak back into the object.
    names.add("c")
    assert set(orig.get_params_names()) == {"a", "b"}
def test_params_map():
    """ParamsMap lookups succeed without a prefix and miss loudly otherwise."""
    pmap = ParamsMap({"a": 1})
    assert pmap.get_from_prefix_param(None, "a") == 1
    with pytest.raises(KeyError):
        _ = pmap.get_from_prefix_param("no_such_prefix", "a")
    with pytest.raises(KeyError):
        _ = pmap.get_from_prefix_param(None, "no_such_name")
def test_parameter_get_full_name_reject_empty_name():
    """An empty name is rejected regardless of the prefix."""
    for prefix in (None, "cow"):
        with pytest.raises(ValueError):
            _ = parameter_get_full_name(prefix, "")


def test_parameter_get_full_name_with_prefix():
    """A prefix is joined to the name with an underscore separator."""
    # TODO: do we really want to allow underscores in parameter names, when we
    # are using the underscore as our separator?
    assert parameter_get_full_name("my_prefix", "my_name") == "my_prefix_my_name"


def test_parameter_get_full_name_without_prefix():
    """Without a prefix the name is returned unchanged."""
    assert parameter_get_full_name(None, "nomen_foo") == "nomen_foo"
def test_derived_parameter_scalar():
    """A scalar derived parameter stores a float and composes its full name."""
    dp = DerivedParameterScalar("sec1", "name1", 3.14)
    value = dp.get_val()
    assert isinstance(value, float)
    assert value == 3.14
    assert dp.get_full_name() == "sec1--name1"


def test_derived_parameter_wrong_type():
    """Non-float payloads (str, list, ndarray) are rejected with TypeError."""
    for bad_value in ("not a float", [3.14], np.array([3.14])):
        with pytest.raises(TypeError):
            _ = DerivedParameterScalar(  # pylint: disable-msg=E0110,W0612
                "sec1", "name1", bad_value  # type: ignore
            )
def test_derived_parameters_collection():
    """Mutating the list returned by get_derived_list must not alter the collection."""
    original_params = [
        DerivedParameterScalar("sec1", "name1", 3.14),
        DerivedParameterScalar("sec2", "name2", 2.72),
    ]
    collection = DerivedParameterCollection(original_params)
    leaked = collection.get_derived_list()
    leaked.append(DerivedParameterScalar("sec3", "name3", 0.58))
    assert collection.get_derived_list() == original_params


def test_derived_parameters_collection_add():
    """Adding None to a collection is a no-op that keeps iteration intact."""
    params = [
        DerivedParameterScalar("sec1", "name1", 3.14),
        DerivedParameterScalar("sec2", "name2", 2.72),
        DerivedParameterScalar("sec2", "name3", 0.58),
    ]
    combined = DerivedParameterCollection(params) + None
    for (section, name, val), expected in zip(combined, params):
        assert section == expected.section
        assert name == expected.name
        assert val == expected.get_val()


def test_derived_parameters_collection_add_iter():
    """Adding two collections concatenates their parameters in order."""
    first = [
        DerivedParameterScalar("sec1", "name1", 3.14),
        DerivedParameterScalar("sec2", "name2", 2.72),
        DerivedParameterScalar("sec2", "name3", 0.58),
    ]
    second = [
        DerivedParameterScalar("sec3", "name1", 3.14e1),
        DerivedParameterScalar("sec3", "name2", 2.72e1),
        DerivedParameterScalar("sec3", "name3", 0.58e1),
    ]
    combined = DerivedParameterCollection(first) + DerivedParameterCollection(second)
    for (section, name, val), expected in zip(combined, first + second):
        assert section == expected.section
        assert name == expected.name
        assert val == expected.get_val()
|
#-------------------------------------
# Project: Lightweight Industrial Image Classifier based on Federated Few-Shot Learning
# code is based on https://github.com/floodsung/LearningToCompare_FSL
#-------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.optim.lr_scheduler import StepLR
import numpy as np
import task_generator_chip as tg
import os
import math
import argparse
from ecci_sdk import Client
import threading
# Command-line hyper-parameters for the few-shot relation-network client.
# NOTE(review): parse_args() runs at import time, so importing this module
# consumes sys.argv -- verify that is intended before reusing it as a library.
parser = argparse.ArgumentParser(description="One Shot Visual Recognition")
parser.add_argument("-f","--feature_dim",type = int, default = 32)
parser.add_argument("-r","--relation_dim",type = int, default = 8)
parser.add_argument("-w","--class_num",type = int, default = 2)
parser.add_argument("-s","--sample_num_per_class",type = int, default = 1)
parser.add_argument("-b","--batch_num_per_class",type = int, default = 1)
parser.add_argument("-e","--episode",type = int, default= 1000000)
parser.add_argument("-t","--test_episode", type = int, default = 100)
# parser.add_argument("-l","--learning_rate", type = float, default = 0.001)
parser.add_argument("-g","--gpu",type=int, default=0)
parser.add_argument("-u","--hidden_unit",type=int,default=10)
args = parser.parse_args()
def omniglot_character_folders():
    """Return (train_folders, test_folders): the per-class sub-directories
    under ./new_client/test (the same root is used for both splits here)."""
    train_root = './new_client/test'
    test_root = './new_client/test'
    train_folders = [
        os.path.join(train_root, entry)
        for entry in os.listdir(train_root)
        if os.path.isdir(os.path.join(train_root, entry))
    ]
    test_folders = [
        os.path.join(test_root, entry)
        for entry in os.listdir(test_root)
        if os.path.isdir(os.path.join(test_root, entry))
    ]
    return train_folders, test_folders
class CNNEncoder(nn.Module):
    """Two-block convolutional feature extractor for single-channel images.

    Each block is 3x3 conv (no padding) -> BatchNorm -> ReLU -> 2x2 max-pool,
    so a 28x28 input becomes a (32, 5, 5) feature map.
    """
    def __init__(self):
        super(CNNEncoder, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=0),
            nn.BatchNorm2d(32, momentum=1, affine=True),
            nn.ReLU(),
            nn.MaxPool2d(2))
        self.layer2 = nn.Sequential(
            nn.Conv2d(32, 32, kernel_size=3, padding=0),
            nn.BatchNorm2d(32, momentum=1, affine=True),
            nn.ReLU(),
            nn.MaxPool2d(2))

    def forward(self, x):
        """Apply both conv blocks and return the feature map."""
        return self.layer2(self.layer1(x))
class RelationNetwork(nn.Module):
    """Relation module: scores the similarity of a concatenated feature pair.

    Input is a (N, 64, 5, 5) tensor of concatenated sample/query feature
    maps; output is a (N, 1) tensor of relation scores in [0, 1].
    """
    def __init__(self, input_size, hidden_size):
        super(RelationNetwork, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(32 * 2, 32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32, momentum=1, affine=True),
            nn.ReLU(),
            nn.MaxPool2d(2))
        self.layer2 = nn.Sequential(
            nn.Conv2d(32, 32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32, momentum=1, affine=True),
            nn.ReLU(),
            nn.MaxPool2d(2))
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, 1)

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.view(out.size(0), -1)
        out = F.relu(self.fc1(out))
        # Fix: F.sigmoid is deprecated (and removed in recent torch releases);
        # torch.sigmoid is the supported equivalent.
        out = torch.sigmoid(self.fc2(out))
        return out
def main():
    """Edge-client evaluation loop: receive the latest models over ECCI/MQTT
    and periodically measure few-shot test accuracy on the GPU."""
    # connect to the ECCI broker in a background thread
    ecci_client = Client()
    mqtt_thread = threading.Thread(target=ecci_client.initialize)
    mqtt_thread.start()
    ecci_client.wait_for_ready()
    FEATURE_DIM = args.feature_dim
    CLASS_NUM = args.class_num
    SAMPLE_NUM_PER_CLASS = 1
    BATCH_NUM_PER_CLASS = args.batch_num_per_class
    EPISODE = args.episode
    TEST_EPISODE = args.test_episode
    # LEARNING_RATE = args.learning_rate
    GPU = args.gpu
    # HIDDEN_UNIT = args.hidden_unit
    metatrain_character_folders, metatest_character_folders = omniglot_character_folders()
    for episode in range(EPISODE):
        # block until the aggregator pushes fresh model objects
        data_msg_queue = ecci_client.get_sub_data_payload_queue()
        data_msg = data_msg_queue.get()
        feature_encoder = data_msg['feature_encoder']
        relation_network = data_msg['relation_network']
        if (episode == 0) or (episode == 1) or (episode % 10 == 0):
            print("Testing...")
            total_rewards = 0
            for i in range(TEST_EPISODE):
                task = tg.OmniglotTask(metatest_character_folders,CLASS_NUM,SAMPLE_NUM_PER_CLASS,SAMPLE_NUM_PER_CLASS,)
                sample_dataloader = tg.get_data_loader(task,num_per_class=SAMPLE_NUM_PER_CLASS,split="train",shuffle=False)
                test_dataloader = tg.get_data_loader(task,num_per_class=SAMPLE_NUM_PER_CLASS,split="test",shuffle=True)
                # Fix: iterator.next() is Python 2 syntax and raises
                # AttributeError on Python 3; use the builtin next().
                sample_images,sample_labels = next(iter(sample_dataloader))
                test_images,test_labels = next(iter(test_dataloader))
                sample_features = feature_encoder(Variable(sample_images).cuda(GPU)) # 5x64
                test_features = feature_encoder(Variable(test_images).cuda(GPU)) # 20x64
                sample_features_ext = sample_features.unsqueeze(0).repeat(SAMPLE_NUM_PER_CLASS*CLASS_NUM,1,1,1,1)
                test_features_ext = test_features.unsqueeze(0).repeat(SAMPLE_NUM_PER_CLASS*CLASS_NUM,1,1,1,1)
                test_features_ext = torch.transpose(test_features_ext,0,1)
                relation_pairs = torch.cat((sample_features_ext,test_features_ext),2).view(-1,FEATURE_DIM*2,5,5)
                relations = relation_network(relation_pairs).view(-1,CLASS_NUM)
                _,predict_labels = torch.max(relations.data,1)
                rewards = [1 if predict_labels[j]==test_labels[j].cuda(GPU) else 0 for j in range(CLASS_NUM)]
                total_rewards += np.sum(rewards)
            test_accuracy = total_rewards/1.0/CLASS_NUM/TEST_EPISODE
            print("episode:"+','+ str(episode)+','+"test accuracy:" + ',' + str(test_accuracy))
# Run the client only when executed as a script (not on import).
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# For each of t test cases: n is representable as 2020*a + 2021*b
# exactly when n // 2020 >= n % 2020.
test_cases = int(input())
for _ in range(test_cases):
    n = int(input())
    print("YES" if n // 2020 >= n % 2020 else "NO")
|
import numpy as np
from sklearn.datasets import load_iris
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from sklearn.metrics import accuracy_score
from sklearn.svm import LinearSVC, SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression # 이진 분류
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
# 1. Load the iris data set
dataset = load_iris()
x = dataset.data
y = dataset.target
print(x.shape, y.shape) # (150, 4) ,(150,)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, shuffle=True, random_state=45)
kfold = KFold(n_splits=5, shuffle=True)
# 2. Score every classifier with 5-fold cross validation on the training split
for clf in [LinearSVC, SVC, KNeighborsClassifier, DecisionTreeClassifier, RandomForestClassifier]:
    print()
    model = clf()
    scores = cross_val_score(model, x_train, y_train, cv=kfold)
    print(clf.__name__ + '\'s score(acc) :', scores)
'''
LinearSVC's score(acc) : [0.91666667 0.95833333 0.95833333 0.95833333 0.95833333]
SVC's score(acc) : [0.91666667 1. 0.91666667 0.95833333 0.91666667]
KNeighborsClassifier's score(acc) : [0.91666667 0.91666667 1. 0.95833333 0.95833333]
DecisionTreeClassifier's score(acc) : [1. 0.95833333 0.95833333 0.91666667 0.91666667]
RandomForestClassifier's score(acc) : [0.95833333 0.91666667 1. 0.91666667 0.91666667]
''' |
#!/usr/bin/env python3
from sensor_msgs.msg import Image
from nav_msgs.msg import Odometry
from rospy_wrapper import ros_wrapper
class SLAMWrapper():
    """Thin facade over ros_wrapper that maps short frame keys to the
    RealSense/rtabmap ROS topics and their message types."""
    RGB_RAW = 'rgb'
    DEPTH_RAW = 'depth'
    INFRA1_RAW = 'infra1'
    INFRA2_RAW = 'infra2'
    ODOMETRY = 'odom'

    def __init__(self):
        # topic paths keyed by frame identifier
        self.topics = {
            self.RGB_RAW: "/camera/color/image_raw",
            self.DEPTH_RAW: "/camera/depth/image_rect_raw",
            self.INFRA1_RAW: "/camera/infra1/image_rect_raw",
            self.INFRA2_RAW: "/camera/infra2/image_rect_raw",
            self.ODOMETRY: "/rtabmap/odom",
        }
        # message classes keyed by frame identifier
        self.types = {
            self.RGB_RAW: Image,
            self.DEPTH_RAW: Image,
            self.INFRA1_RAW: Image,
            self.INFRA2_RAW: Image,
            self.ODOMETRY: Odometry,
        }
        self.ros_wrapper = ros_wrapper()

    def subscribe_batch(self, frames, callback):
        """Subscribe one time-synchronized callback to several frames."""
        selected_topics = [self.topics[frame] for frame in frames]
        selected_types = [self.types[frame] for frame in frames]
        self.ros_wrapper.add_synced_topics(selected_topics, selected_types, callback)

    def subscribe_single(self, frame, callback):
        """Subscribe a callback to a single frame's topic."""
        self.ros_wrapper.add_topic(self.topics[frame], self.types[frame], callback)

    def spin(self):
        """Block and process incoming messages."""
        self.ros_wrapper.spin()

    def start(self):
        """Start the underlying ROS wrapper."""
        self.ros_wrapper.start()
|
class Persona():
    """Simple person record: id number (cedula), name and age."""

    def __init__(self, vCedula, vNombre, vEdad):
        self.cedula = vCedula
        self.nombre = vNombre
        self.edad = vEdad

    def __str__(self):
        # same legacy wording/format as before
        return f"El objeto es {self.cedula},{self.nombre},{self.edad}"
class IngSistemas(Persona):
    """A systems engineer: a Persona plus the university they attended."""

    def __init__(self, vCedula, vNombre, vEdad, vUniversidad):
        # explicit base-class call kept so the MRO behaviour is unchanged
        Persona.__init__(self, vCedula, vNombre, vEdad)
        self.Universidad = vUniversidad

    def programar(self, vLenguaje):
        """Print which language this engineer programs in, and where."""
        print("Programar en ", vLenguaje, " en la U ", self.Universidad)
class Abogado(Persona):
    """A lawyer: a Persona that can review numbered legal cases."""
    def EstudiaCaso(self,numCaso):
        # NOTE(review): message kept verbatim (including the "estuadia" typo)
        # because it is runtime output.
        print("Se estuadia el caso ",numCaso)
# Multiple inheritance: MRO is Estudiante -> IngSistemas -> Abogado -> Persona,
# so constructing an Estudiante requires IngSistemas.__init__'s four arguments.
class Estudiante (IngSistemas,Abogado):
    pass
# Demo: Estudiante combines IngSistemas and Abogado behaviour.
objeto = Estudiante(25450650, "Freddy", 37, "UTN")
print(objeto)
objeto.EstudiaCaso(1020)
# Fix: removed `aaslkdjf = Estudiante()` -- Estudiante inherits
# IngSistemas.__init__, which requires four arguments, so the
# no-argument call always raised TypeError and crashed the script.
#objeto=Estudiante(25450650,"Freddy",25)
#objeto.EstudiaCaso(1010)
#objeto.programar("Python")
#print(objeto)
|
__author__ = 'pschiffmann'
import pandas as pd
import numpy as np
from random import randint
class smartkitdata(object):
    """Growing buffer of simulated two-channel sensor readings (0..1023)."""

    def __init__(self):
        # start with a single two-value sample; update_data() stacks more rows
        self._data = self.gen_data()

    def get_data(self, length=60):
        """Return the last `length` samples as a DataFrame."""
        return pd.DataFrame(self._data)[-length:]

    def get_data_smooth(self, length=60, smooth=10):
        """Return the last `length` samples smoothed with a `smooth`-wide rolling mean.

        Fix: pd.rolling_mean() was removed from pandas (0.18 deprecation);
        DataFrame.rolling(window).mean() is the supported equivalent.
        """
        return pd.DataFrame(self._data[-(length + smooth):]).rolling(smooth).mean()[-length:]

    def update_data(self):
        """Append one freshly generated sample row to the buffer."""
        self._data = np.vstack([self._data, self.gen_data()])

    def gen_data(self):
        """Generate one random two-channel sample (10-bit ADC range)."""
        return np.array([randint(0, 1023), randint(0, 1023)])
if __name__ == "__main__":
    # Fix: `print data.get_data()` was Python 2 print-statement syntax,
    # a SyntaxError under Python 3 (which this file otherwise targets).
    data = smartkitdata()
    for i in range(10):
        print(data.get_data())
# collecting relation surfaces from OpenIE
# http://openie.allenai.org/search?arg1=book&rel=&arg2=entertainment&corpora=
import requests
import re
from multiprocessing.dummy import Pool
import time
surface_dict = None
def get_relation(subj, obj):
    """Query OpenIE for relation surfaces between subj and obj and
    accumulate their frequencies into the module-level surface_dict.

    surface_dict must be initialized (see get_relation_templates) before
    this function is called. On a non-200 response the URL and status are
    printed, the worker sleeps 1s and the pair is skipped.
    """
    global surface_dict
    url = "http://openie.allenai.org/search?arg1=%s&rel=&arg2=%s&corpora=" % (subj, obj)
    page = requests.get(url)
    # print(url)
    if page.status_code != 200:
        print(url.encode("utf-8"))
        print(page.status_code)
        time.sleep(1)
        return
    content = page.text
    surfaces = []
    if len(content) >= 1000:
        # Fix: the frequency group was (\d), which silently truncated any
        # count of 10 or more to its first digit; (\d+) captures the whole
        # number.
        pat = re.compile('<span class="title-string">*(.*)\s*<\/span>\s*\((\d+)\)<\/a><\/li>')
        for r in pat.findall(content):
            # r[0] is the relation surface, r[1] is the frequency
            surfaces.append((r[0], int(r[1])))
    for s, f in surfaces:
        if s not in surface_dict:
            surface_dict[s] = 0
        surface_dict[s] += f
def get_relation_templates(concept_pairs):
    """Fetch relation surfaces for every (subj, obj) pair concurrently and
    return the aggregated surface -> frequency dictionary."""
    global surface_dict
    surface_dict = {}
    # Sequential fallback:
    # for subj, obj in concept_pairs:
    #     get_relation(subj, obj)
    with Pool(10) as workers:
        workers.starmap(get_relation, concept_pairs)
    return surface_dict
|
#!/usr/bin/python
import random, sys, string
import argparse
class shuf:
    """Holds the input lines and offers shuffling / random selection."""

    def __init__(self, filename):
        # Fix: "-" (or an empty name) conventionally means "read standard
        # input"; the old code left self.lines as the empty string, which
        # made chooseline() raise IndexError and shuffling() return "".
        if filename == "-" or filename == "":
            self.lines = sys.stdin.readlines()
        else:
            # context manager closes the file even if readlines() fails
            with open(filename, 'r') as f:
                self.lines = f.readlines()

    def shuffling(self):
        """Shuffle the stored lines in place and return them."""
        random.shuffle(self.lines)
        return self.lines

    def chooseline(self):
        """Return one uniformly random line."""
        return random.choice(self.lines)
def main():
    """Command-line driver mimicking GNU shuf: supports -n (head count),
    -e (echo operands), -i (integer range lo-hi) and -r (repeat)."""
    usage_msg = """%prog [OPTION]... FILE
Output randomly selected lines from FILE."""
    parser = argparse.ArgumentParser()
    parser.add_argument('infile', nargs='?',action="store", default='')
    parser.add_argument("-n", "--head-count",action="store", dest="numlines", help="output NUMLINES lines")
    parser.add_argument("-e","--echo",action="store", nargs="+",help="treat each command line operand as an input line")
    parser.add_argument("-i","--input_range",dest="inprange",action="store",help="Act as if input came from a file containing the range of unsigned decimal integers lo…hi, one per line.")
    parser.add_argument("-r","--repeat",action="store_true",help="Repeat output values, that is, select with replacement. With this option the output is not a permutation of the input; instead, each output line is randomly chosen from all the inputs. This option is typically combined with --head-count; if --head-count is not given, shuf repeats indefinitely.")
    args = parser.parse_args()
    # -n: cap on the number of output lines
    hc_fl = False
    if args.numlines:
        hc_fl = True
        numlines = int(args.numlines)
        if numlines < 1:
            parser.error("negative count")
    # -i: synthesize the input from an integer range "lo-hi"
    rng_fl = False
    if args.inprange:
        rng_fl = True
        input_rng = args.inprange.split('-')
        if len(input_rng) != 2:
            parser.error("invalid input range")
        low = int(input_rng[0])
        high = int(input_rng[1])
        if high < low:
            parser.error("low to high input")
        rangelist = [str(i) for i in range(low, high + 1)]
        random.shuffle(rangelist)
    inputfile = args.infile
    generator = shuf(inputfile)
    # -e: the operands themselves are the input lines
    echo_fl = False
    if args.echo:
        echo_fl = True
        if args.echo == "":
            parser.error("enter stdin for echo")
        echo_list = args.echo
        random.shuffle(echo_list)
    if args.repeat and hc_fl:
        # -r -n: sample with replacement, exactly numlines outputs per source
        if echo_fl == False:
            if rng_fl == False:
                for i in range(0, numlines):
                    sys.stdout.write(generator.chooseline())
            elif rng_fl == True:
                for i in range(0, numlines):
                    print(random.choice(rangelist))
        elif echo_fl == True:
            if rng_fl == False:
                for i in range(0, numlines):
                    print(random.choice(echo_list))
            elif rng_fl:
                for x in range(0, numlines):
                    print(random.choice(rangelist))
                for i in range(0, numlines):
                    print(random.choice(echo_list))
    elif args.repeat and hc_fl == False:
        # -r without -n: sample with replacement indefinitely
        if echo_fl == False:
            if rng_fl == False:
                while True:
                    sys.stdout.write(generator.chooseline())
            elif rng_fl == True:
                # Fix: this branch printed from echo_list, which is undefined
                # when only -i was given (NameError); the range list is the
                # input source here.
                while True:
                    print(random.choice(rangelist))
        elif echo_fl == True:
            if rng_fl == False:
                while True:
                    print(random.choice(echo_list))
            elif rng_fl == True:
                # Fix: `while true` raised NameError -- Python spells it True.
                while True:
                    print(random.choice(echo_list))
                    print(random.choice(rangelist))
    elif hc_fl and args.repeat == False:
        # -n without -r: a permutation truncated to numlines entries
        if echo_fl == False:
            if rng_fl == False:
                temp = generator.shuffling()
                if numlines <= len(generator.lines):
                    for i in range(0, numlines):
                        sys.stdout.write(temp[i])
                elif numlines > len(generator.lines):
                    for x in generator.shuffling():
                        sys.stdout.write(x)
            elif rng_fl == True:
                # Fix: narrowed the bare `except:` to the IndexError that the
                # out-of-range access can actually raise.
                try:
                    for i in range(0, numlines):
                        print(rangelist[i])
                except IndexError:
                    for i in rangelist:
                        print(i)
        elif echo_fl == True:
            if rng_fl == False:
                if numlines <= len(echo_list):
                    for i in range(0, numlines):
                        print(echo_list[i])
                elif numlines > len(echo_list):
                    for i in echo_list:
                        print(i)
            elif rng_fl == True:
                if numlines <= len(echo_list) and numlines <= len(rangelist):
                    for i in range(0, numlines):
                        print(echo_list[i])
                        print(rangelist[i])
                else:
                    for i in echo_list:
                        print(i)
                    for j in rangelist:
                        print(j)
    elif rng_fl and args.repeat == False and hc_fl == False and echo_fl == False:
        for x in rangelist:
            print(x)
    elif echo_fl and args.repeat == False and hc_fl == False and rng_fl == False:
        for x in echo_list:
            print(x)
    else:
        # default: full permutation of the input file
        for x in generator.shuffling():
            sys.stdout.write(x)
|
'''Question 6
Level 2
Question:
Write a program that calculates and prints the value according to the given formula:
Q = Square root of [(2 * C * D)/H]
Following are the fixed values of C and H:
C is 50. H is 30.
D is the variable whose values should be input to your program in a comma-separated sequence.
Example
Let us assume the following comma separated input sequence is given to the program:
100,150,180
The output of the program should be:
18,22,24
Hints:
If the output received is in decimal form, it should be rounded off to its nearest value (for example, if the output received is 26.0, it should be printed as 26)
In case of input data being supplied to the question, it should be assumed to be a console input.'''
import math

# C = 50 and H = 30 are fixed by the problem; D values come from the input.
stuff = str(input("Please enter the variables(comma-seperated): "))
variables = stuff.split(',')
result = []
for variable in variables:
    # Q = sqrt(2*C*D/H) = sqrt(100*D/30).  Fix: the statement asks for the
    # NEAREST integer, so use round() instead of int() truncation (they
    # differ whenever the fractional part exceeds 0.5).
    result.append(round(math.sqrt(100 * int(variable) / 30)))
print(result)
from django.db import models
# Create your models here.
class User(models.Model):
    """Application user with a unique name and a stored password."""
    name = models.CharField(max_length=128, unique=True)
    password = models.CharField(max_length=258)

    def __str__(self):
        return f"{self.name},{self.password}"
class Score(models.Model):
    """A named score sheet with the paths of its rendered PNG and JSON files."""
    name = models.CharField(max_length=128, unique=True)
    png_path = models.CharField(max_length=258)
    json_path = models.CharField(max_length=258)

    def __str__(self):
        return f"{self.name},{self.png_path},{self.json_path}"
|
from networkx import write_gpickle, DiGraph
from pandas import read_csv
#logging
import logging
logging.basicConfig(filename='../log/graph_optimize_log.log', \
format='%(asctime)s\t%(levelname)s,\t%(funcName)s\t%(message)s', \
level=logging.DEBUG)
'''
class Graph:
def __init__(self):
self.graph = nx.DiGraph()
def load_edges(self, filename):
edges = read_csv(filename)
self.graph.add_edges_from(edges)
def load_nodes(self, filename):
nodes = read_csv(filename)
self.graph.add_nodes_from(nodes)
def create_graph(self, nodes, edges):
graph = nx.DiGraph()
for n in nodes:
graph.load_nodes(n)
for e in edges:
graph.load_edges(e)
return graph
def save_graph(self, filename):
self.graph.to_pickle(filename)
'''
def create_graph():
    """Assemble the base routing graph.

    Reads the core master-node table plus the auto / aero / rail transport
    link tables from ../ref, splits each master node into '_in'/'_out'
    halves joined by an internal edge, adds the transport nodes and edges
    between masters, pickles the resulting DiGraph to ../result/base_graph
    and returns (core_in_names, core_out_names, graph).
    """
    logging.info('Start create graph')
    # 0 level nodes - core nodes
    core = read_csv('../ref/nodes_0_core.csv', index_col=0)
    # 1st level nodes
    #ufps = read_csv('../ref/nodes_1_ufps.csv', index_col=0)
    # 2nd level nodes
    #pochtamts = read_csv('../ref/nodes_2_pochtamts.csv', index_col=0)
    #3rd level nodes
    #ops = read_csv('../ref/nodes_3_ops_pochtamts.csv', index_col=0)
    # read core transport nodes
    auto_nodes = read_csv('../ref/0_nodes_core_auto.csv', index_col=0)
    # TODO: update grpah for contain of all node
    #auto_nodes = read_csv('../ref/cuted_auto_graph.csv', index_col=0)
    aero_nodes = read_csv('../ref/0_nodes_core_aero.csv', index_col=0)
    # DONE: add railway nodes
    railway_nodes = read_csv('../ref/0_nodes_core_rail.csv', index_col=0)
    master = node_dict_from_df(core)
    # split every master node into _in/_out halves joined by an internal edge
    total_core_nodes_in, total_core_nodes_out, \
    master_nodes_internal, master_nodes_internal_edge = generate_internal_nodes_and_edges(master)
    # create transport edges and nodes
    auto_nodes_1, auto_nodes_2, auto_edges = create_edges(auto_nodes)
    aero_nodes_1, aero_nodes_2, aero_edges = create_edges(aero_nodes)
    rail_nodes_1, rail_nodes_2, rail_edges = create_edges(railway_nodes)
    # create graph
    russian_post_graph = DiGraph()
    # this part of code create fullconnected core graph of 82 master nodes
    # adding master nodes (82 nodes) and internal edges
    russian_post_graph.add_nodes_from(master_nodes_internal)
    russian_post_graph.add_edges_from(master_nodes_internal_edge)
    # add auto nodes between masters
    russian_post_graph.add_nodes_from(auto_nodes_1)
    russian_post_graph.add_nodes_from(auto_nodes_2)
    # add avia nodes between masters
    russian_post_graph.add_nodes_from(aero_nodes_1)
    russian_post_graph.add_nodes_from(aero_nodes_2)
    # add rail nodes between masters
    russian_post_graph.add_nodes_from(rail_nodes_1)
    russian_post_graph.add_nodes_from(rail_nodes_2)
    # add transport edges between master nodes
    russian_post_graph.add_edges_from(auto_edges)
    russian_post_graph.add_edges_from(aero_edges)
    russian_post_graph.add_edges_from(rail_edges)
    logging.info('Graph created')
    logging.info('Seve graph on disk...')
    # persist the built graph so later stages can load it without rebuilding
    write_gpickle(russian_post_graph, '../result/base_graph')
    return total_core_nodes_in, total_core_nodes_out, russian_post_graph
def node_dict_from_df(df):
    """Convert a node DataFrame into a list of (idx, attrs) tuples for networkx.

    df must contain at least the columns: idx, type, capacity,
    time_transit, cost_transit, avg_cost_transit.
    """
    nodes = []
    for _, row in df.iterrows():
        attrs = {
            'type': row['type'],
            'time': row['time_transit'],
            'capacity': row['capacity'],
            'cost': row['cost_transit'],
            'avg_cost': row['avg_cost_transit'],
        }
        nodes.append((row['idx'], attrs))
    return nodes
def generate_internal_nodes_and_edges(master_nodes):
    """Split every master node into an '<idx>_in' / '<idx>_out' pair joined
    by one internal edge carrying the node's transit attributes.

    master_nodes is a list of (idx, attrs) tuples.  Returns
    (in-node ids, out-node ids, node tuples, edge tuples), all ready to be
    fed to networkx add_nodes_from / add_edges_from.
    Missing attributes default to 1, matching the original data contract.
    """
    total_nodes_in = []
    total_nodes_out = []
    master_nodes_internal = []
    master_nodes_internal_edge = []
    for idx, attrs in master_nodes:
        in_id = str(idx) + '_in'
        out_id = str(idx) + '_out'
        # The '_in' node keeps the full attribute set; '_out' only remembers
        # which master node it belongs to.
        in_attrs = {key: attrs.get(key, 1)
                    for key in ('idx', 'time', 'cost', 'avg_cost', 'type', 'capacity')}
        total_nodes_in.append(in_id)
        total_nodes_out.append(out_id)
        master_nodes_internal.append((in_id, in_attrs))
        master_nodes_internal.append((out_id, {'name': idx}))
        # Internal edge models processing inside the master node itself.
        edge_attrs = {key: attrs.get(key, 1)
                      for key in ('time', 'cost', 'avg_cost', 'type', 'capacity')}
        master_nodes_internal_edge.append((in_id, out_id, edge_attrs))
    return total_nodes_in, total_nodes_out, master_nodes_internal, master_nodes_internal_edge
def create_edges(nodes):
    """Build transport nodes and edges from a dataframe of links.

    Every dataframe row describes one link between two master nodes.  For
    each row two intermediate "transport" nodes are produced — the direct
    direction and the return direction (which uses the *_ret distance/time
    columns) — and each of them is wired as <from>_out -> transport -> <to>_in.
    Returns (direct transport nodes, return transport nodes, all edges).
    """
    def _link_node(row, src, dst, dist_col, time_col):
        # One intermediate node describing travel from row[src] to row[dst].
        name = str(row[src]) + '_' + str(row[dst]) + '_' + str(row['type'])
        return (name, {'from': row[src],
                       'to': row[dst],
                       'dist': row[dist_col],
                       'time': row[time_col],
                       'cost': row['cost'],
                       'avg_cost': row['avg_cost'],
                       'type': row['type'],
                       'time_limit': row['time_limit'],
                       'capacity': row['capacity']})

    transport_nodes = [_link_node(row, 'from', 'to', 'dist', 'time')
                       for _, row in nodes.iterrows()]
    transport_nodes_2 = [_link_node(row, 'to', 'from', 'dist_ret', 'time_ret')
                         for _, row in nodes.iterrows()]

    new_edges_transport = []
    # Direct-direction nodes first, then return-direction nodes, preserving
    # the original edge ordering.
    for name, attrs in transport_nodes + transport_nodes_2:
        # The edge into the transport node carries all link attributes ...
        new_edges_transport.append((str(attrs.get('from')) + '_out', name,
                                    {'time': attrs.get('time', 1),
                                     'dist': attrs.get('dist', 1),
                                     'cost': attrs.get('cost', 1),
                                     'avg_cost': attrs.get('avg_cost', 1),
                                     'time_limit': attrs.get('time_limit', 1),
                                     'capacity': attrs.get('capacity', 1),
                                     'type': attrs.get('type', 1)}))
        # ... while the edge out of it is attribute-free.
        new_edges_transport.append((name, str(attrs.get('to')) + '_in'))
    return transport_nodes, transport_nodes_2, new_edges_transport
|
def checkio(number):
    """Factor *number* into single-digit factors (2-9) and return them as an
    integer with ascending digits, or 0 if no such factorisation exists.

    Examples: 20 -> 45 (4*5 via greedy 5 then remainder 4),
    3125 -> 55555 (5**5), primes greater than 9 -> 0.

    Fixes vs. the original: debug print statements removed, float division
    ``int(number / n)`` replaced with exact ``//=`` (safe for large ints),
    redundant double sort removed, counter bookkeeping replaced by for/else.
    """
    digits = ""
    # Greedily strip the largest single-digit factor until one digit remains.
    while len(str(number)) > 1:
        for factor in range(9, 1, -1):
            if number % factor == 0:
                digits += str(factor)
                number //= factor
                break
        else:
            # No digit 2..9 divides the remainder: not factorable.
            return 0
    digits += str(number)
    return int("".join(sorted(digits)))
if __name__ == '__main__':
    # Self-checking examples only; not required for auto-testing.
    cases = ((20, 45, "1st example"),
             (21, 37, "2nd example"),
             (17, 0, "3rd example"),
             (33, 0, "4th example"),
             (3125, 55555, "5th example"),
             (9973, 0, "6th example"))
    for value, expected, label in cases:
        assert checkio(value) == expected, label
# def checkio(number):
# result = ""
# x = 0
# while len(str(number)) > 1 or number in {4, 6, 8, 9}:
# temp = x
# for n in range(2, 10):
# if number % n == 0:
# result += str(n)
# number = int(number / n)
# x += 1
# break
# print("While 1:", result, number, x)
# if x == temp:
# return 0
# result += str(number)
# N = 0
# while N == 0:
# r = ""
# for i in range(1, len(result)):
# n = int(result[i]) * int(result[i - 1])
# print(i, result[i - 1], result[i])
# print("n:", n)
# if len(str(n)) == 1:
# r = result[:i - 1] + str(n) + result[i + 1:]
# print("R:", r)
# result = r
# break
# elif i == len(result) - 1 and len(str(n)) > 1:
# N += 1
# break
# print(result)
# # result="".join(sorted(list(result)))
# print("结果:", result)
# return int("".join(sorted(list(result))))
|
#!/usr/bin/env python
# coding: utf-8
# In[3]:
from PIL import Image
import numpy as np
import glob
import tensorflow as tf
from tensorflow import keras
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten
from keras.utils import to_categorical
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Accumulators shared with Load_data(): raw image arrays and their digit labels.
Images= []
Labels= []
# Placeholders re-assigned from Load_data()'s return value below.
train_lbls=[]
train_imgs= []
#the dataset Path
# Expected layout: <directory>/0/*.jpg ... <directory>/9/*.jpg
directory = r"#Paste the dataset Path here\trainingSet/"
def Load_data(directory):
    """Load an MNIST-style training set from *directory*.

    Expects sub-folders "0" .. "9", each holding jpg images of that digit.
    Decoded images are appended to the module-level ``Images`` list and a
    matching label per image to ``Labels`` (side effect preserved from the
    original); returns them as numpy arrays (images, labels).

    Fixes vs. the original: dead ``i=0``/``x=0`` initialisers removed and the
    redundant second ``np.array`` copy of each image dropped.
    """
    count = 0
    for digit in range(10):
        print("Loading data of dataset file " + str(digit))
        # All jpg paths inside this digit's folder.
        paths = glob.glob(directory + str(digit) + "/*.jpg")
        for path in paths:
            # Decode the jpg straight into a numpy array and keep it.
            img = np.array(Image.open(path))
            count += 1
            Images.append(img)
        # One label per image just loaded from this folder.
        for _ in range(count):
            Labels.append(digit)
        count = 0
    imgs = np.array(Images)
    lbls = np.array(Labels)
    print("All Data is loaded Successfully")
    return imgs, lbls
# Load the whole training set at import time (requires the dataset on disk).
train_imgs,train_lbls=Load_data(directory)
# printing the shape for debugging only
print(train_imgs.shape)
print(train_lbls.shape)
# In[4]:
#normalizing images to have a value from -0.5 to 0.5 instead from 0 to 255
#to be easy in calculations and for faster and better results
train_imgs = (train_imgs / 255) - 0.5
#adding a 3rd dimension to be used in keras model
# (Conv2D expects a channels axis: (N, 28, 28) -> (N, 28, 28, 1))
train_imgs = np.expand_dims(train_imgs,axis=3)
#print Shape for checking the 3rd dimension is added
print(train_imgs.shape)
# In[5]:
def Train(train_imgs,train_lbls):
    """Build, train (4 epochs) and save a small CNN digit classifier, then
    display its predictions for the first 7 training images.

    train_imgs: float array shaped (N, 28, 28, 1); train_lbls: int digit labels.
    Side effects: writes 'cnn.h5' weights file, shows matplotlib windows.
    """
    #set the hyperparameters
    num_filters= 8
    filter_size= 3
    pool_size= 2
    #creating the model: conv -> max-pool -> flatten -> 10-way softmax
    model = Sequential([
    Conv2D(num_filters, filter_size, input_shape=(28, 28, 1)),MaxPooling2D(pool_size=pool_size),
    Flatten(),Dense(10, activation='softmax'),])
    model.compile(loss='categorical_crossentropy',optimizer='adam',)
    #printing the model summary to check layers are correct
    model.summary()
    #training the model
    #to categorical is used
    #because keras converts the array to one hot vectors instead of int
    model.fit(train_imgs,to_categorical(train_lbls),epochs=4,)
    #saving the model to be loaded later in the Predict function
    #so that when we need it we don't train the model again
    model.save_weights('cnn.h5')
    #Prediction of first 7 images in the trainingSet vs the True trainingSet Labels
    p = model.predict(train_imgs[:7])
    #printing the first 7 images themselves
    # NOTE(review): paths are read from folder "0" only; this matches the first
    # 7 samples only if Load_data preserved folder order -- confirm.
    print("The First 7 images:")
    fp=glob.glob(directory+"0/*.jpg")
    for i in range(7):
        img=Image.open(fp[i])
        imgplot = plt.imshow(img, cmap="gray")
        plt.show()
    print("First 7 True labels in the trainingSet :"+str(train_lbls[:7])) #first 7 images are images of 0s Label
    print("First 7 Predicted image Labels :"+str(np.argmax(p, axis=1))) #predict should be 7 0s
# Train immediately at import time using the globally loaded data.
Train(train_imgs,train_lbls)
|
import os
import sys
import traceback
import math
import cProfile
import torch
import torch.nn as nn
from torch.utils.data.dataloader import DataLoader
import torch.nn.functional as F
from torch.autograd import Variable
import IPython
import numpy as np
from tensorboardX import SummaryWriter
import data_video
from vocabulary import *
from models import model_lstm_multimodal_1 as model_lstm_multimodal
from util.beam_search_util_lstm import *
from util.coco_result_generator import *
from util.timer import *
# Run on GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# device = torch.device('cpu')
# Captions are segmented per character; the vocabulary file matches.
segment_method = 'char'
vocab_path = data_video.msvd_bilingual_vocab_char_path
feature_type = 'resnet'
# Point JAVA_HOME at a local JRE for the external caption-eval tooling
# invoked via os.system below (presumably needs Java -- confirm).
os.environ["JAVA_HOME"] = r"/usr/local/lib/jre1.8.0_161"
# First CLI argument names this run; refuse to overwrite an existing run folder.
run_name = sys.argv[1]
assert(len(run_name) > 0)
result_folder = '../all_results/{}'.format(run_name)
assert (not os.path.exists(result_folder)), 'result folder {} exists!'.format(result_folder)
print('run:', run_name)
def train():
    """Train the multimodal LSTM captioner on MSVD.

    Checkpoints the model and runs val/test decoding (test1) every
    ``save_steps`` optimisation steps; loss/lr are logged to tensorboardX.
    """
    save_steps = 10000
    n_epoch = 500
    learning_rate = 1e-4
    scheduler_step_size = 15
    batch_size = 32
    writer = SummaryWriter(log_dir=os.path.join('runs', run_name))
    vocab = Vocabulary.load(vocab_path)
    # Round vocab size up to the next multiple of 100.
    vocab_size = (vocab.idx // 100 + 1) * 100
    dataset = data_video.MSVDDatasetMultiModal(vocab=vocab, segment_method=segment_method, split='train', feature=feature_type)
    collate_fn = data_video.collate_fn_1
    data_loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn, num_workers=0)
    lm_config = model_lstm_multimodal.Config()
    lm_config.vocab_size = vocab_size
    lm_lstm = model_lstm_multimodal.LanguageModelLSTM1(lm_config, device)
    lm_lstm.to(device)
    lm_lstm.train(True)
    epoch = 0
    global_step = 0
    optimizer = torch.optim.Adam(lm_lstm.parameters(), lr=learning_rate)
    # Halve the learning rate every scheduler_step_size epochs.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=scheduler_step_size, gamma=.5)
    criterion = nn.CrossEntropyLoss()
    while epoch < n_epoch:
        # NOTE(review): scheduler.step() at the START of the epoch is the
        # pre-PyTorch-1.1 calling convention -- confirm intended with this
        # torch version.
        scheduler.step()
        print('1 epoch = {} steps'.format(len(data_loader)))
        for _, (image_filename_list, features, captions, res5b_features, caption_eng, lengths) in enumerate(data_loader):
            timer = Timer()
            timer.start()
            global_step += 1
            features_resnet = features.to(device) # batch_size * 2048
            captions = captions.to(device)
            res5b_features = res5b_features.to(device)
            # Embed the (variable-length) English caption tokens into a fixed
            # (batch, eng_caption_max_len, vis_dim) tensor, zero-padded.
            eng_embedding = torch.zeros(len(caption_eng), lm_config.eng_caption_max_len, lm_config.vis_dim)
            for _i in range(len(caption_eng)):
                l = len(caption_eng[_i])
                tokens = torch.tensor(caption_eng[_i]).to(device)
                eng_embedding[_i][:l] = lm_lstm.embed(tokens)
            eng_embedding = eng_embedding.to(device) # batch_size * 15 * 512
            timer.step('to gpu')
            # word_prob_output, last_hidden_state = lm_lstm.forward(features, res5b_feature=res5b_features,
            #                                                       eng_embedding=eng_embedding,
            #                                                       input_words=captions, lengths=lengths)
            # # print(word_prob_output.shape)   # (batch, seq_len, vocab_size)
            # target = torch.nn.utils.rnn.pack_padded_sequence(captions, lengths=lengths, batch_first=True)[0]
            #
            # loss = criterion(word_prob_output, target)
            # optimizer.zero_grad()
            # loss.backward()
            # optimizer.step()
            optimizer.zero_grad()
            predicts = lm_lstm.forward(features_resnet, res5b_features, eng_embedding, captions, lengths)
            # Teacher forcing: predict token t+1 from tokens <= t, so both
            # predictions and targets are packed with length-1 sequences.
            predicts = torch.nn.utils.rnn.pack_padded_sequence(predicts, [l - 1 for l in lengths], batch_first=True)[0]
            targets = torch.nn.utils.rnn.pack_padded_sequence(captions[:, 1:], [l - 1 for l in lengths], batch_first=True)[0]
            timer.step('forward')
            loss = criterion(predicts, targets) # loss.device is 'cuda'
            loss.backward()
            optimizer.step()
            timer.step('optimize')
            print('epoch {}, global step: {}, loss: {:.8f}, lr: [{}]'.format(epoch, global_step, loss, ' '.join(
                '{}'.format(param_group['lr']) for param_group in optimizer.param_groups)))
            lr = optimizer.param_groups[0]['lr']
            writer.add_scalar("loss", loss, global_step=global_step)
            writer.add_scalar("lr", lr, global_step=global_step)
            if global_step % 10 == 0:
                print(data_video.get_c3d_feature_of_video.cache_info())
                timer.print()
            # Periodic evaluation + checkpoint.
            if (global_step % save_steps == 0 and global_step > 0):
                test1(lm_lstm, global_step, test_index=0, split='val')
                test1(lm_lstm, global_step, test_index=1, split='val')
                test1(lm_lstm, global_step, test_index=0, split='test')
                save_model(os.path.join(result_folder, 'models', 'model-{}'.format(global_step)), (lm_lstm, optimizer, epoch, global_step))
        epoch += 1
def save_model(save_path, items):
    """Persist (model, optimizer, epoch, global_step) to *save_path*.

    items is the 4-tuple (lm, optimizer, epoch, global_step); parent
    directories of save_path are created on demand.
    """
    lm, optimizer, epoch, global_step = items
    target_dir = os.path.dirname(save_path)
    if not os.path.isdir(target_dir):
        os.makedirs(target_dir)
    torch.save({'lm_lstm': lm,
                'optimizer': optimizer,
                'epoch': epoch,
                'global_step': global_step}, save_path)
    print('model saved at {}'.format(save_path))
def load_model(save_path):
    """Load a checkpoint written by save_model().

    When the global ``device`` is not CUDA, storages are remapped so the
    checkpoint can be deserialised without a GPU.
    Returns (lm_lstm, optimizer, epoch, global_step).
    """
    if device.type == 'cuda':
        state_dict = torch.load(save_path)
    else:
        # Keep each storage where torch deserialises it (i.e. on CPU).
        state_dict = torch.load(save_path, map_location=lambda storage, loc: storage)
    print('loaded {}'.format(save_path))
    return (state_dict['lm_lstm'], state_dict['optimizer'],
            state_dict['epoch'], state_dict['global_step'])
def test1(lm_lstm, global_step, test_index, split='test'):
    """Decode captions for *split* with beam search using the in-memory model,
    dump annotation/result JSON files, and invoke the external COCO evaluator.

    Called periodically from train(); restores lm_lstm to training mode before
    returning.  NOTE(review): ``json`` is not imported at the top of this
    file -- presumably provided by a star import; confirm.
    """
    lm_lstm.train(False)
    assert split in ['test', 'val']
    annotation_file_name = os.path.join(result_folder, 'msvd_annotation_{}_{}_{}.json'.format(split, global_step, test_index))
    output_file_name = os.path.join(result_folder, 'msvd_result_{}_{}_{}.json'.format(split, global_step, test_index))
    eval_file_name = os.path.join(result_folder, 'eval_{}_{}_{}.txt'.format(split, global_step, test_index))
    if not os.path.exists(result_folder):
        os.makedirs(result_folder)
    # 0.0 disables length normalisation of beam scores.
    length_normalization_factor = 0.0
    beam_size = 3
    max_sentence_length = 15
    vocab = Vocabulary.load(vocab_path)
    dataset = data_video.MSVDDatasetMultiModal(vocab=vocab, segment_method=segment_method, caption_mode='text', split=split, feature=feature_type)
    data_loader = DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=0)
    dataset_size = len(dataset)
    # Special-token ids used to filter the English caption input.
    start_word_id = vocab.get_index(start_word)
    end_word_id = vocab.get_index(end_word)
    en_token_id = vocab.get_index(lang_en)
    chs_token_id = vocab.get_index(lang_chs)
    unk_token_id = vocab.get_index(unknown_word)
    result_generator = COCOResultGenerator()
    for index, (image_filename_list, features, captions, res5b_feature, caption_eng) in enumerate(data_loader):
        try:
            image_id = image_filename_list[0]
            caption = captions[0]
            result_generator.add_annotation(image_id, caption)
            # Each video is decoded once even though it has many annotations.
            if result_generator.has_output(image_id):
                continue
            # Beam search state: start from the <start> token only.
            initial_beam = Caption(
                sentence=[start_word_id],
                state=None,
                logprob=0.0,
                score=0.0,
                metadata=[""]
            )
            partial_captions = TopN(beam_size)
            partial_captions.push(initial_beam)
            complete_captions = TopN(beam_size)
            output_softmax = nn.Softmax(dim=-1)
            h0, c0 = torch.zeros(beam_size, lm_lstm.config.hidden_dim), torch.zeros(beam_size, lm_lstm.config.hidden_dim)
            h0 = h0.to(device)
            c0 = c0.to(device)
            # Strip special tokens, clip to the model's max English length,
            # then embed into a zero-padded tensor.
            caption_eng = [i for i in caption_eng if i not in [start_word_id, end_word_id, en_token_id, chs_token_id, unk_token_id]]
            caption_eng = caption_eng[:min(len(caption_eng), lm_lstm.config.eng_caption_max_len)]
            eng_embedding = torch.zeros(lm_lstm.config.eng_caption_max_len, lm_lstm.config.vis_dim)
            eng_embedding[:len(caption_eng), :] = lm_lstm.embed(torch.tensor(caption_eng).to(device))
            eng_embedding = eng_embedding.unsqueeze(0).to(device)  # 512 * 15
            # # res5b_feature: 1 * 512 * 1 * 7 * 7
            # # res5b_feature = res5b_feature.reshape(512, 49).to(device)
            # res5b_feature = res5b_feature.squeeze(2)   # 1 * 512 * 7 * 7
            # feas = res5b_feature.mean(3).mean(2).squeeze(0)    # 512
            # feas = feas.expand(beam_size, feas.shape[0])   # beam_size * 512
            #
            # features_v = lm_lstm.att_conv2d_v_1(res5b_feature)     # 1 * 512 * 5 * 5
            # features_all = torch.cat([features_v, eng_embedding], dim=1)   # used for attention
            res5b_feature = res5b_feature.to(device)
            # feas, features_all = lm_lstm.get_attn_input(res5b_feature, eng_embedding)
            _, features_all = lm_lstm.get_attn_input(res5b_feature, eng_embedding)
            features = features.to(device)
            feas = lm_lstm.img_embed.forward(features)
            # feas = feas.expand([beam_size, feas.shape[1]])
            feas = feas.repeat([beam_size, 1])
            words = lm_lstm.embed(torch.tensor([start_word_id] * beam_size).to(device))
            for j in range(max_sentence_length):
                partial_captions_list = partial_captions.extract()
                partial_captions.reset()
                if len(partial_captions_list) == 0:
                    break
                if j > 0:
                    # Feed each beam's last word; beam width may shrink.
                    ii = torch.tensor([c.sentence[-1] for c in partial_captions_list]).to(device)
                    words = lm_lstm.embed(ii)
                    beam_size = len(ii)
                    res5b_feature_expand = features_all.expand(beam_size, features_all.shape[1], features_all.shape[2])
                    h0 = torch.cat([c.state[0].unsqueeze(0) for c in partial_captions_list], dim=0)
                    c0 = torch.cat([c.state[1].unsqueeze(0) for c in partial_captions_list], dim=0)
                    feas, alpha = lm_lstm._attention_layer(res5b_feature_expand, h0)
                inputs = torch.cat([feas, words], 1)
                h0, c0 = lm_lstm.lstm_cell(inputs, (h0, c0))
                outputs = lm_lstm.fc_out(h0)
                # hiddens, states = lm_lstm.lstm(inputs, states)  # hiddens: states: [1, 3, 512] [1, 3, 512] FIXME: here?
                # outputs = lm_lstm.output_word_layer(hiddens.squeeze(0))  # lstm outputs:
                softmax = output_softmax(outputs)
                for (i, partial_caption) in enumerate(partial_captions_list):
                    word_probabilities = softmax[i].detach().cpu().numpy()  # cuda tensors -> cpu for sorting
                    # state = (states[0][0][i].detach().cpu().numpy(), states[1][0][i].detach().cpu().numpy())
                    # state = (states[0][:, i:i + 1], states[1][:, i:i + 1])
                    state = (h0[i, :], c0[i, :])
                    # Expand each beam with its beam_size most likely words.
                    words_and_probs = list(enumerate(word_probabilities))
                    words_and_probs.sort(key=lambda x: -x[1])
                    words_and_probs = words_and_probs[0:beam_size]
                    # print([(self.vocab.get_word(w), p) for w, p in words_and_probs])
                    for w, p in words_and_probs:
                        if p < 1e-12:
                            continue  # Avoid log(0).
                        sentence = partial_caption.sentence + [w]
                        logprob = partial_caption.logprob + math.log(p)
                        score = logprob
                        metadata_list = None
                        if w == end_word_id:
                            if length_normalization_factor > 0:
                                score /= len(sentence) ** length_normalization_factor
                            beam = Caption(sentence, state, logprob, score, metadata_list)
                            complete_captions.push(beam)
                        else:
                            beam = Caption(sentence, state, logprob, score, metadata_list)
                            partial_captions.push(beam)
                if partial_captions.size() == 0:
                    break
            # Fall back to unfinished beams when none produced <end>.
            if not complete_captions.size():
                complete_captions = partial_captions
            captions = complete_captions.extract(sort=True)
            print(len(result_generator.test_image_set))
            print('{}, {}/{} {}'.format(
                image_id, index, dataset_size, result_generator.has_output(image_id)))
            for i, caption in enumerate(captions):
                sentence = [vocab.get_word(w) for w in caption.sentence]
                # print(sentence)
                sentence = [w for w in sentence if (w != start_word and w != end_word)]  # ignore start and end tokens
                sentence = "".join(sentence)
                print("  %d) %s (p=%f)" % (i, sentence, math.exp(caption.logprob)))
                if i == 0:
                    print(sentence)
                    result_generator.add_output(image_id, sentence)
        except Exception:
            # NOTE(review): drops into an interactive shell on ANY error,
            # which blocks unattended training runs -- confirm intended.
            IPython.embed()
    annotation_obj, result_obj = result_generator.get_annotation_and_output()
    # print(annotation_obj)
    with open(annotation_file_name, 'w') as f:
        json.dump(annotation_obj, f)
    with open(output_file_name, 'w') as f:
        json.dump(result_obj, f)
    print('annotation images:', len(annotation_obj['images']))
    print('output images:', len(result_obj))
    print('saved to {}'.format(output_file_name))
    # Run the external COCO caption evaluation script on the dumped files.
    eval_cmd = '{} {} {} {} {}'.format(sys.executable,
                                       r"/media/mcislab/sdb1/home/mcislab/zwt/coco-caption-master/eval.py",
                                       annotation_file_name,
                                       output_file_name,
                                       eval_file_name)
    os.system(eval_cmd)
    lm_lstm.train(True)
def test(model_path, result_folder, split='test'):
    """Stand-alone evaluation: load a checkpoint from *model_path*, decode
    *split* with beam search, dump JSON files into *result_folder*, and run
    the external COCO evaluator.

    Shadows the module-level ``result_folder`` with its own parameter.
    NOTE(review): ``json`` is not imported at the top of this file --
    presumably provided by a star import; confirm.
    """
    lm_lstm, optimizer, epoch, global_step = load_model(model_path)
    lm_lstm.train(False)
    assert split in ['test', 'val']
    if not os.path.exists(result_folder):
        os.makedirs(result_folder)
    annotation_file_name = os.path.join(result_folder, 'msvd_annotation_{}_{}.json'.format(split, global_step))
    output_file_name = os.path.join(result_folder, 'msvd_result_{}_{}.json'.format(split, global_step))
    eval_file_name = os.path.join(result_folder, 'eval_{}_{}.txt'.format(split, global_step))
    if not os.path.exists(result_folder):
        os.makedirs(result_folder)
    # 0.0 disables length normalisation of beam scores.
    length_normalization_factor = 0.0
    beam_size = 3
    max_sentence_length = 15
    vocab = Vocabulary.load(vocab_path)
    dataset = data_video.MSVDDatasetMultiModal(vocab=vocab, segment_method=segment_method, caption_mode='text',
                                               split=split, feature=feature_type, english_src='groundtruth')
    data_loader = DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=0)
    dataset_size = len(dataset)
    # Special-token ids used to filter the English caption input.
    start_word_id = vocab.get_index(start_word)
    end_word_id = vocab.get_index(end_word)
    en_token_id = vocab.get_index(lang_en)
    chs_token_id = vocab.get_index(lang_chs)
    unk_token_id = vocab.get_index(unknown_word)
    result_generator = COCOResultGenerator()
    for index, (image_filename_list, features, captions, res5b_feature, caption_eng) in enumerate(data_loader):
        image_id = image_filename_list[0]
        caption = captions[0]
        result_generator.add_annotation(image_id, caption)
        # Each video is decoded once even though it has many annotations.
        if result_generator.has_output(image_id):
            continue
        # Beam search state: start from the <start> token only.
        initial_beam = Caption(
            sentence=[start_word_id],
            state=None,
            logprob=0.0,
            score=0.0,
            metadata=[""]
        )
        partial_captions = TopN(beam_size)
        partial_captions.push(initial_beam)
        complete_captions = TopN(beam_size)
        output_softmax = nn.Softmax(dim=-1)
        h0, c0 = torch.zeros(beam_size, lm_lstm.config.hidden_dim), torch.zeros(beam_size, lm_lstm.config.hidden_dim)
        h0 = h0.to(device)
        c0 = c0.to(device)
        # NOTE(review): ee is assigned but never used.
        ee = caption_eng
        # Strip special tokens, clip to the model's max English length,
        # then embed into a zero-padded tensor.
        caption_eng = [i for i in caption_eng if
                       i not in [start_word_id, end_word_id, en_token_id, chs_token_id, unk_token_id]]
        caption_eng = caption_eng[:min(len(caption_eng), lm_lstm.config.eng_caption_max_len)]
        eng_embedding = torch.zeros(lm_lstm.config.eng_caption_max_len, lm_lstm.config.vis_dim)
        eng_embedding[:len(caption_eng), :] = lm_lstm.embed(torch.tensor(caption_eng).to(device))
        eng_embedding = eng_embedding.unsqueeze(0).to(device)  # 512 * 15
        # # res5b_feature: 1 * 512 * 1 * 7 * 7
        # # res5b_feature = res5b_feature.reshape(512, 49).to(device)
        # res5b_feature = res5b_feature.squeeze(2)   # 1 * 512 * 7 * 7
        # feas = res5b_feature.mean(3).mean(2).squeeze(0)    # 512
        # feas = feas.expand(beam_size, feas.shape[0])   # beam_size * 512
        #
        # features_v = lm_lstm.att_conv2d_v_1(res5b_feature)     # 1 * 512 * 5 * 5
        # features_all = torch.cat([features_v, eng_embedding], dim=1)   # used for attention
        res5b_feature = res5b_feature.to(device)
        # feas, features_all = lm_lstm.get_attn_input(res5b_feature, eng_embedding)
        _, features_all = lm_lstm.get_attn_input(res5b_feature, eng_embedding)
        features = features.to(device)
        feas = lm_lstm.img_embed.forward(features)
        # feas = feas.expand([beam_size, feas.shape[1]])
        feas = feas.repeat([beam_size, 1])
        words = lm_lstm.embed(torch.tensor([start_word_id] * beam_size).to(device))
        for j in range(max_sentence_length):
            partial_captions_list = partial_captions.extract()
            partial_captions.reset()
            if len(partial_captions_list) == 0:
                break
            if j > 0:
                # Feed each beam's last word; beam width may shrink.
                ii = torch.tensor([c.sentence[-1] for c in partial_captions_list]).to(device)
                words = lm_lstm.embed(ii)
                beam_size = len(ii)
                res5b_feature_expand = features_all.expand(beam_size, features_all.shape[1], features_all.shape[2])
                h0 = torch.cat([c.state[0].unsqueeze(0) for c in partial_captions_list], dim=0)
                c0 = torch.cat([c.state[1].unsqueeze(0) for c in partial_captions_list], dim=0)
                feas, alpha = lm_lstm._attention_layer(res5b_feature_expand, h0)
            inputs = torch.cat([feas, words], 1)
            h0, c0 = lm_lstm.lstm_cell(inputs, (h0, c0))
            outputs = lm_lstm.fc_out(h0)
            # hiddens, states = lm_lstm.lstm(inputs, states)  # hiddens: states: [1, 3, 512] [1, 3, 512] FIXME: here?
            # outputs = lm_lstm.output_word_layer(hiddens.squeeze(0))  # lstm outputs:
            softmax = output_softmax(outputs)
            for (i, partial_caption) in enumerate(partial_captions_list):
                word_probabilities = softmax[i].detach().cpu().numpy()  # cuda tensors -> cpu for sorting
                # state = (states[0][0][i].detach().cpu().numpy(), states[1][0][i].detach().cpu().numpy())
                # state = (states[0][:, i:i + 1], states[1][:, i:i + 1])
                state = (h0[i, :], c0[i, :])
                # Expand each beam with its beam_size most likely words.
                words_and_probs = list(enumerate(word_probabilities))
                words_and_probs.sort(key=lambda x: -x[1])
                words_and_probs = words_and_probs[0:beam_size]
                # print([(self.vocab.get_word(w), p) for w, p in words_and_probs])
                for w, p in words_and_probs:
                    if p < 1e-12:
                        continue  # Avoid log(0).
                    sentence = partial_caption.sentence + [w]
                    logprob = partial_caption.logprob + math.log(p)
                    score = logprob
                    metadata_list = None
                    if w == end_word_id:
                        if length_normalization_factor > 0:
                            score /= len(sentence) ** length_normalization_factor
                        beam = Caption(sentence, state, logprob, score, metadata_list)
                        complete_captions.push(beam)
                    else:
                        beam = Caption(sentence, state, logprob, score, metadata_list)
                        partial_captions.push(beam)
            if partial_captions.size() == 0:
                break
        # Fall back to unfinished beams when none produced <end>.
        if not complete_captions.size():
            complete_captions = partial_captions
        captions = complete_captions.extract(sort=True)
        print(len(result_generator.test_image_set))
        print('{}, {}/{} {}'.format(
            image_id, index, dataset_size, result_generator.has_output(image_id)))
        for i, caption in enumerate(captions):
            sentence = [vocab.get_word(w) for w in caption.sentence]
            # print(sentence)
            sentence = [w for w in sentence if (w != start_word and w != end_word)]  # ignore start and end tokens
            sentence = "".join(sentence)
            print("  %d) %s (p=%f)" % (i, sentence, math.exp(caption.logprob)))
            if i == 0:
                print(sentence)
                result_generator.add_output(image_id, sentence)
    annotation_obj, result_obj = result_generator.get_annotation_and_output()
    # print(annotation_obj)
    with open(annotation_file_name, 'w') as f:
        json.dump(annotation_obj, f)
    with open(output_file_name, 'w') as f:
        json.dump(result_obj, f)
    print('annotation images:', len(annotation_obj['images']))
    print('output images:', len(result_obj))
    print('saved to {}'.format(output_file_name))
    # Run the external COCO caption evaluation script on the dumped files.
    eval_cmd = '{} {} {} {} {}'.format(sys.executable,
                                       r"/media/mcislab/sdb1/home/mcislab/zwt/coco-caption-master/eval.py",
                                       annotation_file_name,
                                       output_file_name,
                                       eval_file_name)
    os.system(eval_cmd)
if __name__ == '__main__':
    # train()
    # cProfile.run('train()', filename='run_profile')
    # Evaluate a previously trained checkpoint; the hard-coded path points at
    # a specific machine/run -- adjust before reuse.
    test(model_path=r'/media/mcislab/sdb1/home/mcislab/zwt/caption_models_Chinese/all_results/results_lstm_multi_modal_both_e_conv_2/models/model-50000',
         result_folder=os.path.join('../all_results/', run_name))
import argparse
def start():
    """mitmproxy entry point: parse the 'logfile' CLI argument and return
    the Logger addon configured with that path."""
    parser = argparse.ArgumentParser()
    parser.add_argument("logfile", type=str)
    options = parser.parse_args()
    return Logger(options.logfile)
class Logger:
    """mitmproxy addon that appends host/IP/URL lines for each request
    to a plain-text log file."""

    def __init__(self, logfile):
        # Remember the path and make sure the file exists up front.
        self.logfile = logfile
        with open(self.logfile, "a"):
            pass

    def request(self, flow):
        """mitmproxy hook: log the request's host, resolved IP and URL."""
        if flow.client_conn.tls_established:
            conn = flow.server_conn
            lines = []
            # Prefer the resolved IP, fall back to the configured address,
            # and record explicitly when neither is known.
            if conn.ip_address:
                lines.append("host: " + flow.request.pretty_host + ",ip: " + conn.ip_address.host + "\n")
            elif conn.address:
                lines.append("host: " + flow.request.pretty_host + ",ip: " + conn.address.host + "\n")
            else:
                lines.append("host: " + flow.request.pretty_host + ",NO IP ADDRESS" + "\n")
            lines.append("url: " + flow.request.pretty_url + "\n")
            with open(self.logfile, "a") as logfile:
                logfile.writelines(lines)
        elif flow.server_conn.protocol == "http":
            with open(self.logfile, "a") as logfile:
                logfile.write("http host: " + "http://" + flow.request.pretty_host +
                              ",ip: " + flow.server_conn.ip_address.host + "\n")
                logfile.write("http url: " + "http://" + flow.request.pretty_url + "\n")
|
from restaurant import Restaurant
# Instantiate three Restaurant objects (name, cuisine type) and print each
# one's summary via its describe_restaurant() method.
restaurant_one = Restaurant("McDonalds", "Fast Food")
restaurant_two = Restaurant("Burger King", "Burgers")
restaurant_three = Restaurant("Healthy Cuisine Restaurant", "Salad")
restaurant_one.describe_restaurant()
restaurant_two.describe_restaurant()
restaurant_three.describe_restaurant()
|
from django.urls import path
from . import views
from django.views.decorators.csrf import csrf_exempt
# URL routes for the expenses app.  update/delete embed the expense row id
# in the URL.
urlpatterns= [
    path('', views.index, name= 'expenses'),
    path('add_expenses/', views.add_expense, name="add_expenses"),
    path('update_expense/<int:id>', views.update_expense, name ='update_expense'),
    path('delete_expense/<int:id>', views.delete_expense, name="delete_expense"),
    # NOTE(review): csrf_exempt suggests an AJAX endpoint posting without a
    # CSRF token -- confirm this exemption is intentional.
    path('search_expense', csrf_exempt(views.search_expense), name="search_expenses"),
    path('expense_summary', views.expense_summary, name="expense_summary"),
    path('status', views.status_view, name="status"),
    path('export_exl', views.export_exl, name= 'export_exl'),
]
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# Ground-truth dataset (no missing values); used later for RMSE comparison.
file = pd.read_csv("pima_indians_diabetes_original.csv")
#QUES1
# Q1: count NaNs per attribute in the corrupted dataset and plot them.
print("\n******QUESTION-1*******")
file1 = pd.read_csv("pima_indians_diabetes_miss.csv")
Df = file1.isnull().sum()
y = Df.values[0:9]
x = ["pregs","plas","pres","skin","test","BMI","pedi","Age","class"]
plt.bar(x,y)
plt.xlabel("Attributes")
plt.ylabel("No. of missing values")
plt.show()
#QUES2
#2(a)
# Q2(a): drop rows where at least a third of the attributes are missing.
print("\n******QUESTION-2(a)*******")
file2a = pd.read_csv("pima_indians_diabetes_miss.csv")
df = pd.DataFrame(file2a)
totDel=0
rowNo1=[]
# 'row' collects 0-based indices of every dropped row; Q2(b) appends to it
# too and Q3 uses the union.
row=[]
#deleting tuples having equal to or more than 1/3 of attributes with missing values:
for i in range(len(df.index)) :
    if df.iloc[i].isnull().sum() >= len(df.columns)//3 :
        totDel+=1
        rowNo1.append(i+1)
        row.append(i)
        file2a.drop(i,inplace=True)
print("\nTotal no of tuples deleted : ",totDel)
print("\nRow nos. of deleted tuples : \n",rowNo1)
#2(b)
# Q2(b): drop rows whose target ('class') value is missing.
print("\n******QUESTION-2(b)*******")
file2b = pd.read_csv("pima_indians_diabetes_miss.csv")
totDel2=0
rowNo2=[]
classNull = pd.DataFrame(file2b["class"])
#deleting tuples with missing class attribute:
for i in range(len(classNull.index)):
    if classNull.iloc[i].isnull().sum() == 1 :
        totDel2+=1
        rowNo2.append(i+1)
        row.append(i)
        file2b.drop(i,inplace=True)
print("\nTotal no of tuples deleted : ",totDel2)
print("\nRow nos. of deleted tuples : ",rowNo2)
#QUES3
# Q3: after removing every row flagged in Q2(a)/Q2(b), count remaining NaNs.
print("\n******QUESTION-3*******")
file3 = pd.read_csv("pima_indians_diabetes_miss.csv")
df = pd.DataFrame(file3)
rowDel=list(set(row))  #total no. of rows to deleted after Q2(a) and Q2(b)
for i in rowDel :
    df=df.drop(i)
df_null = df.isnull().sum()
y = df_null.values[0:9]
print("\nAfter Deleting tuples in Q2 :-")
print("No. of missing values in pregs : ",y[0])
print("No. of missing values in plas : ",y[1])
print("No. of missing values in pres : ",y[2])
print("No. of missing values in skin : ",y[3])
print("No. of missing values in test : ",y[4])
print("No. of missing values in BMI : ",y[5])
print("No. of missing values in pedi : ",y[6])
print("No. of missing values in Age : ",y[7])
print("No. of missing values in class : ",y[8])
print("Total no. of missing values : ",sum(y))
#QUES4
#4(a)
# Q4(a): impute missing values with column means, compare statistics to the
# original dataset, and plot per-column RMSE of the imputed entries.
print("\n******QUESTION-4(a)*******")
df = pd.DataFrame(file3)
for i in rowDel :
    df=df.drop(i)
df_1=df.fillna(df.mean())  #Replacing by mean
#Part(i)
print("\n---Part(i)---")
print("\nMean, Median, Mode and Standard Deviation after filling missing values: ")
print("\nMean:\n",df_1.mean())
print("\nMedian:\n",df_1.median())
print("\nMode:\n",df_1.mode().loc[0])
print("\nStandard Deviation:\n",df_1.std())
print("\n\nMean, Median, Mode and Standard Deviation of original file: ")
print("\nMean:\n",file.mean())
print("\nMedian:\n",file.median())
print("\nMode:\n",file.mode().loc[0])
print("\nStandard Deviation:\n",file.std())
#Part(ii)
# RMSE is computed only over the cells that were actually imputed.
print("\n---Part(ii)---")
print("\nRMSE value for: ")
rmse=[]
for i in df.columns:
    ind=df[i][df[i].isnull()].index
    if len(ind)!=0:
        x=0
        for j in ind:
            x+=(df_1[i][j]-file[i][j])**2
        x/=len(ind)
        rmse.append(round(x**0.5,4))
    else:
        rmse.append(0)
    print(i,'=',rmse[-1])
plt.bar(df.columns,rmse)
plt.ylabel('RMSE')
plt.show()
#4(b)
# Q4(b): same as Q4(a) but impute by linear interpolation instead of means.
print("\n******QUESTION-4(b)*******")
df_2=df.fillna(df.interpolate())  #Replacing by interpolation
#Part(i)
print("\n---Part(i)---")
print("\nMean, Median, Mode and Standard Deviation after filling missing values: ")
print("\nMean:\n",df_2.mean())
print("\nMedian:\n",df_2.median())
print("\nMode:\n",df_2.mode().loc[0])
print("\nStandard Deviation:\n",df_2.std())
print("\n\nMean, Median, Mode and Standard Deviation of original file: ")
print("\nMean:\n",file.mean())
print("\nMedian:\n",file.median())
print("\nMode:\n",file.mode().loc[0])
print("\nStandard Deviation:\n",file.std())
#Part(ii)
# RMSE over the imputed cells only, as in Q4(a).
print("\n---Part(ii)---")
print("\nRMSE value for: ")
rmse=[]
for i in df.columns:
    ind=df[i][df[i].isnull()].index
    if len(ind)!=0:
        x=0
        for j in ind:
            x+=(df_2[i][j]-file[i][j])**2
        x/=len(ind)
        rmse.append(round(x**0.5,4))
    else:
        rmse.append(0)
    print(i,'=',rmse[-1])
plt.bar(df.columns,rmse)
plt.ylabel('RMSE')
plt.show()
#QUES5
# Outlier detection with the 1.5*IQR fence rule, then replacement by median.
print("\n******QUESTION-5*******")
#Part-(i)
print("\n---Part(i)---")
Age = df_2["Age"] #Outliers for Age
Age_Q1=np.quantile(Age,.25)
Age_Q3=np.quantile(Age,.75)
Age_IQR=Age_Q3-Age_Q1
Age_Out=[]
ind1=[]
for i in range(len(Age.index)):
    # value is an outlier if it lies outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR]
    if Age.iloc[i].item()<=(Age_Q1-1.5*(Age_IQR)) or Age.iloc[i].item()>=(Age_Q3+1.5*(Age_IQR)):
        Age_Out.append(Age.iloc[i].item())
        ind1.append(i)
print("\nOutliers for Age : ",Age_Out)
BMI = df_2["BMI"] #Outliers for BMI
BMI_Q1=np.quantile(BMI,.25)
BMI_Q3=np.quantile(BMI,.75)
BMI_IQR=BMI_Q3-BMI_Q1
BMI_Out=[]
ind2=[]
for i in range(len(BMI.index)):
    if BMI.iloc[i].item()<=(BMI_Q1-1.5*(BMI_IQR)) or BMI.iloc[i].item()>=(BMI_Q3+1.5*(BMI_IQR)):
        BMI_Out.append(BMI.iloc[i].item())
        ind2.append(i)
print("\nOutliers for BMI : ",BMI_Out)
boxplot = file3.boxplot(column=["Age","BMI"])
plt.title("Boxplot for Age and BMI")
plt.show()
#Part-(ii)
print("\n---Part(ii)---")
for i in ind1:
    Age.iloc[i]=Age.median() #replacing outliers with median
for i in ind2:
    BMI.iloc[i]=round(BMI.median(),2)
file3["Age"]=Age
file3["BMI"]=BMI
boxplot = file3.boxplot(column=["Age","BMI"])
plt.title("Modified Boxplot for Age and BMI")
plt.show()
|
import torch
# With requires_grad=True, the computed gradient is stored automatically in w.grad.
# Note: the tensor elements must be floats for this to work.
w = torch.tensor(1.0, requires_grad=True)
a = w*3
l = a**2
# The object that backward() is called on must be a scalar.
l.backward()
print(w.grad)
print('l을 w로 미분한 값은 {}'.format(w.grad))
# requires_grad=True requires the tensor's data type to be float.
x = torch.tensor(([[1, 2, 3], [4, 5, 6]]), dtype=torch.float, requires_grad=True)
y = torch.tensor(([[1],[2],[3]]), dtype=torch.float, requires_grad=True)
z = torch.mm(x,y)
t = torch.sum(z)
t.backward()
print('x grad : \n', x.grad)
print('y grad : \n', y.grad)
# Automatic differentiation of affine maps (as covered in DL1 and ML) is very convenient.
from cardmodel import Card
from solitairemodel import SolitaireModel
class SolitaireView:
    """Console (text) view of a solitaire game backed by a SolitaireModel.

    Renders the foundations, tableaus, stock and waste as an ASCII grid,
    prints help and status messages, and formats individual cards.

    Fixes: the status methods used Python-2-only ``print`` statements and
    ``draw`` relied on the Python 2 builtin ``reduce`` — both broke on
    Python 3.  Now compatible with both (output unchanged).
    """
    def __init__(self, model):
        self.model = model
    def draw(self):
        """Print the current layout as one row per card position."""
        from functools import reduce  # builtin in py2; must be imported on py3
        # get data from model
        stock = self.model.getStock()
        # limit waste to last 3 cards
        waste = self.model.getWaste()
        waste = waste[(-1*(min(3, len(waste)))):]
        foundation = []
        for i in range(4):
            foundation.append(self.model.getFoundation(i))
        tableau = []
        for i in range(7):
            tableau.append(self.model.getTableau(i))
        # print header
        print(" f0 f1 f2 f3 t0 t1 t2 t3 t4 t5 t6 STK WST")
        print("--- --- --- --- --- --- --- --- --- --- --- --- ---")
        # find longest list from foundation, tableau, stock (show only one card),
        # and waste (already reduced to max length of 3)
        longest = reduce(max,
                [reduce(max, map(len, foundation)),
                 reduce(max, map(len, tableau)),
                 1,
                 len(waste)
                 ])
        for i in range(longest):
            # build string
            st = ""
            # add foundation
            for j in range(4):
                st += '{:>3s} '.format(self.model.isCardFaceUp(foundation[j][i]) and \
                        self.card(foundation[j][i]) or "X") \
                    if len(foundation[j]) > i else " " * 4
            st += " "
            # add tableau
            for j in range(7):
                st += '{:>3s} '.format(self.model.isCardFaceUp(tableau[j][i]) and \
                        self.card(tableau[j][i]) or "X") \
                    if len(tableau[j]) > i else " " * 4
            st += " "
            # add stock. Either X or EMP if first row
            st += '{:>3s} '.format(len(stock) and "X" or "") if i == 0 else " " * 4
            st += " "
            # add waste. Only top 3 cards
            st += '{:>3s} '.format(self.model.isCardFaceUp(waste[i]) and \
                    self.card(waste[i]) or "X") \
                if len(waste) > i and i < 3 else " " * 4
            print(st)
    def help(self):
        """Print the list of supported commands."""
        print("Available commands:")
        print(" help => print help")
        print(" quit => end game")
        print(" new => new game")
        print(" deal => deal cards from stock to waste")
        print(" stock => move waste cards back to empty stock")
        print(" flip tx => flip top card on tableau tx")
        print(" card stack => move card to stack (fx or tx)")
    def dealFailed(self):
        """Report that dealing from the stock failed."""
        print("DEAL FAILED")
    def stockFailed(self):
        """Report that restocking from the waste failed."""
        print("STOCK FAILED")
    def unknownCommand(self):
        """Report an unrecognised user command."""
        print("UNKNOWN COMMAND")
    def gameWon(self):
        """Announce that the game has been won."""
        print("GAME WON!")
    def fullStack(self, stack):
        """Return a one-line rendering of *stack*: face-up cards by label,
        face-down cards as 'X', empty stacks as '(empty)'."""
        if len(stack):
            cards = []
            for c in stack:
                cards.append(self.model.isCardFaceUp(c) and self.card(c) or "X")
            return " ".join(cards)
        else:
            return "(empty)"
    def card(self, card):
        """Return the short label for *card*: rank letter/number + suit letter."""
        suits = {Card.SPADES:"S",
                 Card.CLUBS:"C",
                 Card.HEARTS:"H",
                 Card.DIAMONDS:"D"}
        ranks = {Card.ACE:"A",
                 Card.TWO:"2",
                 Card.THREE:"3",
                 Card.FOUR:"4",
                 Card.FIVE:"5",
                 Card.SIX:"6",
                 Card.SEVEN:"7",
                 Card.EIGHT:"8",
                 Card.NINE:"9",
                 Card.TEN:"10",
                 Card.JACK:"J",
                 Card.QUEEN:"Q",
                 Card.KING:"K"}
        return ranks[card.rank] + suits[card.suit]
|
import random
def hangman(word_file='C:/Users/markf/Desktop/word_list.txt'):
    """Play one interactive round of hangman on the console.

    A random word is picked from *word_file* (whitespace-separated words);
    the player may make up to six wrong guesses before losing.

    word_file -- path to the word list.  Defaults to the original
                 hard-coded location so existing callers are unchanged.

    Fixes: losing used to print "Player won."; the "guesses used" count
    reported guesses remaining; non-letter input other than digits was
    accepted; repeated *wrong* guesses were not detected.
    """
    # gallows art: one extra piece is printed per wrong guess
    lists = [
        " O ",
        " | ",
        " ======= ",
        "/ | \ ",
        " / \ ",
        " | | "
    ]
    print("========================")
    print("Welcome to Hangman Game.")
    print("========================")
    with open(word_file) as f:  # list of words with no surrounding whitespace
        words = f.read().split()
    word_answer = random.choice(words)
    # one '-' placeholder per letter of the answer
    display_lines = list('-' * len(word_answer))
    print(''.join(display_lines))
    guessed = set()  # every letter tried so far, right or wrong
    guess_count = 6
    count = 0
    while guess_count > 0:
        print("Number of guesses: %d" % guess_count)
        flag = False
        guess = input("Please guess a letter: ").lower()
        # accept exactly one alphabetic character (fix: the old check only
        # rejected digits, letting punctuation through)
        if len(guess) != 1 or not guess.isalpha():
            print("Letters only please")
            continue
        # fix: track *all* guesses; the old code only caught repeats of
        # correct letters already revealed in the word
        if guess in guessed:
            print("Letter '%s' has already been guessed." % guess)
            continue
        guessed.add(guess)
        # reveal every occurrence of the guessed letter
        for i in range(len(word_answer)):
            if word_answer[i] == guess:
                flag = True
                display_lines[i] = guess
        if flag:
            print("\nYup.")
        else:
            print("\nIncorrect guesses: ")
            count += 1
            for i in range(count):
                print(lists[i])
            guess_count -= 1
        print("\nWord so far: ")
        print(''.join(display_lines))
        # stop as soon as the whole word has been revealed
        if '-' not in display_lines:
            break
    #End while
    # report the result based on the remaining guess count
    if guess_count > 0:
        print("\nYou Won!!")
        # fix: report guesses actually used, not guesses remaining
        print("You have used %d of six guesses." % count)
    else:
        # fix: this branch previously printed "Player won."
        print("\nYou lost.")
        print("The word was '%s'" % word_answer)
'''
String method:
.join(seq)
.lower()
.split([white space])
'''
|
# -*- coding: utf-8 -*-
"""evaluate.py
This is a simple ad-hoc bulk evaluator of the pre trained corpora matching the path:
pretrained/{language_code}/{1,..,4}-gram.pickle
Once ran, this file will start printing the Json-encoded resulting counters after concluded
experiments to the STDOUT. It will also notify reaching a milestone once per 1000 test sentences."""
import json
from diacritics_restorer import HmmNgramRestorer
# Directory holding the pre-trained models, and the test-corpus file name.
# NOTE(review): DIR is unused below; the literal "pretrained" is repeated,
# and despite its name TRAINING_FILE is the *test* corpus.
DIR = "pretrained"
TRAINING_FILE = "target_test.detok.txt"
# Evaluate every (language, n-gram order) combination and dump the counters.
for language in ["hr", "cs", "sk", "ga", "hu", "pl", "ro", "fr", "es", "lv", ]:
    for n in [1, 2, 3, 4]:
        print("Testing {}-gram model for language {}".format(n, language))
        # load the Diacritics restorer from file and feed it the test sentences
        accuracy_counter = HmmNgramRestorer.load("/".join(["pretrained", language, str(n) + "-gram.pickle"])) \
            .test("/".join(["corpora", language, TRAINING_FILE]), 1000)
        print("Result: {}".format(json.dumps(accuracy_counter.as_dict())))
|
#-*- coding:utf-8 -*-
# setup.py
from distutils.core import setup
import py2exe  # importing py2exe registers the "py2exe" distutils command
# Build a Windows console executable from hello.py (run: python setup.py py2exe).
setup(console=['hello.py'])
# Use if / elif / else branches to run different code for different conditions.
people = 20
cars = 30
buses = 15
# Compare cars with people.
if cars > people:
    print("We should take the cars.")
elif cars < people:  # fix: was misspelled 'prople', which raised NameError
    print("We should not take the cars.")
else:
    print("We can't decide.")
# Compare buses with cars.
if buses > cars:
    print("That's too many buses.")
elif buses < cars:
    print("Maybe we could take the buses.")
else:
    print("We still can't decide.")
# Compare people with buses.
if people > buses:
    print("Alright, let's just take the buses.")
else:
    print("Fine, let's stay home then.")
# Multi-branch logic is expressed with chains of if / elif / else.
# Twitter hashtags to track (2019 Chilean protest topics); consumed by a
# streaming filter elsewhere.
track = [
    '#ToqueDeQueda',
    '#RenunciaPiñeraCuliao',
    '#ChileEnResistencia',
    '#ToqueDeQuedaTotal',
    '#EstadoEmergencia',
    '#EstadoDeExcepcion',
    '#ChileDesperto',
    '#ChileResiste',
    '#Valparaiso',
    '#Coquimbo',
    '#FuerzaChile',
    '#ChileSeCanso',
    '#ChileProtests',
    '#ChileEnMarcha',
    '#PiñeraRenuncia',
    '#ToqueDeQuedaChile',
    '#ChileSeLevanta',
    '#RenunciaPiñera'
]
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
import shutil
import socket
from twisted.trial import unittest
from twisted.spread import pb
from twisted.internet import reactor, defer
from twisted.cred import checkers, portal
from zope.interface import implements
from buildslave import bot
from mock import Mock
# I don't see any simple way to test the PB equipment without actually setting
# up a TCP connection. This just tests that the PB code will connect and can
# execute a basic ping. The rest is done without TCP (or PB) in other test modules.
class MasterPerspective(pb.Avatar):
    """Fake master-side avatar that notices keepalive pings from the slave.

    The optional callback is one-shot: it is cleared before being invoked,
    so only the first keepalive triggers it.
    """
    def __init__(self, on_keepalive=None):
        self.on_keepalive = on_keepalive
    def perspective_keepalive(self):
        callback = self.on_keepalive
        if callback:
            self.on_keepalive = None
            callback()
class MasterRealm:
    """Minimal twisted.cred realm that always hands out one fixed perspective."""
    def __init__(self, perspective, on_attachment):
        self.perspective = perspective
        # optional callback invoked with the slave's "mind" once attached
        self.on_attachment = on_attachment
    # old-style (Python 2 era) zope.interface declaration: this class
    # provides portal.IRealm
    implements(portal.IRealm)
    def requestAvatar(self, avatarId, mind, *interfaces):
        """IRealm API: return self.perspective for any avatarId, after
        firing the on_attachment callback (if any) with the mind."""
        assert pb.IPerspective in interfaces
        self.mind = mind
        self.perspective.mind = mind
        d = defer.succeed(None)
        if self.on_attachment:
            d.addCallback(lambda _: self.on_attachment(mind))
        def returnAvatar(_):
            return pb.IPerspective, self.perspective, lambda: None
        d.addCallback(returnAvatar)
        return d
    def shutdown(self):
        """Drop the TCP connection to the attached slave."""
        return self.mind.broker.transport.loseConnection()
class TestBuildSlave(unittest.TestCase):
    """Integration tests connecting a real bot.BuildSlave to an in-process
    PB master over a dynamically allocated local TCP port.

    (Python 2 code: note the octal literal ``022`` used for umask.)
    """
    def setUp(self):
        # handles that tearDown must clean up; individual tests populate them
        self.realm = None
        self.buildslave = None
        self.listeningport = None
        # run each test in a fresh scratch directory
        self.basedir = os.path.abspath("basedir")
        if os.path.exists(self.basedir):
            shutil.rmtree(self.basedir)
        os.makedirs(self.basedir)
        # the slave tries to call socket.getfqdn to write its hostname; this hangs
        # without network, so fake it
        self.patch(socket, "getfqdn", lambda : 'test-hostname.domain.com')
    def tearDown(self):
        """Shut down realm, slave and listening port, then remove basedir."""
        d = defer.succeed(None)
        if self.realm:
            d.addCallback(lambda _ : self.realm.shutdown())
        if self.buildslave and self.buildslave.running:
            d.addCallback(lambda _ : self.buildslave.stopService())
        if self.listeningport:
            d.addCallback(lambda _ : self.listeningport.stopListening())
        if os.path.exists(self.basedir):
            shutil.rmtree(self.basedir)
        return d
    def start_master(self, perspective, on_attachment=None):
        """Start a PB master serving *perspective*; return its TCP port."""
        self.realm = MasterRealm(perspective, on_attachment)
        p = portal.Portal(self.realm)
        p.registerChecker(
            checkers.InMemoryUsernamePasswordDatabaseDontUse(testy="westy"))
        self.listeningport = reactor.listenTCP(0, pb.PBServerFactory(p), interface='127.0.0.1')
        # return the dynamically allocated port number
        return self.listeningport.getHost().port
    def test_keepalive_called(self):
        """A slave configured with a tiny keepalive interval pings the master."""
        # set up to fire this deferred on receipt of a keepalive
        d = defer.Deferred()
        def on_keepalive():
            # need to wait long enough for the remote_keepalive call to
            # finish, but not for another one to queue up
            reactor.callLater(0.01, d.callback, None)
        persp = MasterPerspective(on_keepalive=on_keepalive)
        # start up the master and slave, with a very short keepalive
        port = self.start_master(persp)
        self.buildslave = bot.BuildSlave("127.0.0.1", port,
                                         "testy", "westy", self.basedir,
                                         keepalive=0.1, keepaliveTimeout=0.05, usePTY=False)
        self.buildslave.startService()
        # and wait for it to keepalive
        return d
    def test_buildslave_print(self):
        """The master can invoke the remote 'print' method on the slave."""
        d = defer.Deferred()
        # set up to call print when we are attached, and chain the results onto
        # the deferred for the whole test
        def call_print(mind):
            print_d = mind.callRemote("print", "Hi, slave.")
            print_d.addCallbacks(d.callback, d.errback)
        # start up the master and slave, with a very short keepalive
        persp = MasterPerspective()
        port = self.start_master(persp, on_attachment=call_print)
        self.buildslave = bot.BuildSlave("127.0.0.1", port,
                                         "testy", "westy", self.basedir,
                                         keepalive=0, usePTY=False, umask=022)
        self.buildslave.startService()
        # and wait for the result of the print
        return d
    def test_hostname_file(self):
        """Constructing a slave writes the (faked) fqdn to twistd.hostname."""
        self.buildslave = bot.BuildSlave("127.0.0.1", 9999,
                                         "testy", "westy", self.basedir,
                                         keepalive=0, usePTY=False, umask=022)
        self.assertEqual(open(os.path.join(self.basedir, "twistd.hostname")).read().strip(),
                         'test-hostname.domain.com')
    def test_buildslave_graceful_shutdown(self):
        """Test that running the build slave's gracefulShutdown method results
        in a call to the master's shutdown method"""
        d = defer.Deferred()
        fakepersp = Mock()
        called = []
        def fakeCallRemote(*args):
            # record every remote call made during graceful shutdown
            called.append(args)
            d1 = defer.succeed(None)
            return d1
        fakepersp.callRemote = fakeCallRemote
        # set up to call shutdown when we are attached, and chain the results onto
        # the deferred for the whole test
        def call_shutdown(mind):
            self.buildslave.bf.perspective = fakepersp
            shutdown_d = self.buildslave.gracefulShutdown()
            shutdown_d.addCallbacks(d.callback, d.errback)
        persp = MasterPerspective()
        port = self.start_master(persp, on_attachment=call_shutdown)
        self.buildslave = bot.BuildSlave("127.0.0.1", port,
                                         "testy", "westy", self.basedir,
                                         keepalive=0, usePTY=False, umask=022)
        self.buildslave.startService()
        def check(ign):
            self.assertEquals(called, [('shutdown',)])
        d.addCallback(check)
        return d
    def test_buildslave_shutdown(self):
        """Test watching an existing shutdown_file results in gracefulShutdown
        being called."""
        buildslave = bot.BuildSlave("127.0.0.1", 1234,
                                    "testy", "westy", self.basedir,
                                    keepalive=0, usePTY=False, umask=022,
                                    allow_shutdown='file')
        # Mock out gracefulShutdown
        buildslave.gracefulShutdown = Mock()
        # Mock out os.path methods
        exists = Mock()
        mtime = Mock()
        self.patch(os.path, 'exists', exists)
        self.patch(os.path, 'getmtime', mtime)
        # Pretend that the shutdown file doesn't exist
        mtime.return_value = 0
        exists.return_value = False
        buildslave._checkShutdownFile()
        # We shouldn't have called gracefulShutdown
        self.assertEquals(buildslave.gracefulShutdown.call_count, 0)
        # Pretend that the file exists now, with an mtime of 2
        exists.return_value = True
        mtime.return_value = 2
        buildslave._checkShutdownFile()
        # Now we should have changed gracefulShutdown
        self.assertEquals(buildslave.gracefulShutdown.call_count, 1)
        # Bump the mtime again, and make sure we call shutdown again
        mtime.return_value = 3
        buildslave._checkShutdownFile()
        self.assertEquals(buildslave.gracefulShutdown.call_count, 2)
        # Try again, we shouldn't call shutdown another time
        buildslave._checkShutdownFile()
        self.assertEquals(buildslave.gracefulShutdown.call_count, 2)
|
import logging
import logging.handlers
import datetime
class cls_logger:
    """Process-wide logging facade over the stdlib logging module.

    All state is class-level (singleton style): call init_logger() once,
    then write_log() from anywhere.  Records go to a timestamped rotating
    log file in the current working directory.

    Fix: close_logger() now closes the file handler after detaching it;
    previously the handler was removed but never closed, leaking the
    open file descriptor.
    """
    __inst = None          # the singleton logging.Logger, created lazily
    name = 'cls_logger'    # logger name; also used as the log-file prefix
    handler = None         # RotatingFileHandler attached to the logger

    @staticmethod
    def init_logger(log_level):
        """Create the singleton logger and attach a rotating file handler.

        :param log_level: stdlib logging level (CRITICAL=50, FATAL=CRITICAL,
            ERROR=40, WARNING=30, WARN=WARNING, INFO=20, DEBUG=10, NOTSET=0)
        :return: None
        """
        if cls_logger.__inst is None:
            print('Initializing logger...')
            cls_logger.__inst = logging.getLogger(cls_logger.name)
            # log file name: <name>_<YYYYmmdd_HHMMSS>.log
            str_now = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
            log_file_name = str(cls_logger.name + '_' + str_now) + '.log'
            formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s %(message)s',
                                          datefmt='[%Y/%m/%d_%H:%M:%S]')
            cls_logger.__inst.setLevel(log_level)
            # rotate at 128 MiB and keep up to 16 backup files
            cls_logger.handler = logging.handlers.RotatingFileHandler(log_file_name, maxBytes=128 * 1024 * 1024,
                                                                      backupCount=16)
            cls_logger.handler.setLevel(log_level)
            cls_logger.handler.setFormatter(formatter)
            cls_logger.__inst.addHandler(cls_logger.handler)
            cls_logger.write_log('cls_logger', 'Logger initialized! Log filter:' + str(log_level), 20)

    @staticmethod
    def set_log_filter_level(log_level):
        """Change the logger's filter level; no-op before init_logger().

        :param log_level: stdlib logging level (see init_logger)
        :return: None
        """
        if cls_logger.__inst is not None:
            cls_logger.__inst.setLevel(log_level)
            cls_logger.write_log('cls_logger', 'Set log level=' + str(log_level), 20)

    @staticmethod
    def write_log(tag, message, log_level=10, *args, **kwargs):
        """Log '[tag] message' at *log_level*; silently ignored before init.

        :param tag: string prepended in square brackets
        :param message: log message body
        :param log_level: stdlib logging level (default DEBUG=10)
        :param args: forwarded to Logger.log()
        :param kwargs: forwarded to Logger.log()
        :return: None
        """
        if cls_logger.__inst is not None:
            log_msg = '[' + str(tag) + '] ' + str(message)
            cls_logger.__inst.log(log_level, log_msg, *args, **kwargs)

    @staticmethod
    def close_logger():
        """Detach the file handler and close it, releasing the log file."""
        if cls_logger.__inst is not None and cls_logger.handler is not None:
            cls_logger.__inst.removeHandler(cls_logger.handler)
            # fix: actually release the underlying file descriptor
            cls_logger.handler.close()
def solve():
    """Read one line of input and print how many vowels it contains
    (case-insensitive)."""
    text = input().lower()
    total = sum(text.count(vowel) for vowel in "aeiou")
    print(total)


if __name__ == "__main__":
    solve()
|
from flask import Flask, render_template, send_file, send_from_directory, request
import json
from flask_tus import tus_manager
from flask_cors import CORS
import bcrypt
from flask_sqlalchemy import SQLAlchemy
import os
import redis
from file_storage import FileStorage
from s3_storage import S3Storage
from elasticsearch import Elasticsearch
from elasticsearch_dsl import connections
from pathlib import Path
import yaml
app = Flask(__name__)
# Config class is chosen via the APP_SETTINGS env var (dev config by default).
app_settings = os.getenv(
    'APP_SETTINGS',
    'server.config.DevelopmentConfig'
)
app.config.from_object(app_settings)
# Select the upload backend from config: local filesystem or S3.
if app.config.get('UPLOAD_TYPE') == 'file':
    storage = FileStorage(app.config.get('UPLOAD_FOLDER'))
elif app.config.get('UPLOAD_TYPE') == 'aws':
    storage = S3Storage(app.config.get('AWS_BUCKET'))
# tus resumable-upload endpoint mounted at /uploads.
tm = tus_manager(app, upload_url='/uploads', upload_folder=app.config.get('UPLOAD_FOLDER'), overwrite=True,
                 upload_finish_cb=None, storage=storage)
db = SQLAlchemy(app)
# NOTE(review): the Redis password is hard-coded here; it should come from config.
redis_db = redis.StrictRedis(host=app.config.get('REDIS_SERVERNAME'), port=6379, db=0, password='devpassword')
es_client = Elasticsearch()
connections.create_connection(hosts=['elasticsearch'])
# Register the feature blueprints (imported late to avoid circular imports).
from .work import work as work_blueprint
app.register_blueprint(work_blueprint)
from .bookmark import bookmark as bookmark_blueprint
app.register_blueprint(bookmark_blueprint)
from .message import message as message_blueprint
app.register_blueprint(message_blueprint)
from .api import api as api_blueprint
app.register_blueprint(api_blueprint)
from .tag import tag as tag_blueprint
app.register_blueprint(tag_blueprint)
@app.route('/<path:stuff>/uploads/<path:filename>', methods=['GET'])
def download(stuff, filename):
    """Serve an uploaded file from the tus upload folder.

    The leading <stuff> path segment is accepted but ignored.
    """
    uploads = os.path.join(app.root_path, tm.upload_folder)
    return send_from_directory(directory=uploads, filename=filename)
@app.route('/audio/<string:audio_file>')
def audio(audio_file):
    """Serve a static audio file from the 'audio' directory."""
    return send_from_directory(filename=audio_file, directory='audio')
@app.before_first_request
def do_init():
    """Seed initial data from seed.yml on the first request.

    Creates the 'admin' user if missing; when the seed file marks the
    deployment as non-development, the file is deleted afterwards so
    credentials do not linger.
    """
    path = os.path.dirname(os.path.abspath(__file__))+"/seed.yml"
    my_file = Path(path)
    develop = True
    if my_file.is_file():
        with open(path, 'r') as stream:
            try:
                objects = yaml.safe_load(stream)
                # NOTE(review): 'user' is not imported in this module —
                # presumably bound elsewhere at runtime; verify.
                admin = user.logic.get_by_username('admin')
                if admin is None:
                    user.logic.create_user('admin', objects['admin_pw'], objects['admin_email'], True)
                if objects['develop'] == False:
                    develop = False
            except yaml.YAMLError as exc:
                print(exc)
    # remove the seed file on non-development deployments
    if develop is False:
        os.remove(path)

if __name__ == '__main__':
    app.run(debug=True)
|
import yaml
from utils.apiLib import *
from utils.utilGetJson import *
import json
# Load behave settings and test data once at import time.
# Fixes: yaml.load() without an explicit Loader is deprecated (and unsafe on
# untrusted input), and the file objects were never closed.  The former
# module-level "global app_data" statement was a no-op and was removed.
with open("../settings/config.yml") as _config_stream:
    app_data = yaml.safe_load(_config_stream)
with open("../data/data.yml") as _data_stream:
    app_data2 = yaml.safe_load(_data_stream)
def before_all(context):
    """behave hook: runs once; builds the base API URL and auth token from
    config.yml and stores them on the behave context."""
    print("************************ BEFORE ALL *************************************************")
    context.host = app_data['app']['host']
    context.root = app_data['app']['root']
    context.version = app_data['app']['version']
    context.token = app_data['user']['token']
    # base url = host + root + version
    context.url=context.host+context.root+context.version
    # placeholders populated per scenario by the before_scenario hook
    context.id = None
    context.data = None
    context.method = None
def before_scenario(context, scenario):
    """behave hook: tag-driven fixtures.

    Creates the task/project/label the tagged scenario will act on and
    stores its id on the context.
    """
    if 'insert_tasks' in scenario.tags:
        context.data = app_data2['task1']['task_name']
        response=perform_post("tasks", None, context.data)
        json_response = response.json()
        print(json_response['id'])
        context.id=json_response['id']
    if 'close_tasks' in scenario.tags:
        # create a task, then close it so the scenario starts from a closed task
        data = app_data2['task1']['task_name']
        response = perform_post("tasks", None, data)
        json_response1 = response.json()
        context.id = json_response1['id']
        response2 = perform_close("tasks", context.id)
    if ('update_project' in scenario.tags) or ('get_project' in scenario.tags) or ('delete_project' in scenario.tags):
        #Before getting a Project I have to create one to be dynamic
        #Creating a new Project
        #Gathering data from file2
        data=app_data2['project']['project_name_new']
        response=perform_post("projects",None,data)
        json_response = response.json()
        print("_____________________________________________________________________________________")
        #Getting the id of new Project
        context.id=json_response['id']
    if 'get_all_projects' in scenario.tags:
        response = perform_gets("projects")
        context.data = getJson()
    if ('get_label' in scenario.tags) or ('delete_label' in scenario.tags) or ('update_label' in scenario.tags):
        #Before getting or deleting a label I have to create one
        data = app_data2['labels']['label_name']
        response = perform_post("labels", None, data)
        json_response = response.json()
        print(json_response['id'])
        context.id = json_response['id']
def after_scenario(context, scenario):
    """behave hook: tears down the fixtures created in before_scenario."""
    if 'delete_tasks' in scenario.tags:
        response=perform_delete("tasks",context.id)
    if ('create_label'in scenario.tags) or ('get_label' in scenario.tags) or ('update_label' in scenario.tags):
        # After getting the label I have to remove it
        response = perform_delete("labels", context.id)
    if 'delete_project' in scenario.tags:
        # After getting, updating the Project I have to remove it
        response=perform_delete("projects",context.id)
|
"""
WIP
"""
import sys
import os
import fbx
from brenpy.qt.bpQtImportUtils import QtWidgets
from brenpy.qt import bpCollapsibleWidgets
from brenfbx.core import bfCore
from brenpy.qt import bpQtWidgets
from brenfbx.qt.scene import bfQtSceneModels
from brenfbx.items import bfSceneItems
from brenfbx.qt.object import bfObjectReferenceWidgets
from brenfbx.qt.object import bfQtObjectWidgets
from brenfbx.qt import bfQtCore
from brenfbx.fbxsdk.core.math import bfMath
from brenfbx.qt.property import bfQtPropertyValueWidgets
from brenfbx.fbxsdk.core import bfObject
from brenfbx.fbxsdk.scene.geometry import bfNode
class BFbxNodeAttributeWidget(
    # bfCore.BfObjectBase,
    bfObject.BfObjectReferenceBase,
    bfObjectReferenceWidgets.BFbxObjectReferenceWidget
):
    """Object-reference widget bound to the node attribute of an FbxNode."""
    FBX_CLASS_ID = fbx.FbxNode.ClassId
    def __init__(self, *args, **kwargs):
        super(BFbxNodeAttributeWidget, self).__init__(*args, **kwargs)
        self.set_label("Node Attribute")
        self.refresh()
    def _refresh(self):
        """Get model index for current Node attribute and create mapping.
        """
        # nothing to map against until a scene model exists
        if self.bf_environment().scene_model() is None:
            return True
        attr_object = self.fbx_object().GetNodeAttribute()
        if attr_object is not None:
            index = self.bf_environment().scene_model().create_index(attr_object)
            self.set_index(index)
        else:
            # node currently has no attribute; clear the selection
            self.set_index(None)
        return True
    def _set_fbx_object_reference(self, fbx_object):
        """Apply the chosen reference via FbxNode.SetNodeAttribute."""
        # NOTE(review): 'res' from the super call is unused
        res = super(BFbxNodeAttributeWidget, self)._set_fbx_object_reference(fbx_object)
        self.debug("Setting node attribute {} -> {}".format(fbx_object, self.fbx_object()))
        self.fbx_object().SetNodeAttribute(fbx_object)
        self.debug("Node attribute set: {} -> {}".format(
            self.fbx_object(), self.fbx_object().GetNodeAttribute()
        ))
class BfNodeTypeWidget(
    # QtWidgets.QWidget
    # bfCore.BfObjectBase,
    bfObject.BfObjectReferenceBase,
    bpCollapsibleWidgets.BpCollapsibleWidget
):
    """Collapsible section wrapping a BFbxNodeAttributeWidget."""
    FBX_CLASS_ID = fbx.FbxNode.ClassId
    def __init__(self, *args, **kwargs):
        super(BfNodeTypeWidget, self).__init__(show_handle=False, *args, **kwargs)
        self.set_label("Node Attribute")
        self._widget = BFbxNodeAttributeWidget(self.bf_environment(), self.bf_object(), parent=self)
        # register the child widget for debugging, refresh and layout
        for widget in [
            self._widget
        ]:
            self.add_debug_object(widget)
            self.add_refreshable_widget(widget)
            self.add_widget(widget, show_handle=False)
class BfNodeRotationOrderWidget(
    bfCore.BfObjectBase,
    bpQtWidgets.BpRefreshableWidgetBase,
    bpQtWidgets.BpComboWidget
):
    """Widget to change rotation order of FbxNode

    TODO
    for some reason fbx is not syncing this data with the rotation order property
    investigate!!
    """
    FBX_CLASS_ID = fbx.FbxNode.ClassId
    def __init__(self, *args, **kwargs):
        super(BfNodeRotationOrderWidget, self).__init__(
            items=bfMath.BfRotationOrderEnum.NAMES,
            *args, **kwargs
        )
        self.set_label("Rotation Order")
        self.label_widget().setFixedWidth(bfQtCore.DEFAULT_LABEL_WIDTH)
        self._refresh()
        self.connect_widgets()
    def _refresh(self):
        """Sync the combo selection with the node's current rotation order."""
        order_value = self.fbx_object().GetRotationOrder(self.fbx_object().eSourcePivot)
        index = bfMath.BfRotationOrderEnum.VALUES.index(order_value)
        self.combo_widget().setCurrentIndex(index)
    def connect_widgets(self):
        # react to user edits of the combo box
        self._combo.currentIndexChanged.connect(self._combo_index_changed)
    def _combo_index_changed(self):
        """If user changes combo box value, set fbx node rotation order to match.
        """
        index = self.combo_widget().currentIndex()
        order_value = bfMath.BfRotationOrderEnum.VALUES[index]
        order_name = bfMath.BfRotationOrderEnum.NAMES[index]
        self.fbx_object().SetRotationOrder(self.fbx_object().eSourcePivot, order_value)
        # for some reason the above method does not update the property, so we must do that as well
        self.fbx_object().RotationOrder.Set(order_value)
        self.debug(
            "FbxNode rotation order changed: {} {}".format(self.fbx_object().GetName(), order_name),
            level=0
        )
        self.emit_refresh_request()
        return True
class BfNodeTransformWidget(
    # QtWidgets.QWidget
    # bfCore.BfObjectBase,
    bfObject.BfObjectReferenceBase,
    bpCollapsibleWidgets.BpCollapsibleWidget
):
    """Collapsible section exposing local translation / rotation / scale
    as spin-box array widgets.

    TODO wrap FbxProperties in BfProperty's in BfObjects etc...
    """
    FBX_CLASS_ID = fbx.FbxNode.ClassId
    def __init__(self, *args, **kwargs):
        super(BfNodeTransformWidget, self).__init__(show_handle=False, *args, **kwargs)
        self.set_label("Transform")
        self._lcl_translation_widget = bfQtPropertyValueWidgets.BfDoubleArrayPropertySpinBoxWidget(
            self.bf_environment(), self.bf_object().lcl_translation()  # self.fbx_object().LclTranslation#, parent=self
        )
        self._lcl_rotation_widget = bfQtPropertyValueWidgets.BfDoubleArrayPropertySpinBoxWidget(
            self.bf_environment(), self.bf_object().lcl_rotation()  # self.fbx_object().LclRotation#, parent=self
        )
        self._lcl_scale_widget = bfQtPropertyValueWidgets.BfDoubleArrayPropertySpinBoxWidget(
            self.bf_environment(), self.bf_object().lcl_scale()  # self.fbx_object().LclScaling#, parent=self
        )
        # register each child widget for debugging, refresh and layout
        for widget in [
            self._lcl_translation_widget,
            self._lcl_rotation_widget,
            self._lcl_scale_widget,
        ]:
            self.add_debug_object(widget)
            self.add_refreshable_widget(widget)
            self.add_widget(widget, show_handle=False)
class BfNodeOrientationWidget(
    # QtWidgets.QWidget
    # bfCore.BfObjectBase,
    bfObject.BfObjectReferenceBase,
    bpCollapsibleWidgets.BpCollapsibleWidget
):
    """Collapsible section exposing pre/post rotation and rotation order."""
    FBX_CLASS_ID = fbx.FbxNode.ClassId
    def __init__(self, *args, **kwargs):
        super(BfNodeOrientationWidget, self).__init__(show_handle=False, *args, **kwargs)
        self.set_label("Orientation")
        self._pre_rotation_widget = bfQtPropertyValueWidgets.BfDoubleArrayPropertySpinBoxWidget(
            self.bf_environment(), self.bf_object().pre_rotation()  # self.fbx_object().PreRotation, parent=self
        )
        self._post_rotation_widget = bfQtPropertyValueWidgets.BfDoubleArrayPropertySpinBoxWidget(
            self.bf_environment(), self.bf_object().post_rotation()  # self.fbx_object().PostRotation, parent=self
        )
        self._rotate_order_widget = BfNodeRotationOrderWidget(
            self.bf_environment(), self.fbx_object()
        )
        for widget in [
            self._pre_rotation_widget,
            self._post_rotation_widget,
            self._rotate_order_widget,
        ]:
            self.add_widget(widget, show_handle=False)
            self.add_refreshable_widget(widget)
            self.add_debug_object(widget)
        self._pre_rotation_widget.VALUE_CHANGED.connect(self._pre_rotation_value_changed)
        self._post_rotation_widget.VALUE_CHANGED.connect(self._post_rotation_value_changed)
    def fbx_node(self):
        # NOTE(review): self._fbx_node is never assigned in this class;
        # presumably a base class sets it — confirm, otherwise the rotation
        # callbacks below raise AttributeError when they fire.
        return self._fbx_node
    def _pre_rotation_value_changed(self, values):
        """Push edited pre-rotation values onto the FbxNode."""
        self.debug("Setting pre-rotation: {} {}".format(self.fbx_node().GetName(), values), level=self.LEVELS.mid())
        self.fbx_node().SetPreRotation(
            self.fbx_node().eSourcePivot,  # == joint orientation
            fbx.FbxVector4(*values)
        )
        return True
    def _post_rotation_value_changed(self, values):
        """Push edited post-rotation values onto the FbxNode."""
        self.debug("Setting post-rotation: {} {}".format(self.fbx_node().GetName(), values), level=self.LEVELS.mid())
        self.fbx_node().SetPostRotation(
            self.fbx_node().eSourcePivot,  # == joint orientation
            fbx.FbxVector4(*values)
        )
        return True
class BfNodeAEWidget(bfQtObjectWidgets.BfObjectAttributesEditorWidget):
    """Attribute-editor widget specialised for FbxNode objects."""
    def __init__(self, *args, **kwargs):
        super(BfNodeAEWidget, self).__init__(*args, **kwargs)
    def create_widgets(self):
        """Create node specific widgets and insert into self._widgets list.
        """
        super(BfNodeAEWidget, self).create_widgets()
        self._node_type_widget = BfNodeTypeWidget(self.bf_environment(), self.bf_object())  # , parent=self)
        self._transform_widget = BfNodeTransformWidget(self.bf_environment(), self.bf_object())  # , parent=self)
        self._orientation_widget = BfNodeOrientationWidget(self.bf_environment(), self.bf_object())  # , parent=self)
        self._widgets += [
            self._node_type_widget,
            self._transform_widget,
            self._orientation_widget
        ]
        self.add_debug_objects([
            self._node_type_widget,
            self._transform_widget,
            self._orientation_widget
        ])
        self.add_refreshable_widgets([
            self._node_type_widget,
            self._transform_widget,
            self._orientation_widget
        ])
        # self._widgets.insert(0, self._node_type_widget)
        # self._widgets.insert(1, self._transform_widget)
        # self._widgets.insert(2, self._orientation_widget)
class BfNodeEditorWidget(bfQtObjectWidgets.BfObjectEditorWidget):
    """BfObjectEditorWidget subclass for FbxNode objects
    """
    # use the node-specific attributes editor defined above
    ATTRIBUTES_EDITOR_CLS = BfNodeAEWidget
    def __init__(self, *args, **kwargs):
        super(BfNodeEditorWidget, self).__init__(*args, **kwargs)
class BFbxSkeletonTypeComboBox(
    QtWidgets.QComboBox
    # QtWidgets.QWidget
    # bpCollapsibleWidgets.BpCollapsibleWidget
):
    """Combo box for viewing/editing an FbxSkeleton's skeleton type.

    TODO use refreshable widget!
    """
    def __init__(self, fbx_object, parent=None):
        super(BFbxSkeletonTypeComboBox, self).__init__(parent=parent)
        self._fbx_object = None
        # order must match the item labels added below
        self._values = [
            fbx.FbxSkeleton.eRoot,
            fbx.FbxSkeleton.eLimb,
            fbx.FbxSkeleton.eLimbNode,
            fbx.FbxSkeleton.eEffector,
        ]
        self.addItems([
            "Root",
            "Limb",
            "LimbNode",
            "Effector"
        ])
        if fbx_object is not None:
            self.set_fbx_object(fbx_object)
        self.currentIndexChanged.connect(self.index_changed)
    def fbx_object(self):
        return self._fbx_object
    def set_fbx_object(self, fbx_object):
        """Bind the combo to *fbx_object* and select its current type."""
        self._fbx_object = fbx_object
        value = fbx_object.GetSkeletonType()
        index = self._values.index(value)
        self.setCurrentIndex(index)
    def index_changed(self, index):
        """Write the newly selected type back to the bound skeleton."""
        if self._fbx_object is None:
            return False
        value = self._values[index]
        self._fbx_object.SetSkeletonType(value)
        return True
class BfSkeletonAEWidget(bfQtObjectWidgets.BfObjectAttributesEditorWidget):
    """Object widget customised for FbxSkeleton objects
    """
    def __init__(self, *args, **kwargs):
        super(BfSkeletonAEWidget, self).__init__(*args, **kwargs)
    def create_skeleton_type_widgets(self):
        """Build the labelled 'Skeleton Type' row with its combo box."""
        self._skeleton_type_widget = QtWidgets.QWidget()
        self._skeleton_type_widget.setFixedHeight(30)
        self._skeleton_type_lyt = QtWidgets.QHBoxLayout()
        self._skeleton_type_widget.setLayout(self._skeleton_type_lyt)
        self._skeleton_type_lyt.setContentsMargins(0, 0, 0, 0)
        self._skeleton_type_label = QtWidgets.QLabel("Skeleton Type")
        self._skeleton_type_label.setFixedWidth(bfQtCore.DEFAULT_LABEL_WIDTH)
        self._skeleton_type_combo = BFbxSkeletonTypeComboBox(self.fbx_object(), parent=self)
        self._skeleton_type_lyt.addWidget(self._skeleton_type_label)
        self._skeleton_type_lyt.addWidget(self._skeleton_type_combo)
        self._skeleton_type_lyt.addStretch()
    def create_widgets(self):
        """Create skeleton specific widgets and insert into self._widgets list.
        """
        super(BfSkeletonAEWidget, self).create_widgets()
        self.create_skeleton_type_widgets()
        self._skeleton_collapsible_widget = bpCollapsibleWidgets.BpCollapsibleWidget(
            label="Skeleton stuff", show_handle=False
        )
        self._skeleton_collapsible_widget.add_widget(self._skeleton_type_widget, show_handle=False)
        self._widgets.insert(0, self._skeleton_collapsible_widget)
# class BfSkeletonEditorWidget(bfQtObjectWidgets.BfObjectEditorWidget):
# """BfObjectEditorWidget subclass for FbxSkeleton objects
# """
# ATTRIBUTES_EDITOR_CLS = BfSkeletonAEWidget
#
# def __init__(self, *args, **kwargs):
# super(BfSkeletonEditorWidget, self).__init__(*args, **kwargs)
class Test1(QtWidgets.QWidget):
    """Ad-hoc test: show a BFbxNodeAttributeWidget for scene child 2.

    NOTE(review): uses Python 2 print statements; this module is Python 2 only.
    """

    def __init__(self, base):
        super(Test1, self).__init__()
        # Build a scene model over the test scene and register it on the env.
        scene_tree = bfSceneItems.FbxSceneTreeItemManager(
            base.bf_environment(), fbx_scene=base._scene
        )
        self._scene_model = bfQtSceneModels.BfFbxSceneModel()
        self._scene_model.set_item_manager(scene_tree)
        base.bf_environment().set_scene_model(self._scene_model)
        # child 2 should be skeleton node
        self._node = base._scene.GetRootNode().GetChild(2)
        print "Node: ", self._node.GetName()
        self.edit_widget = BFbxNodeAttributeWidget(
            base.bf_environment(), self._node
        )
        self.create_layout()
        self.show()

    def create_layout(self):
        # Single vertical layout holding the attribute editor widget.
        self.lyt = QtWidgets.QVBoxLayout()
        self.setLayout(self.lyt)
        self.lyt.addWidget(self.edit_widget)
class Test2(object):
    """Ad-hoc test: show a BfNodeRotationOrderWidget for scene child 2."""

    def __init__(self, base):
        # child 2 should be skeleton node
        self._node = base._scene.GetRootNode().GetChild(2)
        print "Node: ", self._node.GetName()
        self._test = BfNodeRotationOrderWidget(base.bf_environment(), self._node)
        self._test.show()
class Test3(object):
    """Ad-hoc test: show a BfNodeAEWidget for scene child 2 and dump its
    refreshable-widget debug info."""

    def __init__(self, base):
        scene_tree = bfSceneItems.FbxSceneTreeItemManager(
            base.bf_environment(), fbx_scene=base._scene
        )
        self._scene_model = bfQtSceneModels.BfFbxSceneModel()
        self._scene_model.set_item_manager(scene_tree)
        base.bf_environment().set_scene_model(self._scene_model)
        # child 2 should be skeleton node
        self._node = base._scene.GetRootNode().GetChild(2)
        bf_object = bfNode.BfNode(base.bf_environment(), self._node)
        self._widget = BfNodeAEWidget(
            base.bf_environment(), bf_object#, debug_level=bpDebug.DebugLevel.all()
        )
        # self._properties_widget.set_fbx_object(fbx_object, self._fbx_manager)
        self._widget.debug_refreshable_widgets(recursive=True)
        self._widget.show()
if __name__ == "__main__":
    # Manual test harness: load a known FBX scene and run one of the Test*
    # widgets inside a QApplication event loop.
    DUMP_DIR = r"D:\Repos\dataDump\brenfbx"
    TEST_FILE = "brenfbx_test_scene_01.fbx"
    app = QtWidgets.QApplication(sys.argv)
    from brenfbx.utils import bfEnvironmentUtils
    base = bfEnvironmentUtils.BfTestBase(file_path=os.path.join(DUMP_DIR, TEST_FILE))
    # test = Test1(base)
    # test = Test2(base)
    test = Test3(base)
    sys.exit(app.exec_())
|
# Generated by Django 3.2.3 on 2021-07-08 12:04
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: creates the orders1 model and adds a
    # non-null 'total' column (backfilled with 1) to the existing
    # 'details' model.

    dependencies = [
        ('userapp', '0005_auto_20210708_1259'),
    ]

    operations = [
        migrations.CreateModel(
            name='orders1',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('quantity', models.IntegerField()),
                ('price', models.IntegerField()),
                ('total', models.IntegerField()),
            ],
        ),
        migrations.AddField(
            model_name='details',
            name='total',
            field=models.IntegerField(default=1),
            preserve_default=False,
        ),
    ]
|
def signed_digit_sum(num_str):
    """Return the digit sum of *num_str*, keeping the sign on the first digit.

    For a negative input such as "-45" the leading sign is folded into the
    first digit (-4) and the remaining digits are added as-is: -4 + 5 = 1.
    Non-negative inputs get a plain digit sum.

    Fixes over the original script: no longer shadows the builtin ``sum``,
    the two near-identical digit loops are unified, and the logic is a
    testable function behind a main guard.

    :param num_str: string representation of an integer
    :return: the (signed) digit sum as an int
    """
    if int(num_str) < 0:
        # num_str[0:2] is the sign plus the first digit, e.g. "-4" -> -4
        return int(num_str[0:2]) + sum(int(digit) for digit in num_str[2:])
    return sum(int(digit) for digit in num_str)


if __name__ == "__main__":
    # Read three integers from stdin and print each one's signed digit sum.
    for _ in range(3):
        print(signed_digit_sum(input()))
from tkinter import *
from tkinter import ttk
import random
# Main window setup
root = Tk()
root.title("0518.py GUI")

# --- input area -------------------------------------------------------------
write_frame = ttk.Frame(root)
write_frame.grid()
write_label = ttk.Label(
    write_frame,
    text = "何かお話してください",
)
write_label.grid(row = 0, column = 0)
write = StringVar()
write_entry = ttk.Entry(
    write_frame,
    textvariable = write,
    width = 100
)
write_entry.grid(row = 0, column = 1)

# --- reply logic ------------------------------------------------------------
def write_button_click():
    # Pick one of three canned replies pseudo-randomly, write the user's text
    # plus the reply into the log, then clear the entry field.
    # NOTE(review): Text.insert(-1.0, ...) is an unusual index (floats are
    # normally positive "line.column" values) -- confirm it behaves as
    # insert-at-start against the tkinter Text index rules.
    ram = random.randint(1,3)
    if ram%3 == 0:
        read_lb.insert(-1.0, write.get() + "\n>>> なるほど. \n")
        write_entry.delete(0, END)
    elif ram%3 == 1:
        read_lb.insert(-1.0, write.get() + "\n>>> すごいな. \n")
        write_entry.delete(0, END)
    else:
        read_lb.insert(-1.0, write.get() + "\n>>> 悪いのは君じゃない. \n")
        write_entry.delete(0, END)

write_button = ttk.Button(write_frame, text = "話す", command=write_button_click)
write_button.grid(row = 0, column = 2)

# --- output area ------------------------------------------------------------
read_frame = ttk.Frame(root)
read_frame.grid()
read_lb = Text(read_frame, width = 100,height = 20)
read_lb.grid()
read_Scrollbar = ttk.Scrollbar(
    read_frame,
    orient = VERTICAL,
    command = read_lb.yview
)
read_lb["yscrollcommand"] = read_Scrollbar.set
read_Scrollbar.grid(row = 0,column = 1, sticky=(N,S))

root.mainloop()
from conversion.conversion_utils import fixed_attribute
from .base import BaseEnaConverter
# Declarative mapping from ENA Project XML elements/attributes to the flat
# attribute names used by the converter. Keys starting with '@' are XML
# attributes; nested dicts mirror nested XML elements. SEQUENCING_PROJECT
# uses fixed_attribute to emit a constant (empty) value.
PROJECT_SPEC = {
    '@center_name': ['center_name'],
    'NAME': ['study_name'],
    'TITLE': ['short_description'],
    'DESCRIPTION': ['abstract'],
    'SUBMISSION_PROJECT': {
        'SEQUENCING_PROJECT': ['', fixed_attribute, '']
    }
}
class EnaProjectConverter(BaseEnaConverter):
    """Converter for ENA 'Project' records, driven by PROJECT_SPEC."""

    def __init__(self):
        super().__init__(ena_type='Project', xml_spec=PROJECT_SPEC)
|
from turtle import Turtle
# Heading angles in degrees (turtle's standard orientation).
UP = 90
DOWN = 270
LEFT = 180
RIGHT = 0
# Pixels the snake advances per move() call; also the segment spacing.
MOVE_DISTANCE = 20
class Snake:
    """A snake built from white square turtle segments.

    The body lives in ``snake_segment``; its first entry is the head.
    """

    def __init__(self):
        # x-offset at which the next created segment is placed
        self.width = 0
        self.snake_segment = []
        self.create_snake()
        self.head = self.snake_segment[0]

    def create_snake(self):
        """Lay down the initial three-segment body."""
        for position in range(3):
            self.add_segment(position)

    def add_segment(self, position):
        """Append one segment at (self.width, 0) and shift the offset left.

        NOTE(review): ``position`` is accepted but never used -- placement is
        driven entirely by ``self.width``; extend() passes the tail's
        coordinates here, which are likewise ignored. Confirm intent.
        """
        segment = Turtle(shape="square")
        segment.penup()
        segment.color("white")
        segment.setpos(self.width, 0)
        self.width -= 20
        self.snake_segment.append(segment)

    def extend(self):
        """Grow the snake by one segment."""
        self.add_segment(self.snake_segment[-1].position())

    def move(self):
        """Each segment steps into its predecessor's spot (tail first), then
        the head moves forward by MOVE_DISTANCE."""
        for idx in range(len(self.snake_segment) - 1, 0, -1):
            leader = self.snake_segment[idx - 1]
            self.snake_segment[idx].goto(leader.xcor(), leader.ycor())
        self.snake_segment[0].forward(MOVE_DISTANCE)

    # Direction handlers: a turn is ignored when it would reverse the snake
    # directly onto itself.
    def up(self):
        if self.head.heading() != DOWN:
            self.head.setheading(UP)

    def down(self):
        if self.head.heading() != UP:
            self.head.setheading(DOWN)

    def left(self):
        if self.head.heading() != RIGHT:
            self.head.setheading(LEFT)

    def right(self):
        if self.head.heading() != LEFT:
            self.head.setheading(RIGHT)
|
import cv2
from threading import Thread
import socket
import struct
import time
import sys
import argparse
import os
import zlib
import base64 as b64
from datetime import datetime, timedelta
try:
import cPickle as pickle
except ImportError:
import pickle
class VideoCamera(object):
    """Frame source wrapping cv2.VideoCapture(0) (the default webcam),
    optionally recording processed flow frames to <save>.avi."""

    def __init__(self, width=320, height=240, save="", fps=20):
        self.video = cv2.VideoCapture(0)
        self.width = width
        self.height = height
        self.fourcc = cv2.VideoWriter_fourcc(*'XVID')
        self.fps = fps
        # NOTE(review): self.out only exists when save is non-empty, so
        # save_flow() raises AttributeError otherwise; callers guard on
        # len(args.save) before calling save_flow().
        if len(save) > 0:
            self.out = cv2.VideoWriter(save + ".avi", self.fourcc, int(fps), (int(width), int(height)))

    def __del__(self):
        self.video.release()

    def get_frame(self, save=None):
        # Retries until a frame is read -- blocks forever if the camera
        # never delivers. *save* is unused here (kept for interface parity
        # with VideoList/ImageList).
        while True:
            success, image = self.video.read()
            if success:
                image = cv2.resize(image, (self.width, self.height))
                return success, image

    def save_flow(self, flow):
        # Append one processed frame to the output video (requires save!="").
        self.out.write(flow)
class VideoList(object):
    """Frame source iterating across a list of video files (one path per
    line in *list*), optionally re-encoding each input to <save>/<idx>.avi."""

    def __init__(self, width=320, height=240, list="videoList", save="", fps=20):
        # NOTE(review): parameter name 'list' shadows the builtin; kept
        # because callers pass it positionally/by keyword.
        self.video_list = open(list, 'r').readlines()
        self.cursor = 0
        self.video = cv2.VideoCapture(self.video_list[self.cursor].replace("\n", ""))
        self.width = width
        self.height = height
        self.fourcc = cv2.VideoWriter_fourcc(*'XVID')
        self.fps = fps
        self.save = save
        if len(save) > 0:
            if not os.path.exists(save):
                os.makedirs(save)
            self.out = cv2.VideoWriter(self.save + "/" + str(self.cursor) + ".avi", self.fourcc, int(self.fps), (int(self.width), int(self.height)))

    def __del__(self):
        self.video.release()

    def load_new_video(self, save):
        # Advance to the next file in the list; when saving, roll the writer
        # over to a new per-video output file.
        self.cursor += 1
        if self.cursor < len(self.video_list):
            self.video = cv2.VideoCapture(self.video_list[self.cursor].replace("\n", ""))
            if save:
                self.out.release()
                self.out = cv2.VideoWriter(self.save + "/" + str(self.cursor) + ".avi", self.fourcc, int(self.fps), (int(self.width), int(self.height)))

    def get_frame(self, save):
        # Read one frame; at end-of-video switch to the next file and retry
        # once. success stays False once the whole list is exhausted.
        success, image = self.video.read()
        if success:
            image = cv2.resize(image, (self.width, self.height))
        else:
            self.load_new_video(save)
            success, image = self.video.read()
            if success:
                image = cv2.resize(image, (self.width, self.height))
        return success, image

    def save_flow(self, flow):
        # Append one processed frame to the current per-video output file.
        self.out.write(flow)
class ImageList(object):
    """Frame source iterating over still images (one path per line in
    *list*), optionally saving processed results as PNGs under *save*."""

    def __init__(self, width=320, height=240, list="imageList", save=""):
        self.image_list = open(list, 'r').readlines()
        self.cursor = 0
        # First image is loaded in colour here, while get_new_image reloads
        # with flag 0 (grayscale) -- NOTE(review): confirm the asymmetry is
        # intentional.
        self.image = cv2.imread(self.image_list[self.cursor].replace("\n", ""))
        self.width = width
        self.height = height
        self.save = save
        if len(save) > 0:
            if not os.path.exists(save):
                os.makedirs(save)

    def get_new_image(self):
        # Load + resize the image at the cursor (grayscale), then advance.
        # Returns (False, last image) once the list is exhausted.
        if self.cursor < len(self.image_list):
            self.image = cv2.imread(self.image_list[self.cursor].replace("\n", ""), 0)
            self.image = cv2.resize(self.image, (self.width, self.height))
            self.cursor += 1
            return True, self.image
        return False, self.image

    def get_frame(self, save=None):
        # Same interface as the video sources; *save* is unused here.
        success, image = self.get_new_image()
        return success, image

    def save_flow(self, flow):
        # Write the processed frame as a max-compression PNG named after the
        # (already advanced) cursor.
        print(self.save + "/" + str(self.cursor) + ".png")
        cv2.imwrite(self.save + "/" + str(self.cursor) + ".png", flow, [cv2.IMWRITE_PNG_COMPRESSION, 9])
class FpsMetter(object):
    """Measures loop iterations per second over ~1 s windows and keeps a
    smoothed average."""

    def __init__(self, args):
        self.args = args
        self.chrono = time.time()   # start of the current measuring window
        self.fps = 0                # fps of the last completed window
        self.average_fps = 0        # smoothed fps across windows
        self.first_loop = True      # True until the first window completes
        self.init_finished = False  # True once a post-first-window sample exists

    def get_fps(self, nb_loop):
        """Accumulate one loop tick; once a second elapsed, compute fps.

        Returns 0 after closing a window (resetting the caller's counter),
        otherwise the incremented counter.
        """
        if time.time() - self.chrono > 1:
            self.first_loop = False
            self.fps = nb_loop / (time.time() - self.chrono)
            # The first window is meant to be excluded from the average
            # (model warm-up would skew it).
            # NOTE(review): first_loop was just set False above, so this
            # branch always runs and average_fps is reseeded every window
            # before being halved below -- the "average" effectively covers
            # only the last two windows; confirm intent.
            if not self.first_loop:
                self.init_finished = True
                self.average_fps = self.fps
            self.average_fps = (self.average_fps + self.fps) / 2
            self.chrono = time.time()
            if self.args.preview > 1 or self.args.preview == -1:
                print(self.fps)
            return 0
        nb_loop += 1
        return nb_loop
class CatchFall(object):
    """Rolling buffer of recent frames; when a fall is flagged, collects a
    few more frames and then writes the clip to disk."""

    def __init__(self):
        self.image_list = []
        self.list_len = 0
        self.fall_detected = False
        self.image_to_add_post_fall = 0
        self.nb_rush_image = 200 #number of image within the video
        self.nb_rush_image_after_fall = 60 #number of image to send after the fall
        self.rush_date = time.time()

    def write_send_fall_video(self, width=320, height=240, fps=20):
        # Dump the buffered frames to a uniquely named tmpN.avi.
        # NOTE(review): despite the name, nothing is sent yet -- the POST is
        # only a print placeholder below.
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        tag = 0
        while os.path.isfile("tmp" + str(tag) + ".avi"):
            tag += 1
        out = cv2.VideoWriter("tmp" + str(tag) + ".avi", fourcc, fps, (width, height))
        print(fps)
        for i in range(0, len(self.image_list)):
            out.write(self.image_list[i])
        self.rush_date = time.time()
        out.release()
        print("send video in POST request")
        #os.remove("tmp" + str(tag) + ".avi")

    def add_image(self, flow_image, width=320, height=240, fps=20):
        # Buffer one frame (FIFO, capped at nb_rush_image). While a fall is
        # pending, count down the extra post-fall frames, then write the clip.
        if self.fall_detected:
            self.image_to_add_post_fall -= 1
            if self.image_to_add_post_fall < 0:
                self.image_to_add_post_fall = 0
                self.fall_detected = False
                self.write_send_fall_video(width, height, fps)
        if self.list_len < self.nb_rush_image:
            self.image_list.append(flow_image)
            self.list_len += 1
        else:
            self.image_list.pop(0)
            self.image_list.append(flow_image)

    def toggle_fall(self):
        # Arm the post-fall countdown once; repeated triggers are ignored
        # until the current clip has been written.
        if not self.fall_detected:
            print("fall detected")
            self.fall_detected = True
            self.image_to_add_post_fall = self.nb_rush_image_after_fall
        else:
            print("fall already detected")
class Streaming(Thread):
    """Client side of the optical-flow pipeline: streams frames to the
    server, receives processed flow images back, and feeds them to the
    preview window, the fall-detection buffer, and optionally to disk.

    Fix: ``Thread.__init__`` was never called, so ``Streaming.start()``
    would raise RuntimeError. ``run()`` invoked directly (as done in
    ``__main__``) behaves exactly as before.
    """

    def __init__(self, args_conf):
        super(Streaming, self).__init__()  # required for Thread subclasses
        self.args = args_conf
        self.catchFall = CatchFall()
        self.fpsMetter = FpsMetter(args_conf)
        self.estimation = False
        # for algorithms that need to load a neural network we pass the first loop otherwise the
        # computing estimation could be rigged
        self.first_loop = True
        if self.args.estimation > 0:
            self.estimation = True
        # Pick the frame source: webcam stream, video list, or image list.
        if self.args.mode == 0:
            self.cap = VideoCamera(self.args.width, self.args.height, self.args.save, self.args.fps)
        elif self.args.mode == 1:
            self.cap = VideoList(self.args.width, self.args.height, self.args.list, self.args.save, self.args.fps)
        elif self.args.mode == 2:
            self.cap = ImageList(self.args.width, self.args.height, self.args.list, self.args.save)
        self.chrono = time.time()
        self.fps = 0

    def compress(self, o):
        # Serialize an object for the wire. (Name is historical: despite the
        # zlib import at module level, no compression is applied.)
        p = pickle.dumps(o, pickle.HIGHEST_PROTOCOL)
        return p

    def decompress(self, s):
        # Inverse of compress(). SECURITY NOTE: pickle.loads on data received
        # from the network is unsafe unless the server is fully trusted.
        p = pickle.loads(s)
        return p

    def send_image(self, s):
        """Read one frame from the source and send it, length-prefixed."""
        success, image = self.cap.get_frame(self.args.save)
        if not success:
            # Source exhausted: close the socket and terminate the process.
            s.close()
            sys.exit(0)
        serialized_data = self.compress(image)
        s.send(struct.pack('!i', len(serialized_data)))
        s.send(serialized_data)

    def receive_image(self, s):
        """Receive one length-prefixed pickled flow image from the server."""
        len_str = s.recv(4)
        size = struct.unpack('!i', len_str)[0]
        blob = b''
        while size > 0:
            if size >= 4096:
                data = s.recv(4096)
            else:
                data = s.recv(size)
            if not data:
                break  # connection closed mid-message
            size -= len(data)
            blob += data
        unserialized_blob = self.decompress(blob)
        return unserialized_blob

    def send_receive_and_analyse_fall_prob(self, s2, flow):
        """Send the flow image on the secondary socket, read back a
        network-order double fall probability, and report falls > 0.8."""
        serialized_data = self.compress(flow)
        s2.send(struct.pack('!i', len(serialized_data)))
        s2.send(serialized_data)
        len_str = s2.recv(4)
        size = struct.unpack('!i', len_str)[0]
        fall_coef = b''
        while size > 0:
            if size >= 4096:
                data = s2.recv(4096)
            else:
                data = s2.recv(size)
            if not data:
                break
            size -= len(data)
            fall_coef += data
        fall_coef = struct.unpack('!d', fall_coef)[0]
        if fall_coef > 0.8:
            print("fall detected, probability:", fall_coef)
        return fall_coef

    def estimate_compute_time(self, fps):
        """Given a measured fps, print how long computing every video in the
        input list will take (video mode only). Runs at most once."""
        self.estimation = False
        total_nb_frame = 0
        print("Calculating compute time...\nEstimated FPS: " + "{:1.2f}".format(fps) + "\n")
        for i in range(0, len(self.cap.video_list)):
            cap = cv2.VideoCapture(self.cap.video_list[i].replace("\n", ""))
            frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            if self.args.estimation == 2:
                print("video " + str(i) + ": " + str(frames) + " frames (" + "{:1.2f}".format(frames / fps) + " seconds)")
            total_nb_frame += frames
        print("\nThere are " + str(total_nb_frame) + " frames to compute")
        print("Estimated compute time (day, hour, min, sec):")
        sec = timedelta(seconds=total_nb_frame / fps)
        d = datetime(1, 1, 1) + sec
        print("{:02d}".format(d.day - 1) + ":" + "{:02d}".format(
            d.hour) + ":" + "{:02d}".format(d.minute) + ":" + "{:02d}".format(
            d.second))

    def preview(self, nb_loop, flow):
        """Update fps, optionally overlay it on the frame, feed the fall
        buffer, and show the preview window.

        Returns the updated loop counter, or -1 when ESC was pressed.
        """
        nb_loop = self.fpsMetter.get_fps(nb_loop)
        # does not estimate for the first loop, and this is reserved to video computing
        if self.fpsMetter.fps > 0 and self.fpsMetter.init_finished and self.estimation and self.args.mode == 1:
            self.estimate_compute_time(self.fpsMetter.fps)
        if self.args.preview > 0 and self.args.preview != 3:
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(flow, "fps: " + "{:1.2f}".format(self.fpsMetter.fps), (10, 20),
                        font, 0.5, (255, 0, 255), 2,
                        cv2.LINE_AA)
        self.catchFall.add_image(flow, fps=self.fpsMetter.average_fps)
        if self.args.preview > -1:
            cv2.imshow('opticalflow received', flow)
        # 'f' manually flags a fall; ESC exits.
        if cv2.waitKey(1) & 0xFF == ord('f'):
            self.catchFall.toggle_fall()
        if cv2.waitKey(1) & 0xFF == 27:
            return -1
        return nb_loop

    def run(self):
        """Main loop: stream frames to the server and process the returned
        flow until the source is exhausted or ESC is pressed."""
        s = socket.socket()
        s.connect((self.args.ip, int(self.args.port)))
        nb_loop = 0
        print("Connected")
        while True:
            self.send_image(s)
            flow = self.receive_image(s)
            if len(self.args.save) > 0:
                self.cap.save_flow(flow)
            nb_loop = self.preview(nb_loop, flow)
            if nb_loop == -1:
                break
        s.close()
        cv2.destroyAllWindows()
        print("Socket closed, windows destroyed, exiting.")
if __name__ == "__main__":
    # CLI entry point: parse options and run the streaming client in the
    # foreground (run() is called directly, no thread is started).
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--ip", type=str, default="localhost")
    parser.add_argument("-p", "--port", type=int, default=10000)
    parser.add_argument("-p2", "--port2", type=int, default=10001)
    parser.add_argument("--width", help="width of preview / save", type=int, default=320)
    parser.add_argument("--height", help="height of preview / save", type=int, default=240)
    parser.add_argument("-pre", "--preview", help="[-2] no preview [-1] print fps [0] image (default), [1] image+fps, [2] print+image+fps, [3] print+image", type=int, default=0, choices=[-2, -1, 0, 1, 2, 3])
    parser.add_argument("-e", "--estimation", help="[0] no computing estimation [1] simple estimate [2] complete estimation (video mode only)", type=int, default=0, choices=[0, 1, 2])
    parser.add_argument("-m", "--mode", help="[0] stream (default), [1] video, [2] image", type=int, default=0, choices=[0, 1, 2])
    parser.add_argument("-l", "--list", help="file containing image/video list. Format: \"path\\npath...\"", type=str, default="video_list_example")
    parser.add_argument("-s", "--save", help="save flow under [string].avi or save videos/images in folder [string] (empty/default: no save)", type=str, default="")
    parser.add_argument("-f", "--fps", help="choose how many fps will have the video you receive from the server", type=int, default=20)
    args = parser.parse_args()
    print(args)
    try:
        Streaming(args).run()
    except Exception:
        # Fix: the original `print(e)` hid the failure location entirely;
        # print the full traceback so errors are debuggable.
        import traceback
        traceback.print_exc()
|
import configparser
def get_config(title: str, key: str):
    """Read one value from static/config.ini.

    Fix: removed the leftover debug ``print(config.sections())`` that
    polluted stdout on every lookup.

    :param title: the [section] name in the config file
    :param key: the option key within that section
    :return: the option value as a string
    :raises configparser.NoSectionError: if *title* does not exist
    :raises configparser.NoOptionError: if *key* does not exist
    """
    config = configparser.ConfigParser()
    config.read("static/config.ini")
    return config.get(title, key)
class ConfigGet:
    """User-facing accessors for values in static/config.ini."""

    @staticmethod
    def get_data_file_path():
        """Return the directory where uploaded data files are stored."""
        return get_config("data_upload", "data_save_path")

    @staticmethod
    def get_model_save_path():
        """Return the directory where trained model files are stored."""
        return get_config("models", "models")

    @staticmethod
    def get_server_host():
        """Return this machine's ip:port."""
        return get_config("data_upload", "host")
|
# Generated by Django 3.0.1 on 2019-12-24 00:58
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: alters three fields on 'upload' (verbose
    # names affect admin display; max_length changes affect the schema).

    dependencies = [
        ('FinaceNote', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='upload',
            name='user_city',
            field=models.CharField(max_length=20, verbose_name='城市'),
        ),
        migrations.AlterField(
            model_name='upload',
            name='user_hobby',
            field=models.CharField(max_length=10, verbose_name='IT受理人员'),
        ),
        migrations.AlterField(
            model_name='upload',
            name='user_sex',
            field=models.CharField(max_length=20, verbose_name='性别'),
        ),
    ]
|
import time
import openpyxl
import os
from utilities import ExcelUtil
import datetime
from win32com import client
from utilities.readProperties import ReadConfig
# Create a timestamped copy of the sales test-data workbook via the Excel COM
# API, then copy the master sheet into it. Runs at import time.
excel = client.Dispatch(dispatch="Excel.Application")
wb = excel.Workbooks.Add()
today = datetime.date.today()
d1 = today.strftime("%d-%m-%Y")
t = time.localtime()
# ':' is not allowed in Windows file names, hence the underscore substitution.
ctime = time.strftime("%H:%M:%S", t).replace(":", "_")
path = os.path.join(os.getcwd(), f'excel-report\\Test_Data_For_Automating_Sales_Module-{d1}_{ctime}.xlsx')
wb.SaveAs(path)
excel.Application.Quit()

xl = client.Dispatch("Excel.Application")
# xl.Visible = True # You can remove this line if you don't want the Excel application to be visible
originalFilePath = os.path.join(os.getcwd(), ReadConfig.getExcelFileName())
wb1 = xl.Workbooks.Open(Filename=originalFilePath)
wb2 = xl.Workbooks.Open(Filename=path)
ws1 = wb1.Worksheets(1)
ws1.Copy(Before=wb2.Worksheets(1))
wb2.Close(SaveChanges=True)
xl.Quit()

# Worksheet cursor used by TestData's write_* methods; test data starts at
# row 7 and each record occupies 7 rows.
row = 7
class TestData:
    """Reads sales-module test records from the generated workbook and writes
    results back, advancing the module-level ``row`` cursor by one record
    (7 worksheet rows) per write.

    Fix: write_valid_result/write_invalid_result duplicated write_result's
    cell writes and cursor bump; they now delegate to write_result.
    """

    BASE_URL = ReadConfig.getApplicationURL()
    USERNAME = ReadConfig.getUsername()
    PASSWORD = ReadConfig.getPassword()

    @staticmethod
    def getSalesTestData():
        """Return a list of dicts, one per 7-row record in the test sheet."""
        dataList = []
        filePath = path
        testSheet = ReadConfig.getExcelSheet()
        rowCount = ExcelUtil.get_rowcount(filePath, testSheet)
        for i in range(7, rowCount + 1, 7):  # one record per 7 rows
            record = {
                'customername': ExcelUtil.read_data(filePath, testSheet, i, 4),
                'validity': ExcelUtil.read_data(filePath, testSheet, i + 1, 4),
                'product1': ExcelUtil.read_data(filePath, testSheet, i + 2, 4),
                'product2': ExcelUtil.read_data(filePath, testSheet, i + 3, 4),
                'product3': ExcelUtil.read_data(filePath, testSheet, i + 4, 4),
                'paymentterms': ExcelUtil.read_data(filePath, testSheet, i + 5, 4),
                'downpayment': ExcelUtil.read_data(filePath, testSheet, i + 6, 4),
                'expected': ExcelUtil.read_data(filePath, testSheet, i, 5),
            }
            dataList.append(record)
        return dataList

    @staticmethod
    def write_result(actual, passfail):
        """Write the actual text (col 6) and pass/fail verdict (col 7) for
        the current record, then advance the cursor to the next record."""
        global row
        ExcelUtil.write_data(path,
                             ReadConfig.getExcelSheet(), row, 6,
                             actual)
        ExcelUtil.write_data(path,
                             ReadConfig.getExcelSheet(), row, 7,
                             passfail)
        row = row + 7

    @staticmethod
    def write_valid_result():
        """Record a successful record creation for the current row."""
        TestData.write_result("Record Created Successfully", 'Pass')

    @staticmethod
    def write_invalid_result():
        """Record an expected-failure outcome, rewording the stored
        expectation from 'should not be' into past-tense 'is not'."""
        expected = ExcelUtil.read_data(path, ReadConfig.getExcelSheet(), row, 5)
        if "should not be" in expected:
            expected = expected.replace('should not be', 'is not')
        TestData.write_result(expected, 'Pass')
|
#--------------------------------------------------------------------------------
# G e n e r a l I n f o r m a t i o n
#--------------------------------------------------------------------------------
# Name: Exercise 5.1
#
# Usage: python "Exercise 5.1.py"
#
# Description: Calculate and plot distance traveled using given velocity and time data.
# Uses trapezoid rule.
#
# Inputs: None
#
# Outputs: Console data AND a visual plot
#
# Auxiliary Files: velocity.txt
#
# Special Instructions: Script will output data into the console and display a visual plot - watch for both!
#
#--------------------------------------------------------------------------------
# C o d e H i s t o r y
#--------------------------------------------------------------------------------
# Version: 1.0
#
# Author(s): Kole Frazier
#
#--------------------------------------------------------------------------------
import math
import matplotlib
import matplotlib.pylab as plot
import matplotlib.patches as mpatches
def trapezoidRule(data):
    """Integrate equally spaced samples (unit spacing) by the trapezoid rule.

    Endpoints get weight 1/2; every interior point gets weight 1.

    Fix: the interior loop previously ran ``range(1, len(data)-2)``, which
    silently skipped the second-to-last sample (off-by-one).

    :param data: sequence of at least two numeric samples
    :return: the trapezoid-rule sum
    """
    total = data[0] / 2 + data[-1] / 2
    # All interior samples: indices 1 .. len(data)-2 inclusive.
    for index in range(1, len(data) - 1):
        total += data[index]
    return total
def readVelocitiesFile():
    """Read 'velocities.txt' (tab-separated ``time<TAB>velocity`` lines).

    Fix: the file is now opened with a context manager so the handle is
    released even if parsing raises.

    :return: (times, velocities) where times are the raw time-step strings
             and velocities are floats
    """
    with open('velocities.txt', 'r') as velocitiesData:
        rawData = velocitiesData.readlines()

    dataTime = []
    dataVelocities = []
    for line in rawData:
        # Data format: [0] = time step \t [1] = velocity value
        parsedData = line.split('\t')
        dataTime.append(parsedData[0])
        dataVelocities.append(float(parsedData[1]))
    return dataTime, dataVelocities
def distanceTraveledTotal(data):
    """Total absolute distance covered, given velocities sampled 1 s apart.

    Per-interval distance is |velocity| * dt with dt = 1, so each sample
    contributes its absolute value (direction is ignored).

    Fix: the original computed ``abs(data[x]) / x`` -- velocity divided by
    elapsed time -- which contradicts its own derivation (d = v * t). With
    unit sample spacing the per-step distance is simply |v|.

    :param data: velocity samples; data[0] (t = 0) contributes no distance
    :return: total distance as a float
    """
    runningTotal = 0.0
    for x in range(1, len(data)):
        runningTotal += abs(data[x])
    return runningTotal
def distanceTraveledPoints(data):
    """Cumulative distance at each sample time, for plotting against time.

    The final element equals ``distanceTraveledTotal(data)``.

    Fixes: per-step distance is now |v| * dt (dt = 1) instead of the
    erroneous |v| / index, and the values accumulate so the curve shows
    distance traveled so far rather than isolated per-step values.

    :param data: velocity samples
    :return: list of cumulative distances, same length as data
    """
    distancePoints = [0.0]  # zero distance at t = 0
    runningTotal = 0.0
    for x in range(1, len(data)):
        runningTotal += abs(data[x])
        distancePoints.append(runningTotal)
    return distancePoints
# --- Driver: load data, integrate, report, and plot -------------------------
dataTime, dataVelocities = readVelocitiesFile()

TrapezoidRuleResult = trapezoidRule(dataVelocities)
TotalDistanceTraveled = distanceTraveledTotal(dataVelocities)
DistanceTraveledPoints = distanceTraveledPoints(dataVelocities)
print('Trapezoid Rule result: {0}\nTotal distance travelled: {1}'.format(str(TrapezoidRuleResult), str(TotalDistanceTraveled)))

# Plot velocity and distance on shared axes.
# NOTE(review): dataTime holds strings, so matplotlib treats the x-axis
# categorically; convert to float if a numeric time axis is wanted.
plot.figure()
plot.plot(dataTime, dataVelocities, 'b', linewidth=2.0)
velocityLegendEntry = mpatches.Patch(color='blue', label='Velocity (m/s) over time')
plot.plot(dataTime, DistanceTraveledPoints, 'y', linewidth=2.0)
distanceLegendEntry = mpatches.Patch(color='yellow', label='Distance (m) traveled over time')
plot.title('Distance over Time')
plot.xlabel('Time (seconds)')
plot.legend(handles=[velocityLegendEntry, distanceLegendEntry])
plot.show()
class Vector:
    """An n-dimensional vector over arbitrary numeric coefficients.

    Supports component-wise addition, len(), and call-style replacement of
    the coefficients.
    """

    def __init__(self, *coeffs):
        self.coeffs = coeffs

    def __repr__(self):
        return 'Vector(*{!r})'.format(self.coeffs)

    def __add__(self, other):
        # Component-wise sum; extra components of the longer operand are
        # dropped (zip truncates), matching the original semantics.
        summed = tuple(a + b for a, b in zip(self.coeffs, other.coeffs))
        return Vector(*summed)

    def __len__(self):
        return len(self.coeffs)

    def __call__(self, *coeffs):
        # Calling the instance replaces its coefficients in place.
        self.coeffs = coeffs
# Quick demonstration: component-wise addition of two 3-D vectors.
P1=Vector(1,2,3)
P2=Vector(2,2,4)
P3=P1+P2
from collections import OrderedDict
from all import setting
import requests
import simplejson as json
import xlrd
# region Data into db
def write_service_facility():
    """Link services to facilities via the API, driven by the Excel sheet.

    Reads the 'Services' sheet, and for every service row issues one PUT per
    comma-separated facility slug in column 11.

    Fixes: a duplicate, identical authentication POST (the first response
    was discarded) has been removed; the inner loop no longer shadows the
    outer ``row`` variable; ``print(res.json)`` printed the bound method
    instead of the parsed body and now calls it.
    """
    auth_data = {"code": "666666", "mobile": "09207869164"}
    # Single auth request; the response 'code' is used as the bearer token.
    auth_response = requests.post(f"{setting.base_api_uri}/authenticates", json=auth_data)
    auth_id = auth_response.json()['code']
    print(auth_id)
    workbook = xlrd.open_workbook(setting.path_excel)
    sheet = workbook.sheet_by_name('Services')
    for row_idx in range(1, sheet.nrows):  # row 0 is the header
        row_values = sheet.row_values(row_idx)
        service_slug = row_values[1]
        # Column 11 holds a comma-separated list of facility slugs.
        for facility_slug in row_values[11].split(","):
            print(service_slug)
            if len(facility_slug) > 0:
                res = requests.put(f"{setting.base_api_uri}/services/{service_slug}/facilities/{facility_slug}", headers={'Authorization': f"Bearer {auth_id}"})
                print(res.json())
    # NOTE(review): "down" looks like a typo for "done"; kept because
    # callers may compare against it.
    return "down"
# endregion
|
# Generated by Django 2.2 on 2019-08-09 07:47
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds a required integer category_code to
    # Category (existing rows backfilled with 0 one-off).

    dependencies = [
        ('inventory', '0003_auto_20190809_0548'),
    ]

    operations = [
        migrations.AddField(
            model_name='category',
            name='category_code',
            field=models.IntegerField(default=0),
            preserve_default=False,
        ),
    ]
|
'''Code to query, create and edit Entry data
'''
from logging import getLogger
from openspending.lib.aggregator import update_distincts
from openspending.ui.lib.browser import Browser
from openspending.model import Dataset, Entry, mongo
log = getLogger(__name__)
def facets_for_fields(facet_fields, dataset_name=None, **query):
    '''Get the facets for the fields *facet_fields* for all elements
    that are part of the dataset *dataset_name* and that
    match ***query*.

    ``facet_fields``
        A ``list`` of field names
    ``dataset``
        A dataset name or a :class:`openspending.model.Dataset` object
    ``**query``
        Parameters for an *AND* query. Only the *key* values objects
        matching these queries will be counted.

    Returns: A ``dict`` where the keys are the names in the
    facet_fields list and the values are dictionaries with
    "<facet value>:<count>" items.
    '''
    # browser with no limit for facets.
    browser = Browser({'facet_limit': -1})
    browser.facet_by(*facet_fields)
    # we don't want any docs. Facets listed in the Response
    browser.limit(0)
    if dataset_name is not None:
        browser.filter_by('+dataset:%s' % dataset_name)
    for (key, value) in query.items():
        # Solr expects lowercase true/false literals for boolean filters.
        if isinstance(value, bool):
            if value:
                value = 'true'
            else:
                value = 'false'
        filter_ = '+%s:%s' % (key, value)
        browser.filter_by(filter_)
    return dict([(f, browser.facet_values(f)) for f in facet_fields])
def distinct(key, dataset_name=None, **query):
    '''Return the distinct values for `key` for all *Entry* objects
    matching the dataset_name or ***query*. It will query solr for
    a result. There may be short time frames where the result from
    solr does not match the distincts for a key in the datastore (mongodb).

    ``key``
        The key of the field for which the distinct will be returned
    ``dataset``
        A dataset name or a :class:`openspending.model.Dataset` object
    ``**query``
        Parameters for an *AND* query. Only the *key* values objects
        matching these queries will be counted. If you want to query
        by dataset **don't** add the condition here, use *dataset_name*.

    Returns: A list of distinct values.
    '''
    direct_mongo_query = False
    # the same keys used in serverside_js/compute_distincts.js
    not_aggregated_keys = ['_id', 'name', 'amount', 'classifiers',
                           'entities', 'currency']
    # Fall back to a direct mongo query when there is no dataset to scope
    # by, extra filters are present, or the key is not precomputed.
    if ((dataset_name is None) or (len(query) > 0) or
        (key in not_aggregated_keys)):
        direct_mongo_query = True
    else:
        # Validate the dataset exists before using its distincts collection.
        dataset = Dataset.c.find_one({'name': dataset_name},
                                     as_class=dict)
        if not dataset:
            raise ValueError('Dataset "%s" does not exist' % dataset_name)

    if not direct_mongo_query:
        # Use the precomputed per-dataset distincts collection, creating it
        # on first access.
        collection_name = 'distincts__%s' % dataset_name
        db = mongo.db()
        if collection_name not in db.collection_names():
            # We need to create the distincts collection first
            update_distincts(dataset_name)
        distincts_collection = db[collection_name]
        log.info('use distincts collection %s' % collection_name)
        return distincts_collection.find({'value.keys': key}).distinct('_id')

    if direct_mongo_query:
        if dataset_name is not None:
            query['dataset.name'] = dataset_name
        return Entry.c.find(query).distinct(key)
def used_keys(dataset_name):
    '''Return the keys used by entries of the dataset *dataset_name*,
    based on the precomputed distincts collection (created on demand).

    Fix: the distinct() result was computed but never returned, so the
    function always returned None.

    ``dataset_name``
        The name of the dataset

    Returns: A list of key names.
    '''
    collection_name = 'distincts__%s' % dataset_name
    db = mongo.db()
    if collection_name not in db.collection_names():
        update_distincts(dataset_name)
    return db[collection_name].distinct('value')
def distinct_count(key, dataset_name):
    '''Count the distinct values for *key* in the dataset *dataset_name*
    using the precomputed distincts collection (created on demand).

    Fix: the query result was discarded and the function always returned
    None; it now returns the number of matching documents.

    NOTE(review): this queries {'values': key} while distinct() queries
    {'value.keys': key} -- confirm which field compute_distincts emits.
    '''
    # assert kept as-is (not converted to raise) to preserve the exception
    # type existing callers may rely on; note it is stripped under -O.
    assert ('.' not in key or key.startswith('time.'))
    collection_name = 'distincts__%s' % dataset_name
    db = mongo.db()
    if collection_name not in db.collection_names():
        update_distincts(dataset_name)
    return db[collection_name].find({'values': key}).count()
def count(dataset_name=None, **query):
    '''Count the number of element that are in the *dataset_name*
    and match the ***query*.

    ``dataset``
        A dataset name or a :class:`openspending.model.Dataset` object
    ``**query``
        Parameters for an *AND* query. Only the *key* values objects
        matching these queries will be counted.

    Returns: The count as an int.
    '''
    browser = Browser({})
    # we don't want any docs. Facets listed in the Response
    browser.limit(0)
    if dataset_name is not None:
        browser.filter_by('+dataset:%s' % dataset_name)
    for (key, value) in query.items():
        # Solr expects lowercase true/false literals for boolean filters.
        if isinstance(value, bool):
            if value:
                value = 'true'
            else:
                value = 'false'
        filter_ = '+%s:%s' % (key, value)
        browser.filter_by(filter_)
    return browser.num_results
def classify_entry(entry, classifier, name):
    '''Update the *entry* to be classified with *classifier*.
    *entry* is mutated, but not returned.

    Stores the classifier's reference dict at entry[name] and records the
    classifier id in the deduplicated entry['classifiers'] list. No-op when
    *classifier* is None.

    ``entry``
        A ``dict`` like object, e.g. an instance of
        :class:`openspending.model.Base`.
    ``classifier``
        A :class:`wdmg.model.Classifier` object
    ``name``
        This is the key where the value of the classifier
        will be saved. This my be the same as classifier['name'].

    return:``None``
    '''
    if classifier is None:
        return
    entry[name] = classifier.to_ref_dict()
    seen = set(entry.get('classifiers', []))
    seen.add(classifier.id)
    entry['classifiers'] = list(seen)
def entitify_entry(entry, entity, name):
    '''Update the *entry* to use the *entity* for the
    dimension *name*.

    Stores the entity's reference dict at entry[name] and records the
    entity id in the deduplicated entry['entities'] list. No-op when
    *entity* is None.

    ``entry``
        A ``dict`` like object, e.g. an instance of
        :class:`openspending.model.Base`.
    ``entity``
        A :class:`wdmg.model.entity` object
    ``name``
        This is the key where the value of the entity
        will be saved. This my be the same as entity['name'].

    return:``None``
    '''
    if entity is None:
        return
    entry[name] = entity.to_ref_dict()
    seen = set(entry.get('entities', []))
    seen.add(entity.id)
    entry['entities'] = list(seen)
|
import os
import wget
from workflow.task import Task
from workflow.utils.ansible import Ansible
from workflow.utils import env as ENV
'''
从制品库拉,然后push到部署服务的机器
'''
class Push(Task):
    """Pull a build artifact from the artifact repository, then push it to
    the deployment target hosts via ansible (copy or unarchive).

    NOTE(review): __init__ never calls super().__init__(), yet exec() reads
    self.workspace, which presumably comes from Task -- confirm how
    workspace is injected.
    """

    def __init__(self, *args, **kwargs):
        self.src = kwargs.get('src')              # local subdir holding the artifact
        self.dst = kwargs.get('dst')              # destination path on target hosts
        self.servers = kwargs.get('servers')      # {'inventory': ..., 'hosts': [...]}
        self.unarchive = kwargs.get('unarchive')  # truthy: extract on target instead of copy
        self.logger = ENV.GET('logger')

    def info(self):
        # Log the task name for trace readability.
        self.logger.info('TaskName=Push')

    def exec(self):
        self.info()
        working_dir = os.path.join(self.workspace, ENV.GET('name'))
        os.chdir(working_dir)
        # NOTE(review): AppArtifacts is not imported anywhere in this module,
        # so this line raises NameError as written -- add the model import.
        appsurl = AppArtifacts.objects.filter(app_id=ENV.GET('app_id'), version=ENV.GET('version', '1.0.1'))
        self.logger.info(appsurl[0].download_url)
        out_fname = wget.filename_from_url(appsurl[0].download_url)
        self.logger.info('out_fname: '+out_fname)
        if os.path.exists(os.path.join(working_dir, out_fname)):
            self.logger.info('file exists, removing it...')
            os.remove(out_fname)
        # src_path is the local cache path, distinct from the 'src' field
        # stored in the database.
        wget.download(appsurl[0].download_url, out=out_fname)
        os.chdir(self.workspace)
        self.logger.info('entering... '+self.workspace)
        src_path = os.path.join(self.workspace, self.src, out_fname)
        dst_path = self.dst
        # Push to the deployment hosts: missing dst dirs are created, and a
        # remote file whose content differs from the source is replaced.
        ansible = Ansible(inventory=self.servers['inventory'], connection='smart', become=True, become_method='sudo')
        if self.unarchive:
            self.logger.info('unarchive: '+str(self.unarchive))
            ansible.run(hosts=','.join(self.servers['hosts']), module='unarchive', args='src=%s dest=%s' % (src_path, dst_path))
        else:
            self.logger.info('unarchive: '+str(self.unarchive))
            ansible.run(hosts=','.join(self.servers['hosts']), module='copy', args='src=%s dest=%s' % (src_path, dst_path))
        self.logger.info('ansible result: '+str(ansible.get_result()))
        # Clean up the downloaded temp file afterwards.
        if os.path.exists(os.path.join(working_dir,out_fname)):
            self.logger.info('push ok, removing donwload tmp file...')
            os.remove(os.path.join(working_dir,out_fname))
#!/usr/bin/env python
# this runs as a separate ROS node and receives messages from the joystick - generally working fine
import rospy
from sensor_msgs.msg import Joy
# Latest joystick state, overwritten by callback(); both stay empty until
# the first Joy message arrives.
joybuttons = []
joyaxes = []
def listen_joystick():
    """Subscribe to the Wii remote joystick topic; callback() caches each message."""
    rospy.Subscriber('xwiimote_node/joy', Joy, callback)
def callback(data):
    """Cache the latest Joy message's buttons and axes in module globals."""
    global joybuttons, joyaxes
    joybuttons, joyaxes = data.buttons, data.axes
def get_joystick():
    """Return the most recently received (buttons, axes) pair."""
    state = (joybuttons, joyaxes)
    return state
if __name__ == '__main__':
    # Register the node, subscribe, then block until ROS shutdown.
    rospy.init_node('joy_listen', anonymous=False)
    listen_joystick()
    rospy.spin()
|
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Single neural network layer in which activations are randomly turned off
according to a specific Bernoulli probability threshold.
"""
import logging
from neon.layers.layer import Layer
from neon.util.param import opt_param
logger = logging.getLogger(__name__)
class DropOutLayer(Layer):
    """
    Dropout layer randomly kills activations from being passed on at each
    fprop call.
    Uses parameter 'keep' as the threshold above which to retain activation.
    During training, the mask is applied, but during inference, we switch
    off the random dropping.
    Make sure to set train mode to False during inference.
    Attributes:
        keep (numeric, optional): The Bernoulli success probability, indicating
                                  the cutoff below which we keep an activation.
                                  Defaults to 0.5, and should lie in range
                                  [0, 1].
    """
    def __init__(self, **kwargs):
        # Mark the layer as stochastic so the framework treats fprop as
        # non-deterministic.
        self.is_random = True
        super(DropOutLayer, self).__init__(**kwargs)
    def initialize(self, kwargs):
        # Default the keep probability to 0.5 when not supplied.
        opt_param(self, ['keep'], 0.5)
        super(DropOutLayer, self).initialize(kwargs)
        # Per-minibatch binary mask buffer, same shape as the layer input.
        self.keepmask = self.backend.empty((self.nin, self.batch_size),
                                           dtype=self.weight_dtype)
        self.train_mode = True
        self.allocate_output_bufs()
    def set_previous_layer(self, pl):
        # Dropout is shape-preserving: mirror the previous layer's geometry.
        if pl.is_local:
            self.is_local = True
            self.nifm = self.nofm = pl.nofm
            self.ifmshape = self.ofmshape = pl.ofmshape
        self.nout = self.nin = pl.nout
        self.prev_layer = pl
    def fprop(self, inputs):
        if (self.train_mode):
            # Training: sample a fresh binary mask and zero dropped units.
            self.backend.make_binary_mask(self.keepmask, self.keep)
            self.backend.multiply(inputs, self.keepmask, out=self.output)
        else:
            # Inference: scale by the keep probability instead of masking
            # (non-inverted dropout).
            self.backend.multiply(inputs, self.keep, out=self.output)
    def bprop(self, error):
        # Gradients flow only through the units kept during fprop.
        if self.deltas is not None:
            self.backend.multiply(error, self.keepmask, out=self.deltas)
    def set_train_mode(self, mode):
        # Toggle between masking (True) and scaling (False) behavior.
        self.train_mode = mode
|
import pandas as pd
from mvmm.multi_view.block_diag.graph.bipt_community import get_block_mat
from mvmm.clustering_measures import MEASURE_MIN_GOOD
from mvmm_sim.simulation.run_sim import get_n_blocks
def get_bd_mvmm_model_sel(mvmm_results, select_metric='bic',
                          user_best_idx=None):
    """
    Collect model-selection scores for every fitted block-diagonal MVMM
    (and the full MVMM when present) and return the best model.

    Parameters
    ----------
    mvmm_results : dict
        Must contain 'fit_data' (one entry per view-component setting)
        and 'n_view_components'.
    select_metric: str
        Which metric to use for model selection.
    user_best_idx: None, int
        User provided index for which model to select. May be used to
        overwrite the metric-selected index.

    Returns
    -------
    best_model, model_sel_df (pd.DataFrame), sel_metadata (dict)
    """
    fit_data = mvmm_results['fit_data']
    n_view_comp_seq = mvmm_results['n_view_components']
    n_view_comp_settings = len(fit_data)
    model_sel_df = []
    # Weights strictly above this threshold count as "on".  Initialized
    # here so the full-MVMM branch is well defined even when a setting
    # has no BD estimators (it was previously possibly unbound).
    zero_thresh = 0
    for view_comp_idx in range(n_view_comp_settings):
        data = fit_data[view_comp_idx]
        bd_models = data['models']['bd_mvmm']
        if 'full_mvmm' in data['models'].keys():
            full_model = data['models']['full_mvmm']
        else:
            full_model = None
        model_scores_measures = bd_models.model_sel_scores_.columns.values
        #############
        # BD models #
        #############
        for bd_idx in range(len(bd_models.estimators_)):
            est = bd_models.estimators_[bd_idx]
            model_sel_scores = bd_models.model_sel_scores_.iloc[bd_idx]
            # estimated number of blocks
            D_est = est.final_.bd_weights_
            zero_thresh = est.final_.zero_thresh
            comm_mat_est = get_block_mat(D_est > zero_thresh)
            n_blocks_est = get_n_blocks(comm_mat_est)
            n_comp_est = (D_est > zero_thresh).sum()
            # requested n blocks
            n_blocks_req = est.final_.n_blocks
            res = {'model': 'bd_' + str(bd_idx),
                   'view_comp_idx': view_comp_idx,
                   'n_blocks_est': n_blocks_est,
                   'n_blocks_req': n_blocks_req,
                   'n_comp_est': n_comp_est,
                   'n_view_comp': n_view_comp_seq[view_comp_idx]}
            for measure in model_scores_measures:
                res[measure] = model_sel_scores[measure]
            model_sel_df.append(res)
        #############
        # Full mvmm #
        #############
        if full_model is not None:
            comm_mat_est = get_block_mat(full_model.weights_mat_ > zero_thresh)
            n_blocks_est = get_n_blocks(comm_mat_est)
            n_comp_est = (full_model.weights_mat_ > zero_thresh).sum()
            res = {'model': 'full',
                   'view_comp_idx': view_comp_idx,
                   'n_blocks_req': 1,
                   'n_blocks_est': n_blocks_est,
                   'n_comp_est': n_comp_est,
                   'n_view_comp': n_view_comp_seq[view_comp_idx]
                   }
            for measure in model_scores_measures:
                res[measure] = data['full_model_sel_scores'][measure]
            model_sel_df.append(res)
    model_sel_df = pd.DataFrame(model_sel_df)
    # get best model
    if user_best_idx is None:
        # BUGFIX: MEASURE_MIN_GOOD was previously keyed on the stale loop
        # variable `measure`; key it on the metric actually used to select.
        if MEASURE_MIN_GOOD[select_metric]:
            best_idx = model_sel_df[select_metric].idxmin()
        else:
            best_idx = model_sel_df[select_metric].idxmax()
    else:
        # user provided
        best_idx = int(user_best_idx)
    best_model_name = model_sel_df.loc[best_idx]['model']
    best_view_comp_idx = model_sel_df.loc[best_idx]['view_comp_idx']
    best_vc_models = fit_data[best_view_comp_idx]['models']
    if 'bd' in best_model_name:
        bd_idx = int(best_model_name.split('_')[1])
        best_model = best_vc_models['bd_mvmm'].estimators_[bd_idx]
    else:
        best_model = best_vc_models['full_mvmm']
    # BUGFIX: report the selected view-component index, not the loop's
    # final value.
    sel_metadata = {'idx': best_idx,
                    'model_name': best_model_name,
                    'best_view_comp_idx': best_view_comp_idx}
    return best_model, model_sel_df, sel_metadata
def get_log_pen_mvmm_model_sel(mvmm_results, select_metric='bic',
                               user_best_idx=None):
    """
    Collect model-selection scores for every fitted log-penalized MVMM
    (and the full MVMM when present) and return the best model.

    Parameters
    ----------
    mvmm_results : dict
        Must contain 'fit_data' (one entry per view-component setting)
        and 'n_view_components'.
    select_metric: str
        Which metric to use for model selection.
    user_best_idx: None, int
        User provided index for which model to select. May be used to
        overwrite the metric-selected index.

    Returns
    -------
    best_model, model_sel_df (pd.DataFrame), sel_metadata (dict)
    """
    fit_data = mvmm_results['fit_data']
    n_view_comp_seq = mvmm_results['n_view_components']
    n_view_comp_settings = len(fit_data)
    model_sel_df = []
    # Weights strictly above this threshold count as "on".  Initialized
    # here so the full-MVMM branch is well defined even when a setting
    # has no log-pen estimators.
    zero_thresh = 0
    for view_comp_idx in range(n_view_comp_settings):
        data = fit_data[view_comp_idx]
        log_pen_models = data['models']['log_pen_mvmm']
        if 'full_mvmm' in data['models'].keys():
            full_model = data['models']['full_mvmm']
        else:
            # BUGFIX: there was no else branch, so a missing 'full_mvmm'
            # either raised NameError or silently reused the previous
            # iteration's model.
            full_model = None
        model_scores_measures = log_pen_models.model_sel_scores_.columns.values
        ##################
        # Log-pen models #
        ##################
        for tune_idx in range(len(log_pen_models.estimators_)):
            est = log_pen_models.estimators_[tune_idx]
            model_sel_scores = log_pen_models.model_sel_scores_.iloc[tune_idx]
            Pi_est = est.final_.weights_mat_
            zero_thresh = 0
            comm_mat_est = get_block_mat(Pi_est > zero_thresh)
            n_blocks_est = get_n_blocks(comm_mat_est)
            n_comp_est = (Pi_est > zero_thresh).sum()
            res = {'model': 'logpen_' + str(tune_idx),
                   'view_comp_idx': view_comp_idx,
                   'n_blocks_est': n_blocks_est,
                   'n_comp_est': n_comp_est,
                   'n_view_comp': n_view_comp_seq[view_comp_idx],
                   'n_view_comp_est': list(Pi_est.shape)}
            for measure in model_scores_measures:
                res[measure] = model_sel_scores[measure]
            model_sel_df.append(res)
        #############
        # Full mvmm #
        #############
        if full_model is not None:
            comm_mat_est = get_block_mat(full_model.weights_mat_ > zero_thresh)
            n_blocks_est = get_n_blocks(comm_mat_est)
            n_comp_est = (full_model.weights_mat_ > zero_thresh).sum()
            res = {'model': 'full',
                   'view_comp_idx': view_comp_idx,
                   'n_blocks_est': n_blocks_est,
                   'n_comp_est': n_comp_est,
                   'n_view_comp': n_view_comp_seq[view_comp_idx]
                   }
            for measure in model_scores_measures:
                res[measure] = data['full_model_sel_scores'][measure]
            model_sel_df.append(res)
    model_sel_df = pd.DataFrame(model_sel_df)
    # get best model
    if user_best_idx is None:
        # BUGFIX: MEASURE_MIN_GOOD was previously keyed on the stale loop
        # variable `measure`; key it on the metric actually used to select.
        if MEASURE_MIN_GOOD[select_metric]:
            best_idx = model_sel_df[select_metric].idxmin()
        else:
            best_idx = model_sel_df[select_metric].idxmax()
    else:
        # user provided
        best_idx = int(user_best_idx)
    best_model_name = model_sel_df.loc[best_idx]['model']
    best_view_comp_idx = model_sel_df.loc[best_idx]['view_comp_idx']
    best_vc_models = fit_data[best_view_comp_idx]['models']
    if 'logpen' in best_model_name:
        tune_idx = int(best_model_name.split('_')[1])
        best_model = best_vc_models['log_pen_mvmm'].estimators_[tune_idx]
    else:
        best_model = best_vc_models['full_mvmm']
    # BUGFIX: report the selected view-component index, not the loop's
    # final value.
    sel_metadata = {'idx': best_idx,
                    'model_name': best_model_name,
                    'best_view_comp_idx': best_view_comp_idx}
    return best_model, model_sel_df, sel_metadata
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-11-10 22:03
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: links each call detail report to its A-leg
    # and B-leg transaction rows; SET_NULL keeps the report if the
    # referenced transaction is deleted.
    dependencies = [
        ('reportng', '0021_auto_20161110_2159'),
    ]
    operations = [
        migrations.AddField(
            model_name='calldetailreport',
            name='a_leg_uuid',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='acdrt', to='reportng.CallDetailReportTransaction'),
        ),
        migrations.AddField(
            model_name='calldetailreport',
            name='b_leg_uuid',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='bcdrt', to='reportng.CallDetailReportTransaction'),
        ),
    ]
|
# Generated by Django 2.2 on 2019-06-12 07:20
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: composite index on (agg_month, url) to
    # speed up monthly URL aggregation queries.
    dependencies = [
        ('django_nginx_access', '0004_auto_20190612_0733'),
    ]
    operations = [
        migrations.AddIndex(
            model_name='urlsagg',
            index=models.Index(fields=['agg_month', 'url'], name='django_ngin_agg_mon_8b2ce4_idx'),
        ),
    ]
|
import sys
from math import log10
def calculate_prob(string, gc):
    ''' Calculate the probability that a random string matches string exactly
    :param string: string to compare (over the alphabet G, C, A, T)
    :param gc: GC content used to construct random strings
    :return: log(probability) that a random string constructed with the GC content matches string exactly
    '''
    # Each of G/C occurs with probability gc/2; each of A/T with (1-gc)/2.
    gc_term = log10(gc / 2)
    at_term = log10((1 - gc) / 2)
    per_symbol = {"G": gc_term, "C": gc_term, "A": at_term, "T": at_term}
    # Independent symbols: the log-probabilities simply add up.
    return sum(per_symbol[base] for base in string)
if __name__ == "__main__":
    '''
    Given: A DNA string s of length at most 100 bp and an array A containing at most 20 numbers between 0 and 1.
    Return: An array B having the same length as A in which B[k] represents the common logarithm of the probability that
    a random string constructed with the GC-content found in A[k] will match s exactly.
    '''
    # Line 1 of stdin: the DNA string; line 2: space-separated GC contents.
    input_lines = sys.stdin.read().splitlines()
    DNA_string = input_lines[0]
    GC_list = map(float, input_lines[1].split(" "))
    prob_list = []
    for GC_content in GC_list:
        prob_list.append(calculate_prob(DNA_string, GC_content))
    # Emit the log-probabilities space-separated on one line.
    print(' '.join(map(str, prob_list)))
|
# Definition for singly-linked list.
class ListNode:
    """A node of a singly-linked list: a value plus a pointer to the next node."""
    def __init__(self, x, nextNode=None):
        self.next = nextNode  # successor node, or None at the tail
        self.val = x          # payload carried by this node
class Solution:
    """LeetCode 24: swap every two adjacent nodes of a linked list."""
    def swapPairs(self, head):
        """Return the list with each adjacent pair of nodes swapped.

        Pointers are rewired in place; a trailing odd node is left as-is.
        """
        if head is None or head.next is None:
            return head
        # After the first swap the second node becomes the new head.
        new_head = head.next
        prev = None
        node = head
        while node is not None and node.next is not None:
            second = node.next
            after = second.next
            # Reverse the pair ...
            second.next = node
            node.next = after
            # ... and hook the previous pair's tail onto its new head.
            if prev is not None:
                prev.next = second
            prev = node
            node = after
        return new_head
# Demo: build the list 1 -> 2 -> 3 -> 4 and print the pair-swapped result
# (expected output: 2, 1, 4, 3 on separate lines).
a4 = ListNode(4)
a3 = ListNode(3,a4)
a2 = ListNode(2,a3)
a1 = ListNode(1,a2)
s = Solution()
f = s.swapPairs(a1)
while f!=None:
    print(f.val)
    f = f.next
# print(f)
from django.urls import path
from authors.apps.reports.views import ReportArticleViewSet
# Routes for the article-report endpoints:
#   POST   <slug>/report-article/         -> create a report for an article
#   GET/PUT/DELETE report-article/<id>/   -> retrieve/update/delete one report
#   GET    report-article/                -> list all reports
urlpatterns = [
    path(
        "<str:slug>/report-article/",
        ReportArticleViewSet.as_view({'post':'create'}), name="report"
    ),
    path(
        "report-article/<int:report_id>/",
        ReportArticleViewSet.as_view({'get':'retrieve', 'put':'update', 'delete':'destroy'}), name="edit-report"
    ),
    path(
        "report-article/",
        ReportArticleViewSet.as_view({'get':'list'}), name="reports-list"
    ),
]
import errno
import io
import json
import logging
import os
import subprocess
from builtins import ValueError
from json import JSONDecodeError
from typing import List, Tuple, Callable
from definitions import CLI_SIMULATOR_FILE_PATH, CDE_API_USER_PASSWORD_ENV_VAR
logging.basicConfig(format="%(asctime)s %(levelname)s %(message)s")
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
class CdeCliJsonParser:
    """Runs a cde CLI command via SubprocessHandler, answers its interactive
    prompts, and parses the JSON job listing it prints."""
    def __init__(self, cmd: List[str]):
        self.proc_handler = CdeCliJsonParser.create_default_for_cde_cli(cmd)
    @staticmethod
    def create_default_for_cde_cli(cmd: List[str]):
        """Build a SubprocessHandler preconfigured for the cde CLI prompts.

        Raises ValueError when the API password env var is not set.
        """
        # Simplified: the constant is already a string, the "%s" % wrapping
        # was redundant.
        if not os.getenv(CDE_API_USER_PASSWORD_ENV_VAR):
            raise ValueError("Please set environment variable: %s" % CDE_API_USER_PASSWORD_ENV_VAR)
        actions = [
            LineAction("WARN: Plaintext or insecure TLS connection requested", SubprocessHandler.send_input, ["yes"]),
            LineAction("API User Password:", SubprocessHandler.send_input, [os.getenv(CDE_API_USER_PASSWORD_ENV_VAR)])]
        ignore_output = ["cde job list "]
        # stdout.readline() and stdout.readlines() won't work with the output of cde cli.
        # These methods would block forever as cde cli wouldn't print a newline at the end of the line, apparently.
        def looped_data_read_func(data):
            # An opened-but-unclosed JSON array means more output is coming.
            return data.startswith("[") and not data.endswith("]")
        return SubprocessHandler(cmd, ignore_output, actions,
                                 looped_data_read_func=looped_data_read_func)
    def run(self):
        self.proc_handler.run()
    def parse_job_names(self):
        """Run the command and return the names of all airflow jobs.

        Raises ValueError when the underlying process exits non-zero.
        """
        self.proc_handler.run()
        if self.proc_handler.exit_code != 0:
            raise ValueError("Underlying process failed. "
                             "Command was: {} "
                             "stderr from process: {}".format(self.proc_handler.cmd, self.proc_handler.stderr))
        return self.get_jobs(filter_type="airflow")
    def get_jobs(self, filter_type=None):
        """Parse the stored CLI output as JSON and return the job names,
        optionally restricted to jobs whose "type" equals *filter_type*."""
        LOG.info("Decoding json: %s", self.proc_handler.stored_lines)
        json_str = "\n".join(self.proc_handler.stored_lines)
        try:
            parsed_json = json.loads(json_str)
        except JSONDecodeError as e:
            LOG.error("Invalid json output from cde cli process. output was: '%s'", json_str)
            raise e
        # Simplified from a nested if/else that duplicated the append call.
        return [job["name"] for job in parsed_json
                if not filter_type or job["type"] == filter_type]
class LineAction:
    """Pairs an output-line prefix with a callback fired when a subprocess
    prints a line starting with that prefix."""
    def __init__(self, line, action, args: List[str]):
        self.line = line      # prefix to match against process output
        self.action = action  # callable(handler, action, process, *args)
        self.args = args      # extra positional args forwarded to the action
    def handle(self, handler, process, input_line):
        """Run the action if *input_line* matches; return whether it fired."""
        if not input_line.startswith(self.line):
            return False
        self.action(handler, self, process, *self.args)
        return True
    def __str__(self):
        return "{}: line: {}, action: {}, args: {}".format(self.__class__, self.line, self.action, self.args)
class SubprocessHandler:
    """Runs a command, reacting to known prompt lines with LineActions,
    filtering ignorable output, and storing the remaining lines for
    later parsing."""
    def __init__(self, cmd: List[str],
                 ignore_output: List[str],
                 line_handlers: List[LineAction],
                 looped_data_read_func: Callable[[str], bool],
                 print_all=True):
        self.cmd = cmd
        self.ignore_out_lines = ignore_output
        self.line_actions = line_handlers
        # Predicate deciding whether a stdout chunk is a partial payload
        # that must be read to completion in a loop (see read_lines).
        self.looped_data_read_func = looped_data_read_func
        self.print_all = print_all
        self.stderr = None
        self.exit_code = None
        self.exited = False
        self.stored_lines = []
    def read_lines(self, process) -> List[str]:
        """Read the next chunk of the process's stdout, split into lines."""
        orig_exit_code = self.exit_code
        self.exit_code = process.poll()
        # `exited` flips to True on the poll where the exit code first changes.
        self.exited = orig_exit_code != self.exit_code
        data = process.stdout.read1().decode('utf-8')
        if self.exited and not data:
            return []
        if self.looped_data_read_func:
            if self.looped_data_read_func(data):
                # Partial payload: keep reading until the process exits.
                all_data = data
                while process.poll() is None:
                    data = process.stdout.read1().decode('utf-8')
                    all_data += data
                return all_data.split("\n")
        if "\n" in data:
            lines = data.split("\n")
            return [l.strip() for l in lines]
        else:
            return [data.strip()]
    @staticmethod
    def send_input(handler, action, proc, *input):
        """Write each input string to the process's stdin, newline-terminated."""
        for i in input:
            try:
                if not i:
                    raise ValueError("Input was none. Line action: {}".format(action))
                encoded_line = i.encode('utf-8')
                proc.stdin.write(encoded_line)
                proc.stdin.write(b"\n")
                proc.stdin.flush()
            except IOError as e:
                # EPIPE / EINVAL mean the process went away; anything else
                # is unexpected and re-raised.
                if e.errno != errno.EPIPE and e.errno != errno.EINVAL:
                    raise
    def run(self):
        """Run the command to completion, dispatching every output line."""
        with subprocess.Popen(
            self.cmd,
            stdin=subprocess.PIPE,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
        ) as process:
            while True:
                lines = self.read_lines(process)
                for line in lines:
                    if self._check_for_ignores(line):
                        continue
                    if self._check_for_handlers(line, process):
                        continue
                    print(line)
                    # unhandled + not ignores --> store
                    self.stored_lines.append(line)
                if self.exited:
                    break
            # Capture stderr only on failure, for later error reporting.
            if self.exit_code != 0:
                self.stderr = process.stderr.read1().decode('utf-8')
    def _check_for_handlers(self, line, process):
        # True when some LineAction consumed the line (first match wins).
        handled = False
        for action in self.line_actions:
            if action.handle(self, process, line):
                handled = True
                break
        if handled:
            if self.print_all:
                print(line)
        return handled
    def _check_for_ignores(self, line):
        # True when the line matches one of the configured ignore prefixes.
        ignored = False
        for ignored_line in self.ignore_out_lines:
            if line.startswith(ignored_line):
                ignored = True
                break
        if ignored:
            if self.print_all:
                print(line)
        return ignored
if __name__ == '__main__':
    # Drive the parser against the bundled CLI simulator script.
    cmd = [
        "python",
        "-u",  # Unbuffered stdout and stderr
        CLI_SIMULATOR_FILE_PATH,
    ]
    parser = CdeCliJsonParser(cmd)
    print(parser.parse_job_names())
|
#!/usr/bin/env python
import os
import sys
import inspect
import re
import argparse
import random
# Command-line interface: --coords (promer show-coords output), --output
# (destination for merged coords) and --interval (maximum gap, in bp,
# allowed between two alignments for them to be merged).
parser = argparse.ArgumentParser(description="""
Description
-----------
This script merges close alignement generated by promer.
""",formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
Authors
-------
Vincent Merel
""")
#Input files
parser.add_argument("--coords", type=str, required=True, dest="coords", default=None, help="the coords File")
#Output files
parser.add_argument("--output", type=str, required=True, dest="output", default=None, help="the output file")
#Additional arg
parser.add_argument("--interval", type=int, required=True, dest="interval", default=None, help="the interval")
args = parser.parse_args()
##################################################################################################
####################Creating a dictionnary of alignments per couple of Contigs####################
##################################################################################################
# Build {"<subject_contig> <query_contig>": [raw coords lines]} so that
# alignments can later be merged per contig couple.
coords = open(args.coords,"r")
myDict={}
for line in coords :
    # Strip the trailing newline; columns 7 and 8 are the contig names.
    line=line[:-1]
    S_Contig=line.split()[6]
    Q_Contig=line.split()[7]
    Couple=S_Contig+" "+Q_Contig
    if Couple not in myDict :
        myDict[Couple]=[line]
    else :
        myDict[Couple].append(line)
coords.close()
#print(myDict) #OK
##################################################################################################
##################################################################################################
##################################################################################################
###################################Parsing the dictionnary#######################################
#####################################Merging eventually##########################################
###################################And writting to output#######################################
output = open(args.output,"w")
cpt=0    # alignments examined beyond the first of each couple
Nkey=0   # contig couples processed
ToWrite=[]
for key in myDict :
    # Seed the pending ("stocked") alignment with the couple's first line.
    # (A leftover debug `if key=="X Draco": print("plop")` was removed here.)
    line=myDict[key][0].split()
    S_Contig=line[6]
    Q_Contig=line[7]
    S_Start_Stocked=int(line[0])
    S_End_Stocked=int(line[1])
    Q_Start_Stocked=int(line[2])
    Q_End_Stocked=int(line[3])
    Aln_Dir_Stocked="" #+ or - (NOTE(review): never actually derived below)
    if (Q_End_Stocked-Q_Start_Stocked)<0 :
        print("Something unexpected happened !")
    if (S_End_Stocked-S_Start_Stocked)<0:
        print("Something unexpected happened !")
    Nkey=Nkey+1
    if len(myDict[key])==1:
        # Single alignment: emit the original line's fields untouched.
        ToWrite.append(line)
    for i in range(1,len(myDict[key])) :
        line=myDict[key][i].split()
        S_Start=int(line[0])
        S_End=int(line[1])
        Q_Start=int(line[2])
        Q_End=int(line[3])
        Aln_Dir="" #+ or -
        if (Q_End-Q_Start)<0 :
            print("Something unexpected happened !")
        if (S_End-S_Start)<0 :
            print("Something unexpected happened !")
        cpt=cpt+1
        if (0<=(S_Start-S_End_Stocked)<=args.interval
            and 0<=(Q_Start-Q_End_Stocked)<=args.interval) :
            # Close enough on both sequences: extend the pending alignment.
            S_End_Stocked=S_End
            Q_End_Stocked=Q_End
            Aln_Dir_Stocked=Aln_Dir
            if i==len(myDict[key])-1: #Last aln of the contig couples
                # BUGFIX: the subject length was previously written after
                # the query length here, unlike every other emitted record
                # (promer coords order is LEN1 then LEN2).
                ToWrite.append([str(S_Start_Stocked),
                                str(S_End),
                                str(Q_Start_Stocked),
                                str(Q_End),
                                str(abs(S_End-S_Start_Stocked+1)),
                                str(abs(Q_End-Q_Start_Stocked+1)),
                                S_Contig,
                                Q_Contig])
        else :
            # Gap too large: flush the pending alignment and start a new one.
            Aln_Dir_Stocked=Aln_Dir
            ToWrite.append([str(S_Start_Stocked),
                            str(S_End_Stocked),
                            str(Q_Start_Stocked),
                            str(Q_End_Stocked),
                            str(abs(S_End_Stocked-S_Start_Stocked+1)),
                            str(abs(Q_End_Stocked-Q_Start_Stocked+1)),
                            S_Contig,
                            Q_Contig])
            S_End_Stocked=S_End
            Q_End_Stocked=Q_End
            S_Start_Stocked=S_Start
            Q_Start_Stocked=Q_Start
            if i==len(myDict[key])-1 : #Last aln of the contig couples
                ToWrite.append([str(S_Start_Stocked),
                                str(S_End_Stocked),
                                str(Q_Start_Stocked),
                                str(Q_End_Stocked),
                                str(abs(S_End-S_Start_Stocked+1)),
                                str(abs(Q_End-Q_Start_Stocked+1)),
                                S_Contig,
                                Q_Contig])
for item in ToWrite:
    output.write('\t'.join(item)+"\n")
output.close()  # BUGFIX: the output handle was never closed
'''
or
(Aln_Dir_Stocked=="-" and Aln_Dir=="-"
and (S_Start-S_End_Stocked)>=0 and (S_Start-S_End_Stocked)<=args.interval
and (Q_Start-Q_End_Stocked)<=0 and abs(Q_Start-Q_End_Stocked)<=args.interval):
'''
|
import numpy
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from keras.callbacks import ModelCheckpoint
import json
import word_table as w_t
from keras.utils import np_utils
from error_analysis import serialize_errors
# NOTE(review): this script uses Python 2 syntax (print statements, csv in
# text/'rb' mode); it will not run under Python 3 as-is.
wt = w_t.WordTable()
wt.load_dictionary()
wt.top_answers('data_processed_val.csv')
# +1 reserves index 0 (word indices below are shifted by +1).
top_words = len(wt.word2Idx)+1
import csv
import numpy as np
ques_maxlen = 20  # NOTE(review): unused, as are X, Y and total below
X = []
Y = []
#mapping = {}
total = 0
Xtrain_cont = []  # concatenated word indices of all training questions
Xtest_cont = []   # concatenated word indices of all validation questions
ngram = 4         # context window: predict the next word from 4 previous ones
# Flatten every training question into one long index stream.
with open('data_processed.csv', 'rb') as csvfile:
    dp = csv.reader(csvfile, delimiter='~')
    for row in dp:
        ques = row[1]
        ques = ques.lower().strip().strip('?!.').split()
        leng = len(ques)
        try:
            for i in range(leng):
                Xtrain_cont.append(wt.getIdx(ques[i])+1)
        except:
            # NOTE(review): bare except silently drops questions with
            # out-of-vocabulary words (and hides any other error).
            pass
# Same flattening for the validation set.
with open('data_processed_val.csv', 'rb') as csvfile:
    dp = csv.reader(csvfile, delimiter='~')
    for row in dp:
        ques = row[1]
        ques = ques.lower().strip().strip('?!.').split()
        leng = len(ques)
        try:
            for i in range(leng):
                Xtest_cont.append(wt.getIdx(ques[i])+1)
        except:
            pass
# Build (ngram context -> next word) training pairs with a sliding window.
Xdata = []
Ydata = []
for i in range(len(Xtrain_cont) - ngram):
    Xdata.append(Xtrain_cont[i:i+ngram])
    Ydata.append(Xtrain_cont[i+ngram])
Xdata = np.array(Xdata)
Ydata = np_utils.to_categorical(Ydata)
Xtest = []
Ytest = []
for i in range(len(Xtest_cont) - ngram):
    Xtest.append(Xtest_cont[i:i+ngram])
    Ytest.append(Xtest_cont[i+ngram])
Xtest = np.array(Xtest)
Ytest = np_utils.to_categorical(Ytest, top_words)
# create the model
embedding_vecor_length = 64
model = Sequential()
model.add(Embedding(top_words, embedding_vecor_length, input_length=ngram))
model.add(LSTM(300, dropout_W = 0.2, dropout_U = 0.2))
model.add(Dense(top_words, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
model.fit(Xdata, Ydata, nb_epoch=10, batch_size = 64)
# Finding accuracy
scores, acc = model.evaluate(Xtest, Ytest, verbose=1)
print "Accuracy:"
print acc
#Finding perplexity
# prob: probability the model assigns to the true next word of each sample.
predictions = model.predict(Xtest)
prob = np.sum(np.multiply(Ytest, predictions), 1)
# NOTE(review): this sums p*log2(p) over samples without negation or
# averaging — verify against the intended cross-entropy definition.
entropy = np.sum(np.multiply(np.log2(prob), prob))
print "Entropy"
print entropy
perplexity = (1.0/2**entropy)
print "Perplexity Final:"
print perplexity
# serialize_errors(X_test, predictions, Y_test, 'lstm', wt)
|
import pandas as pd
import glob
def parse_path(p):
    """Turn a filename like 'key1_val1__key2_val2.csv' into a pandas Series
    mapping each key to its (string) value."""
    stem = p.split('.csv')[0]
    pairs = dict(chunk.split('_') for chunk in stem.split('__'))
    return pd.Series(pairs)
def get_stage_results(stage):
    """Concatenate all CV result CSVs for the given stage, expanding the
    key/value pairs encoded in each filename into columns."""
    # Keep only the filename part after the STAGE marker; it encodes the
    # run parameters parsed by parse_path below.
    stage_results = pd.concat([
        pd.read_csv(f).assign(path=f.split(f'STAGE{stage}__')[1]) \
        for f in glob.glob(f'output/MSEs/GRF_simulation_CV/CVregularizers_True__STAGE{stage}__*.csv')
    ], axis=0)
    # Replace the raw path column with one column per encoded parameter.
    stage_results = pd.concat([stage_results.drop('path',axis=1),
                               stage_results.path.apply(parse_path)],axis=1)
    return stage_results
if __name__=='__main__':
    # Aggregate both CV stages into one CSV each.
    get_stage_results(1).to_csv('output/MSEs/GRF_simulation_CV/STAGE1_all_results.csv',index=False)
    get_stage_results(2).to_csv('output/MSEs/GRF_simulation_CV/STAGE2_all_results.csv',index=False)
import pytest
from ioweb.request import BaseRequest
def test_default_meta():
    """A freshly constructed BaseRequest starts with an empty meta dict."""
    request = BaseRequest()
    assert request.meta == {}  # pylint: disable=use-implicit-booleaness-not-comparison
def test_custom_default_config():
    """Values from get_default_config() end up in request.config."""
    class FooRequest(BaseRequest):
        def get_default_config(self):
            return {"foo": "bar"}
    assert FooRequest().config["foo"] == "bar"
def test_setup_invalid_key():
    """setup() rejects keys that are absent from the config."""
    class FooRequest(BaseRequest):
        def get_default_config(self):
            return {"foo": "bar"}
    request = FooRequest()
    with pytest.raises(AssertionError):
        request.setup(zzz=1)
def test_getitem_method():
    """Subscript access proxies into the request config."""
    class FooRequest(BaseRequest):
        def get_default_config(self):
            return {"foo": "bar"}
    assert FooRequest()["foo"] == "bar"
def test_getitem_method_invalid_key():
    """Subscripting an unknown config key raises KeyError."""
    class FooRequest(BaseRequest):
        def get_default_config(self):
            return {"foo": "bar"}
    request = FooRequest()
    with pytest.raises(KeyError):
        request["zzz"]
def test_as_data():
    """as_data() exposes the config plus the bookkeeping fields."""
    class FooRequest(BaseRequest):
        def get_default_config(self):
            return {"foo": "bar"}
    request = FooRequest()
    assert request.as_data()["config"] == {"foo": "bar"}
    for field in ("config", "meta", "priority", "retry_count"):
        assert field in request.as_data()
def test_lt():
    """Requests order by their priority value."""
    low = BaseRequest(priority=1)
    mid = BaseRequest(priority=2)
    high = BaseRequest(priority=3)
    assert mid > low
    assert low < high
def test_eq():
    """Request equality is determined by priority."""
    first = BaseRequest(priority=1)
    second = BaseRequest(priority=2)
    third = BaseRequest(priority=2)
    assert first != second
    assert second == third
def test_from_data():
    """from_data() restores config and meta from a plain dict."""
    payload = {
        "config": {"foo": "bar"},
        "meta": {"name": "Putin"},
        "priority": 1,
        "retry_count": 0,
    }
    restored = BaseRequest.from_data(payload)
    assert restored.config["foo"] == "bar"
    assert restored.meta["name"] == "Putin"
|
# -*- coding:utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Lasso,Ridge
from sklearn.model_selection import GridSearchCV
if __name__ == "__main__":
    # Advertising dataset: predict sales from TV/radio/newspaper spend.
    data = pd.read_csv('data/Advertising.data')
    x = data[['TV','radio','newspaper']]
    y = data['sales']
    x_train,x_test,y_train,y_test = train_test_split(x, y, random_state=1)
    model = Lasso()
    # NOTE(review): the Lasso above is immediately overwritten — only Ridge
    # is actually tuned and fit below, despite the 'lasso_model' name and
    # the L1 comment.
    model = Ridge()
    # Candidate regularization strengths: 50 points from 1e-3 to 1e2.
    alpha_can = np.logspace(-3,2,base=10)
    np.set_printoptions(suppress = True)
    print('alpha_can: {}'.format(alpha_can))
    # find the best alpha of L1 norm
    lasso_model = GridSearchCV(model,{'alpha':alpha_can},cv = 5)
    lasso_model.fit(x_train,y_train)
    print('best alpha : {}'.format(lasso_model.best_params_))
    # Sort the test set by target value so the plot below is readable.
    order = y_test.argsort(axis = 0)
    y_test = y_test.values[order]
    x_test = x_test.values[order]
    y_hat = lasso_model.predict(x_test)
    print (lasso_model.score(x_test,y_test))
    mse = np.average((y_hat - np.array(y_test)) ** 2)
    rmse = np.sqrt(mse)
    print ('mse : {}, rmse: {}'.format(mse, rmse))
    # Plot true vs predicted sales (CJK-capable font for the title).
    t = np.arange(len(x_test))
    mpl.rcParams['font.sans-serif'] = [u'simHei']
    mpl.rcParams['axes.unicode_minus'] = False
    plt.figure(facecolor='w')
    plt.plot(t,y_test,'r-',linewidth=2,label = 'True Data')
    plt.plot(t, y_hat, 'g-', linewidth=2, label='Predict Data')
    plt.title(u'线性回归预测销售数据', fontsize=18)
    plt.legend(loc='upper right')
    plt.grid()
    plt.show()
    print('best')
|
from django.contrib import admin
from django.urls import path, include
from drf_yasg import openapi
from drf_yasg.views import get_schema_view
"""JWT auth"""
# drf-yasg schema view serving the interactive Swagger UI at the site root.
schema_view = get_schema_view(
    openapi.Info(
        title="Booking API",
        default_version="v1",
    ),
    public=True,
)
# Routes: Django admin, the meetings API under /api/v1/, and Swagger docs.
urlpatterns = [
    path("admin/", admin.site.urls),
    path("api/v1/", include("meetings.urls")),
    path("", schema_view.with_ui("swagger", cache_timeout=0), name="schema-swagger-ui"),
]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.pointnet2 import pointnet2_utils as pointutils
class FlowEmbedding(nn.Module):
    """Flow embedding layer: for every point of cloud 1, group neighbouring
    points from cloud 2, correlate the two feature sets and run the result
    through a shared MLP, max-pooled over the neighbourhood.

    Args:
        radius: neighbourhood radius (also caps kNN matches in forward).
        nsample: number of neighbours grouped per point.
        in_channel: per-point feature channels of both input clouds.
        mlp: output channel sizes of the shared MLP layers.
        pooling: kept for API compatibility; forward always max-pools.
        corr_func: correlation scheme; only 'concat' is supported.
        knn: if True use k-nearest-neighbour grouping, else ball query.
        use_instance_norm: use InstanceNorm2d instead of BatchNorm2d.

    Raises:
        ValueError: if corr_func is not 'concat'.
    """
    def __init__(self, radius, nsample, in_channel, mlp, pooling='max', corr_func='concat', knn=True, use_instance_norm=False):
        super(FlowEmbedding, self).__init__()
        self.radius = radius
        self.nsample = nsample
        self.knn = knn
        self.pooling = pooling
        self.corr_func = corr_func
        self.mlp_convs = nn.ModuleList()
        self.mlp_bns = nn.ModuleList()
        # BUGFIX: was `corr_func is 'concat'` — identity comparison against a
        # string literal is fragile (and a SyntaxWarning on CPython 3.8+).
        if corr_func == 'concat':
            # grouped features (C) + own features (C) + 3-d position offset
            last_channel = in_channel * 2 + 3
        else:
            # Previously an unsupported corr_func crashed later with a
            # NameError on `last_channel`; fail fast with a clear message.
            raise ValueError("unsupported corr_func: %r" % (corr_func,))
        for out_channel in mlp:
            self.mlp_convs.append(nn.Conv2d(last_channel, out_channel, 1, bias=False))
            if use_instance_norm:
                self.mlp_bns.append(nn.InstanceNorm2d(out_channel, affine=True))
            else:
                self.mlp_bns.append(nn.BatchNorm2d(out_channel))
            last_channel = out_channel
    def forward(self, pos1, pos2, feature1, feature2):
        """
        Input:
            pos1: (batch_size, 3, npoint)
            pos2: (batch_size, 3, npoint)
            feature1: (batch_size, channel, npoint)
            feature2: (batch_size, channel, npoint)
        Output:
            pos1: (batch_size, 3, npoint)
            feat1_new: (batch_size, mlp[-1], npoint)
        """
        pos1_t = pos1.permute(0, 2, 1).contiguous()
        pos2_t = pos2.permute(0, 2, 1).contiguous()
        B, N, C = pos1_t.shape
        if self.knn:
            dist, idx = pointutils.knn(self.nsample, pos1_t, pos2_t)
            # Replace neighbours beyond the radius with each point's nearest
            # neighbour, so every grouped point stays within range.
            tmp_idx = idx[:, :, 0].unsqueeze(2).repeat(1, 1, self.nsample).to(idx.device)
            idx[dist > self.radius] = tmp_idx[dist > self.radius]
        else:
            # If the ball neighborhood points are less than nsample,
            # than use the knn neighborhood points
            idx, cnt = pointutils.ball_query(self.radius, self.nsample, pos2_t, pos1_t)
            _, idx_knn = pointutils.knn(self.nsample, pos1_t, pos2_t)
            cnt = cnt.view(B, -1, 1).repeat(1, 1, self.nsample)
            idx = idx_knn[cnt > (self.nsample - 1)]
        pos2_grouped = pointutils.grouping_operation(pos2, idx)  # [B, 3, N, S]
        pos_diff = pos2_grouped - pos1.view(B, -1, N, 1)  # [B, 3, N, S]
        feat2_grouped = pointutils.grouping_operation(feature2, idx)  # [B, C, N, S]
        if self.corr_func == 'concat':
            feat_diff = torch.cat([feat2_grouped, feature1.view(B, -1, N, 1).repeat(1, 1, 1, self.nsample)], dim=1)
            feat1_new = torch.cat([pos_diff, feat_diff], dim=1)  # [B, 2*C+3,N,S]
        # Shared MLP over each (point, neighbour) pair ...
        for i, conv in enumerate(self.mlp_convs):
            bn = self.mlp_bns[i]
            feat1_new = F.relu(bn(conv(feat1_new)))
        # ... then max-pool over the neighbourhood dimension.
        feat1_new = torch.max(feat1_new, -1)[0]  # [B, mlp[-1], npoint]
        return pos1, feat1_new
class PointNetSetAbstraction(nn.Module):
    """
    PointNet++ set-abstraction layer.

    Optionally samples `npoint` centroids with furthest point sampling,
    groups neighbours around each centroid (ball query or all points),
    applies a shared per-point MLP and pools over each group.
    """
    def __init__(self, npoint, radius, nsample, in_channel, mlp, group_all,
                 return_fps=False, use_xyz=True, use_act=True, act=F.relu, mean_aggr=False, use_instance_norm=False):
        super(PointNetSetAbstraction, self).__init__()
        self.npoint = npoint  # number of sampled centroids; -1 disables sampling
        self.radius = radius  # ball-query radius
        self.nsample = nsample  # max neighbours per centroid
        self.group_all = group_all  # True: one group containing every point
        self.use_xyz = use_xyz  # append xyz coordinates to grouped features
        self.use_act = use_act  # apply activation (+norm) after each conv
        self.mean_aggr = mean_aggr  # mean-pool instead of max-pool over groups
        self.act = act
        self.mlp_convs = nn.ModuleList()
        self.mlp_bns = nn.ModuleList()
        # 3 extra channels for the xyz offsets when use_xyz is on
        last_channel = (in_channel + 3) if use_xyz else in_channel
        for out_channel in mlp:
            self.mlp_convs.append(nn.Conv2d(last_channel, out_channel, 1, bias=False))
            if use_instance_norm:
                self.mlp_bns.append(nn.InstanceNorm2d(out_channel, affine=True))
            else:
                self.mlp_bns.append(nn.BatchNorm2d(out_channel))
            last_channel = out_channel
        if group_all:
            self.queryandgroup = pointutils.GroupAll(self.use_xyz)
        else:
            self.queryandgroup = pointutils.QueryAndGroup(radius, nsample, self.use_xyz)
        self.return_fps = return_fps  # also return the FPS indices from forward()
    def forward(self, xyz, points, fps_idx=None):
        """
        Input:
            xyz: input points position data, [B, C, N]
            points: input points data, [B, D, N]
            fps_idx: optional precomputed furthest-point-sampling indices
        Return:
            new_xyz: sampled points position data, [B, S, C]
            new_points: sample points feature data, [B, S, D']
            (and fps_idx when self.return_fps is True)
        """
        device = xyz.device
        B, C, N = xyz.shape
        xyz = xyz.contiguous()
        xyz_t = xyz.permute(0, 2, 1).contiguous()
        if (self.group_all == False) and (self.npoint != -1):
            # Sample npoint centroids (reuse caller-provided indices if any)
            if fps_idx == None:
                fps_idx = pointutils.furthest_point_sample(xyz_t, self.npoint)  # [B, N]
            new_xyz = pointutils.gather_operation(xyz, fps_idx)  # [B, C, N]
        else:
            new_xyz = xyz
        new_points, _ = self.queryandgroup(xyz_t, new_xyz.transpose(2, 1).contiguous(), points)  # [B, 3+C, N, S]
        # new_xyz: sampled points position data, [B, C, npoint]
        # new_points: sampled points data, [B, C+D, npoint, nsample]
        for i, conv in enumerate(self.mlp_convs):
            if self.use_act:
                bn = self.mlp_bns[i]
                new_points = self.act(bn(conv(new_points)))
            else:
                new_points = conv(new_points)
        # Pool over the neighbourhood (last) dimension
        if self.mean_aggr:
            new_points = torch.mean(new_points, -1)
        else:
            new_points = torch.max(new_points, -1)[0]
        if self.return_fps:
            return new_xyz, new_points, fps_idx
        else:
            return new_xyz, new_points
class PointNetFeaturePropogation(nn.Module):
    """
    PointNet++ feature-propagation layer.

    Upsamples features from a sparse point set (pos2) to a denser one (pos1)
    by inverse-distance-weighted interpolation of the 3 nearest neighbours,
    optionally followed by a shared 1-D MLP.
    (Class name keeps the original "Propogation" spelling: callers import it.)
    """
    def __init__(self, in_channel, mlp):
        super(PointNetFeaturePropogation, self).__init__()
        self.mlp_convs = nn.ModuleList()
        self.mlp_bns = nn.ModuleList()
        # mlp may be None, in which case interpolated features pass through
        self.apply_mlp = mlp is not None
        last_channel = in_channel
        if self.apply_mlp:
            for out_channel in mlp:
                self.mlp_convs.append(nn.Conv1d(last_channel, out_channel, 1))
                self.mlp_bns.append(nn.BatchNorm1d(out_channel))
                last_channel = out_channel
    def forward(self, pos1, pos2, feature1, feature2):
        """
        Input:
            pos1: input points position data, [B, C, N]
            pos2: sampled input points position data, [B, C, S]
            feature1: input points data, [B, D, N]  (may be None)
            feature2: input points data, [B, D, S]
        Return:
            feat_new: upsampled points data, [B, D', N]
        """
        pos1_t = pos1.permute(0, 2, 1).contiguous()
        pos2_t = pos2.permute(0, 2, 1).contiguous()
        B, C, N = pos1.shape
        # 3 nearest neighbours of each dense point among the sparse points
        dists, idx = pointutils.three_nn(pos1_t, pos2_t)
        # avoid division by zero for coincident points
        dists[dists < 1e-10] = 1e-10
        weight = 1.0 / dists
        weight = weight / torch.sum(weight, -1, keepdim=True)  # [B,N,3]
        interpolated_feat = torch.sum(pointutils.grouping_operation(feature2, idx) * weight.view(B, 1, N, 3),
                                      dim=-1)  # [B,C,N,3]
        # Skip connection with the dense cloud's own features, when present
        if feature1 is not None:
            feat_new = torch.cat([interpolated_feat, feature1], 1)
        else:
            feat_new = interpolated_feat
        if self.apply_mlp:
            for i, conv in enumerate(self.mlp_convs):
                bn = self.mlp_bns[i]
                feat_new = F.relu(bn(conv(feat_new)))
        return feat_new
|
import cStringIO
from Scapy_Control import *
from scapy.all import *
from SMB_COM import *
class ReadAndx():
    """Builds SMB READ_ANDX request/response packets for a scripted flow."""

    def ReadAndxRequest(self,
                        Flow='SMB',
                        **kwargs):
        """Append a READ_ANDX request packet (client -> server) to the flow."""
        raw = self.SMBHeader(command=SMB_COM_READ_ANDX,
                             flags=24,
                             flags2=59399,
                             mid=self.MID,
                             pid=self.PID,
                             uid=self.UID,
                             tid=self.TID)
        # BUG FIX: the file was opened without ever being closed just to learn
        # its size; read it once through a context manager instead.
        with open(self.ActiveFile) as active_file:
            file_size = len(active_file.read())
        raw += HexCodeInteger(12, HexCodes=1)  # word count
        raw += HexCodeInteger(255, HexCodes=1)  # andX (255 = no further commands)
        raw += HexCodeInteger(0, HexCodes=1)  # reserved
        raw += HexCodeInteger(0, HexCodes=2)  # offset
        raw += HexCodeInteger(self.FID, HexCodes=2)  # FID
        raw += HexCodeInteger(kwargs.get('offset') or 0, HexCodes=4)  # offset
        raw += HexCodeInteger(file_size, HexCodes=2)  # max low count
        raw += HexCodeInteger(0, HexCodes=2)  # min count
        raw += HexCodeInteger(0, HexCodes=4)  # max high
        raw += HexCodeInteger(0, HexCodes=2)  # remaining
        raw += HexCodeInteger(0, HexCodes=4)  # high offset
        raw += HexCodeInteger(0, HexCodes=2)  # byte count
        raw = self.add_raw_to_nb(raw=raw)
        load = Raw(load=raw)
        self.Flows[Flow].ConStruct_Packet_Without_Data(fromSrc=True,
                                                       Flags='PA',
                                                       AttachLayers=load)

    def ReadAndxResponse(self,
                         Flow='SMB',
                         **kwargs):
        """Append a READ_ANDX response carrying the file contents, split into
        ~1000-byte segments so each packet stays under a typical MTU."""
        raw = self.SMBHeader(command=SMB_COM_READ_ANDX,
                             flags=152,
                             flags2=59399,
                             mid=self.MID,
                             pid=self.PID,
                             uid=self.UID,
                             tid=self.TID)
        # BUG FIX: the file was opened twice (size + payload) and the handles
        # were never closed; read it once through a context manager.
        with open(self.ActiveFile) as active_file:
            file_data = active_file.read()
        fsize = len(file_data)
        raw += HexCodeInteger(12, HexCodes=1)  # word count
        raw += HexCodeInteger(255, HexCodes=1)  # andX (255 = no further commands)
        raw += HexCodeInteger(0, HexCodes=1)  # reserved
        raw += HexCodeInteger(0, HexCodes=2)  # offset
        raw += HexCodeInteger(0, HexCodes=2)  # remaining
        raw += HexCodeInteger(0, HexCodes=2)  # data compaction mode
        raw += HexCodeInteger(0, HexCodes=2)  # reserved
        raw += HexCodeInteger(fsize, HexCodes=2)  # data len low
        raw += HexCodeInteger(60, HexCodes=2)  # data offset
        raw += HexCodeInteger(0, HexCodes=4)  # data len high
        raw += HexCodeInteger(0, HexCodes=6)  # reserved
        raw += HexCodeInteger(fsize + 1, HexCodes=2)  # byte count (payload + padding)
        raw += HexCodeInteger(0, HexCodes=1)  # padding
        raw += file_data
        raw = self.add_raw_to_nb(raw=raw)
        # Split the raw payload into parts; each chunk becomes its own packet.
        r = cStringIO.StringIO(raw)
        r.seek(0)
        bytes_remaining = len(raw)
        while bytes_remaining > 0:
            if bytes_remaining < 1000:
                load = r.read(bytes_remaining)
                bytes_remaining = 0
            else:
                bytes_remaining -= 1000
                load = r.read(1000)
            self.Flows[Flow].ConStruct_Packet_Without_Data(fromSrc=False,
                                                           Flags='PA',
                                                           AttachLayers=load)
        r.close()
#!/usr/bin/env python
import sys
import rospy
import moveit_commander
import geometry_msgs.msg
import math
import tf.transformations
def main():
    """Plan and execute a single MoveIt pose goal for the AL5D arm."""
    moveit_commander.roscpp_initialize(sys.argv)
    rospy.init_node('move_group_mover', anonymous=True)
    # Planning group name as defined in the robot's MoveIt configuration
    group = moveit_commander.MoveGroupCommander("al5d_joints")
    # Target pose, expressed in the "part_1_link" frame
    pose_target = geometry_msgs.msg.PoseStamped()
    pose_target.header.frame_id = "part_1_link"
    pose_target.pose.position.x = 0.1
    pose_target.pose.position.y = 0.0
    pose_target.pose.position.z = 0.1
    # Orientation from Euler angles: 90-degree pitch, no roll/yaw
    roll = 0
    pitch = math.pi / 2
    yaw = 0
    quaternion = tf.transformations.quaternion_from_euler(roll, pitch, yaw)
    pose_target.pose.orientation.x = quaternion[0]
    pose_target.pose.orientation.y = quaternion[1]
    pose_target.pose.orientation.z = quaternion[2]
    pose_target.pose.orientation.w = quaternion[3]
    group.set_pose_target(pose_target)
    # NOTE(review): in newer MoveIt releases plan() returns a tuple rather
    # than a trajectory -- confirm against the MoveIt version in use.
    plan = group.plan()
    group.execute(plan)
    rospy.sleep(5)
    moveit_commander.roscpp_shutdown()
if __name__ == "__main__":
    main()
|
import psycopg2
from openpyxl.workbook import Workbook
import pandas as pd
import logging
# importing packages
class Total_compensation:
    """Exports the total compensation paid per department to an Excel file."""

    def compensation(self):
        """
        Query PostgreSQL for the total compensation per department and write
        the result to 'ques_4.xlsx'. Errors are printed, not raised; the
        connection and cursor are always closed.
        """
        # BUG FIX: conn/cursor must be pre-initialised, otherwise the finally
        # block raises UnboundLocalError when connect()/cursor() fails.
        conn = None
        cursor = None
        try:
            # connect to the postgresql database
            conn = psycopg2.connect(
                database="assignment",
                user="postgres",
                password="munna1998")
            cursor = conn.cursor()
            # total compensation given at department level to date
            script = """
                    select dept.deptno, dept_name, sum(total_compensation) from Compensation, dept
                    where Compensation.dept_name=dept.dname
                    group by dept_name, dept.deptno
                    """
            cursor.execute(script)
            columns = [desc[0] for desc in cursor.description]
            data = cursor.fetchall()
            # store the result set in a dataframe and export it to Excel
            df = pd.DataFrame(list(data), columns=columns)
            writer = pd.ExcelWriter('ques_4.xlsx')
            df.to_excel(writer, sheet_name='bar')
            writer.save()
        except Exception as e:
            print("Error", e)
        finally:
            # close whatever was successfully opened
            if cursor is not None:
                cursor.close()
            if conn is not None:
                conn.close()
# main entry point: create a Total_compensation object and run the export.
if __name__ == '__main__':
    comp = Total_compensation()
    # BUG FIX: the original `comp.compensation` only referenced the bound
    # method without calling it, so the export never ran.
    comp.compensation()
# -*- coding: UTF-8 -*-
import MySQLdb
from scipy import stats
__author__ = 'Eric huizh'
# ============================= rule 1 =================================
"""
function: 对销售记录表的数据单价跟销售数量进行乘积,求出最初数据表中的每个商家或者特定商家每条销售记录的销售额
@para: tag = 1:全部商家
       tag = 0:特定商家
"""
def sql_fee(table, **kw):
    """
    Build SQL computing the revenue (number * price) of every sale record of
    the last kw['Day'] days.

    kw['tag'] == 1: all sellers (with non-NULL sellernick);
    kw['tag'] == 0: only the seller kw['sellernick'].
    Any other tag falls through and returns None (unchanged behaviour).
    """
    # BUG FIX: adjacent string literals were concatenated without a
    # separating space (e.g. "`sellernick`FROM"), producing invalid SQL.
    if kw['tag'] == 1:
        return "SELECT SUBSTRING(`createtime`, 1, 10) AS `Date`, `{Table}`.`itemnum`, `number`*`price` AS `fee`, `sellernick` " \
               "FROM `{Table}` " \
               "WHERE `sellernick` IS NOT NULL AND date_sub(curdate(), INTERVAL {Day} DAY) <= date(`{Table}`.`createtime`)" \
            .format(Table=table, Day=kw['Day'])
    elif kw['tag'] == 0:
        sellernick = kw['sellernick']
        return "SELECT SUBSTRING(`createtime`, 1, 10) AS `Date`, `{Table}`.`itemnum`, `number`*`price` AS `fee` " \
               "FROM `{Table}` " \
               "WHERE `sellernick` ='{SellerNick}' AND date_sub(curdate(), INTERVAL {Day} DAY) <= date(`{Table}`.`createtime`)" \
            .format(Table=table, SellerNick=sellernick, Day=kw['Day'])
"""
function: 获取数据表中包含所要查询关键字类目的的全部货号
@para:
"""
def sql_category(table, category):
    """Build SQL selecting the distinct item numbers whose title contains the
    given category keyword."""
    # BUG FIX: added separating spaces between concatenated SQL fragments.
    return "SELECT DISTINCT `{Table}`.`itemnum` " \
           "FROM `{Table}` " \
           "WHERE `title` LIKE '%{Category}%' OR `title` LIKE '{Category}%' OR `title` LIKE '%{Category}'"\
        .format(Table=table, Category=category)
"""
function: (注:此处商家的每天销售信息不一定全部都有)
tag = get_SellerCategoryItemNumber: 获取指定商家销售指定商品关键字类目的所有的货号
tag = get_AllSellernick: 获取特定商家的特定商品关键字类目的每天销售额
tag = get_SellerCategoryDayFee: 获取卖这种商品关键字类目的所有商家
@para:tag 作为标识
"""
def sql_itemfee_Or_sellernick_Or_item(**arg):
    """
    Build one of three SQL statements, selected by arg['Tag']:

      - 'get_SellerCategoryItemNumber': item numbers of the given category
        sold by a specific seller (limited to 10).
      - 'get_AllSellernick': all sellers of the given category, restricted
        to "zhuican" partners (limited to 10).
      - 'get_SellerCategoryDayFee': daily revenue of one specific item of
        the given category for a specific seller.

    Expected keys: OrderTable, SellerNick, ItemTable, Category, ItemNum,
    Tag, Day. Note: a seller does not necessarily have a row for every day.
    """
    tag = arg['Tag']
    # BUG FIX (all branches): adjacent string literals were concatenated
    # without separating spaces, producing invalid SQL.
    if tag == 'get_SellerCategoryItemNumber':
        # revenue rows of this seller (tag=0) joined against the category items
        kw = {'sellernick': arg['SellerNick'], 'tag': 0, 'Day': arg['Day']}
        sql_SpeSellerNickFee = sql_fee(arg['OrderTable'], **kw)
        sql_Category = sql_category(arg['ItemTable'], arg['Category'])
        return "SELECT DISTINCT `per`.`itemnum` " \
               "FROM({SQL1}) AS `per` INNER JOIN({SQL2}) AS `item` ON `item`.`itemnum` = `per`.`itemnum` LIMIT 10" \
            .format(SQL1=sql_SpeSellerNickFee, SQL2=sql_Category)
    elif tag == 'get_AllSellernick':
        # revenue rows of all sellers (tag=1) joined against category items
        # and the "zhuican" partner list
        kw = {'tag': 1, 'Day': arg['Day']}
        sql_AllSellerNickFee = sql_fee(arg['OrderTable'], **kw)
        sql_Category = sql_category(arg['ItemTable'], arg['Category'])
        sql_iszhuican = sql_IsZhuican()
        return "SELECT DISTINCT `per`.`sellernick` " \
               "FROM({SQL1}) AS `per` INNER JOIN({SQL2}) AS `item` ON `item`.`itemnum` = `per`.`itemnum` " \
               "INNER JOIN ({SQL3}) AS `meta` ON `meta`.`sellernick` = `per`.`sellernick` LIMIT 10" \
            .format(SQL1=sql_AllSellerNickFee, SQL2=sql_Category, SQL3=sql_iszhuican)
    elif tag == 'get_SellerCategoryDayFee':
        # per-day revenue of one item of this seller within the category
        kw = {'sellernick': arg['SellerNick'], 'tag': 0, 'Day': arg['Day']}
        sql_AllSellerNickFee = sql_fee(arg['OrderTable'], **kw)
        sql_Category = sql_category(arg['ItemTable'], arg['Category'])
        return "SELECT `per`.`Date`, SUM(`per`.`fee`) AS `dFee` " \
               "FROM({SQL1}) AS `per` INNER JOIN({SQL2}) AS `item` ON `item`.`itemnum` = `per`.`itemnum` " \
               "WHERE `per`.`itemnum` = '{ItemNum}' GROUP BY `per`.`Date`, `per`.`itemnum` ORDER BY `per`.`Date`" \
            .format(SQL1=sql_AllSellerNickFee, SQL2=sql_Category, ItemNum=arg['ItemNum'])
"""
function: 获取系统对应的时间区间(此处为三十天),可以设置参数进行自动改变
@para:
"""
def sql_getDays(day):
    """Build SQL selecting the calendar dates of the last `day` days from the
    date-dimension table."""
    # BUG FIX: added separating spaces between concatenated SQL fragments.
    return "SELECT `etc_dimension_date`.`updatetime` " \
           "FROM `etc_dimension_date` " \
           "WHERE `updatetime` <= CURDATE() AND `updatetime` > date_sub(curdate(), INTERVAL {Day} DAY)".format(Day=day)
"""
function: 补全商家的区段销售信息,RIGHT JOIN 时间表
@para:
"""
def sql_seller_Days_fee(ordertable, sellernick, itemtable, category, itemnum, tag, day):
    """
    Complete a seller's per-day revenue series by RIGHT JOINing against the
    calendar table, so that days without any sale appear with a fee of 0.
    """
    kw = {'OrderTable': ordertable, 'SellerNick': sellernick, 'ItemTable': itemtable, 'Category': category,
          'ItemNum': itemnum, 'Tag': tag, 'Day': day}
    sql_seller_dF = sql_itemfee_Or_sellernick_Or_item(**kw)
    sql_getDay = sql_getDays(day)
    # BUG FIX: added separating spaces between concatenated SQL fragments.
    return "SELECT `C`.`updatetime`, COALESCE(`B`.`dFee`, 0) AS `tFee`, '{SellerNick}' AS `seller`, '{ItemNum}' AS `itemnum` " \
           "FROM({SQL1}) AS `B` " \
           "RIGHT JOIN({SQL2}) AS `C` ON `C`.`updatetime` = `B`.`Date` ORDER BY `C`.`updatetime`".format(
               SQL1=sql_seller_dF, SQL2=sql_getDay, SellerNick=sellernick, ItemNum=itemnum)
"""
淘宝书上的保暖内衣最近三十天的销售记录
"""
def sql_trend(day):
    """Build SQL selecting the thermal-underwear sales trend of the last
    `day` days (the Taobao trend table)."""
    # BUG FIX: added separating spaces between concatenated SQL fragments,
    # and use the `day` parameter instead of a hard-coded 30 (identical
    # output for the previous day=30 usage).
    return "SELECT SUBSTRING(`date`, 1, 30) AS `Date`, `value` " \
           "FROM `trend_thermalunderwear` " \
           "WHERE date_sub(curdate(), INTERVAL {Day} DAY) <= date(`date`)".format(Day=day)
"""
获取追灿自招的商家
"""
def sql_IsZhuican():
    """Build SQL selecting the sellers flagged as "zhuican" partners."""
    # BUG FIX: added separating spaces between concatenated SQL fragments.
    return "SELECT DISTINCT `sellernick`, `is_zhuican` " \
           "FROM `meta_cooperation` " \
           "WHERE `is_zhuican` = '1'"
|
# Generated by Django 2.2.4 on 2020-05-13 17:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter the Owner.flats many-to-many field.

    NOTE(review): null=True has no effect on ManyToManyField (Django raises
    warning fields.W340); harmless in the migration itself, but the model
    definition could drop it.
    """
    dependencies = [
        ('property', '0013_auto_20200513_1958'),
    ]
    operations = [
        migrations.AlterField(
            model_name='owner',
            name='flats',
            field=models.ManyToManyField(null=True, related_name='owners', to='property.Flat', verbose_name='квартиры в собственности'),
        ),
    ]
|
#!/bin/python3
# Compare the received transmission with the expected repeated "SOS"
# pattern and print how many characters were corrupted.
# (Also removes a stray trailing token that broke parsing.)
rcvd = input().strip()
orig = "SOS" * (len(rcvd) // 3)
count = sum(1 for want, got in zip(orig, rcvd) if want != got)
print(count)
"""
_WMWorkloadTools_
Define some generic tools used by the StdSpecs and WMWorkload
to validate arguments that modify a WMWorkload and/or WMTask.
Created on Jun 13, 2013
@author: dballest
"""
import json
import logging
from Utils.Utilities import makeList, strToBool
from WMCore.DataStructs.LumiList import LumiList
from WMCore.WMSpec.WMSpecErrors import WMSpecFactoryException
def makeLumiList(lumiDict):
    """
    Build a LumiList from a compact-list dict (or its JSON string form) and
    return its compact-list representation.

    Raises WMSpecFactoryException when the input cannot be parsed.
    """
    try:
        if isinstance(lumiDict, basestring):
            lumiDict = json.loads(lumiDict)
        ll = LumiList(compactList=lumiDict)
        return ll.getCompactList()
    # BUG FIX: a bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # only genuine errors should be rewrapped.
    except Exception:
        raise WMSpecFactoryException("Could not parse LumiList, %s: %s" % (type(lumiDict), lumiDict))
def parsePileupConfig(mcPileup, dataPileup):
    """
    _parsePileupConfig_

    Normalize separate MCPileup/DataPileup settings into the usual mapping:
    PileupConfig : {'mc' : ['/mc/procds/tier'], 'data': ['/minbias/procds/tier']}
    Keys are present only when the corresponding dataset was given.
    """
    pileUpConfig = {}
    for key, dataset in (("mc", mcPileup), ("data", dataPileup)):
        if dataset is not None:
            pileUpConfig[key] = [dataset]
    return pileUpConfig
def _validateArgument(argument, value, argumentDefinition):
    """
    Validate a single argument against its definition in the spec.

    Casts the value to the definition's "type" and runs its "validate"
    callable; raises WMSpecFactoryException on any failure.
    Returns the (possibly cast) value.
    """
    # "null" flags whether None is an acceptable value for this argument
    validNull = argumentDefinition["null"]
    if not validNull and value is None:
        raise WMSpecFactoryException("Argument %s can't be None" % argument)
    elif value is None:
        # None is allowed: skip casting and validation
        return value
    try:
        value = argumentDefinition["type"](value)
    except Exception:
        raise WMSpecFactoryException("Argument: %s: value: %s type is incorrect in schema." % (argument, value))
    _validateArgFunction(argument, value, argumentDefinition["validate"])
    return value
def _validateArgumentDict(argument, argValue, argumentDefinition):
    """
    Validate arguments that carry a dict value type.

    Each dict value is cast to the definition's "type"; LumiList is
    special-cased (its type constructor applies to the whole dict).
    The "validate" callable runs on the full dict. Returns argValue.
    """
    validNull = argumentDefinition["null"]
    if not validNull and None in argValue.values():
        raise WMSpecFactoryException("Argument %s can't be None" % argument)
    elif all(val is None for val in argValue.values()):
        # every value is None and None is allowed: nothing to cast/validate
        return argValue
    for val in argValue.values():
        try:
            # sigh.. LumiList has a peculiar type validation
            if argument == 'LumiList':
                val = argumentDefinition["type"](argValue)
                break
            val = argumentDefinition["type"](val)
        except Exception:
            raise WMSpecFactoryException("Argument: %s, value: %s type is incorrect in schema." % (argument, val))
    _validateArgFunction(argument, argValue, argumentDefinition["validate"])
    return argValue
def _validateArgFunction(argument, value, valFunction):
    """
    Run the validation callable from the argument definition, if any.

    Raises WMSpecFactoryException when the callable returns falsy or raises.
    """
    if valFunction:
        try:
            if not valFunction(value):
                raise WMSpecFactoryException(
                    "Argument %s, value: %s doesn't pass the validation function." % (argument, value))
        except Exception as ex:
            # Some validation functions (e.g. Lexicon) will raise errors instead of returning False
            logging.error(str(ex))
            raise WMSpecFactoryException("Validation failed: %s value: %s" % (argument, value))
    return
def _validateArgumentOptions(arguments, argumentDefinition, optionKey=None):
    """
    Check whether create or assign mandatory parameters were properly
    set in the request schema.

    For each definition entry: raise when a mandatory argument (not marked
    optional under `optionKey`) is missing; cast/validate the ones present
    (dict values via _validateArgumentDict). Mutates `arguments` in place.
    """
    for arg, argValue in argumentDefinition.iteritems():
        # an argument is optional unless the definition says otherwise
        optional = argValue.get(optionKey, True)
        if not optional and arg not in arguments:
            msg = "Validation failed: %s parameter is mandatory. Definition: %s" % (arg, argValue)
            raise WMSpecFactoryException(msg)
        # TODO this need to be done earlier then this function
        # elif optionKey == "optional" and not argumentDefinition[argument].get("assign_optional", True):
        #    del arguments[argument]
        # specific case when user GUI returns empty string for optional arguments
        elif arg not in arguments:
            continue
        elif isinstance(arguments[arg], dict):
            arguments[arg] = _validateArgumentDict(arg, arguments[arg], argValue)
        else:
            arguments[arg] = _validateArgument(arg, arguments[arg], argValue)
    return
def _validateInputDataset(arguments):
    """
    Verify that InputDataset exists in DBS, when both InputDataset and
    DbsUrl are provided; DBS errors are rewrapped as WMSpecFactoryException.
    """
    inputdataset = arguments.get("InputDataset", None)
    dbsURL = arguments.get("DbsUrl", None)
    if inputdataset != None and dbsURL != None:
        # import DBS3Reader here, since Runtime code import this module and worker node doesn't have dbs3 client
        from WMCore.Services.DBS.DBS3Reader import DBS3Reader
        from WMCore.Services.DBS.DBSErrors import DBSReaderError
        try:
            DBS3Reader(dbsURL).checkDatasetPath(inputdataset)
        except DBSReaderError as ex:
            # we need to Wrap the exception to WMSpecFactoryException to be caught in reqmgr validation
            raise WMSpecFactoryException(str(ex))
    return
def validateInputDatasSetAndParentFlag(arguments):
    """
    Validate the InputDataset / IncludeParents combination.

    When IncludeParents is set, InputDataset must be given and must have
    parents in DBS; otherwise fall back to plain dataset validation.
    (Function name keeps its historic misspelling: callers import it.)
    """
    inputdataset = arguments.get("InputDataset", None)
    if strToBool(arguments.get("IncludeParents", False)):
        if inputdataset == None:
            msg = "IncludeParent flag is True but there is no inputdataset"
            raise WMSpecFactoryException(msg)
        else:
            dbsURL = arguments.get("DbsUrl", None)
            if dbsURL != None:
                # import DBS3Reader here, since Runtime code import this module and worker node doesn't have dbs3 client
                from WMCore.Services.DBS.DBS3Reader import DBS3Reader
                result = DBS3Reader(dbsURL).listDatasetParents(inputdataset)
                if len(result) == 0:
                    msg = "IncludeParent flag is True but inputdataset %s doesn't have parents" % (inputdataset)
                    raise WMSpecFactoryException(msg)
    else:
        _validateInputDataset(arguments)
    return
def validatePhEDExSubscription(arguments):
    """
    _validatePhEDExSubscription_

    Validate all the PhEDEx arguments provided during request
    creation and assignment; raises WMSpecFactoryException on the first
    invalid value.
    """
    # Auto-approval straight to tape endpoints is forbidden
    for site in arguments.get("AutoApproveSubscriptionSites", []):
        if site.endswith('_MSS'):
            raise WMSpecFactoryException("Auto-approval to MSS endpoint is not allowed: %s" % site)
    # Enumerated arguments: (key, default, allowed values, error format)
    enumChecks = (
        ("SubscriptionPriority", "Low", ("Low", "Normal", "High"),
         "Invalid subscription priority: %s"),
        ("CustodialSubType", "Replica", ("Move", "Replica"),
         "Invalid custodial subscription type: %s"),
        ("NonCustodialSubType", "Replica", ("Move", "Replica"),
         "Invalid non custodial subscription type: %s"),
    )
    for key, default, allowed, errFmt in enumChecks:
        if arguments.get(key, default) not in allowed:
            raise WMSpecFactoryException(errFmt % arguments[key])
    # Group names must be strings when provided
    for key, errFmt in (("CustodialGroup", "Invalid custodial PhEDEx group: %s"),
                        ("NonCustodialGroup", "Invalid non custodial PhEDEx group: %s")):
        if key in arguments and not isinstance(arguments[key], basestring):
            raise WMSpecFactoryException(errFmt % arguments[key])
    if 'DeleteFromSource' in arguments and not isinstance(arguments['DeleteFromSource'], bool):
        raise WMSpecFactoryException("Invalid DeleteFromSource type, it must be boolean")
    return
def validateSiteLists(arguments):
    """
    Ensure no site appears in both SiteWhitelist and SiteBlacklist, and
    normalize both values to lists (a plain string becomes a list) back
    into `arguments`.
    """
    whiteList = makeList(arguments.get("SiteWhitelist", []))
    blackList = makeList(arguments.get("SiteBlacklist", []))
    overlap = set(whiteList) & set(blackList)
    if overlap:
        raise WMSpecFactoryException(
            "Validation failed: The same site cannot be white and blacklisted: %s" % list(overlap))
    # store the properly formatted values (list instead of string)
    arguments["SiteWhitelist"] = whiteList
    arguments["SiteBlacklist"] = blackList
    return
def validateAutoGenArgument(arguments):
    """
    Reject any request argument that the system generates itself
    (totals for events/files/lumis/jobs must not be user-supplied).
    """
    autoGenArgs = {"TotalInputEvents", "TotalInputFiles", "TotalInputLumis", "TotalEstimatedJobs"}
    protectedArgs = autoGenArgs.intersection(arguments)
    if protectedArgs:
        raise WMSpecFactoryException("Shouldn't set auto generated params %s: remove it" % list(protectedArgs))
    return
def validateArgumentsCreate(arguments, argumentDefinition):
    """
    _validateArguments_

    Validate a set of request-creation arguments against an argument
    definition as defined in StdBase.getWorkloadArguments: checks that no
    auto-generated argument was supplied, enforces "optional"/mandatory
    flags with casting and validation, then runs the dataset, PhEDEx and
    site-list checks. Raises WMSpecFactoryException on failure.
    """
    validateAutoGenArgument(arguments)
    _validateArgumentOptions(arguments, argumentDefinition, "optional")
    validateInputDatasSetAndParentFlag(arguments)
    validatePhEDExSubscription(arguments)
    validateSiteLists(arguments)
    return
def validateArgumentsUpdate(arguments, argumentDefinition):
    """
    _validateArgumentsUpdate_

    Validate a set of assignment/update arguments against an argument
    definition as defined in StdBase.getWorkloadArguments, using the
    "assign_optional" flag, then run the PhEDEx and site-list checks.
    Raises WMSpecFactoryException on failure.
    """
    _validateArgumentOptions(arguments, argumentDefinition, "assign_optional")
    validatePhEDExSubscription(arguments)
    validateSiteLists(arguments)
    return
def validateArgumentsNoOptionalCheck(arguments, argumentDefinition):
    """
    _validateArgumentsNoOptionalCheck_

    Validate a set of arguments against an argument definition
    as defined in StdBase.getWorkloadArguments, but treat every argument
    as optional (no mandatory-parameter enforcement). Used for TaskChain
    requests when some arguments need to be overwritten.
    Raises WMSpecFactoryException on failure.
    """
    return _validateArgumentOptions(arguments, argumentDefinition)
def setAssignArgumentsWithDefault(arguments, argumentDefinition, checkList):
    """
    For every argument name in `checkList` that the request did not supply,
    fill in the schema default from `argumentDefinition` (in place).
    """
    for name in checkList:
        if name not in arguments:
            arguments[name] = argumentDefinition[name]["default"]
    return
def setArgumentsWithDefault(arguments, argumentDefinition):
    """
    Fill in, in place, the schema default for every defined argument the
    request did not supply; additionally default Campaign to AcquisitionEra
    when Campaign is unset and AcquisitionEra is a plain string.
    """
    for name in argumentDefinition:
        definition = argumentDefinition[name]
        if name not in arguments and "default" in definition:
            arguments[name] = definition["default"]
    # Campaign falls back to AcquisitionEra when not explicitly given
    if not arguments.get("Campaign"):
        if ("AcquisitionEra" in arguments) and isinstance(arguments["AcquisitionEra"], basestring):
            arguments["Campaign"] = arguments["AcquisitionEra"]
    return
def loadSpecClassByType(specType):
    """
    Import WMCore.WMSpec.StdSpecs.<specType> and return its
    <specType>WorkloadFactory class (not an instance).
    """
    factoryName = "%sWorkloadFactory" % specType
    mod = __import__("WMCore.WMSpec.StdSpecs.%s" % specType,
                     globals(), locals(), [factoryName])
    specClass = getattr(mod, factoryName)
    return specClass
def loadSpecByType(specType):
    """Return a new instance of the workload factory for `specType`."""
    specClass = loadSpecClassByType(specType)
    return specClass()
def checkDBSURL(url):
    """Ping the DBS server at `url`; returns the result of checkDBSServer()."""
    # import DBS3Reader here, since Runtime code import this module and worker node doesn't have dbs3 client
    from WMCore.Services.DBS.DBS3Reader import DBS3Reader
    return DBS3Reader(url).checkDBSServer()
|
def mentor_successful_interaction(soft_skills):
    """Score a mentor interaction: a base of 10 plus twice the soft-skill rating."""
    base_bonus = 10
    return base_bonus + 2 * soft_skills
def idea_generation_time(percent, team_soft_skills):
    """Time to generate an idea: base effort (percent*10 + 300) times 8,
    divided by the team's combined soft skills."""
    base_effort = percent * 10 + 300
    return base_effort * 4 * 2 / team_soft_skills
def final_score(soft_skills, idea, programming, design):
    """Overall score: idea weighted 1.5x plus programming and design,
    scaled by (soft_skills + 5) and normalized by 30."""
    weighted_work = idea * 1.5 + programming + design
    return weighted_work * (soft_skills + 5) / 30
|
'''
Created on Apr 24, 2016
BST Sequences: A binary search tree was created by traversing through an array from left to right and inserting
each element. Given a binary search tree with distinct elements, print all possible arrays that could have led to
this tree.
@author: chunq
'''
def getBSTSequences(tree):
    """Return every insertion order that could have produced `tree` (CtCI 4.9).

    Each sequence is a list of tree nodes, root first.
    """
    return getBSTSequncesHelper(tree.root)
def getBSTSequncesHelper(node):
    """
    Recursively collect all valid insertion sequences for the subtree at
    `node`: the subtree root comes first, then the left and right subtree
    sequences woven together in every relative-order-preserving way.
    """
    result = []
    if node is None:
        # base case: a single empty sequence
        result.append([])
        return result
    leftResult = getBSTSequncesHelper(node.left)
    rightResult = getBSTSequncesHelper(node.right)
    # weave every left sequence with every right sequence
    for left in leftResult:
        for right in rightResult:
            temp = []
            weave(left, right, [], temp)
            result = result + temp
    # the root must precede everything in its subtree
    for oneResult in result:
        oneResult.insert(0, node)
    return result
def weave(list1, list2, prehend, result):
    """
    Append to `result` every interleaving of list1 and list2 that keeps the
    internal order of each list. `prehend` is the prefix built so far; it is
    mutated in place and restored before returning.
    """
    if not (list1 and list2):
        # one side exhausted: exactly one completion remains
        result.append(prehend + list1 + list2)
        return
    # branch on which list contributes the next element
    for head, rest1, rest2 in ((list1[0], list1[1:], list2),
                               (list2[0], list1, list2[1:])):
        prehend.append(head)
        weave(rest1, rest2, prehend, result)
        prehend.pop()
if __name__ == '__main__':
    # Smoke test: build the example BST and print every insertion order that
    # could have produced it. (Removes a stray trailing token that broke
    # parsing of the final line.)
    from objects.BST import BST
    testTree = BST()
    testTree.insert(5)
    testTree.insert(2)
    testTree.insert(1)
    testTree.insert(3)
    print(getBSTSequences(testTree))
from django.conf import settings
# Pusher credentials selected by deployment environment.
# SECURITY NOTE(review): credentials are hard-coded in source; they should be
# loaded from environment variables or a secrets store, and the committed
# values rotated. (Also removes a stray trailing token that broke parsing.)
if settings.DEBUG:
    # Development environment
    app_id = '88726'
    app_key = '1751ee3d48c493fa8347'
    app_secret = 'e332918120e6efe62095'
else:
    # Production environment (Heroku app)
    app_id = '88725'
    app_key = '3dc3533a2828e91bd034'
    app_secret = '4fd4201ee58cba4d5ba3'
from pydantic import BaseModel
from datetime import datetime
from typing import List, Optional
# ===================================
# Organization Models start here
# ===================================
class OrganizationBase(BaseModel):
    """Fields shared by every organization schema."""
    name: str
class OrganizationCreate(OrganizationBase):
    """Payload for creating an organization (same fields as the base)."""
    pass
class OrganizationID(BaseModel):
    """Schema carrying only an organization's primary key."""
    id: int
class Organization(OrganizationBase, OrganizationID):
    """Full organization record as returned by the API."""
    owner_id: int
    class Config:
        # allow construction directly from ORM objects (pydantic v1)
        orm_mode = True
class OrganizationList(BaseModel):
    """Wrapper for a list of organizations."""
    orgs: List[Organization]
# ===================================
# Project Models start here
# ===================================
class ProjectBase(BaseModel):
    """Fields shared by every project schema."""
    name: str
    base_location: str
class ProjectCreate(ProjectBase):
    """Payload for creating a project (same fields as the base)."""
    pass
class ProjectEdit(ProjectBase):
    """Payload for editing a project (same fields as the base)."""
    pass
class ProjectID(BaseModel):
    """Schema carrying only a project's primary key."""
    id: int
class Project(ProjectBase, ProjectID):
    """Full project record as returned by the API."""
    class Config:
        # allow construction directly from ORM objects (pydantic v1)
        orm_mode = True
class ProjectList(BaseModel):
    """Wrapper for a list of projects."""
    projects: List[Project]
# ===================================
# User Models start here
# ===================================
class UserBase(BaseModel):
    """Fields shared by every user schema."""
    username: str
class UserCreate(UserBase):
    """Registration payload: username plus plaintext password."""
    password: str
class UserLogin(UserBase):
    """Login payload: username plus plaintext password."""
    password: str
class User(UserBase):
    """Public user record as returned by the API (no password)."""
    id: int
    class Config:
        # allow construction directly from ORM objects (pydantic v1)
        orm_mode = True
class UserAuthenticated(UserBase):
    """User schema returned after authentication, carrying the auth token."""
    auth_token: str
    class Config:
        # allow construction directly from ORM objects (pydantic v1)
        orm_mode = True
class UserList(BaseModel):
    """Wrapper for a list of users."""
    users: List[User]
# ===================================
# Chat Models start here
# ===================================
class Chat(BaseModel):
    """A chat with its message history."""
    id: int
    # NOTE(review): presumably a reference id for the participant list --
    # confirm intended shape against the ORM model.
    user_list: int
    # Messages are plain strings here; a richer Message schema exists below --
    # confirm which shape callers expect.
    messages: List[str]
class ChatList(BaseModel):
    """Wrapper for a list of chats."""
    chats: List[Chat]
class Message(BaseModel):
    """A single chat message."""
    text: str
    sender: str
    chat_id: int
    # integer timestamp -- units/epoch not specified in this file, confirm
    time: int
|
from cleo import Application
from myapp.commands.hello_world_command import HelloWorldCommand
# Assemble the CLI application and register its commands.
app = Application()
app.add(HelloWorldCommand())
if __name__ == '__main__':
    # Parse argv and dispatch to the matching command.
    app.run()
|
"""
Author: JiaHui (Jeffrey) Lu
ID: 25944800
"""
import numpy as np
import matplotlib.pyplot as plt
import time
def my_log1p(x):
    """
    Approximate log(1+x) with its Taylor series and benchmark it against
    numpy's log and log1p.

    :param x: the input to the function log(1+x)
    :return: a 7-tuple of
        ans: the Taylor-series approximation of log1p(x)
        n: the index of the last term added before convergence
        error: relative error of the approximation vs np.log1p
        inbuilt_error: relative error of np.log(1+x) vs np.log1p
        my_runtime: runtime of the series evaluation
        inbuilt_runtime: runtime of np.log(1+x)
        actual_runtime: runtime of np.log1p(x)
    """
    # Sum the alternating series x - x^2/2 + x^3/3 - ... until the newest
    # term is below 1e-6 (or an iteration cap of 1e6 is hit).
    term_index = 2
    approx = x
    t0 = time.time()
    while True:
        term = (((-1) ** (term_index + 1)) * (x ** term_index)) / term_index
        approx += term
        if term_index >= 1000000 or abs(term) < 0.000001:
            break
        term_index += 1
    my_runtime = time.time() - t0
    # Time the two inbuilt references on the same input.
    t0 = time.time()
    inbuilt = np.log(1 + x)
    inbuilt_runtime = time.time() - t0
    t0 = time.time()
    actual = np.log1p(x)
    actual_runtime = time.time() - t0
    # Relative errors against np.log1p as ground truth.
    rel_error = abs((approx - actual) / actual)
    inbuilt_error = abs((inbuilt - actual) / actual)
    return approx, term_index, rel_error, inbuilt_error, my_runtime, inbuilt_runtime, actual_runtime
# Generate test values
# Using values that were significantly smaller than machine eps.
small_vals = np.array([2.220446049250313e-50, 2.220446049250313e-20, 2.220446049250313e-16])
# add in values from 0 to 1
tmp = np.linspace(0, 1, 10000)
# drop the leading 0 (relative error would divide by log1p(0) == 0)
test_val = np.concatenate([small_vals, tmp[1:]])
# collect per-input runtimes and relative errors for all three implementations
my_runtime, inbuilt_runtime, actual_runtime = [], [], []
my_errors = []
inbuilt_errors = []
for test_item in test_val:
    _, __, my_error, inbuilt_error, mine, inbuilt, actual = my_log1p(test_item)
    my_runtime.append(mine)
    inbuilt_runtime.append(inbuilt)
    actual_runtime.append(actual)
    my_errors.append(my_error)
    inbuilt_errors.append(inbuilt_error)
fig = plt.figure(figsize=(16, 10), dpi=80, facecolor='w', edgecolor='k')
# plot the run time of all three methods
plt.subplot(2, 1, 1)
plt.xlim((-0.01, 1.01))
plt.ylim((0, 0.0002))
plt.plot(test_val, my_runtime, label="my_log1p runtime")
plt.plot(test_val, inbuilt_runtime, label="inbuilt log runtime")
plt.plot(test_val, actual_runtime, label="inbuilt log1p runtime")
plt.legend(loc=2)
plt.title("Computation time in seconds")
# plot the relative error
plt.subplot(2, 1, 2)
plt.ylim((0, 0.000001))
plt.xlim((-0.01, 1.01))
plt.plot(test_val, my_errors, label="my_log1p error")
plt.plot(test_val, inbuilt_errors, label="inbuilt log errors")
plt.legend(loc=1)
plt.title("Relative Errors")
plt.show()
|
from copy import deepcopy
from readability import Document
from .abstract_extractor import AbstractExtractor
from ..article_candidate import ArticleCandidate
class ReadabilityExtractor(AbstractExtractor):
    """This class implements Readability as an article extractor. Readability is
    a subclass of Extractors and newspaper.Article.
    """

    def __init__(self):
        # BUG FIX: extract() reads self._name, but only self.name was set,
        # so article_candidate.extractor raised AttributeError. Set both
        # attributes so any existing readers of .name are unaffected.
        # NOTE(review): confirm AbstractExtractor does not define _name as a
        # property derived from name.
        self.name = "readability"
        self._name = "readability"

    def extract(self, item):
        """Creates a readability document and returns an ArticleCandidate containing article title and text.

        :param item: A NewscrawlerItem to parse.
        :return: ArticleCandidate containing the recovered article data.
        """
        # Deep-copy the raw response body so readability cannot mutate the item
        doc = Document(deepcopy(item['spider_response'].body))
        description = doc.summary()
        article_candidate = ArticleCandidate()
        article_candidate.extractor = self._name
        article_candidate.title = doc.short_title()
        article_candidate.description = description
        # Remaining fields are filled by the AbstractExtractor helper methods
        article_candidate.text = self._text(item)
        article_candidate.topimage = self._topimage(item)
        article_candidate.author = self._author(item)
        article_candidate.publish_date = self._publish_date(item)
        article_candidate.language = self._language(item)
        return article_candidate
|
import sys
sys.path.append('../STANDAR_LIBRARIES')
from URL_Lib import descargarResultadoData, descargarResultado, descargarResultadoDataSinBeautiful
from File_Lib import saveFile, saveFileExc, loadFile
import re
import requests
from bs4 import BeautifulSoup # pip install beautifulsoup4
import http.client
# Raise http.client's default 100-header cap: Mercadolibre responses carry
# more headers than that and would raise "got more than 100 headers".
http.client._MAXHEADERS = 1000
# NOTE(review): presumably raised for BeautifulSoup/prettify recursion on
# deep parse trees -- confirm it is still required.
sys.setrecursionlimit(1500)
#
#
# ********************************** Programa principal **********************************
#
#
def escanearProducto(url):
    """Scrape one Mercadolibre battery listing and append a ';'-joined CSV
    row (URL, title, price, brand, model, compatibility, capacity,
    description, photo URLs) to the module-global listaResultados, saving
    the whole file after every product.

    NOTE(review): relies on module globals (listaResultados, listaProductos)
    and live network I/O; kept byte-identical, comments only.
    """
    # Progress line: rows scraped so far, percent done, current URL.
    print( str(len(listaResultados)) + ' ' + str(len(listaResultados) / len(listaProductos) * 100) + ' ' +url)
    conn = http.client.HTTPSConnection("articulo.mercadolibre.com.mx")
    headers = { }
    cant =0  # NOTE(review): unused local
    conn.request("GET", url, headers=headers)
    res = conn.getresponse()
    data = res.read()
    # try:
    aaa = data.decode("utf-8")
    pagina = BeautifulSoup(aaa, 'html.parser');
    campos = []
    campos.append(url);
    try:
        # Title; inner double quotes are doubled for CSV safety.
        campos.append('"' + pagina.find_all('h1', class_='item-title__primary ')[0].text.strip().replace('"','\'\'') + '"');
    except:
        # No title element => not a product page; skip it entirely.
        return
    campos.append('"' + pagina.find_all('span', class_='price-tag')[0].find_all('span', class_='price-tag-symbol')[0]['content'].strip().replace('"','\'\'') + '"' );
    # Primary spec list items ("Marca: X", "Modelo: Y", ...).
    carater = pagina.find_all('li', class_='specs-item specs-item-primary')
    marca = ''
    for car in carater:
        if ('Marca' in car.text and 'bater' not in car.text):
            # NOTE(review): split(':')[0] keeps the label side of "Marca: X";
            # [1] (the value) looks intended -- confirm against real pages.
            marca= car.text.split(':')[0].replace('Marca','').strip() + '\n'
    campos.append( '"' + marca + '"' )
    modelo = ''
    for car in carater:
        if ('Modelo de la batería' in car.text):
            modelo= car.text.split(':')[0].replace('Modelo de la batería','').strip() + '\n'
    if (modelo == ''):
        # Fall back to the generic "Modelo" spec when the battery-specific
        # label was not present.
        for car in carater:
            if ('Modelo' in car.text):
                modelo= car.text.split(':')[0].replace('Modelo','').strip() + '\n'
    campos.append( '"' + modelo + '"' )
    compat = ''
    for car in carater:
        if ('Compatibilidad con notebooks' in car.text):
            compat= car.text.replace('Compatibilidad con notebooks','').replace('Compatible','').strip() + '\n'
    campos.append( '"' + compat + '"' )
    carga = ''
    for car in carater:
        if ('Capacidad de la batería' in car.text):
            carga= car.text.split(':')[0].replace('Capacidad de la batería','').strip() + '\n'
    campos.append( '"' + carga + '"' )
    try:
        # Normalize the HTML description into plain-ish text.
        desc = pagina.find_all('div', class_='item-description__text')[0].prettify()
        desc = desc.strip()
        desc = desc.replace('<br>','\n')
        desc = desc.replace('</br>','');
        # NOTE(review): the literals below look whitespace-collapsed by a
        # copy/paste; originally presumably '  ' -> ' ' (dedupe runs of
        # spaces). As shown, replacing ' ' with ' ' never terminates.
        while (' ' in desc):
            desc = desc.replace(' ',' ')
        while (' ' in desc):
            desc = desc.replace(' ',' ')
        while ('\n \n' in desc):
            desc = desc.replace('\n \n','\n')
        while ('\n\n' in desc):
            desc = desc.replace('\n\n','\n')
        desc = desc.replace('<p>','').replace('</p>','').replace('</div>','');
        desc = desc.replace('"','\'\'');
        desc = desc.replace('<div class=\'\'item-description__text\'\'>','')
    except:
        desc = ''
    aux = desc;
    descSuma = ''
    # Keep only description sections that list compatible models and are
    # not seller boilerplate.
    for aa in desc.split('======================'):
        if ( 'Para saber si este producto es compatible' not in aa and
            'NO ES NECESARIO CONFIRMAR' not in aa and
            'AQUI PUEDES VER TODOS NUESTROS PRODUCTOS A' not in aa and
            'Nuestras baterias son %100 nuevas' not in aa and
            'Para saber si esta bateria es compatible' not in aa and
            'Bateria Para ' not in aa and
            ( 'COMPATIBLE CON ' in aa or 'Compatible con ' in aa or 'REEMPLAZA A LOS SIGUIENTES' in aa or 'Numeros de parte compatibles' in aa ) and
            'Bateria Laptop' not in aa):
            descSuma += aa.strip() + '\n\n';
    # else:
    # print(aa.strip())
    if (descSuma.strip() == ''):
        # Nothing survived the filter: fall back to the full description.
        descSuma = aux;
    campos.append('"' + descSuma + '"' );
    # Collect product photo URLs hosted on mlstatic.
    fotosAUX = pagina.find_all('img');
    fotos = []  # NOTE(review): unused local
    for aa in fotosAUX:
        if ('https://http2.mlstatic.com/' in aa.prettify() and 'NQ_NP' in aa['src']):
            campos.append('"' + aa['src'] + '"' );
    listaResultados.append(';'.join(campos))
    # Persist after every product so an abort loses at most one row.
    saveFile('BATERIAS-Escaneado.csv', listaResultados)
    return;
# Driver: load the URL list and scan each product; Ctrl-C aborts cleanly.
listaProductos = []
listaDone = []  # NOTE(review): never used
loadFile('Baterias.csv', listaProductos)
listaResultados = []
for pagina in listaProductos:
    try:
        escanearProducto(pagina);
    except KeyboardInterrupt:
        print('The user abort the script.')
        sys.exit()
# except:
# dd = ''
#for prod in listaProductos:
# escanearProducto(prod);
# saveFile('pino.csv',listaResultados);
from tensorflow.keras.applications import InceptionV3
from tensorflow.keras import layers
from tensorflow import keras
def build_model_with_flatten(img_h, img_w):
    """
    Build a model with a flatten layer

    @type img_h: int
    @param img_h: Height of the images (nb of pixels)
    @type img_w: int
    @param img_w: Width of the images (nb of pixels)
    @rtype: Keras model
    @return: The model
    """
    # ImageNet-pretrained Inception backbone without its classifier head.
    backbone = InceptionV3(
        include_top=False,
        weights='imagenet',
        input_shape=(img_h, img_w, 3)
    )
    # Keep the pretrained convolutional weights fixed during training.
    backbone.trainable = False

    # Flatten the last feature map and attach a single sigmoid unit
    # (binary classification head).
    features = layers.Flatten()(backbone.layers[-1].output)
    prediction = layers.Dense(units=1, activation='sigmoid')(features)

    return keras.Model(backbone.inputs, prediction)
|
from django.db import models
# Create your models here.
class Customer(models.Model):
    """Customer record holding an optional device identifier."""

    # Free-text device name; optional both at the DB and form level.
    device = models.CharField(max_length=200, null=True, blank=True)

    def __str__(self):
        """Human-readable label used by the admin and shell."""
        return 'name: {}'.format(self.device)
|
import game
import random
from node import Node
import time
import math
import util
class MCTS(object):
    """Base routine of the MCTS (Monte Carlo Tree Search) method.

    Attributes:
        TIME: budget flag -- search for a fixed number of seconds.
        PLAYOUT: budget flag -- search for a fixed number of iterations.
        C: UCT exploration constant, 1.0/sqrt(2).
    """

    # flg to specify computational budget of MCTS search
    TIME = 0  # do MCTS search within specified seconds
    PLAYOUT = 1  # do MCTS search iteration for specified times

    C = 0.7071067811865475  # 1.0/sqrt(2)

    def __init__(self):
        self.show_progress = False
        self.budget = self.PLAYOUT   # which budget kind applies
        self.limit = 50              # budget amount (iterations or seconds)
        self.overflow_flg = False    # set when a node score exceeds 2**16
        self.ME = 0                  # player id the search optimizes for

    def set_playout(self, num):
        # Kept for backward compatibility; identical to set_limit().
        self.limit = num

    def set_limit(self, val):
        self.limit = val

    def set_budget(self, flg):
        self.budget = flg

    def within_budget(self, start_time, play_count):
        """ Check if reached to defined computational budget

        Args:
            start_time: search started time (need if budget==TIME)
            play_count: search iteration count (need if budget==PLAYOUT)
        """
        budget = play_count  # if budget == PLAYOUT
        if self.budget == self.TIME:
            ct = time.time()
            budget = ct - start_time  # if budget == TIME
        return budget < self.limit

    def start(self, start_state):
        """Start point of MCTS algorithm

        Args:
            start_state: game object which represents current game state
        Returns:
            (v_0, act_index): root node of the search tree and the index
            of the best action of the root node.
        """
        self.ME = start_state.next_player
        self.overflow_flg = False
        v_0 = Node(start_state.act_num)
        st = time.time()
        counter = 0
        # Standard MCTS loop: select/expand, simulate, backpropagate.
        while self.within_budget(st, counter) and not self.overflow_flg:
            if self.show_progress:
                util.show_progress(counter, self.limit)
            game_state = start_state.clone()
            v_l = self.tree_policy(v_0, game_state)
            delta = self.default_policy(v_l, game_state)
            self.backpropagation(v_l, delta)
            counter += 1
        if self.show_progress: util.fin_progress()
        # c=0: pure exploitation when picking the final move.
        act_index = self.best_child(v_0, 0)
        return v_0, act_index

    def tree_policy(self, v, game):
        """Descend tree until un-visited node is found.

        Descends the tree with updating state of passed game object
        until un-visited node(action) is found.
        If v is terminal state, then just return v.

        Args:
            v: start node to descend the tree.(mostly root node)
            game: game object which corresponds to the state of v
        Returns:
            v: the node which has un-visited child node or terminal node.
        """
        # The loop always exits via one of the two returns below
        # (the original had an unreachable trailing `return v`).
        while True:
            if v.is_terminal: return v
            if v.unvisited != 0: return self.expand(v, game)
            act_index = self.best_child(v, self.C)
            v = v.children[act_index]
            game.update(act_index)

    def expand(self, v, game):
        """Choose un-tried action and expand tree.

        Args:
            v: parent node which has un-visited child node.
            game: game state which corresponds to passed node v
        Returns:
            child_node: instance of new created node whose parent is v
        """
        act_index = 0
        while True:
            act_index = v.children.index(-1)  # get index of untried action
            v.unvisited -= 1
            if not game.is_legal(act_index):
                v.children[act_index] = -2  # -2 indicates this action is illegal
                # if all unvisited nodes are illegal one,
                # then go tree_policy process and descend the tree again.
                if v.unvisited == 0: return self.tree_policy(v, game)
            else:
                break
        # add new expanded node to the tree
        child_node = Node(game.act_num)
        child_node.parent = v
        is_terminal, score = game.is_terminal(self.ME, act_index)
        if is_terminal:
            child_node.is_terminal = True
            child_node.val = score
        game.update(act_index)
        v.children[act_index] = child_node
        return child_node

    def best_child(self, v, c):
        """Choose best child node of v(passed node).

        Args:
            v: choose best child node of v
            c: adjustment constant for calculate node score
        Returns:
            best_index: index of child node which gets highest score
        """
        is_first, best_val, best_index = True, 0, -1
        for i, child in enumerate(v.children):
            if child == -2 or child == -1: continue  # this child is illegal action
            val = self.calc_node_score(child, c)
            if val > best_val or is_first:
                best_val = val
                best_index = i
                is_first = False
            elif val == best_val:
                # Fix: use the module-level RNG instead of instantiating
                # (and re-seeding) a fresh random.Random() on every tie.
                if random.getrandbits(1):  # probability of 1/2.
                    best_index = i
        return best_index

    def calc_node_score(self, node, c):
        """Calculate score of passed node

        Now score is calculated by UCT algorithm with
        adjustment constant C = 1.0/sqrt(2)
        """
        exploitation_term = 1.0*node.val/node.update
        exploration_term = c*math.sqrt(\
                2*math.log(node.parent.update)/node.update)
        score = exploitation_term + exploration_term
        # Guard against runaway scores; stops the main search loop.
        if score > (1<<16): self.overflow_flg = True
        return score

    def default_policy(self, v_l, game):
        """ do the simulation until reaches the end state.

        Args:
            v_l: start point node of simulation
            game: start point game state of simulation
        Returns:
            result score of simulation which defined in game object.
        """
        if v_l.is_terminal: return v_l.val
        return game.simulation(self.ME)

    def backpropagation(self, v_l, delta):
        """backpropagates simulation result

        Args:
            v_l: start point node of backpropagation
            delta: simulation result score to backpropagetes
        """
        cp = v_l
        while cp:
            cp.update += 1
            cp.val += delta
            delta = -delta  # do negamax here
            cp = cp.parent
|
def tr(srcstr, dststr, string, lowup=0):
    """Replace every occurrence of srcstr with dststr in string.

    With lowup=1 all three arguments are lower-cased first, so the match is
    case-insensitive and the result comes back in lower case.
    """
    if lowup != 1:
        return string.replace(srcstr, dststr)
    return string.lower().replace(srcstr.lower(), dststr.lower())
# Demo: case-insensitive replace; prints "mnodef".
print(tr('Abc', 'Mno', 'abcDef', lowup=1))
|
from datetime import timedelta, datetime
from email.mime.text import MIMEText
from smtplib import SMTP_SSL as SMTP
from sys import exc_info
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from emailr.models import Event
from emailr.settings.config import DB_USERNAME, DB_PASSWORD, DB_ENDPOINT, \
DB_DATABASE, EMAIL_SERVER, EMAIL_SENDER, EMAIL_USERNAME, EMAIL_PASSWORD
'''Database model for the application.'''
# The code below was mostly copied from:
# http://flask.pocoo.org/docs/0.12/patterns/sqlalchemy/#declarative
# engine = create_engine('sqlite:///emailr.db', convert_unicode=True)
# Build the PostgreSQL connection URL from the config module's values.
engine = create_engine('postgresql://' + DB_USERNAME + ':' + DB_PASSWORD + '@'
                       + DB_ENDPOINT + '/' + DB_DATABASE,
                       client_encoding='utf8')
# Thread-local session factory bound to the engine; commits are explicit.
db_session = scoped_session(sessionmaker(autocommit=False,
                                         autoflush=False, bind=engine))
def update_next_utc(event_obj):
    """Updates the next_utc value in the database for the event_obj passed."""
    # Recompute the next occurrence by building a throwaway Event with the
    # same schedule and reading the next_utc it derives.
    recomputed = Event(event_obj.local_weekday, event_obj.local_time.hour,
                       event_obj.local_time.minute, event_obj.subject,
                       event_obj.user.timezone_str, event_obj.user.id).next_utc
    row = db_session.query(Event).filter(Event.id == event_obj.id).one()
    row.next_utc = recomputed
    db_session.commit()
def day_int_to_text(weekday):
    """Map a weekday integer (0 = Monday ... 6 = Sunday, datetime's
    weekday() convention) to its English name."""
    return ('Monday',
            'Tuesday',
            'Wednesday',
            'Thursday',
            'Friday',
            'Saturday',
            'Sunday')[weekday]
def send_message(reminder):
    """Sends email for the reminder object passed to it."""
    destination = reminder.user.email
    # typical values for text_subtype are plain, html, xml
    text_subtype = 'html'
    subject = '[jog.gy] ' + reminder.subject
    local_weekday = day_int_to_text(reminder.local_weekday) + 's'
    local_time = reminder.local_time.strftime("%I:%M %p")
    # Doubled braces {{ }} keep the CSS literal through str.format;
    # only {d}, {t} and {s} are substituted.
    body_html = '''<html>
    <head>
    <style type="text/css">
    @media screen {{
    #wrapper {{
    min-width: 300px;
    max-width: 600px;
    font-size: 1em;
    line-height: 1.7em;
    background-color: #f5f5f5;
    padding: 60px;
    }}
    p {{
    color: #555;
    }}
    #logo-wrapper {{
    text-align: center;
    }}
    #logo-image {{
    width: 200px;
    height: auto;
    }}
    a {{
    text-decoration: none;
    color: #247ba0;
    }}
    a:hover {{
    text-decoration: underline;
    text-decoration-style: dotted;
    }}
    }}
    </style>
    </head>
    <body>
    <div id="wrapper">
    <p id="logo-wrapper"><img
    src="https://s3.amazonaws.com/jog.gy/img/joggy.png"
    id="logo-image"></img></p>
    <p>Hi there,</p>
    <p>You requested to have <a style="color: #555">jog.gy</a>
    send you reminder emails on {d} at {t} with the subject
    <strong>"{s}"</strong>.</p>
    <p>Have a nice day,
    <br/>Your friends at <a href="http://jog.gy/">jog.gy</a></p>
    </div>
    </body>
    </html>'''.format(d=local_weekday, t=local_time, s=reminder.subject)
    # NOTE(review): body_text is built but never attached to the message;
    # the mail goes out HTML-only. A multipart/alternative with this text
    # part looks intended -- confirm before changing.
    body_text = 'Hi there,\r\nYou requested to have jog.gy send you ' \
                'reminder emails on {d} at {t} with the subject "{s}".\r\n' \
                'So here\'s your reminder.\r\nHave a nice ' \
                'day,\r\nYour friends at jog.gy'.format(d=local_weekday,
                                                        t=local_time,
                                                        s=reminder.subject)
    try:
        msg = MIMEText(body_html, text_subtype)
        msg['Subject'] = subject
        msg['From'] = EMAIL_SENDER
        msg['To'] = reminder.user.email
        conn = SMTP(EMAIL_SERVER)
        conn.set_debuglevel(False)
        conn.login(EMAIL_USERNAME, EMAIL_PASSWORD)
        try:
            conn.sendmail(EMAIL_SENDER, destination, msg.as_string())
            # Only advance the schedule once the send actually succeeded.
            update_next_utc(reminder)
        finally:
            conn.quit()
    # NOTE(review): bare except swallows everything (including
    # KeyboardInterrupt); a narrower handler would be safer.
    except:
        _, err, _ = exc_info()
        print('email error:', err)
def handle_pending(event, context):
    """Finds pending reminders and sends them to send_message().

    The one-second margin catches reminders that come due while this
    invocation is running.
    """
    cutoff = datetime.utcnow() + timedelta(seconds=1)
    due = db_session.query(Event).filter(Event.next_utc < cutoff)
    for reminder in due:
        send_message(reminder)
# Allow running the worker directly (outside the Lambda runtime).
if __name__ == "__main__":
    handle_pending(None, None)
|
import torch.utils.data as data
from PIL import Image
import os
import os.path
import numpy as np
class CocoDetection(data.Dataset):
    """`MS Coco Detection <http://mscoco.org/dataset/#detections-challenge2016>`_ Dataset.

    Args:
        root (string): Root directory where images are downloaded to.
        annFile (string): Path to json annotation file.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.ToTensor``
        img_and_target_transform (callable, optional): A joint transform applied to
            ``(image, target)`` together, before ``transform``; must return a
            3-tuple whose third element is discarded.
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
    """
    def __init__(self, root, annFile, transform=None, img_and_target_transform=None, target_transform=None):
        # Imported lazily so the module can be imported without pycocotools.
        from pycocotools.coco import COCO
        self.root = root
        self.coco = COCO(annFile)
        self.ids = list(self.coco.imgs.keys())
        self.transform = transform
        self.target_transform = target_transform
        self.img_and_target_transform = img_and_target_transform

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: Tuple (image, target). target is the object returned by ``coco.loadAnns``.
        """
        coco = self.coco
        img_id = self.ids[index]
        ann_ids = coco.getAnnIds(imgIds=img_id)
        target = coco.loadAnns(ann_ids)

        path = coco.loadImgs(img_id)[0]['file_name']
        img = Image.open(os.path.join(self.root, path)).convert('RGB')
        # Fix: removed leftover debug print of the image shape -- it spammed
        # stdout on every sample and forced a needless numpy conversion.
        if self.img_and_target_transform is not None:
            img, target, _ = self.img_and_target_transform(img, target)
        if self.transform is not None:
            img = self.transform(img)

        return img, target

    def __len__(self):
        # One datapoint per image id in the annotation file.
        return len(self.ids)

    def __repr__(self):
        # NOTE(review): img_and_target_transform is not reported here.
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
        fmt_str += ' Root Location: {}\n'.format(self.root)
        tmp = ' Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        tmp = ' Target Transforms (if any): '
        fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        return fmt_str
#TLE
from functools import reduce
from typing import List
class Solution:
    def numSubarrayProductLessThanK(self, nums: List[int], k: int) -> int:
        """Count contiguous subarrays whose product is strictly below k.

        Brute force (TLE on large inputs): single elements are counted in
        one pass, then every window of each longer length is multiplied
        out in full.
        """
        count = sum(1 for value in nums if value < k)
        size = len(nums)
        for length in range(2, size + 1):
            for lo in range(size - length + 1):
                product = 1
                for value in nums[lo:lo + length]:
                    product *= value
                if product < k:
                    count += 1
        return count
"""
Time Complexity = O(N)
Space Complexity = O(1)
"""
class Solution:
    def numSubarrayProductLessThanK(self, nums: List[int], k: int) -> int:
        """Count contiguous subarrays whose product is strictly less than k.

        Sliding window: extend ``end``; while the window product is >= k,
        shrink from ``start``. Every window ending at ``end`` then
        contributes ``end - start + 1`` valid subarrays.

        Time Complexity = O(N), Space Complexity = O(1).
        """
        # k <= 1 can never be exceeded by a product of positive ints.
        if not nums or k <= 1:
            return 0
        start = 0
        prod = 1
        count = 0
        for end, val in enumerate(nums):
            prod *= val
            while prod >= k:
                # Fix: floor division keeps prod an exact int. The original
                # used `/=`, which converts to float and loses precision
                # once the running product exceeds 2**53.
                prod //= nums[start]
                start += 1
            count += end - start + 1
        return count
return count |
#!/usr/bin/env python
import os,sys,gzip,re,json,pickle
from datetime import datetime
"""Example of the log lines we are looking for:
3bfcc1ecaa7b79a1f8ab596ecb0b59b89d08560e 2012-11-18 00:00:56.578 21767 21767 I PhoneLabSystemAnalysis-Location: {"Action":"edu.buffalo.cse.phonelab.LOCATION_UPDATE","Location":"Location[mProvider=network,mTime=1353214856559,mLatitude=43.008654,mLongitude=-78.8087041,mHasAltitude=0.0,mHasSpeed=false,mSpeed=0.0,mHasBearing=false,mBearing=0.0,mHasAccuracy=true,mAccuracy=2033.0,mExtras=Bundle[mParcelledData.dataSize=212]]","LogFormatVersion":"1.0"}
7d27bc4cea99ce72041b14640511c6b233fab832 2012-11-18 00:01:05.843 6805 6805 I PhoneLabSystemAnalysis-Telephony: {"State":"DATA_DISCONNECTED","Action":"onDataConnectionStateChanged","LogFormatVersion":"1.0"}
6c0ef8dc70de4238bb59c545cf15d1b07d46de0a 2012-11-18 00:00:25.801 1098 1098 I PhoneLabSystemAnalysis-Wifi: {"State":"CONNECTING","Action":"android.net.wifi.STATE_CHANGE","LogFormatVersion":"1.0"}"""
# NOTE: this script is Python 2 (print >>stderr, `except Exception, e`,
# dict.has_key below); it will not run under Python 3.
# One verbose regex per PhoneLab log line: device hash, timestamp fields,
# pid/tid, level, tag, and the trailing JSON payload.
LOGLINE_PATTERN = re.compile(r"""^
    (?P<hashed_ID>\w{40})\s+
    (?P<year>\d+)-(?P<month>\d+)-(?P<day>\d+)\s+(?P<hour>\d+):(?P<minute>\d+):(?P<second>\d+)\.(?P<millisecond>\d+)\s+
    (?P<process_id>\d+)\s+(?P<thread_id>\d+)\s+(?P<log_level>\w)\s+
    (?P<log_tag>.+?):\s+(?P<json>.*?)$""", re.VERBOSE)
# Example of location info:
# "Location[mProvider=network,mTime=1353214856559,mLatitude=43.008654,mLongitude=-78.8087041,mHasAltitude=0.0,mHasSpeed=false,mSpeed=0.0,mHasBearing=false,mBearing=0.0,mHasAccuracy=true,mAccuracy=2033.0,mExtras=Bundle[mParcelledData.dataSize=212]]"
LOCATION_PATTERN = re.compile(r"""^Location\[(?P<loc_data>.*?),mExtras.*?\]$""", re.VERBOSE)
if len(sys.argv) <= 1:
    # NOTE(review): the message talks about a DATA environment variable,
    # but the code actually reads the archive path from sys.argv[1].
    print >>sys.stderr, "Please define a DATA environment variable that points to a compressed PhoneLab output archive."
    sys.exit(-1)
# locations[device] = (start_time, end_time, geo_code)
locations = {}
# Just consider connected time for Wifi and Mobile
# Save time while changing CONNECTING to DISCONNECTED for Wifi
# Save time while changing DATA_CONNECTED to DATA_DISCONNECTED for mobile
# mobiles[device] = (start_time, end_time)
# wifies[device] = (start_time, end_time)
mobiles = {}
wifies = {}
# Parse every (de-duplicated) line of the gzipped archive and bucket the
# three tags of interest into locations / mobiles / wifies per device.
# NOTE(review): set() also discards line ordering; the per-device lists
# are re-sorted by timestamp afterwards, so that is presumably fine.
for line in set(gzip.open(sys.argv[1], 'rb')):
    line = line.strip()
    logline_match = LOGLINE_PATTERN.match(line)
    if logline_match == None:
        continue
    if logline_match.group('log_tag') != 'PhoneLabSystemAnalysis-Location' and \
        logline_match.group('log_tag') != 'PhoneLabSystemAnalysis-Telephony' and \
        logline_match.group('log_tag') != 'PhoneLabSystemAnalysis-Wifi':
        continue
    try:
        # The log millisecond field becomes datetime microseconds (*1000).
        device_time = datetime(int(logline_match.group('year')),
                        int(logline_match.group('month')),
                        int(logline_match.group('day')),
                        int(logline_match.group('hour')),
                        int(logline_match.group('minute')),
                        int(logline_match.group('second')),
                        int(logline_match.group('millisecond')) * 1000)
        log_json = json.loads(logline_match.group('json'))
    except Exception, e:
        print >>sys.stderr, "Error processsing %s: %s" % (line, e)
        continue
    device = logline_match.group('hashed_ID')
    if logline_match.group('log_tag') == 'PhoneLabSystemAnalysis-Location':
        if not 'Location' in log_json:
            continue
        location_match = LOCATION_PATTERN.match(log_json['Location'])
        if location_match == None:
            continue
        # "mProvider=network,mTime=..." -> {'mProvider': 'network', ...}
        location = dict(u.split("=") for u in location_match.group('loc_data').split(","))
        if not locations.has_key(device):
            locations[device] = []
        locations[device].append((device_time, location['mLatitude'], location['mLongitude']))
    elif logline_match.group('log_tag') == 'PhoneLabSystemAnalysis-Telephony':
        if not 'State' in log_json or log_json['Action'] != "onDataConnectionStateChanged":
            continue
        if not mobiles.has_key(device):
            mobiles[device] = []
        state = log_json['State']
        mobiles[device].append((device_time, state))
    elif logline_match.group('log_tag') == 'PhoneLabSystemAnalysis-Wifi':
        if not 'State' in log_json or log_json['Action'] != "android.net.wifi.STATE_CHANGE":
            continue
        if not wifies.has_key(device):
            wifies[device] = []
        state = log_json['State']
        wifies[device].append((device_time, state))
    else:
        continue
#sorting by date for manipulating mobiles and wifies
for device in locations.keys():
    locations[device] = sorted(locations[device], key=lambda i: i[0])
for device in mobiles.keys():
    mobiles[device] = sorted(mobiles[device], key=lambda i: i[0])
for device in wifies.keys():
    wifies[device] = sorted(wifies[device], key=lambda i: i[0])
#change format of mobiles and wifies data set
#mid_mobiles[device] = (start_time, end_time)
#mid_wifies[device] = (start_time, end_time)
# Pair each mobile CONNECTED event with the next DISCONNECTED event to
# build connected intervals; unmatched events are dropped.
mid_mobiles = {}
for device in mobiles.keys():
    if not mid_mobiles.has_key(device):
        mid_mobiles[device] = []
    set_record = False
    for d in mobiles[device]:
        if d[1] == 'DATA_CONNECTED':
            set_record = True
            start_time = d[0]
            continue
        elif d[1] == 'DATA_DISCONNECTED':
            if set_record:
                end_time = d[0]
                mid_mobiles[device].append((start_time, end_time))
                set_record = False
            continue
        else:
            continue
# Same pairing for Wifi, using CONNECTING/DISCONNECTED as the endpoints.
mid_wifies = {}
for device in wifies.keys():
    if not mid_wifies.has_key(device):
        mid_wifies[device] = []
    set_record = False
    for d in wifies[device]:
        if d[1] == 'CONNECTING':
            set_record = True
            start_time = d[0]
            continue
        elif d[1] == 'DISCONNECTED':
            if set_record:
                end_time = d[0]
                mid_wifies[device].append((start_time, end_time))
                set_record = False
            continue
        else:
            continue
#network_location[device] = (time, geocode, network_type)
# Tag every location fix that falls inside a connected interval with the
# network type that was active at that moment.
network_locations = {}
for device in mid_mobiles.keys():
    if not locations.has_key(device):
        continue
    if not network_locations.has_key(device):
        network_locations[device] = []
    for time in mid_mobiles[device]:
        for location in locations[device]:
            if time[0] <= location[0] <= time[1]:
                network_locations[device].append((location[0], location[1], location[2], "mobile"))
            else:
                continue
for device in mid_wifies.keys():
    if not locations.has_key(device):
        continue
    if not network_locations.has_key(device):
        network_locations[device] = []
    for time in mid_wifies[device]:
        for location in locations[device]:
            if time[0] <= location[0] <= time[1]:
                network_locations[device].append((location[0], location[1], location[2], "wifi"))
            else:
                continue
#sorting by date for manipulating mobiles and wifies
for device in network_locations.keys():
    network_locations[device] = sorted(network_locations[device], key=lambda i: i[0])
#save dictionary to file
#pickle.dump(final_traffics, open('data.dat', 'wb'), -1)
|
#!/usr/bin/env python3
import math
def solve_quadratic(a, b, c):
    """Return the two roots of a*x**2 + b*x + c = 0.

    Real roots come back as floats (same as before); when the discriminant
    is negative, the conjugate complex roots are returned instead of
    letting math.sqrt raise ValueError (the original crashed on any input
    with complex roots -- including this script's own main() call).
    Raises ZeroDivisionError if a == 0.
    """
    d = b * b - 4 * a * c
    # math.sqrt rejects negatives; build the imaginary square root by hand
    # so no extra import is needed.
    root = math.sqrt(d) if d >= 0 else 1j * math.sqrt(-d)
    return (-b + root) / (2 * a), (-b - root) / (2 * a)
def main():
    # NOTE(review): with math.sqrt, solve_quadratic(1, 2, 3) raises
    # ValueError (negative discriminant), and the result is discarded
    # anyway -- presumably this was a quick smoke test; confirm intent.
    solve_quadratic(1,2,3)

if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dict Methods
==============================================================================
"""
import time
from pyclopedia.deco import run_if_is_main
# One million distinct benchmark keys "n0" .. "n999999". Fixes the original
# ["n%s" for i in range(...)], which never interpolated i and so produced a
# million copies of the literal "n%s" (collapsing every dict to one key).
keys = ["n%s" % i for i in range(1000000)]
@run_if_is_main(__name__)
def fromkeys():
    """dict.fromkeys is a very fast way to initial a dict that all keys
    map to the same value. But the value should not be a mutable value if
    you will edit the value.
    """
    # Fix: time.clock() was deprecated in 3.3 and removed in Python 3.8;
    # time.perf_counter() is its documented replacement for benchmarking.
    st = time.perf_counter()
    d = {key: 0 for key in keys}
    e1 = time.perf_counter() - st

    st = time.perf_counter()
    d = dict.fromkeys(keys, 0)
    e2 = time.perf_counter() - st

    # NOTE(review): a timing assert is inherently flaky on loaded machines.
    assert e1 > e2

fromkeys()
@run_if_is_main(__name__)
def get():
    """Try to get one item, if key not exist, then return default value.
    """
    d = dict(a=1, b=2)
    assert d.get("a") == 1
    assert d.get("b") == 2
    # `is None` is the idiomatic None test (PEP 8); `== None` can be
    # fooled by objects with a custom __eq__.
    assert d.get("c") is None

get()
@run_if_is_main(__name__)
def setdefault():
    """dict.setdefault inserts the default only for missing keys;
    existing entries are left untouched.
    """
    sample = dict(a=1, b=2)
    for key in ("a", "b", "c"):
        sample.setdefault(key, 3)
    assert sample == {"a": 1, "b": 2, "c": 3}

setdefault()
@run_if_is_main(__name__)
def update():
    """dict.update merges another mapping in, overwriting duplicate keys."""
    merged = dict(a=1, b=1)
    merged.update(dict(b=2, c=3))
    assert merged == {"a": 1, "b": 2, "c": 3}

update()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.