blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c605b9dc5167844975b9187ae3acebe8834279c4 | 0ec0fa7a6dc0659cc26113e3ac734434b2b771f2 | /4.refactored/log/2016-10-05@17:04/minibatch.py | 886ce8f9f587e873a21c60e3789b11ffa3ac5c45 | [] | no_license | goldleaf3i/3dlayout | b8c1ab3a21da9129829e70ae8a95eddccbf77e2f | 1afd3a94a6cb972d5d92fe373960bd84f258ccfe | refs/heads/master | 2021-01-23T07:37:54.396115 | 2017-03-28T10:41:06 | 2017-03-28T10:41:06 | 86,431,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,170 | py | from __future__ import division
import datetime as dt
import numpy as np
import util.layout as lay
import util.GrafoTopologico as gtop
import util.transitional_kernels as tk
import util.MappaSemantica as sema
from object import Segmento as sg
from util import pickle_util as pk
from util import accuracy as ac
from util import layout as lay
from util import disegna as dsg
from object import Superficie as fc
from object import Spazio as sp
import parameters as par
import pickle
import os
import glob
import shutil
import time
import cv2
import warnings
warnings.warn("Settare i parametri del lateralLine e cvThresh")
def start_main(parametri_obj, path_obj):
#----------------------------1.0_LAYOUT DELLE STANZE----------------------------------
#------inizio layout
#leggo l'immagine originale in scala di grigio e la sistemo con il thresholding
img_rgb = cv2.imread(path_obj.metricMap)
img_ini = img_rgb.copy() #copio l'immagine
# 127 per alcuni dati, 255 per altri
ret,thresh1 = cv2.threshold(img_rgb,parametri_obj.cv2thresh,255,cv2.THRESH_BINARY)#prova
#------------------1.1_CANNY E HOUGH PER TROVARE MURI---------------------------------
walls , canny = lay.start_canny_ed_hough(thresh1,parametri_obj)
#walls , canny = lay.start_canny_ed_hough(img_rgb,parametri_obj)
if par.DISEGNA:
#disegna mappa iniziale, canny ed hough
dsg.disegna_map(img_rgb,filepath = path_obj.filepath )
dsg.disegna_canny(canny,filepath = path_obj.filepath)
dsg.disegna_hough(img_rgb,walls,filepath = path_obj.filepath)
lines = lay.flip_lines(walls, img_rgb.shape[0]-1)
walls = lay.crea_muri(lines)
if par.DISEGNA:
#disegno linee
dsg.disegna_segmenti(walls)#solo un disegno poi lo elimino
#------------1.2_SETTO XMIN YMIN XMAX YMAX DI walls-----------------------------------
#tra tutti i punti dei muri trova l'ascissa e l'ordinata minima e massima.
estremi = sg.trova_estremi(walls)
xmin = estremi[0]
xmax = estremi[1]
ymin = estremi[2]
ymax = estremi[3]
offset = 20
xmin -= offset
xmax += offset
ymin -= offset
ymax += offset
#-------------------------------------------------------------------------------------
#---------------1.3_CONTORNO ESTERNO--------------------------------------------------
(contours, vertici) = lay.contorno_esterno(img_rgb, parametri_obj, path_obj)
if par.DISEGNA:
dsg.disegna_contorno(vertici,xmin,ymin,xmax,ymax,filepath = path_obj.filepath)
#-------------------------------------------------------------------------------------
#---------------1.4_MEAN SHIFT PER TROVARE CLUSTER ANGOLARI---------------------------
(indici, walls, cluster_angolari) = lay.cluster_ang(parametri_obj.h, parametri_obj.minOffset, walls, diagonali= parametri_obj.diagonali)
if par.DISEGNA:
#dsg.disegna_cluster_angolari(walls, cluster_angolari, filepath = path_obj.filepath,savename = '5b_cluster_angolari')
dsg.disegna_cluster_angolari_corretto(walls, cluster_angolari, filepath = path_obj.filepath,savename = '5b_cluster_angolari')
#-------------------------------------------------------------------------------------
#---------------1.5_CLUSTER SPAZIALI--------------------------------------------------
#questo metodo e' sbagliato, fai quella cosa con il hierarchical clustering per classificarli meglio.e trovare in sostanza un muro
#cluster_spaziali = lay.cluster_spaz(parametri_obj.minLateralSeparation, walls)
#inserisci qui il nuovo Cluster_spaz
nuovo_clustering = 2
#in walls ci sono tutti i segmenti
if nuovo_clustering == 1:
cluster_spaziali = lay.cluster_spaz(parametri_obj.minLateralSeparation, walls)#metodo di matteo
elif nuovo_clustering ==2:
cluster_mura = lay.get_cluster_mura(walls, cluster_angolari, parametri_obj)#metodo di valerio
cluster_mura_senza_outliers = []
for c in cluster_mura:
if c!=-1:
cluster_mura_senza_outliers.append(c)
# ottengo gli outliers
# outliers = []
# for s in walls:
# if s.cluster_muro == -1:
# outliers.append(s)
# dsg.disegna_segmenti(outliers, savename = "outliers")
#ora che ho un insieme di cluster relativi ai muri voglio andare ad unire quelli molto vicini
#ottengo i rappresentanti dei cluster (tutti tranne gli outliers)
#segmenti_rappresentanti = lay.get_rappresentanti(walls, cluster_mura)
segmenti_rappresentanti = lay.get_rappresentanti(walls, cluster_mura_senza_outliers)
if par.DISEGNA:
dsg.disegna_segmenti(segmenti_rappresentanti,filepath = path_obj.filepath, savename = "5c_segmenti_rappresentanti")
#classifico i rappresentanti
#qui va settata la soglia con cui voglio separare i cluster muro
#segmenti_rappresentanti = segmenti_rappresentanti
segmenti_rappresentanti = sg.spatialClustering(parametri_obj.sogliaLateraleClusterMura, segmenti_rappresentanti)
#in questo momento ho un insieme di segmenti rappresentanti che hanno il cluster_spaziale settato correttamente, ora setto anche gli altri che hanno lo stesso cluster muro
cluster_spaziali = lay.new_cluster_spaziale(walls, segmenti_rappresentanti, parametri_obj)
#gestire gli outliers
#in sostanza devo unire al cluster piu' vicino ogni segmento di outlier
#lay.set_cluster_spaziale_to_outliers(walls, outliers, segmenti_rappresentanti)
# print cluster_spaziali
# print len(cluster_spaziali)
# print len(set(cluster_spaziali))
#
# angolari = []
# for ang in set(cluster_angolari):
# row =[]
# for s in walls:
# if s.cluster_angolare == ang:
# row.append(s)
# angolari.append(row)
#
# cluster=[]
# for s in angolari[0]:
# cluster.append(s.cluster_spaziale)
# print len(set(cluster))
#
# cluster=[]
# for s in angolari[1]:
# cluster.append(s.cluster_spaziale)
# print len(set(cluster))
if par.DISEGNA:
dsg.disegna_cluster_spaziali(cluster_spaziali, walls,filepath = path_obj.filepath)
dsg.disegna_cluster_mura(cluster_mura, walls,filepath = path_obj.filepath, savename= '5d_cluster_mura')
#-------------------------------------------------------------------------------------
#-------------------1.6_CREO EXTENDED_LINES-------------------------------------------
(extended_lines, extended_segments) = lay.extend_line(cluster_spaziali, walls, xmin, xmax, ymin, ymax,filepath = path_obj.filepath)
if par.DISEGNA:
dsg.disegna_extended_segments(extended_segments, walls,filepath = path_obj.filepath)
#-------------------------------------------------------------------------------------
#-------------1.7_CREO GLI EDGES TRAMITE INTERSEZIONI TRA EXTENDED_LINES--------------
edges = sg.crea_edges(extended_segments)
#-------------------------------------------------------------------------------------
#----------------------1.8_SETTO PESI DEGLI EDGES-------------------------------------
edges = sg.setPeso(edges, walls)
#-------------------------------------------------------------------------------------
#----------------1.9_CREO LE CELLE DAGLI EDGES----------------------------------------
celle = fc.crea_celle(edges)
#-------------------------------------------------------------------------------------
#----------------CLASSIFICO CELLE-----------------------------------------------------
global centroid
#verificare funzioni
if par.metodo_classificazione_celle ==1:
print "1.metodo di classificazione ", par.metodo_classificazione_celle
(celle, celle_out, celle_poligoni, indici, celle_parziali, contorno, centroid, punti) = lay.classificazione_superfici(vertici, celle)
elif par.metodo_classificazione_celle==2:
print "2.metodo di classificazione ", par.metodo_classificazione_celle
#sto classificando le celle con il metodo delle percentuali
(celle_out, celle, centroid, punti,celle_poligoni, indici, celle_parziali) = lay.classifica_celle_con_percentuale(vertici, celle, img_ini)
#-------------------------------------------------------------------------------------
#--------------------------POLIGONI CELLE---------------------------------------------
(celle_poligoni, out_poligoni, parz_poligoni, centroid) = lay.crea_poligoni_da_celle(celle, celle_out, celle_parziali)
#ora vorrei togliere le celle che non hanno senso, come ad esempio corridoi strettissimi, il problema e' che lo vorrei integrare con la stanza piu' vicina ma per ora le elimino soltanto
#RICORDA: stai pensando solo a celle_poligoni
#TODO: questo metodo non funziona benissimo(sbagli ad eliminare le celle)
#celle_poligoni, celle = lay.elimina_celle_insensate(celle_poligoni,celle, parametri_obj)#elimino tutte le celle che hanno una forma strana e che non ha senso siano stanze
#-------------------------------------------------------------------------------------
#------------------CREO LE MATRICI L, D, D^-1, ED M = D^-1 * L------------------------
(matrice_l, matrice_d, matrice_d_inv, X) = lay.crea_matrici(celle)
#-------------------------------------------------------------------------------------
#----------------DBSCAN PER TROVARE CELLE NELLA STESSA STANZA-------------------------
clustersCelle = lay.DB_scan(parametri_obj.eps, parametri_obj.minPts, X, celle_poligoni)
#questo va disegnato per forza perche' restituisce la lista dei colori
if par.DISEGNA:
colori, fig, ax = dsg.disegna_dbscan(clustersCelle, celle, celle_poligoni, xmin, ymin, xmax, ymax, edges, contours,filepath = path_obj.filepath)
else:
colori = dsg.get_colors(clustersCelle)
#-------------------------------------------------------------------------------------
#------------------POLIGONI STANZE(spazio)--------------------------------------------
stanze, spazi = lay.crea_spazio(clustersCelle, celle, celle_poligoni, colori, xmin, ymin, xmax, ymax, filepath = path_obj.filepath)
if par.DISEGNA:
dsg.disegna_stanze(stanze, colori, xmin, ymin, xmax, ymax,filepath = path_obj.filepath)
#-------------------------------------------------------------------------------------
#--------------------------------fine layout------------------------------------------
#------------------------------GRAFO TOPOLOGICO---------------------------------------
#costruisco il grafo
(stanze_collegate, doorsVertices, distanceMap, points, b3) = gtop.get_grafo(path_obj.metricMap, stanze, estremi, colori, parametri_obj)
(G, pos) = gtop.crea_grafo(stanze, stanze_collegate, estremi, colori)
#ottengo tutte quelle stanze che non sono collegate direttamente ad un'altra, con molta probabilita' quelle non sono stanze reali
stanze_non_collegate = gtop.get_stanze_non_collegate(stanze, stanze_collegate)
#ottengo le stanze reali, senza tutte quelle non collegate
stanze_reali, colori_reali = lay.get_stanze_reali(stanze, stanze_non_collegate, colori)
if par.DISEGNA:
#sto disegnando usando la lista di colori originale, se voglio la lista della stessa lunghezza sostituire colori con colori_reali
dsg.disegna_stanze(stanze_reali, colori_reali, xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = '8_Stanze_reali')
#------------------------------------------------------------------------------------
if par.DISEGNA:
dsg.disegna_distance_transform(distanceMap, filepath = path_obj.filepath)
dsg.disegna_medial_axis(points, b3, filepath = path_obj.filepath)
dsg.plot_nodi_e_stanze(colori,estremi, G, pos, stanze, stanze_collegate, filepath = path_obj.filepath)
#-----------------------------fine GrafoTopologico------------------------------------
#-------------------------------------------------------------------------------------
#DA QUI PARTE IL NUOVO PEZZO
#creo l'oggetto plan che contiene tutti gli spazi, ogni stanza contiene tutte le sue celle, settate come out, parziali o interne.
#setto gli spazi come out se non sono collegati a nulla.
spazi = sp.get_spazi_reali(spazi, stanze_reali) #elimino dalla lista di oggetti spazio quegli spazi che non sono collegati a nulla.
#trovo le cellette parziali
sp.set_cellette_parziali(spazi, contorno)#trovo le cellette di uno spazio che sono parziali
spazi = sp.trova_spazi_parziali(spazi)#se c'e' almeno una celletta all'interno di uno spazio che e' parziale, allora lo e' tutto lo spazio.
print "le celle parziali sono:", celle_parziali
print "dovrebbe essere vuoto"
exit()
# for s in spazi:
# print s.parziale
# print s.out
# print"---------------"
dsg.disegna_spazi(spazi, colori, xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = '13_spazi')
#------------------------CREO PICKLE--------------------------------------------------
#creo i file pickle per il layout delle stanze
print("creo pickle layout")
pk.crea_pickle((stanze, clustersCelle, estremi, colori, spazi, stanze_reali, colori_reali), path_obj.filepath_pickle_layout)
print("ho finito di creare i pickle del layout")
#creo i file pickle per il grafo topologico
print("creo pickle grafoTopologico")
pk.crea_pickle((stanze, clustersCelle, estremi, colori), path_obj.filepath_pickle_grafoTopologico)
print("ho finito di creare i pickle del grafo topologico")
#-----------------------CALCOLO ACCURACY----------------------------------------------
#L'accuracy e' da controllare, secondo me non e' corretta.
if par.mappa_completa:
#funzione per calcolare accuracy fc e bc
print "Inizio a calcolare metriche"
results, stanze_gt = ac.calcola_accuracy(path_obj.nome_gt,estremi,stanze_reali, path_obj.metricMap,path_obj.filepath, parametri_obj.flip_dataset)
if par.DISEGNA:
dsg.disegna_grafici_per_accuracy(stanze, stanze_gt, filepath = path_obj.filepath)
print "Fine calcolare metriche"
else:
#setto results a 0, giusto per ricordarmi che non ho risultati per le mappe parziali
results = 0
#in questa fase il grafo non e' ancora stato classificato con le label da dare ai vai nodi.
#-------------------------------------------------------------------------------------
#creo il file xml dei parametri
par.to_XML(parametri_obj, path_obj)
#-------------------------prova transitional kernels----------------------------------
#splitto una stanza e restituisto la nuova lista delle stanze
#stanze, colori = tk.split_stanza_verticale(2, stanze, colori,estremi)
#stanze, colori = tk.split_stanza_orizzontale(3, stanze, colori,estremi)
#stanze, colori = tk.slit_all_cell_in_room(spazi, 1, colori, estremi) #questo metodo e' stato fatto usando il concetto di Spazio, dunque fai attenzione perche' non restituisce la cosa giusta.
#stanze, colori = tk.split_stanza_reverce(2, len(stanze)-1, stanze, colori, estremi) #questo unisce 2 stanze precedentemente splittate, non faccio per ora nessun controllo sul fatto che queste 2 stanze abbiano almeno un muro in comune, se sono lontani succede un casino
#-----------------------------------------------------------------------------------
#-------------------------MAPPA SEMANTICA-------------------------------------------
'''
#in questa fase classifico i nodi del grafo e conseguentemente anche quelli della mappa.
#gli input di questa fase non mi sono ancora molto chiari
#per ora non la faccio poi se mi serve la copio/rifaccio, penso proprio sia sbagliata.
#stanze ground truth
(stanze_gt, nomi_stanze_gt, RC, RCE, FCES, spaces, collegate_gt) = sema.get_stanze_gt(nome_gt, estremi)
#corrispondenze tra gt e segmentate (backward e forward)
(indici_corrispondenti_bwd, indici_gt_corrispondenti_fwd) = sema.get_corrispondenze(stanze,stanze_gt)
#creo xml delle stanze segmentate
id_stanze = sema.crea_xml(nomeXML,stanze,doorsVertices,collegate,indici_gt_corrispondenti_fwd,RCE,nomi_stanze_gt)
#parso xml creato, va dalla cartella input alla cartella output/xmls, con feature aggiunte
xml_output = sema.parsa(dataset_name, nomeXML)
#classifico
predizioniRCY = sema.classif(dataset_name,xml_output,'RC','Y',30)
predizioniRCN = sema.classif(dataset_name,xml_output,'RC','N',30)
predizioniFCESY = sema.classif(dataset_name,xml_output,'RCES','Y',30)
predizioniFCESN = sema.classif(dataset_name,xml_output,'RCES','N',30)
#creo mappa semantica segmentata e ground truth e le plotto assieme
sema.creaMappaSemantica(predizioniRCY, G, pos, stanze, id_stanze, estremi, colori, clustersCelle, collegate)
sema.creaMappaSemanticaGt(stanze_gt, collegate_gt, RC, estremi, colori)
plt.show()
sema.creaMappaSemantica(predizioniRCN, G, pos, stanze, id_stanze, estremi, colori, clustersCelle, collegate)
sema.creaMappaSemanticaGt(stanze_gt, collegate_gt, RC, estremi, colori)
plt.show()
sema.creaMappaSemantica(predizioniFCESY, G, pos, stanze, id_stanze, estremi, colori, clustersCelle, collegate)
sema.creaMappaSemanticaGt(stanze_gt, collegate_gt, FCES, estremi, colori)
plt.show()
sema.creaMappaSemantica(predizioniFCESN, G, pos, stanze, id_stanze, estremi, colori, clustersCelle, collegate)
sema.creaMappaSemanticaGt(stanze_gt, collegate_gt, FCES, estremi, colori)
plt.show()
'''
#-----------------------------------------------------------------------------------
print "to be continued..."
return results
#TODO
def load_main(filepath_pickle_layout, filepath_pickle_grafoTopologico, parXML):
	"""Reload the results of a previous start_main run from disk.

	filepath_pickle_layout          -- pickle written by start_main (7-tuple:
	                                   stanze, clustersCelle, estremi, colori,
	                                   spazi, stanze_reali, colori_reali)
	filepath_pickle_grafoTopologico -- pickle of the topological graph
	parXML                          -- XML file with the run parameters

	NOTE(review): start_main pickles (stanze, clustersCelle, estremi,
	colori) into the grafoTopologico file, while this loader reads it as
	(G, pos, stanze_collegate, doorsVertices) -- one of the two sides
	needs fixing before this function is completed.
	TODO: the method is unfinished; the loaded data is not used yet.
	"""
	# Load the layout pickle; 'with' guarantees the file handle is closed
	# (the previous revision left both files open).
	with open(filepath_pickle_layout, 'rb') as pkl_file:
		data1 = pickle.load(pkl_file)
	stanze = data1[0]
	clustersCelle = data1[1]
	estremi = data1[2]
	colori = data1[3]
	spazi = data1[4]
	stanze_reali = data1[5]
	colori_reali = data1[6]
	#print "controllo che non ci sia nulla di vuoto", len(stanze), len(clustersCelle), len(estremi), len(spazi), len(colori)
	# Load the topological-graph pickle.
	with open(filepath_pickle_grafoTopologico, 'rb') as pkl_file2:
		data2 = pickle.load(pkl_file2)
	G = data2[0]
	pos = data2[1]
	stanze_collegate = data2[2]
	doorsVertices = data2[3]
	# Rebuild the parameter objects from the saved XML file.
	new_parameter_obj, new_path_obj = par.load_from_XML(parXML)
	# continue the method from here
def makeFolders(location,datasetList):
	"""Ensure the per-dataset output folders exist.

	For every name in datasetList, creates two sibling folders under
	location: "<dataset>" (figures/results) and "<dataset>_pickle"
	(pickled data).  Each folder is created independently, so a missing
	"_pickle" folder is recreated even when the base folder already
	exists (previously it was only made together with the base folder).

	location must end with a path separator: the folder paths are built
	by plain string concatenation.
	"""
	for dataset in datasetList:
		# create each directory on its own so the pair stays in sync
		for folder in (location+dataset, location+dataset+"_pickle"):
			if not os.path.exists(folder):
				os.mkdir(folder)
def main():
	"""Batch driver for the layout pipeline.

	Depending on par.AZIONE, either iterates over every map image of the
	configured datasets ("batch") or processes a single map
	("mappa_singola"); par.LOADMAIN switches between running the full
	pipeline (start_main) and reloading previous results (load_main).
	Creates a time-stamped ./log/<timestamp> folder with a log.txt and a
	copy of this script and of parameters.py, and appends per-dataset
	accuracy averages to <dataset>/accuracy.txt.
	"""
	start = time.time()
	# The two triple-quoted banners below are printed verbatim at start-up:
	# the developers' known-issues list and the usage instructions (Italian).
	print ''' PROBLEMI NOTI \n
	1] LE LINEE OBLIQUE NON VANNO;\n
	2] NON CLASSIFICA LE CELLE ESTERNE CHE STANNO DENTRO IL CONVEX HULL, CHE QUINDI VENGONO CONSIDERATE COME STANZE;\n
	OK 3] ACCURACY NON FUNZIONA;\n
	4] QUANDO VENGONO RAGGRUPPATI TRA DI LORO I CLUSTER COLLINEARI, QUESTO VIENE FATTO A CASCATA. QUESTO FINISCE PER ALLINEARE ASSIEME MURA MOLTO DISTANTI;\n
	5] IL SISTEMA E' MOLTO SENSIBILE ALLA SCALA. BISOGNEREBBE INGRANDIRE TUTTE LE IMMAGINI FACENDO UN RESCALING E RISOLVERE QUESTO PROBLEMA. \n
	[4-5] FANNO SI CHE I CORRIDOI PICCOLI VENGANO CONSIDERATI COME UNA RETTA UNICA\n
	6] BISOGNEREBBE FILTRARE LE SUPERFICI TROPPO PICCOLE CHE VENGONO CREATE TRA DEI CLUSTER;\n
	7] LE IMMAGINI DI STAGE SONO TROPPO PICCOLE; VANNO RIPRESE PIU GRANDI \n
	>> LANCIARE IN BATCH SU ALIENWARE\n
	>> RENDERE CODICE PARALLELO\n
	8] MANCANO 30 DATASET DA FARE CON STAGE\n
	9] OGNI TANTO NON FUNZIONA IL GET CONTORNO PERCHE SBORDA ALL'INTERNO\n
	>> PROVARE CON SCAN BORDO (SU IMMAGINE COPIA)\n
	>> PROVARE A SETTARE IL PARAMETRO O A MODIFICARE IL METODO DI SCAN BORDO\n
	>> CERCARE SOLUZIONI ALTERNATIVE (ES IDENTIFICARE LE CELLE ESTERNE)\n
	OK 10] VANNO TARATI MEGLIO I PARAMETRI PER IL CLUSTERING\n
	>> I PARAMETRI DE CLUSTERING SONO OK; OGNI TANTO FA OVERSEGMENTAZIONE.\n
	>>> EVENTUALMENTE SE SI VEDE CHE OVERSEGMENTAZIONE SONO UN PROBLEMA CAMBIARE CLUSTERING O MERGE CELLE\n
	11] LE LINEE DELLA CANNY E HOUGH TALVOLTA SONO TROPPO GROSSE \n
	>> IN REALTA SEMBRA ESSERE OK; PROVARE CON MAPPE PIU GRANDI E VEDERE SE CAMBIA.
	12] BISOGNEREBBE AUMENTARE LA SEGMENTAZIONE CON UN VORONOI
	OK 13] STAMPA L'IMMAGINE DELLA MAPPA AD UNA SCALA DIVERSA RISPETTO A QUELLA VERA.\n
	OK 14] RISTAMPARE SCHOOL_GT IN GRANDE CHE PER ORA E' STAMPATO IN PICCOLO (800x600)\n
	OK VEDI 10] 15] NOI NON CALCOLIAMO LA DIFFUSION DEL METODO DI MURA; PER ALCUNI VERSI E' UN BENE PER ALTRI NO\n
	OK VEDI 4] 16] NON FACCIAMO IL CLUSTERING DEI SEGMENTI IN MANIERA CORRETTA; DOVREMMO SOLO FARE MEANSHIFT\n
	17] LA FASE DEI SEGMENTI VA COMPLETAMENTE RIFATTA; MEANSHIFT NON FUNZIONA COSI'; I SEGMENTI HANNO UN SACCO DI "==" CHE VANNO TOLTI; SPATIAL CLUSTRING VA CAMBIATO;\n
	18] OGNI TANTO IL GRAFO TOPOLOGICO CONNETTE STANZE CHE SONO ADIACENTI MA NON CONNESSE. VA RIVISTA LA PARTE DI MEDIALAXIS;\n
	19] PROVARE A USARE L'IMMAGINE CON IL CONTORNO RICALCATO SOLO PER FARE GETCONTOUR E NON NEGLI ALTRI STEP.\n
	20] TOGLIERE THRESHOLD + CANNY -> USARE SOLO CANNY.\n
	21] TOGLIERE LE CELLE INTERNE CHE SONO BUCHI.\n
	>> USARE VORONOI PER CONTROLLARE LA CONNETTIVITA.\n
	>> USARE THRESHOLD SU SFONDO \n
	>> COMBINARE I DUE METODI\n
	22] RIMUOVERE LE STANZE ERRATE:\n
	>> STANZE "ESTERNE" INTERNE VANNO TOLTE IN BASE ALLE CELLE ESTERNE\n
	>> RIMUOVERE STANZE CON FORME STUPIDE (ES PARETI LUNGHE STRETTE), BISOGNA DECIDERE SE ELIMINARLE O INGLOBARLE IN UN ALTRA STANZA\n
	23] RISOLVERE TUTTI I WARNING.\n
	da chiedere: guardare il metodo clustering_dbscan_celle(...) in layout la riga
	af = DBSCAN(eps, min_samples, metric="precomputed").fit(X) non dovrebbe essere cosi?
	af = DBSCAN(eps= eps, min_samples = min_samples, metric="precomputed").fit(X)
	'''
	print '''
	FUNZIONAMENTO:\n
	SELEZIONARE SU QUALI DATASETs FARE ESPERIMENTI (variabile DATASETs -riga165- da COMMENTARE / DECOMMENTARE)\n
	SPOSTARE LE CARTELLE CON I NOMI DEI DATASET CREATI DALL'ESPERIMENTO PRECEDENTE IN UNA SOTTO-CARTELLA (SE TROVA UNA CARTELLA CON LO STESSO NOME NON CARICA LA MAPPA)\n
	SETTARE I PARAMERI \n
	ESEGUIRE\n
	OGNI TANTO IL METODO CRASHA IN FASE DI VALUTAZIONE DI ACCURATEZZA. NEL CASO, RILANCIARLO\n
	SPOSTARE TUTTI I RISULTATI IN UNA CARTELLA IN RESULTS CON UN NOME SIGNIFICATIVO DEL TEST FATTO\n
	SALVARE IL MAIN DENTRO QUELLA CARTELLA\n
	'''
	#-------------------PARAMETERS------------------------------------------------------
	# load the default parameters
	parametri_obj = par.Parameter_obj()
	# load the default paths
	path_obj = par.Path_obj()
	#-----------------------------------------------------------------------------------
	makeFolders(path_obj.OUTFOLDERS,path_obj.DATASETs)
	skip_performed = True
	#-----------------------------------------------------------------------------------
	# create the log folder named after the current time stamp
	our_time = str(dt.datetime.now())[:-10].replace(' ','@') #get current time
	SAVE_FOLDER = os.path.join('./log', our_time)
	if not os.path.exists(SAVE_FOLDER):
		os.mkdir(SAVE_FOLDER)
	SAVE_LOGFILE = SAVE_FOLDER+'/log.txt'
	#------------------------------------------------------------------------------------
	with open(SAVE_LOGFILE,'w+') as LOGFILE:
		print "AZIONE", par.AZIONE
		print >>LOGFILE, "AZIONE", par.AZIONE
		shutil.copy('./minibatch.py',SAVE_FOLDER+'/minibatch.py') #snapshot the main script
		shutil.copy('./parameters.py',SAVE_FOLDER+'/parameters.py') #snapshot the parameters file
		if par.AZIONE == "batch":
			if par.LOADMAIN==False:
				print >>LOGFILE, "SONO IN MODALITA' START MAIN"
			else:
				print >>LOGFILE, "SONO IN MODALITA' LOAD MAIN"
			print >>LOGFILE, "-----------------------------------------------------------"
			for DATASET in path_obj.DATASETs :
				print >>LOGFILE, "PARSO IL DATASET", DATASET
				global_results = []
				print 'INIZIO DATASET ' , DATASET
				# iterate over every map image of this dataset
				for metricMap in glob.glob(path_obj.INFOLDERS+'IMGs/'+DATASET+'/*.png') :
					print >>LOGFILE, "---parso la mappa: ", metricMap
					print 'INIZIO A PARSARE ', metricMap
					path_obj.metricMap =metricMap
					# map name = file name without extension
					map_name = metricMap.split('/')[-1][:-4]
					#print map_name
					SAVE_FOLDER = path_obj.OUTFOLDERS+DATASET+'/'+map_name
					SAVE_PICKLE = path_obj.OUTFOLDERS+DATASET+'_pickle/'+map_name.split('.')[0]
					if par.LOADMAIN==False:
						if not os.path.exists(SAVE_FOLDER):
							os.mkdir(SAVE_FOLDER)
							os.mkdir(SAVE_PICKLE)
						else:
							# avoid re-running tests that were already done
							if skip_performed :
								print 'GIA FATTO; PASSO AL SUCCESSIVO'
								continue
					#print SAVE_FOLDER
					path_obj.filepath = SAVE_FOLDER+'/'
					path_obj.filepath_pickle_layout = SAVE_PICKLE+'/'+'Layout.pkl'
					path_obj.filepath_pickle_grafoTopologico = SAVE_PICKLE+'/'+'GrafoTopologico.pkl'
					# NOTE(review): both branches of this ternary are '' -- the
					# SCHOOL-specific suffix looks like a disabled leftover.
					add_name = '' if DATASET == 'SCHOOL' else ''
					path_obj.nome_gt = path_obj.INFOLDERS+'XMLs/'+DATASET+'/'+map_name+add_name+'.xml'
					#--------------------new parameters-----------------------------------
					# per-dataset parameter overrides
					parametri_obj.minLateralSeparation = 7 if 'SCHOOL' in DATASET else 15
					#parametri_obj.cv2thresh = 150 if DATASET == 'SCHOOL' else 200
					parametri_obj.cv2thresh = 170 if DATASET == 'SCHOOL' else 200
					parametri_obj.flip_dataset = True if DATASET == 'SURVEY' else False
					#--------------------------------------------------------------------
					#-------------------EXECUTION----------------------------------------
					if par.LOADMAIN==False:
						print "start main"
						results = start_main(parametri_obj, path_obj)
						global_results.append(results);
						# after the last map, average the accuracies over the
						# whole dataset and append them to accuracy.txt
						if metricMap == glob.glob(path_obj.INFOLDERS+'IMGs/'+DATASET+'/*.png')[-1]:
							accuracy_bc_medio = []
							accuracy_bc_in_pixels = []
							accuracy_fc_medio = []
							accuracy_fc_in_pixels=[]
							for i in global_results :
								accuracy_bc_medio.append(i[0])
								accuracy_fc_medio.append(i[2])
								accuracy_bc_in_pixels.append(i[4])
								accuracy_fc_in_pixels.append(i[5])
							filepath= path_obj.OUTFOLDERS+DATASET+'/'
							print filepath
							f = open(filepath+'accuracy.txt','a')
							#f.write(filepath)
							f.write('accuracy_bc = '+str(np.mean(accuracy_bc_medio))+'\n')
							f.write('accuracy_bc_pixels = '+str(np.mean(accuracy_bc_in_pixels))+'\n')
							f.write('accuracy_fc = '+str(np.mean(accuracy_fc_medio))+'\n')
							f.write('accuracy_fc_pixels = '+str(np.mean(accuracy_fc_in_pixels))+'\n\n')
							f.close()
						LOGFILE.flush()
					elif par.LOADMAIN==True:
						print "load main"
						print >>LOGFILE, "---parso la mappa: ", path_obj.metricMap
						load_main(path_obj.filepath_pickle_layout, path_obj.filepath_pickle_grafoTopologico, path_obj.filepath+"parametri.xml")
						LOGFILE.flush()
					else :
						continue
					# NOTE(review): this break exits the map loop after the first
					# processed (non-skipped) map of each dataset, which clashes
					# with the whole-dataset accuracy averaging above -- it looks
					# like a debugging leftover; confirm before removing.
					break
				LOGFILE.flush()
		elif par.AZIONE =='mappa_singola':
			#-------------------EXECUTION on a single map---------------------------------
			if par.LOADMAIN==False:
				print "start main"
				print >>LOGFILE, "SONO IN MODALITA' START MAIN"
				print >>LOGFILE, "---parso la mappa: ", path_obj.metricMap
				start_main(parametri_obj, path_obj)
				LOGFILE.flush()
			else:
				print "load main"
				print >>LOGFILE, "SONO IN MODALITA' LOAD MAIN"
				print >>LOGFILE, "---parso la mappa: ", path_obj.metricMap
				load_main(path_obj.filepath_pickle_layout, path_obj.filepath_pickle_grafoTopologico, path_obj.filepath+"parametri.xml")
				LOGFILE.flush()
	#-------------------ELAPSED TIME----------------------------------------------------
	fine = time.time()
	elapsed = fine-start
	print "la computazione ha impiegato %f secondi" % elapsed
if __name__ == '__main__':
main() | [
"matteo.luperto@polimi.it"
] | matteo.luperto@polimi.it |
23846ce4bec5d5384d38a3c2953d879d89b4d23d | 8e81c6c053f0a886675f47c84185ee2c749d7144 | /quiz/Problem 5-1.py | 69e13e5800c0e5a891f1bf88b5a2fd8502cf15e3 | [] | no_license | Silver-Iron/EDX-MITx--6.00.2x | 81acf404cda7fe34e0e3c0a9532faafb29048fe6 | 47740ea97d9f54ef5396fe3186e6485cbfeeba0f | refs/heads/master | 2021-01-22T22:34:16.283793 | 2017-03-20T09:34:31 | 2017-03-20T09:34:31 | 85,552,811 | 0 | 0 | null | 2017-03-20T08:33:25 | 2017-03-20T08:33:25 | null | UTF-8 | Python | false | false | 1,162 | py | """
You are taking a class that plans to assign final grades based on two midterm quizzes and a final exam. The final grade will be based on 25% for each midterm, and 50% for the final. You are told that the grades on the exams were each uniformly distributed integers:
Midterm 1: 50 <= grade <= 80
Midterm 2: 60 <= grade <= 90
Final Exam: 55 <= grade <= 95
Write a function called sampleQuizzes that implements a Monte Carlo simulation that estimates the probability of a student having a final score >= 70 and <= 75. Assume that 10,000 trials are sufficient to provide an accurate answer.
Note: Do not include any "import" statements in your code. We import the random module for you, and you should not be using any functions from the Pylab module for this problem.
"""
import random
def sampleQuizzes():
    """Monte Carlo estimate of P(70 <= final score <= 75).

    Grades are uniformly distributed integers: midterm 1 in [50, 80],
    midterm 2 in [60, 90], final exam in [55, 95].  The final score
    weighs each midterm 25% and the final exam 50%.  Runs 10,000 trials
    and returns the fraction whose score lands in [70, 75].
    """
    num_trials = 10000
    hits = 0
    for _ in range(num_trials):
        # random.randint is inclusive on BOTH ends, matching the spec; the
        # previous range()/randrange() calls excluded the top grade of each
        # exam (e.g. midterm 1 could never be 80).
        mid1 = random.randint(50, 80)
        mid2 = random.randint(60, 90)
        final_exam = random.randint(55, 95)
        score = mid1 * 0.25 + mid2 * 0.25 + final_exam * 0.5
        if 70 <= score <= 75:
            hits += 1
    # float division keeps the result correct under Python 2 as well
    return hits / float(num_trials)
print sampleQuizzes()  # script entry: print one Monte Carlo estimate
| [
"dr.tallin@gmail.com"
] | dr.tallin@gmail.com |
1939dc7db68354a767eb23fbeb3da366e3043063 | 20176bf4fbd8aec139c7b5a27f2c2e155e173e6e | /data/all-pratic/preety_10022/Day2/Guess_Word_Game.py | 54df69da926c00a645e99a803a03b03a155f693c | [] | no_license | githubjyotiranjan/pytraining | 4ac4a1f83cc4270e2939d9d32c705019c5bc61c5 | 8b50c4ab7848bd4cbfdfbc06489768d577289c66 | refs/heads/master | 2020-03-19T06:22:20.793296 | 2018-06-15T20:08:11 | 2018-06-15T20:08:11 | 136,013,642 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | word_input="Apple"
# Letter-guessing game: discover every distinct letter of word_input
# (defined above) before running out of the 10 allowed wrong guesses.
word_input = word_input.lower()
# Compare SETS of distinct letters instead of counting hits.  This fixes
# two bugs in the previous version: re-guessing the same correct letter
# counted as new progress (so repeating one letter could win), and words
# with repeated letters (e.g. "apple": 5 characters but only 4 distinct
# letters) could never be won at all, since the hit counter had to reach
# the full word length.
letters_to_find = set(word_input)
letters_found = set()
limit = 10
livesleft = limit
while livesleft >= 1:
    letter_guess = input('Guess a letter: ').lower()
    if letter_guess in letters_to_find:
        print(' It is correct')
        letters_found.add(letter_guess)
    else:
        # a wrong guess costs one life
        livesleft -= 1
        if livesleft == 0:
            print("Sorry .Left with No Chances!!!")
        else:
            print('Wrong Answer!Left with {}'.format(livesleft))
    if letters_found == letters_to_find:
        print("You have guessed all the letters correctly")
        break
| [
"jsatapathy007@gmail.com"
] | jsatapathy007@gmail.com |
d0570e0c7100f66c352d8a6765156f68a49587d1 | ffc4f38fcb8fd341132152fad008d10ff5b3f4e7 | /menu-duplicatefirst.py | 467265b5b8cd581f940580f34412a8b3a9af1e87 | [] | no_license | imclab/AddLocator | 840a88371bf11b4f44818bd88e803f4bdf3c1b13 | 67b6f47a8eade3601a233dccfdeee2d6918ee20d | refs/heads/master | 2021-01-21T02:20:18.746329 | 2013-08-13T20:36:29 | 2013-08-13T20:36:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,492 | py | from maya.cmds import *
import maya.mel as mel
# Duplicate-first tool for Maya: for every selected object after the first,
# create a fresh duplicate of the FIRST selection at that object's world
# position, transfer the object's keyframes to it, then delete the object.
# Relies on `from maya.cmds import *` (see imports above); Python 2 / Maya.
#1. make an array of all selected objects
target = ls(sl=True)
#2. if only one selection, just make a new duplicate at the same coordinates...
if(len(target)==1):
	#call through mel because python has no rc option!
	mel.eval("duplicate -un -ic -rc")
else:
	try:
		#3. check if the first selection is skinned.
		# NOTE(review): skinCluster(q=True) appears to raise when the selection
		# is not skinned, so the bare except below doubles as the "not skinned"
		# branch -- confirm this is intentional.
		select(target[0])
		skinCluster(q=True)
		# skinned geometry cannot be handled here; tell the user and do nothing
		print "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
		print "Select the root joint for this to work properly."
		print "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
	except:
		#4. ...otherwise, for each selected object...
		for i in range(1,len(target)):
			#5. ...get current selection's position and copy keyframes
			select(target[i])
			# world-space translation of the object being replaced
			pos = xform(target[i], q=True, t=True, ws=True)
			try:
				copyKey()
			except:
				# objects without animation have no keys to copy
				print "Couldn't copy keys."
			#6. duplicate the first selection
			select(target[0])
			#call through mel because python has no rc option!
			mel.eval("duplicate -un -ic -rc")
			#7. move first selection to position and paste keyframes
			# (the duplicate is presumably left selected by the mel call, so
			# move/pasteKey act on it -- TODO confirm)
			move(pos[0],pos[1],pos[2])
			try:
				pasteKey()
			except:
				print "Couldn't paste keys."
			#8. delete selection
			delete(target[i])
| [
"nick@fox-gieg.com"
] | nick@fox-gieg.com |
3943ff161decd9e1262e58fabc959079a0dbd52b | 07aa9b5a07df2a80b7d899da1da63c84b1060fec | /src/iegen/codegen/visitor/_visitor.py | 573f2e54a91763a9bcdc2516091ce08d8217736e | [] | no_license | lamielle/iegen | f26da812a01557daca086e0a1c76a62af8fe7cd4 | 0f48edad8d14ae18c907d705751552cf6eb53c8e | refs/heads/master | 2016-09-05T12:48:23.698779 | 2010-12-14T19:17:13 | 2010-12-14T19:17:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,741 | py | # _visitor.py
#
# Visitor class for traversing an AST of a program
# Alan LaMielle 10/20/2008
from iegen import IEGenObject
#---------- Depth First Visitor ----------
class DFVisitor(IEGenObject):
def __init__(self):
#---------- State members ----------
#Will be True if we are within a function, False otherwise
self.in_function=False
#-----------------------------------
#---------- Default In/Out Methods ----------
#Do nothing by default
def defaultIn(self,node): pass
def defaultOut(self,node): pass
def defaultBetween(self,node): pass
#--------------------------------------------
#---------- In/Out/Between Methods ----------
def inProgram(self,node):
self.defaultIn(node)
def outProgram(self,node):
self.defaultOut(node)
def betweenFunctions(self,node):
self.defaultBetween(node)
def inFunction(self,node):
self.defaultIn(node)
def outFunction(self,node):
self.defaultOut(node)
def betweenParamsStatements(self,node):
self.defaultBetween(node)
def inParameter(self,node):
self.defaultIn(node)
def outParameter(self,node):
self.defaultOut(node)
def betweenParameters(self,node):
self.defaultBetween(node)
def inStatement(self,node):
self.defaultIn(node)
def outStatement(self,node):
self.defaultOut(node)
def inVarDecl(self,node):
self.defaultIn(node)
def outVarDecl(self,node):
self.defaultOut(node)
def inComment(self,node):
self.defaultIn(node)
def outComment(self,node):
self.defaultOut(node)
#------------------------------------
#---------- Visit methods ----------
def visit(self,node):
node.apply_visitor(self)
return self
def visitProgram(self,node):
from iegen.util import iter_islast
self.inProgram(node)
for statement in node.preamble:
statement.apply_visitor(self)
for function,is_last in iter_islast(node.functions):
function.apply_visitor(self)
if not is_last:
self.betweenFunctions(node)
self.outProgram(node)
def visitFunction(self,node):
from iegen.util import iter_islast
self.in_function=True
self.inFunction(node)
for param,is_last in iter_islast(node.params):
param.apply_visitor(self)
if not is_last:
self.betweenParameters(param)
self.betweenParamsStatements(node)
for statement in node.body:
statement.apply_visitor(self)
self.outFunction(node)
self.in_function=False
def visitParameter(self,node):
self.inParameter(node)
self.outParameter(node)
def visitStatement(self,node):
self.inStatement(node)
self.outStatement(node)
def visitVarDecl(self,node):
self.inVarDecl(node)
self.outVarDecl(node)
def visitComment(self,node):
self.inComment(node)
self.outComment(node)
#-----------------------------------
#-----------------------------------------
| [
"lamielle@cs.colostate.edu"
] | lamielle@cs.colostate.edu |
1270547d683a15d46dcba368745d6f9d8a4a63fd | c646ad2dfab80f7183076dde82a82e6e1a6222d2 | /athenatools/migrations/0021_auto_20190310_1802.py | 80ac23bf864276666e7f2ae06dfa0036f6cc49eb | [
"MIT"
] | permissive | taojy123/AthenaTools | b7f5a799dca60237fb69f312f5a913964ae00097 | 612b113c5c9aeb0e6612242540fa05b7f0ac02c5 | refs/heads/master | 2023-07-21T06:32:15.638271 | 2023-07-19T09:42:01 | 2023-07-19T09:42:01 | 141,523,525 | 9 | 2 | MIT | 2023-06-30T22:19:13 | 2018-07-19T04:14:20 | Python | UTF-8 | Python | false | false | 2,397 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2019-03-10 10:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('athenatools', '0020_auto_20190310_1740'),
]
operations = [
migrations.AlterField(
model_name='product',
name='default_check_freeze',
field=models.BooleanField(default=False, verbose_name=b'\xe9\xbb\x98\xe8\xae\xa4\xe5\x86\xbb\xe5\x93\x81\xe6\xb8\xa9\xe5\xba\xa6\xe2\x89\xa4-12\xe2\x84\x83\xe4\xb8\x94\xe6\x97\xa0\xe8\xbd\xaf\xe5\x8c\x96'),
),
migrations.AlterField(
model_name='product',
name='default_check_label',
field=models.BooleanField(default=False, verbose_name=b'\xe9\xbb\x98\xe8\xae\xa4\xe6\xa0\x87\xe7\xad\xbe\xe6\xad\xa3\xe5\xb8\xb8'),
),
migrations.AlterField(
model_name='product',
name='default_check_odorless',
field=models.BooleanField(default=False, verbose_name=b'\xe9\xbb\x98\xe8\xae\xa4\xe6\x97\xa0\xe5\xbc\x82\xe5\x91\xb3'),
),
migrations.AlterField(
model_name='product',
name='default_check_package',
field=models.BooleanField(default=False, verbose_name=b'\xe9\xbb\x98\xe8\xae\xa4\xe5\x8c\x85\xe8\xa3\x85\xe5\xae\x8c\xe5\xa5\xbd'),
),
migrations.AlterField(
model_name='purchase',
name='check_freeze',
field=models.BooleanField(default=False, verbose_name=b'\xe5\x86\xbb\xe5\x93\x81\xe6\xb8\xa9\xe5\xba\xa6\xe2\x89\xa4-12\xe2\x84\x83\xe4\xb8\x94\xe6\x97\xa0\xe8\xbd\xaf\xe5\x8c\x96'),
),
migrations.AlterField(
model_name='purchase',
name='check_label',
field=models.BooleanField(default=False, verbose_name=b'\xe6\xa0\x87\xe7\xad\xbe\xe6\xad\xa3\xe5\xb8\xb8'),
),
migrations.AlterField(
model_name='purchase',
name='check_odorless',
field=models.BooleanField(default=False, verbose_name=b'\xe6\x97\xa0\xe5\xbc\x82\xe5\x91\xb3'),
),
migrations.AlterField(
model_name='purchase',
name='check_package',
field=models.BooleanField(default=False, verbose_name=b'\xe5\x8c\x85\xe8\xa3\x85\xe5\xae\x8c\xe5\xa5\xbd'),
),
]
| [
"taojy123@163.com"
] | taojy123@163.com |
ce991260a7e6c3aa07a348aed0fc5b6a06f621dd | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/T/tlevine/do_floats_break_the_datatable.py | ca732952aa668563159bca0a2b24b53642ebc4ac | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | from scraperwiki.sqlite import save
save([],{"chainsaw":float(334.00023)})from scraperwiki.sqlite import save
save([],{"chainsaw":float(334.00023)}) | [
"pallih@kaninka.net"
] | pallih@kaninka.net |
0e1f8fb4c87029d81455a43272cd900f200b5f9e | 51575eeda79a6e12c8839046721168e5cc5b6774 | /gns/inference/optimization/tune.py | b369718420e2fe4eae814ede4bb9deea5eb6b764 | [] | no_license | rfeinman/GNS-Modeling | 59ad26efea4045c7dae98e98263d1193d53052b8 | 2c6b3400bfbb30f8f117042722fbcca2a8e9cb98 | refs/heads/master | 2023-06-08T21:22:27.914054 | 2021-07-08T14:17:56 | 2021-07-08T14:17:56 | 274,778,209 | 22 | 7 | null | null | null | null | UTF-8 | Python | false | false | 1,624 | py | import numpy as np
import torch
def get_param_grid(pblur, peps, nbins_blur, nbins_eps):
blur_grid = np.linspace(-50, pblur, nbins_blur)
eps_grid = np.linspace(-2, 1, nbins_eps-1)
eps_grid = np.append(eps_grid, peps)
param_grid = np.meshgrid(blur_grid, eps_grid)
param_grid = np.stack(param_grid, axis=-1).reshape(-1,2)
return param_grid
@torch.no_grad()
def render_tuning_multi(parse_list, img, tune_fn, nbins_blur=20, nbins_eps=40):
K = len(parse_list)
drawing_list = [parse.drawing for parse in parse_list]
blur_params = [parse.blur_base.item() for parse in parse_list]
eps_params = [parse.epsilon_base.item() for parse in parse_list]
param_grids = [get_param_grid(blur_params[k], eps_params[k], nbins_blur, nbins_eps)
for k in range(K)]
losses = torch.zeros(nbins_blur*nbins_eps, K)
for i, param_vals in enumerate(zip(*param_grids)):
for parse, (pblur, peps) in zip(parse_list, param_vals):
parse.blur_base.data = torch.tensor(pblur, device=parse.blur_base.device)
parse.epsilon_base.data = torch.tensor(peps, device=parse.epsilon_base.device)
losses[i] = tune_fn(parse_list, drawing_list, img)
best_losses, best_idx = torch.min(losses, dim=0)
for parse, ix, grid in zip(parse_list, best_idx, param_grids):
pblur, peps = grid[ix]
parse.blur_base.data = torch.tensor(pblur, device=parse.blur_base.device)
parse.epsilon_base.data = torch.tensor(peps, device=parse.epsilon_base.device)
best_states = [parse.state for parse in parse_list]
return best_losses, best_states | [
"rfeinman16@gmail.com"
] | rfeinman16@gmail.com |
12a50275d49eb47f247e28b62b8b6aec74918723 | ba3231b25c60b73ca504cd788efa40d92cf9c037 | /nitro-python-13.0.36/nssrc/com/citrix/netscaler/nitro/resource/config/appqoe/appqoepolicy_lbvserver_binding.py | 8ee87e1e11a7806b4dc6076f7173a54e13258719 | [
"Apache-2.0",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | zhuweigh/vpx13 | f6d559ae85341e56472e3592cbc67062dac34b93 | b36caa3729d3ca5515fa725f2d91aeaabdb2daa9 | refs/heads/master | 2020-07-04T22:15:16.595728 | 2019-09-20T00:19:56 | 2019-09-20T00:19:56 | 202,435,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,627 | py | #
# Copyright (c) 2008-2019 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class appqoepolicy_lbvserver_binding(base_resource) :
""" Binding class showing the lbvserver that can be bound to appqoepolicy.
"""
def __init__(self) :
self._boundto = None
self._bindpriority = None
self._activepolicy = None
self._gotopriorityexpression = None
self._name = None
self.___count = None
@property
def name(self) :
r""".<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
r""".<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def boundto(self) :
r"""The name of the entity to which the policy is bound.
"""
try :
return self._boundto
except Exception as e:
raise e
@boundto.setter
def boundto(self, boundto) :
r"""The name of the entity to which the policy is bound.
"""
try :
self._boundto = boundto
except Exception as e:
raise e
@property
def bindpriority(self) :
r"""Specifies the binding of the policy. use only in display.
"""
try :
return self._bindpriority
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
r"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@property
def activepolicy(self) :
try :
return self._activepolicy
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(appqoepolicy_lbvserver_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.appqoepolicy_lbvserver_binding
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
r""" Use this API to fetch appqoepolicy_lbvserver_binding resources.
"""
try :
if not name :
obj = appqoepolicy_lbvserver_binding()
response = obj.get_resources(service, option_)
else :
obj = appqoepolicy_lbvserver_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
r""" Use this API to fetch filtered set of appqoepolicy_lbvserver_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = appqoepolicy_lbvserver_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
r""" Use this API to count appqoepolicy_lbvserver_binding resources configued on NetScaler.
"""
try :
obj = appqoepolicy_lbvserver_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
r""" Use this API to count the filtered set of appqoepolicy_lbvserver_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = appqoepolicy_lbvserver_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class appqoepolicy_lbvserver_binding_response(base_response) :
def __init__(self, length=1) :
self.appqoepolicy_lbvserver_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.appqoepolicy_lbvserver_binding = [appqoepolicy_lbvserver_binding() for _ in range(length)]
| [
"zhuwei@xsky.com"
] | zhuwei@xsky.com |
f45a639996035bde27c7378c7867c3a692a5132f | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/cv/classification/Googlenet_ID0447_for_PyTorch/torchvision/datasets/utils.py | 83d0a4d8ce3a6a44c81762c82af400b537e7f70b | [
"GPL-1.0-or-later",
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 10,219 | py | #
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#import os
import os.path
import hashlib
import gzip
import errno
import tarfile
import zipfile
import torch
from torch.utils.model_zoo import tqdm
def gen_bar_updater():
pbar = tqdm(total=None)
def bar_update(count, block_size, total_size):
if pbar.total is None and total_size:
pbar.total = total_size
progress_bytes = count * block_size
pbar.update(progress_bytes - pbar.n)
return bar_update
def calculate_md5(fpath, chunk_size=1024 * 1024):
md5 = hashlib.md5()
with open(fpath, 'rb') as f:
for chunk in iter(lambda: f.read(chunk_size), b''):
md5.update(chunk)
return md5.hexdigest()
def check_md5(fpath, md5, **kwargs):
return md5 == calculate_md5(fpath, **kwargs)
def check_integrity(fpath, md5=None):
if not os.path.isfile(fpath):
return False
if md5 is None:
return True
return check_md5(fpath, md5)
def makedir_exist_ok(dirpath):
"""
Python2 support for os.makedirs(.., exist_ok=True)
"""
try:
os.makedirs(dirpath)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
def download_url(url, root, filename=None, md5=None):
"""Download a file from a url and place it in root.
Args:
url (str): URL to download file from
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the basename of the URL
md5 (str, optional): MD5 checksum of the download. If None, do not check
"""
from six.moves import urllib
root = os.path.expanduser(root)
if not filename:
filename = os.path.basename(url)
fpath = os.path.join(root, filename)
makedir_exist_ok(root)
# downloads file
if check_integrity(fpath, md5):
print('Using downloaded and verified file: ' + fpath)
else:
try:
print('Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(
url, fpath,
reporthook=gen_bar_updater()
)
except (urllib.error.URLError, IOError) as e:
if url[:5] == 'https':
url = url.replace('https:', 'http:')
print('Failed download. Trying https -> http instead.'
' Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(
url, fpath,
reporthook=gen_bar_updater()
)
else:
raise e
def list_dir(root, prefix=False):
"""List all directories at a given root
Args:
root (str): Path to directory whose folders need to be listed
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the directories found
"""
root = os.path.expanduser(root)
directories = list(
filter(
lambda p: os.path.isdir(os.path.join(root, p)),
os.listdir(root)
)
)
if prefix is True:
directories = [os.path.join(root, d) for d in directories]
return directories
def list_files(root, suffix, prefix=False):
"""List all files ending with a suffix at a given root
Args:
root (str): Path to directory whose folders need to be listed
suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
It uses the Python "str.endswith" method and is passed directly
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the files found
"""
root = os.path.expanduser(root)
files = list(
filter(
lambda p: os.path.isfile(os.path.join(root, p)) and p.endswith(suffix),
os.listdir(root)
)
)
if prefix is True:
files = [os.path.join(root, d) for d in files]
return files
def download_file_from_google_drive(file_id, root, filename=None, md5=None):
"""Download a Google Drive file from and place it in root.
Args:
file_id (str): id of file to be downloaded
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the id of the file.
md5 (str, optional): MD5 checksum of the download. If None, do not check
"""
# Based on https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url
import requests
url = "https://docs.google.com/uc?export=download"
root = os.path.expanduser(root)
if not filename:
filename = file_id
fpath = os.path.join(root, filename)
makedir_exist_ok(root)
if os.path.isfile(fpath) and check_integrity(fpath, md5):
print('Using downloaded and verified file: ' + fpath)
else:
session = requests.Session()
response = session.get(url, params={'id': file_id}, stream=True)
token = _get_confirm_token(response)
if token:
params = {'id': file_id, 'confirm': token}
response = session.get(url, params=params, stream=True)
_save_response_content(response, fpath)
def _get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def _save_response_content(response, destination, chunk_size=32768):
with open(destination, "wb") as f:
pbar = tqdm(total=None)
progress = 0
for chunk in response.iter_content(chunk_size):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
progress += len(chunk)
pbar.update(progress - pbar.n)
pbar.close()
def _is_tar(filename):
return filename.endswith(".tar")
def _is_targz(filename):
return filename.endswith(".tar.gz")
def _is_gzip(filename):
return filename.endswith(".gz") and not filename.endswith(".tar.gz")
def _is_zip(filename):
return filename.endswith(".zip")
def extract_archive(from_path, to_path=None, remove_finished=False):
if to_path is None:
to_path = os.path.dirname(from_path)
if _is_tar(from_path):
with tarfile.open(from_path, 'r') as tar:
tar.extractall(path=to_path)
elif _is_targz(from_path):
with tarfile.open(from_path, 'r:gz') as tar:
tar.extractall(path=to_path)
elif _is_gzip(from_path):
to_path = os.path.join(to_path, os.path.splitext(os.path.basename(from_path))[0])
with open(to_path, "wb") as out_f, gzip.GzipFile(from_path) as zip_f:
out_f.write(zip_f.read())
elif _is_zip(from_path):
with zipfile.ZipFile(from_path, 'r') as z:
z.extractall(to_path)
else:
raise ValueError("Extraction of {} not supported".format(from_path))
if remove_finished:
os.remove(from_path)
def download_and_extract_archive(url, download_root, extract_root=None, filename=None,
md5=None, remove_finished=False):
download_root = os.path.expanduser(download_root)
if extract_root is None:
extract_root = download_root
if not filename:
filename = os.path.basename(url)
download_url(url, download_root, filename, md5)
archive = os.path.join(download_root, filename)
print("Extracting {} to {}".format(archive, extract_root))
extract_archive(archive, extract_root, remove_finished)
def iterable_to_str(iterable):
return "'" + "', '".join([str(item) for item in iterable]) + "'"
def verify_str_arg(value, arg=None, valid_values=None, custom_msg=None):
if not isinstance(value, torch._six.string_classes):
if arg is None:
msg = "Expected type str, but got type {type}."
else:
msg = "Expected type str for argument {arg}, but got type {type}."
msg = msg.format(type=type(value), arg=arg)
raise ValueError(msg)
if valid_values is None:
return value
if value not in valid_values:
if custom_msg is not None:
msg = custom_msg
else:
msg = ("Unknown value '{value}' for argument {arg}. "
"Valid values are {{{valid_values}}}.")
msg = msg.format(value=value, arg=arg,
valid_values=iterable_to_str(valid_values))
raise ValueError(msg)
return value
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
0777d5132fd348e362404fcaf64c9b94d87dde03 | 07c0500edd527522036760a8cadd4e62e5eb1dcb | /python/union.py | eddcceb5df8731716bf71ff7d29e74468f32ad9c | [] | no_license | amarsyelane/pythonprograms | 261323dc453b42d3ba21ae8496d55bfded130dbd | fffc1a0edfd25577beb84e64059ff3d38bc91c35 | refs/heads/master | 2020-06-24T23:37:09.630903 | 2019-07-27T16:47:00 | 2019-07-27T16:47:00 | 199,127,095 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 165 | py |
def union_list(list1,list2):
list3 = list(set(list1) | set(list2))
return list3
list1 = [1,2,3,4,5,6]
list2 = [7,1,8,2,9,3]
print(union_list(list1,list2)) | [
"root@localhost.localdomain"
] | root@localhost.localdomain |
9db6d4b5e220c435d6b8050e7284d665f189d8ca | b7054c7dc39eeb79aa4aecb77a8de222400b19a7 | /flask_project/d116/s9day116/s4.py | f36a629ef052544a1412614a0f0fd9f2f349f15c | [] | no_license | csuxh/python_fullstack | 89027133c7f9585931455a6a85a24faf41792379 | f78571976b3bef104309e95304892fdb89739d9e | refs/heads/master | 2023-05-11T09:36:40.482788 | 2019-06-12T14:21:26 | 2019-06-12T14:21:26 | 145,090,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,285 | py | from flask import Flask,url_for
app = Flask(__name__)
# 步骤一:定制类
from werkzeug.routing import BaseConverter
class RegexConverter(BaseConverter):
"""
自定义URL匹配正则表达式
"""
def __init__(self, map, regex):
super(RegexConverter, self).__init__(map)
self.regex = regex
def to_python(self, value):
"""
路由匹配时,匹配成功后传递给视图函数中参数的值
:param value:
:return:
"""
return int(value)
def to_url(self, value):
"""
使用url_for反向生成URL时,传递的参数经过该方法处理,返回的值用于生成URL中的参数
:param value:
:return:
"""
val = super(RegexConverter, self).to_url(value)
return val
# 步骤二:添加到转换器
app.url_map.converters['reg'] = RegexConverter
"""
1. 用户发送请求
2. flask内部进行正则匹配
3. 调用to_python(正则匹配的结果)方法
4. to_python方法的返回值会交给视图函数的参数
"""
# 步骤三:使用自定义正则
@app.route('/index/<reg("\d+"):nid>')
def index(nid):
print(nid,type(nid))
print(url_for('index',nid=987))
return "index"
if __name__ == '__main__':
app.run() | [
"csuxh@foxmail.com"
] | csuxh@foxmail.com |
bd813f0538b2818cab9dd99ab5829695630238aa | 51a065360b8b2f4a8cde43842a357b729ce6931a | /computer/RoadLaneDetection/lanes_manager.py | badbede857f973f4e3e348ce76a0dbb0a87a2a4d | [] | no_license | muratory/perception | 8fd95c1a865c5f2317c61110906856fd1eaa9f2d | 23b03a3d33d526318e85748d978c48782298fd4f | refs/heads/master | 2021-05-15T22:39:28.659715 | 2018-06-17T16:43:44 | 2018-06-17T16:43:44 | 106,734,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,598 | py | import sys
import numpy as np
from matplotlib import pyplot as plt
class LaneProba():
"""
Stores the foreground, background and width probabilities for a Lane
"""
def __init__(self, proba=0.1):
self.set_proba(proba)
def decrease(self, ratio=0.8):
self.foreground_position_proba *= ratio
self.background_position_proba *= ratio
self.width_proba *= ratio
def increase(self, ratio=1.2):
self.foreground_position_proba *= ratio
self.background_position_proba *= ratio
self.width_proba *= ratio
# cap proba to 1.0 max
self.foreground_position_proba = min(1.0, self.foreground_position_proba)
self.background_position_proba = min(1.0, self.background_position_proba)
self.width_proba = min(1.0, self.width_proba)
def set_proba(self, proba):
self.foreground_position_proba = proba
self.background_position_proba = proba
self.width_proba = proba
def get_values(self):
return np.array([self.foreground_position_proba, self.background_position_proba, self.width_proba])
def __str__(self):
return "probas:{} {} {}".format(self.foreground_position_proba,
self.background_position_proba,
self.width_proba)
class Lane():
def __init__(self, lane_left, lane_right):
# lane: [nb_points, yx]
foreground_left = lane_left[-1]
foreground_right = lane_right[-1]
background_left = lane_left[0]
background_right = lane_right[0]
self.foreground_position = (foreground_left + foreground_right) / 2
self.background_position = (background_left + background_right) / 2
self.width = foreground_right - foreground_left
self.skeleton = lane_left+ (lane_right - lane_left)/2.0
self.probas = None
# Check validity of the lane
background_width_invalid = (background_right - background_left) > 2*self.width
line_crossing = np.any(lane_right < lane_left)
width_too_small = np.mean(lane_right - lane_left) < 5
self.is_invalid = background_width_invalid or line_crossing or width_too_small
#if self.is_invalid:
# print("lane is invalid !")
def get_parameters(self):
return np.array([self.foreground_position, self.background_position, self.width])
def set_skeleton(self, sk):
self.skeleton = sk
self.foreground_position = int(sk[-1])
self.background_position = int(sk[0])
def set_width(self, w):
self.width = w
def set_probas(self, lane_probas):
self.probas = lane_probas
def distance(self, lane):
foreground_distance = np.abs(self.foreground_position - lane.foreground_position)
mean_distance = np.abs(np.mean(self.skeleton - lane.skeleton))
weight_foreground = 0.75
return foreground_distance * weight_foreground + mean_distance * (1-weight_foreground)
def __eq__(self, other):
return np.array_equal(self.skeleton, other.skeleton)
def __str__(self):
return "pos:{} back_pos:{} width:{}".format(self.foreground_position, self.background_position,
self.width)
class RoadState():
def __init__(self, lanes):
self.current_lanes = []
for lane in lanes:
if not lane.is_invalid:
self.current_lanes.append(lane)
# Add default probas
for lane in self.current_lanes:
lane.set_probas(LaneProba(0.8))
def get_probas(self):
nb_lanes = len(self.current_lanes)
probas = np.zeros((nb_lanes, 3))
for i in range(nb_lanes):
lane = self.current_lanes[i]
if lane is None:
probas[i] = 0
else:
probas[i] = lane.probas.get_values()
return probas
def compute_proba(self, value, comparison):
if comparison == 0:
return 0
ratio = abs(value / comparison)
if ratio < 0.1:
proba = 1.0
elif ratio < 0.2:
proba = 0.9
elif ratio < 0.5:
proba = 0.8
elif ratio < 1.0:
proba = 0.5
else:
proba = 0.3
return proba
def compute_probas(self, parameters, comparison):
probas = [self.compute_proba(value, comparison) for value in parameters]
return np.array(probas)
def compute_changes(self, new_state):
nb_lanes = len(self.current_lanes)
all_diffs_parameters = np.zeros((nb_lanes, 3)) # fore, back, width
new_probas = np.zeros((nb_lanes, 3))
for i in range(nb_lanes):
old_lane = self.current_lanes[i]
new_lane = new_state.current_lanes[i]
if new_lane is None:
new_state.current_lanes[i] = old_lane
all_diffs_parameters[i] = 0
new_probas[i] = 0.2
else:
diffs_parameters = new_lane.get_parameters() - old_lane.get_parameters()
diffs_skeleton = new_lane.skeleton - old_lane.skeleton
distance_parameters = np.abs(diffs_parameters)
distance_skeletons = np.abs(diffs_skeleton)
probas_parameters = self.compute_probas(distance_parameters, old_lane.width)
all_diffs_parameters[i] = diffs_parameters
new_probas[i] = probas_parameters
return all_diffs_parameters, new_probas
def __str__(self):
state_str_list = []
for lane in self.current_lanes:
if lane is None:
state_str_list.append("None")
else:
state_str_list.append(str(lane.foreground_position) + '/' + str(lane.background_position))
return " ".join(state_str_list)
class LanesManager():
def update_lines(self, lines_list):
raise NotImplementedError
class SimpleLanesManager(LanesManager):
def __init__(self, record=False):
self.record = record
self.current_road_state = None
def update_lines(self, lines_list):
# print("")
if self.record:
f_handle = file("output.txt", 'a')
np.save(f_handle, lines_list)
f_handle.close()
# New lanes
new_lanes = []
for left, right in list(zip(lines_list, lines_list[1:])):
new_lanes.append(Lane(left, right))
if self.current_road_state == None:
# init
self.current_road_state = RoadState(new_lanes)
else:
# New road state
new_road_state = RoadState(new_lanes)
'''
print()
print("Begin:")
print("current state: ", str(self.current_road_state))
print("new state: ", str(new_road_state))
print("current probas:", self.current_road_state.get_probas())
print("new probas:", new_road_state.get_probas())
'''
SimpleLanesManager.prepare_for_merge(self.current_road_state, new_road_state)
SimpleLanesManager.fix_missing_lanes(self.current_road_state, new_road_state)
'''
print("Before merge:")
print("current state: ", str(self.current_road_state))
print("new state: ", str(new_road_state))
print("current probas:", self.current_road_state.get_probas())
print("new probas:", new_road_state.get_probas())
'''
SimpleLanesManager.merge(self.current_road_state, new_road_state)
'''
print("End:")
print("final state: ", str(self.current_road_state))
print("final probas:", self.current_road_state.get_probas())
'''
    @staticmethod
    def prepare_for_merge(current_road_state, new_road_state):
        """
        Prepare both models before merge operation:
        - Pair old lanes with new lanes
        - Insert Fake lanes in both model when the number of lanes do not match, in order to compare the lanes
        easily in the next steps
        :param current_road_state: current model
        :param new_road_state: new candidate model
        :return: nothing, both models are updated directly and should now have the same number of lanes. Missing lanes
        in each model are tagged 'None'
        """
        new_lanes = new_road_state.current_lanes
        # 1. Find a mapping between the lanes of the old model and the new lanes
        # Get distance table: distance between old and new lanes
        max_number_of_lanes = max(len(current_road_state.current_lanes), len(new_lanes))
        distance_table = np.ones([max_number_of_lanes, max_number_of_lanes]) * 200  # 200 == infinity
        for i in range(len(current_road_state.current_lanes)):
            for j in range(len(new_lanes)):
                distance_table[i, j] = new_lanes[j].distance(current_road_state.current_lanes[i])
        #print('Distance_table:\n{}\n'.format(distance_table.astype(np.int)))
        # reorder distance table in f() of min distance for each new lane:
        # new lanes with the closest old counterpart are matched first
        new_lanes_minimum_distances = np.min(distance_table, axis=0)
        index_new_lanes_discover = np.argsort(new_lanes_minimum_distances)
        # compute the mapping old lane <-> new lane (greedy nearest-neighbour)
        new_lanes_2_current_lanes_mapping = np.ones(max_number_of_lanes, dtype=int) * -1
        for j in index_new_lanes_discover:
            index_distance_min = np.argmin(distance_table[:, j])
            new_lanes_2_current_lanes_mapping[j] = index_distance_min
            # claim this old lane so it cannot be matched to a second new lane
            distance_table[index_distance_min, :] = 1000
        # 2. Fix consistency issues between old/new model: each model must have the same number of lanes so that
        # it is easy to compare them. This step adds 'None' elements before, after or between lanes inside the
        # old model and new model when needed (i.e. missing lanes are now tagged as 'None').
        for j in index_new_lanes_discover:
            corresponding_current_lane = new_lanes_2_current_lanes_mapping[j]
            if corresponding_current_lane >= len(current_road_state.current_lanes):
                # New lane: insert a placeholder in the OLD model at the
                # position matching the new lane's lateral position
                inserted = False
                for i in range(len(current_road_state.current_lanes)):
                    if new_lanes[j].foreground_position < current_road_state.current_lanes[i].foreground_position:
                        current_road_state.current_lanes.insert(i, None)
                        inserted = True
                        break
                # could not find a place on the left or middle, add on the right
                if not inserted:
                    current_road_state.current_lanes.append(None)
            elif j >= len(new_lanes):
                # lane disappeared
                # lane on the left side disappeared: insert a placeholder in
                # the NEW model where the vanished old lane used to be
                inserted = False
                current_lane = current_road_state.current_lanes[corresponding_current_lane]
                for i in range(len(new_road_state.current_lanes)):
                    new_lane = new_road_state.current_lanes[i]
                    if new_lane is None:
                        continue
                    if current_lane.foreground_position < new_lane.foreground_position:
                        new_road_state.current_lanes.insert(i, None)
                        inserted = True
                        break
                # could not find a place on the left or middle, add on the right
                if not inserted:
                    new_road_state.current_lanes.append(None)
            elif corresponding_current_lane == -1:
                # -1 : new lane didn't exist previously
                raise Exception("Should not happen")
            else:
                # Nothing to do
                pass
# add missing lanes in old/new model
    @staticmethod
    def fix_missing_lanes(current_road_state, new_road_state):
        """
        Update the missing lanes tagged as 'None' by previous step.
        - When a new lane appears in the new model, add it to the old model as well (so that we can compare models)
        - When a lane disappears in the new model, add the old lane in the new model but with a lower probability
        :param current_road_state: current model
        :param new_road_state: new candidate model
        :return: nothing: both models are updated directly
        """
        # prepare_for_merge() guarantees both lane lists have equal length
        assert len(current_road_state.current_lanes) == len(new_road_state.current_lanes)
        for i in range(len(current_road_state.current_lanes)):
            lane_before, lane_after = current_road_state.current_lanes[i], new_road_state.current_lanes[i]
            assert not ((lane_before is None) and (lane_after is None))  # one lane must exist
            if lane_before is None:
                # New lane appeared.
                # Add it in the old model with a fresh (default) probability
                lane_after.set_probas(LaneProba())
                current_road_state.current_lanes[i] = lane_after
            elif lane_after is None:
                # Lane disappeared. Decrease proba and use for new model
                lane_before.probas.decrease()
                #new_road_state.current_lanes[i] = lane_before
    @staticmethod
    def merge(current_road_state, new_road_state):
        """
        Merge 2 the new road model into the current road model.
        :param current_road_state: current model
        :param new_road_state: new candidate model
        :return: merged model
        """
        old_probas = current_road_state.get_probas()
        # Compute the differences between old road state and the new one,
        # and the probabilities for each difference
        diffs, new_probas = current_road_state.compute_changes(new_road_state)
        # compute best diff
        #print("old probas:", old_probas)
        #print("new probas:", new_probas)
        skeletons_before = np.array([lane.skeleton for lane in current_road_state.current_lanes])
        skeletons_after = np.array([lane.skeleton for lane in new_road_state.current_lanes])
        skeletons_diffs = skeletons_after - skeletons_before
        # NOTE: this is the per-column MEDIAN of the skeleton changes,
        # despite the "mean_" variable names below
        mean_skeleton_diffs = np.median(skeletons_diffs, axis=0)
        mean_skeleton_after = skeletons_before + mean_skeleton_diffs
        for i in range(len(current_road_state.current_lanes)):
            lane_before, lane_after = current_road_state.current_lanes[i], new_road_state.current_lanes[i]
            sk1 = lane_before.skeleton
            p1 = np.mean(old_probas[i])
            p2 = np.mean(new_probas[i])
            # weight given to the OLD skeleton/width in the blend below
            current_value_proba = p1 / (p1 + p2)
            if p1 <= 0.25:
                # probability for this skeleton is too low : do not use it, but instead compute
                # the mean change from the other skeletons in the road and apply this orientation to the
                # current skeleton
                current_sk = mean_skeleton_after[i]
                other_sks = mean_skeleton_after[np.arange(len(mean_skeleton_after)) != i]
                dif = current_sk - other_sks
                dif = dif - dif[:, -1].reshape(-1, 1)
                mean_dif = np.mean(dif, axis=0)
                sk2 = lane_before.skeleton - mean_dif
                # debug:
                #sk2 = mean_skeleton_after[i]
            else:
                sk2 = lane_after.skeleton
            # Compute new skeleton, between old/new skeleton weighted by the current proba
            new_sk = sk1 * current_value_proba + sk2 * (1.0 - current_value_proba)
            current_road_state.current_lanes[i].set_skeleton(new_sk)
            # Same for the width
            new_width = lane_before.width * current_value_proba + lane_after.width * (1.0 - current_value_proba)
            current_road_state.current_lanes[i].set_width(new_width)
            # Update the proba of the lane
            debug = False
            if not debug:
                current_road_state.current_lanes[i].probas.set_proba(p2)
            else:
                # manually increase/decrease probas step by step
                if p1 > p2:
                    # previous probability was higher than the new one : decrease the proba for this lane
                    current_road_state.current_lanes[i].probas.decrease()
                elif p2 > p1:
                    # new proba is better : increase proba for this lane
                    current_road_state.current_lanes[i].probas.increase()
| [
"pierre.muratory@gmail.com"
] | pierre.muratory@gmail.com |
026f639e2ed9a7f66dd6b76d9b08614d82558dcf | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_capabilities.py | b55c60ffb16290122194860366c1e164cb963345 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py |
from xai.brain.wordbase.nouns._capability import _CAPABILITY
#class header
class _CAPABILITIES(_CAPABILITY, ):
    """Plural word entry for "capabilities", derived from _CAPABILITY."""

    def __init__(self,):
        _CAPABILITY.__init__(self)
        # Word-entry metadata consumed by the wordbase lookup machinery.
        self.jsondata = {}
        self.basic = "capability"
        self.specie = 'nouns'
        self.name = "CAPABILITIES"
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
7bb1a27b99e94f1e6998524a589ab4c12d7a6f47 | 7142c3941481e661075154d714a29d5e283a3074 | /AddingItemsToTheSet1.py | afcece78482c26ddc68528e02fb846b611a5ffc8 | [] | no_license | nirajan5/Demo | 5642a9669fedcca47b0304ac423c0b3e6333b8e2 | 2451875bf5698cd38af69baa117c14099951bc9f | refs/heads/master | 2023-07-27T17:04:03.689673 | 2021-09-15T11:14:25 | 2021-09-15T11:14:25 | 406,732,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | Months = set(["January","February", "March", "April", "May", "June"])
print("\nprinting the original set ... ")
print(Months)
print("\nAdding other months to the set...");
Months.add("July");
Months.add ("August");
print("\nPrinting the modified set...");
print(Months)
print("\nlooping through the set elements ... ")
for i in Months:
print(i)
| [
"jhanirajan5@gmail.com"
] | jhanirajan5@gmail.com |
d9265b72e4b21399cff19be1d8ba9b2a8d85c546 | 94bd78e63de94859eb076e52683f73f6ea91eae3 | /726.py | 66ae50ab0507c371d78444f0c6784ac873ce6cc8 | [] | no_license | MadSkittles/leetcode | 70598c1c861a8ff5d2f7c921a311307d55770acc | 817bbb73dfe095b9c9358dc459ba6605a2a9a256 | refs/heads/master | 2021-11-30T04:56:02.432749 | 2021-11-12T03:28:47 | 2021-11-12T03:28:47 | 123,558,601 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,240 | py | class Solution:
def countOfAtoms(self, formula):
res = ''
c = self.f(formula)
for element in sorted(c.keys()):
res += element + (str(c[element]) if c[element] > 1 else '')
return res
def f(self, formula: str):
from collections import Counter
if not formula:
return Counter()
i = formula.find('(')
if i >= 0:
j, cnt = i + 1, 1
while cnt > 0:
cnt += {'(': 1, ')': -1}.get(formula[j], 0)
j += 1
k = j + 1
while k < len(formula) and formula[k].isdigit():
k += 1
time = formula[j:k]
time = int(time) if time else 1
tmp = self.f(formula[i + 1:j - 1])
for e in tmp:
tmp[e] *= time
return self.f(formula[:i]) + tmp + self.f(formula[k:])
else:
res = Counter()
element = ''
index = 0
while index < len(formula):
if element and (formula[index].isdigit() or formula[index].isupper()):
if formula[index].isupper():
res[element] += 1
element = formula[index]
else:
k = index
while index < len(formula) and formula[index].isdigit():
index += 1
time = int(formula[k:index])
res[element] += time
element = formula[index] if index < len(formula) else ''
else:
element += formula[index]
index += 1
if element:
res[element] += 1
return res
if __name__ == '__main__':
    # Smoke test: canonicalize a deliberately large, deeply nested formula.
    solution = Solution()
    print(solution.countOfAtoms(
        "(((U42Se42Fe10Mc31Rh49Pu49Sb49)49V39Tm50Zr44Og6)33((W2Ga48Tm14Eu46Mt12)23(RuRnMn11)7(Yb15Lu34Ra19CuTb2)47(Md38BhCu48Db15Hf12Ir40)7CdNi21(Db40Zr24Tc27SrBk46Es41DsI37Np9Lu16)46(Zn49Ho19RhClF9Tb30SiCuYb16)15)37(Cr48(Ni31)25(La8Ti17Rn6Ce35)36(Sg42Ts32Ca)37Tl6Nb47Rh32NdGa18Cm10Pt49(Ar37RuSb30Cm32Rf28B39Re7F36In19Zn50)46)38(Rh19Md23No22PoTl35Pd35Hg)41)50"))
| [
"noreply@github.com"
] | MadSkittles.noreply@github.com |
b20577618c5c7056d9ef2c7eb75c7e3783a2ada2 | b453635bb1a1b767179250ddf5200dd5982752e3 | /apps/home/urls.py | c0bb3f878ae81bbf037a8a83efc38440cbcde5c5 | [
"Unlicense"
] | permissive | orhan1616/django-russian-ecommerce | 21e5c52f13fea5e7bb2f4fac2e4b9657729b33a5 | 9e64a0500ae529fc81e6ed2fb335b33d0ae5354a | refs/heads/main | 2023-06-17T22:06:39.199411 | 2021-07-07T11:54:21 | 2021-07-07T11:54:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | # apps/home/urls.py
# Django modules
from django.urls import path
# Django locals
from apps.home import views
urlpatterns = [
    # Landing page.
    path('', views.homepage, name='homepage'),
    # Static "about us" page.
    path('aboutus/', views.aboutuspage, name='aboutuspage'),
    # Contact details / form page.
    path('contactus/', views.contactuspage, name='contactuspage'),
]
| [
"ingafter60@outlook.com"
] | ingafter60@outlook.com |
13173968ed6a9c4b62bcd9f9f66a07bacb5f1b35 | e1b8fb9a5500516f28d3d7e9a5f259c49ef35f14 | /top/api/rest/SkusCustomGetRequest.py | 70aeced78d8d92d2a43572c618d79dcdc9b6eee7 | [] | no_license | htom78/taobao_comet_py | 9224dbca1a413a54bcc5569873e4c7a9fc9ba059 | ad8b2e983a14d3ab7665244449f79dd72f390815 | refs/heads/master | 2020-05-17T10:47:28.369191 | 2013-08-27T08:50:59 | 2013-08-27T08:50:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | '''
Created by auto_sdk on 2013-06-16 16:36:02
'''
from top.api.base import RestApi
class SkusCustomGetRequest(RestApi):
    """Request object for the taobao.skus.custom.get TOP API call."""

    def __init__(self,domain='gw.api.taobao.com',port=80):
        RestApi.__init__(self, domain, port)
        # Request parameters, filled in by the caller before dispatch.
        self.outer_id = None
        self.fields = None

    def getapiname(self):
        """Return the TOP API method name for this request."""
        return 'taobao.skus.custom.get'
| [
"tomhu@ekupeng.com"
] | tomhu@ekupeng.com |
9953ddae4f7a966b2c28335395b9fe7a270804e1 | 653a3d9d66f3d359083cb588fc7c9ece8bb48417 | /test/runtime/frontend_test/onnx_test/defs_test/math_test/sqrt_test.py | 9374a12650ad590ad76c5896649b7ba2359b530a | [
"Zlib",
"MIT"
] | permissive | leonskim/webdnn | fec510254b15f3dec00f5bed8f498737b372e470 | f97c798c9a659fe953f9dc8c8537b8917e4be7a2 | refs/heads/master | 2020-04-15T18:42:43.632244 | 2019-01-10T10:07:18 | 2019-01-10T10:07:18 | 164,921,764 | 0 | 0 | NOASSERTION | 2019-01-09T19:07:35 | 2019-01-09T19:07:30 | Python | UTF-8 | Python | false | false | 820 | py | import numpy as np
from test.runtime.frontend_test.onnx_test.util import make_node, make_tensor_value_info, make_model
from test.util import wrap_template, generate_kernel_test_case
from webdnn.frontend.onnx import ONNXConverter
@wrap_template
def template(x_shape, description: str = ""):
    """Build an ONNX Sqrt graph and register a WebDNN kernel test case."""
    # Keep inputs >= 1 so sqrt stays well away from 0.
    vx = np.random.rand(*x_shape) + 1.0
    vy = np.sqrt(vx)

    # Declare the ONNX model: y = Sqrt(x).
    x = make_tensor_value_info("x", vx.shape)
    y = make_tensor_value_info("y", vy.shape)
    model = make_model([make_node("Sqrt", ["x"], ["y"])], [x], [y])

    graph = ONNXConverter().convert(model)
    generate_kernel_test_case(
        description=f"[ONNX] Sqrt {description}",
        graph=graph,
        inputs={graph.inputs[0]: vx},
        expected={graph.outputs[0]: vy}
    )
def test():
    # Single smoke case over a 4-D tensor shape.
    template(x_shape=[2, 3, 4, 5])
| [
"y.kikura@gmail.com"
] | y.kikura@gmail.com |
b9d91097495c60f324c7dcc2ec855d4b5c1f5550 | 9acfe8ea905a7613b232cf9e512311289d4e5e27 | /CodeForce/Round4_2/task3.py | 8513d85beef58c02028922218a82b9bb0ca9bdec | [] | no_license | antofik/Python | e790ecb61babb23fad198ba996f24b31fdff9f39 | bb6ab6cd87d7bfb1d6efca6623b4b00c387313a8 | refs/heads/master | 2020-12-24T14:27:39.341992 | 2014-03-09T07:32:07 | 2014-03-09T07:32:07 | 17,551,107 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,426 | py | # coding=utf-8
"""
C. Система регистрации
ограничение по времени на тест:5 seconds
ограничение по памяти на тест:64 megabytes
ввод:standard input
вывод:standard output
В скором времени в Берляндии откроется новая почтовая служба "Берляндеск". Администрация сайта хочет запустить свой проект как можно быстрее, поэтому они попросили Вас о помощи. Вам предлагается реализовать прототип системы регистрации сайта.
Система должна работать по следующему принципу. Каждый раз, когда новый пользователь хочет зарегистрироваться, он посылает системе запрос name со своим именем. Если данное имя не содержится в базе данных системы, то оно заносится туда и пользователю возвращается ответ OK, подтверждающий успешную регистрацию. Если же на сайте уже присутствует пользователь с именем name, то система формирует новое имя и выдает его пользователю в качестве подсказки, при этом подсказка также добавляется в базу данных. Новое имя формируется по следующему правилу. К name последовательно приписываются числа, начиная с единицы (name1, name2, ...), и среди них находят такое наименьшее i, что namei не содержится в базе данных сайта.
Входные данные
В первой строке входных данных задано число n (1 ≤ n ≤ 105). Следующие n строк содержат запросы к системе. Каждый запрос представляет собой непустую строку длиной не более 32 символов, состоящую только из строчных букв латинского алфавита.
Выходные данные
В выходных данных должно содержаться n строк — ответы системы на запросы: ОК в случае успешной регистрации, или подсказку с новым именем, если запрашиваемое уже занято.
"""
from collections import defaultdict
from sys import stdin, exit
def task():
    # Registration-system kata (Python 2): for each requested name print
    # "OK" if it is free, otherwise the first free variant nameN.
    # The first input line holds the request count; it is unused because
    # we simply consume the remaining lines.
    stdin.readline()
    names = map(str.strip, stdin.readlines())
    response = []
    # d[name] == 0 means unseen; otherwise it is the last suffix probed.
    d = defaultdict(int)
    for name in names:
        i = d[name]
        if i == 0:
            # First occurrence: register the name as-is.
            response.append("OK")
            d[name] += 1
        else:
            # Name taken: probe name1, name2, ... starting from the last
            # suffix handed out for this base name.
            while True:
                kandidate = '%s%s' % (name, i)
                if d[kandidate] == 0:
                    d[kandidate] += 1
                    d[name] = i
                    response.append(kandidate)
                    break
                i += 1
    for r in response:
        print r
task() | [
"antofik@gmail.com"
] | antofik@gmail.com |
1f7cfd2a60fe8870c521f054e9f836e585569a76 | 498493d07412e793fa9051d32e43330cb838b284 | /ogc_plugins_charm/api.py | 7c24123ee413cedd7448a04a05cba1f8be5ed1f4 | [] | no_license | adam-stokes/ogc-plugins-charmstore | 632ce632a40fd989d18c2acf886f65a482deba7a | 83bf4b147afce6c29529a3cf378d2501aff35bfd | refs/heads/master | 2022-02-17T13:33:25.095373 | 2019-08-07T19:37:33 | 2019-08-07T19:37:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,980 | py | import os
from glob import glob
from pathlib import Path
from pprint import pformat
import click
import sh
import yaml
import time
import uuid
from ogc.state import app
from ogc.spec import SpecProcessException
class CharmEnv:
    """Filesystem locations for a charm build, taken from the environment.

    Reads CHARM_BUILD_DIR, CHARM_LAYERS_DIR, CHARM_INTERFACES_DIR and
    WORKSPACE; raises SpecProcessException when any of them is unset.
    """

    def __init__(self):
        get = os.environ.get
        try:
            # Path(None) raises TypeError, which signals a missing variable.
            self.build_dir = Path(get("CHARM_BUILD_DIR"))
            self.layers_dir = Path(get("CHARM_LAYERS_DIR"))
            self.interfaces_dir = Path(get("CHARM_INTERFACES_DIR"))
            self.tmp_dir = Path(get("WORKSPACE"))
        except TypeError:
            raise SpecProcessException(
                "CHARM_BUILD_DIR, CHARM_LAYERS_DIR, CHARM_INTERFACES_DIR, WORKSPACE: "
                "Unable to find some or all of these charm build environment variables."
            )
charm_sh = sh.charm.bake(_env=app.env.copy())
def push(repo_path, out_path, charm_entity, is_bundle=False):
    """ Pushes a built charm to Charmstore

    :param repo_path: checkout of the charm source (used for the git sha)
    :param out_path: built charm/bundle directory to upload
    :param charm_entity: charmstore entity id, e.g. cs:~ns/name
    :param is_bundle: bundles carry no oci-image resources, so the docker
        pull/attach step is skipped for them
    """
    app.log.debug(f"vcs: {repo_path} build-path: {out_path} {charm_entity}")
    git_commit = sh.git("rev-parse", "HEAD", _cwd=repo_path)
    git_commit = git_commit.stdout.decode().strip()
    app.log.debug(f"grabbing git revision {git_commit}")
    resource_args = []
    if not is_bundle:
        # Build a list of `oci-image` resources that have `upstream-source` defined,
        # which is added for this logic to work.
        resources = yaml.safe_load(
            Path(out_path).joinpath("metadata.yaml").read_text()
        ).get("resources", {})
        images = {
            name: details["upstream-source"]
            for name, details in resources.items()
            if details["type"] == "oci-image" and details.get("upstream-source")
        }
        app.log.debug(f"Found {len(images)} oci-image resources:\n{pformat(images)}\n")
        for image in images.values():
            app.log.debug(f"Pulling {image}...")
            sh.docker.pull(image)
        # Convert the image names and tags to `--resource foo=bar` format
        # for passing to `charm push`.
        resource_args = [
            arg
            for name, image in images.items()
            for arg in ("--resource", f"{name}={image}")
        ]
    out = charm_sh.push(out_path, charm_entity, *resource_args)
    app.log.debug(f"Charm push returned: {out}")
    # Output includes lots of ansi escape sequences from the docker push,
    # and we only care about the first line, which contains the url as yaml.
    out = yaml.safe_load(out.stdout.decode().strip().splitlines()[0])
    # Record the source revision on the uploaded entity for traceability.
    app.log.debug(f"Setting {out['url']} metadata: {git_commit}")
    charm_sh.set(out["url"], "commit={}".format(git_commit))
def pull_layers(layer_index, layer_list, layer_branch, retries=15, timeout=60):
    """Download every layer/interface in *layer_list* and pin it to *layer_branch*.

    :param layer_index: URL of the layer index used to resolve names
    :param layer_list: path to a YAML file listing layer/interface entries
    :param layer_branch: git branch to check out in each downloaded repo
    :param retries: maximum download attempts per layer
    :param timeout: seconds to sleep between attempts
    :raises SpecProcessException: when a layer cannot be downloaded after
        all retries, or an unknown entry type is encountered

    The original implementation shared a single attempt counter across all
    layers (so later layers got fewer or no retries) and made only one
    unguarded recovery attempt; each layer now gets its own retry loop.
    """
    charm_env = CharmEnv()
    layer_list = yaml.safe_load(Path(layer_list).read_text(encoding="utf8"))
    for layer_map in layer_list:
        layer_name = list(layer_map.keys())[0]
        if layer_name == "layer:index":
            continue
        app.log.debug(layer_name)

        def download():
            for line in charm_sh(
                "pull-source",
                "-v",
                "-i",
                layer_index,
                layer_name,
                _iter=True,
                _bg_exc=False,
            ):
                app.log.debug(f" -- {line.strip()}")

        # Retry each layer independently so one flaky layer does not
        # consume the retry budget of the others.
        for attempt in range(1, retries + 1):
            try:
                download()
                break
            except TypeError as e:
                raise SpecProcessException(f"Could not download charm: {e}")
            except sh.ErrorReturnCode_1 as e:
                app.log.debug(f"Problem: {e}, retrying [{attempt}/{retries}]")
                if attempt == retries:
                    raise SpecProcessException(
                        f"Could not download charm after {retries} retries."
                    )
                time.sleep(timeout)

        # Pin the freshly pulled source to the requested branch.
        ltype, name = layer_name.split(":")
        if ltype == "layer":
            sh.git.checkout("-f", layer_branch, _cwd=str(charm_env.layers_dir / name))
        elif ltype == "interface":
            sh.git.checkout(
                "-f", layer_branch, _cwd=str(charm_env.interfaces_dir / name)
            )
        else:
            raise SpecProcessException(f"Unknown layer/interface: {layer_name}")
def promote(charm_list, filter_by_tag, from_channel="unpublished", to_channel="edge"):
    """Release tag-matching charms from one charmstore channel to another.

    :param charm_list: path to a YAML file of charm definitions
    :param filter_by_tag: only charms with at least one tag in this
        collection are promoted
    :param from_channel: channel whose current revision is released
    :param to_channel: destination channel
    """
    charm_list = yaml.safe_load(Path(charm_list).read_text(encoding="utf8"))
    for charm_map in charm_list:
        for charm_name, charm_opts in charm_map.items():
            if not any(match in filter_by_tag for match in charm_opts["tags"]):
                continue
            charm_entity = f"cs:~{charm_opts['namespace']}/{charm_name}"
            app.log.debug(
                f"Promoting :: {charm_entity:^35} :: from:{from_channel} to: {to_channel}"
            )
            # Resolve the concrete revision currently in from_channel.
            charm_id = charm_sh.show(charm_entity, "--channel", from_channel, "id")
            charm_id = yaml.safe_load(charm_id.stdout.decode())
            resources_args = []
            try:
                # Carry the channel's resource revisions along with the release.
                resources = charm_sh(
                    "list-resources",
                    charm_id["id"]["Id"],
                    channel=from_channel,
                    format="yaml",
                )
                resources = yaml.safe_load(resources.stdout.decode())
                if resources:
                    resources_args = [
                        (
                            "--resource",
                            "{}-{}".format(resource["name"], resource["revision"]),
                        )
                        for resource in resources
                    ]
            except sh.ErrorReturnCode_1:
                app.log.debug("No resources for {}".format(charm_id))
            charm_sh.release(
                charm_id["id"]["Id"], "--channel", to_channel, *resources_args
            )
def resource(charm_entity, channel, builder, out_path, resource_spec):
    """Build resources with a shell script and attach them to a charm.

    :param charm_entity: charmstore entity id, e.g. cs:~ns/name
    :param channel: channel the resources are attached to
    :param builder: shell script that writes resource files into out_path
    :param out_path: scratch directory for built resource files
    :param resource_spec: YAML file mapping entity -> {filename: resource key}
    :raises SpecProcessException: when no spec fragment exists for the
        entity, or attaching a resource keeps failing after 10 retries
    """
    out_path = Path(out_path)
    resource_spec = yaml.safe_load(Path(resource_spec).read_text())
    resource_spec_fragment = resource_spec.get(charm_entity, None)
    app.log.debug(resource_spec_fragment)
    if not resource_spec_fragment:
        raise SpecProcessException("Unable to determine resource spec for entity")
    os.makedirs(str(out_path), exist_ok=True)
    charm_id = charm_sh.show(charm_entity, "--channel", channel, "id")
    charm_id = yaml.safe_load(charm_id.stdout.decode())
    try:
        resources = charm_sh(
            "list-resources", charm_id["id"]["Id"], channel=channel, format="yaml"
        )
    except sh.ErrorReturnCode_1:
        # Charm declares no resources: nothing to build or attach.
        app.log.debug("No resources found for {}".format(charm_id))
        return
    resources = yaml.safe_load(resources.stdout.decode())
    # Run the builder script inside the scratch directory.
    builder_sh = Path(builder).absolute()
    app.log.debug(builder_sh)
    for line in sh.bash(str(builder_sh), _cwd=out_path, _iter=True, _bg_exc=False):
        app.log.info(line.strip())
    # Attach every built file that the spec maps to a resource key.
    for line in glob("{}/*".format(out_path)):
        resource_path = Path(line)
        resource_fn = resource_path.parts[-1]
        resource_key = resource_spec_fragment.get(resource_fn, None)
        if resource_key:
            is_attached = False
            is_attached_count = 0
            while not is_attached:
                try:
                    out = charm_sh.attach(
                        charm_entity,
                        "--channel",
                        channel,
                        f"{resource_key}={resource_path}",
                        _err_to_out=True,
                        _bg_exc=False
                    )
                    is_attached = True
                except sh.ErrorReturnCode_1 as e:
                    app.log.debug(f"Problem attaching resources, retrying: {e}")
                    is_attached_count += 1
                    if is_attached_count > 10:
                        raise SpecProcessException(
                            "Could not attach resource and max retry count reached."
                        )
            app.log.debug(out)
def build(
    charm_list,
    layer_list,
    layer_index,
    charm_branch,
    layer_branch,
    resource_spec,
    filter_by_tag,
    to_channel,
    dry_run,
):
    """Clone, build, push and promote every charm matching *filter_by_tag*.

    :param charm_list: YAML file of charm definitions
    :param layer_list: YAML file of layers to pre-download
    :param layer_index: layer index URL for pull_layers()
    :param charm_branch: git branch of each charm's downstream repo
    :param layer_branch: git branch to pin downloaded layers to
    :param resource_spec: resource spec YAML passed to resource()
    :param filter_by_tag: only charms with a matching tag are processed
    :param to_channel: channel charms are promoted to after the build
    :param dry_run: when truthy, only log what would be built
    """
    charm_env = CharmEnv()
    _charm_list = yaml.safe_load(Path(charm_list).read_text(encoding="utf8"))
    pull_layers(layer_index, layer_list, layer_branch)
    for charm_map in _charm_list:
        for charm_name, charm_opts in charm_map.items():
            downstream = f"https://github.com/{charm_opts['downstream']}"
            if not any(match in filter_by_tag for match in charm_opts["tags"]):
                continue
            if dry_run:
                app.log.debug(
                    f"{charm_name:^25} :: vcs-branch: {charm_branch} to-channel: {to_channel} tags: {','.join(charm_opts['tags'])}"
                )
                continue
            charm_entity = f"cs:~{charm_opts['namespace']}/{charm_name}"
            # Clone the charm source into ./<charm_name>; the built output
            # lands in CHARM_BUILD_DIR/<charm_name>.
            src_path = charm_name
            os.makedirs(src_path)
            dst_path = str(charm_env.build_dir / charm_name)
            for line in sh.git.clone(
                "--branch",
                charm_branch,
                downstream,
                src_path,
                _iter=True,
                _bg_exc=False,
            ):
                app.log.debug(line)
            for line in charm_sh.build(
                r=True, force=True, _cwd=src_path, _iter=True, _bg_exc=False
            ):
                app.log.info(line.strip())
            # Lint the built charm before uploading.
            charm_sh.proof(_cwd=dst_path)
            if not dry_run:
                push(src_path, dst_path, charm_entity)
            # Optional per-charm resource build hook.
            resource_builder = charm_opts.get("resource_build_sh", None)
            if resource_builder:
                resource(
                    charm_entity,
                    "unpublished",
                    f"{src_path}/{resource_builder}",
                    f"{dst_path}/tmp",
                    resource_spec,
                )
    if not dry_run:
        promote(charm_list, filter_by_tag, to_channel=to_channel)
def build_bundles(bundle_list, filter_by_tag, bundle_repo, to_channel, dry_run):
    """Assemble, push and promote every bundle matching *filter_by_tag*.

    :param bundle_list: YAML file of bundle definitions
    :param filter_by_tag: only bundles with a matching tag are processed
    :param bundle_repo: git URL of the bundle fragments repository
    :param to_channel: channel passed to the bundle builder and promote()
    :param dry_run: when truthy, bundles are built but not pushed/promoted
    """
    charm_env = CharmEnv()
    _bundle_list = yaml.safe_load(Path(bundle_list).read_text(encoding="utf8"))
    app.log.debug("bundle builds")
    # Fresh scratch checkouts under WORKSPACE for repo and build output.
    bundle_repo_dir = charm_env.tmp_dir / "bundles-kubernetes"
    bundle_build_dir = charm_env.tmp_dir / "tmp-bundles"
    sh.rm("-rf", bundle_repo_dir)
    sh.rm("-rf", bundle_build_dir)
    os.makedirs(str(bundle_repo_dir), exist_ok=True)
    os.makedirs(str(bundle_build_dir), exist_ok=True)
    for line in sh.git.clone(
        bundle_repo, str(bundle_repo_dir), _iter=True, _bg_exc=False
    ):
        app.log.debug(line)
    for bundle_map in _bundle_list:
        for bundle_name, bundle_opts in bundle_map.items():
            if not any(match in filter_by_tag for match in bundle_opts["tags"]):
                app.log.debug(f"Skipping {bundle_name}")
                continue
            app.log.debug(f"Processing {bundle_name}")
            # Invoke the repo's own ./bundle generator script.
            cmd = [
                str(bundle_repo_dir / "bundle"),
                "-o",
                str(bundle_build_dir / bundle_name),
                "-c",
                to_channel,
                bundle_opts["fragments"],
            ]
            app.log.debug(f"Running {' '.join(cmd)}")
            import subprocess
            # NOTE(review): joining cmd and running with shell=True allows
            # shell interpretation of fragment values; prefer
            # subprocess.call(cmd) with shell=False if inputs are untrusted.
            subprocess.call(" ".join(cmd), shell=True)
            bundle_entity = f"cs:~{bundle_opts['namespace']}/{bundle_name}"
            app.log.debug(f"Check {bundle_entity}")
            if not dry_run:
                push(
                    str(bundle_repo_dir),
                    str(bundle_build_dir / bundle_name),
                    bundle_entity,
                    is_bundle=True,
                )
    if not dry_run:
        promote(bundle_list, filter_by_tag, to_channel=to_channel)
| [
"battlemidget@users.noreply.github.com"
] | battlemidget@users.noreply.github.com |
28968f4cf0904ba68959fcbeb4aeeacea48d661f | c20534744b07252421aef4e157101eeb2f8a7090 | /django/project4/books/models.py | d9420ddcc23642a44a7fe1fb1bfa357abbd5ae0d | [] | no_license | lee-seul/development_practice | eb34a9be21ba2b8f20646420e903f07343f55f95 | b56fcdded15bf437e365c7d8ffe6981127adb5d4 | refs/heads/master | 2020-12-25T23:10:16.112757 | 2018-04-02T07:36:14 | 2018-04-02T07:36:14 | 44,519,248 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 703 | py | from django.db import models
class Author(models.Model):
    # NOTE(review): "saluation" looks like a typo for "salutation"; renaming
    # would require a schema migration, so it is only flagged here.
    saluation = models.CharField(max_length=100)
    name = models.CharField(max_length=50)
    email = models.EmailField()
    def __unicode__(self):
        # Display label used by the Python-2-era Django admin.
        return self.name
class Publisher(models.Model):
    name = models.CharField(max_length=50)
    address = models.CharField(max_length=100)
    website = models.URLField()
    def __unicode__(self):
        # Display label used by the Python-2-era Django admin.
        return self.name
class Book(models.Model):
    title = models.CharField(max_length=100)
    # A book can have several authors, and an author several books.
    authors = models.ManyToManyField('Author')
    publisher = models.ForeignKey(Publisher)
    publication_date = models.DateField()
    def __unicode__(self):
        # Display label used by the Python-2-era Django admin.
        return self.title
| [
"blacksangi14@naver.com"
] | blacksangi14@naver.com |
57a4fe2637adc67bba7851515835266f28b2069c | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/8/u8w.py | 1d67eb49dc2795c5460036e614e52831772ab646 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # Print the tokens of a double-quoted string (Python 2).
    # Expects the opening quote as the first token and the closing quote
    # as the last; any other shape is silently ignored.
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # Strip the surrounding quote tokens, then print the payload.
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            # Empty string literal: print a blank line.
            print
def main(fileName):
    # Interpret each line of the program file; every line must begin with
    # the 'u8W' keyword, otherwise the interpreter prints ERROR and stops.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'u8W':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
b377681d860d1c9cfb40f5ae30246ba53a8f0ef7 | 2225034e851e936b813005ce3948d76a2c0f85aa | /accounts/migrations/0003_alter_user_user_type.py | 065708962d7354caf2b9947eac6e7ae9b7753fb4 | [] | no_license | khaled-hamada/doctor-online-app-task | 83a6f6864467552f4b08f0832a441ddcaaa8a3e7 | a6d5c8284cc21f21bb5227281ff27b3c3a1ad8d6 | refs/heads/main | 2023-07-12T06:52:11.425656 | 2021-08-24T13:17:18 | 2021-08-24T13:17:18 | 399,429,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | # Generated by Django 3.2.6 on 2021-08-23 11:00
from django.db import migrations, models
class Migration(migrations.Migration):
    # Relaxes User.user_type to an optional free-text CharField (blank/null
    # allowed, max 64 characters).

    dependencies = [
        ('accounts', '0002_alter_user_user_type'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='user_type',
            field=models.CharField(blank=True, max_length=64, null=True),
        ),
    ]
| [
"khaledosman737@gmail.com"
] | khaledosman737@gmail.com |
27f44b74cc5fedd3f218a356d0844f7d2ffac7e5 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/96/usersdata/184/53936/submittedfiles/estatistica.py | 54102a6b3c85cedfd1bbea3a41e1c235f0059378 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | # -*- coding: utf-8 -*-
def media(lista):
    """Return the arithmetic mean of the values in *lista*."""
    soma = 0
    for valor in lista:
        soma += valor
    return soma / len(lista)
def variância(lista):
    """Return the population variance of *lista*.

    The original implementation shadowed the media() helper with a local
    variable of the same name (raising UnboundLocalError before any work
    was done) and overwrote the accumulator with `soma =` instead of
    summing the squared deviations.
    """
    # Mean computed inline to avoid the name-shadowing trap entirely.
    m = sum(lista) / float(len(lista))
    soma = 0
    for valor in lista:
        soma += (valor - m) ** 2
    return soma / float(len(lista))
def desviopadrao(lista):
    """Return the population standard deviation of *lista*."""
    import math  # math is never imported at module level in this file
    # The variance helper is spelled with an accent; the original called
    # the non-existent name `variancia`, which raised NameError.
    return math.sqrt(variância(lista))
# --- Main program (Python 2): read two samples of n values each and report
# --- the mean and standard deviation of both.
a=[]
b=[]
n=input('digite a quantidade de elementos:')
for i in range(0,n,1):
    a.append(input('digite um elemnto:'))
for i in range(0,n,1):
    b.append(input('digite um elemnto:'))
media_a=media(a)
media_b=media(b)
desviopadrao_a=desviopadrao(a)
desviopadrao_b=desviopadrao(b)
print media_a
print desviopadrao_a
print media_b
print desviopadrao_b
#Por último escreva o programa principal, que pede a entrada e chama as funções criadas. | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
aaab255b00165926549abcb1a4d30251526fa7fa | 7e8c72c099b231078a763ea7da6bba4bd6bac77b | /python/webpy_and_orm/lib/portal/model/pm.py | 6a44fed39978eadd9456d7535c982c9c1bbdbc2d | [] | no_license | github188/demodemo | fd910a340d5c5fbf4c8755580db8ab871759290b | 96ed049eb398c4c188a688e9c1bc2fe8cd2dc80b | refs/heads/master | 2021-01-12T17:16:36.199708 | 2012-08-15T14:20:51 | 2012-08-15T14:20:51 | 71,537,068 | 1 | 2 | null | 2016-10-21T06:38:22 | 2016-10-21T06:38:22 | null | UTF-8 | Python | false | false | 18,965 | py | """
A persistent manager, the concept is learning JPA.
To use webpy.db module as persistent engine implement O/R mapping.
"""
from __future__ import with_statement
import web
import logging
#logging.basicConfig(level=logging.DEBUG)
class PersistentManager(object):
#only for debug...
_global_class_loader_ = None
cache = {}
    def __init__(self, name, module, dbn, auto_schema, **keywords):
        """Connect to the database and prepare the shared class loader.

        :param name: logical name of this persistence unit
        :param module: module scanned for mapped persistent classes
        :param dbn: web.py database driver name (e.g. 'mysql', 'sqlite')
        :param auto_schema: when true, tables are created from the model
        :param keywords: extra connection parameters for web.db.connect
        """
        self.name = name
        web.db.connect(dbn, **keywords)
        self.syslog = logging.getLogger('portal.pm')
        # The class loader is shared process-wide ("only for debug" per the
        # class-level note); build it once on first construction.
        if self._global_class_loader_ == None:
            self._global_class_loader_ = PersistentClassLoader(module)
        self.class_loader = self._global_class_loader_
        self._check_mapped_data_model(self.class_loader.pm_class, auto_schema)
        #self.cache = {}
    def load_obj(self, cls, ids, raise_unqiue=True):
        """Load a persistent object by key, consulting the identity cache.

        :param cls: entity class (or something resolvable by get_cls_type)
        :param ids: dict of column->value, or a scalar primary-key value
        :param raise_unqiue: raise RuntimeError if the key matches more
            than one row (otherwise the first match wins)
        :return: the cached/loaded entity, or None when nothing matches
        """
        cls = self.get_cls_type(cls)
        # Identity-map key: class name plus the repr of the lookup key.
        obj_id = cls.__name__ + str(ids)
        if not self.cache.has_key(obj_id):
            if isinstance(ids, dict):
                where = ' and '.join([ '%s=$%s' % (e, e) for e in ids.keys() ])
            else:
                # Scalar key: look up by the mapped primary-key column.
                where = "%s = $id" % cls._pm_id_column_
                ids = {'id' : ids}
            enties = self.select(cls, where, ids)
            if len(enties) > 1 and raise_unqiue:
                raise RuntimeError, "Not a unqiue key '%s' for '%s'." % (repr(ids),
                    cls._pm_db_table_)
            self.cache[obj_id] = len(enties) > 0 and enties[0] or None
        return self.cache[obj_id]
    def persist(self, obj):
        """Insert or update *obj* according to its persistence state.

        'New' objects are INSERTed (after cascading to-one relations) and
        receive their generated primary key; already-persisted objects
        with dirty fields are UPDATEd. Objects in state 'persisted' or
        'proxy' with no pending changes are left untouched.
        """
        if obj._pm_state_ in [ 'persisted', 'proxy' ]: return
        # Make sure to-one related objects exist first so their ids can be
        # written into this row's foreign-key columns.
        self._persist_one_relation(obj)
        _pm_var_ = {}
        #with web.transaction():
        if obj._pm_state_ == 'New':
            self._run_fixture(obj, 'save')
            for f in obj._pm_fields_:
                _pm_var_[f[0]] = getattr(obj, f[0])
            # The primary key is generated by the database, never inserted.
            del _pm_var_[obj._pm_id_column_]
            self.syslog.debug("insert:" + web.insert(obj._pm_db_table_, _test=True, **_pm_var_))
            new_id = web.insert(obj._pm_db_table_, **_pm_var_)
            setattr(obj, obj._pm_id_column_, new_id)
            obj._pm_updated_field_ = set()
            #??
            obj._pm_state_ = 'persisted'
            # To-many relations need this row's id, so cascade after insert.
            self._persist_list_relation(obj)
        elif obj._pm_updated_field_:
            self._run_fixture(obj, 'update')
            # Only write the columns that were actually modified.
            for f in obj._pm_updated_field_:
                _pm_var_[f] = getattr(obj, f)
            sql = web.update(obj._pm_db_table_,
                where="%s = $id" % obj._pm_id_column_,
                vars={'id':getattr(obj, obj._pm_id_column_)},
                _test=True,
                **_pm_var_)
            self.syslog.debug("update:" + sql)
            web.update(obj._pm_db_table_,
                where="%s = $id" % obj._pm_id_column_,
                vars={'id':getattr(obj, obj._pm_id_column_)},
                **_pm_var_)
            obj._pm_state_ = 'persisted'
def _persist_one_relation(self, obj):
try:
relations = obj._relations_
except AttributeError:
return
for name, map_type, model_type, col in relations.values():
if map_type == 'one':
value = getattr(obj, name)
if value != None and getattr(obj, col) is None:
if not getattr(value, value.__class__._pm_id_column_):
self.persist(value)
setattr(obj, col, getattr(value, value.__class__._pm_id_column_))
def _persist_list_relation(self, obj):
try:
relations = obj._relations_
except AttributeError:
return
for name, map_type, model_type, col in relations.values():
if map_type == 'list':
value = getattr(obj, name)
if value != None:
for item in value:
setattr(item, col, getattr(obj, obj._pm_id_column_))
self.persist(item)
def delete(self, obj):
#with web.transaction():
self.evil(obj)
self._run_fixture(obj, 'delete')
web.delete(obj._pm_db_table_, where="%s = $id" % obj._pm_id_column_,
vars={'id':getattr(obj, obj._pm_id_column_)})
def select(self, cls, where=None, vars=None, order=None, group=None,
limit=None, offset=None):
cls = self.get_cls_type(cls)
if cls._pm_where_ is not None:
where += " and " + cls._pm_where_
self.syslog.debug("Select SQL:" + web.select(cls._pm_db_table_,
vars=vars,
where=where,
limit=limit,
order=order,
offset=offset,
group=group, _test=True))
results = web.select(cls._pm_db_table_,
vars=vars,
where=where,
limit=limit,
order=order,
offset=offset,
group=group)
#print results
return self._mapping_to_model(cls, results)
def sqlquery(self, cls, sql_query, vars=None, order=None, limit=None, offset=None):
cls = cls and self.get_cls_type(cls) or cls
if vars == None: vars = {}
#replace ${ClassName} as table name
import re
for table in re.finditer("\\$\\{(\w+)\\}", sql_query):
table = table.group(1)
table_cls = self.get_cls_type(table)
sql_query = sql_query.replace('${%s}' % table, table_cls._pm_db_table_)
for (sql, val) in (('ORDER BY', order),
('LIMIT', limit),
('OFFSET', offset)
):
if val:
sql_query += ' %s %s' % (sql, val)
self.syslog.debug("Select SQL for sqlquery:%s" % sql_query)
results = web.query(sql_query, vars)
if cls:
return self._mapping_to_model(cls, results)
else:
return results
def evil(self, obj):
"""
Remove the object from cache, and disattach the obj from PM.
the object stauts will not be persistent after the object is evil.
"""
#with web.transaction():
obj_id = obj.__class__.__name__ + str(getattr(obj, obj._pm_id_column_))
if self.cache.has_key(obj_id):
del self.cache[obj_id]
return True
return False
def evil_all(self):
self.cache.clear()
return True
def cache_obj(self, obj):
obj_id = obj.__class__.__name__ + str(getattr(obj, obj._pm_id_column_))
self.cache[obj_id] = obj
def commit(self):
web.db.commit()
def rollback(self):
web.db.rollback()
def drop_model(self, cls):
cls = self.get_cls_type(cls)
cursor = web.ctx.db_cursor()
try:
drop_sql = self.class_loader.export_drop_table(cls)
self.syslog.debug("Drop model:" + drop_sql)
cursor.execute(drop_sql)
except:
import sys
type, value, my_traceback = sys.exc_info()
if 'no such table' in str(value):
pass
finally:
cursor.close()
def get_cls_type(self, cls):
if type(cls) == type(''):
if not self.class_loader.named_class.has_key(cls):
raise RuntimeError, "Not found class mapping '%s'." % cls
return self.class_loader.named_class[cls]
else:
return cls
def _check_mapped_data_model(self, cls_list, auto_schema):
for cls in cls_list:
cursor = web.ctx.db_cursor()
try:
cursor.execute('select 1 from %s' % cls._pm_db_table_)
except:
import sys
type, value, my_traceback = sys.exc_info()
if 'no such table' in str(value):
self.syslog.info("Not found table:" + cls._pm_db_table_)
if 'create' in auto_schema:
create_sql = self.class_loader.export_sql_schema(cls)
self.syslog.info("Create:" + create_sql)
cursor.execute(create_sql)
self.syslog.info("Create new table:" + cls._pm_db_table_)
else:
raise
cursor.close()
def _mapping_to_model(self, cls, datas):
enties = []
for res in datas:
obj_id = cls.__name__ + str(res[cls._pm_id_column_])
if not self.cache.has_key(obj_id):
entity = cls()
entity._pm_fill_object_(res)
self.cache[obj_id] = entity
self._init_lazy_loading_relations(entity)
self._run_fixture(entity, 'load')
enties.append(self.cache[obj_id])
return enties
def _run_fixture(self,obj, name):
try:
f = getattr(obj, name)
except AttributeError:
return
f(self)
class lazy_loading_proxy(object):
def __init__(self, pm, obj, relation):
def loader():
(name, map_type, model_type, col) = relation
data = None
id_value = None
if map_type == 'list':
id_value = getattr(obj, obj._pm_id_column_)
data = pm.select(model_type, '%s=$id' % col, {'id':id_value})
elif map_type == 'one':
id_value = getattr(obj, col)
data = pm.load_obj(model_type, id_value)
else:
raise RuntimeError, "not supported relation type '%s'" % map_type
setattr(self, 'proxy', data)
setattr(obj, name, data)
if data is None:
syslog = logging.getLogger('portal.pm')
syslog.warn("not entity:%s id:%s in %s" % (str(relation), id_value, str(obj)))
return data
self.loader = loader
self.proxy = None
self._pm_state_ = 'proxy'
(name, map_type, model_type, col) = relation
self._str_ = '{proxy:%s<%s> in %s}' % (name, model_type, obj.__class__.__name__)
def __getattribute__(self, name):
proxy = object.__getattribute__(self, "proxy")
if name == '_pm_state_' and proxy == None:
return 'proxy'
if proxy == None:
proxy = object.__getattribute__(self, "loader")()
if proxy != None:
return proxy.__getattribute__(name)
else:
raise RuntimeError, "not found entity..."
def __iter__(self):
proxy = object.__getattribute__(self, "proxy")
if proxy == None:
proxy = object.__getattribute__(self, "loader")()
return proxy and iter(proxy) or iter([])
def __getattr__(self, name):
proxy = object.__getattribute__(self, "proxy")
if proxy == None:
proxy = object.__getattribute__(self, "loader")()
return getattr(proxy, name)
def __getitem__(self, key):
proxy = object.__getattribute__(self, "proxy")
if proxy == None:
proxy = object.__getattribute__(self, "loader")()
return proxy[key]
def __str__(self):
return object.__getattribute__(self, "_str_")
def __cmp__(self, other):
proxy = object.__getattribute__(self, "proxy")
if proxy == None:
proxy = object.__getattribute__(self, "loader")()
return proxy.__cmp__(other)
def _init_lazy_loading_relations(self, obj):
try:
relations = obj._relations_
except AttributeError:
relations = {}
for relation in relations.values():
(name, map_type, model_type, col) = relation
setattr(obj, relation[0], self.lazy_loading_proxy(self, obj, relation))
class PersistentClassLoader(object):
    """Discovers mapped data classes and installs the persistence hooks.

    Mapping metadata is declared inside each class docstring with
    annotations such as ``@table:``, ``@id:``, ``@field:``, ``@where:``,
    ``@relation:`` and ``@extend:``.
    """
    def __init__(self, obj=None):
        """Scan *obj* (a class or a module/package) for mapped classes."""
        self.pm_class = []
        self.named_class = {}
        if obj is None: return
        import types
        if type(obj) in [ types.ClassType, types.TypeType ]:
            self.add_class(obj)
        else:
            self.add_module(obj)

    def add_module(self, mod):
        """Register every class defined in *mod*; when *mod* is a package,
        recurse into its ``*.py`` sub-modules."""
        import types
        for key in dir(mod):
            attr = getattr(mod, key)
            if type(attr) in [ types.ClassType, types.TypeType ]:
                self.add_class(attr)
        try:
            import os
            sub_mod = []
            # Fix: the original shadowed the ``mod`` parameter with the
            # loop variable, compared the extension with the identity
            # operator (``is not '.py'``, always true for sliced strings)
            # and split an undefined name ``e``.
            for entry in os.listdir(mod.__path__[0]):
                if entry[0] in ['_', '.'] or entry[-3:] != '.py':
                    continue
                sub_mod.append(os.path.splitext(entry)[0])
        except AttributeError:
            # Plain module without __path__: nothing to recurse into.
            sub_mod = []
        for name in sub_mod:
            new_mod = __import__(mod.__name__ + '.' + name, globals(), locals(), [ name ])
            self.add_module(new_mod)

    def add_class(self, cls):
        """Register *cls* once and install the persistence hooks on it."""
        if cls not in self.pm_class:
            self.pm_class.append(cls)
            self.named_class[cls.__name__] = cls
            self._installed_pm_hooks(cls)

    def _installed_pm_hooks(self, cls):
        """Attach the bookkeeping attributes/methods used by the manager."""
        cls._pm_fill_object_ = _fill_object_
        cls._pm_state_ = 'New'
        cls._pm_where_ = None
        self._parse_comments(cls, cls.__doc__)
        # Track attribute writes so dirty fields can be UPDATEd later.
        cls.__setattr__ = _set_object_attr_

    def _parse_comments(self, cls, docs):
        """Parse the ``@...:`` mapping annotations out of *docs*."""
        comments_header = {'@table:':self._commnet_table_,
                           '@id:':self._commnet_id_,
                           '@field:':self._commnet_field_,
                           '@where:':self._commnet_where_,
                           '@relation:':self._commnet_relation_,
                           '@extend:':self._commnet_extend_
                           }
        for doc in docs.split("\n"):
            doc = doc.strip()
            for comment in comments_header.keys():
                if doc.startswith(comment):
                    doc = doc.replace(comment, '').strip()
                    comments_header[comment](cls, doc)
                    break

    def _commnet_table_(self, cls, str):
        # @table:<name> -- the database table backing the class.
        cls._pm_db_table_ = str

    def _commnet_id_(self, cls, str):
        # @id:<name,len,type> -- the primary-key column (also a field).
        (f_name, f_len, f_type) = self._parse_filed_commnet(str)
        cls._pm_id_column_ = f_name
        self._commnet_field_(cls, str)

    def _commnet_field_(self, cls, str):
        # @field:<name,len,type> -- a mapped column; duplicates are ignored.
        try:
            _pm_fields_ = cls._pm_fields_
        except AttributeError:
            cls._pm_fields_ = _pm_fields_ = []
        f = self._parse_filed_commnet(str)
        setattr(cls, f[0], None)
        for fd in _pm_fields_:
            if fd[0] == f[0]: return
        _pm_fields_.append(f)

    def _commnet_where_(self, cls, str):
        # @where:<sql> -- extra condition appended to every SELECT.
        cls._pm_where_ = str

    def _commnet_relation_(self, cls, str):
        # @relation:<attr,one|list,ModelName,fk_column>
        try:
            _relations_ = cls._relations_
        except AttributeError:
            cls._relations_ = _relations_ = {}
        if str.count(',') == 3:
            name, map_type, model_type, col = str.split(',')
            _relations_[name] = (name, map_type, model_type, col)
            if map_type == 'one':
                # A to-one relation implies an integer FK column.
                self._commnet_field_(cls, '%s,0,INTEGER' % col)
            setattr(cls, name, None)

    def _commnet_extend_(self, cls, str):
        # @extend:<ClassName> -- inherit the mapping of another class.
        self._parse_comments(cls, self.named_class[str].__doc__)

    def _parse_filed_commnet(self, str):
        """Split 'name,length,type' into a (name, int(length), type) tuple."""
        name, length, type = str.split(",")
        length = int(length)
        return (name, length, type)

    def export_sql_schema(self, cls=None):
        """Return a CREATE TABLE statement for *cls* ('' when cls is None)."""
        if cls != None:
            schema = "CREATE TABLE %s (\n" % cls._pm_db_table_
            primary_field = cls._pm_id_column_
            def _generate_field_(f):
                # A length of 0 means a bare, length-less column type.
                f_type = (int(f[1]) > 0) and "%s(%s)" % (f[2], f[1]) or f[2]
                return ((primary_field == f[0]) and "%s %s PRIMARY KEY" or "%s %s") \
                       % (f[0], f_type)
            schema += ",\n".join([ _generate_field_(e) for e in cls._pm_fields_])
            schema += "\n)"
            return schema
        return ""

    def export_drop_table(self, cls=None):
        """Return a DROP TABLE statement for *cls* ('' when cls is None)."""
        if cls != None:
            return "DROP TABLE %s" % cls._pm_db_table_
        return ""
#---start persistent object hooks--------------
def _fill_object_(self, data):
    """Populate the entity's mapped columns from the DB row *data* and
    mark the instance as freshly loaded (no dirty fields)."""
    self._pm_updated_field_ = set()
    for field in self._pm_fields_:
        column = field[0]
        setattr(self, column, data[column])
    self._pm_state_ = 'loaded'
def _set_object_attr_(self, name, value):
    """Installed as ``__setattr__`` on every mapped class: records writes
    to mapped columns in ``_pm_updated_field_`` so ``persist`` knows which
    fields to UPDATE, then performs the normal attribute assignment."""
    if self._pm_state_ != 'New':
        for f in self._pm_fields_:
            if name == f[0]:
                # Only a real value change marks the field (and object) dirty.
                if value != getattr(self, name):
                    self._pm_updated_field_.add(name)
                    self._pm_state_ = 'updated'
                break
    # NOTE(review): ``super(self.__class__, self)`` recurses infinitely if
    # a mapped class is ever subclassed -- confirm mapped classes are leaf
    # classes.
    super(self.__class__, self).__setattr__(name, value)
    #super.__setattr__(self, name, value)
    #object.__setattr__(self, name, value)
#---end persistent object hooks----------------
def cur_pm(name='default'):
    """
    Return the current thread's PersistentManager; *name* distinguishes
    multiple managers.  The manager is created lazily on first use and
    cached on ``web.ctx`` under the attribute ``_db_<name>``.
    """
    from portal.config import db_config as db_param

    attr_name = "_db_" + name
    manager = getattr(web.ctx, attr_name, None)
    if manager is None:
        manager = PersistentManager(name, **db_param[name])
        setattr(web.ctx, attr_name, manager)
        logging.getLogger('cur_pm').info('create new persistent manager.')
    return manager
| [
"DeonWu@b18a5524-d64a-0410-9f42-ad3cd61580fb"
] | DeonWu@b18a5524-d64a-0410-9f42-ad3cd61580fb |
a6df2cb4a8a4471534fc5fe9ba7d6996c2dff035 | cc44edfa1edbedea3ad044805be7548e0ccba70d | /0x0F-python-object_relational_mapping/13-model_state_delete_a.py | c044b9f72162be0367f0edae4457370fc78e42b1 | [] | no_license | set808/holbertonschool-higher_level_programming | 421f0da1f91cd56eb2daa4e07a51b4a505d53edc | eb276a4e68e5cc43498459eec78fc05f72e2cd48 | refs/heads/master | 2020-03-09T13:07:43.824914 | 2018-09-08T00:26:46 | 2018-09-08T00:26:46 | 128,802,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | #!/usr/bin/python3
"""Delete all State objects whose name contains the letter a."""
from sys import argv
from model_state import Base, State
from sqlalchemy import (create_engine)
from sqlalchemy import update
from sqlalchemy.orm import sessionmaker
def main(argv):
    """Delete every State whose name contains the letter 'a'.

    argv -- [script_name, mysql_user, mysql_password, database_name]
    """
    engine = create_engine(
        'mysql+mysqldb://{}:{}@localhost:3306/{}'.format(
            argv[1], argv[2], argv[3]), pool_pre_ping=True)
    Base.metadata.create_all(engine)
    Session = sessionmaker(bind=engine)
    session = Session()
    # SQL LIKE '%a%' matches any name containing 'a' (case sensitivity
    # depends on the database collation).
    for state in session.query(State).filter(State.name.like('%a%')):
        session.delete(state)
    session.commit()


if __name__ == "__main__":
    main(argv)
| [
"spencertaylor808@gmail.com"
] | spencertaylor808@gmail.com |
ed42e215d09f16cabbe9caef947294944b5956c2 | c80ec1805a7e6cb1bd3f4b3e383ef4f4cf164765 | /gen/datehandler/__init__.py | b3a56bafa582242949c6717acefc05dda78990b6 | [] | no_license | balrok/gramps_addon | 57c8e976c47ea3c1d1298d3fd4406c13909ac933 | 0c79561bed7ff42c88714edbc85197fa9235e188 | refs/heads/master | 2020-04-16T03:58:27.818732 | 2015-02-01T14:17:44 | 2015-02-01T14:17:44 | 30,111,898 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,166 | py | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2004-2007 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Class handling language-specific selection for date parser and displayer.
"""
from __future__ import print_function, unicode_literals
#-------------------------------------------------------------------------
#
# set up logging
#
#-------------------------------------------------------------------------
import logging
from ..const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
# import prerequisites for localized handlers
from ._datehandler import (LANG, LANG_SHORT, LANG_TO_PARSER, LANG_TO_DISPLAY,
register_datehandler)
from . import _datestrings
# Import all the localized handlers
from . import _date_ar
from . import _date_bg
from . import _date_ca
from . import _date_cs
from . import _date_da
from . import _date_de
from . import _date_el
from . import _date_es
from . import _date_fi
from . import _date_fr
from . import _date_hr
from . import _date_it
from . import _date_lt
from . import _date_nb
from . import _date_nl
from . import _date_pl
from . import _date_pt
from . import _date_ru
from . import _date_sk
from . import _date_sl
from . import _date_sr
from . import _date_sv
from . import _date_uk
# Initialize global parser.  Fall back to the full language code, then its
# two-letter prefix, and finally the "C" default parser.  (Fix: the bare
# ``except:`` clauses below were narrowed to ``except Exception:`` so that
# SystemExit/KeyboardInterrupt are no longer swallowed.)
try:
    if LANG in LANG_TO_PARSER:
        parser = LANG_TO_PARSER[LANG]()
    else:
        parser = LANG_TO_PARSER[LANG_SHORT]()
except Exception:
    logging.warning(_("Date parser for '%s' not available, using default") % LANG)
    parser = LANG_TO_PARSER["C"]()

# Initialize global displayer, honouring the user's configured date-format
# preference when the config module is available (0 otherwise).
try:
    from ..config import config
    val = config.get('preferences.date-format')
except Exception:
    val = 0
try:
    if LANG in LANG_TO_DISPLAY:
        displayer = LANG_TO_DISPLAY[LANG](val)
    else:
        displayer = LANG_TO_DISPLAY[LANG_SHORT](val)
except Exception:
    logging.warning(_("Date displayer for '%s' not available, using default") % LANG)
    displayer = LANG_TO_DISPLAY["C"](val)
# Import utility functions
from ._dateutils import *
from ._grampslocale import (codeset, tformat)
if __name__ == "__main__":
    # Debug helper: for every two-letter locale, print how many date
    # formats its displayer offers and whether it overrides the default
    # Gregorian display method; finally print the maximum format count.
    from ._datedisplay import DateDisplay
    m = 0
    for l,d in LANG_TO_DISPLAY.items():
        if len(l) != 2:
            continue
        m = max(m, len(d.formats))
        print("{}: {} {} own dg: {}".format(
            l, len(d.formats), d.formats,
            d._display_gregorian != DateDisplay._display_gregorian))
    print("MAX: ", m)
| [
"carl.schoenbach@gmail.com"
] | carl.schoenbach@gmail.com |
228724cb4ba363074d939a34c441ba86ebe9c6c2 | 25cb0013b8e635dd5a7cc189819d43191f571fe2 | /tests/parser/test_wikipedia.py | 09d7a2f3bcca27a44bfcb1147cb6ef82d0cd2e7c | [
"MIT"
] | permissive | openmicroanalysis/pyxray | e40a1c575b8f5002f162c611c4ffcf00ff028907 | b06478f6c251d92e878713d18d5c7bc063bac0fb | refs/heads/master | 2023-03-18T11:41:28.806694 | 2023-03-10T09:35:58 | 2023-03-10T09:35:58 | 57,978,665 | 4 | 2 | MIT | 2023-03-10T09:35:59 | 2016-05-03T15:10:14 | Python | UTF-8 | Python | false | false | 299 | py | #!/usr/bin/env python
""" """
# Standard library modules.
# Third party modules.
# Local modules.
from pyxray.parser.wikipedia import WikipediaElementNameParser
# Globals and constants variables.
def test_wikipedia():
    """The Wikipedia element-name parser must yield at least one entry."""
    parser = WikipediaElementNameParser()
    entries = list(parser)
    assert len(entries) > 0
| [
"philippe.pinard@gmail.com"
] | philippe.pinard@gmail.com |
3f786d9369922895b5661bc3d6051ba8a9589b3a | 3a891a79be468621aae43defd9a5516f9763f36e | /desktop/core/ext-py/nose-1.3.7/examples/plugin/setup.py | 4dd5dad316fc51c1078d9f21e31d231609ef1249 | [
"Apache-2.0",
"LGPL-2.1-only"
] | permissive | oyorooms/hue | b53eb87f805063a90f957fd2e1733f21406269aa | 4082346ef8d5e6a8365b05752be41186840dc868 | refs/heads/master | 2020-04-15T20:31:56.931218 | 2019-01-09T19:02:21 | 2019-01-09T19:05:36 | 164,998,117 | 4 | 2 | Apache-2.0 | 2019-01-10T05:47:36 | 2019-01-10T05:47:36 | null | UTF-8 | Python | false | false | 523 | py | """
An example of how to create a simple nose plugin.
"""
# Bootstrap setuptools via the ez_setup helper when available; fall back
# silently to whatever setuptools is already installed.
try:
    import ez_setup
    ez_setup.use_setuptools()
except ImportError:
    pass

from setuptools import setup

setup(
    name='Example plugin',
    version='0.1',
    author='Jason Pellerin',
    author_email = 'jpellerin+nose@gmail.com',
    description = 'Example nose plugin',
    license = 'GNU LGPL',
    py_modules = ['plug'],
    # Register the plugin class under nose's 0.10+ entry-point group so
    # nose can discover it at runtime.
    entry_points = {
        'nose.plugins.0.10': [
            'example = plug:ExamplePlugin'
        ]
    }
)
| [
"ranade@cloudera.com"
] | ranade@cloudera.com |
85100f9a205b487eaaf3372ea12fccd23da2983d | 9316e155538af98001c6d7551e721b6160c99bd7 | /run_ants.py | 716f1e81bac243ddd8c7e9ef3e5e5a461ccb5298 | [
"Apache-2.0"
] | permissive | binarybottle/mindboggle_sidelined | 2c7c6591d199a5b715cb028d3374c1a426fb4341 | 1431d4877f4ceae384486fb66798bc22e6471af7 | refs/heads/master | 2016-09-06T17:24:32.646682 | 2015-02-16T04:45:44 | 2015-02-16T04:45:44 | 30,854,143 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,650 | py | """
Run antsCorticalThickness.sh on Mindboggle-101 brains
"""
import os
# Driver script: run ANTs antsCorticalThickness.sh over the Mindboggle-101
# subject cohorts, optionally converting FreeSurfer .mgz volumes to NIfTI
# first with mri_convert.
run_all = True
if run_all:
    # Cohort names and, in parallel, the number of subjects in each.
    names = ['OASIS-TRT-20', 'MMRR-21', 'NKI-RS-22', 'NKI-TRT-20',
             'Afterthought', 'Colin27', 'Twins-2', 'MMRR-3T7T-2', 'HLN-12']
    numbers = [20,21,22,20,1,1,2,2,12]
    # Restrict the run to the single cohort at index i1 -- presumably a
    # manual switch left from interactive use; TODO confirm.
    i1 = 0
    names = [names[i1]]
    numbers = [numbers[i1]]

# Input anatomy: FreeSurfer "orig" volumes per subject.
path1 = '/homedir/Data/Brains/Mindboggle101/subjects/'
end1a = '/mri/orig/001.mgz'
end1b = '/mri/orig/001.nii.gz'
# OASIS-30 Atropos template files consumed by antsCorticalThickness.sh.
path2 = '/data/Brains/Atropos_templates/OASIS-30_Atropos_template/'
end2a = 'T_template0.nii.gz'
end2b = 'T_template0_BrainCerebellum.nii.gz'
end2c = 'T_template0_BrainCerebellumProbabilityMask.nii.gz'
end2d = 'T_template0_BrainCerebellumExtractionMask.nii.gz'
end2e = 'Priors2/priors%d.nii.gz'
# When True, first convert each subject's 001.mgz to NIfTI.
convert = False
for i,name in enumerate(names):
    number = numbers[i]
    # Subjects are numbered 1..number within each cohort.
    for n in range(1,number+1):
        if convert:
            s = 'mri_convert {0}{1}-{2}{3} {0}{1}-{2}{4} ' \
                .format(path1, name, n, end1a, end1b)
            print(s)
            os.system(s)
        prefix = 'antsCorticalThickness/{0}-{1}/ants'.format(name, n)
        # Build the antsCorticalThickness.sh command line; flags are kept
        # exactly as in the original invocation.
        s = 'antsCorticalThickness.sh -d 3 -n 3 -w 0.25 ' \
            '-a {0}{1}-{2}{3} ' \
            '-o {4} ' \
            '-e {5}/{6} ' \
            '-t {5}/{7} ' \
            '-m {5}/{8} ' \
            '-f {5}/{9} ' \
            '-p {5}/{10} ' \
            .format(path1, name, n, end1b, prefix, path2, end2a, end2b,
                    end2c, end2d, end2e)
        print(s)
        os.system(s)
| [
"arno@binarybottle.com"
] | arno@binarybottle.com |
bdbbde89fa8e66deaf81f53dee472784536d88c9 | 61747f324eaa757f3365fd7bf5ddd53ea0db47d1 | /casepro/cases/migrations/0017_outgoing_text.py | f5a2e63d2c2bc3469b928f6e54c6172ceabe1ef8 | [
"BSD-3-Clause"
] | permissive | BlueRidgeLabs/casepro | f8b0eefa8f961dd2fdb5da26a48b619ebc1f8c12 | 8ef509326f3dfa80bb44beae00b60cc6c4ac7a24 | refs/heads/master | 2022-01-24T09:01:18.881548 | 2017-12-05T18:46:05 | 2017-12-05T18:49:42 | 113,502,588 | 0 | 0 | null | 2017-12-07T21:57:37 | 2017-12-07T21:57:37 | null | UTF-8 | Python | false | false | 404 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Adds an optional free-text ``text`` column to the ``outgoing``
    # message model (max_length 640 -- presumably four concatenated
    # 160-character SMS segments; confirm against the messaging backend).

    dependencies = [
        ('cases', '0016_case_contact'),
    ]

    operations = [
        migrations.AddField(
            model_name='outgoing',
            name='text',
            field=models.TextField(max_length=640, null=True),
        ),
    ]
| [
"rowanseymour@gmail.com"
] | rowanseymour@gmail.com |
a1c71cac28e8106d69520a88204e6eebf415bd96 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_omniscient.py | d973e99c473ac2a6a285eb5b5832fff3a0311952 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py |
#class header
class _OMNISCIENT():
    """Auto-generated vocabulary entry for the adjective "omniscient"."""

    def __init__(self):
        self.name = "OMNISCIENT"
        self.definitions = [u'having or seeming to have unlimited knowledge: ']
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.specie = 'adjectives'

    def run(self, obj1, obj2):
        """Record this word (lowercased) as the 'properties' of *obj2* in
        ``jsondata`` and return the accumulated mapping.  *obj1* is
        accepted for interface compatibility but unused."""
        entry = {'properties': self.name.lower()}
        self.jsondata[obj2] = entry
        return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
033ff5a1c621db3638e1544b4a2d3c6f3741a08d | 0130c8b14927097663157846adc4b146d67d2fda | /tests/st/ops/gpu/test_fused_relu_grad_bn_reduce_grad.py | 4e5f6afbc529a5aefef225ba6b5e26b43a8557f6 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-3-Clause",
"NCSA",
"LLVM-exception",
"Zlib",
"BSD-2-Clause",
"MIT"
] | permissive | Shigangli/akg | e8be3e0ee1eafe3e42b4cc4d424c28f08ef4c0bc | 3766c54e0b109541932d147a6b5643a334b82403 | refs/heads/master | 2023-09-06T05:13:40.571583 | 2021-11-23T03:44:54 | 2021-11-23T03:44:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,586 | py | # Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import numpy as np
from akg.utils import kernel_exec as utils
from tests.common.gen_random import random_gaussian
from tests.common.test_op.resnet.fused_relu_grad_bn_reduce_grad import fused_relu_grad_bn_reduce_grad
def gen_data(shape, dtype):
    """Return random input data of *shape* drawn from N(mean=1, sigma=0.1),
    cast to *dtype*."""
    return random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)
def compute_py(data_1, data_2, data_3, data_4, data_5, data_6, data_7, data_8, data_9, layout):
    """NumPy reference for the fused relu-grad + BN-reduce-grad kernel.

    data_1..data_6 -- float32 per-channel batch-norm tensors
    data_7..data_9 -- float16 activation-shaped tensors (data_9 is the
                      forward activation, data_8 the incoming gradient)
    Returns (expect, output): the float16 reference result and a
    NaN-filled buffer of the same shape for the kernel to write into.
    NOTE(review): *layout* is accepted but unused -- the unpacking below
    assumes NHWC; confirm callers never pass another layout.
    """
    n, h, w, c = np.shape(data_9)
    inv_count = np.full(np.shape(np.multiply(data_4, data_5)), 1.0 / (n * h * w), 'float32')
    # Per-channel scale: (gamma * inv_std) / element count.
    bn_scale = np.multiply(np.multiply(data_4, data_5), inv_count)

    # ReLU gradient: pass data_8 through only where the activation is > 0.
    zeros_f16 = np.full(np.shape(data_9), 0.0, 'float16')
    relu_grad = np.where(np.greater(data_9, zeros_f16), data_8, zeros_f16).astype('float32')
    scaled_grad = np.multiply(relu_grad, np.full(np.shape(data_9), n * h * w, 'float32'))
    centered = np.subtract(scaled_grad, data_3)

    mean_term = np.subtract(data_7.astype('float32'), np.multiply(data_6, inv_count))
    norm_term = np.divide(np.multiply(data_2, mean_term), data_1)

    expect = np.multiply(bn_scale, np.subtract(centered, norm_term)).astype('float16')
    output = np.full(np.shape(expect), np.nan, 'float16')
    return expect, output
def test_fused_relu_grad_bn_reduce_grad(shape_1, shape_2, layout='NHWC', poly_sch=False):
    """Build the fused relu-grad/BN-reduce-grad CUDA kernel and validate
    it against the NumPy reference in :func:`compute_py`.

    shape_1  -- shape of the six float32 batch-norm input tensors
    shape_2  -- NHWC shape of the three float16 activation tensors
    layout   -- data layout string forwarded to the op
    poly_sch -- when True, build through the poly scheduler
    Raises AssertionError when the kernel output does not match.
    """
    data_1 = gen_data(shape_1, 'float32')
    data_2 = gen_data(shape_1, 'float32')
    data_3 = gen_data(shape_1, 'float32')
    data_4 = gen_data(shape_1, 'float32')
    data_5 = gen_data(shape_1, 'float32')
    data_6 = gen_data(shape_1, 'float32')
    data_7 = gen_data(shape_2, 'float16')
    data_8 = gen_data(shape_2, 'float16')
    data_9 = gen_data(shape_2, 'float16')
    expect, output = compute_py(data_1, data_2, data_3, data_4, data_5, data_6, data_7, data_8, data_9, layout)
    input_list = [shape_1, shape_1, shape_1, shape_1, shape_1, shape_1, shape_2, shape_2, shape_2]
    dtype_list = ['float32', 'float32', 'float32', 'float32', 'float32', 'float32', 'float16', 'float16', 'float16']
    op_attrs = [layout]
    if poly_sch:
        mod = utils.op_build_test(fused_relu_grad_bn_reduce_grad, input_list, dtype_list,
                                  kernel_name="fused_relu_grad_bn_reduce_grad", op_attrs=op_attrs,
                                  attrs={"target": "cuda"})
    # NOTE(review): when poly_sch is False, ``mod`` is never assigned and
    # the launch below raises NameError -- callers apparently always pass
    # poly_sch=True; confirm.
    args = [data_1, data_2, data_3, data_4, data_5, data_6, data_7, data_8, data_9, output]
    output = utils.mod_launch(mod, args, expect=expect)
    res = np.allclose(output, expect, rtol=5e-03, atol=1e-08)
    print("Test {}".format("Pass" if res else "Failed"))
    if not res:
        print("Error cuda:========================")
        print(mod.imported_modules[0].get_source())
        raise AssertionError("Test fail")
    return True
| [
"1027252281@qq.com"
] | 1027252281@qq.com |
a74d54cf192a18fe0dd00295cc02d80e22709ea2 | da85a249f6ffc68c71bc27b1246a77704d15770e | /tests/debug-tests/utils.py | 4003318523fa789c9a341c578a8c9671aae29397 | [
"BSD-3-Clause"
] | permissive | efajardo-nv/ucx-py | d8d268fc094afd28512c843dfa49f6afcb55d4bb | 90e7ae0214ea23588040d00f9c9497aba0f09b65 | refs/heads/master | 2022-05-22T10:32:24.411103 | 2020-03-31T18:10:46 | 2020-03-31T18:10:46 | 257,774,594 | 0 | 0 | BSD-3-Clause | 2020-04-22T02:52:27 | 2020-04-22T02:52:26 | null | UTF-8 | Python | false | false | 2,379 | py | import argparse
from distributed.comm.utils import from_frames
from distributed.utils import nbytes, parse_bytes
import numpy as np
import ucp
ITERATIONS = 50
def cuda_array(size):
    """Allocate an uninitialized uint8 device array of *size* bytes using
    numba.cuda (earlier cupy/rmm variants are kept below for reference)."""
    # import cupy
    # return cupy.empty(size, dtype=cupy.uint8)
    # return rmm.device_array(size, dtype=np.uint8)
    import numba.cuda

    return numba.cuda.device_array((size,), dtype=np.uint8)
async def send(ep, frames):
    """Send *frames* over endpoint *ep*.

    Wire format (numpy arrays, matching :func:`recv`):
      1. uint64[1] -- number of frames
      2. bool[n]   -- per-frame "is a CUDA buffer" flag
      3. uint64[n] -- per-frame byte size
      4. the non-empty frame payloads, in order
    """
    await ep.send(np.array([len(frames)], dtype=np.uint64))
    # Fix: use the builtin ``bool`` -- the ``np.bool`` alias was removed
    # in NumPy 1.24; the resulting dtype is identical.
    await ep.send(
        np.array(
            [hasattr(f, "__cuda_array_interface__") for f in frames], dtype=bool
        )
    )
    await ep.send(np.array([nbytes(f) for f in frames], dtype=np.uint64))
    # Send frames (zero-length frames are announced in the sizes array but
    # carry no payload).
    for frame in frames:
        if nbytes(frame) > 0:
            await ep.send(frame)
async def recv(ep):
    """Receive a message produced by :func:`send` and reassemble it.

    Returns (frames, msg): the raw buffers and the deserialized
    distributed message built from them.
    """
    try:
        # Recv meta data
        nframes = np.empty(1, dtype=np.uint64)
        await ep.recv(nframes)
        # Fix: builtin ``bool`` -- the ``np.bool`` alias was removed in
        # NumPy 1.24; the dtype is identical.
        is_cudas = np.empty(nframes[0], dtype=bool)
        await ep.recv(is_cudas)
        sizes = np.empty(nframes[0], dtype=np.uint64)
        await ep.recv(sizes)
    except (ucp.exceptions.UCXCanceled, ucp.exceptions.UCXCloseError) as e:
        msg = "SOMETHING TERRIBLE HAS HAPPENED IN THE TEST"
        # Fix: the original ``raise e(msg)`` called an exception *instance*
        # (a TypeError at runtime); re-raise the same exception type with
        # the diagnostic message, chained to the original error.
        raise type(e)(msg) from e

    # Recv frames
    # breakpoint()
    frames = []
    for is_cuda, size in zip(is_cudas.tolist(), sizes.tolist()):
        if size > 0:
            if is_cuda:
                frame = cuda_array(size)
            else:
                frame = np.empty(size, dtype=np.uint8)
            await ep.recv(frame)
            frames.append(frame)
        else:
            # Empty frame: allocate a zero-length placeholder only.
            if is_cuda:
                frames.append(cuda_array(size))
            else:
                frames.append(b"")
    msg = await from_frames(frames)
    return frames, msg
def parse_args(args=None):
    """Build and parse the benchmark's command-line arguments.

    args -- optional explicit argument list; defaults to sys.argv (fix:
            the original accepted *args* but never forwarded it to
            ``parser.parse_args()``, so explicit lists were ignored).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--server", default=None, help="server address.")
    parser.add_argument("-p", "--port", default=13337, help="server port.", type=int)
    parser.add_argument(
        "-n",
        "--n-bytes",
        default="10 Mb",
        type=parse_bytes,
        help="Message size. Default '10 Mb'.",
    )
    parser.add_argument(
        "--n-iter",
        default=10,
        type=int,
        help="Numer of send / recv iterations (default 10).",
    )
    return parser.parse_args(args)
| [
"quasiben@gmail.com"
] | quasiben@gmail.com |
4f47113183c2cf8e5d890bfebbab7cf2f9c5bc3a | 2cc44ba2a9b9b752fd1b1ebfd1a3681fe87c8617 | /models/datamodel.py | a3054c3669606379b4b05d100a9265744241b4ac | [] | no_license | jorgec/iNav-Python-SDK | 47c6a2b5e9a4eb246b9ee997b67a9726e2f9db30 | 57260980f8e2f5130f1abc21fd6dab7404ea05ac | refs/heads/master | 2020-08-29T23:46:45.733005 | 2019-10-29T04:47:55 | 2019-10-29T04:47:55 | 218,206,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | import json
class DataModel:
    """Base class that maps declared ``Meta.fields`` onto a serializable
    dict: each entry maps an output key to an instance attribute name."""

    class Meta:
        # Output key -> attribute name; subclasses override this.
        fields = {}

    def as_dict(self):
        """Return {key: str(attribute_value)} for every declared field."""
        result = {}
        for key, attr_name in self.Meta.fields.items():
            result[key] = str(getattr(self, attr_name))
        return result

    def serialize(self):
        """Return the :meth:`as_dict` representation as a JSON string."""
        return json.dumps(self.as_dict())
| [
"jorge.cosgayon@gmail.com"
] | jorge.cosgayon@gmail.com |
c476de20becad6e2a53cd18d6249c626d55d01c1 | e8199f1d424592affe19b50fd96a02815067d1b1 | /Apple/329. Longest Increasing Path in a Matrix.py | 4b7821d007aef9791d6d78446d5a4277649b4273 | [] | no_license | srajsonu/LeetCode-Solutions-Python | 39a809e4c6d555a3a3055ce03d59cfa40b93a287 | 8ec31c8df2885f3da533424ba13060b7d3e3af78 | refs/heads/master | 2023-03-19T10:05:42.578615 | 2021-03-13T17:21:36 | 2021-03-13T17:21:36 | 280,716,200 | 0 | 1 | null | 2020-10-06T09:54:02 | 2020-07-18T18:32:04 | Python | UTF-8 | Python | false | false | 1,003 | py | from collections import deque
class Solution:
def isValid(self, A, i, j):
if i < 0 or i >= len(A) or j < 0 or j >= len(A[0]):
return False
return True
def dfs(self, A, i, j, dp):
ans = 0
row = [1, 0, -1, 0]
col = [0, 1, 0, -1]
if dp[i][j]:
return dp[i][j]
else:
for r, c in zip(row, col):
nRow = i + r
nCol = j + c
if self.isValid(A, nRow, nCol) and A[i][j] > A[nRow][nCol]:
ans = max(ans, self.dfs(A, nRow, nCol, dp))
dp[i][j] = 1 + ans
return dp[i][j]
def Solve(self, A):
if not A:
return 0
m = len(A)
n = len(A[0])
dp =[[0]*n for _ in range(m)]
return max(self.dfs(A, x, y, dp) for x in range(m) for y in range(n))
if __name__ == '__main__':
A = [[9, 9, 4],
[6, 6, 8],
[2, 1, 1]]
B = Solution()
print(B.Solve(A))
| [
"srajsonu02@gmail.com"
] | srajsonu02@gmail.com |
b0778fa352eb1a191c385864773583a269064c75 | 2713e8a47e68d82907a4cedc6434ef1cd72d85e7 | /fluo/middleware/locale.py | 5ced4754e4f221eba613a13a5729fba9f5901d25 | [
"MIT"
] | permissive | rsalmaso/django-fluo | a283b8f75769ac6e57fa321c607819899e0c31c8 | 24b9f36e85b247ea209b9c40b17599e7731f5ded | refs/heads/main | 2023-01-12T01:37:06.975318 | 2022-12-30T22:08:40 | 2022-12-30T22:08:40 | 48,948,936 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,060 | py | # Copyright (C) 2007-2022, Raffaele Salmaso <raffaele@salmaso.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import annotations
# taken and adapted from django-cms
# Copyright (c) 2008, Batiste Bieler
"this is the locale selecting middleware that will look at accept headers"
import re
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponseRedirect
from django.utils import translation
from django.utils.cache import patch_vary_headers
from django.utils.deprecation import MiddlewareMixin
from fluo.settings import NO_LOCALE_PATTERNS
# Matches in-site <a href="/..."> links whose target is NOT already
# language-prefixed and does not live under MEDIA_URL / STATIC_URL
# (negative lookahead), so they can be rewritten with the active language.
SUB = re.compile(
r'<a([^>]+)href="/(?!(%s|%s|%s))([^"]*)"([^>]*)>'
% ("|".join(map(lambda l: l[0] + "/", settings.LANGUAGES)), settings.MEDIA_URL[1:], settings.STATIC_URL[1:]),
)
# Same as SUB, but for <form action="..."> targets.
SUB2 = re.compile(
r'<form([^>]+)action="/(?!(%s|%s|%s))([^"]*)"([^>]*)>'
% ("|".join(map(lambda l: l[0] + "/", settings.LANGUAGES)), settings.MEDIA_URL[1:], settings.STATIC_URL[1:]),
)
# Language codes enabled for this site, mapped to their verbose names.
SUPPORTED = dict(settings.LANGUAGES)
# Captures a leading language code, e.g. "/en/foo" -> "en".
START_SUB = re.compile(r"^/(%s)/(.*)" % "|".join(map(lambda l: l[0], settings.LANGUAGES)))
# Paths that must never be localized (configured patterns + static files).
NO_LOCALE_SUB = re.compile(r"^(%s|%s)(.*)" % ("|".join(NO_LOCALE_PATTERNS), settings.STATIC_URL))
LANGUAGE_COOKIE_NAME = settings.LANGUAGE_COOKIE_NAME
def has_lang_prefix(path):
    """Return the language code prefixing *path* (e.g. "en" for "/en/about/"),
    or False when the path carries no known language prefix."""
    match = START_SUB.match(path)
    return match.group(1) if match is not None else False
def skip_translation(path):
    """Return the non-localizable prefix matched at the start of *path*
    (a NO_LOCALE_PATTERNS entry or STATIC_URL), or False when *path*
    should go through localization."""
    match = NO_LOCALE_SUB.match(path)
    return match.group(1) if match is not None else False
def get_default_language(language_code=None):
    """Return the default language code.

    Starts from *language_code* (falling back to settings.LANGUAGE_CODE)
    and matches it against settings.LANGUAGES: first exactly, then by the
    bare ISO prefix (e.g. "en" for "en-us").

    Raises:
        ImproperlyConfigured: when neither form is configured.
    """
    code = language_code or settings.LANGUAGE_CODE
    known = dict(settings.LANGUAGES)
    if code in known:
        return code
    # Fall back to the base language: "en-us" -> "en".
    code = code.split("-")[0]
    if code in known:
        return code
    raise ImproperlyConfigured("No match in LANGUAGES for LANGUAGE_CODE %s" % settings.LANGUAGE_CODE)
def get_language_from_request(request):
    """Pick a supported language for *request*.

    Preference order: an explicit "language" GET/POST parameter, the
    request's own LANGUAGE_CODE attribute, then the site default from
    get_default_language(). Unsupported values are discarded at each step.
    """
    supported = dict(settings.LANGUAGES)
    language = request.GET.get("language", request.POST.get("language", None))
    if language and language not in supported:
        language = None
    if language is None:
        language = getattr(request, "LANGUAGE_CODE", None)
        if language and language not in supported:
            language = None
    if language is None:
        language = get_default_language()
    return language
class LocaleMiddleware(MiddlewareMixin):
"""Prefix-based locale middleware (adapted from django-cms).

Redirects un-prefixed URLs to "/<lang>/...", strips the prefix before
view resolution, activates the matching translation, and rewrites
anchors/forms/redirect targets in HTML responses so links stay inside
the active language tree.
"""
def get_language_from_request(self, request):
# Tracks whether the URL prefix itself selected the language.
changed = False
prefix = has_lang_prefix(request.path_info)
if prefix:
# Strip the "/<lang>" prefix so URL resolution sees the bare path.
request.path = "/" + "/".join(request.path.split("/")[2:])
request.path_info = "/" + "/".join(request.path_info.split("/")[2:])
t = prefix
if t in SUPPORTED:
lang = t
# Persist the choice in the session when one exists.
if hasattr(request, "session"):
request.session["django_language"] = lang
else:
# NOTE(review): HttpRequest has no set_cookie(); cookies can only
# be set on a response -- confirm whether this branch is ever
# reached / should defer the cookie write to process_response.
request.set_cookie(LANGUAGE_COOKIE_NAME, lang)
changed = True
else:
lang = translation.get_language_from_request(request)
if not changed:
# No (valid) prefix selected a language: try session, then cookie.
if hasattr(request, "session"):
lang = request.session.get("django_language", None)
if lang in SUPPORTED and lang is not None:
return lang
elif LANGUAGE_COOKIE_NAME in request.COOKIES.keys():
lang = request.COOKIES.get(LANGUAGE_COOKIE_NAME, None)
if lang in SUPPORTED and lang is not None:
return lang
if not lang:
# Last resort: Accept-Language negotiation.
lang = translation.get_language_from_request(request)
lang = get_default_language(lang)
return lang
def process_request(self, request):
path = str(request.path)
# Static/configured paths bypass localization entirely.
if skip_translation(path):
return
prefix = has_lang_prefix(request.path_info)
if not prefix:
# Un-prefixed URL: redirect to the default-language variant.
return HttpResponseRedirect("/%s%s" % (settings.LANGUAGE_CODE[:2], request.get_full_path()))
language = self.get_language_from_request(request)
translation.activate(language)
request.LANGUAGE_CODE = translation.get_language()
def process_response(self, request, response):
path = str(request.path)
if skip_translation(path):
return response
# Responses differ by language, so caches must vary on Accept-Language.
patch_vary_headers(response, ["Accept-Language"])
translation.deactivate()
# NOTE(review): response._headers is a private attribute removed in
# Django 3.2 (use response.headers / response["..."]) -- confirm the
# Django version this code targets.
if (
not skip_translation(path)
and response.status_code == 200
and response._headers["content-type"][1].split(";")[0] == "text/html"
):
# Rewrite in-site <a href> and <form action> targets so they carry
# the active language prefix.
response.content = SUB.sub(
r'<a\1href="/%s/\3"\4>' % request.LANGUAGE_CODE,
response.content.decode("utf-8"),
)
response.content = SUB2.sub(
r'<form\1action="/%s/\3"\4>' % request.LANGUAGE_CODE,
response.content.decode("utf-8"),
)
if response.status_code == 301 or response.status_code == 302:
if "Content-Language" not in response:
response["Content-Language"] = translation.get_language()
location = response._headers["location"]
prefix = has_lang_prefix(location[1])
# Relative redirect targets also get the current language prefix.
if not prefix and location[1].startswith("/") and not skip_translation(location[1]):
response._headers["location"] = (
location[0],
"/%s%s" % (request.LANGUAGE_CODE, location[1]),
)
return response
| [
"raffaele@salmaso.org"
] | raffaele@salmaso.org |
37ba7f3e9c1e421d8f3ae6e4fce5cdd0f88f331e | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_157/923.py | 9d23b8f1fd1c482d24b4c5e1908d21f090e72531 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,051 | py | import copy
# Quaternion multiplication table for distinct imaginary units:
# d[(a, b)] is the product a*b (i*j = k, j*i = -k, and so on).
d = {('i', 'j'): 'k',
     ('j', 'i'): '-k',
     ('j', 'k'): 'i',
     ('k', 'j'): '-i',
     ('k', 'i'): 'j',
     ('i', 'k'): '-j'}


def mul_q(q1, q2):
    """Multiply two signed quaternion units ('1', 'i', 'j', 'k', optionally
    prefixed with '-') and return the product in the same string form."""
    negative = False
    # Strip the signs off both operands, tracking the combined sign.
    if q1.startswith('-'):
        negative = not negative
        q1 = q1[1:]
    if q2.startswith('-'):
        negative = not negative
        q2 = q2[1:]
    # Multiply the now-unsigned units.
    if q1 == '1':
        product = q2
    elif q2 == '1':
        product = q1
    elif q1 == q2:
        # i*i = j*j = k*k = -1
        product = '-1'
    else:
        product = d[(q1, q2)]
    if not negative:
        return product
    # Apply the accumulated negative sign by flipping the product's sign.
    return product[1:] if product.startswith('-') else '-' + product
def prod_string(s):
    """Return the product of a string of quaternion units, folded left to
    right with mul_q starting from the identity '1'.

    (Removed the unused local ``qs = list(s)`` -- the loop iterates the
    string directly.)
    """
    acc = '1'
    for q in s:
        acc = mul_q(acc, q)
    return acc
def exp_q(q, pow):
    """Return unit *q* raised to the given power.

    Every quaternion unit satisfies q**4 == 1, so the exponent is reduced
    modulo 4 before multiplying out.
    """
    remaining = pow % 4
    acc = '1'
    while remaining:
        acc = mul_q(acc, q)
        remaining -= 1
    return acc
def solve_case(case):
"""Decide one test case of the quaternion-partition problem.

*case* is (pow, s): the unit string s conceptually repeated pow times.
Answers YES iff the full product equals -1 AND the repeated string can
be split into a prefix multiplying to i followed by a segment
multiplying to j (since i*j*k = -1, the remainder is then forced to k).
"""
pow, s = case
# Necessary condition: the whole product must be -1. exp_q reduces the
# exponent modulo 4, which is sound because units have period dividing 4.
if exp_q(prod_string(s), pow) != '-1':
return 'NO'
# Search for an 'i' prefix within at most 4 repetitions of s --
# presumably the running products cycle with period dividing 4, so a
# longer scan cannot help; verify against the contest analysis.
pow_i = min(pow, 4)
ans_i = starts_i(s * pow_i)
if ans_i:
remaining_chars = ans_i[1]
else:
return 'NO'
# Same bounded search for a 'j' segment right after the 'i' prefix.
pow_j = min(pow - pow_i, 4)
ans_j = starts_j(remaining_chars + s * pow_j)
if ans_j:
return 'YES'
else:
return 'NO'
def starts_i(s):
    """Scan *s* for the shortest prefix whose running product is exactly 'i'.

    Returns (True, rest_of_string) on success, otherwise False.
    """
    running = '1'
    for used, unit in enumerate(s, start=1):
        running = mul_q(running, unit)
        if running == 'i':
            return (True, s[used:])
    return False
def starts_j(s):
    """Scan *s* for the shortest prefix whose running product is exactly 'j'.

    Returns (True, rest_of_string) on success, otherwise False.
    """
    running = '1'
    for used, unit in enumerate(s, start=1):
        running = mul_q(running, unit)
        if running == 'j':
            return (True, s[used:])
    return False
# Read the contest input. Line 0 holds the case count; each case then spans
# two lines: a header whose second token is the repetition count, followed
# by the unit string itself. (Python 2 script: xrange and integer '/'.)
with open('c.in', 'r') as f:  # was leaked before -- handle was never closed
    lines = f.readlines()
cases = [(int(lines[2*i - 1].split()[1]), lines[2*i].strip()) for i in xrange(1, 1 + len(lines)/2)]
print(len(cases))
print(cases)  # debug dump of the parsed cases (output kept for parity)
# Emit one "Case #N: YES/NO" line per case; the with-block guarantees the
# output file is flushed and closed even if solve_case raises.
with open('c.out', 'w') as g:
    for i in xrange(len(cases)):
        g.write('Case #' + str(i + 1) + ': ' + str(solve_case(cases[i])) + '\n')
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
9e492dad1c020c8a6a1788e5051a0307eb217d82 | 804c1fcaa0f35383b7753f0c7e211220dee1009a | /handroll/tests/test_composers.py | a2b9b241604de4534b8325dd5deccdfc1d1bd8b3 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | handroll/handroll | e97e45b47eea14c6363c0fa8ae7c141254ced5ec | 7768a4d94ef9170a68098eed2a71456d88bc193d | refs/heads/master | 2020-12-13T13:09:17.916767 | 2018-06-18T02:40:25 | 2018-06-18T02:40:25 | 19,365,290 | 18 | 3 | null | 2017-05-08T01:00:47 | 2014-05-02T03:48:29 | Python | UTF-8 | Python | false | false | 20,070 | py | # Copyright (c) 2017, Matt Layman
import inspect
import os
import stat
import tempfile
import mock
from handroll.composers import Composer
from handroll.composers import Composers
from handroll.composers import CopyComposer
from handroll.composers.mixins import FrontmatterComposerMixin
from handroll.composers.atom import AtomComposer
from handroll.composers.generic import GenericHTMLComposer
from handroll.composers.j2 import Jinja2Composer
from handroll.composers.md import MarkdownComposer
from handroll.composers.rst import ReStructuredTextComposer
from handroll.composers.sass import SassComposer
from handroll.composers.txt import TextileComposer
from handroll.exceptions import AbortError
from handroll.tests import TestCase
class TestComposer(TestCase):
"""Tests for the abstract Composer base: the template methods must raise
NotImplementedError and the constructor must store the config."""
def _make_one(self):
config = self.factory.make_configuration()
return Composer(config)
def test_compose_not_implemented(self):
composer = self._make_one()
with self.assertRaises(NotImplementedError):
composer.compose(None, None, None)
def test_get_output_extension_not_implemented(self):
composer = self._make_one()
with self.assertRaises(NotImplementedError):
composer.get_output_extension('file.txt')
def test_permit_frontmatter(self):
composer = self._make_one()
with self.assertRaises(NotImplementedError):
composer.permit_frontmatter
def test_has_config(self):
config = self.factory.make_configuration()
composer = Composer(config)
self.assertEqual(config, composer._config)
class TestComposers(TestCase):
"""Tests for the Composers registry that selects a composer per source
file and proxies output-extension lookups."""
def _make_one(self):
config = self.factory.make_configuration()
return Composers(config)
def test_selects_composer(self):
composers = self._make_one()
composer = composers.select_composer_for('sample.md')
self.assertTrue(isinstance(composer, MarkdownComposer))
def test_has_config(self):
config = self.factory.make_configuration()
composers = Composers(config)
self.assertEqual(config, composers._config)
def test_get_output_extension(self):
composers = self._make_one()
extension = composers.get_output_extension('sample.md')
self.assertEqual('.html', extension)
class TestAtomComposer(TestCase):
"""Tests for AtomComposer: JSON feed source -> rendered .xml feed,
including skip-when-fresh and --force behavior."""
def _make_one(self):
config = self.factory.make_configuration()
return AtomComposer(config)
def setUp(self):
# Fresh empty source feed and a separate output directory per test.
site = tempfile.mkdtemp()
self.source_file = os.path.join(site, 'feed.atom')
open(self.source_file, 'w').close()
self.outdir = tempfile.mkdtemp()
self.output_file = os.path.join(self.outdir, 'feed.xml')
def test_composes_feed(self):
source = """{
"title": "Fakity Fake",
"id": "let's pretend this is unique",
"entries": [{
"title": "Sample A",
"updated": "2014-02-23T00:00:00",
"published": "2014-02-22T00:00:00",
"url": "http://some.website.com/a.html",
"summary": "A summary of the sample post"
}]
}"""
with open(self.source_file, 'w') as f:
f.write(source)
composer = self._make_one()
composer.compose(None, self.source_file, self.outdir)
self.assertTrue(os.path.exists(self.output_file))
def test_must_have_entries(self):
# A feed without "entries" must abort composition.
source = """{
"title": "Fakity Fake",
"id": "let's pretend this is unique"
}"""
with open(self.source_file, 'w') as f:
f.write(source)
composer = self._make_one()
with self.assertRaises(AbortError):
composer.compose(None, self.source_file, self.outdir)
@mock.patch('handroll.composers.atom.json')
def test_skips_up_to_date(self, json):
# Existing output newer than source: the JSON must not even be parsed.
open(self.output_file, 'w').close()
composer = self._make_one()
composer.compose(None, self.source_file, self.outdir)
self.assertFalse(json.loads.called)
def test_output_extension(self):
composer = self._make_one()
self.assertEqual('.xml', composer.get_output_extension('source.atom'))
def test_permit_frontmatter(self):
composer = self._make_one()
self.assertFalse(composer.permit_frontmatter)
@mock.patch('handroll.composers.atom.json')
def test_forces_update(self, json):
# With force=True the feed is regenerated even when output is fresh.
json.loads.return_value = {
'title': 'Fakity Fake',
'id': "let's pretend this is unique",
'entries': [{
'title': 'Sample A',
'updated': '2014-02-23T00:00:00',
'published': '2014-02-22T00:00:00',
'url': 'http://some.website.com/a.html',
'summary': 'A summary of the sample post'
}]
}
open(self.output_file, 'w').close()
composer = self._make_one()
composer._config.force = True
composer.compose(None, self.source_file, self.outdir)
self.assertTrue(json.loads.called)
class TestCopyComposer(TestCase):
"""Tests for CopyComposer: copy only when content differs or when forced;
the output keeps the source extension."""
def _make_one(self):
config = self.factory.make_configuration()
return CopyComposer(config)
@mock.patch('handroll.composers.shutil')
def test_skips_same_files(self, shutil):
# Identical (both empty) source and destination: no copy performed.
marker = 'marker.txt'
source = tempfile.mkdtemp()
source_file = os.path.join(source, marker)
outdir = tempfile.mkdtemp()
open(source_file, 'w').close()
open(os.path.join(outdir, marker), 'w').close()
composer = self._make_one()
composer.compose(None, source_file, outdir)
self.assertFalse(shutil.copy.called)
@mock.patch('handroll.composers.shutil')
def test_copies_when_content_differs(self, shutil):
marker = 'marker.txt'
source = tempfile.mkdtemp()
source_file = os.path.join(source, marker)
outdir = tempfile.mkdtemp()
open(source_file, 'w').close()
with open(os.path.join(outdir, marker), 'w') as f:
f.write('something different')
composer = self._make_one()
composer.compose(None, source_file, outdir)
self.assertTrue(shutil.copy.called)
def test_output_extension(self):
"""The copy composer takes the extension of the source file."""
composer = self._make_one()
self.assertEqual('.png', composer.get_output_extension('photo.png'))
def test_permit_frontmatter(self):
composer = self._make_one()
self.assertFalse(composer.permit_frontmatter)
@mock.patch('handroll.composers.shutil')
def test_copies_when_forced(self, shutil):
# force=True copies even when source and destination are identical.
marker = 'marker.txt'
source = tempfile.mkdtemp()
source_file = os.path.join(source, marker)
outdir = tempfile.mkdtemp()
open(source_file, 'w').close()
open(os.path.join(outdir, marker), 'w').close()
composer = self._make_one()
composer._config.force = True
composer.compose(None, source_file, outdir)
self.assertTrue(shutil.copy.called)
class TestGenericHTMLComposer(TestCase):
"""Tests for the GenericHTMLComposer base: template selection, mtime-based
update checks, and the abstract content hook."""
def _make_one(self):
config = self.factory.make_configuration()
return GenericHTMLComposer(config)
def test_composes_file(self):
# The base class has no content generator, so composing must raise.
catalog = mock.MagicMock()
site = tempfile.mkdtemp()
source_file = os.path.join(site, 'sample.generic')
open(source_file, 'w').close()
outdir = ''
composer = self._make_one()
with self.assertRaises(NotImplementedError):
composer.compose(catalog, source_file, outdir)
def test_selects_default_template(self):
catalog = mock.MagicMock()
default = mock.PropertyMock()
type(catalog).default = default
composer = self._make_one()
composer.select_template(catalog, {})
self.assertTrue(default.called)
def test_selects_specified_template(self):
# Frontmatter "template" key wins over the catalog default.
catalog = mock.MagicMock()
composer = self._make_one()
composer.select_template(catalog, {'template': 'base.j2'})
catalog.get_template.assert_called_once_with('base.j2')
def test_needs_update(self):
# An update is needed when either the source or the template is newer
# than the existing output file.
site = tempfile.mkdtemp()
output_file = os.path.join(site, 'output.md')
open(output_file, 'w').close()
future = os.path.getmtime(output_file) + 1
source_file = os.path.join(site, 'test.md')
open(source_file, 'w').close()
os.utime(source_file, (future, future))
template = mock.MagicMock()
template.last_modified = future
composer = self._make_one()
self.assertTrue(composer._needs_update(None, source_file, output_file))
past = future - 10
os.utime(source_file, (past, past))
self.assertTrue(
composer._needs_update(template, source_file, output_file))
template.last_modified = past
self.assertFalse(
composer._needs_update(template, source_file, output_file))
def test_output_extension(self):
composer = self._make_one()
self.assertEqual('.html', composer.get_output_extension('source.rst'))
def test_forces_update(self):
# force=True reports "needs update" even when everything is older.
site = tempfile.mkdtemp()
output_file = os.path.join(site, 'output.md')
open(output_file, 'w').close()
past = os.path.getmtime(output_file) - 10
source_file = os.path.join(site, 'test.md')
open(source_file, 'w').close()
os.utime(source_file, (past, past))
template = mock.MagicMock(last_modified=past)
composer = self._make_one()
composer._config.force = True
self.assertTrue(
composer._needs_update(template, source_file, output_file))
def test_permit_frontmatter(self):
composer = self._make_one()
self.assertTrue(composer.permit_frontmatter)
class TestMarkdownComposer(TestCase):
"""Tests for MarkdownComposer: HTML generation (including smartypants
typographic quotes) and the skip-when-fresh path."""
def _make_one(self):
config = self.factory.make_configuration()
return MarkdownComposer(config)
def test_generates_html(self):
source = '**bold**'
composer = self._make_one()
html = composer._generate_content(source)
self.assertEqual('<p><strong>bold</strong></p>', html)
def test_composes_no_update(self):
# Output newer than both source and template: no render call expected.
site = tempfile.mkdtemp()
source_file = os.path.join(site, 'test.md')
open(source_file, 'w').close()
source_mtime = os.path.getmtime(source_file)
future = source_mtime + 1
outdir = tempfile.mkdtemp()
output_file = os.path.join(outdir, 'test.html')
open(output_file, 'w').close()
os.utime(output_file, (future, future))
template = mock.MagicMock()
template.last_modified = source_mtime
catalog = mock.MagicMock()
catalog.default = template
composer = self._make_one()
composer.compose(catalog, source_file, outdir)
self.assertFalse(template.render.called)
def test_uses_smartypants(self):
# Straight quotes must come out as curly (smartypants) quotes.
source = '"quoted"'
composer = self._make_one()
html = composer._generate_content(source)
self.assertEqual('<p>“quoted”</p>', html)
class TestReStructuredTextComposer(TestCase):
"""Tests for ReStructuredTextComposer's docutils-based HTML output."""
def _make_one(self):
config = self.factory.make_configuration()
return ReStructuredTextComposer(config)
def test_generates_html(self):
source = '**bold**'
composer = self._make_one()
html = composer._generate_content(source)
# docutils wraps the body in a <div class="document"> container.
expected = '<div class="document">\n' \
'<p><strong>bold</strong></p>\n' \
'</div>\n'
self.assertEqual(expected, html)
class TestSassComposer(TestCase):
"""Tests for SassComposer: it shells out to a ``sass`` executable, so a
fake executable is planted on a temp PATH directory."""
def _make_fake_sass_bin(self):
# Create a directory containing an executable (but inert) "sass".
fake_bin = tempfile.mkdtemp()
fake_sass = os.path.join(fake_bin, 'sass')
with open(fake_sass, 'w') as f:
f.write('#!/usr/bin/env python')
st = os.stat(fake_sass)
os.chmod(fake_sass, st.st_mode | stat.S_IEXEC)
return fake_bin
def test_abort_with_no_sass(self):
"""Test that handroll aborts if ``sass`` is not installed."""
# The fake bin directory has no sass executable.
fake_bin = tempfile.mkdtemp()
with self.assertRaises(AbortError):
SassComposer(fake_bin)
def test_create(self):
fake_bin = self._make_fake_sass_bin()
composer = SassComposer(fake_bin)
self.assertTrue(isinstance(composer, SassComposer))
def test_build_command(self):
fake_bin = self._make_fake_sass_bin()
composer = SassComposer(fake_bin)
source_file = '/in/sassy.scss'
output_file = '/out/sass.css'
expected = [
os.path.join(fake_bin, 'sass'), '--style', 'compressed',
source_file, output_file]
actual = composer.build_command(source_file, output_file)
self.assertEqual(expected, actual)
@mock.patch('handroll.composers.sass.subprocess')
def test_failed_sass_aborts(self, subprocess):
# A non-zero return code from the sass process must abort the build.
fake_bin = self._make_fake_sass_bin()
composer = SassComposer(fake_bin)
source_file = '/in/sassy.scss'
output_dir = '/out'
subprocess.Popen.return_value.communicate.return_value = ('boom', '')
subprocess.Popen.return_value.returncode = 1
with self.assertRaises(AbortError):
composer.compose(None, source_file, output_dir)
def test_output_extension(self):
fake_bin = self._make_fake_sass_bin()
composer = SassComposer(fake_bin)
self.assertEqual('.css', composer.get_output_extension('source.sass'))
def test_permit_frontmatter(self):
fake_bin = self._make_fake_sass_bin()
composer = SassComposer(fake_bin)
self.assertFalse(composer.permit_frontmatter)
class TestTextileComposer(TestCase):
"""Tests for TextileComposer's Textile-to-HTML conversion."""
def test_generates_html(self):
source = '*bold*'
config = self.factory.make_configuration()
composer = TextileComposer(config)
html = composer._generate_content(source)
# The textile library indents its output with a leading tab.
self.assertEqual('\t<p><strong>bold</strong></p>', html)
class TestFrontmatterComposerMixin(TestCase):
"""Tests for FrontmatterComposerMixin: YAML frontmatter detection,
extraction, signal firing, and abort-on-malformed-input."""
def test_looks_like_frontmatter(self):
# Both the %YAML directive and a bare "---" mark frontmatter.
mixin = FrontmatterComposerMixin()
self.assertTrue(mixin._has_frontmatter('%YAML 1.1'))
self.assertTrue(mixin._has_frontmatter('---'))
def test_gets_frontmatter(self):
# Non-ASCII titles must survive the UTF-8 round trip.
source = inspect.cleandoc("""%YAML 1.1
---
title: "ØMQ: A dynamic book with surprises"
---
The Content
""")
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(source.encode('utf-8'))
mixin = FrontmatterComposerMixin()
data, source = mixin.get_data(f.name)
self.assertEqual('ØMQ: A dynamic book with surprises', data['title'])
self.assertEqual('The Content', source)
def test_gets_frontmatter_no_directive(self):
# Frontmatter without the explicit %YAML directive also parses.
source = inspect.cleandoc("""---
title: A Fake Title
---
The Content
""")
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(source.encode('utf-8'))
mixin = FrontmatterComposerMixin()
data, source = mixin.get_data(f.name)
self.assertEqual('A Fake Title', data['title'])
self.assertEqual('The Content', source)
@mock.patch('handroll.composers.mixins.signals')
def test_fires_frontmatter_loaded(self, signals):
source = inspect.cleandoc("""%YAML 1.1
---
title: A Fake Title
---
The Content
""")
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(source.encode('utf-8'))
mixin = FrontmatterComposerMixin()
data, source = mixin.get_data(f.name)
signals.frontmatter_loaded.send.assert_called_once_with(
f.name, frontmatter={'title': 'A Fake Title'})
def test_malformed_yaml(self):
# Missing the closing "---": extraction must abort.
source = inspect.cleandoc("""%YAML 1.1
---
title: A Fake Title
The Content
""")
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(source.encode('utf-8'))
mixin = FrontmatterComposerMixin()
with self.assertRaises(AbortError):
mixin.get_data(f.name)
def test_malformed_document_with_frontmatter(self):
# Frontmatter present but no document body: also an abort.
source = inspect.cleandoc("""%YAML 1.1
---
title: A Fake Title
""")
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(source.encode('utf-8'))
mixin = FrontmatterComposerMixin()
with self.assertRaises(AbortError):
mixin.get_data(f.name)
class TestJinja2Composer(TestCase):
"""Tests for Jinja2Composer: "file.ext.j2" renders to "file.ext" with
frontmatter + config available in the template context."""
def _make_one(self):
config = self.factory.make_configuration()
config.outdir = tempfile.mkdtemp()
return Jinja2Composer(config)
def test_get_output_extension(self):
# The trailing .j2 is dropped; the inner extension is kept.
composer = self._make_one()
extension = composer.get_output_extension('source.xyz.j2')
self.assertEqual('.xyz', extension)
def test_composes(self):
source = inspect.cleandoc("""%YAML 1.1
---
title: A Fake Title
---
title: {{ title }}
domain: {{ config.domain }}
""")
with tempfile.NamedTemporaryFile(delete=False, suffix='.txt.j2') as f:
f.write(source.encode('utf-8'))
composer = self._make_one()
# NOTE(review): rstrip('.j2') strips a character set, not the suffix;
# it happens to work for names ending in '.txt.j2' but would
# over-strip e.g. 'a2.j2'.
output_file = os.path.join(
composer._config.outdir, os.path.basename(f.name.rstrip('.j2')))
composer.compose(None, f.name, composer._config.outdir)
content = open(output_file, 'r').read()
self.assertEqual(
'title: A Fake Title\ndomain: http://www.example.com\n',
content)
def test_composes_no_frontmatter(self):
source = inspect.cleandoc("""First row
domain: {{ config.domain }}
""")
with tempfile.NamedTemporaryFile(delete=False, suffix='.txt.j2') as f:
f.write(source.encode('utf-8'))
composer = self._make_one()
output_file = os.path.join(
composer._config.outdir, os.path.basename(f.name.rstrip('.j2')))
composer.compose(None, f.name, composer._config.outdir)
content = open(output_file, 'r').read()
self.assertEqual(
'First row\ndomain: http://www.example.com\n', content)
def test_needs_update(self):
# Source newer than output -> update; source older -> no update.
site = tempfile.mkdtemp()
output_file = os.path.join(site, 'output.md')
open(output_file, 'w').close()
future = os.path.getmtime(output_file) + 1
source_file = os.path.join(site, 'test.md')
open(source_file, 'w').close()
os.utime(source_file, (future, future))
composer = self._make_one()
self.assertTrue(composer._needs_update(source_file, output_file))
past = future - 10
os.utime(source_file, (past, past))
self.assertFalse(composer._needs_update(source_file, output_file))
def test_forces_update(self):
site = tempfile.mkdtemp()
output_file = os.path.join(site, 'output.md')
open(output_file, 'w').close()
past = os.path.getmtime(output_file) - 10
source_file = os.path.join(site, 'test.md')
open(source_file, 'w').close()
os.utime(source_file, (past, past))
composer = self._make_one()
composer._config.force = True
self.assertTrue(composer._needs_update(source_file, output_file))
@mock.patch('handroll.composers.j2.jinja2.Template.render')
def test_skips_up_to_date(self, render):
site = tempfile.mkdtemp()
source_file = os.path.join(site, 'source.txt.j2')
open(source_file, 'w').close()
output_file = os.path.join(site, 'source.txt')
open(output_file, 'w').close()
composer = self._make_one()
composer.compose(None, source_file, site)
self.assertFalse(render.called)
def test_permit_frontmatter(self):
composer = self._make_one()
self.assertTrue(composer.permit_frontmatter)
| [
"matthewlayman@gmail.com"
] | matthewlayman@gmail.com |
1720eccbd3a5a5d87381eb2743badc0aabfe950c | 2bd385ec885cc67617524d4cc251a33a7fac95a1 | /models/eagleedu_syllabus.py | 300b71f24090838271ffc1127bcdd072d0714bcf | [] | no_license | development-team-work/eagleedu_core | 47eed702c95a4c8ba6625bd516946c56133b98a9 | 59cbdef95b6092a93ac12ab11bea237da345b3e7 | refs/heads/master | 2021-01-02T03:13:00.957763 | 2020-02-10T10:30:11 | 2020-02-10T10:30:11 | 239,466,974 | 0 | 0 | null | 2020-02-10T08:55:19 | 2020-02-10T08:55:18 | null | UTF-8 | Python | false | false | 752 | py | # -*- coding: utf-8 -*-
from eagle.exceptions import ValidationError
from eagle import fields, models, api, _
class EagleeduSyllabus(models.Model):
"""Syllabus record linking a class, a subject and an academic year.

``syllabus_display`` is the record name (``_rec_name``) and, per its
help text, the label printed on the marksheet as the subject.
"""
_name = 'eagleedu.syllabus'
_description = "Syllabus "
_rec_name='syllabus_display'
# Human-readable syllabus name.
name = fields.Char(string='Name', help="Enter the Name of the Syllabus")
# syllabus_code = fields.Char(string='Syllabus Code', compute="_get_code")
syllabus_display=fields.Char('Syllabus Display',help="This is printed on the marksheet as Subject")
standard_class_id = fields.Many2one('eagleedu.standard_class', string='Class ID')
subject_id = fields.Many2one('eagleedu.subject', string='Subject')
academic_year = fields.Many2one('eagleedu.academic.year', string='Academic Year')
| [
"rapidgrps@princegroup-bd.com"
] | rapidgrps@princegroup-bd.com |
3d30699e50d84cb0530efc016de11719e7b37e03 | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/I_w_M_to_W_focus_Zok_div/ch096/woColorJ/Sob_k25_s001_EroM/pyr_Tcrop255_p20_j15/pyr_1s/L4/step09_1side_L4.py | 82aff74ef2922ec79600254bd74c9122ba6bd036 | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,656 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### 把 kong_model2 加入 sys.path
import os
from tkinter import S
code_exe_path = os.path.realpath(__file__) ### 目前執行 step10_b.py 的 path
code_exe_path_element = code_exe_path.split("\\") ### 把 path 切分 等等 要找出 kong_model 在第幾層
kong_layer = code_exe_path_element.index("kong_model2") ### 找出 kong_model2 在第幾層
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### 定位出 kong_model2 的 dir
import sys ### 把 kong_model2 加入 sys.path
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
from step08_b_use_G_generate_I_w_M_to_Wx_Wy_Wz_combine import I_w_M_to_W
from step08_b_use_G_generate_0_util import Tight_crop, Color_jit
from step09_c_train_step import Train_step_I_w_M_to_W
from step09_d_KModel_builder_combine_step789 import KModel_builder, MODEL_NAME
# Shared generate/train ops: tight-crop to 255x255 with 20px padding; the
# training step adds jitter (jit_scale=15) while generation uses none.
color_jit = None
use_what_gen_op = I_w_M_to_W( separate_out=True, focus=True, tight_crop=Tight_crop(pad_size=20, resize=(255, 255), jit_scale= 0) )
use_what_train_step = Train_step_I_w_M_to_W( separate_out=True, focus=True, tight_crop=Tight_crop(pad_size=20, resize=(255, 255), jit_scale= 15), color_jit=color_jit )
use_hid_ch = 96
import time
start_time = time.time()
###############################################################################################################################################################################################
###############################################################################################################################################################################################
########################################################### Block1
### Block1
#########################################################################################
# conv_block_num masks per depth level (9 symmetric entries); 1side_N fills
# in the N outermost levels from each end toward the bottleneck.
pyramid_1side_1 = [1, 0, 0, 0, 0, 0, 0, 0, 1]
pyramid_1side_2 = [1, 1, 0, 0, 0, 0, 0, 1, 1]
pyramid_1side_3 = [1, 1, 1, 0, 0, 0, 1, 1, 1]
pyramid_1side_4 = [1, 1, 1, 1, 0, 1, 1, 1, 1]
pyramid_1side_5 = [1, 1, 1, 1, 1, 1, 1, 1, 1]
#########################################################################################
# One KModel builder per pyramid mask; only conv_block_num differs between them.
ch032_pyramid_1side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=4, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=4, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=4, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=4, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=4, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_5, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
#########################################################################################
###############################################################################################################################################################################################
# Smoke test: build one configuration, push a dummy 512x512 single-channel
# batch through the generator, and dump its weights/summary.
if(__name__ == "__main__"):
import numpy as np
print("build_model cost time:", time.time() - start_time)
data = np.zeros(shape=(1, 512, 512, 1))
use_model = ch032_pyramid_1side_4
use_model = use_model.build()
result = use_model.generator(data)
print(result.shape)
from kong_util.tf_model_util import Show_model_weights
Show_model_weights(use_model.generator)
use_model.generator.summary()
| [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
43506dfa09f5bd26109c821562d504b8dc7d7c6d | 6e357be547cbeb153c4778e3552716f40a21b007 | /문제풀이/최빈수 구하기/최빈수 구하기.py | 4cbf3d1271b85b37526f4112add6f9b8fb5b88c7 | [] | no_license | hyunsang-ahn/algorithm | db6a8fdf77806f06652a3f4c2e3234b50c1bb717 | c3a3c93c452e4b35202529e3209a26fbdc0c2ad7 | refs/heads/master | 2021-10-22T04:44:09.808795 | 2019-03-08T06:50:30 | 2019-03-08T06:50:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | import sys
from collections import Counter

sys.stdin = open('input.txt', 'r')  # redirect stdin so input() reads the test file

T = int(input())
for tc in range(1, T + 1):
    # `n` is the declared element count; the line must be consumed even though
    # the actual list length comes from splitting the data line.
    n = int(input())
    arr = list(map(int, input().split()))
    # Count every value in one O(n) pass instead of calling arr.count(i) per
    # element, which made the original loop O(n^2).
    counts = Counter(arr)
    top = max(counts.values())
    # Mode rule: among all values with the highest frequency, report the largest.
    mode = max(k for k, v in counts.items() if v == top)
    # Output format identical to the original: "#<tc> <mode>"
    print("#{} {}".format(tc, mode))
| [
"happylovetkd@naver.com"
] | happylovetkd@naver.com |
b4ea18da6321d5abc400a5a0d5cc3b4564cb4c65 | eff5cd25fa442b70491262bada0584eaaf8add46 | /tfx/components/base/executor_spec.py | 71518533a6ad04f0c3753e2968b7c9a2eb32c332 | [
"Apache-2.0"
] | permissive | fsx950223/tfx | c58e58a85e6de6e9abcb8790acbf36424b5b2029 | 527fe2bab6e4f62febfe1a2029358fabe55f418c | refs/heads/master | 2021-01-04T12:12:51.010090 | 2020-01-26T04:43:14 | 2020-01-26T04:43:14 | 240,543,231 | 1 | 0 | Apache-2.0 | 2020-02-14T15:48:12 | 2020-02-14T15:48:11 | null | UTF-8 | Python | false | false | 2,989 | py | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Executor specifications for defining what to to execute."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from typing import List, Text, Type
from six import with_metaclass
from tfx.components.base import base_executor
from tfx.utils import json_utils
class ExecutorSpec(with_metaclass(abc.ABCMeta, json_utils.Jsonable)):
  """A specification for a component executor.

  An instance of ExecutorSpec describes the implementation of a component.
  """
  # Abstract, JSON-serializable marker base: concrete subclasses below carry
  # either a Python executor class or a container image description.
class ExecutorClassSpec(ExecutorSpec):
  """Executor specification backed by an in-process Python class.

  Attributes:
    executor_class: the base_executor.BaseExecutor subclass that implements
      this component's execution logic (required).
  """

  def __init__(self, executor_class: Type[base_executor.BaseExecutor]):
    if executor_class:
      self.executor_class = executor_class
    else:
      raise ValueError('executor_class is required')
    super(ExecutorClassSpec, self).__init__()
class ExecutorContainerSpec(ExecutorSpec):
  """Executor specification backed by a container image.

  Bundles the image plus an optional command-line entrypoint and arguments,
  for example:

    spec = ExecutorContainerSpec(
        image='docker/whalesay',
        command=['cowsay'],
        args=['hello world'])

  Attributes:
    image: container image holding the executor application; assumed to be
      separately release-managed and tagged/versioned accordingly.
    command: entrypoint array, executed without a shell. When omitted the
      image's ENTRYPOINT applies. Jinja templating may be used to build the
      invocation from input/output metadata at runtime.
    args: arguments passed to the entrypoint. When omitted the image's CMD
      applies; Jinja templating may be used as for `command`.
  """

  def __init__(self,
               image: Text,
               command: List[Text] = None,
               args: List[Text] = None):
    if not image:
      raise ValueError('image cannot be None or empty.')
    # Plain value-holder: stash the three pieces and defer to the base class.
    self.image, self.command, self.args = image, command, args
    super(ExecutorContainerSpec, self).__init__()
| [
"tensorflow-extended-team@google.com"
] | tensorflow-extended-team@google.com |
bdbdec718b149a290f3085cd6eb64c14f9d426c8 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_014/ch58_2020_04_21_19_49_08_495191.py | 1d881d5e8a3270339654288032b617d7b21bba59 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | def conta_a (s):
i = 0
contador = 0
while i < len(s):
if s[i] == 'a':
contador = contador + 1
i += 1
return contador | [
"you@example.com"
] | you@example.com |
e6b2f84f0083b67ae2d203f72298316f4eff734e | 44b389338c12b0dc2018d8022031b58090c58a63 | /ProjectEuler/Problem034.py | 410d4e54f8c5ce48b11c1c93332c1f0b88526652 | [] | no_license | llcawthorne/old-python-learning-play | cbe71b414d6fafacec7bad681b91976648b230d3 | 5241613a5536cd5c086ec56acbc9d825935ab292 | refs/heads/master | 2016-09-05T17:47:47.985814 | 2015-07-13T01:25:44 | 2015-07-13T01:25:44 | 38,983,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | #!/usr/bin/env python3
"""Project Euler Problem 034
145 is a curious number, as 1! + 4! + 5! = 1 + 24 + 120 = 145.
Find the sum of all numbers which are equal to the sum of the factorial of their digits.
Note: as 1! = 1 and 2! = 2 are not sums they are not included.
"""
import math

# Precompute 0!..9! once: indexing a 10-entry table replaces a
# math.factorial call per digit in the hot loop.
FACTORIALS = [math.factorial(d) for d in range(10)]

# Upper bound: 7 * 9! has only 7 digits, so no number with 8 or more digits
# can equal the sum of its digit factorials. (Same bound as the original
# `fnine`.)
fnine = FACTORIALS[9] * 7

# Removed dead code from the original: `x = 145` was immediately clobbered
# by the loop variable.
curious = []
for x in range(10, fnine):
    # Sum the factorials of x's decimal digits.
    n = sum(FACTORIALS[int(digit)] for digit in str(x))
    if x == n:
        curious.append(x)
print('The curious numbers sum to', sum(curious))
| [
"LLC@acm.org"
] | LLC@acm.org |
77216f0de3ae8de629843bafb7defbf5cc5f8d29 | 200ec10b652f9c504728890f6ed7d20d07fbacae | /forms.py | 0becace3be7e76cd20e14cf95376646cfe47f020 | [] | no_license | Ks-Ksenia/flask_shop | f4edc17669c29ae02a89e836c3c48230147ae84f | 9eb44fd22bf99913c9824ea35e3922cb14ef2451 | refs/heads/master | 2023-03-01T13:55:20.749127 | 2021-02-14T09:29:04 | 2021-02-14T09:29:04 | 338,767,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,349 | py | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, \
SubmitField, SelectField, TextAreaField, FormField, FieldList
from wtforms.validators import Email, DataRequired, Length, EqualTo, required
from wtforms.fields.html5 import TelField, EmailField
from wtforms.widgets import CheckboxInput
class LoginForm(FlaskForm):
    """Sign-in form. Field labels and validation messages are user-facing
    Russian strings and must not be altered."""
    email = StringField('Email', validators=[Email('Некорректный email')])
    # Password must be 5-100 characters.
    password = PasswordField('Пароль', validators=[DataRequired(), Length(min=5, max=100,
                                                   message='Пароль должен быть от 5 до 100 символов')])
    # "Remember me" checkbox, unchecked by default.
    remember = BooleanField('Запомнить меня', default=False)
    submit = SubmitField('Войти')
class RegistrationForm(FlaskForm):
    """Sign-up form: name, email, and double password entry verified with
    EqualTo. Labels/messages are user-facing Russian strings."""
    username = StringField('Имя', validators=[DataRequired(), Length(max=100, message='Имя не должно превышать 100 символов')])
    email = StringField('Email', validators=[Email('Некорректный email')])
    password1 = PasswordField('Пароль', validators=[DataRequired(), Length(min=5, max=100,
                                                    message='Пароль должен быть от 5 до 100 символов')])
    # Second entry must match password1 exactly.
    password2 = PasswordField('Повторите пароль',
                              validators=[DataRequired(),
                                          Length(min=5, max=100, message='Пароль должен быть от 5 до 100 символов'),
                                          EqualTo('password1', message='Пароли не совпадают')])
    submit = SubmitField('Зарегистрироваться')
class OrderForm(FlaskForm):
    """Checkout form. CHOICE lists the delivery options: pickup ('Самовывоз')
    or courier delivery ('Доставка')."""
    CHOICE = [('Самовывоз', 'Самовывоз'), ('Доставка', 'Доставка')]
    first_name = StringField('Имя', validators=[DataRequired(), Length(max=100,
                                                message='Максимальная длинна имени 100 символов')])
    last_name = StringField('Фамилия', validators=[DataRequired(), Length(max=100,
                                               message='Максимальная длинна фамилии 100 символов')])
    email = EmailField('Email', validators=[Email('Некорректный email')])
    # Phone number capped at 12 characters (e.g. +7 plus 10 digits).
    phone = TelField('Телефон', validators=[DataRequired(), Length(max=12,
                                            message='Максимальная длина номера телефона 12 символов')])
    delivery = SelectField('Доставка', choices=CHOICE)
    # Free-text delivery address; only relevant when delivery is selected.
    address = TextAreaField('Адрес доставки')
    submit = SubmitField('Подтвердить заказ')
class SortForm(FlaskForm):
    """Catalogue sort/filter form. SORT values look like ORM ordering keys:
    'id' (default), 'price' ascending, '-price' descending."""
    SORT = [('id', 'умолчанию'), ('price', 'возрастанию цены'), ('-price', 'убыванию цены')]
    sort = SelectField('Сортировать по:', choices=SORT)
    # Optional price-range bounds, entered as plain text.
    min_price = StringField('Цена от')
    max_price = StringField('Цена до')
    # "In stock" filter checkbox.
    exist = BooleanField('В наличие')
    submit = SubmitField('Применить')
| [
"demag74@mail.ru"
] | demag74@mail.ru |
218afc519d26734b9d64d19e0eb875fa0f64f7b5 | 432b97b99fc95679053620458652d462eb6e03d1 | /ikasl/core/gsom.py | 07d4229d8e1e6ea1272d41f916e1bc4623302c5a | [
"Apache-2.0"
] | permissive | AathmanT/self-learning-algorithm | b07db0fe96a1e80ef0eb98e6863dd8a132e65db5 | 64637a1b801d18b7648c575c84dad67bb3113e46 | refs/heads/master | 2020-12-19T04:17:45.534149 | 2020-02-29T14:06:26 | 2020-02-29T14:06:26 | 235,618,332 | 1 | 2 | Apache-2.0 | 2020-02-28T14:18:32 | 2020-01-22T16:45:23 | Jupyter Notebook | UTF-8 | Python | false | false | 9,113 | py | import math
import random
import functools
import numpy as np
from core import growth_handler as Growth_Handler
from core import elements as Elements
from util import utilities as Utils
import time
class GSOM:
    """Growing Self-Organising Map.

    Trains a dynamically growing node map over `input_vectors`: `grow()`
    inserts new nodes where a winner's accumulated error exceeds the growth
    threshold (GT), `smooth()` then fine-tunes weights with a reduced
    learning rate and neighbourhood radius, and the hit helpers map inputs
    back onto trained nodes.
    """

    def __init__(self, params, input_vectors, dimensions, aggregate_node=None):
        # params: hyper-parameter object (learning rates, iteration counts,
        # growth threshold, distance function, ...).
        # aggregate_node: when given, the map is seeded from this node's
        # weights instead of a random 2x2 grid (used for upper IKASL layers).
        self.parameters = params
        self.inputs = np.asarray(input_vectors)
        self.growth_handler = Growth_Handler.GrowthHandler()
        self.aggregate_node = aggregate_node
        self.dimensions = dimensions
        # Number of inputs sampled per iteration in both grow and smooth.
        self.learn_smooth_sample_size = self.parameters.get_learn_smooth_sample_size(len(self.inputs))
        self.gsom_nodemap = {}

    def grow(self):
        """Run the growing phase and return the resulting node map."""
        self._initialize_network(self.dimensions)
        param = self.parameters
        # Optimise python references: that are reevaluated each time through the loop
        grow = self._grow_for_single_iteration_and_single_input
        learning_rate = param.START_LEARNING_RATE
        for i in range(0, param.LEARNING_ITERATIONS):
            # Learning-rate decay kicks in from the second iteration onwards.
            if i != 0:
                learning_rate = self._get_learning_rate(param, learning_rate, len(self.gsom_nodemap))
            neighbourhood_radius = self._get_neighbourhood_radius(param.LEARNING_ITERATIONS, i,
                                                                  param.MAX_NEIGHBOURHOOD_RADIUS)
            # Train on a random sample of the inputs each iteration.
            for k in random.sample(range(0, len(self.inputs)), self.learn_smooth_sample_size):
                grow(self.inputs[k], learning_rate, neighbourhood_radius)
        return self.gsom_nodemap

    def smooth(self):
        """Run the smoothing phase (reduced LR and radius); return the map."""
        learning_rate = self.parameters.START_LEARNING_RATE * self.parameters.SMOOTHING_LEARNING_RATE_FACTOR
        reduced_neighbourhood_radius = self.parameters.MAX_NEIGHBOURHOOD_RADIUS * self.parameters.SMOOTHING_NEIGHBOURHOOD_RADIUS_FACTOR
        smooth = self._smooth_for_single_iteration_and_single_input
        for i in range(0, self.parameters.SMOOTHING_ITERATIONS):
            if i != 0:
                learning_rate = self._get_learning_rate(self.parameters, learning_rate, len(self.gsom_nodemap))
            neighbourhood_radius = self._get_neighbourhood_radius(self.parameters.SMOOTHING_ITERATIONS, i,
                                                                  reduced_neighbourhood_radius)
            for k in random.sample(range(0, len(self.inputs)), self.learn_smooth_sample_size):
                smooth(self.inputs[k], learning_rate, neighbourhood_radius)
        return self.gsom_nodemap

    def assign_hits(self):
        """Record, on each input's winner node, the index of that input."""
        curr_count = 0
        for cur_input in self.inputs:
            winner = Utils.Utilities.select_winner(self.gsom_nodemap, cur_input, self.parameters.DISTANCE_FUNCTION,
                                                   self.parameters.DISTANCE_DIVIDER)
            node_index = Utils.Utilities.generate_index(winner.x, winner.y)
            self.gsom_nodemap[node_index].map_label(curr_count)
            curr_count += 1

    def evaluate_hits(self, input_vectors):
        """Increment the hit counter of the winner for each given input."""
        for i in range(0, len(input_vectors)):
            input_vector = input_vectors[i]
            Utils.Utilities.select_winner(self.gsom_nodemap, input_vector, self.parameters.DISTANCE_FUNCTION, self.parameters.DISTANCE_DIVIDER).hit()
        return self.gsom_nodemap

    def _smooth_for_single_iteration_and_single_input(self, input_vector, learning_rate, neigh_radius):
        """Smooth around one input's winner.

        NOTE(review): the elif chain below adjusts only the FIRST existing
        4-neighbour (precedence left > right > top > bottom). If all existing
        neighbours should be smoothed, these would be independent `if`
        blocks — confirm against the reference GSOM algorithm before changing.
        """
        gsom_nodemap = self.gsom_nodemap
        winner = Utils.Utilities.select_winner(gsom_nodemap, input_vector, self.parameters.DISTANCE_FUNCTION, self.parameters.DISTANCE_DIVIDER)
        left = Utils.Utilities.generate_index(winner.x - 1, winner.y)
        right = Utils.Utilities.generate_index(winner.x + 1, winner.y)
        top = Utils.Utilities.generate_index(winner.x, winner.y + 1)
        bottom = Utils.Utilities.generate_index(winner.x, winner.y - 1)
        if left in gsom_nodemap:
            self._adjust_weights_for_neighbours(gsom_nodemap[left], winner, input_vector, neigh_radius,
                                                learning_rate)
        elif right in gsom_nodemap:
            self._adjust_weights_for_neighbours(gsom_nodemap[right], winner, input_vector, neigh_radius,
                                                learning_rate)
        elif top in gsom_nodemap:
            self._adjust_weights_for_neighbours(gsom_nodemap[top], winner, input_vector, neigh_radius,
                                                learning_rate)
        elif bottom in gsom_nodemap:
            self._adjust_weights_for_neighbours(gsom_nodemap[bottom], winner, input_vector, neigh_radius,
                                                learning_rate)

    def _grow_for_single_iteration_and_single_input(self, input_vector, learning_rate, neigh_radius):
        """One growing step for one input: adapt weights, then maybe grow."""
        param = self.parameters
        gsom_nodemap = self.gsom_nodemap
        winner = Utils.Utilities.select_winner(gsom_nodemap, input_vector, param.DISTANCE_FUNCTION, param.DISTANCE_DIVIDER)
        # Update the error value of the winner node
        winner.cal_and_update_error(input_vector, param.DISTANCE_FUNCTION, param.DISTANCE_DIVIDER)
        # Weight adaptation for winner's neighborhood
        adjust = self._adjust_weights_for_neighbours
        for node_id in list(gsom_nodemap):
            # Exclude winner from the nodemap since winner's weight has already been updated in the previous step
            if not (gsom_nodemap[node_id].x == winner.x and gsom_nodemap[node_id].y == winner.y):
                adjust(gsom_nodemap[node_id], winner, input_vector, neigh_radius,
                       learning_rate)
        # Evaluate winner's weights and grow network it it's above Growth Threshold (GT)
        if winner.error > param.get_gt(len(input_vector)):
            self._adjust_winner_error(winner, len(input_vector))

    def _adjust_winner_error(self, winner, dimensions):
        """Either spread the winner's excess error or grow new nodes."""
        left = Utils.Utilities.generate_index(winner.x - 1, winner.y)
        right = Utils.Utilities.generate_index(winner.x + 1, winner.y)
        top = Utils.Utilities.generate_index(winner.x, winner.y + 1)
        bottom = Utils.Utilities.generate_index(winner.x, winner.y - 1)
        if left in self.gsom_nodemap and right in self.gsom_nodemap and top in self.gsom_nodemap and bottom in self.gsom_nodemap:
            # If the network has adequate neurons to process the input data, the weight vectors of those neurons are
            # adapted as such the distribution of the weight vectors will represent the input vector distribution.
            self._distribute_error_to_neighbours(winner, left, right, top, bottom, dimensions)
        else:
            # If the network does not have sufficient neurons, the weight will be accumulated on a single neuron.
            self.growth_handler.grow_nodes(self.gsom_nodemap, winner)

    def _distribute_error_to_neighbours(self, winner, left, right, top, bottom, dimensions):
        """Reset winner error to GT and inflate each neighbour's error."""
        winner.error = self.parameters.get_gt(dimensions)
        self.gsom_nodemap[left].error = self._calc_error_for_neighbours(self.gsom_nodemap[left])
        self.gsom_nodemap[right].error = self._calc_error_for_neighbours(self.gsom_nodemap[right])
        self.gsom_nodemap[top].error = self._calc_error_for_neighbours(self.gsom_nodemap[top])
        self.gsom_nodemap[bottom].error = self._calc_error_for_neighbours(self.gsom_nodemap[bottom])

    def _calc_error_for_neighbours(self, node):
        # Error spread factor FD > 0 inflates neighbour error slightly.
        return node.error * (1 + self.parameters.FD)

    def _adjust_weights_for_neighbours(self, node, winner, input_vector, neigh_radius, learning_rate):
        """Pull `node` toward the input with Gaussian distance falloff,
        if it lies within the neighbourhood radius of the winner."""
        node_dist_sqr = math.pow(winner.x - node.x, 2) + math.pow(winner.y - node.y, 2)
        neigh_radius_sqr = neigh_radius * neigh_radius
        if node_dist_sqr < neigh_radius_sqr:
            influence = math.exp(- node_dist_sqr / (2 * neigh_radius_sqr))
            node.adjust_weights(input_vector, influence, learning_rate)

    def _initialize_network(self, dimensions):
        """Seed the map: from the aggregate node if given, else a random 2x2 grid."""
        if self.aggregate_node is not None:
            # Generate the node map for aggregated nodes following from the second sequence of weights
            self.gsom_nodemap = {
                '0:0': Elements.GSOMNode(0, 0, self.aggregate_node.get_weights())
            }
        else:
            # Generate the node map for initial GSOM layer - for all the inputs
            self.gsom_nodemap = {
                '0:0': Elements.GSOMNode(0, 0, np.random.rand(dimensions)),
                '0:1': Elements.GSOMNode(0, 1, np.random.rand(dimensions)),
                '1:0': Elements.GSOMNode(1, 0, np.random.rand(dimensions)),
                '1:1': Elements.GSOMNode(1, 1, np.random.rand(dimensions)),
            }

    def _get_learning_rate(self, parameters, prev_learning_rate, nodemap_size):
        # LR decay scaled by current map size (larger maps decay less).
        return parameters.ALPHA * (1 - (parameters.R / nodemap_size)) * prev_learning_rate

    def _get_neighbourhood_radius(self, total_iteration, iteration, max_neighbourhood_radius):
        # Exponential radius shrink over the iteration schedule.
        time_constant = total_iteration / math.log(max_neighbourhood_radius)
        return max_neighbourhood_radius * math.exp(- iteration / time_constant)
| [
"razmik89@gmail.com"
] | razmik89@gmail.com |
9f07b90ac5d6626a6da98f349aa934ab9f7f771f | e2e1732b6eb1a7a6dfeba76762851ad06eb8e482 | /wangban/wangban/spiders/redisspider.py | eabd738eb7841186dc293026c518f1ae4a617c48 | [] | no_license | nightqiuhua/bigCrawlers | 551e80d55df492c89ae0e0e0bd70c0e5f873068d | 19b86130c8af057d06014865d150e3d2ed6cc319 | refs/heads/main | 2023-03-23T01:13:26.021850 | 2021-03-03T15:09:28 | 2021-03-03T15:09:28 | 344,165,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,206 | py | from scrapy import signals
from scrapy.exceptions import DontCloseSpider
from wangban_utils.redis_util import get_redis_conn
from scrapy.spiders import Spider
from items import HangzhouItem
import socket
from datetime import datetime
import os
from urllib.parse import urljoin
import json
import time
from wangban_utils.mongo_mysql_util import MongoDB_To_MySQL
from modify_func import all_modify_func
from wangban_utils.mongo_util import MongodbClass
from scrapy.utils.project import get_project_settings
from . import workers
SETTINGS = get_project_settings()
class RedisSpider(Spider):
    """Scrapy spider that pulls URL tasks from Redis queues and dispatches
    each task to a site-specific worker spider (from `workers`).

    The spider never closes on idle: `_spider_idle` keeps rescheduling
    requests from the Redis work queue.
    """
    name = 'redisspider'

    def __init__(self):
        super().__init__()
        self.redis_conn = get_redis_conn()
        #self.to_mysql = MongoDB_To_MySQL(self.name)
        # Max number of tasks pulled from Redis per scheduling round.
        self.redis_batch_size = 100
        self.work_queue = SETTINGS['URLS_WORK_TASKS']
        self.check_queue = SETTINGS['URLS_CHECK_TASKS']
        #self.sche_updator = UpdateFilterClass(self.name)
        self.pre_suf = None
        # Copy of the worker-spider registry: name -> worker spider class.
        self.workers = dict(workers)

    def start_requests(self):
        return self.next_requests()

    def schedule_to_works(self):
        """Move up to redis_batch_size tasks from the check queue to the work queue."""
        found = 0
        while found < self.redis_batch_size:
            data = self.redis_conn.lpop(self.check_queue)
            if not data:
                break
            self.redis_conn.rpush(self.work_queue,data)
            found +=1

    @classmethod
    def from_crawler(cls,crawler,*args,**kwargs):
        # Hook spider lifecycle signals so the spider stays alive while idle.
        spider = super().from_crawler(crawler,*args,**kwargs)
        crawler.signals.connect(spider._spider_opened,signal=signals.spider_opened)
        crawler.signals.connect(spider._spider_idle,signal=signals.spider_idle)
        #crawler.signals.connect(spider._spider_closed,signal=signals.spider_closed)
        return spider

    def _spider_opened(self,spider):
        pass

    def _spider_idle(self,spider):
        # On idle, poll Redis for more tasks and prevent the spider closing.
        self.schedule_next_requests()
        raise DontCloseSpider

    def schedule_next_requests(self):
        for req in self.next_requests():
            self.crawler.engine.crawl(req, spider=self)

    # worker_spider: the site-specific spider doing the actual work
    # (e.g. the Anji county spider).
    def next_requests(self):
        """Yield up to redis_batch_size requests built from Redis work-queue tasks."""
        fetch_one = self.redis_conn.lpop
        found = 0
        #self.schedule_to_works()
        while found < self.redis_batch_size:
            data = fetch_one(self.work_queue)
            if not data:
                break
            links_dict = json.loads(data.decode('utf-8'))
            worker_spider = self.workers[links_dict['name']]() # pick the worker_spider by task name; `workers` holds all worker spider classes
            yield worker_spider.generate_request(links_dict=links_dict,spider= self)
            found += 1

    def parse(self,response):
        """Delegate field extraction to the matching worker spider and build the item.

        Any extraction error is logged and the item is silently dropped
        (returns None) rather than aborting the crawl.
        """
        worker_spider = self.workers[response.meta['name']]()
        items = HangzhouItem()
        #print('response.url',response.url)
        try:
            items['url'] = response.url
            items['project'] = 'hangzhou'
            items['spider'] = worker_spider.name
            items['server'] = socket.gethostname()
            items['crawling_date'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            #primary fields
            items['source_website'] = worker_spider.source_website
            items['website_area'] = worker_spider.specific_area
            items['specific_area'] = response.meta['an_county']
            items['an_type'] = response.meta['an_type']
            items['an_major'] = response.meta['an_major']
            items['an_sub'] = response.meta['an_sub']
            items['project_title']=worker_spider.an_title_parse(response)
            items['on_date'] = worker_spider.an_on_date_parse(response)
            items['an_title'] = worker_spider.an_title_parse(response)
            items['an_url'] = worker_spider.final_url(response)
            items['an_refer_url'] = response.meta['an_refer_url']
            items['crawling_number'] = '1'
            items['an_content'] = worker_spider.an_content(response)
            items['code'] = 'NONE'
        except Exception as e:
            print('parse error',response.url)
            print('parse error',e)
        else:
            return items
| [
"1320551630@qq.com"
] | 1320551630@qq.com |
8e0110d97e1eef72cc626068e9dc37da471245a5 | 3b2940c38412e5216527e35093396470060cca2f | /top/api/rest/TmallTraderateFeedsGetRequest.py | 5b6fa08b6e0e7bdd1d538ab76409fcdd3e1390ad | [] | no_license | akingthink/goods | 842eb09daddc2611868b01ebd6e330e5dd7d50be | ffdb5868a8df5c2935fc6142edcdf4c661c84dca | refs/heads/master | 2021-01-10T14:22:54.061570 | 2016-03-04T09:48:24 | 2016-03-04T09:48:24 | 45,093,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | '''
Created by auto_sdk on 2015-01-20 12:44:31
'''
from top.api.base import RestApi
class TmallTraderateFeedsGetRequest(RestApi):
    """Auto-generated Taobao Open Platform (TOP) request wrapper for the
    `tmall.traderate.feeds.get` API."""
    def __init__(self,domain='gw.api.taobao.com',port=80):
        RestApi.__init__(self,domain, port)
        # Request parameter: the child (sub-)trade id to query rates for.
        self.child_trade_id = None
    def getapiname(self):
        return 'tmall.traderate.feeds.get'
| [
"yangwenjin@T4F-MBP-17.local"
] | yangwenjin@T4F-MBP-17.local |
b65a444228c179437b655748fdf4aa97e4c9f16b | 86f8bf3933208329eb73bfcba5e1318dbb2ddafa | /hello_world/django/benckmark/settings.py | 59dcfe2806efd190dafb6247412c676e1aacfc40 | [] | no_license | TakesxiSximada/benchmarks | 42ce5466c813e45db78f87ca391806fbb845a16c | 9cd2fc732ed006fd3554e01b1fc71bfcb3ada312 | refs/heads/master | 2021-01-15T23:02:14.063157 | 2015-05-30T18:52:08 | 2015-05-30T18:52:08 | 36,551,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,237 | py | """
Django settings for benckmark project.
Generated by 'django-admin startproject' using Django 1.8.dev20141227141312.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; for any real
# deployment rotate it and load it from the environment instead.
SECRET_KEY = '@d0)=*&5(ld#322i*2h#0t#!%d(c01t_eg!*nqla9m2qid%$4_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# NOTE(review): TEMPLATE_DEBUG was folded into the TEMPLATES setting in
# later Django releases; it is ignored by modern versions.
TEMPLATE_DEBUG = True
# Only local access is expected for this benchmark project.
ALLOWED_HOSTS = [
    'localhost',
    '127.0.0.1',
]
# Application definition
INSTALLED_APPS = (
    # 'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'benckmark.urls'
WSGI_APPLICATION = 'benckmark.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'
| [
"takesxi.sximada@gmail.com"
] | takesxi.sximada@gmail.com |
eeee37bc3e3fdfee9d00d5b5551c681844a5fc81 | 65c539e235155b15946cbc7f8838bf69f56086c0 | /learn_torch/mini_cnn.py | 31436782fa363b25b8c0d230125bba80acf52950 | [] | no_license | xkcomeon/any-whim | 08a070a8ae8d795cb76e77f0f0f61edea7e5d60e | ce7160686d3689fbd4350420d1f130d7cce5c2c4 | refs/heads/master | 2023-03-20T08:16:43.446082 | 2021-03-14T14:48:02 | 2021-03-14T14:48:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,614 | py | # 写一个中文的单字识别的cnn
# Try to handle classification despite rotation/scaling variations as far as possible.
import os
import cv2
import numpy as np
# Read single-character image files.
def read_imginfos(file):
    """Load labelled character images from directory *file*.

    The FIRST character of each .jpg/.png filename is its class label.
    Images are resized to 40x40 and transposed to channel-first (c, x, y)
    layout. Returns (imginfos, class_types) where class_types maps
    label -> index following sorted label order.
    """
    # Single-character recognition data: the first character of each image
    # filename encodes its class label.
    class_types = set()
    imginfos = []
    # First pass: collect the set of class labels.
    for i in os.listdir(file):
        if i.endswith('.jpg') or i.endswith('.png'):
            class_types.add(i[0])
    # Second pass: decode and normalise each image.
    for i in os.listdir(file):
        if i.endswith('.jpg') or i.endswith('.png'):
            fil = os.path.join(file, i)
            # np.fromfile + imdecode handles non-ASCII file paths (cv2.imread can fail on them).
            img = cv2.imdecode(np.fromfile(fil, dtype=np.uint8), 1)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # [y,x,c]
            img = cv2.resize(img, (40, 40))
            img = np.transpose(img, (2,1,0)) # [c,x,y]
            imginfo = {}
            imginfo['class'] = i[0]
            imginfo['img'] = img
            imginfos.append(imginfo)
            # cv2.imshow('test', img)
            # cv2.waitKey(0)
    # Map each label to a stable index (sorted order).
    class_types = {tp: idx for idx, tp in enumerate(sorted(class_types))}
    return imginfos, class_types
# Build y_true (one-hot target) for loss computation.
def make_y_true(imginfo, class_types):
    """Build a one-hot FloatTensor target for *imginfo*'s class label.

    imginfo: dict with a 'class' key (single-character label).
    class_types: mapping label -> class index.

    Removed two dead statements from the original: `img = imginfo['img']`
    (never used) and a discarded `class_types.get(...)` call.
    """
    clz = [0.] * len(class_types)
    clz[class_types.get(imginfo['class'])] = 1.
    return torch.FloatTensor(clz)
def load_data(filepath):
    """Read labelled images from *filepath* and pair each image tensor with
    its one-hot target; return (train_data, class_types)."""
    imginfos, class_types = read_imginfos(filepath)
    train_data = [
        [torch.FloatTensor(info['img']), make_y_true(info, class_types)]
        for info in imginfos
    ]
    return train_data, class_types
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as Data
from torch.autograd import Variable
from collections import OrderedDict
USE_CUDA = True if torch.cuda.is_available() else False  # prefer GPU when one is present
DEVICE = 'cuda' if USE_CUDA else 'cpu'  # device string used throughout this module
torch.set_printoptions(precision=2, sci_mode=False, linewidth=120, profile='full')  # compact tensor printing for debugging
class MiniCNN(nn.Module):
    """Small CNN classifier for 40x40 RGB single-character images.

    NOTE: the layer names below ('ConvBN_0' ... 'Linear') are part of the
    persisted state_dict layout ('net.pkl'); renaming them would break
    load_state().
    """
    class ConvBN(nn.Module):
        # Conv -> BatchNorm -> LeakyReLU building block.
        def __init__(self, cin, cout, kernel_size=3, stride=1, padding=None):
            super().__init__()
            # "Same" padding for odd kernel sizes when no padding is given.
            padding = (kernel_size - 1) // 2 if not padding else padding
            self.conv = nn.Conv2d(cin, cout, kernel_size, stride, padding, bias=False)
            self.bn = nn.BatchNorm2d(cout, momentum=0.01)
            self.relu = nn.LeakyReLU(0.1, inplace=True)
        def forward(self, x):
            return self.relu(self.bn(self.conv(x)))
    def __init__(self, class_types, inchennel=3):
        super().__init__()
        self.oceil = len(class_types)  # number of output classes
        self.model = nn.Sequential(
            OrderedDict([
                ('ConvBN_0', self.ConvBN(inchennel, 32)),
                ('Pool_0', nn.MaxPool2d(2, 2)),
                ('ConvBN_1', self.ConvBN(32, 64)),
                ('Pool_1', nn.MaxPool2d(2, 2)),
                ('ConvBN_2', self.ConvBN(64, 128)),
                ('Pool_2', nn.MaxPool2d(2, 2)),
                ('ConvBN_3', self.ConvBN(128, 256)),
                ('Flatten', nn.Flatten()),
                # 40 -> 20 -> 10 -> 5 spatial; 256 channels * 5 * 5 = 6400.
                ('Linear', nn.Linear(6400, self.oceil)),
            ])
        )
    def forward(self, x):
        # Per-class sigmoid scores (paired with MSE loss in miniloss).
        x = torch.sigmoid(self.model(x))
        return x
class miniloss(nn.Module):
    """Sum-reduced MSE training loss with an optional logging hook."""

    def __init__(self, class_types):
        super().__init__()
        self.clazlen = len(class_types)  # number of classes (kept for reference; unused in forward)

    def forward(self, pred, targ, callback=None):
        """Return F.mse_loss(pred, targ, reduction='sum') and log it.

        callback: optional logging function, defaults to print.
        Bug fix: the original did `global print; print = callback ...`,
        permanently rebinding the module-wide builtin `print` after the first
        call with a callback. A local name is used instead; behaviour of a
        single call is unchanged.
        """
        loss = F.mse_loss(pred, targ, reduction='sum')
        log = callback if callback else print
        log(loss)
        return loss
def train(train_data, class_types):
    """Train MiniCNN on (image tensor, one-hot target) pairs.

    A checkpoint dict {'net', 'optimizer', 'epoch', 'class_types'} is written
    to 'net.pkl' after every epoch, each save overwriting the previous one.
    """
    EPOCH = 10        # full passes over the training data
    BATCH_SIZE = 100
    LR = 0.001        # Adam learning rate
    net = MiniCNN(class_types).to(DEVICE)
    mloss = miniloss(class_types).to(DEVICE)
    optimizer = torch.optim.Adam(net.parameters(), lr=LR)
    train_loader = Data.DataLoader(
        dataset=train_data,
        batch_size=BATCH_SIZE,
        shuffle=True,
    )
    for epoch in range(EPOCH):
        print('epoch', epoch)
        for step, (b_x, b_y) in enumerate(train_loader):
            # NOTE(review): Variable is a deprecated no-op wrapper on modern
            # PyTorch; plain tensors suffice here.
            b_x = Variable(b_x).to(DEVICE)
            b_y = Variable(b_y).to(DEVICE)
            loss = mloss(net(b_x), b_y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        state = {'net':net.state_dict(), 'optimizer':optimizer, 'epoch':epoch+1, 'class_types':class_types}
        torch.save(state, 'net.pkl')
        print('save.')
    print('end.')
def load_state(filename):
    """Load a checkpoint written by train() and return the state dict.

    The returned dict's 'net' entry is replaced with a ready-to-use MiniCNN
    instance rebuilt from the stored class_types and weights.
    """
    state = torch.load(filename)
    class_types = state['class_types']
    net = MiniCNN(class_types)
    net.load_state_dict(state['net'])
    net.to(DEVICE)
    net.eval()  # inference mode: freezes BatchNorm running statistics
    state['net'] = net
    return state
def test(filename, state):
    """Classify one image file with the loaded model; return the label.

    state: checkpoint dict from load_state() ('net' is a ready model,
    'class_types' maps label -> index).
    NOTE(review): the name `test` reads like a unit test; `predict` would be
    clearer.
    """
    net = state['net'].to(DEVICE)
    class_types = state['class_types']
    # Same preprocessing as read_imginfos: decode, BGR->RGB, 40x40, (c,x,y).
    img = cv2.imdecode(np.fromfile(filename, dtype=np.uint8), 1)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # [y,x,c]
    img = cv2.resize(img, (40, 40))
    img = np.transpose(img, (2,1,0)) # [c,x,y]
    x = torch.FloatTensor(img).unsqueeze(0).to(DEVICE)  # add batch dimension
    v = net(x)
    if USE_CUDA:
        v = v.cpu().detach().numpy()
    else:
        v = v.detach().numpy()
    v = v[0].tolist()
    # argmax over the per-class scores; sorted(class_types) matches the index
    # order used when building the one-hot targets.
    r = sorted(class_types)[v.index(max(v))]
    print(v)
    print(r)
    return r
# Script entry: (re)training is commented out; load the saved checkpoint and
# classify one sample image.
# NOTE(review): consider guarding this with `if __name__ == '__main__':` so
# importing the module does not trigger model loading.
# train_data, class_types = load_data('./train_img')
# train(train_data, class_types)
print('loading model.')
state = load_state('net.pkl')
print('loading model. ok.')
test('./train_img/你_00_30_(255, 255, 255)_(0, 0, 255)_simsun.ttc.jpg', state)
| [
"opaquism@hotmail.com"
] | opaquism@hotmail.com |
844498d5a39b662dec59c8f2370751615f418417 | 3365e4d4fc67bbefe4e8c755af289c535437c6f4 | /.history/src/core/dialogs/waterfall_dialog_20170814144141.py | dbaa69a0f387bec839f238e48d9d55b0e64da868 | [] | no_license | kiranhegde/OncoPlotter | f3ab9cdf193e87c7be78b16501ad295ac8f7d2f1 | b79ac6aa9c6c2ca8173bc8992ba3230aa3880636 | refs/heads/master | 2021-05-21T16:23:45.087035 | 2017-09-07T01:13:16 | 2017-09-07T01:13:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,426 | py | '''
Refs:
Embedding plot: https://sukhbinder.wordpress.com/2013/12/16/simple-pyqt-and-matplotlib-example-with-zoompan/
'''
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
from PyQt5.QtWidgets import (QApplication, QDialog, QWidget, QPushButton, QVBoxLayout, QTreeWidget, QTreeWidgetItem)
from PyQt5 import QtCore, QtGui
import core.gui.waterfall as waterfall
import numpy as np
from pprint import pprint
class Waterfall(QWidget, waterfall.Ui_Waterfall):
    """Settings/data panel of the waterfall plot dialog.

    Emits `general_settings_signal` with the general plotting parameters and
    receives the plot's bar artists via `on_generated_rectangles_signal`.
    """
    general_settings_signal = QtCore.pyqtSignal(list)  # send list of plotting params
    updated_rectangles_signal = QtCore.pyqtSignal(list)  # send list of updated artists for redrawing

    def __init__(self, parent):
        super(Waterfall, self).__init__(parent)
        self.setupUi(self)
        # Button functions
        self.btn_apply_general_settings.clicked.connect(self.send_settings)
        self.patient_tree = self.create_patient_tree()
        self.data_viewer_container.addWidget(self.patient_tree)

    def on_waterfall_data_signal(self, signal):
        """Receive the plotting data from the data-entry side."""
        self.waterfall_data = signal['waterfall_data']  # pandas dataframe

    def on_generated_rectangles_signal(self, signal):
        """Receive the bar artists produced by the plotter and list them."""
        self.rectangles_received = signal[0]
        self.add_items(self.rectangles_received)  # display in table

    def send_settings(self, signal):
        """Collect the general-settings widget values and emit them as a list."""
        self.list_general_settings = [
            self.plot_title.text(),
            self.x_label.text(),
            self.y_label.text(),
            self.twenty_percent_line.isChecked(),
            self.thirty_percent_line.isChecked(),
            self.zero_percent_line.isChecked(),
            self.display_responses_as_text.isChecked(),
        ]
        self.general_settings_signal.emit(self.list_general_settings)

    def create_patient_tree(self):
        """Create the QTreeWidget used to display per-patient plot data."""
        self.tree = QTreeWidget()
        self.root = self.tree.invisibleRootItem()
        self.headers = [
            'Patient #',
            'Best response %',
            'Overall response',
            'Cancer type',
        ]
        self.headers_item = QTreeWidgetItem(self.headers)
        self.tree.setColumnCount(len(self.headers))
        self.tree.setHeaderItem(self.headers_item)
        self.root.setExpanded(True)
        return self.tree

    def add_items(self, rectangles=None):
        """Populate the viewing tree from the received bar artists.

        Bug fix: the original signature took no argument, yet
        on_generated_rectangles_signal called add_items(self.rectangles_received),
        raising TypeError. The parameter now defaults to the stored list, so
        both call styles work.
        """
        if rectangles is None:
            rectangles = self.rectangles_received
        for rect in rectangles:
            # TODO(review): build QTreeWidgetItems per rectangle here; for
            # now this only logs each bar's label, as the original did.
            print(rect.get_label())
class WaterfallPlotter(QWidget):
    """Embeds a matplotlib canvas and renders the waterfall bar chart."""

    generated_rectangles_signal = QtCore.pyqtSignal(list)  # send list of rects for data display in tree

    def __init__(self, parent):
        super(WaterfallPlotter, self).__init__(parent)
        self.figure = plt.figure()
        self.canvas = FigureCanvas(self.figure)
        self.toolbar = NavigationToolbar(self.canvas, self)
        self.btn_plot = QPushButton('Default Plot')
        self.btn_plot.clicked.connect(self.default_plot)
        self.layout = QVBoxLayout()
        self.layout.addWidget(self.toolbar)
        self.layout.addWidget(self.canvas)
        self.layout.addWidget(self.btn_plot)
        self.setLayout(self.layout)

    def on_waterfall_data_signal(self, signal):
        """Receive the waterfall dataset and enable plotting."""
        self.waterfall_data = signal['waterfall_data']  # pandas dataframe
        self.btn_plot.setEnabled(True)

    def on_general_settings_signal(self, signal):
        """Apply title/axis labels sent by the settings panel.

        BUG FIX: the original called ``hasattr(self, 'ax')`` and discarded
        the result, so it never actually guarded against a missing axes;
        the subsequent AttributeError was merely printed.  Guard explicitly.
        """
        if not hasattr(self, 'ax'):
            return
        try:
            self.ax.set_title(signal[0])
            self.ax.set_xlabel(signal[1])
            self.ax.set_ylabel(signal[2])
            self.canvas.draw()
        except Exception as e:
            print(e)

    def default_plot(self):
        '''
        Plot waterfall data: one bar per patient, with the standard
        RECIST reference lines at +20%, -30% and 0%.
        '''
        self.figure.clear()
        self.rect_locations = np.arange(len(self.waterfall_data['Best response percent change']))
        self.ax = self.figure.add_subplot(111)
        self.ax.axhline(y=20, linestyle='--', c='k', alpha=0.5, lw=2.0, label='twenty_percent')
        self.ax.axhline(y=-30, linestyle='--', c='k', alpha=0.5, lw=2.0, label='thirty_percent')
        self.ax.axhline(y=0, c='k', alpha=1, lw=2.0, label='zero_percent')
        self.ax.grid(color='k', axis='y', alpha=0.25)
        self.rects = self.ax.bar(self.rect_locations,
                                 self.waterfall_data['Best response percent change'],
                                 label=self.waterfall_data['Patient number'])
        self.auto_label_responses(self.ax, self.rects, self.waterfall_data)
        self.canvas.draw()
        # BUG FIX: the original called self.ax.hold(False) here; Axes.hold
        # was removed in matplotlib 3.0 and raised AttributeError.  The
        # figure.clear() above already guarantees a fresh plot per call.
        self.generated_rectangles_signal.emit([self.rects])

    def plot_table(self):
        """Render the remaining per-patient columns as a table below the axis."""
        rows = ['%s' % x for x in self.waterfall_data.keys()]
        rows = rows[4:]  # skip the 4 standard headers; the rest are table rows
        columns = self.waterfall_data['Patient number']  # patient numbers
        cell_text = []
        for row in rows:
            cell_text_temp = []
            for col in range(len(columns)):
                cell_text_temp.append(self.waterfall_data[row][col])
            cell_text.append(cell_text_temp)
        the_table = plt.table(cellText=cell_text, rowLabels=rows,
                              colLabels=columns, loc='bottom', cellLoc='center')
        plt.subplots_adjust(bottom=0.15, left=0.5)
        self.ax.set_xlim(-0.5, len(columns) - 0.5)
        # BUG FIX: string values ('off') for tick_params booleans were
        # removed in matplotlib 3.0; use real booleans.
        plt.tick_params(
            axis='x',            # changes apply to the x-axis
            which='both',        # both major and minor ticks are affected
            bottom=False,        # ticks along the bottom edge are off
            top=False,           # ticks along the top edge are off
            labelbottom=False    # labels along the bottom edge are off
        )

    def update_plot(self):
        '''
        TODO: redraw the plot in place when settings change.
        '''
        pass

    def auto_label_responses(self, ax, rects, waterfall_data):
        '''Add the overall-response text above (or below) each bar.'''
        for i, rect in enumerate(rects):
            height = rect.get_height()
            # Place text on the outside of the bar: above positive bars,
            # below negative ones.
            valign = 'bottom' if height >= 0 else 'top'
            ax.text(rect.get_x() + rect.get_width() / 2., height,
                    '%s' % waterfall_data['Overall response'][i],
                    ha='center', va=valign)
| [
"ngoyal95@terpmail.umd.edu"
] | ngoyal95@terpmail.umd.edu |
d46d75034818cde56a867d61ae3ff507e3d0284c | caf8cbcafd448a301997770165b323438d119f5e | /.history/chapter01/python_01_20201119170541.py | 98301dd8d3519273d4add0a7cd4f5762ebe09064 | [
"MIT"
] | permissive | KustomApe/nerdape | 03e0691f675f13ce2aefa46ee230111247e90c72 | aef6fb2d1f8c364b26d91bf8570b4487a24de69a | refs/heads/main | 2023-01-23T10:13:26.584386 | 2020-11-28T22:29:49 | 2020-11-28T22:29:49 | 309,897,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | """
[今日やること・学ぶこと]
""" | [
"kustomape@gmail.com"
] | kustomape@gmail.com |
49ede39dee06aaa502ae7e4507c81c91e49f634c | dbbdf35bff726681ae34ad08eeda5f30929e2ae9 | /supervised_learning/0x00-binary_classification/8-neural_network.py | 6f3dce07188aef74a022e7625d340e1657f4161c | [] | no_license | jorgezafra94/holbertonschool-machine_learning | 0b7f61c954e5d64b1f91ec14c261527712243e98 | 8ad4c2594ff78b345dbd92e9d54d2a143ac4071a | refs/heads/master | 2023-02-03T20:19:36.544390 | 2020-12-21T21:49:10 | 2020-12-21T21:49:10 | 255,323,504 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,439 | py | #!/usr/bin/env python3
"""
neural network with one hidden layer performing binary classification:
"""
import numpy as np
class NeuralNetwork():
    """Neural network with one hidden layer for binary classification.

    Every neuron follows y = sum(w.x) + b: the ``W*`` attributes hold
    weight matrices, ``b*`` the biases and ``A*`` the activated outputs.
    """

    def __init__(self, nx, nodes):
        """Initialize weights randomly, biases and activations to zero.

        nx: number of input features (positive integer)
        nodes: number of nodes in the hidden layer (positive integer)
        Raises TypeError/ValueError on invalid arguments.
        """
        if type(nx) is not int:
            raise TypeError("nx must be an integer")
        if nx < 1:
            raise ValueError("nx must be a positive integer")
        if type(nodes) is not int:
            raise TypeError("nodes must be an integer")
        if nodes < 1:
            raise ValueError("nodes must be a positive integer")
        # hidden layer: (nodes, nx) weights drawn from a standard normal
        self.W1 = np.random.randn(nodes, nx)
        self.b1 = np.zeros((nodes, 1))
        self.A1 = 0
        # output neuron: (1, nodes) weights, scalar bias/activation
        self.W2 = np.random.randn(1, nodes)
        self.b2 = 0
        self.A2 = 0
| [
"947@holbertonschool.com"
] | 947@holbertonschool.com |
def n(a, c):
    """Return ((a*365)*c)*10 / 3600 for `a` years at `c` cigarettes/day.

    Presumably 10 minutes per cigarette accumulated over the smoking
    history; the 3600 divisor looks like a seconds-to-hours conversion —
    TODO confirm the intended units with the exercise statement.
    """
    total = a * 365 * c * 10
    return total / 3600
# Prompt the user (prompts are in Portuguese): years smoking, then
# cigarettes per day; both parsed as integers.
a = int(input('Insira há quantos anos você fuma: '))  # years smoking
c = int(input('Insira quantos cigarros você usa por dia: '))  # cigarettes per day
print(n(a, c)) | [
"you@example.com"
] | you@example.com |
22483da0f47eecf1240954f46e0762ee7f16d6d1 | 97cb12cc1243ffa1e29c98ed013a03b377d0e9cd | /setup.py | 1e3612c418f41668b0949274eda9949bd59dac26 | [
"MIT"
] | permissive | brettatoms/flask-appconfig | 675097323f06b7dcd40f6e8225991e24a349d4a3 | 3e023569f156166abe3992bb3abe0fdad1c38630 | refs/heads/master | 2021-01-18T04:47:44.432958 | 2015-07-17T23:02:04 | 2015-07-17T23:02:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
def read(fname):
    """Return the contents of *fname*, resolved relative to this setup.py.

    BUG FIX: the original left the file handle open (closed only by GC);
    a context manager closes it deterministically.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path) as f:
        return f.read()
# Package metadata; the long description is single-sourced from README.rst.
setup(
    name='flask-appconfig',
    version='0.11.0.dev1',
    description=('Configures Flask applications in a canonical way. Also auto-'
                 'configures Heroku. Aims to standardize configuration.'),
    long_description=read('README.rst'),
    author='Marc Brinkmann',
    author_email='git@marcbrinkmann.de',
    url='http://github.com/mbr/flask-appconfig',
    license='MIT',
    packages=find_packages(exclude=['tests']),
    install_requires=['flask', 'six', 'click'],
    # Expose a `flask` console command backed by the bundled CLI module.
    entry_points={
        'console_scripts': [
            'flask = flask_appconfig.cli:cli',
        ],
    },
    classifiers=[
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
    ]
)
| [
"git@marcbrinkmann.de"
] | git@marcbrinkmann.de |
a897a4faf3b4f6073bc4505c1ef6077f10815dac | b4cfb1f9813df98a791c0dfeab5183996f900f13 | /core/forms.py | 34183f3bf2d149ad1d532f1b606456e800d6fc30 | [] | no_license | KiwiState/TestDjango | 6fedf22603950bd4345dc6dc93ed7ab67c811769 | c09ef5f02cf4297d97393cd3a495475478e023ef | refs/heads/master | 2023-06-09T20:26:16.070432 | 2021-07-08T13:49:05 | 2021-07-08T13:49:05 | 377,001,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | from django import forms
from django.forms import ModelForm
from .models import Pintura,Contacto,Usuarios
class PinturaForm(ModelForm):
    """ModelForm exposing the editable fields of ``Pintura``."""
    class Meta:
        model = Pintura
        fields = ['titulo','descripcion','categoria','imagen','id_pintura','id']
class ContactoForm(ModelForm):
    """ModelForm for contact submissions (``Contacto`` model)."""
    class Meta:
        model = Contacto
        fields = ['email','titulo','descripcion']
class UsuariosForm(ModelForm):
    """ModelForm exposing the editable fields of ``Usuarios``."""
    class Meta:
        model = Usuarios
        fields = ['nombre','correo','sexo','edad','artista','id']
| [
"unconfigured@null.spigotmc.org"
] | unconfigured@null.spigotmc.org |
f9c00636facae578d804121b09edd4a003c90297 | cbfb679bd068a1153ed855f0db1a8b9e0d4bfd98 | /leet/amazon/strings_and_arrays/454_4Sum_II.py | 39caa53fc72f97bde6109f3bfd501894873bb480 | [] | no_license | arsamigullin/problem_solving_python | 47715858a394ba9298e04c11f2fe7f5ec0ee443a | 59f70dc4466e15df591ba285317e4a1fe808ed60 | refs/heads/master | 2023-03-04T01:13:51.280001 | 2023-02-27T18:20:56 | 2023-02-27T18:20:56 | 212,953,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,724 | py | import collections
from typing import List
class Solution:
    def fourSumCount(self, A: List[int], B: List[int], C: List[int], D: List[int]) -> int:
        """Count index tuples (i, j, k, l) with A[i]+B[j]+C[k]+D[l] == 0.

        Meet-in-the-middle: tally every pairwise sum of A and B, then for
        each C/D pair look up the complementing sum.  O(n^2) time/space.
        """
        ab_sums = collections.Counter(a + b for a in A for b in B)
        return sum(ab_sums[-(c + d)] for c in C for d in D)
class Solution:
    def fourSumCount(self, A: List[int], B: List[int], C: List[int], D: List[int]) -> int:
        """Count index tuples (i, j, k, l) with A[i]+B[j]+C[k]+D[l] == 0.

        Counts distinct values per list first, then combines value counts;
        this beats iterating the raw lists when inputs contain duplicates.
        (A large block of dead commented-out code was removed.)
        """
        # Per-list value frequencies; the lists share one length, so a
        # single zip pass fills all four counters.
        p, q, r, s = dict(), dict(), dict(), dict()
        for i, j, k, l in zip(A, B, C, D):
            p[i] = p.get(i, 0) + 1
            q[j] = q.get(j, 0) + 1
            r[k] = r.get(k, 0) + 1
            s[l] = s.get(l, 0) + 1
        # Weighted counts of every A+B sum value.
        sumt = dict()
        for i in p:
            for j in q:
                t = i + j
                sumt[t] = sumt.get(t, 0) + p[i] * q[j]
        # For each C+D value pair, add the matching complements.
        total = 0
        for i in r:
            for j in s:
                t = i + j
                total += sumt.get(-t, 0) * (r[i] * s[j])
        return total
| [
"ar.smglln@gmail.com"
] | ar.smglln@gmail.com |
98ae7611ae59d9ad96d11df5d4be3d5ffffa1b93 | 798960eb97cd1d46a2837f81fb69d123c05f1164 | /symphony/cli/pyinventory/graphql/enum/user_role.py | 053133dc005e0a4f2ac8fe4e406670c5ecde4190 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | kyaaqba/magma | 36d5fa00ce4f827e6ca5ebd82d97a3d36e5f5b5b | fdb7be22a2076f9a9b158c9670a9af6cad68b85f | refs/heads/master | 2023-01-27T12:04:52.393286 | 2020-08-20T20:23:50 | 2020-08-20T20:23:50 | 289,102,268 | 0 | 0 | NOASSERTION | 2020-08-20T20:18:42 | 2020-08-20T20:18:41 | null | UTF-8 | Python | false | false | 300 | py | #!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from enum import Enum
class UserRole(Enum):
    """User role values returned by the GraphQL API."""
    USER = "USER"
    ADMIN = "ADMIN"
    OWNER = "OWNER"
    # Sentinel for role strings this (generated) client does not know.
    MISSING_ENUM = ""

    @classmethod
    def _missing_(cls, value: object) -> "UserRole":
        # Enum hook: map any unknown value to MISSING_ENUM instead of
        # raising ValueError, keeping the client forward-compatible with
        # new server-side roles.
        return cls.MISSING_ENUM
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
e5986fafc4c92267351701bf782014ee5e3e3e90 | 11757061e3f4d4b6535c311ed5005b620c98d1b5 | /lang/test/python/test_scope.py | f2a8fe1b5c8f0a8e2c252d5dee22b1b06f9bb5f3 | [
"MIT"
] | permissive | nijeshu/taichi | 580176d65cae80c87ab508a7db3f71f4815cbcdc | bd02798208b2d363e605434d10d739fe03e9c07f | refs/heads/master | 2020-08-06T15:41:47.510747 | 2019-10-05T19:12:08 | 2019-10-05T19:12:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | import taichi as ti
ti.runtime.print_preprocessed = True
def test_scope():
    # In the future the following code should throw an exception at the python front end
    # instead of crashing the compiler
    # NOTE: the bare `return` deliberately disables the body below: `val`
    # is assigned inside the `if` but read outside it, which at the time
    # of writing crashed the Taichi compiler instead of raising a
    # Python-side error.
    return
    for arch in [ti.x86_64, ti.cuda]:
        # ti.reset()
        ti.cfg.arch = arch
        x = ti.var(ti.f32)
        N = 1
        @ti.layout
        def place():
            ti.root.dense(ti.i, N).place(x)
        @ti.kernel
        def func():
            if 1 > 0:
                val = 1
            ti.print(val)
        func()
| [
"yuanmhu@gmail.com"
] | yuanmhu@gmail.com |
dcd595986262113e80ae7f4168553bdf37ea6ab5 | 03f1a5380641564750daa9c4de90e85e8e1c9c35 | /notifier/signal.py | 51d23490edad463787e514f13def9a6ded7100a9 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | stockec/siis | f4e4a792a1f6623030a3299f0978b681d7c85d52 | 82f54415b4ed82ff12d17a252dd6da32bc31a586 | refs/heads/master | 2020-07-18T11:28:26.411702 | 2019-09-01T22:01:59 | 2019-09-01T22:01:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,781 | py | # @date 2018-08-07
# @author Frederic SCHERMA
# @license Copyright (c) 2018 Dream Overflow
# service worker
class Signal(object):
    """Immutable message passed between the application's services.

    A Signal carries the emitter category (``source``, one of the
    SOURCE_* constants), the emitter's name, a ``signal_type`` (one of
    the SIGNAL_* constants) and an opaque ``data`` payload whose expected
    shape is described next to each constant.
    """

    SIGNAL_UNDEFINED = 0

    SIGNAL_SOCIAL_ENTER = 10  # broker copy position entry signal
    SIGNAL_SOCIAL_EXIT = 11  # broker copy position exit signal
    SIGNAL_SOCIAL_UPDATED = 12  # broker copy position exit signal
    SIGNAL_AUTHOR_ADDED = 13
    SIGNAL_AUTHOR_REMOVED = 14

    SIGNAL_STRATEGY_ENTRY_EXIT = 50  # data is a dict {'trader-name', 'trade-id', 'symbol', 'direction', 'price', 'symbol', 'action', 'rate', 'timestamp', ...}

    SIGNAL_CANDLE_DATA = 100  # data is a pair with (market_id, Candle)
    SIGNAL_TICK_DATA = 101  # data is a pair with (market_id, Tick)
    SIGNAL_CANDLE_DATA_BULK = 102  # data is a tuple of (market_id, tf, Candle[])
    SIGNAL_TICK_DATA_BULK = 103  # data is a tuple of (market_id, tf, Tick[])
    SIGNAL_SOCIAL_ORDER = 104  # data is a tuple with (str market id, dict position details)
    SIGNAL_BUY_SELL_ORDER = 105  # data is BuySellSignal
    SIGNAL_ORDER_BOOK = 106  # data is a tuple with (market_id, buys array, sells array)

    SIGNAL_WATCHER_CONNECTED = 200  # data is None
    SIGNAL_WATCHER_DISCONNECTED = 201  # data is None

    SIGNAL_ACCOUNT_DATA = 300  # data is a tuple with (balance, free_margin, pnl, currency, risk_limit)
    SIGNAL_MARKET_DATA = 301  # data is a tuple with (market_id, tradable, timestamp, bid, ofr, base_exchange_rate, contract_size, value_per_pip, vol24h_base, vol24h_quote)
    SIGNAL_MARKET_INFO_DATA = 302  # data is a tuple with (market_id, Market())
    SIGNAL_MARKET_LIST_DATA = 303  # data is an array of tuples of str (market_id, symbol, base, quote)

    SIGNAL_POSITION_OPENED = 400  # data is a (str market id, dict position details, str ref order id)
    SIGNAL_POSITION_UPDATED = 401  # data is a (str market id, dict position details, str ref order id)
    SIGNAL_POSITION_DELETED = 402  # data is a (str market id, str position id, str ref order id)
    SIGNAL_POSITION_AMENDED = 403  # data is a (str market id, dict position details)

    SIGNAL_ORDER_OPENED = 500  # data is a (str market id, dict order details, str ref order id)
    SIGNAL_ORDER_UPDATED = 501  # data is a (str market id, dict order details, str ref order id)
    SIGNAL_ORDER_DELETED = 502  # data is a (str market id, str order id, str ref order id)
    SIGNAL_ORDER_REJECTED = 503  # data is a (str market id, str ref order id)
    SIGNAL_ORDER_CANCELED = 504  # data is a (str market id, str order id, str ref order id)
    SIGNAL_ORDER_TRADED = 505  # data is a (str market id, dict order details, str ref order id)

    SIGNAL_ASSET_DATA = 600  # data is a tuple with (asset_id, asset object)
    SIGNAL_ASSET_DATA_BULK = 601  # data is an array of Asset objects
    SIGNAL_ASSET_UPDATED = 602  # data is a tuple with (asset_id, locked_balance, free_balance)

    SIGNAL_STRATEGY_TRADE_LIST = 700  # data is a an array of tuple with (market_id, integer trade_id, integer trade_type, dict data, dict operations)
    SIGNAL_STRATEGY_TRADER_LIST = 701  # data is a an array of tuple with (market_id, boolean activity, dict data, dict regions)

    # Emitter categories.
    SOURCE_UNDEFINED = 0
    SOURCE_WATCHER = 1
    SOURCE_TRADER = 2
    SOURCE_STRATEGY = 3
    SOURCE_MONITOR = 4

    def __init__(self, source, source_name, signal_type, data):
        """Build an immutable signal; all fields are read-only properties."""
        self._source = source
        self._source_name = source_name
        self._signal_type = signal_type
        self._data = data

    @property
    def source(self):
        """Emitter category (one of the SOURCE_* constants)."""
        return self._source

    @property
    def source_name(self):
        """Name of the emitting service instance."""
        return self._source_name

    @property
    def signal_type(self):
        """Message type (one of the SIGNAL_* constants)."""
        return self._signal_type

    @property
    def data(self):
        """Payload; shape depends on signal_type (see constants above)."""
        return self._data
| [
"frederic.scherma@gmail.com"
] | frederic.scherma@gmail.com |
91b5d980d481c438768b83a4d5b7df79eea6bc96 | 6189f34eff2831e3e727cd7c5e43bc5b591adffc | /WebMirror/management/rss_parser_funcs/feed_parse_extractRemnancetlCom.py | 2f4d11c779dbeaec9f911463a4b61cbe7a01a777 | [
"BSD-3-Clause"
] | permissive | fake-name/ReadableWebProxy | 24603660b204a9e7965cfdd4a942ff62d7711e27 | ca2e086818433abc08c014dd06bfd22d4985ea2a | refs/heads/master | 2023-09-04T03:54:50.043051 | 2023-08-26T16:08:46 | 2023-08-26T16:08:46 | 39,611,770 | 207 | 20 | BSD-3-Clause | 2023-09-11T15:48:15 | 2015-07-24T04:30:43 | Python | UTF-8 | Python | false | false | 541 | py |
def extractRemnancetlCom(item):
    '''
    Parser for 'remnancetl.com'
    '''
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
    # Skip previews and items with neither a volume nor a chapter number.
    if "preview" in title.lower() or not (vol or chp):
        return None

    # (tag, release name, translation type)
    known_tags = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in known_tags:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(
                item, name, vol, chp,
                frag=frag, postfix=postfix, tl_type=tl_type)

    return False
| [
"something@fake-url.com"
] | something@fake-url.com |
16f944de1e71ab6bfb26e39281def4a786a69efa | 7a704e838d89f942a1099fec141f1fbe9828e528 | /v2_plugin/example/maskrcnn-service/engine.py | 030d0989e1e8c9dbb2e97a53f39056e2bf88d710 | [
"Apache-2.0"
] | permissive | cap-ntu/Video-to-Retail-Platform | 3ee00d22b7fd94925adac08c5ea733ee647f4574 | 757c68d9de0778e3da8bbfa678d89251a6955573 | refs/heads/hysia_v2 | 2023-02-14T05:22:16.792928 | 2021-01-10T02:31:43 | 2021-01-10T02:31:43 | 212,741,650 | 63 | 20 | Apache-2.0 | 2021-01-10T02:32:00 | 2019-10-04T05:22:08 | Python | UTF-8 | Python | false | false | 2,754 | py | import ssl
from pathlib import Path
from typing import Dict
import numpy as np
import torch
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.structures.bounding_box import BoxList
from common.engine import BaseEngine
from .mask_rcnn_predictor import COCODemo
# cancel ssl certificate verify
ssl._create_default_https_context = ssl._create_unverified_context
class Engine(BaseEngine):
    """Mask R-CNN inference engine wrapping maskrcnn-benchmark's COCODemo."""

    # Root of the vendored maskrcnn-benchmark YAML config tree.
    CFG_ROOT = Path(__file__).parent.absolute() / 'third/maskrcnn-benchmark/configs'

    def __init__(self, config):
        super().__init__(config)
        # self.config is assumed to be the model name set by BaseEngine —
        # TODO confirm against the BaseEngine contract.
        self._load_model(self.config)

    def _load_model(self, model_name: str):
        """Load the YAML configuration and build the predictor for *model_name*."""
        self._model_name = model_name
        self._config = self._load_cfg()
        self._model = COCODemo(
            cfg,
            min_image_size=800,
            confidence_threshold=0.7,
        )

    def reset_model_version(self, model_name: str):
        """Swap in a different model configuration at runtime."""
        self._load_model(model_name)

    def _load_cfg(self):
        """Merge the model's YAML file into the global ``cfg`` and return it.

        BUG FIX: ``merge_from_list`` mutates ``cfg`` in place and returns
        None (yacs API), so the original stored None in ``self._config``.
        """
        model_path = Path(self._model_name).with_suffix('.yaml')
        full_path = self.CFG_ROOT / model_path
        print('loading configuration from {}'.format(full_path))
        cfg.merge_from_file(full_path)
        cfg.merge_from_list(["MODEL.DEVICE", "cuda"])
        return cfg

    @staticmethod
    def decode_bbox(predictions: BoxList):
        """
        Arguments:
            predictions (BoxList): the result of the computation by the model.
                It should contain the field `labels`.
        Returns:
            label_ids, boxes, scores, mask: lists of numeric label ids,
            integer [x1, y1, x2, y2] boxes and confidence scores, plus the
            mask data when the `mask` field is present (else None).
        """
        label_ids = predictions.get_field('labels').tolist()
        boxes = predictions.bbox
        boxes = boxes.to(torch.int64).tolist()
        scores = predictions.get_field('scores').tolist()
        if predictions.has_field('mask'):
            mask = predictions.get_field('mask').tolist()
        else:
            mask = None
        return label_ids, boxes, scores, mask

    def single_predict(self, np_array: np.ndarray, **kwargs) -> Dict[str, list]:
        """Run detection on one image and return a JSON-friendly dict.

        Assumes *np_array* is an HxWxC image (likely BGR, as expected by
        maskrcnn-benchmark) — TODO confirm with the calling service.
        """
        height, width, _ = np_array.shape
        predictions = self._model.compute_prediction(np_array)
        top_predictions = self._model.select_top_predictions(predictions)
        label_ids, boxes, scores, mask = self.decode_bbox(top_predictions)
        labels = [self._model.CATEGORIES[i] for i in label_ids]
        return {
            'labels': labels,
            'label_ids': label_ids,
            'boxes': boxes,
            'scores': scores,
            'mask': mask,
            'width': width,
            'height': height
        }

    def batch_predict(self, *args, **kwargs):
        """Placeholder: batched inference is not implemented yet."""
        print('Hello world from batch predict.')
| [
"YLI056@e.ntu.edu.sg"
] | YLI056@e.ntu.edu.sg |
cf4e7fec26789b171a8ebaa8eabc49d62d29ff3c | ecbc02f304557c6069637f2871d4d4c9c3e04e98 | /tests/core/test_source_map.py | 56189b92496db80f7f6c3b9c500dbff0e3840aca | [
"MIT"
] | permissive | charles-cooper/vyper-debug | 6b6e12ccc1ca9217caff7f21bc176b69d1bc8fc5 | 2678de107615c705a3e55edf811b23259990d1c4 | refs/heads/master | 2020-05-21T01:05:39.122973 | 2019-05-09T17:59:46 | 2019-05-09T17:59:46 | 185,847,661 | 1 | 0 | MIT | 2019-05-09T17:57:59 | 2019-05-09T17:57:59 | null | UTF-8 | Python | false | false | 881 | py | from vdb.source_map import produce_source_map
def test_source_map_output():
    """Smoke-test produce_source_map on a small Vyper contract: checks the
    global storage entry and func1's local-variable memory layout."""
    # NOTE(review): the byte positions/sizes asserted below reflect the
    # Vyper memory layout at the time this test was written — confirm
    # against the pinned vyper version before changing.
    code = """
a_map: map(bytes32, bytes32)
@public
def func1(a: int128) -> int128:
    b: int128 = 2
    c: int128 = 3
    g: bytes[10]
    return a + b + c + 1
@public
def func2(a: int128):
    x: uint256
"""
    sm = produce_source_map(code)
    # globals
    assert sm['globals']['a_map'] == {
        'type': 'map(bytes32, bytes32)',
        'size': 0,
        'position': 0
    }
    # locals
    assert sm['locals']['func1'] == {
        'from_lineno': 4,
        'to_lineno': 11,
        'variables': {
            'a': {'type': 'int128', 'size': 32, 'position': 320},
            'b': {'type': 'int128', 'size': 32, 'position': 352},
            'c': {'type': 'int128', 'size': 32, 'position': 384},
            'g': {'type': 'bytes[10]', 'size': 96, 'position': 416}
        },
    }
| [
"jacques@dilectum.co.za"
] | jacques@dilectum.co.za |
14fd9210aab05327a7304470c21824934a141426 | b4b140bb107baebc50b310f1d79fdbe2a0382708 | /proj/lib/python3.7/site-packages/flask_cors/core.py | 4dcfe44631be4e9f8d06ae12396796d7d1d9e5df | [
"MIT"
] | permissive | shahedex/horizon_backend_flask | f642b99bf019050ff72896e455a85bd3f483cf39 | 7dce74fce0afdfa1cb6481e1d765e01a8ad3c5c4 | refs/heads/master | 2022-10-08T17:27:10.299450 | 2019-10-06T16:57:42 | 2019-10-06T16:57:42 | 207,140,462 | 0 | 1 | MIT | 2022-09-16T18:09:06 | 2019-09-08T16:26:35 | Python | UTF-8 | Python | false | false | 13,771 | py | # -*- coding: utf-8 -*-
"""
core
~~~~
Core functionality shared between the extension and the decorator.
:copyright: (c) 2016 by Cory Dolphin.
:license: MIT, see LICENSE for more details.
"""
import collections
import collections.abc
import logging
import re
from datetime import timedelta

from flask import request, current_app
from six import string_types
from werkzeug.datastructures import Headers, MultiDict
LOG = logging.getLogger(__name__)
# Response Headers
ACL_ORIGIN = 'Access-Control-Allow-Origin'
ACL_METHODS = 'Access-Control-Allow-Methods'
ACL_ALLOW_HEADERS = 'Access-Control-Allow-Headers'
ACL_EXPOSE_HEADERS = 'Access-Control-Expose-Headers'
ACL_CREDENTIALS = 'Access-Control-Allow-Credentials'
ACL_MAX_AGE = 'Access-Control-Max-Age'
# Request Header
ACL_REQUEST_METHOD = 'Access-Control-Request-Method'
ACL_REQUEST_HEADERS = 'Access-Control-Request-Headers'
ALL_METHODS = ['GET', 'HEAD', 'POST', 'OPTIONS', 'PUT', 'PATCH', 'DELETE']
CONFIG_OPTIONS = ['CORS_ORIGINS', 'CORS_METHODS', 'CORS_ALLOW_HEADERS',
'CORS_EXPOSE_HEADERS', 'CORS_SUPPORTS_CREDENTIALS',
'CORS_MAX_AGE', 'CORS_SEND_WILDCARD',
'CORS_AUTOMATIC_OPTIONS', 'CORS_VARY_HEADER',
'CORS_RESOURCES', 'CORS_INTERCEPT_EXCEPTIONS',
'CORS_ALWAYS_SEND']
# Attribute added to request object by decorator to indicate that CORS
# was evaluated, in case the decorator and extension are both applied
# to a view.
FLASK_CORS_EVALUATED = '_FLASK_CORS_EVALUATED'
# Strange, but this gets the type of a compiled regex, which is otherwise not
# exposed in a public API.
RegexObject = type(re.compile(''))
DEFAULT_OPTIONS = dict(origins='*',
methods=ALL_METHODS,
allow_headers='*',
expose_headers=None,
supports_credentials=False,
max_age=None,
send_wildcard=False,
automatic_options=True,
vary_header=True,
resources=r'/*',
intercept_exceptions=True,
always_send=True)
def parse_resources(resources):
    """Normalize the ``resources`` option into a list of
    (pattern, options) pairs, most-specific (longest) pattern first.

    Accepts a dict mapping patterns to per-resource options, a single
    string/regex, or an iterable of patterns.
    """
    if isinstance(resources, dict):
        # To make the API more consistent with the decorator, allow a
        # resource of '*', which is not actually a valid regexp.
        resources = [(re_fix(k), v) for k, v in resources.items()]

        # Sort by regex length to provide consistency of matching and
        # to provide a proxy for specificity of match. E.G. longer
        # regular expressions are tried first.
        def pattern_length(pair):
            maybe_regex, _ = pair
            return len(get_regexp_pattern(maybe_regex))

        return sorted(resources,
                      key=pattern_length,
                      reverse=True)

    elif isinstance(resources, string_types):
        return [(re_fix(resources), {})]

    # BUG FIX: `collections.Iterable` was removed in Python 3.10; the ABC
    # lives in `collections.abc`.
    elif isinstance(resources, collections.abc.Iterable):
        return [(re_fix(r), {}) for r in resources]

    # Type of compiled regex is not part of the public API. Test for this
    # at runtime.
    elif isinstance(resources, RegexObject):
        return [(re_fix(resources), {})]

    else:
        raise ValueError("Unexpected value for resources argument.")
def get_regexp_pattern(regexp):
    """
    Helper that returns regexp pattern from given value.

    :param regexp: regular expression to stringify
    :type regexp: _sre.SRE_Pattern or str
    :returns: string representation of given regexp pattern
    :rtype: str
    """
    if hasattr(regexp, 'pattern'):
        return regexp.pattern
    return str(regexp)
def get_cors_origins(options, request_origin):
    """Return the list of origin values to emit in
    Access-Control-Allow-Origin, or None when no CORS headers apply.

    :param options: resolved CORS options dict (see DEFAULT_OPTIONS).
    :param request_origin: value of the request's Origin header, or None.
    """
    origins = options.get('origins')
    wildcard = r'.*' in origins

    # If the Origin header is not present terminate this set of steps.
    # The request is outside the scope of this specification.-- W3Spec

    if request_origin:
        LOG.debug("CORS request received with 'Origin' %s", request_origin)

        # If the allowed origins is an asterisk or 'wildcard', always match
        if wildcard and options.get('send_wildcard'):
            LOG.debug("Allowed origins are set to '*'. Sending wildcard CORS header.")
            return ['*']
        # If the value of the Origin header is a case-sensitive match
        # for any of the values in list of origins
        elif try_match_any(request_origin, origins):
            LOG.debug("The request's Origin header matches. Sending CORS headers.", )
            # Add a single Access-Control-Allow-Origin header, with either
            # the value of the Origin header or the string "*" as value.
            # -- W3Spec
            return [request_origin]
        else:
            LOG.debug("The request's Origin header does not match any of allowed origins.")
            return None

    elif options.get('always_send'):
        if wildcard:
            # If wildcard is in the origins, even if 'send_wildcard' is False,
            # simply send the wildcard. Unless supports_credentials is True,
            # since that is forbidden by the spec.
            # It is the most-likely to be correct thing to do (the only other
            # option is to return nothing, which almost certainly not what
            # the developer wants if the '*' origin was specified.
            if options.get('supports_credentials'):
                return None
            else:
                return ['*']
        else:
            # Return all origins that are not regexes.
            return sorted([o for o in origins if not probably_regex(o)])

    # Terminate these steps, return the original request untouched.
    else:
        LOG.debug("The request did not contain an 'Origin' header. This means the browser or client did not request CORS, ensure the Origin Header is set.")
        return None
def get_allow_headers(options, acl_request_headers):
    """Return the requested headers that the configuration permits, as a
    sorted comma-separated string, or None when none were requested."""
    if not acl_request_headers:
        return None

    requested = [h.strip() for h in acl_request_headers.split(',')]
    # Keep only headers matching one of the configured allow_headers
    # patterns.
    allowed = [h for h in requested
               if try_match_any(h, options.get('allow_headers'))]
    return ', '.join(sorted(allowed))
def get_cors_headers(options, request_headers, request_method):
    """Compute the CORS response headers for one request.

    :param options: resolved CORS options dict.
    :param request_headers: incoming request headers (mapping).
    :param request_method: the request's HTTP method.
    :returns: MultiDict of headers to add; empty when CORS does not apply.
    """
    origins_to_set = get_cors_origins(options, request_headers.get('Origin'))
    headers = MultiDict()

    if not origins_to_set:  # CORS is not enabled for this route
        return headers

    for origin in origins_to_set:
        headers.add(ACL_ORIGIN, origin)

    headers[ACL_EXPOSE_HEADERS] = options.get('expose_headers')

    if options.get('supports_credentials'):
        headers[ACL_CREDENTIALS] = 'true'  # case sensitive

    # This is a preflight request
    # http://www.w3.org/TR/cors/#resource-preflight-requests
    if request_method == 'OPTIONS':
        acl_request_method = request_headers.get(ACL_REQUEST_METHOD, '').upper()

        # If there is no Access-Control-Request-Method header or if parsing
        # failed, do not set any additional headers
        if acl_request_method and acl_request_method in options.get('methods'):
            # If method is not a case-sensitive match for any of the values in
            # list of methods do not set any additional headers and terminate
            # this set of steps.
            headers[ACL_ALLOW_HEADERS] = get_allow_headers(options, request_headers.get(ACL_REQUEST_HEADERS))
            headers[ACL_MAX_AGE] = options.get('max_age')
            headers[ACL_METHODS] = options.get('methods')
        else:
            LOG.info("The request's Access-Control-Request-Method header does not match allowed methods. CORS headers will not be applied.")

    # http://www.w3.org/TR/cors/#resource-implementation
    if options.get('vary_header'):
        # Only set header if the origin returned will vary dynamically,
        # i.e. if we are not returning an asterisk, and there are multiple
        # origins that can be matched.
        if headers[ACL_ORIGIN] == '*':
            pass
        elif (len(options.get('origins')) > 1 or
              len(origins_to_set) > 1 or
              any(map(probably_regex, options.get('origins')))):
            headers.add('Vary', 'Origin')

    # Drop headers whose value resolved to None/empty.
    return MultiDict((k, v) for k, v in headers.items() if v)
def set_cors_headers(resp, options):
    """
    Performs the actual evaluation of Flask-CORS options and actually
    modifies the response object.

    This function is used both in the decorator and the after_request
    callback.
    """
    # If CORS has already been evaluated via the decorator, skip
    if hasattr(resp, FLASK_CORS_EVALUATED):
        LOG.debug('CORS have been already evaluated, skipping')
        return resp

    # Some libraries, like OAuthlib, set resp.headers to non Multidict
    # objects (Werkzeug Headers work as well). This is a problem because
    # headers allow repeated values.
    if (not isinstance(resp.headers, Headers)
            and not isinstance(resp.headers, MultiDict)):
        resp.headers = MultiDict(resp.headers)

    headers_to_set = get_cors_headers(options, request.headers, request.method)

    LOG.debug('Settings CORS headers: %s', str(headers_to_set))

    for k, v in headers_to_set.items():
        resp.headers.add(k, v)

    return resp
def probably_regex(maybe_regex):
    """Heuristic: True when *maybe_regex* looks like a regular expression
    (a compiled pattern, or a string containing regex metacharacters)."""
    if isinstance(maybe_regex, RegexObject):
        return True
    # Use common characters used in regular expressions as a proxy
    # for whether this string is in fact a regex.
    return any(ch in maybe_regex for ch in ('*', '\\', ']', '?'))
def re_fix(reg):
    """
    Replace the invalid regex r'*' with the valid wildcard regex r'.*' so
    the CORS app extension can offer a friendlier API.
    """
    if reg == r'*':
        return r'.*'
    return reg
def try_match_any(inst, patterns):
    """Return True when *inst* matches at least one entry of *patterns*."""
    for pattern in patterns:
        if try_match(inst, pattern):
            return True
    return False
def try_match(request_origin, maybe_regex):
    """Safely attempts to match a pattern or string to a request origin."""
    if isinstance(maybe_regex, RegexObject):
        return re.match(maybe_regex, request_origin)
    if probably_regex(maybe_regex):
        # String patterns are matched case-insensitively.
        return re.match(maybe_regex, request_origin, flags=re.IGNORECASE)
    # Plain string comparison, case-insensitive when both sides support
    # .lower(); fall back to strict equality otherwise.
    try:
        return request_origin.lower() == maybe_regex.lower()
    except AttributeError:
        return request_origin == maybe_regex
def get_cors_options(appInstance, *dicts):
    """
    Compute CORS options for an application by combining the DEFAULT_OPTIONS,
    the app's configuration-specified options and any dictionaries passed. The
    last specified option wins.

    :param appInstance: the Flask app, or None to fall back to current_app.
    :param dicts: zero or more option dicts, later ones taking precedence.
    """
    options = DEFAULT_OPTIONS.copy()
    options.update(get_app_kwarg_dict(appInstance))
    if dicts:
        for d in dicts:
            options.update(d)

    return serialize_options(options)
def get_app_kwarg_dict(appInstance=None):
    """Returns the dictionary of CORS specific app configurations."""
    app = appInstance or current_app
    # In order to support blueprints which do not have a config attribute
    app_config = getattr(app, 'config', {})
    result = {}
    for option in CONFIG_OPTIONS:
        value = app_config.get(option)
        if value is not None:
            result[option.lower().replace('cors_', '')] = value
    return result
def flexible_str(obj):
    """
    A more flexible str function which intelligently handles stringifying
    strings, lists and other iterables. The results are lexographically sorted
    to ensure generated responses are consistent when iterables such as Set
    are used.
    """
    if obj is None:
        return None
    # Strings and non-iterables stringify directly; other iterables are
    # sorted and joined for deterministic output.
    if isinstance(obj, string_types) or not isinstance(obj, collections.Iterable):
        return str(obj)
    return ', '.join(str(item) for item in sorted(obj))
def serialize_option(options_dict, key, upper=False):
    """Serialize ``options_dict[key]`` in place; no-op when *key* is absent."""
    if key not in options_dict:
        return
    value = flexible_str(options_dict[key])
    if upper:
        value = value.upper()
    options_dict[key] = value
def ensure_iterable(inst):
    """
    Wraps scalars or string types as a list, or returns the iterable instance.
    """
    # Strings are iterable but must still be wrapped as a single element.
    if isinstance(inst, string_types) or not isinstance(inst, collections.Iterable):
        return [inst]
    return inst
def sanitize_regex_param(param):
    """Normalise *param* into a list of valid regex strings."""
    fixed = []
    for candidate in ensure_iterable(param):
        fixed.append(re_fix(candidate))
    return fixed
def serialize_options(opts):
    """
    A helper method to serialize and processes the options dictionary.

    Returns a copy of *opts* with origins/headers normalised to regex lists,
    methods upper-cased and ``max_age`` converted to whole seconds.

    Raises:
        ValueError: when a wildcard origin is combined with
            ``supports_credentials`` and ``send_wildcard`` -- expressly
            forbidden by the CORS spec.
    """
    options = (opts or {}).copy()
    # Iterate over the guarded copy: iterating ``opts.keys()`` directly would
    # raise AttributeError when ``opts`` is None.
    for key in options:
        if key not in DEFAULT_OPTIONS:
            # ``Logger.warn`` is a deprecated alias of ``warning``.
            LOG.warning("Unknown option passed to Flask-CORS: %s", key)

    # Ensure origins is a list of allowed origins with at least one entry.
    options['origins'] = sanitize_regex_param(options.get('origins'))
    options['allow_headers'] = sanitize_regex_param(options.get('allow_headers'))

    # This is expressly forbidden by the spec. Raise a value error so people
    # don't get burned in production.
    if r'.*' in options['origins'] and options['supports_credentials'] and options['send_wildcard']:
        raise ValueError("Cannot use supports_credentials in conjunction with "
                         "an origin string of '*'. See: "
                         "http://www.w3.org/TR/cors/#resource-requests")

    serialize_option(options, 'expose_headers')
    serialize_option(options, 'methods', upper=True)

    if isinstance(options.get('max_age'), timedelta):
        options['max_age'] = str(int(options['max_age'].total_seconds()))

    return options
| [
"shahed739@gmail.com"
] | shahed739@gmail.com |
ea8866cc497b0eb9b862a1e59a2461097b8b4615 | 2469fc10932f11bb273fc6194b0c67779b6337f3 | /1. 상수형 자료형/jump_to_Python_숫자형_5월_18일.py | e74a3289fb8d5209ea04451bb4c95eb2ccd8e4b7 | [] | no_license | iopasd753951/Learn_Python | 925d20bbcaa871ebdc427a4e0d31ee8e81c51f72 | 5488d0a8e4ebd8140a488f93d31bf7a13459daaf | refs/heads/master | 2020-03-25T01:01:48.908415 | 2018-11-05T14:38:43 | 2018-11-05T14:38:43 | 143,218,566 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py |
# coding: utf-8

# Notebook-export exercises: averages, integer division/modulo, parity check.

# In[5]:

# Read three integer scores from stdin.
a = int(input())
b = int(input())
c = int(input())

# ``//`` is floor division, so the fractional part of the average is dropped
# -- presumably intentional for this integer-arithmetic exercise.
total = (a + b + c) // 3
print("점수 평균 :", total)


# In[6]:

# Integer quotient of 17 / 3 (prints 5).
a = 17 // 3
print(a)


# In[7]:

# Remainder of 17 / 3 (prints 2).
a = 17 % 3
print(a)


# In[ ]:

# Parity check: prints "짝수" (even) or "홀수" (odd).
a = int(input())
if a % 2 == 0 :
    print("짝수")
# For an int, ``a % 2`` is always 0 or 1, so the final else is unreachable.
elif a % 2 == 1 :
    print("홀수")
else :
    print("몰라")
| [
"iopasd753951@gmail.com"
] | iopasd753951@gmail.com |
dbe515b084f3cf998bb1cdf2e0a248687292e570 | 3428950daafacec9539a83809cf9752000508f63 | /코딩테스트책/7-6.py | 75ee8b86c6f39502de4fa3772176543822e986e7 | [] | no_license | HyunAm0225/Python_Algorithm | 759b91743abf2605dfd996ecf7791267b0b5979a | 99fb79001d4ee584a9c2d70f45644e9101317764 | refs/heads/master | 2023-05-24T05:29:12.838390 | 2021-06-15T16:36:33 | 2021-06-15T16:36:33 | 274,587,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 783 | py | # 이진 탐색
# 떡 자르기 문제
n, m = map(int,input().split())
array = list(map(int,input().split()))
# 이진 탐색을 위한 시작점과 끝점 설정
start = 0
end = max(array)
# 이진 탐색 수행(반복적)
result = 0
while(start <=end):
total = 0
mid = (start+end)//2
for x in array:
# 잘랐을 때 떡의 양 계산
if x > mid:
total += x - mid
# 떡의 양이 부족한 경우 더 많이 자르기 (왼쪽 부분 탐색)
if total < m:
end = mid -1
# 떡의 양이 충분한 경우 덜 자르기(오른쪽 부분 탐색)
else:
result = mid # 최대한 덜 잘랐을 때가 정답이므로, 여기에서 result에 기록한다.
start = mid + 1
# 정답 출력
print(result)
| [
"tlfgjawlq@naver.com"
] | tlfgjawlq@naver.com |
24ff3174bfa172b59160d3c30a8b202d43863cd5 | 1b87d5f7cba7e068f7b2ea902bba494599d20a78 | /experimental/modeswitch/x11vmode.py | 65e6243a8af467c60e50248a8f876db8546c600e | [
"BSD-3-Clause"
] | permissive | jpaalasm/pyglet | 906d03fe53160885665beaed20314b5909903cc9 | bf1d1f209ca3e702fd4b6611377257f0e2767282 | refs/heads/master | 2021-01-25T03:27:08.941964 | 2014-01-25T17:50:57 | 2014-01-25T17:50:57 | 16,236,090 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 7,453 | py | #!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import ctypes
import os
import fcntl
import mutex
import time
import select
import struct
import signal
import sys
import threading
from pyglet.window.xlib import xlib
import lib_xf86vmode as xf86vmode
class ModeList(object):
    """Cached list of XF86VidMode video modes for one X display/screen."""

    # True whenever ``self.modes`` is stale and must be re-fetched.
    invalid = True

    def __init__(self, x_display, x_screen):
        self.x_display = x_display
        self.x_screen = x_screen
        self.display_name = xlib.XDisplayString(self.x_display)

    @classmethod
    def from_screen(cls, screen):
        # Alternate constructor from a pyglet Screen object.
        display = screen.display
        return cls(display._display, screen._x_screen_id)

    def _validate(self):
        # Refresh ``self.modes`` from the X server if marked invalid.
        if not self.invalid:
            return

        count = ctypes.c_int()
        modes = ctypes.POINTER(ctypes.POINTER(xf86vmode.XF86VidModeModeInfo))()
        xf86vmode.XF86VidModeGetAllModeLines(self.x_display, self.x_screen,
            count, modes)

        # Copy modes out of list and free list
        self.modes = []
        for i in range(count.value):
            mode = xf86vmode.XF86VidModeModeInfo()
            ctypes.memmove(ctypes.byref(mode), ctypes.byref(modes.contents[i]),
                ctypes.sizeof(mode))
            self.modes.append(mode)
            # Private payloads are allocated by the server and must be freed.
            if mode.privsize:
                xlib.XFree(mode.private)
        xlib.XFree(modes)

        self.invalid = False

    def _mode_packet(self, mode):
        # Wrap a raw mode struct in a serializable ModePacket.
        return ModePacket(self.display_name, self.x_screen,
            mode.hdisplay, mode.vdisplay, mode.dotclock)

    def get_mode(self):
        '''Get current mode (ModePacket)'''
        self._validate()
        # NOTE(review): assumes the first entry returned by the server is the
        # current mode -- confirm against XF86VidModeGetAllModeLines semantics.
        return self._mode_packet(self.modes[0])

    def set_mode(self, width, height, dotclock=None):
        '''Set mode closest to requested width, height and dotclock (if
        specified). Actual mode is returned. Exception is raised
        if width or height are above maximum.
        '''
        self._validate()

        # Scan all modes large enough for the request and keep the smallest
        # fitting one (ties on width broken by height, then dotclock).
        best_mode = None
        for mode in self.modes:
            if width > mode.hdisplay or height > mode.vdisplay:
                continue
            if not best_mode:
                best_mode = mode
                continue
            if mode.hdisplay == best_mode.hdisplay:
                if mode.vdisplay < best_mode.vdisplay:
                    if (dotclock is not None and
                        abs(dotclock - mode.dotclock) <
                        abs(dotclock - best_mode.dotclock)):
                        best_mode = mode
                elif mode.vdisplay < best_mode.vdisplay:
                    best_mode = mode
            elif mode.hdisplay < best_mode.hdisplay:
                best_mode = mode

        if best_mode is None:
            raise Exception('No mode is in range of requested resolution.')

        xf86vmode.XF86VidModeSwitchToMode(self.x_display, self.x_screen,
            best_mode)
        xlib.XFlush(self.x_display)
        # Switching modes invalidates the cached list.
        self.invalid = True
        return self._mode_packet(best_mode)
# Mode packets tell the child process how to restore a given display and
# screen. Only one packet should be sent per display/screen (more would
# indicate redundancy or incorrect restoration). Packet format is:
# display (max 256 chars),
# screen
# width
# height
# dotclock
class ModePacket(object):
    """Fixed-size wire format telling the child how to restore one screen.

    Layout (see the comment block above): display name (max 256 chars),
    screen index, width, height, dotclock.
    """
    format = '256siHHI'
    size = struct.calcsize(format)

    def __init__(self, display_name, screen, width, height, dotclock):
        self.display_name = display_name
        self.screen = screen
        self.width = width
        self.height = height
        self.dotclock = dotclock

    def encode(self):
        # Pack into the fixed-size binary representation (Python 2 str).
        return struct.pack(self.format, self.display_name, self.screen,
            self.width, self.height, self.dotclock)

    @classmethod
    def decode(cls, data):
        # Inverse of encode(); strips the NUL padding from the name.
        display_name, screen, width, height, dotclock = \
            struct.unpack(cls.format, data)
        return cls(display_name.strip('\0'), screen, width, height, dotclock)

    def __repr__(self):
        return '%s(%r, %r, %r, %r, %r)' % (
            self.__class__.__name__, self.display_name, self.screen,
            self.width, self.height, self.dotclock)

    def set(self):
        # Open the display, apply the stored mode, then close again.
        display = xlib.XOpenDisplay(self.display_name)
        mode_list = ModeList(display, self.screen)
        mode_list.set_mode(self.width, self.height, self.dotclock)
        xlib.XCloseDisplay(display)
# Whether the watchdog child process has already been forked.
_restore_mode_child_installed = False
# (display_name, screen) pairs whose original mode is already recorded.
_restorable_screens = set()
# Write end of the pipe used to send ModePackets to the child.
_mode_write_pipe = None

def _install_restore_mode_child():
    """Fork a watchdog child that restores video modes if the parent dies."""
    global _mode_write_pipe
    global _restore_mode_child_installed

    if _restore_mode_child_installed:
        return

    # Parent communicates to child by sending "mode packets" through a pipe:
    mode_read_pipe, _mode_write_pipe = os.pipe()

    if os.fork() == 0:
        # Child process (watches for parent to die then restores video mode(s).
        os.close(_mode_write_pipe)

        # Set up SIGHUP to be the signal for when the parent dies.
        # PR_SET_PDEATHSIG asks Linux to deliver the signal on parent death.
        PR_SET_PDEATHSIG = 1
        libc = ctypes.cdll.LoadLibrary('libc.so.6')
        libc.prctl.argtypes = (ctypes.c_int, ctypes.c_ulong, ctypes.c_ulong,
            ctypes.c_ulong, ctypes.c_ulong)
        libc.prctl(PR_SET_PDEATHSIG, signal.SIGHUP, 0, 0, 0)

        # SIGHUP indicates the parent has died.  The child mutex is unlocked, it
        # stops reading from the mode packet pipe and restores video modes on
        # all displays/screens it knows about.
        def _sighup(signum, frame):
            parent_wait_mutex.unlock()
        parent_wait_mutex = mutex.mutex()
        parent_wait_mutex.lock(lambda arg: arg, None)
        signal.signal(signal.SIGHUP, _sighup)

        # Wait for parent to die and read packets from parent pipe
        packets = []
        buffer = ''
        while parent_wait_mutex.test():
            data = os.read(mode_read_pipe, ModePacket.size)
            buffer += data
            # Decode packets
            while len(buffer) >= ModePacket.size:
                packet = ModePacket.decode(buffer[:ModePacket.size])
                packets.append(packet)
                buffer = buffer[ModePacket.size:]

        # Parent is gone: restore every recorded mode, then exit the child.
        for packet in packets:
            packet.set()
        sys.exit(0)
    else:
        # Parent process.  Clean up pipe then continue running program as
        # normal.  Send mode packets through pipe as additional
        # displays/screens are mode switched.
        os.close(mode_read_pipe)
        _restore_mode_child_installed = True
def _set_restore_mode(mode):
    """Record *mode* with the watchdog child, once per display/screen."""
    _install_restore_mode_child()

    # This is not the real restore mode if one has already been set.
    if (mode.display_name, mode.screen) in _restorable_screens:
        return

    # Ship the packet to the child so it can restore this screen later.
    os.write(_mode_write_pipe, mode.encode())
    _restorable_screens.add((mode.display_name, mode.screen))
def _set_mode(screen, width, height):
    """Switch *screen* to the closest mode and arrange for later restore."""
    display_name = screen.display  # NOTE(review): assigned but never used.
    mode_list = ModeList.from_screen(screen)
    current_mode = mode_list.get_mode()
    # Remember the pre-switch mode so the watchdog child can restore it.
    _set_restore_mode(current_mode)
    new_mode = mode_list.set_mode(width, height)
    return new_mode.width, new_mode.height
import pyglet

# Demo: open a window, force an 800x600 mode, then run the app.
window = pyglet.window.Window()
_set_mode(window.screen, 800, 600)
pyglet.app.run()

# Trigger a segfault -- mode still gets restored thanks to child :-)
print ctypes.c_char_p.from_address(0)
| [
"joonas.paalasmaa@gmail.com"
] | joonas.paalasmaa@gmail.com |
157bf7e582510d1c353c498ec1b026dbd39bdb35 | 714cfd73f40383d6a8cde7144f56c8777fafe8e3 | /src/misc/features/create_tsfresh.py | 17491a3e84e3976649b1f9a54c33d16a0fea2547 | [
"BSD-2-Clause"
] | permissive | Ynakatsuka/g2net-gravitational-wave-detection | bed60d39534b4aced1469964369b0fec17c7b7c7 | 482914a64e0140f27e0058202af1fdea06f7b258 | refs/heads/main | 2023-09-06T00:35:07.096235 | 2021-10-29T07:44:34 | 2021-10-29T07:44:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,834 | py | import multiprocessing
import os
import warnings
import hydra
import pandas as pd
import tsfresh
from base import BaseG2NetFeatureEngineeringDataset, G2NetFeatureEngineering
from omegaconf import DictConfig
class TsFreshFeatures(BaseG2NetFeatureEngineeringDataset):
def _engineer_features(self, signals):
df = pd.DataFrame(
signals.T, columns=["channel_0", "channel_1", "channel_2"]
)
df["id"] = 0
extracted_features = tsfresh.extract_features(
df, column_id="id", n_jobs=0, disable_progressbar=True
)
features = {}
for k, v in extracted_features.items():
features[k] = v.values[0]
return features
@hydra.main(config_path="../../../config", config_name="default")
def main(config: DictConfig) -> None:
    """Extract tsfresh features for all train/test signals and pickle them."""
    filename = __file__.split("/")[-1][:-3]
    input_dir = config.input_dir
    features_dir = config.features_dir
    os.makedirs(features_dir, exist_ok=True)

    train = pd.read_csv(config.competition.train_path)
    test = pd.read_csv(config.competition.test_path)
    # Sample files are sharded by the first three characters of the id.
    train["path"] = train["id"].apply(
        lambda x: f"{input_dir}/train/{x[0]}/{x[1]}/{x[2]}/{x}.npy"
    )
    test["path"] = test["id"].apply(
        lambda x: f"{input_dir}/test/{x[0]}/{x[1]}/{x[2]}/{x}.npy"
    )

    num_workers = multiprocessing.cpu_count()
    transformer = G2NetFeatureEngineering(
        TsFreshFeatures, batch_size=num_workers, num_workers=num_workers
    )
    X_train = transformer.fit_transform(train["path"])
    X_test = transformer.transform(test["path"])
    print(X_train.info())

    # NOTE(review): ``filename`` is computed above but unused; the literal
    # "(unknown)" output names look redacted/broken -- confirm intent.
    X_train.to_pickle(os.path.join(features_dir, f"(unknown)_train.pkl"))
    X_test.to_pickle(os.path.join(features_dir, f"(unknown)_test.pkl"))


if __name__ == "__main__":
    # Silence library warnings before handing control to hydra.
    warnings.filterwarnings("ignore")
    main()
| [
"nk.tsssa@gmail.com"
] | nk.tsssa@gmail.com |
c8f9ad715eef34b8164134cbcfba734dc4d275cf | 161eee91b961e3387526772233c9c63239b4af8d | /travelproject/travelapp/views.py | 36c603dfff29f9e9dfc316fad551faceb00f6a5f | [] | no_license | sreekripa/travell | 5ead55c80068796e297c4bf126e89e7542bbfdfc | b8085d8aaca4bfb4c09c67adea14094c777724c8 | refs/heads/master | 2023-05-02T23:18:28.387407 | 2021-05-17T08:49:54 | 2021-05-17T08:49:54 | 368,113,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | from django.http import HttpResponse
from django.shortcuts import render
from.models import place
from.models import blog
# Create your views here.
def fun(request):
    """Render the landing page with every place and blog entry."""
    places = place.objects.all()
    blogs = blog.objects.all()
    return render(request, "index.html", {'results': places, 'res': blogs})
def add(request):
    """Add the two POSTed numbers and render the result page."""
    first = int(request.POST["num1"])
    second = int(request.POST["num2"])
    total = first + second
    return render(request, "result.html", {"add": total})
"kripas1990@gmail.com"
] | kripas1990@gmail.com |
b86bf576a9d70bbd47358ef2d067a45f7762f3fa | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03109/s888863725.py | 52f2c8fd86c765cb30b3b9ef1e5ef6b6ff53d94f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 56 | py | s=input()
print('Heisei' if int(s[5:7]) <= 4 else 'TBD') | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
291bb25a970d0003b7a222bb9bb6f02332348965 | 38876c993ec622a6a0081232018d0222a4b67ea5 | /tencentcloud/emr/v20190103/models.py | 661f784a68929d9080a548acbe5d0ff9a7918bf5 | [
"Apache-2.0"
] | permissive | zepc007/tencentcloud-sdk-python | f218c482e5c63b9f4381ca76757c429e286d5ad2 | b9722fcb20f3cc8d89654863d86e781810c47330 | refs/heads/master | 2020-04-22T03:06:29.548902 | 2019-04-13T05:49:59 | 2019-04-13T05:49:59 | 170,073,705 | 1 | 0 | null | 2019-02-11T05:35:51 | 2019-02-11T05:35:50 | null | UTF-8 | Python | false | false | 9,438 | py | # -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tencentcloud.common.abstract_model import AbstractModel
class InquiryPriceCreateInstanceRequest(AbstractModel):
    """InquiryPriceCreateInstance request structure.

    """

    def __init__(self):
        """
        :param TimeUnit: Time unit of the purchase duration.
        :type TimeUnit: str
        :param TimeSpan: Length of the purchase duration.
        :type TimeSpan: int
        :param ResourceSpec: Description of the resource to price.
        :type ResourceSpec: :class:`tencentcloud.emr.v20190103.models.ResourceSpec`
        :param Currency: Currency type.
        :type Currency: str
        :param PayMode: Billing mode.
        :type PayMode: int
        :param SupportHA: Whether HA is supported: 1 yes, 0 no.
        :type SupportHA: int
        :param Software: List of software to install.
        :type Software: list of str
        :param Placement: Placement (location) information.
        :type Placement: :class:`tencentcloud.emr.v20190103.models.Placement`
        :param VPCSettings: VPC information.
        :type VPCSettings: :class:`tencentcloud.emr.v20190103.models.VPCSettings`
        """
        self.TimeUnit = None
        self.TimeSpan = None
        self.ResourceSpec = None
        self.Currency = None
        self.PayMode = None
        self.SupportHA = None
        self.Software = None
        self.Placement = None
        self.VPCSettings = None


    def _deserialize(self, params):
        # Populate attributes from the raw API response dict; nested
        # structures are deserialized into their model classes.
        self.TimeUnit = params.get("TimeUnit")
        self.TimeSpan = params.get("TimeSpan")
        if params.get("ResourceSpec") is not None:
            self.ResourceSpec = ResourceSpec()
            self.ResourceSpec._deserialize(params.get("ResourceSpec"))
        self.Currency = params.get("Currency")
        self.PayMode = params.get("PayMode")
        self.SupportHA = params.get("SupportHA")
        self.Software = params.get("Software")
        if params.get("Placement") is not None:
            self.Placement = Placement()
            self.Placement._deserialize(params.get("Placement"))
        if params.get("VPCSettings") is not None:
            self.VPCSettings = VPCSettings()
            self.VPCSettings._deserialize(params.get("VPCSettings"))
class InquiryPriceCreateInstanceResponse(AbstractModel):
    """InquiryPriceCreateInstance response structure.

    """

    def __init__(self):
        """
        :param Result: Price inquiry result.
        :type Result: :class:`tencentcloud.emr.v20190103.models.InquiryPriceResult`
        :param RequestId: Unique request ID, returned with every request. Provide this RequestId when reporting an issue with this request.
        :type RequestId: str
        """
        self.Result = None
        self.RequestId = None


    def _deserialize(self, params):
        if params.get("Result") is not None:
            self.Result = InquiryPriceResult()
            self.Result._deserialize(params.get("Result"))
        self.RequestId = params.get("RequestId")
class InquiryPriceResult(AbstractModel):
    """Output of a price inquiry.

    """

    def __init__(self):
        """
        :param OriginalCost: Original (undiscounted) price.
        :type OriginalCost: float
        :param DiscountCost: Price after discount.
        :type DiscountCost: float
        :param TimeUnit: Time unit of the purchase duration.
        :type TimeUnit: str
        :param TimeSpan: Length of the purchase duration.
        :type TimeSpan: int
        """
        self.OriginalCost = None
        self.DiscountCost = None
        self.TimeUnit = None
        self.TimeSpan = None


    def _deserialize(self, params):
        self.OriginalCost = params.get("OriginalCost")
        self.DiscountCost = params.get("DiscountCost")
        self.TimeUnit = params.get("TimeUnit")
        self.TimeSpan = params.get("TimeSpan")
class NodeSpec(AbstractModel):
    """Node description.

    """

    def __init__(self):
        """
        :param Memory: Memory capacity in MB.
Note: this field may return null, indicating that no valid value was found.
        :type Memory: int
        :param CPUCores: Number of CPU cores.
Note: this field may return null, indicating that no valid value was found.
        :type CPUCores: int
        :param Volume: Data disk capacity.
Note: this field may return null, indicating that no valid value was found.
        :type Volume: int
        :param DiskType: Disk type.
Note: this field may return null, indicating that no valid value was found.
        :type DiskType: str
        :param Spec: Node specification description.
Note: this field may return null, indicating that no valid value was found.
        :type Spec: str
        :param RootDiskVolume: System disk capacity.
Note: this field may return null, indicating that no valid value was found.
        :type RootDiskVolume: int
        :param StorageType: Storage type.
Note: this field may return null, indicating that no valid value was found.
        :type StorageType: int
        :param SpecName: Specification name.
Note: this field may return null, indicating that no valid value was found.
        :type SpecName: str
        """
        self.Memory = None
        self.CPUCores = None
        self.Volume = None
        self.DiskType = None
        self.Spec = None
        self.RootDiskVolume = None
        self.StorageType = None
        self.SpecName = None


    def _deserialize(self, params):
        self.Memory = params.get("Memory")
        self.CPUCores = params.get("CPUCores")
        self.Volume = params.get("Volume")
        self.DiskType = params.get("DiskType")
        self.Spec = params.get("Spec")
        self.RootDiskVolume = params.get("RootDiskVolume")
        self.StorageType = params.get("StorageType")
        self.SpecName = params.get("SpecName")
class Placement(AbstractModel):
    """Location information of cluster instances.

    """

    def __init__(self):
        """
        :param ProjectId: ID of the project the instance belongs to. Obtainable from the projectId field returned by DescribeProject. Defaults to the default project when omitted.
        :type ProjectId: int
        :param Zone: ID of the availability zone the instance belongs to. Obtainable from the Zone field returned by DescribeZones.
        :type Zone: str
        """
        self.ProjectId = None
        self.Zone = None


    def _deserialize(self, params):
        self.ProjectId = params.get("ProjectId")
        self.Zone = params.get("Zone")
class ResourceSpec(AbstractModel):
    """Resource description.

    """

    def __init__(self):
        """
        :param CommonCount: Number of Common nodes.
        :type CommonCount: int
        :param MasterResourceSpec: Resource description of Master nodes.
        :type MasterResourceSpec: :class:`tencentcloud.emr.v20190103.models.NodeSpec`
        :param CoreResourceSpec: Resource description of Core nodes.
        :type CoreResourceSpec: :class:`tencentcloud.emr.v20190103.models.NodeSpec`
        :param TaskResourceSpec: Resource description of Task nodes.
        :type TaskResourceSpec: :class:`tencentcloud.emr.v20190103.models.NodeSpec`
        :param MasterCount: Number of Master nodes.
        :type MasterCount: int
        :param CoreCount: Number of Core nodes.
        :type CoreCount: int
        :param TaskCount: Number of Task nodes.
        :type TaskCount: int
        :param CommonResourceSpec: Resource description of Common nodes.
        :type CommonResourceSpec: :class:`tencentcloud.emr.v20190103.models.NodeSpec`
        """
        self.CommonCount = None
        self.MasterResourceSpec = None
        self.CoreResourceSpec = None
        self.TaskResourceSpec = None
        self.MasterCount = None
        self.CoreCount = None
        self.TaskCount = None
        self.CommonResourceSpec = None


    def _deserialize(self, params):
        self.CommonCount = params.get("CommonCount")
        if params.get("MasterResourceSpec") is not None:
            self.MasterResourceSpec = NodeSpec()
            self.MasterResourceSpec._deserialize(params.get("MasterResourceSpec"))
        if params.get("CoreResourceSpec") is not None:
            self.CoreResourceSpec = NodeSpec()
            self.CoreResourceSpec._deserialize(params.get("CoreResourceSpec"))
        if params.get("TaskResourceSpec") is not None:
            self.TaskResourceSpec = NodeSpec()
            self.TaskResourceSpec._deserialize(params.get("TaskResourceSpec"))
        self.MasterCount = params.get("MasterCount")
        self.CoreCount = params.get("CoreCount")
        self.TaskCount = params.get("TaskCount")
        if params.get("CommonResourceSpec") is not None:
            self.CommonResourceSpec = NodeSpec()
            self.CommonResourceSpec._deserialize(params.get("CommonResourceSpec"))
class VPCSettings(AbstractModel):
    """VPC parameters.

    """

    def __init__(self):
        """
        :param VpcId: VPC ID.
        :type VpcId: str
        :param SubnetId: Subnet ID.
        :type SubnetId: str
        """
        self.VpcId = None
        self.SubnetId = None


    def _deserialize(self, params):
        self.VpcId = params.get("VpcId")
        self.SubnetId = params.get("SubnetId")
"tencentcloudapi@tencent.com"
] | tencentcloudapi@tencent.com |
6414416c57b15df463a316ab5eb47dfd60c206ad | ac1e944eb288c8b13a0bef0ee7de85ee6d30b4c0 | /django/portfolio/portfolio/settings.py | 76fb42fa653d4ce899544cb20f47fd381ab5663a | [] | no_license | Jayson7/random-projects | 05dd175d00e9bd62cb39973c3439846f641675c8 | cdbebb896a0ecea0de543f16ecf4661e519ec0bb | refs/heads/master | 2023-06-19T01:54:08.339954 | 2021-07-19T01:40:09 | 2021-07-19T01:40:09 | 383,971,840 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,783 | py |
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-rfpl8-6sksa!z!4vqh@b9ddcxnu8j-w*j7e=rq*-^3w-qte*kc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
import cloudinary
import cloudinary.uploader
import cloudinary.api
cloudinary.config(
cloud_name = "jaytech",
api_key = "279168237821868",
api_secret = "VQAHpUEHyJfxhces1SiW5F8srEU",
secure = True
)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
"apps",
"crispy_forms",
'cloudinary',
'django_filters',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'portfolio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['Templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'portfolio.wsgi.application'
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
import os
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_DIRS = os.path.join(BASE_DIR, 'media')
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"lexxiijoo70@gmail.com"
] | lexxiijoo70@gmail.com |
e210432980b1cb9606281d024f148ab4962d8f97 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_211/266.py | 73a2efd2c2aea6e7baf64b46d9dff97a79692c99 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,588 | py | import sys
import time
### I/O wrapper ###
class FileParser:
"""Read numbers/strings from file (or stdin by default), one line by one.
"""
def __init__(self, filepath=None, type=None):
if filepath is None:
self.fd = sys.stdin
else:
self.fd = open(filepath, type)
def read_string(self):
return self.fd.readline().rstrip()
def read_words(self):
return [x for x in self.read_string().split()]
def read_int(self):
return int(self.fd.readline())
def read_integers(self):
return [int(x) for x in self.fd.readline().rstrip().split()]
def read_float(self):
return float(self.fd.readline())
def read_floats(self):
return [float(x) for x in self.fd.readline().rstrip().split()]
def write(self, context):
if self.fd is not sys.stdin:
self.fd.write(context+"\n")
else:
print(context)
return
def close(self):
if self.fd is not sys.stdin:
self.fd.close()
self.fd = None
def MultiThread(fun, input):
    """Map *fun* over argument tuples on a thread pool, dropping None results."""
    from multiprocessing.dummy import Pool as ThreadPool
    workers = ThreadPool()
    outcomes = workers.starmap(fun, input)
    workers.close()
    workers.join()
    return [item for item in outcomes if item is not None]
### specify the problem meta information ###
problemID = "C" # A, B, C, D...
problemSize = "local" # small, large, local
# filename = "%s-%s-practice" % (problemID, problemSize)
filename = "C-small-1-attempt1"

### the algorithm that solve the cases ###
# 1-based counter of how many cases have been solved (mutated by solve()).
globalCaseID = 0
def solve(case):
    """Maximize the product of probabilities P given an upgrade budget U.

    Water-filling: first lift exact zeros, then repeatedly raise the current
    minimum probabilities in lockstep until the budget runs out or all reach 1.
    Appends start/end timestamps to the module-level ``timing`` list and
    prints per-case progress.
    """
    # record the start timing
    timing.append(time.time())
    N, K, U, P = case
    # Count entries that are (floating-point) zero and lift them first.
    count = 0
    for foo in P:
        if abs(foo-0) < 1e-7:
            count += 1
    if count:
        inc = min(U / count, 1)
        for i in range(N):
            if abs(P[i]-0) < 1e-7:
                P[i] += inc
                U -= inc
    # Repeatedly raise the block of minimum values up to the next distinct
    # probability (or to 1.0), spending budget evenly across the block.
    while U > 0 and sum(P) < N:
        P = sorted(P)
        i = 1
        while i < N and P[i] == P[0]:
            i += 1
        if i < N:
            Next = P[i]
        else:
            Next = 1.0
        inc = min(U / i, Next - P[0])
        for j in range(i):
            P[j] += inc
            U -= inc
    # The answer is the product of all (possibly raised) probabilities.
    ans = 1.0
    for foo in P:
        ans *= foo
    timing.append(time.time())
    global globalCaseID
    globalCaseID += 1
    print("Case %d" % globalCaseID, ans, "\t\t Elapsed: %.2f seconds" % (timing[-1] - timing[-2]))
    return ans
### solve the test cases ###
# for the purpose of counting the total elapsed time
timing = [time.time()]
# open the input / output files
f_in = FileParser(filename+".in", "r")
f_out = FileParser(filename+".out", "w")
# parse the input, and store them into cases
cases = []
T = f_in.read_int()
for _ in range(T):
    # read the input data of each case
    # f_in.read_string(), f_in.read_words()
    # f_in.read_int(), f_in.read_integers()
    # f_in.read_float(), f_in.read_floats()
    N, K = f_in.read_integers()
    U = f_in.read_float()
    P = f_in.read_floats()
    cases.append((N, K, U, P))
# solve each test case (sequentially; the threaded variant is kept disabled)
#anses = MultiThread(solve, zip(cases))
for caseID in range(1, T+1):
    # solve the case
    ans = solve(cases[caseID-1])
    #ans = anses[caseID-1]
    # print the answer to output file
    f_out.write("Case #%d: %.9f" % (caseID, ans))
# close the input / output files
f_in.close()
f_out.close()
# output the total elapsed time
timing.append(time.time())
total_time = timing[-1] - timing[0]
print("Total elapsed time: %.2f seconds / %.2f minutes" % (total_time, total_time/60))
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
07fc07bcebb263a435c469b86d08d4dc46022037 | 60cb975f3e0251c73c457271bce8a7b2036e422b | /studysrc/day01/testIf.py | cd4b701787955e49548fb8081304b49a80d6e252 | [] | no_license | 49257620/reboot | 0a2341f23bc1a6f3ae47b59f772919228c623544 | 86b348228d1a25d78c45b0e9022d7c773544373b | refs/heads/master | 2018-11-17T19:19:58.969710 | 2018-09-25T03:15:57 | 2018-09-25T03:15:57 | 125,727,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | # encoding utf-8
promote = input('看到西瓜了吗?(看到输入\'Y\'或者\'y\'):')
money = 100.0
priceBaozi = 11.5
priceXigua = 23.8
print('>>>买了一斤包子,花费'+str(priceBaozi)+'元')
money = money - priceBaozi
if promote == 'Y' or promote == 'y':
print('>>>买了一个西瓜,花费'+str(priceXigua)+'元')
money = money - priceXigua
print('剩余:' + str(money) + '元')
| [
"49257620@qq.com"
] | 49257620@qq.com |
7f5fad0d8b83d94bd6cdc61d8149280442be7b1a | 71460b3fa81c386b99a260ccf59c109bcde3b953 | /realtime_data_processor.py | 2826b5f671b03c4c43341dc13008b15195657c8a | [
"MIT"
] | permissive | ec500-software-engineering/exercise-1-modularity-ZhibinHuang | 831f07f31bfaa2a5c3b043e9fb66942208ca6020 | ab36bf593fa61fe183c57af15a011d4353581d6d | refs/heads/master | 2020-04-21T21:49:56.137069 | 2019-02-14T23:40:19 | 2019-02-14T23:40:19 | 169,891,502 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,039 | py | import time
import random
import threading
from common_types import SensorDataType
from common_types import Message, MessageUrgency
class RealTimeDataProcessor(threading.Thread):
def __init__(self, in_queue, notification_man):
super().__init__()
self._in_queue = in_queue
self._notification_man = notification_man
# Can probably put these functions in a separate class
@staticmethod
def blood_pressure_is_normal(pressure_data):
return 90 <= pressure_data.get_systolic() <= 120 \
and 60 <= pressure_data.get_diastolic() <= 80
@staticmethod
def blood_pulse_is_normal(pulse_data):
return 60 <= pulse_data.get_pulse() <= 100
def run(self):
'''
In here we need to process data we receive from sensor read queue.
If any problems are detected in the attached patient's vitals
we issue a command to the notification manager
:return:
'''
while True:
incoming_data = self._in_queue.get(block=True)
if incoming_data.get_type() == SensorDataType.BLOOD_PRESSURE:
if not RealTimeDataProcessor.blood_pressure_is_normal(incoming_data):
self._notification_man.send_message(
Message(
'!!!! PATIENT ALERT BLOOD PRESSURE ABNORMAL !!!!',
MessageUrgency.HIGH_URGENCY
)
)
elif incoming_data.get_type() == SensorDataType.BLOOD_PULSE:
if not RealTimeDataProcessor.blood_pulse_is_normal(incoming_data):
self._notification_man.send_message(
Message(
'!!!! PATIENT ALERT PULSE IS ABNORMAL !!!!',
MessageUrgency.HIGH_URGENCY
)
)
# yield quantum/time slice for other ready threads
time.sleep(
random.randint(1, 3)
)
| [
"noreply@github.com"
] | ec500-software-engineering.noreply@github.com |
568a052bbe4f8f62f7f7f617b5b3f6e9d966ea8a | a80e9eb7ade3d43ce042071d796c00dd10b93225 | /ch_6/stars_data_dict2.py | 8633d4020bddd3caec1f4b9297bb5e9acdc1657d | [] | no_license | ksjpswaroop/python_primer | 69addfdb07471eea13dccfad1f16c212626dee0a | 99c21d80953be3c9dc95f3a316c04b0c5613e830 | refs/heads/master | 2020-07-14T17:37:45.923796 | 2014-06-06T22:30:48 | 2014-06-06T22:30:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,415 | py | # Exercise 6.13
infile = open('stars.dat', 'r')
data = {}
for line in infile.readlines()[1:]:
words = line.split()
name = ' '.join(words[:-3])
if words[-3] == '-':
distance = '-'
apparent_brightness = '-'
else:
distance = float(words[-3])
apparent_brightness = float(words[-2])
luminosity = float(words[-1])
data[name] = {'distance': distance,
'apparent brightness': apparent_brightness,
'luminosity': luminosity}
print '='*68
print '%-20s %12s %18s %15s' % ('Star', 'Distance',
'App. brightness', 'Luminosity')
print '-'*68
for star in data:
if star == 'Sun':
print '%-20s %12s %18s %15.5f' % (star,
data[star]['distance'],
data[star]['apparent brightness'],
data[star]['luminosity'])
else:
print '%-20s %12f %18f %15.5f' % (star,
data[star]['distance'],
data[star]['apparent brightness'],
data[star]['luminosity'])
print '='*68
"""
Sample run:
python stars_data_dict2.py
====================================================================
Star Distance App. brightness Luminosity
--------------------------------------------------------------------
Wolf 359 7.700000 0.000001 0.00002
Sun - - 1.00000
Alpha Centauri C 4.200000 0.000010 0.00006
Alpha Centauri B 4.300000 0.077000 0.45000
Alpha Centauri A 4.300000 0.260000 1.56000
Luyten 726-8 A 8.400000 0.000003 0.00006
Sirius B 8.600000 0.001000 0.00300
Sirius A 8.600000 1.000000 23.60000
Luyten 726-8 B 8.400000 0.000002 0.00004
BD +36 degrees 2147 8.200000 0.000300 0.00600
Barnard's Star 6.000000 0.000040 0.00050
Ross 154 9.400000 0.000020 0.00050
====================================================================
===============================
"""
| [
"noahwaterfieldprice@gmail.com"
] | noahwaterfieldprice@gmail.com |
8a2be07e1e5c38ef9e5fb4cc1ec1310b15899623 | 202be9ce15e7e41bad55e6bbe4d0c941ecbb6781 | /1037 在霍格沃茨找零钱.py | e054872072ec15b2a83bb4a99f9fb821c4ae05fb | [] | no_license | junyechen/Basic-level | ae55ab4e13fd38595772786af25fcc91c055f28c | a6e15bc3829dfe05cefc248454f0433f8070cdfb | refs/heads/master | 2020-04-29T08:01:21.936408 | 2019-07-06T04:16:14 | 2019-07-06T04:16:14 | 175,972,034 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,445 | py | '''
如果你是哈利·波特迷,你会知道魔法世界有它自己的货币系统 —— 就如海格告诉哈利的:“十七个银西可(Sickle)兑一个加隆(Galleon),二十九个纳特(Knut)兑一个西可,很容易。”现在,给定哈利应付的价钱 P 和他实付的钱 A,你的任务是写一个程序来计算他应该被找的零钱。
输入格式:
输入在 1 行中分别给出 P 和 A,格式为 Galleon.Sickle.Knut,其间用 1 个空格分隔。这里 Galleon 是 [0, 107] 区间内的整数,Sickle 是 [0, 17) 区间内的整数,Knut 是 [0, 29) 区间内的整数。
输出格式:
在一行中用与输入同样的格式输出哈利应该被找的零钱。如果他没带够钱,那么输出的应该是负数。
输入样例 1:
10.16.27 14.1.28
输出样例 1:
3.2.1
输入样例 2:
14.1.28 10.16.27
输出样例 2:
-3.2.1
'''
#####################################################
'''
非常简单的题目
'''
#####################################################
P, A = [i.split('.') for i in input().split()]
P = 29 * 17 * int(P[0]) + 29 * int(P[1]) + int(P[2])
A = 29 * 17 * int(A[0]) + 29 * int(A[1]) + int(A[2])
Waste = A - P
if Waste >= 0:
flag = True
else:
flag = False
Waste = -Waste
Knut = Waste % (17 * 29) % 29
Gallen = Waste // (29 * 17)
Sickle = Waste % (17 * 29) // 29
if not flag:
print('-',end='')
print(Gallen,Sickle,Knut,sep='.')
| [
"chenjunyeword@outlook.com"
] | chenjunyeword@outlook.com |
f208947393151e0fea908eb59eb830d80620d0fe | b4916436d437d98f79ae2af4e56fa1acd5f84e7f | /pycozmo/robot.py | 33d37092a81e8891be669da54fffd7932716207c | [
"MIT",
"Apache-2.0"
] | permissive | VictorTagayun/pycozmo | b875f3e0008efeae39f3cea80418aee66af8511e | dd971aad2d32419deae00b1294922b416ba2e2b9 | refs/heads/master | 2020-07-30T07:22:21.715324 | 2019-09-13T09:32:51 | 2019-09-13T09:32:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,602 | py |
import math
from . import util
from . import protocol_encoder
MIN_HEAD_ANGLE = util.Angle(degrees=-25)
MAX_HEAD_ANGLE = util.Angle(degrees=44.5)
MIN_LIFT_HEIGHT = util.Distance(mm=32.0)
MAX_LIFT_HEIGHT = util.Distance(mm=92.0)
LIFT_ARM_LENGTH = util.Distance(mm=66.0)
LIFT_PIVOT_HEIGHT = util.Distance(mm=45.0)
MAX_WHEEL_SPEED = util.Speed(mmps=200.0)
class RobotStatusFlag(object):
IS_MOVING = 0x1
IS_CARRYING_BLOCK = 0x2
IS_PICKING_OR_PLACING = 0x4
IS_PICKED_UP = 0x8
IS_BODY_ACC_MODE = 0x10
IS_FALLING = 0x20
IS_ANIMATING = 0x40
IS_PATHING = 0x80
LIFT_IN_POS = 0x100
HEAD_IN_POS = 0x200
IS_ANIM_BUFFER_FULL = 0x400
IS_ANIMATING_IDLE = 0x800
IS_ON_CHARGER = 0x1000
IS_CHARGING = 0x2000
CLIFF_DETECTED = 0x4000
ARE_WHEELS_MOVING = 0x8000
IS_CHARGER_OOS = 0x10000
RobotStatusFlagNames = {
RobotStatusFlag.IS_MOVING: "IS_MOVING",
RobotStatusFlag.IS_CARRYING_BLOCK: "IS_CARRYING_BLOCK",
RobotStatusFlag.IS_PICKING_OR_PLACING: "IS_PICKING_OR_PLACING",
RobotStatusFlag.IS_PICKED_UP: "IS_PICKED_UP",
RobotStatusFlag.IS_BODY_ACC_MODE: "IS_BODY_ACC_MODE",
RobotStatusFlag.IS_FALLING: "IS_FALLING",
RobotStatusFlag.IS_ANIMATING: "IS_ANIMATING",
RobotStatusFlag.IS_PATHING: "IS_PATHING",
RobotStatusFlag.LIFT_IN_POS: "LIFT_IN_POS",
RobotStatusFlag.HEAD_IN_POS: "HEAD_IN_POS",
RobotStatusFlag.IS_ANIM_BUFFER_FULL: "IS_ANIM_BUFFER_FULL",
RobotStatusFlag.IS_ANIMATING_IDLE: "IS_ANIMATING_IDLE",
RobotStatusFlag.IS_ON_CHARGER: "IS_ON_CHARGER",
RobotStatusFlag.IS_CHARGING: "IS_CHARGING",
RobotStatusFlag.CLIFF_DETECTED: "CLIFF_DETECTED",
RobotStatusFlag.ARE_WHEELS_MOVING: "ARE_WHEELS_MOVING",
RobotStatusFlag.IS_CHARGER_OOS: "IS_CHARGER_OOS",
}
BODY_COLOR_NAMES = {
protocol_encoder.BodyColor.WHITE_v10: "Original",
protocol_encoder.BodyColor.RESERVED: "Reserved",
protocol_encoder.BodyColor.WHITE_v15: "White",
protocol_encoder.BodyColor.CE_LM_v15: "CE_LM",
protocol_encoder.BodyColor.LE_BL_v16: "LE_BL",
}
class LiftPosition(object):
"""
Represents the position of Cozmo's lift.
The class allows the position to be referred to as either absolute height
above the ground, as a ratio from 0.0 to 1.0, or as the angle of the lift
arm relative to the ground.
Args:
height (:class:`cozmo.util.Distance`): The height of the lift above the ground.
ratio (float): The ratio from 0.0 to 1.0 that the lift is raised from the ground.
angle (:class:`cozmo.util.Angle`): The angle of the lift arm relative to the ground.
"""
__slots__ = ('_height', )
def __init__(self, height=None, ratio=None, angle=None):
def _count_arg(arg):
# return 1 if argument is set (not None), 0 otherwise
return 0 if (arg is None) else 1
num_provided_args = _count_arg(height) + _count_arg(ratio) + _count_arg(angle)
if num_provided_args != 1:
raise ValueError("Expected one, and only one, of the distance, ratio or angle keyword arguments")
if height is not None:
if not isinstance(height, util.Distance):
raise TypeError("Unsupported type for distance - expected util.Distance")
self._height = height
elif ratio is not None:
height_mm = MIN_LIFT_HEIGHT.mm + (ratio * (MAX_LIFT_HEIGHT.mm - MIN_LIFT_HEIGHT.mm))
self._height = util.Distance(mm=height_mm)
elif angle is not None:
if not isinstance(angle, util.Angle):
raise TypeError("Unsupported type for angle - expected util.Angle")
height_mm = (math.sin(angle.radians) * LIFT_ARM_LENGTH.mm) + LIFT_PIVOT_HEIGHT.mm
self._height = util.Distance(mm=height_mm)
def __repr__(self):
return "<%s height=%s ratio=%s angle=%s>" % (self.__class__.__name__, self._height, self.ratio, self.angle)
@property
def height(self) -> util.Distance:
""" Height above the ground. """
return self._height
@property
def ratio(self) -> float:
""" The ratio from 0 to 1 that the lift is raised, 0 at the bottom, 1 at the top. """
ratio = ((self._height.mm - MIN_LIFT_HEIGHT.mm) / (MAX_LIFT_HEIGHT.mm - MIN_LIFT_HEIGHT.mm))
return ratio
@property
def angle(self) -> util.Angle:
""" The angle of the lift arm relative to the ground. """
sin_angle = (self._height.mm - LIFT_PIVOT_HEIGHT.mm) / LIFT_ARM_LENGTH.mm
angle = math.asin(sin_angle)
return util.Angle(radians=angle)
| [
"zayfod@gmail.com"
] | zayfod@gmail.com |
2f3bad0b23444dbe7cdadc422637b213f444f4f0 | 7f21abecb951371885ca007bd24eebbb61e8d0a0 | /lesson_012/python_snippets/04_queues.py | 81fbb8e8c8b542f08790a434e6a058aaf9c808bf | [] | no_license | zaboevai/python_base | a076b0d8798f103347dddcf0be0d09fb02815609 | c689568c926db5ff4f9cdb4f5c335fac7a434130 | refs/heads/develop | 2022-11-25T11:04:21.678107 | 2021-06-09T19:48:52 | 2021-06-09T19:48:52 | 191,818,125 | 23 | 18 | null | 2022-11-22T07:57:23 | 2019-06-13T19:01:48 | Python | UTF-8 | Python | false | false | 4,284 | py | # -*- coding: utf-8 -*-
# Кроме блокировок и примитивов синхронизации существует еще один способ обмена данными между потоками.
# Это очереди - Queue - https://docs.python.org/3.6/library/queue.html
# В очередь можно положить элемент и взять его. Queue гарантирует что потоки не помешают друг другу
# - операции очереди атомарные и блокирующие.
import time
from collections import defaultdict
import queue
import random
import threading
FISH = (None, 'плотва', 'окунь', 'лещ')
# Посадим всех рыбаков в лодку, в которой есть садок для улова.
class Fisher(threading.Thread):
def __init__(self, name, worms, catcher, *args, **kwargs):
super().__init__(*args, **kwargs)
self.name = name
self.worms = worms
self.catcher = catcher
def run(self):
for worm in range(self.worms):
print(f'{self.name}, {worm}: забросили ждем...', flush=True)
# time.sleep(random.randint(1, 3) / 10)
fish = random.choice(FISH)
if fish is None:
print(f'{self.name}, {worm}: сожрали червяка!', flush=True)
else:
print(f'{self.name}, {worm}: поймал {fish} и хочет положить его в садок', flush=True)
if self.catcher.full():
print(f'{self.name}, {worm}: приемщик полон !!!', flush=True)
# Этот метод у очереди - атомарный и блокирующий
# Поток приостанавливается, пока нет места в очереди
self.catcher.put(fish)
print(f'{self.name}, {worm}: наконец-то отдал {fish} приемщику', flush=True)
class Boat(threading.Thread):
def __init__(self, worms_per_fisher=10, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fishers = []
self.worms_per_fisher = worms_per_fisher
self.catcher = queue.Queue(maxsize=2)
self.fish_tank = defaultdict(int)
def add_fisher(self, name):
fisher = Fisher(name=name, worms=self.worms_per_fisher, catcher=self.catcher)
self.fishers.append(fisher)
def run(self):
print('Лодка вышла в море...', flush=True)
for fisher in self.fishers:
fisher.start()
while True:
try:
# Этот метод у очереди - атомарный и блокирующий,
# Поток приостанавливается, пока нет элементов в очереди
fish = self.catcher.get(timeout=1)
print(f'Приемщик принял {fish} и положил в садок', flush=True)
self.fish_tank[fish] += 1
except queue.Empty:
print(f'Приемщику нет рыбы в течении 1 секунды', flush=True)
if not any(fisher.is_alive() for fisher in self.fishers):
break
for fisher in self.fishers:
fisher.join()
print(f'Лодка возвращается домой с {self.fish_tank}', flush=True)
boat = Boat(worms_per_fisher=10)
humans = ['Васек', 'Колян', 'Петрович', 'Хмурый', 'Клава', ]
for name in humans:
boat.add_fisher(name=name)
boat.start()
boat.join()
print(f'лодка привезла {boat.catch}')
# Мы использовали очередь вида FIFO - first input, first output - первый вошел, первый вышел.
# В модуле queue есть еще два вида очередей:
# LifoQueue - last input, first output - последний вошел, первый вышел (еще такую очередь называют стеком).
# PriorityQueue - первым возвращается наименьший элемент, то есть sorted(list(entries))[0]
| [
"you@example.com"
] | you@example.com |
0e4a1d59e13199cb3b7d2caf6557074911679da9 | da9cbae7c2b9789951874f4b2dd9eba990753bbd | /run2018/crab_V0cumu_HIMB2018_Ks_SBPos_Mid_sysMCBias_v1.py | cdc96fb1d89a9fba410816b27db346a1d6940c11 | [] | no_license | BetterWang/QWCumuDiff | d0be53a87dd345153b603a16617f572d5adc288c | cccb286bb9ee4b9cb3aa78cc839ae993c66624f1 | refs/heads/master | 2021-09-13T01:30:37.988189 | 2021-08-25T13:36:57 | 2021-08-25T13:36:57 | 81,896,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,008 | py | from CRABAPI.RawCommand import crabCommand
from CRABClient.UserUtilities import config
from CRABClient.ClientExceptions import ClientException
from httplib import HTTPException
config = config()
config.General.requestName = 'HIMB0_V0Cumu_Ks_SBPos_Mid_sysMCBias_v2'
config.General.workArea = 'CrabArea'
config.General.transferOutputs = True
config.General.transferLogs = False
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'qwcumu_PbPb18_V0_MCBias_v2s.py'
#config.JobType.maxJobRuntimeMin = 2500
config.JobType.inputFiles = ['MC_Full_BDT250_D4.KS.weights.xml']
config.JobType.pyCfgParams = ['part=KS', 'massRange=SBPos', 'rap=Mid']
config.Data.inputDataset = '/HIMinimumBias0/qwang-V0Skim_v3-5f932986cf38f9e8dbd6c3aea7f6c2b4/USER'
config.Data.inputDBS = 'phys03'
config.Data.splitting = 'Automatic'
#config.Data.unitsPerJob = 8
config.Data.outLFNDirBase = '/store/group/phys_heavyions/qwang/PbPb2018'
config.Data.lumiMask = '/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions18/HI/PromptReco/Cert_326381-327564_HI_PromptReco_Collisions18_JSON.txt'
config.Data.publication = False
#config.Data.outputDatasetTag = ''
config.Data.useParent = True
config.Data.ignoreLocality = True
config.Site.whitelist = ['T2_US_Vanderbilt']
config.Site.storageSite = 'T2_CH_CERN'
config.Data.allowNonValidInputDataset = True
config.JobType.allowUndistributedCMSSW = True
#try:
# crabCommand('submit', config = config)
#except HTTPException as hte:
# print "Failed submitting task: %s" % (hte.headers)
#except ClientException as cle:
# print "Failed submitting task: %s" % (cle)
#
config.General.requestName = 'HIMB1_V0Cumu_Ks_SBPos_Mid_sysMCBias_v2'
config.Data.inputDataset = '/HIMinimumBias1/qwang-V0Skim_v3-5f932986cf38f9e8dbd6c3aea7f6c2b4/USER'
try:
crabCommand('submit', config = config)
except HTTPException as hte:
print "Failed submitting task: %s" % (hte.headers)
except ClientException as cle:
print "Failed submitting task: %s" % (cle)
| [
"BetterWang@gmail.com"
] | BetterWang@gmail.com |
841dc8000e963e39aa4a57d8541acaba350bf449 | 2b8c88dfee5c5a784357515eafe8cd5f997c8774 | /leetcode/54.spiral-matrix.py | a4bbfff15df9fe1594001407e0139df5d8c37fd6 | [] | no_license | archenRen/learnpy | e060f3aa2f77c35fc1b12345720af6c8b528da57 | 934ef76b97297f746a722a48c76672c7bc744cd9 | refs/heads/master | 2022-04-28T20:25:59.114036 | 2020-05-03T02:16:03 | 2020-05-03T02:16:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | #
# @lc app=leetcode id=54 lang=python3
#
# [54] Spiral Matrix
#
# https://leetcode.com/problems/spiral-matrix/description/
#
# algorithms
# Medium (31.36%)
# Likes: 1345
# Dislikes: 450
# Total Accepted: 264.2K
# Total Submissions: 842.4K
# Testcase Example: '[[1,2,3],[4,5,6],[7,8,9]]'
#
# Given a matrix of m x n elements (m rows, n columns), return all elements of
# the matrix in spiral order.
#
# Example 1:
#
#
# Input:
# [
# [ 1, 2, 3 ],
# [ 4, 5, 6 ],
# [ 7, 8, 9 ]
# ]
# Output: [1,2,3,6,9,8,7,4,5]
#
#
# Example 2:
#
# Input:
# [
# [1, 2, 3, 4],
# [5, 6, 7, 8],
# [9,10,11,12]
# ]
# Output: [1,2,3,4,8,12,11,10,9,5,6,7]
#
#
# https://leetcode.com/problems/spiral-matrix/discuss/20571/1-liner-in-Python-%2B-Ruby
# 有详细分析
# 递归解法
# 去一行后,顺时针旋转矩阵。参考leetcode-48. 但是顺时针旋转应该先转置再倒置。
class Solution:
def spiralOrder(self, matrix: 'List[List[int]]') -> 'List[int]':
return matrix and list(matrix.pop(0)) + self.spiralOrder(list(zip(*matrix))[::-1])
| [
"wangdi03@ppdai.com"
] | wangdi03@ppdai.com |
7bd4527bda1c4953bf8f336080c3691c2b70fad9 | 0f30dbffc77960edf69fa18c78c6d1a1658bb3dc | /tests/test_cu_linear_operator.py | 3ad17786ed37a887b11b56d0139e1ac2267065ec | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ameli/imate | 859c5ed35540cc44058cd908ce44485487acd041 | de867f131a4cda7d60a68bf0558e896fae89d776 | refs/heads/main | 2023-08-29T07:03:53.512434 | 2023-08-15T23:39:30 | 2023-08-15T23:39:30 | 308,965,310 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | #! /usr/bin/env python
# SPDX-FileCopyrightText: Copyright 2021, Siavash Ameli <sameli@berkeley.edu>
# SPDX-License-Identifier: BSD-3-Clause
# SPDX-FileType: SOURCE
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the license found in the LICENSE.txt file in the root
# directory of this source tree.
# =======
# Imports
# =======
import sys
# This package might not be compiled with the cuda support.
try:
from imate._cu_linear_operator.tests import test_cu_matrix, \
test_cu_affine_matrix_function
subpackage_exists = True
except ModuleNotFoundError:
subpackage_exists = False
# =======================
# test cu linear operator
# =======================
def test_cu_linear_operator():
"""
A wrapper for :mod:`imate._linear_operator.tests` test sub-module.
"""
# A test for linear operator
if subpackage_exists:
try:
test_cu_matrix()
test_cu_affine_matrix_function()
except RuntimeError as e:
print(e)
# ===========
# System Main
# ===========
if __name__ == "__main__":
sys.exit(test_cu_linear_operator())
| [
"sia.sunrise@gmail.com"
] | sia.sunrise@gmail.com |
90d354864b26a4f570839f712ebefc85b853342b | 00e80ba9ad9fa58ab18114e0303d9a6af21820f3 | /wanderlust/urls.py | 779adda3dad8bd1a6f8cc39d1fff493483bb2b87 | [] | no_license | Raghavareddy21/wanderlust | cc3a4dcafc5e5aba9c2236750c12768420dbeb6c | 7e972668586ec377ccec53dbcba5ce6496f58faa | refs/heads/master | 2023-01-25T00:44:05.928064 | 2020-11-25T16:42:42 | 2020-11-25T16:42:42 | 302,010,615 | 0 | 1 | null | 2020-10-11T10:39:39 | 2020-10-07T11:09:44 | JavaScript | UTF-8 | Python | false | false | 826 | py | """wanderlust URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url,include
urlpatterns = [
url('admin/', admin.site.urls),
url('',include('guide.urls')),
]
| [
"challavenkataraghavareddy21@gmail.com"
] | challavenkataraghavareddy21@gmail.com |
b5c178156f40eedcd15619cedc40e1807107e05c | 82fce9aae9e855a73f4e92d750e6a8df2ef877a5 | /Lab/venv/lib/python3.8/site-packages/OpenGL/GL/ARB/texture_buffer_range.py | 7d62ab26b7cb24e42b8cd5f95489e5d880249a37 | [] | no_license | BartoszRudnik/GK | 1294f7708902e867dacd7da591b9f2e741bfe9e5 | 6dc09184a3af07143b9729e42a6f62f13da50128 | refs/heads/main | 2023-02-20T19:02:12.408974 | 2021-01-22T10:51:14 | 2021-01-22T10:51:14 | 307,847,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,228 | py | '''OpenGL extension ARB.texture_buffer_range
This module customises the behaviour of the
OpenGL.raw.GL.ARB.texture_buffer_range to provide a more
Python-friendly API
Overview (from the spec)
ARB_texture_buffer_object (which was promoted to core in OpenGL 3.1)
introduced the ability to attach the data store of a buffer object
to a buffer texture and access it from shaders. The extension only allows
the entire store of the buffer object to the texture. This extension
expands on this and allows a sub-range of the buffer's data store to
be attached to a texture. This can be used, for example, to allow multiple
buffer textures to be backed by independent sub-ranges of the same buffer
object, or for different sub-ranges of a single buffer object to be used
for different purposes.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/texture_buffer_range.txt
'''
from OpenGL.raw.GL.ARB.texture_buffer_range import _EXTENSION_NAME
def glInitTextureBufferRangeARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION | [
"rudnik49@gmail.com"
] | rudnik49@gmail.com |
58f2641de9afc33cf7980ff3ad484fd18b82cdfe | 3f566babc0230a9d1d2a8ab6a4f0fee9bf2f497b | /tools/mayaCore/cmds/pSets.py | e162540227c1b10025df837e4030443add8ac9ad | [] | no_license | snaress/studio_dev | 3118e6d7b5ab7e9f7f318cf0c2c4145ad61d5f7f | a58608922abe1d47bf3d807c5db11e265aad85a2 | refs/heads/master | 2021-01-21T13:17:58.396068 | 2016-04-25T00:42:27 | 2016-04-25T00:42:27 | 51,249,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,093 | py | try:
import maya.cmds as mc
except:
pass
def getAllSets(suffixes=None):
"""
Get all sets ending with given suffixes
:param suffixes: Set suffixes
:type suffixes: list
:return: Sets list
:rtype: list
"""
setList = []
for s in mc.ls(type='objectSet') or []:
for ext in suffixes:
if suffixes is None:
setList.append(s)
else:
if s.endswith(ext):
setList.append(s)
return setList
def removeSets(sets=None, suffixes=None):
"""
Delete given sets or all sets given by 'getAllSets()'
:param sets: Sets list to delete
:type sets: list
:param suffixes: Set suffixes
:type suffixes: list
"""
#--- Get Sets ---#
if sets is None:
allSets = getAllSets(suffixes=suffixes)
else:
allSets = sets
#--- Remove Sets ---#
while allSets:
for s in allSets:
try:
mc.delete(s)
print 'delete', s
except:
pass
allSets = getAllSets()
| [
"jln.buisseret@gmail.com"
] | jln.buisseret@gmail.com |
572594fbea40e8ffdd3e9a85fce23c0041774610 | ca4e57a6861f1e24d1521bf5b775aee3b6db7725 | /lex1.py | 1d60b17dd7ce6380a7e868149f8d16909811c278 | [] | no_license | mathi98/madhu | e296a477f3684a596c74a228c9ce867f1f60c3f8 | cae2adb19ccf7c7f12212d694cd0d09614cd5d81 | refs/heads/master | 2020-05-23T01:06:54.830389 | 2019-06-28T14:13:07 | 2019-06-28T14:13:07 | 186,582,298 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | k=int(input())
l=list(map(str,input().split()))
a=sorted(l,key=len)
for i in range(len(a)-1):
if len(a[i])==len(a[i+1]) and a[i]>a[i+1]:
a[i],a[i+1]=a[i+1],a[i]
print(*a)
| [
"noreply@github.com"
] | mathi98.noreply@github.com |
90af24bb6ece2824041e3e25a9bab47b6f3f620d | ad14c9b6454c3e8e657e994914bdfe97c2188c22 | /oostepbystep/person.py | ed964445701ac3ae80004664d035181f4263c8b3 | [] | no_license | woodyyan/twa-python-bootcamp | 1e6ecdac10a7fb3366ce7180eae93678afe1d9a8 | 3ef20a88e6954662e227ccf804b76ebc5fb6d74d | refs/heads/master | 2020-09-12T13:23:02.585373 | 2019-12-25T12:24:33 | 2019-12-25T12:24:33 | 222,439,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def introduce(self):
return 'My name is %s. I am %s years old.' % (self.name, self.age)
| [
"colorguitar@hotmail.com"
] | colorguitar@hotmail.com |
2b5f99242912f7e6a226598779f4b87dc13a02f9 | 762c28b8cda476574d71453701d90caf56973556 | /network/loss_lib.py | 8b9b49cbad66a3cc2ee3da483ed518f89e61978b | [] | no_license | RuiLiFeng/code | cf480e6f4ad598512b8147374687c6a379a9dc43 | 6bc288bd7d9e3dfc7f6847aaaa12bcf21f4950de | refs/heads/master | 2020-07-11T19:52:45.847382 | 2019-09-02T13:57:35 | 2019-09-02T13:57:35 | 204,631,332 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,058 | py | # coding=utf-8
# Copyright 2018 Google LLC & Hwalsuk Lee.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of popular GAN losses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import utils
import gin
import tensorflow as tf
def check_dimensions(d_real, d_fake, d_real_logits, d_fake_logits):
"""Checks the shapes and ranks of logits and prediction tensors.
Args:
d_real: prediction for real points, values in [0, 1], shape [batch_size, 1].
d_fake: prediction for fake points, values in [0, 1], shape [batch_size, 1].
d_real_logits: logits for real points, shape [batch_size, 1].
d_fake_logits: logits for fake points, shape [batch_size, 1].
Raises:
ValueError: if the ranks or shapes are mismatched.
"""
def _check_pair(a, b):
if a != b:
raise ValueError("Shape mismatch: %s vs %s." % (a, b))
if len(a) != 2 or len(b) != 2:
raise ValueError("Rank: expected 2, got %s and %s" % (len(a), len(b)))
if (d_real is not None) and (d_fake is not None):
_check_pair(d_real.shape.as_list(), d_fake.shape.as_list())
if (d_real_logits is not None) and (d_fake_logits is not None):
_check_pair(d_real_logits.shape.as_list(), d_fake_logits.shape.as_list())
if (d_real is not None) and (d_real_logits is not None):
_check_pair(d_real.shape.as_list(), d_real_logits.shape.as_list())
@gin.configurable(whitelist=[])
def non_saturating(d_real_logits, d_fake_logits, d_real=None, d_fake=None):
"""Returns the discriminator and generator loss for Non-saturating loss.
Args:
d_real_logits: logits for real points, shape [batch_size, 1].
d_fake_logits: logits for fake points, shape [batch_size, 1].
d_real: ignored.
d_fake: ignored.
Returns:
A tuple consisting of the discriminator loss, discriminator's loss on the
real samples and fake samples, and the generator's loss.
"""
with tf.name_scope("non_saturating_loss"):
check_dimensions(d_real, d_fake, d_real_logits, d_fake_logits)
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=d_real_logits, labels=tf.ones_like(d_real_logits),
name="cross_entropy_d_real"))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=d_fake_logits, labels=tf.zeros_like(d_fake_logits),
name="cross_entropy_d_fake"))
d_loss = d_loss_real + d_loss_fake
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=d_fake_logits, labels=tf.ones_like(d_fake_logits),
name="cross_entropy_g"))
return d_loss, d_loss_real, d_loss_fake, g_loss
@gin.configurable(whitelist=[])
def wasserstein(d_real_logits, d_fake_logits, d_real=None, d_fake=None):
"""Returns the discriminator and generator loss for Wasserstein loss.
Args:
d_real_logits: logits for real points, shape [batch_size, 1].
d_fake_logits: logits for fake points, shape [batch_size, 1].
d_real: ignored.
d_fake: ignored.
Returns:
A tuple consisting of the discriminator loss, discriminator's loss on the
real samples and fake samples, and the generator's loss.
"""
with tf.name_scope("wasserstein_loss"):
check_dimensions(d_real, d_fake, d_real_logits, d_fake_logits)
d_loss_real = -tf.reduce_mean(d_real_logits)
d_loss_fake = tf.reduce_mean(d_fake_logits)
d_loss = d_loss_real + d_loss_fake
g_loss = -d_loss_fake
return d_loss, d_loss_real, d_loss_fake, g_loss
@gin.configurable(whitelist=[])
def least_squares(d_real, d_fake, d_real_logits=None, d_fake_logits=None):
"""Returns the discriminator and generator loss for the least-squares loss.
Args:
d_real: prediction for real points, values in [0, 1], shape [batch_size, 1].
d_fake: prediction for fake points, values in [0, 1], shape [batch_size, 1].
d_real_logits: ignored.
d_fake_logits: ignored.
Returns:
A tuple consisting of the discriminator loss, discriminator's loss on the
real samples and fake samples, and the generator's loss.
"""
with tf.name_scope("least_square_loss"):
check_dimensions(d_real, d_fake, d_real_logits, d_fake_logits)
d_loss_real = tf.reduce_mean(tf.square(d_real - 1.0))
d_loss_fake = tf.reduce_mean(tf.square(d_fake))
d_loss = 0.5 * (d_loss_real + d_loss_fake)
g_loss = 0.5 * tf.reduce_mean(tf.square(d_fake - 1.0))
return d_loss, d_loss_real, d_loss_fake, g_loss
@gin.configurable(whitelist=[])
def hinge(d_real_logits, d_fake_logits, d_real=None, d_fake=None):
"""Returns the discriminator and generator loss for the hinge loss.
Args:
d_real_logits: logits for real points, shape [batch_size, 1].
d_fake_logits: logits for fake points, shape [batch_size, 1].
d_real: ignored.
d_fake: ignored.
Returns:
A tuple consisting of the discriminator loss, discriminator's loss on the
real samples and fake samples, and the generator's loss.
"""
with tf.name_scope("hinge_loss"):
check_dimensions(d_real, d_fake, d_real_logits, d_fake_logits)
d_loss_real = tf.reduce_mean(tf.nn.relu(1.0 - d_real_logits))
d_loss_fake = tf.reduce_mean(tf.nn.relu(1.0 + d_fake_logits))
d_loss = d_loss_real + d_loss_fake
g_loss = - tf.reduce_mean(d_fake_logits)
return d_loss, d_loss_real, d_loss_fake, g_loss
@gin.configurable("loss", whitelist=["fn"])
def get_losses(fn=non_saturating, **kwargs):
"""Returns the losses for the discriminator and generator."""
return utils.call_with_accepted_args(fn, **kwargs)
| [
"frl1996@mail.ustc.edu.cn"
] | frl1996@mail.ustc.edu.cn |
7b59590b96288ab93d3835dd03615e1183066465 | ab00b17a719d02ef7eea2189b052787b5dc3f3e1 | /jsk_arc2017_common/scripts/install_data.py | a29ba3942b2439d13ed1730325a124d43ba226ed | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | 708yamaguchi/jsk_apc | aa826cd991dbde43327887d03f686d278e37b8f1 | b5d0c08f9a8628237b2c7fcf1d4fb60a632d99cb | refs/heads/master | 2021-01-17T21:15:56.575294 | 2017-06-30T04:24:47 | 2017-06-30T05:41:40 | 84,169,587 | 0 | 0 | null | 2017-07-23T07:38:15 | 2017-03-07T07:39:52 | Common Lisp | UTF-8 | Python | false | false | 1,190 | py | #!/usr/bin/env python
import multiprocessing
import jsk_data
PKG = 'jsk_arc2017_common'
def download_data(path, url, md5):
p = multiprocessing.Process(
target=jsk_data.download_data,
kwargs=dict(
pkg_name=PKG,
path=path,
url=url,
md5=md5,
),
)
p.start()
def main():
# dataset: v1
# augmentation: standard
download_data(
path='data/models/fcn32s_arc2017_dataset_v1_20170326_005.pth',
url='https://drive.google.com/uc?id=0B9P1L--7Wd2vT1pnWnVsNERHTVk',
md5='ae9d13c126389bd63bccf0db1551f31e',
)
# dataset: v1
# augmentation: stack
download_data(
path='data/models/fcn32s_arc2017_dataset_v1_20170417.pth',
url='https://drive.google.com/uc?id=0B9P1L--7Wd2vYWloN0FGeEhlcGs',
md5='a098399a456de29ef8d4feaa8ae795e9',
)
# dataset: v2
# augmentation: stack
download_data(
path='data/models/fcn32s_arc2017_datasetv2_cfg003_20170612.npz',
url='https://drive.google.com/uc?id=0B9P1L--7Wd2vS1VaWWVFNDVFQ1k',
md5='e4e07b66ebeaf6b33a79eb1b605ee3a3',
)
if __name__ == '__main__':
main()
| [
"www.kentaro.wada@gmail.com"
] | www.kentaro.wada@gmail.com |
135757553766e681c1df21a38f881fc5b66bca3b | 0ec0fa7a6dc0659cc26113e3ac734434b2b771f2 | /4.refactored/log/2016-11-22@11:30/parameters.py | 454e4db41ce2579cc4d73cfb4ec5cf3a76c10cc8 | [] | no_license | goldleaf3i/3dlayout | b8c1ab3a21da9129829e70ae8a95eddccbf77e2f | 1afd3a94a6cb972d5d92fe373960bd84f258ccfe | refs/heads/master | 2021-01-23T07:37:54.396115 | 2017-03-28T10:41:06 | 2017-03-28T10:41:06 | 86,431,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,770 | py | import numpy as np
import xml.etree.cElementTree as ET
from xml.etree import ElementTree
#-------------------------------
#AZIONE = 'batch'
AZIONE = 'mappa_singola'
#-------------------------------
DISEGNA = True
#DISEGNA = False
#-------------------------------
#metodo di classificazione per le celle
#TRUE = Metodo di Matteo
#False = Metodo di Valerio
metodo_classificazione_celle = 1
#metodo_classificazione_celle = 2
#metodo_classificazione_celle = 3
#-------------------------------
#se voglio caricare i pickle basta caricare il main e quindi LOADMAIN = True, se voglio rifare tutto basta settare LOADMAIN = False
#LOADMAIN = True
LOADMAIN = False
#-------------------------------
#dipende da cosa stai considerando, per le mappe parziali non abbiamo l'xml, allora non posso calcolare l'accuracy
#mappa_completa = True
mappa_completa = False
#-------------------------------
#metodo di predizione scelto
#MCMC = True
MCMC = False #equivale a fare solo le azioni geometriche semplici
azioni_semplici = False
azione_complessa = True
class Path_obj():
def __init__(self):
#path output pickle
#self.outLayout_pickle_path = './data/OUTPUT/pickle/layout_stanze.pkl'
#self.outTopologicalGraph_pickle_path = './data/OUTPUT/pickle/grafo_topologico.pkl'
#self.out_pickle_path = './data/OUTPUT/pickle/'
#dataset input folder
self.INFOLDERS = './data/INPUT/'
self.OUTFOLDERS = './data/OUTPUT/'
#self.DATASETs =['SCHOOL']
self.DATASETs =['PARZIALI']
#self.DATASETs =['SCHOOL_grandi']
#dataset output folder
self.data_out_path = './data/OUTPUT/'
#-----------------------------MAPPA METRICA--------------------------------
#mappa metrica di default ricorda che se e' un survey allora devi anche mettere a true flip_dataset
#self.metricMap = './data/INPUT/IMGs/SURVEY/office_e.png'
#self.metricMap = './data/INPUT/IMGs/SCHOOL/herndon_updated.png'
#self.metricMap = './data/INPUT/IMGs/SCHOOL/cunningham2f_updated.png'
#self.metricMap = './data/INPUT/IMGs/SCHOOL/bronxville_updated.png'
#self.metricMap = './data/INPUT/PARZIALI/battle_creekhs_1_updated78.png'
self.metricMap = './data/INPUT/PARZIALI/Valleceppi_P1_updated93.png'
#self.metricMap = './data/INPUT/PARZIALI/battle_creekhs_1_updated148.png'
#self.metricMap = './data/INPUT/IMGs/SCHOOL_grandi/cunningham2f_updated.png'
#----------------------------NOMI FILE DI INPUT----------------------------
#xml ground truth corrispondente di default
#nome_gt = './data/INPUT/XMLs/SCHOOL/cunningham2f_updated.xml'
#self.nome_gt = './data/INPUT/XMLs/SURVEY/office_e.xml'
#self.nome_gt = './data/INPUT/XMLs/SCHOOL/cunningham2f_updated.xml'
#self.nome_gt = './data/INPUT/XMLs/SCHOOL/bronxville_updated.xml'
self.nome_gt = './data/INPUT/XMLs/SCHOOL/battle_creekhs_1_updated.xml'
#CARTELLA DOVE SALVO
self.filepath = './'
self.filepath_pickle_layout = './Layout.pkl'
self.filepath_pickle_grafoTopologico = './GrafoTopologico.pkl'
class Parameter_obj():
def __init__(self):
#distanza massima in pixel per cui 2 segmenti con stesso cluster angolare sono considerati appartenenti anche allo stesso cluster spaziale
self.minLateralSeparation = 7
#self.minLateralSeparation = 15
#self.cv2thresh = 200
self.cv2thresh = 150
#self.cv2thresh = 170
#parametri di Canny
self.minVal = 90
self.maxVal = 100
#parametri di Hough
self.rho = 1
self.theta = np.pi/180
self.thresholdHough = 20
self.minLineLength = 3#7
self.maxLineGap = 3
#parametri di DBSCAN
self.eps = 0.85#0.85#1.5#0.85
self.minPts = 1
#parametri di mean-shift
self.h = 0.023
self.minOffset = 0.00001
#diagonali (se ho delle linee oblique metto diagonali a False)
self.diagonali = True
#self.diagonali = False
#maxLineGap di hough
#self.m = 20
self.m = 50 #mappe parziali
#flip_dataset = False #questo lo metti a true se la mappa che stai guardando e' di SURVEY
self.flip_dataset=False
#aggiunti di recente
self.apertureSize = 5
self.t =1
#self.minimaLunghezzaParete = 40 #TODO: ancora da aggiungere all'XML, non lo sto piu' usando
#self.sogliaLateraleClusterMura =8 #(prima era 10, guarda quale valore e' migliore)
self.sogliaLateraleClusterMura =10
self.soglia_q_split = 0.2
#parametro che divide il peso di un edge per attribuire un peso per la matrice L di Mura
self.sigma=0.1#0.00000125 #TODO: da inserire in XML. prima era 0.1
def indent(elem, level=0):
i = "\n" + level*"\t"
j = "\n" + (level-1)*"\t"
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + "\t"
if not elem.tail or not elem.tail.strip():
elem.tail = i
for subelem in elem:
indent(subelem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = j
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = j
return elem
def to_XML(parameter_obj, path_obj):
root = ET.Element("root")
par = ET.SubElement(root, "parametri")
ET.SubElement(par, "p1", name="minLateralSeparation").text = str(parameter_obj.minLateralSeparation)
ET.SubElement(par, "p2", name="cv2thresh").text = str(parameter_obj.cv2thresh)
ET.SubElement(par, "p3", name="minVal").text = str(parameter_obj.minVal)
ET.SubElement(par, "p4", name="maxVal").text = str(parameter_obj.maxVal)
ET.SubElement(par, "p5", name="rho").text = str(parameter_obj.rho)
ET.SubElement(par, "p6", name="theta").text = str(parameter_obj.theta)
ET.SubElement(par, "p7", name="thresholdHough").text = str(parameter_obj.thresholdHough)
ET.SubElement(par, "p8", name="minLineLength").text = str(parameter_obj.minLineLength)
ET.SubElement(par, "p9", name="maxLineGap").text = str(parameter_obj.maxLineGap)
ET.SubElement(par, "p10", name="eps").text = str(parameter_obj.eps)
ET.SubElement(par, "p11", name="minPts").text = str(parameter_obj.minPts)
ET.SubElement(par, "p12", name="h").text = str(parameter_obj.h)
ET.SubElement(par, "p13", name="minOffset").text = str(parameter_obj.minOffset)
ET.SubElement(par, "p14", name="diagonali").text = str(parameter_obj.diagonali)
ET.SubElement(par, "p15", name="m").text = str(parameter_obj.m)
ET.SubElement(par, "p16", name="flip_dataset").text = str(parameter_obj.flip_dataset)
ET.SubElement(par, "p17", name="apertureSize").text = str(parameter_obj.apertureSize)
ET.SubElement(par, "p18", name="t").text = str(parameter_obj.t)
ET.SubElement(par, "p19", name="sogliaLateraleClusterMura").text = str(parameter_obj.sogliaLateraleClusterMura)
ET.SubElement(par, "p20", name="soglia_q_split").text = str(parameter_obj.soglia_q_split)
par2 = ET.SubElement(root, "path")
ET.SubElement(par2, "p1", name="INFOLDERS").text = str(path_obj.INFOLDERS)
ET.SubElement(par2, "p2", name="OUTFOLDERS").text = str(path_obj.OUTFOLDERS)
#ET.SubElement(par2, "p3", name="DATASETs").text = path_obj.DATASETs
ET.SubElement(par2, "p4", name="data_out_path").text = str(path_obj.data_out_path)
ET.SubElement(par2, "p5", name="metricMap").text = str(path_obj.metricMap)
ET.SubElement(par2, "p6", name="nome_gt").text = str(path_obj.nome_gt)
ET.SubElement(par2, "p7", name="filepath").text = str(path_obj.filepath)
ET.SubElement(par2, "p8", name="filepath_pickle_layout").text = str(path_obj.filepath_pickle_layout)
ET.SubElement(par2, "p9", name="filepath_pickle_grafoTopologico").text = str(path_obj.filepath_pickle_grafoTopologico)
tree = ET.ElementTree(root)
indent(root)
tree.write(path_obj.filepath+"parametri.xml")
#ElementTree.dump(root)
#root = ElementTree.parse(path_obj.filepath+"parametri.xml").getroot()
#indent(root)
#ElementTree.dump(root)
def load_from_XML(parXML):
#creo nuovi oggetti del tipo parameter_obj, path_obj
parameter_obj = Parameter_obj()
path_obj = Path_obj()
tree = ET.parse(parXML)
root = tree.getroot()
for parametro in root.findall('parametri'):
parameter_obj.minLateralSeparation = int(parametro.find('p1').text)
parameter_obj.cv2thresh = int(parametro.find('p2').text)
parameter_obj.minVal = int(parametro.find('p3').text)
parameter_obj.maxVal = int(parametro.find('p4').text)
parameter_obj.rho = int(parametro.find('p5').text)
parameter_obj.theta = float(parametro.find('p6').text)
parameter_obj.thresholdHough = int(parametro.find('p7').text)
parameter_obj.minLineLength = int(parametro.find('p8').text)
parameter_obj.maxLineGap = int(parametro.find('p9').text)
parameter_obj.eps = float(parametro.find('p10').text)
parameter_obj.minPts = int(parametro.find('p11').text)
parameter_obj.h = float(parametro.find('p12').text)
parameter_obj.minOffset = float(parametro.find('p13').text)
parameter_obj.diagonali = bool(parametro.find('p14').text)
parameter_obj.m = int(parametro.find('p15').text)
parameter_obj.flip_dataset = bool(parametro.find('p16').text)
parameter_obj.apertureSize = int(parametro.find('p17').text)
parameter_obj.t = int(parametro.find('p18').text)
parameter_obj.sogliaLateraleClusterMura = int(parametro.find('p19').text)
parameter_obj.soglia_q_split = int(parametro.find('p20').text)
for path in root.findall('path'):
path_obj.INFOLDERS = path.find('p1').text
path_obj.OUTFOLDERS = path.find('p2').text
#path_obj.DATASETs = path.find('p3').text #TODOquesto non e' una stringa ma una lista non puoi fare in qusto modo, ma siccome non ti dovrebbe servire mai per ora non lo caricare la lascialo vuoto
path_obj.data_out_path = path.find('p4').text
path_obj.metricMap = path.find('p5').text
path_obj.nome_gt = path.find('p6').text
path_obj.filepath = path.find('p7').text
path_obj.filepath_pickle_layout = path.find('p8').text
path_obj.filepath_pickle_grafoTopologico = path.find('p9').text
return parameter_obj, path_obj | [
"matteo.luperto@polimi.it"
] | matteo.luperto@polimi.it |
1d03ec3a4d774df3d2140acab5720877acba9a93 | 1287ad54942fd2020a217ab12004a541abb62558 | /pythonexercicios/venv/Scripts/easy_install-3.7-script.py | 85cff29658e254aba0ab93beceea719744a8fa95 | [] | no_license | LuPessoa/exerciciospy- | 637f24581722e547a62380973ca645b55ff65d90 | b5faad818f978bb13a65922edceb17888b73a407 | refs/heads/master | 2023-05-12T04:16:39.847184 | 2021-06-04T03:02:24 | 2021-06-04T03:02:24 | 374,410,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | #!C:\Users\User\phytonexercicios\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
)
| [
"lulenemacedo29@gmail.com"
] | lulenemacedo29@gmail.com |
85d948583795cd31b49710278b255196b01efc9a | 73330107dd79b9973e7fbcd9aeda8039690139c6 | /Word Break.py | 46e17ae0c2f22f0f9bdf0737cf3597433dfd68e4 | [] | no_license | nithinveer/leetcode-solutions | 2f908cd204c130034def8934d41ef6869029a403 | 196e58cd38db846653fb074cfd0363997121a7cf | refs/heads/master | 2021-06-25T22:28:50.391671 | 2021-04-20T07:04:19 | 2021-04-20T07:04:19 | 219,891,708 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,862 | py | class Solution(object):
def __init__(self):
self.found = False
self.memo = {}
def dfs(self, tmp, idx):
# print(tmp,idx)
#base case
if tmp in self.wordDict and idx == len(self.s)-1:
print(tmp, idx)
self.found = True
return True
elif idx == len(self.s)-1:
return False
if tmp+"#"+str(idx) not in self.memo:
a = False
if tmp in self.wordDict:
print(tmp, idx)
a = self.dfs(self.s[idx+1], idx+1)
b = self.dfs(tmp+self.s[idx+1], idx+1)
self.memo[tmp+"#"+str(idx)] = a or b
return self.memo[tmp+"#"+str(idx)]
# return
def wordBreak(self, s, wordDict):
"""
:type s: str
:type wordDict: List[str]
:rtype: bool
"""
self.wordDict = wordDict
self.s = s
self.dfs(s[0],0)
return self.found
def wordBreakOld(self, s, wordDict):
"""
:type s: str
:type wordDict: List[str]
:rtype: bool
"""
dp = [False for i in range(len(s) + 1)]
dp[0] = True
for i in range(1, len(s) + 1):
for word in wordDict:
if dp[i - len(word)] and s[i - len(word):i] == word:
dp[i] = True
return dp[-1]
sol = Solution()
s = "acaaaaabbbdbcccdcdaadcdccacbcccabbbbcdaaaaaadb"
wordDict = ["abbcbda","cbdaaa","b","dadaaad","dccbbbc","dccadd","ccbdbc","bbca","bacbcdd","a","bacb","cbc","adc","c","cbdbcad","cdbab","db","abbcdbd","bcb","bbdab","aa","bcadb","bacbcb","ca","dbdabdb","ccd","acbb","bdc","acbccd","d","cccdcda","dcbd","cbccacd","ac","cca","aaddc","dccac","ccdc","bbbbcda","ba","adbcadb","dca","abd","bdbb","ddadbad","badb","ab","aaaaa","acba","abbb"]
print(sol.wordBreak(s, wordDict)) | [
"nithinveer@iitj.ac.in"
] | nithinveer@iitj.ac.in |
76c2bec4f5e99360815ec228c4951542e0b41029 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/CJ/16_0_1_AndriyM_problem_a.py | 4457b1d99d97d7ebbca30cc6d77b89935bc71794 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 689 | py |
def digits(n):
return set(map(int, str(n)))
def last_seen(n):
max_mult = 1000
digs = digits(n)
current_mult = 1
while current_mult < max_mult and len(digs) < 10:
current_mult += 1
digs = digs | digits(n*current_mult)
if len(digs) == 10:
return str(current_mult*n)
else:
return 'INSOMNIA'
if __name__ == '__main__':
# for x in range(0, 1000000):
# print(last_seen(x))
with open('A-large.in', 'r') as inp:
lines = inp.readlines()
T = int(lines[0])
with open('A-large.out', 'w') as out:
for i in range(1, T+ 1):
out.write('Case #%d: %s\n' % (i, last_seen(int(lines[i]))))
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
511a86f6ac8020ec418a0b6a7242cf42893a18d8 | fe2ac50a9b03ae6b43ee12676799a3ae51495310 | /venv_coupe/Scripts/pip-script.py | 73b270447d1f10e8b0533c078a061ac573264c91 | [] | no_license | rvfedorin/CoupeCounter | 482faaaaa005b64b26b7939d1b98810f19cb1b6b | 86caeaa34dbfe71c7f8b76b8db9ee92e5dd0532e | refs/heads/master | 2020-03-30T01:53:16.502119 | 2018-10-05T09:40:10 | 2018-10-05T09:40:10 | 150,600,160 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | #!C:\Users\Wolf\PycharmProjects\coupe_count\venv_coupe\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.3','console_scripts','pip'
__requires__ = 'pip==9.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.3', 'console_scripts', 'pip')()
)
| [
"35657347+rvfedorin@users.noreply.github.com"
] | 35657347+rvfedorin@users.noreply.github.com |
a167c98b6d931efedb00c817e8c755d196939060 | 7087a5dd1772c9456f098bc024a894dcaeef5432 | /backup/build/new-calkube/kubernetes-6.0.0_snapshot-py2.7.egg/kubernetes/client/models/v1beta1_mutating_webhook_configuration.py | 27c1aca77257c666ab13d4af8626047f70c14a6a | [] | no_license | santhoshchami/kubecctl-python | 5be7a5a17cc6f08ec717b3eb1c11719ef7653aba | cd45af465e25b0799d65c573e841e2acb983ee68 | refs/heads/master | 2021-06-23T11:00:43.615062 | 2019-07-10T16:57:06 | 2019-07-10T16:57:06 | 145,669,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,939 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.10.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1MutatingWebhookConfiguration(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'webhooks': 'list[V1beta1Webhook]'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'webhooks': 'webhooks'
}
def __init__(self, api_version=None, kind=None, metadata=None, webhooks=None):
"""
V1beta1MutatingWebhookConfiguration - a model defined in Swagger
"""
self._api_version = None
self._kind = None
self._metadata = None
self._webhooks = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if webhooks is not None:
self.webhooks = webhooks
@property
def api_version(self):
"""
Gets the api_version of this V1beta1MutatingWebhookConfiguration.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1beta1MutatingWebhookConfiguration.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1beta1MutatingWebhookConfiguration.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1beta1MutatingWebhookConfiguration.
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""
Gets the kind of this V1beta1MutatingWebhookConfiguration.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1beta1MutatingWebhookConfiguration.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1beta1MutatingWebhookConfiguration.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1beta1MutatingWebhookConfiguration.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1beta1MutatingWebhookConfiguration.
Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
:return: The metadata of this V1beta1MutatingWebhookConfiguration.
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1beta1MutatingWebhookConfiguration.
Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
:param metadata: The metadata of this V1beta1MutatingWebhookConfiguration.
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def webhooks(self):
"""
Gets the webhooks of this V1beta1MutatingWebhookConfiguration.
Webhooks is a list of webhooks and the affected resources and operations.
:return: The webhooks of this V1beta1MutatingWebhookConfiguration.
:rtype: list[V1beta1Webhook]
"""
return self._webhooks
@webhooks.setter
def webhooks(self, webhooks):
"""
Sets the webhooks of this V1beta1MutatingWebhookConfiguration.
Webhooks is a list of webhooks and the affected resources and operations.
:param webhooks: The webhooks of this V1beta1MutatingWebhookConfiguration.
:type: list[V1beta1Webhook]
"""
self._webhooks = webhooks
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1MutatingWebhookConfiguration):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"root@kube-node02.local"
] | root@kube-node02.local |
c5d18fc4ec58ba588b22d10547e718a9c6c64f06 | 9644567f9cd3415e6c8b1470fde72ab178bb8eb0 | /flask/lib/python2.7/site-packages/flask_ponywhoosh/views.py | 45f5a9aeb475a8bb6957dc593b119433011ad8a6 | [
"Apache-2.0"
] | permissive | Ahmad31/Web_Flask_Cassandra | 01d44ee03fcb457ea3a01629f6fd29870663b8ff | 76acb074fce521e904f3b2a41e6ab69571f4369e | refs/heads/master | 2021-06-10T02:42:53.494515 | 2019-11-27T16:22:48 | 2019-11-27T16:22:48 | 88,625,344 | 3 | 1 | Apache-2.0 | 2021-03-19T22:23:05 | 2017-04-18T12:59:21 | Python | UTF-8 | Python | false | false | 3,007 | py | '''
flask_ponywhoosh.views module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Perform full-text searches over your database with Pony ORM and PonyWhoosh,
for flask applications.
:copyright: (c) 2015-2017 by Jonathan Prieto-Cubides & Felipe Rodriguez.
:license: MIT (see LICENSE.md)
'''
import re
from pprint import pprint
from flask import render_template
from flask.views import View
from form import SearchForm
class IndexView(View):
"""This is all the setting for the template index.html in the templates folder.
methods (list): POST and GET
"""
methods = ['POST', 'GET']
def __init__(self, pw, action_url_form):
self._pw = pw
self.debug = self._pw.debug
self.action_url_form = action_url_form
def dispatch_request(self):
""" This form is plugeable. That means that all what you need to do is
to install the package and run the url :: /ponywhoosh/
(You may change it in the config) and get the results.
Returns:
Results: The results are sent to the template using bootstrap.
They are renderized using whether a grid or a table, depending on what
models did you register.
By default the first field registered is considered the one that will
be contained in the tittle of each searh result.
"""
ctx = {'form' : SearchForm()}
except_field = None
query, fields = None, None
wildcards = True
form = SearchForm()
if self.debug:
print 'form:'
pprint(form.data)
if form.validate_on_submit():
add_wildcards = form.add_wildcards.data
except_fields = re.split('\W+', form.except_field.data, flags=re.UNICODE)
fields = re.split('\W+', form.fields.data, flags=re.UNICODE)
models = re.split('\W+', form.models.data, flags=re.UNICODE)
query = form.query.data
something = form.something.data
results = self._pw.search(
query
, add_wildcards=add_wildcards
, something=something
, include_entity=True
, fields=fields
, models=models
, except_fields=except_fields
, use_dict=False
)
if self.debug:
print 'form = ',
pprint({
'query': query
, 'add_wildcards': add_wildcards
, 'something': something
, 'include_entity': True
, 'fields': fields
, 'models': models
, 'except_fields': except_fields
})
print "results = "
pprint(results)
return render_template(
'ponywhoosh/results.html'
, entidades=list(self._pw._entities.keys())
, action_url_form=self.action_url_form
, form=form
, results=results
, n=results['cant_results']
, labels=results['results'].keys()
)
return render_template(
'ponywhoosh/index.html'
, form=form
, action_url_form=self.action_url_form
, query=query
)
| [
"aku.anwar.aan@gmail.com"
] | aku.anwar.aan@gmail.com |
4d75e4757f1aec208f56653737558c8d8f20f81c | 9a50339b63586a405d16acf732f877d17742f45b | /phase/migrations/0003_phasesubcategory_category.py | 488d0763a9d715cb7a1161ce37f23b6542996d66 | [] | no_license | ahsanhabib98/PPPsPerformance | 777798caa1e41fbcf9d084b8166cae2e9628b3ef | 1a9af5eb447cac81f9dc929a74e3fddf21d87588 | refs/heads/master | 2020-04-22T23:35:52.842941 | 2019-02-14T19:51:03 | 2019-02-14T19:51:03 | 170,747,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | # Generated by Django 2.0.5 on 2019-01-29 15:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('phase', '0002_auto_20190129_2108'),
]
operations = [
migrations.AddField(
model_name='phasesubcategory',
name='category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='phase.PhaseCategory'),
),
]
| [
"ahredoan@gmail.com"
] | ahredoan@gmail.com |
75a1839ac3f3ee5ea8197ccce4cc2e3fc8b2821b | 1462c42bef31c022040b4cae73a96d852e857b51 | /loaner/web_app/backend/api/shelf_api.py | de3b892d75dd8ee46ea078402abd727c6ff6615c | [
"Apache-2.0"
] | permissive | Getechsupport/getechgraband-go | f5e54754a2f300bfd2c8be54edd386841c5fae6c | 6d2a040ef9617fabd8c691ec1c787cf5ec9edb73 | refs/heads/master | 2022-08-17T12:11:06.299622 | 2020-03-10T16:22:34 | 2020-03-10T16:22:34 | 243,598,161 | 0 | 0 | Apache-2.0 | 2022-07-07T17:22:55 | 2020-02-27T19:21:54 | Python | UTF-8 | Python | false | false | 8,087 | py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The entry point for the Shelf methods."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from protorpc import message_types
from google.appengine.api import datastore_errors
import endpoints
from loaner.web_app.backend.api import auth
from loaner.web_app.backend.api import permissions
from loaner.web_app.backend.api import root_api
from loaner.web_app.backend.api.messages import shelf_messages
from loaner.web_app.backend.lib import api_utils
from loaner.web_app.backend.lib import search_utils
from loaner.web_app.backend.lib import user
from loaner.web_app.backend.models import device_model
from loaner.web_app.backend.models import shelf_model
_SHELF_DOES_NOT_EXIST_MSG = (
'The shelf with location: %s does not exist. Please double '
'check the location.')
_DEVICE_DOES_NOT_EXIST_MSG = (
'The device_identifier: %s is either not enrolled or an invalid serial '
'number has been entered.')
@root_api.ROOT_API.api_class(resource_name='shelf', path='shelf')
class ShelfApi(root_api.Service):
"""This class is for the Shelf API."""
@auth.method(
shelf_messages.EnrollShelfRequest,
message_types.VoidMessage,
name='enroll',
path='enroll',
http_method='POST',
permission=permissions.Permissions.MODIFY_SHELF)
def enroll(self, request):
"""Enrolls a shelf in the program."""
user_email = user.get_user_email()
self.check_xsrf_token(self.request_state)
try:
shelf_model.Shelf.enroll(
user_email=user_email,
friendly_name=request.friendly_name,
location=request.location,
latitude=request.latitude,
longitude=request.longitude,
altitude=request.altitude,
capacity=request.capacity,
audit_notification_enabled=request.audit_notification_enabled,
responsible_for_audit=request.responsible_for_audit,
audit_interval_override=request.audit_interval_override,
)
except (shelf_model.EnrollmentError, datastore_errors.BadValueError) as err:
raise endpoints.BadRequestException(str(err))
return message_types.VoidMessage()
@auth.method(
shelf_messages.ShelfRequest,
shelf_messages.Shelf,
name='get',
path='get',
http_method='POST',
permission=permissions.Permissions.READ_SHELVES)
def get(self, request):
"""Gets a shelf based on location."""
self.check_xsrf_token(self.request_state)
return api_utils.build_shelf_message_from_model(get_shelf(request))
@auth.method(
shelf_messages.ShelfRequest,
message_types.VoidMessage,
name='disable',
path='disable',
http_method='POST',
permission=permissions.Permissions.MODIFY_SHELF)
def disable(self, request):
"""Disables a shelf by its location."""
self.check_xsrf_token(self.request_state)
user_email = user.get_user_email()
shelf = get_shelf(request)
shelf.disable(user_email)
return message_types.VoidMessage()
@auth.method(
shelf_messages.UpdateShelfRequest,
message_types.VoidMessage,
name='update',
path='update',
http_method='POST',
permission=permissions.Permissions.MODIFY_SHELF)
def update(self, request):
"""Gets a shelf using location to update its properties."""
self.check_xsrf_token(self.request_state)
user_email = user.get_user_email()
shelf = get_shelf(request.shelf_request)
kwargs = api_utils.to_dict(request, shelf_model.Shelf)
shelf.edit(user_email=user_email, **kwargs)
return message_types.VoidMessage()
@auth.method(
shelf_messages.Shelf,
shelf_messages.ListShelfResponse,
name='list',
path='list',
http_method='POST',
permission=permissions.Permissions.READ_SHELVES)
def list_shelves(self, request):
"""Lists enabled or all shelves based on any shelf attribute."""
self.check_xsrf_token(self.request_state)
if request.page_size <= 0:
raise endpoints.BadRequestException(
'The value for page_size must be greater than 0.')
query, sort_options, returned_fields = (
search_utils.set_search_query_options(request.query))
if not query:
query = search_utils.to_query(request, shelf_model.Shelf)
offset = search_utils.calculate_page_offset(
page_size=request.page_size, page_number=request.page_number)
search_results = shelf_model.Shelf.search(
query_string=query, query_limit=request.page_size,
offset=offset, sort_options=sort_options,
returned_fields=returned_fields)
total_pages = search_utils.calculate_total_pages(
page_size=request.page_size, total_results=search_results.number_found)
shelves_messages = []
for document in search_results.results:
message = search_utils.document_to_message(
document, shelf_messages.Shelf())
message.shelf_request = shelf_messages.ShelfRequest()
message.shelf_request.urlsafe_key = document.doc_id
message.shelf_request.location = message.location
shelves_messages.append(message)
return shelf_messages.ListShelfResponse(
shelves=shelves_messages,
total_results=search_results.number_found,
total_pages=total_pages)
@auth.method(
shelf_messages.ShelfAuditRequest,
message_types.VoidMessage,
name='audit',
path='audit',
http_method='POST',
permission=permissions.Permissions.AUDIT_SHELF)
def audit(self, request):
"""Performs an audit on a shelf based on location."""
self.check_xsrf_token(self.request_state)
shelf = get_shelf(request.shelf_request)
user_email = user.get_user_email()
devices_on_shelf = []
shelf_string_query = 'shelf: {}'.format(shelf.key.urlsafe())
devices_retrieved_on_shelf = device_model.Device.search(shelf_string_query)
for device_identifier in request.device_identifiers:
device = device_model.Device.get(identifier=device_identifier)
if not device:
raise endpoints.NotFoundException(
_DEVICE_DOES_NOT_EXIST_MSG % device_identifier)
if device.shelf:
if device.shelf == shelf.key:
devices_on_shelf.append(device.key.urlsafe())
logging.info('Device %s is already on shelf.', device.identifier)
continue
try:
device.move_to_shelf(shelf=shelf, user_email=user_email)
devices_on_shelf.append(device.key)
except device_model.UnableToMoveToShelfError as err:
raise endpoints.BadRequestException(str(err))
for device in devices_retrieved_on_shelf.results:
if device.doc_id not in devices_on_shelf:
api_utils.get_ndb_key(device.doc_id).get().remove_from_shelf(
shelf=shelf, user_email=user_email)
shelf.audit(user_email=user_email, num_of_devices=len(devices_on_shelf))
return message_types.VoidMessage()
def get_shelf(request):
"""Gets a shelf using the location.
Args:
request: shelf_messages.ShelfRequest, the request message for a shelf.
Returns:
Shelf object.
Raises:
endpoints.NotFoundException when a shelf can not be found.
"""
if request.urlsafe_key:
shelf = api_utils.get_ndb_key(request.urlsafe_key).get()
else:
shelf = shelf_model.Shelf.get(location=request.location)
if not shelf:
raise endpoints.NotFoundException(
_SHELF_DOES_NOT_EXIST_MSG % request.location)
return shelf
| [
"email"
] | email |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.