content stringlengths 5 1.05M |
|---|
class Vocabulary:
    """Bidirectional token<->id mapping with an ``<unk>`` fallback.

    Id 0 is reserved for the unknown token. New tokens receive consecutive
    ids on first sight until freeze() is called; afterwards unseen tokens
    map to the ``<unk>`` id.
    """
    UNK = '<unk>'

    def __init__(self):
        # Forward map token -> id, reverse map str(id) -> token.
        self.__dict = {self.UNK: 0}
        self.__reverse_dict = {'0': self.UNK}
        self.__size = 1
        self.__freeze = False

    def __len__(self):
        """Number of distinct ids assigned so far (including <unk>)."""
        return self.__size

    def freeze(self):
        """Stop assigning new ids; unseen tokens map to <unk> afterwards."""
        self.__freeze = True

    def transform(self, text: str) -> int:
        """Return the id for *text*, assigning a fresh id unless frozen."""
        if text not in self.__dict and not self.__freeze:
            new_id = self.__size
            self.__dict[text] = new_id
            self.__reverse_dict[str(new_id)] = text
            self.__size = new_id + 1
        return self.__dict.get(text, self.__dict[self.UNK])

    def restore(self, num: int) -> str:
        """Return the token for id *num*, or <unk> for an unknown id."""
        return self.__reverse_dict.get(str(num), self.UNK)
|
from AVL_TABLE import AVL_TABLE as AvlT
import os
class Nodo:
    """Single AVL node: *name* is the ordering key, *avlTable* the payload."""

    def __init__(self, avlTable, name):
        self.avlTable = avlTable    # payload carried by this node
        self.name = name            # ordering key
        self.izq = self.der = None  # a fresh node starts as a leaf
        self.factor = 1             # cached subtree height (leaf == 1)
class AVL_DB:
    """Self-balancing AVL tree of databases, keyed by database name.

    Each Nodo stores the database name (ordering key) and its table
    structure (payload). ``factor`` caches the height of the subtree
    rooted at the node (a leaf has height 1).
    """

    def __init__(self):
        # Root of the tree; None while the tree is empty.
        self.raiz = None

    # -------------------------------------------------------------- insertion
    def insertar(self, tabla, name):
        """Insert database *name* carrying table container *tabla*."""
        self.raiz = self.__insertar(self.raiz, tabla, name)

    def __insertar(self, nodo, tabla, name):
        # Standard BST descent; a duplicate name falls through unchanged.
        if nodo is None:
            return Nodo(tabla, name)
        elif name < nodo.name:
            nodo.izq = self.__insertar(nodo.izq, tabla, name)
        elif name > nodo.name:
            nodo.der = self.__insertar(nodo.der, tabla, name)
        # Refresh the cached height, then rebalance on the way back up.
        nodo.factor = 1 + max(self.__obtenerFactor(nodo.der), self.__obtenerFactor(nodo.izq))
        factorBalance = self.__obtenerBalance(nodo)
        # Four classic AVL cases: LL, RR, LR, RL.
        if factorBalance > 1 and name < nodo.izq.name:
            return self.__rotacionDerecha(nodo)
        if factorBalance < -1 and name > nodo.der.name:
            return self.__rotacionIzquierda(nodo)
        if factorBalance > 1 and name > nodo.izq.name:
            nodo.izq = self.__rotacionIzquierda(nodo.izq)
            return self.__rotacionDerecha(nodo)
        if factorBalance < -1 and name < nodo.der.name:
            nodo.der = self.__rotacionDerecha(nodo.der)
            return self.__rotacionIzquierda(nodo)
        return nodo

    def __obtenerFactor(self, nodo):
        """Cached height of *nodo*'s subtree (0 for an empty subtree)."""
        if nodo is None:
            return 0
        return nodo.factor

    def __obtenerBalance(self, nodo):
        """Balance factor: left height minus right height (0 for None)."""
        if nodo is None:
            return 0
        return self.__obtenerFactor(nodo.izq) - self.__obtenerFactor(nodo.der)

    def __rotacionDerecha(self, nodo):
        """Right-rotate *nodo*; returns the new subtree root."""
        nodo2 = nodo.izq
        nodo2_1 = nodo2.der
        nodo2.der = nodo
        nodo.izq = nodo2_1
        # Recompute heights bottom-up: demoted node first, new root second.
        nodo.factor = 1 + max(self.__obtenerFactor(nodo.izq), self.__obtenerFactor(nodo.der))
        # BUG FIX: the original read nodo.der here instead of nodo2.der,
        # understating the new root's height and corrupting later balancing.
        nodo2.factor = 1 + max(self.__obtenerFactor(nodo2.izq), self.__obtenerFactor(nodo2.der))
        return nodo2

    def __rotacionIzquierda(self, nodo):
        """Left-rotate *nodo*; returns the new subtree root."""
        nodo2 = nodo.der
        nodo2_1 = nodo2.izq
        nodo.der = nodo2_1
        nodo2.izq = nodo
        nodo.factor = 1 + max(self.__obtenerFactor(nodo.izq), self.__obtenerFactor(nodo.der))
        # BUG FIX: same off-by-node as the right rotation (nodo.der vs nodo2.der).
        nodo2.factor = 1 + max(self.__obtenerFactor(nodo2.izq), self.__obtenerFactor(nodo2.der))
        return nodo2

    # --------------------------------------------------------------- deletion
    def eliminar(self, avlTable):
        """Remove the database whose *name* equals the given value."""
        self.raiz = self.__eliminar(self.raiz, avlTable)

    def __eliminar(self, raiz, name):
        # Locate the node to delete via BST descent.
        if raiz is None:
            return raiz
        elif name < raiz.name:
            raiz.izq = self.__eliminar(raiz.izq, name)
        elif name > raiz.name:
            raiz.der = self.__eliminar(raiz.der, name)
        else:
            # Case 1: leaf node (cached height 1).
            if raiz.factor == 1:
                raiz = self.__caso1(raiz)
                return raiz
            # Case 2: two children -- replace key with the in-order
            # predecessor (maximum of the left subtree).
            elif raiz.der is not None and raiz.izq is not None:
                valores = self.__caso2(raiz.izq)
                raiz.izq = valores.nodo
                raiz.name = valores.avlTable
                raiz = self.__balance(raiz)
                return raiz
            # Case 3: exactly one child -- splice it in.
            elif raiz.der is not None or raiz.izq is not None:
                raiz = self.__caso3(raiz)
                return raiz
        raiz = self.__balance(raiz)
        return raiz

    def __balance(self, raiz):
        """Refresh *raiz*'s cached height and restore the AVL invariant."""
        raiz.factor = 1 + max(self.__obtenerFactor(raiz.der), self.__obtenerFactor(raiz.izq))
        factorBalance = self.__obtenerBalance(raiz)
        # Deletion-style rebalancing driven by child balance factors.
        if factorBalance > 1 and self.__obtenerBalance(raiz.izq) >= 0:
            return self.__rotacionDerecha(raiz)
        if factorBalance < -1 and self.__obtenerBalance(raiz.der) <= 0:
            return self.__rotacionIzquierda(raiz)
        if factorBalance > 1 and self.__obtenerBalance(raiz.izq) < 0:
            raiz.izq = self.__rotacionIzquierda(raiz.izq)
            return self.__rotacionDerecha(raiz)
        if factorBalance < -1 and self.__obtenerBalance(raiz.der) > 0:
            raiz.der = self.__rotacionDerecha(raiz.der)
            return self.__rotacionIzquierda(raiz)
        return raiz

    # Deletion case 1: drop a leaf.
    def __caso1(self, nodo):
        """Delete a leaf: the subtree simply becomes empty."""
        nodo = None
        return nodo

    # Deletion case 2: detach the in-order predecessor.
    def __caso2(self, nodo):
        """Detach the maximum (rightmost) node of *nodo*'s subtree.

        Returns a small carrier with ``nodo`` (the subtree after removal)
        and ``avlTable`` (the detached node's *name*, despite the label).
        """
        class NodoyValor:
            def __init__(self):
                self.nodo = None
                self.avlTable = None
        if nodo.der is None:
            # This node is the maximum of the subtree.
            if nodo.factor == 1:
                valores = NodoyValor()
                valores.avlTable = nodo.name
                nodo = None
                valores.nodo = nodo
                return valores
            elif nodo.izq is not None:
                valores = NodoyValor()
                valores.avlTable = nodo.name
                valores.nodo = nodo.izq
                nodo = None
                return valores
        retorno = self.__caso2(nodo.der)
        nodo.der = retorno.nodo
        retorno.nodo = nodo
        return retorno

    # Deletion case 3: promote the only child.
    def __caso3(self, nodo):
        """Delete a node with exactly one child: promote that child."""
        if nodo.der is not None:
            nodo = nodo.der
            return nodo
        else:
            nodo = nodo.izq
            return nodo

    # ----------------------------------------------------------------- search
    def buscar(self, name):
        """Return the Nodo whose key equals *name*, or None when absent."""
        resultado = self.__buscar(name, self.raiz)
        return resultado

    def __buscar(self, name, nodo):
        if nodo is not None:
            if name < nodo.name:
                nodo = self.__buscar(name, nodo.izq)
            elif name > nodo.name:
                nodo = self.__buscar(name, nodo.der)
        return nodo

    # --------------------------------------------------------------- graphing
    def graficar(self):
        """Render the tree to AVL_DB.png via Graphviz ``dot``."""
        if self.raiz is not None:
            graph = 'digraph G{\n'
            graph += "node[shape = \"record\"]\n"
            graph += self.__graficar(self.raiz)
            graph += '}'
            # 'with' guarantees the .dot file is flushed/closed before dot runs.
            with open("AVL_DB.dot", "w") as file:
                file.write(graph)
            os.system('dot -Tpng AVL_DB.dot -o AVL_DB.png')
        else:
            print('No ha Bases de datos')

    def __graficar(self, raiz):
        # Emit DOT for the subtree: children first, then this node.
        if raiz is None:
            return ''
        graph = ''
        graph += self.__graficar(raiz.der)
        graph += self.__graficar(raiz.izq)
        nodo = 'node' + str(raiz.name)
        if raiz.factor == 1:
            graph += nodo + '[label=' + str(raiz.name) + ']\n'
        else:
            graph += nodo + '[label=\"<f0>|{' + str(raiz.name) + '}|<f2>\"]\n'
        if raiz.izq is not None:
            graph += nodo + ':f0 -> ' + 'node' + str(raiz.izq.name) + '\n'
        if raiz.der is not None:
            graph += nodo + ':f2 -> ' + 'node' + str(raiz.der.name) + '\n'
        return graph

    # ---------------------------------------------------------- ShowDatabases
    def recorrido(self):
        """Return database names in key order as one space-separated string."""
        lista_BD = self.__recorrido(self.raiz)
        return lista_BD

    def __recorrido(self, nodo):
        # In-order traversal concatenating 'name ' fragments.
        bases = ''
        if nodo is None:
            return ''
        bases += str(self.__recorrido(nodo.izq))
        bases += str(nodo.name) + ' '
        bases += str(self.__recorrido(nodo.der))
        return bases

    def lista_bases(self):
        """Return the database names as a list (pre-order traversal)."""
        lista = []
        lista_db = self._lista_bases(self.raiz, lista)
        return lista_db

    def _lista_bases(self, nodo, lista):
        if nodo is not None:
            lista.append(nodo.name)
            self._lista_bases(nodo.izq, lista)
            self._lista_bases(nodo.der, lista)
        return lista

    # ----------------------------------------------------------------- update
    def actualizar(self, valor_actual, nuevo_valor):
        """Rename a database: re-insert its payload under *nuevo_valor*.

        Returns 'exito' on success, 'error' when *valor_actual* is absent.
        """
        nodo = self.buscar(valor_actual)
        if nodo is not None:
            self.eliminar(nodo.name)
            self.insertar(nodo.avlTable, nuevo_valor)
            return 'exito'
        else:
            return 'error'

    # ----------------------------------------------------------- DropDatabase
    def eliminarDB(self, nodoDB):
        """Remove database *nodoDB* (thin wrapper over eliminar)."""
        self.eliminar(nodoDB)
|
import importlib
class BMCType:
    """Base class for BMC type plugins.

    Registered instances are looked up by their ``name`` attribute
    (see resolve_bmc_type below).
    """
    pass
class BMCError(Exception):
    """Base exception for BMC-related failures."""
    pass
# Module-level registry of BMC type instances, filled by register_bmc_type().
types = []
def register_bmc_type(klass):
    """Instantiate *klass* and add the instance to the global registry."""
    types.append(klass())
def resolve_bmc_type(klass_name):
    """Return the first registered instance whose name is *klass_name*.

    Raises StopIteration when no registered type has that name.
    """
    matches = (t for t in types if t.name == klass_name)
    return next(matches)
def list_bmc_types():
    """Return the live registry list (not a copy) of registered BMC types."""
    return types
# Import the built-in BMC type plugin modules for their import-time side
# effects -- presumably each calls register_bmc_type() on import (TODO confirm
# against the plugin modules themselves).
importlib.import_module('.plain', 'mr_provisioner.bmc_types')
importlib.import_module('.moonshot', 'mr_provisioner.bmc_types')
|
import pandas as pd
import numpy as np
import re
import json
# dataset is downloaded from https://data.cityofnewyork.us/Health/DOHMH-New-York-City-Restaurant-Inspection-Results/rs6k-p7g6
data = pd.read_csv('../data/raw_data/DOHMH_New_York_City_Restaurant_Inspection_Results.csv')
# select columns of interest
isp_data = data.copy()[['CAMIS', 'DBA', 'BORO', 'BUILDING',
                        'STREET', 'ZIPCODE', 'PHONE',
                        'CUISINE DESCRIPTION', 'INSPECTION TYPE',
                        'VIOLATION CODE', 'VIOLATION DESCRIPTION',
                        'Latitude', 'Longitude', 'SCORE', 'GRADE']]
# normalize the selected columns to the lowercase names used below
isp_data.columns = ['camis', 'dba', 'boro', 'building',
                    'street', 'zipcode', 'phone',
                    'cuisine description', 'inspection type',
                    'violation code', 'violation description',
                    'latitude', 'longitude', 'score', 'grade']
# parse the inspection date from the raw (uppercase) source column
isp_data['inspection date'] = pd.to_datetime(data['INSPECTION DATE'].copy())
# code inspection types into four groups: 0 : initial inspection, 1 : re-inspection, 2: reopening, -2: nan
code = np.full([isp_data.shape[0]], -2)
re_ips = isp_data['inspection type'].copy().str.contains('Re-inspection|Second', flags=re.IGNORECASE, regex=True)
re_op = isp_data['inspection type'].copy().str.contains('Reopening', flags=re.IGNORECASE, regex=True)
# NOTE(review): where 'inspection type' is NaN, re_ips/re_op are NaN, so
# neither the == True nor the == False mask matches and the code stays -2.
code[re_ips == True] = 1
code[re_ips == False] = 0
code[re_op == True] = 2
isp_data['inspection code'] = code
# replace all nans with -2
isp_data = isp_data.fillna(-2)
# check whether scores and grades match. If not, re-assign the grades.
# Rules applied per restaurant (camis), per inspection row:
#   reopening (code 2)                 -> 'P'
#   score < 14                         -> 'A'
#   re-inspection and 14 <= score < 28 -> 'B'
#   re-inspection and score >= 28      -> 'C'
#   initial inspection (code 0)        -> 'P' (pending)
#   no valid score (score < 0)         -> -2 sentinel
dfg = isp_data.groupby(['camis'])
for id in dfg.groups.keys():  # NOTE(review): 'id' shadows the builtin
    df = dfg.get_group(id)
    for i in range(df.shape[0]):
        if df.iloc[i]['score'] >= 0:
            if df.iloc[i]['inspection code'] == 2:
                if df.iloc[i]['grade'] != 'P':
                    # df.iloc[i].name is the row's original index label in isp_data
                    isp_data.loc[df.iloc[i].name, 'grade'] = 'P'
            elif df.iloc[i]['score'] < 14:
                if df.iloc[i]['grade'] != 'A':
                    isp_data.loc[df.iloc[i].name, 'grade'] = 'A'
            elif df.iloc[i]['inspection code'] == 1:
                if 14 <= df.iloc[i]['score'] < 28:
                    if df.iloc[i]['grade'] != 'B':
                        isp_data.loc[df.iloc[i].name, 'grade'] = 'B'
                elif df.iloc[i]['grade'] != 'C':
                    isp_data.loc[df.iloc[i].name, 'grade'] = 'C'
            elif df.iloc[i]['inspection code'] == 0:
                if df.iloc[i]['grade'] != 'P':
                    isp_data.loc[df.iloc[i].name, 'grade'] = 'P'
        elif df.iloc[i]['grade'] != -2 :
            isp_data.loc[df.iloc[i].name, 'grade'] = -2
# replace cuisine description with code
# NOTE(review): iterating a set makes the code assignment order
# nondeterministic across runs -- TODO confirm downstream consumers reload
# the saved dicts rather than relying on stable codes.
code_to_cuisine = dict(zip(range(len(set(isp_data['cuisine description']))), set(isp_data['cuisine description'])))
cuisine_to_code = dict(zip(set(isp_data['cuisine description']), range(len(set(isp_data['cuisine description'])))))
# add an HTML <br> variant of the long Latin cuisine label (same code)
code_to_cuisine[cuisine_to_code['Latin (Cuban, Dominican, Puerto Rican, South & Central American)']] = \
    'Latin (Cuban, Dominican, Puerto<br>Rican, South & Central American)'
cuisine_to_code['Latin (Cuban, Dominican, Puerto<br>Rican, South & Central American)'] =\
    cuisine_to_code['Latin (Cuban, Dominican, Puerto Rican, South & Central American)']
isp_data['cuisine type'] = isp_data['cuisine description'].replace(cuisine_to_code)
# replace violation description with code
code_to_violation = dict(zip(range(len(set(isp_data['violation description']))), list(set(isp_data['violation description']))))
violation_to_code = dict(zip(list(set(isp_data['violation description'])), range(len(set(isp_data['violation description'])))))
# add breaks in violation descriptions
# (wrap each description with <br> roughly every 10 words; first chunk is 5)
for item in isp_data['violation description']:
    # NOTE(review): assumes every entry is a string; the fillna(-2) above can
    # leave non-string entries whose .split() would fail -- TODO confirm (the
    # '-2' key used below suggests the column held the string '-2' in practice)
    l = item.split()
    temp = []
    i = 5
    while i < len(l):
        temp.append(' '.join(l[max(0, i - 10): i]))
        i += 10
    temp.append(' '.join(l[i - 10:]))
    item_br = '<br>'.join(temp)
    code_to_violation[violation_to_code[item]] = item_br
    violation_to_code[item_br] = violation_to_code[item]
# map the missing-value sentinel to a human-readable 'NA'
violation_to_code['NA'] = violation_to_code['-2']
code_to_violation[violation_to_code['NA']] = 'NA'
isp_data['violation type'] = isp_data['violation description'].replace(violation_to_code)
# camis as string so it groups/sorts lexically and survives JSON round-trips
isp_data['camis'] = isp_data['camis'].astype(str)
# save restaurant information
# BUG FIX: the original selected the raw uppercase column names
# ('CAMIS', 'Latitude', ...), which no longer exist on isp_data after the
# rename above, so this line raised a KeyError at runtime. Fixed to use the
# renamed lowercase columns. This preliminary dump is overwritten by the
# sorted version written below.
rst_info = isp_data[['camis', 'dba', 'boro', 'building',
                     'street', 'zipcode', 'phone',
                     'cuisine description',
                     'latitude', 'longitude']].drop_duplicates()
rst_info.to_csv('../data/clean_data/nyc_restaurants_info.csv')
# save restaurant information
rst_info = isp_data[['camis', 'dba', 'boro', 'building',
                     'street', 'zipcode', 'phone',
                     'cuisine type',
                     'latitude', 'longitude']].drop_duplicates()
rst_info = rst_info.sort_values(['camis'])
# latest grade per restaurant: last row after sorting by inspection date
rst_info['current_grade'] = isp_data.sort_values(['camis', 'inspection date']).groupby(['camis'])['grade'].apply(lambda x: x.iloc[-1]).values
rst_info.to_csv('../data/clean_data/nyc_restaurants_info.csv', index=False)
# save inspection information
# only rows with a valid (non-negative) score are kept
isp_info = isp_data[['camis', 'dba', 'inspection code', 'violation type',
                     'inspection date', 'score', 'grade']]\
    [isp_data['score'] >= 0].sort_values(['camis', 'inspection date'])
isp_info.to_csv('../data/clean_data/nyc_restaurants_grades.csv', index=False)
# save analysis results
# restaurant counts per (boro, cuisine, current grade)
analysis_data = rst_info.groupby(['boro', 'cuisine type', 'current_grade'])[['dba']].count().reset_index()
analysis_data.columns = ['boro', 'cuisine type', 'grade', 'count']
analysis_data.to_csv('../data/clean_data/nyc_restaurants_analysis.csv', index=False)
# save dicts
with open('../data/clean_data/code_to_cuisine.txt', 'w') as json_file:
    json.dump(code_to_cuisine, json_file)
with open('../data/clean_data/cuisine_to_code.txt', 'w') as json_file:
    json.dump(cuisine_to_code, json_file)
with open('../data/clean_data/code_to_violation.txt', 'w') as json_file:
    json.dump(code_to_violation, json_file)
with open('../data/clean_data/violation_to_code.txt', 'w') as json_file:
    json.dump(violation_to_code, json_file)
|
#!/usr/bin/env python
# Score is:
# % gene level right or - gene's num txpts
# + % txpt level right
# + % uniq calls right
# + % definitive rel calls right
import os, re, sys
from os.path import basename, exists, getsize
from operator import itemgetter
from sys import stderr
import argparse
class G:
    """Generic container for shared variables.

    Class attributes act as process-wide defaults: notify() reads/writes
    them directly on the class, while main()/getParameters() store state on
    the single module-level instance ``g``.
    """
    args = None                    # argparse.Namespace, set by getParameters()
    gene_transcript_counts = None  # source -> {gene_id: txpt count}, set by loadGFFLoci()
    notify_quiet = False           # when True, notify() suppresses output
    notify_logFH = None            # optional log file handle mirrored by notify()

    def __init__(self):
        # NOTE: the original also bound a useless local ``__name__ = "G"``
        # here; it had no effect and has been removed.
        self.notify_logFH = None
## GLOBALS ##
# Single shared state container used throughout this script.
# NOTE(review): ``global`` at module level is a no-op; kept as-is.
global g
g = G()
def main(): ##########################################################
    """Score a solution file against the validation set and print results.

    Loads the validation file, counts transcripts per gene from the
    FASTA/GFF3 inputs, scores the solution file, then reports per-metric
    accuracy percentages and a final summed score via notify().
    """
    args = getParameters()
    notify('#' * 47 + '\n' )
    notify('# Starting evaluation of assignments\n' )
    #'A|B|2:At|Bt|Call|5:Score|Ag|Bg|8:Category|...'
    refD, source_ids = loadValidationFile( args.known )
    loadGFFLoci() # gets counts of child transcripts for each gene from transcript fasta and gff3
    counts = scoreSolutions( refD, source_ids )
    scores = []
    # (label, observed-count key, max-count key, padding for report alignment)
    metrics = [
        ('gene', 'obs_gene_ct', 'max_gene_ct', ' ' * 6 ),
        ('transcript', 'obs_txpt_ct', 'max_txpt_ct', ' ' * 0 ),
        ('unique', 'obs_uniq_ct', 'max_uniq_ct', ' ' * 4 ),
        ('definitive', 'obs_defn_ct', 'definite_ct', ' ' * 0 )
    ]
    notify('#' * 47 + '\n' )
    notify('# Results:\n')
    for A in list( counts.keys() ):
        for B in list( counts[ A ].keys() ):
            for metric, obs_tag, tot_tag, pad in metrics:
                try:
                    obs, tot = counts[A][B][ obs_tag ], counts[A][B][ tot_tag ]
                except KeyError as e:
                    # obs_* keys are only created when the solution file had
                    # at least one row for this A/B source pair
                    notify( '# %s metrics were unset for %s to %s\n'%( metric, A,B) )
                    continue
                # notify('Obs:%d Tot:%d\n'%(obs, tot) )
                pct = 100.0 * abs( obs ) / tot
                if obs < 0: pct = pct * -1.0  # net-negative counts report as negative accuracy
                scores.append( pct )
                notify('# %s to %s %s call accuracy:%s\t% 3.2f%%\n'%(A, B, metric, pad, pct))
    notify('# ' + '-' * 45 + '\n' )
    notify('# Final Score: \t% 3.2f\n'%( sum( scores ) ) )
    notify('#' * 47 + '\n' )
# end main() ######################################################################
def commafy( number ): ############################################################
    """commafy(number) - add commas to a number.

    The str() of *number* is reversed, a comma is inserted after every
    third digit that is followed by another digit, then the result is
    reversed back. Works on ints and digit-like strings.
    """
    # NOTE: the redundant function-level ``import re`` was removed; ``re``
    # is already imported at module scope.
    return re.sub(r'(\d{3})(?=\d)', r'\1,', str(number)[::-1])[::-1]
# end commafy()
def getParameters():
    """Parse and validate command-line arguments.

    Exits with a message when any input file is missing or empty. Stores
    the parsed namespace on the shared container ``g`` and returns it.
    """
    epilog = 'Example:\n' +\
        ' %(prog)s -e 3 A.transcripts.fasta B.transcripts.fasta A.gff3 B.gff3 dev_validation_set.tsv my_solutions.tsv'
    parser = argparse.ArgumentParser(prog='check_min_accuracy.py',
                                     description='Checks a proposed solution file against a check set.\n' +\
                                     'Remember that identifiers should remain exactly as supplied.',
                                     formatter_class=argparse.RawTextHelpFormatter, epilog=epilog )
    # NOTE(review): max_error_ct has no type=int, so it may stay a string;
    # scoreSolutions() casts it with int() before comparing.
    parser.add_argument( '-e', '--errors-to-report', metavar='#', default=3, dest='max_error_ct',
                         help='Maximum number of assignment errors to report to STDERR')
    parser.add_argument( 'txpt', metavar="TRANSCRIPT-FASTA", nargs=2,
                         help="Supply space-separated paths to each transcript FASTA file for the genome pair.")
    parser.add_argument( 'gff', metavar="GFF3", nargs=2,
                         help="Supply space-separated paths to each GFF3 file for the genome pair")
    parser.add_argument( 'known', metavar="REFERENCE_FILE",
                         help="Path to the supplied reference validation file used to assess accuracy")
    parser.add_argument( 'test', metavar="SOLUTION_FILE",
                         help='Path to the solution file to assess for accuracy in the format required for the proposal:\n' +\
                         'SourceA|SourceB|A_transcript|[B_transcript]|Call|Score|A_gene|[B_gene]\n'+\
                         'The pipes represent TAB characters\n')
    args = parser.parse_args()
    # Fail fast on missing or empty input files.
    for f in args.txpt:
        if not exists( f ): sys.exit('ERROR: Failed to find transcript file %s.\n'%( f ) )
        if getsize( f )< 1: sys.exit('ERROR: Transcript file %s is empty.\n'%( f) )
    for f in args.gff:
        if not exists( f ): sys.exit('ERROR: Failed to find GFF3 file %s.\n'%( f ) )
        if getsize( f )< 1: sys.exit('ERROR: GFF3 file %s is empty.\n'%( f) )
    if not exists( args.known ): sys.exit('ERROR: Failed to find the validation file. You supplied:\n%s\n'%( args.known))
    if getsize( args.known )< 1: sys.exit('ERROR: Supplied validation file appears empty. You supplied:\n%s\n'%( args.known))
    if not exists( args.test ): sys.exit('ERROR: Failed to find the solution file. You supplied:\n%s\n'%( args.test))
    if getsize( args.test ) < 1: sys.exit('ERROR: Supplied solution file appears empty. You supplied:\n%s\n'%( args.test))
    g.args = args
    return args
# end getParameters()
def loadGFFLoci(): ################################################################
    """ Get counts of transcripts per gene.

    Populates g.gene_transcript_counts as source -> {gene_id: count} from
    the GFF3 files; transcripts present in the FASTA but absent from any
    GFF3 are counted as single-transcript genes named '<txpt_id>_gene'.
    """
    txptNameD = loadTranscriptSets() # source->set([txptIDs])
    features_to_track = set( ['mRNA', 'miRNA', 'lincRNA'] )
    g.gene_transcript_counts = {}
    # captures: 1=child feature name, 2=child source letter,
    #           3=parent gene id,     4=parent source letter
    gffPat = re.compile( r'Name=((?:transcript|gene)([A-Z])\d+[^;]*);Parent=(gene([A-Z])\d+)' )
    for gff3_file in g.args.gff:
        notify('# Examining feature relationships in %s\n'%( basename( gff3_file ) ) )
        with open( gff3_file, 'r' ) as iFH:
            for line in iFH:
                if line.startswith('#'): continue
                r = line.strip().split( '\t' )
                if len( r ) < 9: continue
                if r[2].strip() in features_to_track:
                    m = gffPat.search( r[8] )
                    if not m:
                        sys.exit('# ERROR: Unhandled GFF3 match type:\n# %s'%(line) )
                    t_source = m.group( 2 )
                    g_source = m.group( 4 )
                    if t_source != g_source:
                        sys.exit('# ERROR: Child and parent should never have different sources.\n# %s'%(line) )
                    txpt_id, gene_id = m.group( 1 ), m.group( 3 )
                    if t_source not in g.gene_transcript_counts: g.gene_transcript_counts[ t_source ] = {}
                    if gene_id not in g.gene_transcript_counts[ t_source ]:
                        g.gene_transcript_counts[ t_source ][ gene_id ] = 0
                    g.gene_transcript_counts[ t_source ][ gene_id ] += 1
                    # transcripts seen in the GFF are removed from the FASTA set
                    if txpt_id in txptNameD[ t_source ]: txptNameD[ t_source ].remove( txpt_id )
    for g_source in list( g.gene_transcript_counts.keys() ): # GFF known transcripts have been removed
        unplaced_ct = len( txptNameD[ g_source ] )
        if unplaced_ct > 0:
            notify("# Source %s had %s unplaced transcripts, treating them as single genes\n"%(g_source,
                   commafy(unplaced_ct) ) )
        else: notify('# Source %s had no unplaced transcripts\n'%( g_source) )
        for txpt_id in txptNameD[ g_source ]:
            g.gene_transcript_counts[ g_source ][ txpt_id + '_gene' ] = 1
    # now we have enough info to penalize incorrect gene calls
# end loadGFFLoci() ################################################################
def loadTranscriptSets(): ##########################################################
    """Scan each transcript FASTA and collect transcript IDs per source.

    Returns a dict mapping source letter -> set of transcript IDs; the
    FASTA listing may contain transcripts absent from the GFF3 files.
    """
    header_pat = re.compile( r'\>(transcript([A-Z])\d+)' )
    by_source = {}  # source letter -> set of transcript IDs
    for fasta_path in g.args.txpt:
        notify('# Getting a list of transcripts from %s\n'%( basename( fasta_path ) ) )
        with open( fasta_path, 'r' ) as handle:
            for record_line in handle:
                hit = header_pat.search( record_line )
                if not hit:
                    continue
                by_source.setdefault( hit.group( 2 ), set() ).add( hit.group( 1 ) )
    return by_source
# end loadTranscriptSets() #########################################################
def loadValidationFile( known_file ): ##############################################
    """ this version stores:
        refA->refB->ATxpt = 'c':call, 't':set(Btxpts), 'g':set(Bgenes)

    Also accumulates per-pair maximum counts (max_gene_ct, max_txpt_ct,
    max_uniq_ct, definite_ct) used as scoring denominators.
    Returns (refD, source_ids).
    """
    refD = {}
    source_ids = set( [] )
    # NOTE(review): 'uniques' and 'c_call' below are assigned but never used
    uniques = set([ 'absent_gene', 'absent_genome', 'absent_transcript', 'gene_fusion',
                    'unique_transcript' ] )
    dupe_call_check = set([])
    with open( known_file, 'r' ) as iFH:
        for line in iFH:
            if line.startswith( '#' ): continue
            r = line.strip( '\n' ).split( '\t' )
            if len( r ) < 1: continue
            if len( r ) < 3:
                if len( line.strip() ) == 0: continue
                sys.exit( 'Error: validation file row has less than the minimum of 3 values for A_Src|B_Src|A_ID\n"%s"'%line)
            A, B = r[0], r[1]
            if A not in refD:
                refD[ A ] = {}
                source_ids.add( A )
            if B not in refD[ A ]:
                refD[ A ][ B ] = { 'max_gene_ct':0, 'max_txpt_ct':0, 'max_uniq_ct':0, 'definite_ct':0 }
                source_ids.add( B )
            At = r[ 2 ].strip()
            if At not in refD[ A ][ B ]: refD[ A ][ B ][ At ] = { 'c':'','t':set([]),'g':set([]) }
            c_call = None
            if len( r ) < 8:
                notify( line )
                notify( '# %s\n'%( str( r ) ) )
                sys.exit('\n# ERROR: Unexpectedly short line in check file. 8 or more columns expected!\n')
            Bt, call, score = r[3].strip(), r[4].strip().lower(), float( r[5].strip() )
            # duplicate rows (same sources, transcripts, call, genes) are skipped
            call_tupe = (A, B, At, Bt, call, r[7].strip() )
            if call_tupe in dupe_call_check:
                notify("# WARNING: Already observed this mapping for %s:%s"%(At, line))
                continue
            else: dupe_call_check.add( call_tupe )
            refD[A][B]['max_txpt_ct'] += 1
            # manage the call for this row
            refD[A][B][At][ 'c' ] = call
            if call != 'no_call': refD[A][B][ 'definite_ct' ] += 1 # track number of possible definite calls
            if call == 'unique_transcript': refD[A][B]['max_uniq_ct'] += 1
            Ag = r[6].strip() # do nothing with the A gene
            for Bg in r[7].strip().split( ';' ):
                refD[A][B][At]['g'].add( Bg )
                refD[ A ][ B ][ 'max_gene_ct' ] += 1
            if Bt: refD[A][B][At]['t'].add( Bt )
    return refD, source_ids
# end loading the validation file ######################################################
def scoreSolutions( refD, source_ids ): ###############################################
    """Score each row of the solution file against the validation data.

    Mutates refD in place, adding per-pair observed counters
    (obs_gene_ct, obs_txpt_ct, obs_uniq_ct, obs_defn_ct), and returns it.
    Wrong calls subtract from the counters, so observed counts can go negative.
    """
    # refD[ A ][ B ] = { 'max_gene_ct':0, , 'max_uniq_ct':0, 'definite_ct':0 }
    # if At not in refD[ A ][ B ]: refD[ A ][ B ][ At ] = { 'c':'','t':set([]),'g':set([]) }
    #
    global g
    # begin loading the solution file
    notify("# Scoring relationships assigned in '%s' by validation set '%s'\n"%( basename( g.args.test ),
           basename( g.args.known )))
    counts = {}  # NOTE(review): never populated; the function returns refD
    reported_failures = 0
    unknown_gene_ct = 0
    fails_to_report = g.args.max_error_ct
    dupe_call_check = set([])
    DBG = True  # NOTE(review): unused
    obs_pairs = set([])
    # call types for which an empty B transcript still counts as correct
    absents = set( ['absent_gene', 'absent_transcript', 'absent_genome', 'gene_fusion'] )
    with open( g.args.test, 'r' ) as iFH:
        for line in iFH:
            if line.startswith( '#' ): continue
            r = line.strip( '\n' ).split( '\t' )
            if len( r ) < 1 or r[0] == '': continue
            if len( r ) < 8:
                notify( '# ' + line )
                notify( '# %s\n'%( str( r ) ) )
                sys.exit('\n# ERROR: Unexpectedly short line in solution file. 8 or more columns expected!\n')
            A, B = r[0].strip(), r[1].strip()
            At, Bt, call, score = r[2].strip(), r[3].strip(), r[4].strip().lower(), float( r[5].strip() )
            Ag = r[6].strip()
            if A not in refD or B not in refD[ A ]: # verify known and test file sources match
                msg = 'Error: We expect the source columns to ONLY contain "%s"\n'%('" or "'.join( list( source_ids ) ) )
                msg+= ' You supplied: "%s" and "%s"\n\n'%( A, B )
                sys.exit( msg )
            if ( A , B ) not in obs_pairs: # init observation trackers
                obs_pairs.add( ( A , B ) )
                refD[A][B][ 'obs_gene_ct' ] = 0
                refD[A][B][ 'obs_txpt_ct' ] = 0
                refD[A][B][ 'obs_uniq_ct' ] = 0
                refD[A][B][ 'obs_defn_ct' ] = 0
            if At not in refD[A][B]: continue # only score things in the known set
            FAIL = False
            # check for repeated assertions and ignore them
            call_tupe = (A, B, At, Bt, call, r[7].strip() )
            if call_tupe in dupe_call_check:
                notify( '*' * 30 + '\n' )
                notify( "# ERROR: already observed this mapping for %s.v.%s:\n%s\n"%(A,B,str(call_tupe) ) )
                notify( '*' * 30 + '\n' )
                continue
            else: dupe_call_check.add( call_tupe )
            CORRECT_CALL = True
            if call != refD[A][B][At]['c']:
                CORRECT_CALL = False
                FAIL = True
            # get B transcript score
            if CORRECT_CALL:
                if Bt:
                    if Bt in refD[A][B][At]['t']:
                        refD[A][B][ 'obs_txpt_ct' ] += 1
                        if call == 'unique_transcript': refD[A][B][ 'obs_uniq_ct' ] += 1
                    else: pass # unknown but check set may be incomplete so ignore
                elif call in absents: refD[A][B][ 'obs_txpt_ct' ] += 1
            elif Bt: refD[A][B][ 'obs_txpt_ct' ] -= 1 # got it wrong
            # get B definitive call score
            if CORRECT_CALL and call != 'no_call': refD[A][B][ 'obs_defn_ct' ] += 1
            elif call == 'no_call': pass
            elif CORRECT_CALL == False: refD[A][B][ 'obs_defn_ct' ] -= 1
            # get B gene score
            gScore = 0
            for Bg in r[7].strip().split( ';' ):
                if Bg not in refD[A][B][At]['g']:
                    if len( Bg ) < 1: continue
                    # report at most 3 unknown-gene warnings, then go quiet
                    if unknown_gene_ct < 3:
                        notify('# WARNING: Unrecognized gene "%s", possibly for '%(Bg) +\
                               'transcript missing from GFF. Ignoring\n')
                        if unknown_gene_ct == 2: notify('# WARNING: Further unrecognizezd gene examples will not be reported\n')
                    unknown_gene_ct += 1
                    continue
                if Bg in refD[A][B][At]['g']: gScore += 1
                else: # uh oh, this is wrong penalize by A gene's number of transcripts
                    gScore -= g.gene_transcript_counts[ A ][ Ag ]
            refD[A][B]['obs_gene_ct'] += gScore
            if FAIL and reported_failures < int(fails_to_report):
                if reported_failures == 0:
                    notify('#'*47 +'\n')
                    notify('# Sample failed assignments:\n' )
                eCall = refD[A][B][At]['c']
                eTxpts = '|'.join( refD[A][B][At]['t'] )
                eGenes = '|'.join( refD[A][B][At]['g'] )
                notify("# Expected call '%s' B txpt(s): '%s' B gene(s): '%s'\n"%( eCall, eTxpts, eGenes ) )
                notify("# Your data:%s\n"%( '|'.join(r) ) )
                reported_failures += 1
    return refD
# end scoreSolutions() ############################################################################
def mixSort(list, key=None): ######################################################################
    """ Sorts a list in place as humans expect ("a2" before "a10").
    This implementation based on Ned Batchelders sort from the web.

    When *key* is given it must return, for each item, the string to sort
    on. NOTE: the first parameter keeps its historical name ``list``
    (shadowing the builtin) to preserve the public signature.
    """
    numPat = re.compile(r'([0-9]+)')
    if key is None:
        ################################################################################################
        def tryint(s):
            """ Supports mixSort """
            try:
                return int(s)
            except (ValueError, TypeError):  # narrowed from a bare except
                return s
        # end tryint() ################################################################################
        def alphanum_key(s):
            """ Turns a string into a list of string and number chunks.
                "z23a" -> ["z", 23, "a"]
                Supports mixSort
            """
            # BUG FIX: the original tested isinstance(s, basestring), a
            # Python-2-only name that raises NameError under Python 3.
            if isinstance(s, str):
                return [ tryint(c) for c in numPat.split(s) ]
            else:
                return s
        # end alphanum_key ###########################################################################
    else: # supplied a way to retrieve the part of the iterable item to use for sorting
        ''' from http://stackoverflow.com/questions/6849047 '''
        convert = lambda text: int(text) if text.isdigit() else text
        alphanum_key = lambda item:[ convert( c ) for c in numPat.split( key(item) ) ]
    list.sort(key=alphanum_key)
# end mixSort() ###################################################################################
####################################################################################################
def notify(msg, suppress=None, logFH=None):
"""determines whether program feedback should be written back to the terminal
param1: a text message to print if it is ok to do so
param2: if set to True, notify will not write anything, nor will subsequent calls to notify
"""
if suppress != None: G.notify_quiet = suppress
if logFH is not None:
if G.notify_logFH is None:
G.notify_logFH = logFH
elif logFH != G.notify_logFH:
G.notify_logFH = logFH
if msg == None: msg = ""
if G.notify_quiet == False:
if msg.find("\r") > -1:
stderr.write( msg )
else:
stderr.write( msg )
if G.notify_logFH != None:
G.notify_logFH.write( msg )
return 0
# end notify() ####################################################################################
if __name__ == '__main__':
    # With no arguments, show usage instead of failing on missing positionals.
    if len(sys.argv) < 2:
        sys.argv.append("--help")
    main()
import os, os.path, datetime, string, errno
from maperipy import *
import GenIsraelHikingTiles
# http://stackoverflow.com/questions/749711/how-to-get-the-python-exe-location-programmatically
# Derive Maperitive's install dir from the location of the os module
# (this script runs inside Maperitive's embedded IronPython).
MaperitiveDir = os.path.dirname(os.path.dirname(os.path.normpath(os.__file__)))
# App.log('MaperitiveDir: ' + MaperitiveDir)
ProgramFiles = os.path.normpath(os.path.dirname(MaperitiveDir))
# App.log('ProgramFiles: ' + ProgramFiles)
# Project root is two levels above the running script's directory.
IsraelHikingDir = os.path.dirname(os.path.dirname(os.path.normpath(App.script_dir)))
# App.log('App.script_dir: ' + App.script_dir)
# App.log('IsraelHikingDir: ' + IsraelHikingDir)
# Keep Maperitive's working dir and the process working dir in sync.
App.run_command('change-dir dir="' + IsraelHikingDir +'"')
os.chdir(IsraelHikingDir)
# Keep the name of the Tile Upload command
upload_tiles = os.path.join(IsraelHikingDir, "Scripts", "Batch", "UploadTiles.bat")
def mkdir_p(path):
    """Create *path* and its parents like ``mkdir -p``; an existing directory is fine."""
    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5 -- no exist_ok in this runtime
        already_there = exc.errno == errno.EEXIST and os.path.isdir(path)
        if not already_there:
            raise
def add_to_PATH(app_dir):
    # Append ProgramFiles/app_dir to PATH unless a directory of that name is
    # already on PATH or the directory does not exist next to Maperitive.
    # NOTE(review): string.split/string.join are Python-2-only idioms --
    # presumably this runs under Maperitive's IronPython 2; confirm before
    # porting to Python 3.
    full_app_dir=os.path.join(ProgramFiles, app_dir)
    for path_dir in (string.split(os.environ["PATH"], os.pathsep)):
        if os.path.basename(path_dir) == app_dir:
            # Application already found in PATH
            return
    if not os.path.isdir(full_app_dir):
        # Application not a sibling of Maperitive
        App.log("Warning: " + app_dir + " location not found. Could not add it to PATH.")
        return
    os.environ["PATH"] = string.join([os.environ["PATH"],full_app_dir], os.pathsep)
# Make the helper tools callable by bare name from this script.
add_to_PATH("wget")
add_to_PATH("WinSCP")
add_to_PATH("Mobile Atlas Creator")
def zip_and_upload(zip_file):
    # Zip the Site directory into *zip_file* and launch the upload batch job.
    # Silently does nothing when the upload script is absent (e.g. dev machine).
    if os.path.exists(upload_tiles):
        App.log("=== Create a Zip file with new tiles ===")
        App.run_command('zip base-dir="' + os.path.join(IsraelHikingDir, 'Site') + '" zip-file="' + zip_file + '"')
        App.log("=== Upload " + zip_file + "===")
        App.log('App.start_program("' + upload_tiles + '", [' + zip_file + '])')
        App.start_program(upload_tiles, [zip_file])
# Keep batch windows open up to 24 hours
os.environ["NOPAUSE"] = "TIMEOUT /T 86400"
# Cleanup partially completed runs, and incompletly uploaded zip files
App.run_program(os.path.join(IsraelHikingDir, "Scripts", "Batch", "FindUpdatedTiles.bat"), 14400, [])
# Tile generator for the Israel bounding box, zoom levels 7..16
gen_cmd = GenIsraelHikingTiles.IsraelHikingTileGenCommand(BoundingBox(Srid.Wgs84LonLat, 34.00842, 29.32535, 35.92745, 33.398339999), 7, 16)
# Create a new map if all Zip files were created and uploaded
# NOTE(review): a zero-size zip presumably marks a completed upload --
# confirm against FindUpdatedTiles.bat/UploadTiles.bat
try:
    if os.path.getsize(os.path.join(IsraelHikingDir, 'output', 'TileUpdate.zip' )) == 0 \
            and os.path.getsize(os.path.join(IsraelHikingDir, 'output', 'TileUpdate16.zip' )) == 0 \
            and os.path.getsize(os.path.join(IsraelHikingDir, 'output', 'LastModified.zip' )) == 0 \
            and os.path.getsize(os.path.join(IsraelHikingDir, 'output', 'OverlayTiles.zip' )) == 0 :
        # All zip files were created and uploded - delete them to start a new map
        os.remove(os.path.join(IsraelHikingDir, 'output', 'TileUpdate.zip' ))
        os.remove(os.path.join(IsraelHikingDir, 'output', 'TileUpdate16.zip' ))
        os.remove(os.path.join(IsraelHikingDir, 'output', 'LastModified.zip' ))
        os.remove(os.path.join(IsraelHikingDir, 'output', 'OverlayTiles.zip' ))
except OSError as exc:
    # Never mind, some Zip files were probably not found
    pass
# No zips at all -> fresh build: refresh the OSM extract and LastModified.js.
if not os.path.exists(os.path.join(IsraelHikingDir, 'output', 'TileUpdate.zip' )) \
        and not os.path.exists(os.path.join(IsraelHikingDir, 'output', 'TileUpdate16.zip' )) \
        and not os.path.exists(os.path.join(IsraelHikingDir, 'output', 'LastModified.zip' )) \
        and not os.path.exists(os.path.join(IsraelHikingDir, 'output', 'OverlayTiles.zip' )) :
    App.log("=== Update israel-and-palestine-latest.osm.pbf ===")
    # wget for Windows: http://gnuwin32.sourceforge.net/packages/wget.htm
    App.run_program('wget.exe', 1200,
                    ["--timestamping",
                     "--no-directories", "--no-verbose",
                     '--directory-prefix="' + os.path.join(IsraelHikingDir, 'Cache') + '"',
                     "http://download.geofabrik.de/asia/israel-and-palestine-latest.osm.pbf"])
    LastModified = datetime.datetime.fromtimestamp(os.path.getmtime(os.path.join(IsraelHikingDir, 'Cache', 'israel-and-palestine-latest.osm.pbf')))
    # Warn (via pause) when the downloaded extract is older than one day.
    if LastModified + datetime.timedelta(1) < datetime.datetime.today():
        App.log("=== pbf file not updated ===");
        App.run_command("pause 15000")
    # Create LastModified.js file and add it to zip file
    App.log("=== Create Last Update info:" + LastModified.strftime("%d-%m-%Y") + " ===")
    mkdir_p(os.path.join(IsraelHikingDir, 'Site', 'Tiles')) # For initial creation of LastModified.js
    jsFile = open(os.path.join(IsraelHikingDir, 'Site', 'Tiles', 'LastModified.js'), 'w')
    jsFile.write("function getLastModifiedDate() { return '"
                 + LastModified.strftime("%d-%m-%Y")
                 + "'; }")
    jsFile.close()
    App.run_command('zip base-dir="' + os.path.join(IsraelHikingDir, 'Site')
                    + '" files="' + os.path.join(IsraelHikingDir, 'Site', 'Tiles', 'LastModified.js')
                    + '" zip-file="' + os.path.join(IsraelHikingDir, 'output', 'LastModified.zip') + '"')
else :
    App.log('=== Continueing execution of the previous build ===')
    App.run_command("pause 15000")
# --- Base map tiles (zoom 7-15); skipped when its zip already exists ---
zip_file = os.path.join(IsraelHikingDir, 'output', 'TileUpdate.zip')
if not os.path.exists(zip_file) :
    App.run_command("run-script file=" + os.path.join("Scripts", "Maperitive", "IsraelHiking.mscript"))
    # Map Created
    #Original# App.run_command("generate-tiles minzoom=7 maxzoom=15 subpixel=3 tilesdir=" + IsraelHikingDir + "\Site\Tiles use-fprint=true")
    gen_cmd.GenToDirectory(7, 15, os.path.join(IsraelHikingDir, 'Site', 'Tiles'))
    App.collect_garbage()
    program_line = os.path.join(ProgramFiles, "Mobile Atlas Creator", "Create Israel Hiking.bat")
    if os.path.exists(program_line):
        App.log("=== Launch creation of Oruxmap IsraelHiking map ===")
        App.log('App.start_program("' + program_line + '", [])')
        App.start_program(program_line, [])
    zip_and_upload(zip_file)
    App.collect_garbage()
else :
    App.log('Skipped: ' + zip_file + ' already exists.')
# --- Trails overlay tiles (zoom 7-16); skipped when its zip already exists ---
zip_file = os.path.join(IsraelHikingDir, 'output', 'OverlayTiles.zip')
if not os.path.exists(zip_file) :
    App.log("=== Create Trails Overlay tiles ===")
    App.run_command("run-script file=" + os.path.join("Scripts", "Maperitive", "IsraelHikingOverlay.mscript"))
    App.collect_garbage()
    #Original# generate-tiles minzoom=7 maxzoom=16 subpixel=3 min-tile-file-size=385 tilesdir=Site\OverlayTiles use-fprint=true
    gen_cmd.GenToDirectory(7, 16, os.path.join(IsraelHikingDir, 'Site', 'OverlayTiles'))
    App.collect_garbage()
    # zip base-dir=Site zip-file=output\OverlayTiles.zip
    zip_and_upload(zip_file)
    program_line = os.path.join(ProgramFiles, "Mobile Atlas Creator", "All IsraelHikingOverlay Maps.bat")
    if os.path.exists(program_line):
        App.log("=== Launch creation of All IsraelHikingOverlay Maps ===")
        App.log('App.start_program("' + program_line + '", [])')
        App.start_program(program_line, [])
        App.collect_garbage()
else :
    App.log('Skipped: ' + zip_file + ' already exists.')
zip_file = os.path.join(IsraelHikingDir, 'output', 'TileUpdate16.zip')
if not os.path.exists(zip_file) :
App.log('=== creating zoom level 16 ===')
App.run_command("run-script file=" + os.path.join("Scripts", "Maperitive", "IsraelHiking.mscript"))
# Map Created
App.log("=== Create tiles for zoom 16 ===")
gen_cmd.GenToDirectory(16, 16, os.path.join(IsraelHikingDir, 'Site', 'Tiles'))
App.collect_garbage()
zip_and_upload(zip_file)
else :
App.log('Skipped: ' + zip_file + ' already exists.')
zip_file = os.path.join(IsraelHikingDir, 'output', 'LastModified.zip')
if os.path.exists(zip_file) \
and os.path.getsize(zip_file) > 0 \
and os.path.exists(os.path.join(IsraelHikingDir, 'output', 'TileUpdate.zip')) \
and os.path.exists(os.path.join(IsraelHikingDir, 'output', 'TileUpdate16.zip')) \
and os.path.exists(upload_tiles):
App.log("=== Upload Last Update info ===")
App.log('App.start_program("' + upload_tiles + '", [' + zip_file + '])')
App.start_program(upload_tiles, [zip_file])
App.run_command("exit")
# vim: shiftwidth=4 expandtab ai
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import scipy.stats
def get_meta_backbone(name, state_dict=None):
    """Build a feature-extraction backbone by name, optionally loading weights.

    Args:
        name: one of the supported backbone identifiers, e.g. 'conv4',
            'resnet12', 'wrn28_10'.
        state_dict: optional state dict to load into the new backbone.

    Returns:
        The instantiated backbone module.

    Raises:
        ValueError: if `name` is not a supported backbone.
    """
    supported = ('conv4', 'resnet12', 'resnet10', 'resnet18', 'resnet34',
                 'resnet50', 'resnet101', 'resnet152', 'wrn28_10')
    if name not in supported:
        raise ValueError('Non-supported Backbone.')
    # Import lazily (as the original per-branch imports did) and look the
    # constructor up by name instead of a long if/elif chain.
    import meta_backbones
    backbone = getattr(meta_backbones, name)()
    if state_dict is not None:
        backbone.load_state_dict(state_dict)
    return backbone
class Averager():
    """Streaming arithmetic mean over a sequence of scalar values."""

    def __init__(self):
        self.n = 0  # number of samples folded in so far
        self.v = 0  # current running mean

    def add(self, x):
        """Fold one value into the running mean."""
        total = self.v * self.n + x
        self.n += 1
        self.v = total / self.n

    def item(self):
        """Return the current mean (0 before any value was added)."""
        return self.v
class Mean_confidence_interval():
    """Accumulate samples and report their mean with a Student-t confidence
    interval half-width (sem * t.ppf((1+confidence)/2, n-1))."""

    def __init__(self, confidence=0.95):
        self.list = []              # recorded samples
        self.confidence = confidence
        self.n = 0                  # sample count

    def add(self, x):
        """Record one sample."""
        self.list.append(x)
        self.n += 1

    def item(self, return_str=False):
        """Return the mean, or a '<mean%>; <half-width%>' string.

        Requires at least 2 samples (sem is undefined for fewer).
        """
        mean, standard_error = np.mean(self.list), scipy.stats.sem(self.list)
        # Fix: use the public scipy.stats.t.ppf instead of the private
        # t._ppf, which is not part of scipy's API and can vanish between
        # versions.
        h = standard_error * scipy.stats.t.ppf((1 + self.confidence) / 2, self.n - 1)
        if return_str:
            return '{0:.2f}; {1:.2f}'.format(mean * 100, h * 100)
        else:
            return mean
def count_acc(logits, labels):
    """Mean classification accuracy of `logits` against integer `labels`.

    Args:
        logits: FloatTensor of shape `(num_examples, num_classes)`.
        labels: LongTensor of shape `(num_examples,)`.

    Returns:
        A 0-dim FloatTensor with the fraction of correct argmax predictions.
    """
    _, predictions = torch.max(logits, dim=-1)
    hits = predictions.eq(labels).float()
    return torch.mean(hits)
def set_reproducibility(seed=0):
    """Seed the torch and numpy RNGs and force deterministic cuDNN kernels.

    Trades speed for reproducibility: autotuned (benchmark) and
    non-deterministic cuDNN algorithms are disabled.
    """
    torch.manual_seed(seed)
    np.random.seed(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
def get_outputs_c_h(backbone, image_len):
    """Return (channels, spatial_size) of the feature map `backbone` emits
    for a square input of side `image_len`."""
    # (output channels, spatial downsampling divisor) per backbone.
    specs = {
        'conv4':     (64, 16),
        'resnet12':  (512, 16),
        'resnet10':  (512, 8),
        'resnet18':  (512, 8),
        'resnet34':  (512, 8),
        'resnet50':  (2048, 8),
        'resnet101': (2048, 8),
        'resnet152': (2048, 8),
        'wrn28_10':  (640, 4),
    }
    c, divisor = specs[backbone]
    h = image_len // divisor
    # Special case kept from the original: 84-pixel inputs through the
    # divisor-8 resnets come out 11x11 rather than 84 // 8 == 10.
    if image_len == 84 and divisor == 8:
        h = 11
    return c, h
|
import rclpy #ros
from rclpy.node import Node
from std_msgs.msg import String #msg
class MinimalSubscriber(Node):
    """ROS2 node that logs every std_msgs String published on 'topic'."""

    def __init__(self):
        super().__init__('minimal_subscriber')
        # Queue depth 10; keeping the handle on self prevents the
        # subscription from being garbage-collected.
        self.subscription = self.create_subscription(
            String, 'topic', self.listener_callback, 10)

    def listener_callback(self, msg):
        """Log the payload of one received message."""
        self.get_logger().info('I heard: "%s"' % msg.data)
def main(args=None):
    """Initialize rclpy, spin the subscriber until shutdown, then clean up."""
    rclpy.init(args=args)
    node = MinimalSubscriber()
    rclpy.spin(node)
    # Explicit destroy is optional - the garbage collector would otherwise
    # reclaim the node when the object goes away.
    node.destroy_node()
    rclpy.shutdown()


if __name__ == '__main__':
    main()
from flask import Flask, request, jsonify
from flask_basicauth import BasicAuth
from textblob import TextBlob
import pickle as pkl
import pandas as pd
import os
# Load the pickled house-price regression model once at import time.
# NOTE(review): the file handle from open() is never closed - consider a
# with-block.
model = pkl.load(open('./models/model.pkl','rb'))
app = Flask(__name__)
# Basic-auth credentials come from the environment; when the variables are
# unset these become None and authenticated routes will reject all requests.
app.config['BASIC_AUTH_USERNAME'] = os.environ.get('BASIC_AUTH_USERNAME')
app.config['BASIC_AUTH_PASSWORD'] = os.environ.get('BASIC_AUTH_PASSWORD')
basic_auth = BasicAuth(app)
@app.route('/')
def home():
    """Health-check endpoint: confirms the API is up."""
    return 'API running'
@app.route('/sentiment/<frase>/')
def sentiment_analysis(frase):
    """Return the sentiment polarity (-1..1) of the phrase given in the URL.

    Non-English text is translated to English first, because TextBlob's
    sentiment model is English-only.
    """
    tb = TextBlob(frase)
    # NOTE(review): detect_language()/translate() were removed from recent
    # textblob releases (they relied on a retired Google endpoint) - confirm
    # the pinned textblob version still provides them.
    if tb.detect_language() != 'en':
        tb_en = tb.translate(to='en')
        polaridade = tb_en.sentiment.polarity
    else:
        polaridade = tb.sentiment.polarity
    return 'polaridad: {}'.format(polaridade)
@app.route('/house_price/',methods=['POST'])
@basic_auth.required
def house_price():
    """Predict a house price from a JSON body with keys 'tamanho' (size),
    'ano' (year) and 'garagem' (garage).

    Returns:
        JSON: {"value": <predicted price as float>}.
    """
    content = request.get_json()
    # Single-row feature matrix, in the order the model was trained with.
    features = [[content['tamanho'], content['ano'], content['garagem']]]
    prediction = model.predict(features)
    # Fix: index the first (only) prediction explicitly - calling float() on
    # a whole 1-element array is deprecated and raises on newer numpy.
    return jsonify({'value': float(prediction[0])})
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0') |
from flask import render_template, redirect, url_for, abort, flash, request
from . import main
from flask_login import login_required, current_user
from app.models import User, Post, Category
from .. import db,photos
from .forms import UpdateProfile,CommentsForm,PostForm
@main.route('/')
def index():
    """Landing page: every post, newest first."""
    posts = Post.query.order_by(Post.date_posted.desc()).all()
    # Fix: removed a leftover debug `print(posts)`.
    return render_template('index.html', posts=posts)
@main.route('/posts')
def posts():
    """List every post plus per-category collections for the template."""
    posts = Post.query.all()
    # Fix: materialize the Sales query with .all() like every other
    # category (it was the only one left as a lazy query object).
    sales = Post.query.filter_by(category_name="Sales").all()
    interview = Post.query.filter_by(category_name = 'Interview').all()
    elevator = Post.query.filter_by(category_name = 'Elevator').all()
    promotion = Post.query.filter_by(category_name = 'Promotion').all()
    personal = Post.query.filter_by(category_name = 'Personal').all()
    pickuplines = Post.query.filter_by(category_name = 'Pickuplines').all()
    title = 'PitchDom - Welcome to PitchDom'
    return render_template('posts.html', title=title , posts = posts, sales=sales, interview = interview,
                           elevator = elevator,promotion = promotion, personal = personal, pickuplines = pickuplines )
@main.route('/addpost',methods = ['GET', 'POST'])
@login_required
def addposts():
    """Create a new post (valid POST) or render the add-post form (GET).

    Fixes:
    - removed an unconditional `return redirect(url_for('main.category'))`
      that made the form-rendering branch unreachable;
    - removed the unused `user_id` local.
    """
    form = PostForm()
    if form.validate_on_submit():
        new_post = Post(category_name=form.category.data,
                        title=form.title.data,
                        content=form.post.data)
        new_post.save_post()
        return redirect(url_for('main.addposts'))
    # NOTE(review): the post is not linked to current_user - confirm whether
    # Post should carry a user relationship.
    title = 'Add-Post - Welcome to PitchDom'
    return render_template('addpost.html', title=title, post_form=form)
@main.route('/user/<uname>')
def profile(uname):
    """Render the public profile page for `uname`; 404 if unknown."""
    account = User.query.filter_by(username=uname).first()
    if account is None:
        abort(404)
    return render_template("profile/profile.html", user=account)
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_profile(uname):
    """Edit the bio on `uname`'s profile; 404 when the user is unknown."""
    account = User.query.filter_by(username=uname).first()
    if account is None:
        abort(404)
    form = UpdateProfile()
    if form.validate_on_submit():
        account.bio = form.bio.data
        db.session.add(account)
        db.session.commit()
        return redirect(url_for('.profile', uname=account.username))
    return render_template('profile/update.html', form=form)
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
    """Save an uploaded profile picture for `uname` and redirect to the profile.

    Fixes: the stored path had a literal '(unknown)' placeholder instead of
    the saved filename; unknown users now 404 instead of raising
    AttributeError on None.
    """
    user = User.query.filter_by(username = uname).first()
    if user is None:
        abort(404)
    if 'photo' in request.files:
        filename = photos.save(request.files['photo'])
        user.profile_pic_path = f'photos/{filename}'
        db.session.commit()
    return redirect(url_for('main.profile',uname=uname))
@main.route('/add_category', methods=['GET', 'POST'])
def add_cat():
    """Create a new post category from the category form."""
    # Fix: CatForm was referenced but never imported (NameError on every
    # request). Imported locally to keep the change self-contained.
    from .forms import CatForm  # NOTE(review): confirm CatForm exists in forms.py
    form = CatForm()
    if form.validate_on_submit():
        category = Category(name=form.name.data)
        db.session.add(category)
        db.session.commit()
        flash('Category added successfully.')
        return redirect(url_for('.index'))
    return render_template('add_category.html', form=form)
|
import re
import pandas as pd
import numpy as np
import scipy as sp
from scipy.spatial.distance import pdist
import sys
import warnings
import sklearn
import importlib
import copy
# shap dropped Python 2 support in 0.29.0; warn rather than fail outright.
if (sys.version_info < (3, 0)):
    warnings.warn("As of version 0.29.0 shap only supports Python 3 (not 2)!")
# Registry of deferred import failures: package name -> (message, exception).
import_errors = {}
def assert_import(package_name):
    """Re-raise the import error previously recorded for `package_name`.

    No-op when the package imported cleanly (i.e. has no recorded error).
    """
    global import_errors
    recorded = import_errors.get(package_name)
    if recorded is not None:
        msg, e = recorded
        print(msg)
        raise e
def record_import_error(package_name, msg, e):
    """Remember that importing `package_name` failed, so assert_import()
    can surface `msg` and re-raise `e` later."""
    global import_errors
    import_errors[package_name] = (msg, e)
def shapley_coefficients(n):
    """Return the n Shapley kernel coefficients out[i] = 1 / (n * C(n-1, i)).

    out[i] weights coalitions of size i when averaging marginal
    contributions over all orderings of n players.
    """
    # Fix: import scipy.special explicitly - `import scipy as sp` alone does
    # not guarantee the `special` submodule is loaded.
    import scipy.special
    out = np.zeros(n)
    for i in range(n):
        out[i] = 1 / (n * scipy.special.comb(n - 1, i))
    return out
def convert_name(ind, shap_values, input_names):
    """Resolve a feature reference to a column index.

    `ind` may already be an int (returned unchanged), a feature name from
    `input_names`, the literal "sum()" (passed through for the caller to
    special-case), or "rank(k)" meaning the k-th feature by mean |SHAP|.

    Raises:
        ValueError: for a string that matches none of the above.
    """
    if type(ind) != str:
        return ind
    matches = np.where(np.array(input_names) == ind)[0]
    if len(matches) > 0:
        return matches[0]
    if ind.startswith("rank("):
        # "rank(k)" -> index of the k-th most important feature.
        rank = int(ind[5:-1])
        return np.argsort(-np.abs(shap_values).mean(0))[rank]
    if ind == "sum()":
        # assuming here that the calling method can deal with this case
        return "sum()"
    raise ValueError("Could not find feature named: " + ind)
def potential_interactions(shap_values_column, shap_values_matrix):
    """ Order other features by how much interaction they seem to have with the feature at the given index.

    This just bins the SHAP values for a feature along that feature's value. For true Shapley interaction
    index values for SHAP see the interaction_contribs option implemented in XGBoost.

    Both arguments are Explanation-like objects exposing `.values` (SHAP
    values) and `.data` (raw feature values). Returns feature indices sorted
    by decreasing estimated interaction strength.
    """
    # ignore inds that are identical to the column
    ignore_inds = np.where((shap_values_matrix.values.T - shap_values_column.values).T.std(0) < 1e-8)
    X = shap_values_matrix.data

    # Subsample at most 10k rows to bound the cost of the correlation scan.
    if X.shape[0] > 10000:
        a = np.arange(X.shape[0])
        np.random.shuffle(a)
        inds = a[:10000]
    else:
        inds = np.arange(X.shape[0])

    x = shap_values_column.data[inds]
    srt = np.argsort(x)
    shap_ref = shap_values_column.values[inds]
    shap_ref = shap_ref[srt]
    inc = max(min(int(len(x) / 10.0), 50), 1)
    interactions = []
    for i in range(X.shape[1]):
        # Fix: np.float was removed from numpy (>= 1.24); use the builtin.
        encoded_val_other = encode_array_if_needed(X[inds, i][srt], dtype=float)

        # Correlate SHAP values with the other feature, binned along `x`.
        val_other = encoded_val_other
        v = 0.0
        if not (i in ignore_inds or np.sum(np.abs(val_other)) < 1e-8):
            for j in range(0, len(x), inc):
                if np.std(val_other[j:j + inc]) > 0 and np.std(shap_ref[j:j + inc]) > 0:
                    v += abs(np.corrcoef(shap_ref[j:j + inc], val_other[j:j + inc])[0, 1])
        val_v = v

        # Repeat using the NaN mask of the feature as the signal.
        val_other = np.isnan(encoded_val_other)
        v = 0.0
        if not (i in ignore_inds or np.sum(np.abs(val_other)) < 1e-8):
            for j in range(0, len(x), inc):
                if np.std(val_other[j:j + inc]) > 0 and np.std(shap_ref[j:j + inc]) > 0:
                    v += abs(np.corrcoef(shap_ref[j:j + inc], val_other[j:j + inc])[0, 1])
        nan_v = v

        interactions.append(max(val_v, nan_v))
    return np.argsort(-np.abs(interactions))
def approximate_interactions(index, shap_values, X, feature_names=None):
    """ Order other features by how much interaction they seem to have with the feature at the given index.

    This just bins the SHAP values for a feature along that feature's value. For true Shapley interaction
    index values for SHAP see the interaction_contribs option implemented in XGBoost.

    Args:
        index: feature to probe (int, name, or "rank(k)" - see convert_name).
        shap_values: (num_samples, num_features) SHAP value matrix.
        X: matching raw feature matrix (DataFrame or ndarray).
        feature_names: optional names; taken from a DataFrame's columns.

    Returns:
        Feature indices sorted by decreasing estimated interaction strength.
    """
    # convert from DataFrames if we got any
    if str(type(X)).endswith("'pandas.core.frame.DataFrame'>"):
        if feature_names is None:
            feature_names = X.columns
        X = X.values
    index = convert_name(index, shap_values, feature_names)

    # Subsample at most 10k rows to bound the cost of the correlation scan.
    if X.shape[0] > 10000:
        a = np.arange(X.shape[0])
        np.random.shuffle(a)
        inds = a[:10000]
    else:
        inds = np.arange(X.shape[0])

    x = X[inds, index]
    srt = np.argsort(x)
    shap_ref = shap_values[inds, index]
    shap_ref = shap_ref[srt]
    inc = max(min(int(len(x) / 10.0), 50), 1)
    interactions = []
    for i in range(X.shape[1]):
        # Fix: np.float was removed from numpy (>= 1.24); use the builtin.
        encoded_val_other = encode_array_if_needed(X[inds, i][srt], dtype=float)

        # Correlate SHAP values with the other feature, binned along `x`.
        val_other = encoded_val_other
        v = 0.0
        if not (i == index or np.sum(np.abs(val_other)) < 1e-8):
            for j in range(0, len(x), inc):
                if np.std(val_other[j:j + inc]) > 0 and np.std(shap_ref[j:j + inc]) > 0:
                    v += abs(np.corrcoef(shap_ref[j:j + inc], val_other[j:j + inc])[0, 1])
        val_v = v

        # Repeat using the NaN mask of the feature as the signal.
        val_other = np.isnan(encoded_val_other)
        v = 0.0
        if not (i == index or np.sum(np.abs(val_other)) < 1e-8):
            for j in range(0, len(x), inc):
                if np.std(val_other[j:j + inc]) > 0 and np.std(shap_ref[j:j + inc]) > 0:
                    v += abs(np.corrcoef(shap_ref[j:j + inc], val_other[j:j + inc])[0, 1])
        nan_v = v

        interactions.append(max(val_v, nan_v))
    return np.argsort(-np.abs(interactions))
def encode_array_if_needed(arr, dtype=np.float64):
    """Cast `arr` to `dtype`, ordinal-encoding it first when non-numeric.

    Values that cannot be cast (e.g. strings) are mapped to integer codes
    in the sorted order of their unique values.
    """
    try:
        return arr.astype(dtype)
    except ValueError:
        # Map each distinct value to its rank in np.unique's sorted output.
        codes = {value: rank for rank, value in enumerate(np.unique(arr))}
        return np.array([codes[value] for value in arr], dtype=dtype)
def sample(X, nsamples=100, random_state=0):
    """Return X unchanged when it has at most `nsamples` rows; otherwise
    return `nsamples` rows drawn via sklearn's (bootstrap) resample."""
    if X.shape[0] <= nsamples:
        # Nothing to reduce.
        return X
    return sklearn.utils.resample(X, n_samples=nsamples, random_state=random_state)
def safe_isinstance(obj, class_path_str):
    """
    Acts as a safe version of isinstance without having to explicitly
    import packages which may not exist in the users environment.
    Checks if obj is an instance of type specified by class_path_str.
    Parameters
    ----------
    obj: Any
        Some object you want to test against
    class_path_str: str or list
        A string or list of strings specifying full class paths
        Example: `sklearn.ensemble.RandomForestRegressor`
    Returns
    --------
    bool: True if isinstance is true and the package exists, False otherwise
    """
    if isinstance(class_path_str, str):
        candidates = [class_path_str]
    elif isinstance(class_path_str, (list, tuple)):
        candidates = class_path_str
    else:
        # Triggers the "must contain a dot" error below.
        candidates = ['']

    for class_path_str in candidates:
        if "." not in class_path_str:
            raise ValueError("class_path_str must be a string or list of strings specifying a full \
                module path to a class. Eg, 'sklearn.ensemble.RandomForestRegressor'")
        # Splits on last occurence of "."
        module_name, class_name = class_path_str.rsplit(".", 1)
        # If the module was never imported, obj cannot be an instance of a
        # class defined there - skip without importing anything new.
        module = sys.modules.get(module_name)
        if module is None:
            continue
        _class = getattr(module, class_name, None)
        if _class is not None and isinstance(obj, _class):
            return True
    return False
def format_value(s, format_str):
    """ Strips trailing zeros and uses a unicode minus sign.

    Formats `s` with `format_str` when it is not already a string, trims a
    redundant fractional tail ("1.500" -> "1.5", "2.0" -> "2") and replaces
    a leading ASCII '-' with the typographic minus U+2212.
    """
    if not issubclass(type(s), str):
        s = format_str % s
    # Fix: only strip zeros from a fractional part. The old regex
    # re.sub(r'\.?0+$', '', s) corrupted integers ("100" -> "1") and turned
    # "0" into the empty string, then crashed indexing s[0].
    if "." in s:
        s = s.rstrip("0").rstrip(".")
    if s and s[0] == "-":
        s = u"\u2212" + s[1:]
    return s
# From: https://groups.google.com/forum/m/#!topic/openrefine/G7_PSdUeno0
def ordinal_str(n):
    """Return `n` followed by its English ordinal suffix (1st, 2nd, 11th, ...)."""
    # 11-13 take 'th' despite ending in 1-3, hence the teens check first.
    if 10 <= n % 100 < 20:
        suffix = "th"
    else:
        suffix = {1: "st", 2: "nd", 3: "rd"}.get(n % 10, "th")
    return str(n) + suffix
class OpChain():
    """ A way to represent a set of dot chained operations on an object without actually running them.

    Attribute access, calling, and indexing each record an op; `apply`
    replays the recorded chain against a concrete object.
    """

    def __init__(self, root_name=""):
        self._ops = []
        self._root_name = root_name

    def _extended(self, op):
        """Return a copy of this chain with one more op appended."""
        child = OpChain(self._root_name)
        child._ops = copy.copy(self._ops)
        child._ops.append(op)
        return child

    def apply(self, obj):
        """ Applies all our ops to the given object.
        """
        for name, args, kwargs in self._ops:
            if args is None:
                # Plain attribute access (the op was never called).
                obj = getattr(obj, name)
            else:
                obj = getattr(obj, name)(*args, **kwargs)
        return obj

    def __call__(self, *args, **kwargs):
        """ Update the args for the previous operation.
        """
        child = OpChain(self._root_name)
        child._ops = copy.copy(self._ops)
        # Intentionally mutates the (shared) last op in place, matching the
        # original's shallow-copy semantics.
        child._ops[-1][1] = args
        child._ops[-1][2] = kwargs
        return child

    def __getitem__(self, item):
        return self._extended(["__getitem__", [item], {}])

    def __getattr__(self, name):
        return self._extended([name, None, None])

    def __repr__(self):
        parts = [self._root_name]
        for name, args, kwargs in self._ops:
            text = "." + name
            has_args = args is not None and len(args) > 0
            has_kwargs = kwargs is not None and len(kwargs) > 0
            if has_args or has_kwargs:
                text += "("
                if has_args:
                    text += ", ".join(str(v) for v in args)
                if has_kwargs:
                    text += ", " + ", ".join(str(k) + "=" + str(kwargs[k]) for k in kwargs.keys())
                text += ")"
            parts.append(text)
        return "".join(parts)
# I gave up on Haskell for this one after I spent hours optimizing...
# Marble game (Advent of Code 2018, day 9 style): players take turns placing
# marbles in a circle; every 23rd marble scores instead of being placed.
from blist import *  # blist gives O(log n) insert/delete on a huge list
numPlayers = 458
numBalls = 7130700  # value of the last marble played
scores = blist([0]*numPlayers)  # accumulated score per player
balls = blist([0])              # the circle of placed marbles
lastPos = 0                     # index of the "current" marble in `balls`
for i in range(1, numBalls):
    if (i%1000 == 0):
        print(i, numBalls)  # progress indicator
    player = i % numPlayers
    # print(i, lastPos, player, scores, balls)
    if (i % 23) == 0:
        # Scoring turn: keep marble i and also take the marble 7 to the left.
        lastPos = (lastPos-7) % len(balls)
        scores[player] += i
        scores[player] += balls[lastPos]
        del balls[lastPos]
    else:
        # Normal turn: insert marble i two positions clockwise.
        lastPos = 1+(lastPos+1) % (len(balls))
        balls.insert(lastPos, i)
    #print(i, lastPos, player, scores, balls)
print(max(scores))
|
import requests
from datetime import datetime
def get_country(country):
    """Fetch current COVID-19 stats for `country` from corona.lmao.ninja.

    Returns:
        (data, meta_data): `data` is [country, cases, todayCases, deaths,
        todayDeaths, recovered, active, critical]; `meta_data` is the API's
        'countryInfo' dict.
    """
    url = 'https://corona.lmao.ninja/countries/' + country
    response = requests.get(url)
    # Fix: parse the JSON body once instead of re-parsing it for every field.
    payload = response.json()
    data = [
        payload['country'],
        payload['cases'],
        payload['todayCases'],
        payload['deaths'],
        payload['todayDeaths'],
        payload['recovered'],
        payload['active'],
        payload['critical'],
    ]
    meta_data = payload['countryInfo']
    return data, meta_data
def get_country_hist(country, type):
    """Fetch the last 20 days of historical `type` data for `country`.

    Args:
        country: country name or code for the API path.
        type: timeline key, e.g. 'cases', 'deaths' or 'recovered'.
            (NOTE: shadows the builtin, but renaming would break keyword callers.)

    Returns:
        (labels, hist_data): day labels like '05 Apr' and the matching
        values, each wrapped in a one-element list.
    """
    response = requests.get('https://corona.lmao.ninja/v2/historical/' + country)
    timeline = response.json()['timeline'][type]
    labels = []
    hist_data = []
    for day, count in list(timeline.items())[-20:]:
        labels.append(datetime.strptime(day, '%m/%d/%y').strftime('%d %b'))
        hist_data.append([count])
    return labels, hist_data
|
from abc import ABC
import json
import os
import logging
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
import torch.nn as nn
from transformers.modeling_bert import BertPreTrainedModel, BertModel
from torchcrf import CRF
from ts.torch_handler.base_handler import BaseHandler
logger = logging.getLogger(__name__)
from transformers import BertTokenizer
class IntentClassifier(nn.Module):
    """Dropout + linear head mapping pooled BERT features to intent logits."""

    def __init__(self, input_dim, num_intent_labels, dropout_rate=0.):
        super(IntentClassifier, self).__init__()
        self.dropout = nn.Dropout(dropout_rate)
        self.linear = nn.Linear(input_dim, num_intent_labels)

    def forward(self, x):
        return self.linear(self.dropout(x))
class SlotClassifier(nn.Module):
    """Dropout + linear head mapping per-token BERT features to slot logits."""

    def __init__(self, input_dim, num_slot_labels, dropout_rate=0.):
        super(SlotClassifier, self).__init__()
        self.dropout = nn.Dropout(dropout_rate)
        self.linear = nn.Linear(input_dim, num_slot_labels)

    def forward(self, x):
        return self.linear(self.dropout(x))
class JointBERT(BertPreTrainedModel):
    """BERT with two jointly trained heads: sentence-level intent
    classification (on the pooled [CLS] output) and token-level slot
    filling (on the sequence output), optionally decoded with a CRF."""

    def __init__(self, config, args, intent_label_lst, slot_label_lst):
        super(JointBERT, self).__init__(config)
        self.args = args
        self.num_intent_labels = len(intent_label_lst)
        self.num_slot_labels = len(slot_label_lst)
        self.bert = BertModel(config=config)  # Load pretrained bert
        self.intent_classifier = IntentClassifier(config.hidden_size, self.num_intent_labels, args.dropout_rate)
        self.slot_classifier = SlotClassifier(config.hidden_size, self.num_slot_labels, args.dropout_rate)
        if args.use_crf:
            self.crf = CRF(num_tags=self.num_slot_labels, batch_first=True)

    def forward(self, input_ids, attention_mask, token_type_ids, intent_label_ids, slot_labels_ids):
        """Compute intent and slot logits; when label tensors are given,
        also accumulate the joint loss:
        intent loss + args.slot_loss_coef * slot loss.

        Returns:
            (total_loss, (intent_logits, slot_logits), *extra) where extra
            carries hidden states / attentions when BERT returns them.
        """
        outputs = self.bert(input_ids, attention_mask=attention_mask,
                            token_type_ids=token_type_ids)  # sequence_output, pooled_output, (hidden_states), (attentions)
        sequence_output = outputs[0]
        pooled_output = outputs[1]  # [CLS]
        intent_logits = self.intent_classifier(pooled_output)
        slot_logits = self.slot_classifier(sequence_output)
        total_loss = 0
        # 1. Intent Softmax
        if intent_label_ids is not None:
            if self.num_intent_labels == 1:
                # Single-label setup falls back to regression with MSE.
                intent_loss_fct = nn.MSELoss()
                intent_loss = intent_loss_fct(intent_logits.view(-1), intent_label_ids.view(-1))
            else:
                intent_loss_fct = nn.CrossEntropyLoss()
                intent_loss = intent_loss_fct(intent_logits.view(-1, self.num_intent_labels), intent_label_ids.view(-1))
            total_loss += intent_loss
        # 2. Slot Softmax
        if slot_labels_ids is not None:
            if self.args.use_crf:
                slot_loss = self.crf(slot_logits, slot_labels_ids, mask=attention_mask.byte(), reduction='mean')
                slot_loss = -1 * slot_loss  # negative log-likelihood
            else:
                slot_loss_fct = nn.CrossEntropyLoss(ignore_index=self.args.ignore_index)
                # Only keep active parts of the loss
                if attention_mask is not None:
                    active_loss = attention_mask.view(-1) == 1
                    active_logits = slot_logits.view(-1, self.num_slot_labels)[active_loss]
                    active_labels = slot_labels_ids.view(-1)[active_loss]
                    slot_loss = slot_loss_fct(active_logits, active_labels)
                else:
                    slot_loss = slot_loss_fct(slot_logits.view(-1, self.num_slot_labels), slot_labels_ids.view(-1))
            total_loss += self.args.slot_loss_coef * slot_loss
        outputs = ((intent_logits, slot_logits),) + outputs[2:]  # add hidden states and attention if they are here
        outputs = (total_loss,) + outputs
        return outputs  # (loss), logits, (hidden_states), (attentions) # Logits is a tuple of intent and slot logits
# Convert input text into tensors suitable for feeding the BERT model.
def convert_input_file_to_tensor_dataset(lines,
                                         args,
                                         tokenizer,
                                         pad_token_label_id,
                                         cls_token_segment_id=0,
                                         pad_token_segment_id=0,
                                         sequence_a_segment_id=0,
                                         mask_padding_with_zero=True):
    """Tokenize pre-split input lines into a padded TensorDataset.

    Args:
        lines: list of word lists (one list per input sentence).
        args: training args; provides max_seq_len.
        tokenizer: BERT tokenizer supplying CLS/SEP/UNK/PAD tokens.
        pad_token_label_id: label id used to mask out sub-word/padding
            positions in the slot-label mask.

    Returns:
        TensorDataset of (input_ids, attention_mask, token_type_ids,
        slot_label_mask), each padded/truncated to args.max_seq_len.
    """
    # Setting based on the current model type
    cls_token = tokenizer.cls_token
    sep_token = tokenizer.sep_token
    unk_token = tokenizer.unk_token
    pad_token_id = tokenizer.pad_token_id
    all_input_ids = []
    all_attention_mask = []
    all_token_type_ids = []
    all_slot_label_mask = []
    for words in lines:
        tokens = []
        slot_label_mask = []
        for word in words:
            word_tokens = tokenizer.tokenize(word)
            if not word_tokens:
                word_tokens = [unk_token]  # For handling the bad-encoded word
            tokens.extend(word_tokens)
            # Use the real label id for the first token of the word, and padding ids for the remaining tokens
            slot_label_mask.extend([pad_token_label_id + 1] + [pad_token_label_id] * (len(word_tokens) - 1))
        # Account for [CLS] and [SEP]
        special_tokens_count = 2
        if len(tokens) > args.max_seq_len - special_tokens_count:
            tokens = tokens[: (args.max_seq_len - special_tokens_count)]
            slot_label_mask = slot_label_mask[:(args.max_seq_len - special_tokens_count)]
        # Add [SEP] token
        tokens += [sep_token]
        token_type_ids = [sequence_a_segment_id] * len(tokens)
        slot_label_mask += [pad_token_label_id]
        # Add [CLS] token
        tokens = [cls_token] + tokens
        token_type_ids = [cls_token_segment_id] + token_type_ids
        slot_label_mask = [pad_token_label_id] + slot_label_mask
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # The mask has 1 for real tokens and 0 for padding tokens. Only real tokens are attended to.
        attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
        # Zero-pad up to the sequence length.
        padding_length = args.max_seq_len - len(input_ids)
        input_ids = input_ids + ([pad_token_id] * padding_length)
        attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
        token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
        slot_label_mask = slot_label_mask + ([pad_token_label_id] * padding_length)
        all_input_ids.append(input_ids)
        all_attention_mask.append(attention_mask)
        all_token_type_ids.append(token_type_ids)
        all_slot_label_mask.append(slot_label_mask)
    # Change to Tensor
    all_input_ids = torch.tensor(all_input_ids, dtype=torch.long)
    all_attention_mask = torch.tensor(all_attention_mask, dtype=torch.long)
    all_token_type_ids = torch.tensor(all_token_type_ids, dtype=torch.long)
    all_slot_label_mask = torch.tensor(all_slot_label_mask, dtype=torch.long)
    dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_slot_label_mask)
    return dataset
class TransformerClassifierHandler(BaseHandler, ABC):
    """TorchServe handler for JointBERT intent + slot prediction.

    Fixes vs. original: `model.crf.decode` referenced an undefined global
    `model` (NameError whenever args.use_crf is set) - now `self.model`;
    the output loop variable no longer shadows `slot_preds`; removed a
    stray debug `print(text)`.
    """

    def __init__(self):
        super(TransformerClassifierHandler, self).__init__()
        logger.debug('__init__ the handler')
        self.initialized = False

    def initialize(self, ctx):
        """Load the tokenizer, training args, label lists and model from the
        model-archive directory supplied by TorchServe."""
        logger.debug('Initializating the handler')
        self.manifest = ctx.manifest
        properties = ctx.system_properties
        logger.debug(str(properties.keys()))
        model_dir = properties.get("model_dir")
        logger.debug('Loading model from {0}'.format(model_dir))
        self.tokenizer = BertTokenizer.from_pretrained(model_dir)
        self.device = torch.device("cuda:" + str(properties.get("gpu_id")) if torch.cuda.is_available() else "cpu")
        self.args = torch.load(os.path.join(model_dir, 'training_args.bin'))
        # One label per line in the label files.
        self.intent_label_lst = [label.strip() for label in open(os.path.join(model_dir, self.args.intent_label_file), 'r', encoding='utf-8')]
        self.slot_label_lst = [label.strip() for label in open(os.path.join(model_dir, self.args.slot_label_file), 'r', encoding='utf-8')]
        self.model = JointBERT.from_pretrained(model_dir,
                                               args=self.args,
                                               intent_label_lst=self.intent_label_lst,
                                               slot_label_lst=self.slot_label_lst)
        self.model.to(self.device)
        self.model.eval()
        logger.debug('Transformer model from path {0} loaded successfully'.format(model_dir))
        self.initialized = True

    def preprocess(self, requests):
        """Decode the first request body into whitespace-tokenized lines and
        build the tensor dataset the model expects."""
        logger.debug('Preprocessing in the handler')
        data = requests[0]
        text = data.get("data")
        if text is None:
            text = data.get("body")
        text = text.decode("utf-8").strip()
        lines = [line.strip().split() for line in text.split("\n")]
        logger.info("Received text: %s", text)
        pad_token_label_id = self.args.ignore_index
        dataset = convert_input_file_to_tensor_dataset(lines, self.args, self.tokenizer, pad_token_label_id)
        return (lines, dataset)

    def inference(self, inputs):
        """Run intent + slot prediction, then render one formatted
        '<intent> -> word [word:slot] ...' line per input sentence."""
        lines, dataset = inputs
        # Predict
        sampler = SequentialSampler(dataset)
        data_loader = DataLoader(dataset, sampler=sampler, batch_size=4)
        all_slot_label_mask = None
        intent_preds = None
        slot_preds = None
        for batch in data_loader:
            batch = tuple(t.to(self.device) for t in batch)
            with torch.no_grad():
                inputs = {"input_ids": batch[0],
                          "attention_mask": batch[1],
                          "intent_label_ids": None,
                          "slot_labels_ids": None,
                          "token_type_ids": batch[2]}
                outputs = self.model(**inputs)
                _, (intent_logits, slot_logits) = outputs[:2]
                # Intent Prediction
                if intent_preds is None:
                    intent_preds = intent_logits.detach().cpu().numpy()
                else:
                    intent_preds = np.append(intent_preds, intent_logits.detach().cpu().numpy(), axis=0)
                # Slot prediction
                if slot_preds is None:
                    if self.args.use_crf:
                        # decode() in `torchcrf` returns list with best index directly.
                        # Fix: was `model.crf.decode` (undefined global).
                        slot_preds = np.array(self.model.crf.decode(slot_logits))
                    else:
                        slot_preds = slot_logits.detach().cpu().numpy()
                    all_slot_label_mask = batch[3].detach().cpu().numpy()
                else:
                    if self.args.use_crf:
                        slot_preds = np.append(slot_preds, np.array(self.model.crf.decode(slot_logits)), axis=0)
                    else:
                        slot_preds = np.append(slot_preds, slot_logits.detach().cpu().numpy(), axis=0)
                    all_slot_label_mask = np.append(all_slot_label_mask, batch[3].detach().cpu().numpy(), axis=0)
        intent_preds = np.argmax(intent_preds, axis=1)
        if not self.args.use_crf:
            slot_preds = np.argmax(slot_preds, axis=2)
        slot_label_map = {i: label for i, label in enumerate(self.slot_label_lst)}
        # Keep only slot predictions at first-subtoken positions.
        slot_preds_list = [[] for _ in range(slot_preds.shape[0])]
        for i in range(slot_preds.shape[0]):
            for j in range(slot_preds.shape[1]):
                if all_slot_label_mask[i, j] != self.args.ignore_index:
                    slot_preds_list[i].append(slot_label_map[slot_preds[i][j]])
        prediction = ""
        # Fix: loop variable renamed so it no longer shadows `slot_preds`.
        for words, word_slots, intent_pred in zip(lines, slot_preds_list, intent_preds):
            line = ""
            for word, pred in zip(words, word_slots):
                if pred == 'O':
                    line = line + word + " "
                else:
                    line = line + "[{}:{}] ".format(word, pred)
            prediction += "<{}> -> {}\n".format(self.intent_label_lst[intent_pred], line.strip())
        logger.info("Prediction Done!")
        return [prediction]

    def postprocess(self, inference_output):
        """Identity: inference already produced the final payload."""
        return inference_output
_service = TransformerClassifierHandler()


def handle(data, context):
    """TorchServe entry point: lazily initialize, then run the pipeline."""
    if not _service.initialized:
        _service.initialize(context)
    if data is None:
        return None
    # Fix: dropped the `except Exception as e: raise e` wrapper - it only
    # re-raised the same exception and added noise to tracebacks.
    data = _service.preprocess(data)
    data = _service.inference(data)
    return _service.postprocess(data)
|
# <Copyright 2020, Argo AI, LLC. Released under the MIT license.>
import os
import shutil
from collections import defaultdict
from pathlib import Path
from typing import Any, DefaultDict, Dict, List, Mapping, NamedTuple, Tuple
import numpy as np
from scipy.spatial.transform import Rotation
from argoverse.evaluation.detection.utils import wrap_angle
from argoverse.evaluation.eval_tracking import eval_tracks
from argoverse.utils.json_utils import save_json_dict
# Directory containing this file; anchors the dummy test-data output paths.
_ROOT = Path(__file__).resolve().parent
"""
FRAG: the number of track fragmentations (FM) counts how many times a
ground truth trajectory is interrupted (untracked). In other words, a
fragmentation is counted each time a trajectory changes its status
from tracked to untracked and tracking of that same trajectory is
resumed at a later point.
(ref: Milan et al., MOT16, https://arxiv.org/pdf/1603.00831.pdf)
IDSW: an identity switch, is counted if a ground truth target i
is matched to track j and the last known assignment was k != j
(ref: Milan et al., MOT16, https://arxiv.org/pdf/1603.00831.pdf)
MT: a target is mostly tracked if it is successfully tracked
for at least 80% of its life span. Note that it is irrelevant
for this measure whether the ID remains the same throughout the track.
(ref: Leal-Taixe et al., MOT15, https://arxiv.org/pdf/1504.01942.pdf)
Note: IDF1 is not the same as F1 score. It uses the number of false
negatives matches after global min-cost matching.
(https://arxiv.org/pdf/1609.01775.pdf)
"""
def check_mkdir(dirpath: str) -> None:
    """Create `dirpath` (including parents) if it does not already exist.

    Fix: rely solely on os.makedirs(exist_ok=True); the previous separate
    existence check was redundant and race-prone.
    """
    os.makedirs(dirpath, exist_ok=True)
def yaw_to_quaternion3d(yaw: float) -> Tuple[float, float, float, float]:
    """Convert a rotation of `yaw` radians about the z-axis into a quaternion.

    Returns:
        (qx, qy, qz, qw): scalar-last quaternion coefficients, as produced
        by scipy's `Rotation.as_quat`.
    """
    quat = Rotation.from_euler("z", yaw).as_quat()
    return quat[0], quat[1], quat[2], quat[3]
class TrackedObjRec(NamedTuple):
    """One tracked-object annotation: box size, orientation quaternion,
    box center, track identity, and semantic class."""

    l: float  # box length
    w: float  # box width
    h: float  # box height
    qx: float  # quaternion x (scalar-last convention)
    qy: float  # quaternion y
    qz: float  # quaternion z
    qw: float  # quaternion w (scalar)
    cx: float  # box center x
    cy: float  # box center y
    cz: float  # box center z
    track_id: str  # unique id of the track this record belongs to
    label_class: str  # semantic class, e.g. "VEHICLE"
class TrackedObjects:
    """Accumulates per-timestamp tracked-object label dicts for one log and
    serializes them to the dummy-log directory tree used by the eval tests."""

    def __init__(self, log_id: str, is_gt: bool) -> None:
        """Bind the dummy-log directory for `log_id` (gt vs pred tree)."""
        self.ts_to_trackedlabels_dict: DefaultDict[int, List[Dict[str, Any]]] = defaultdict(list)
        self.log_id = log_id
        subdir = "gt" if is_gt else "pred"
        self.log_dir = f"{_ROOT}/test_data/" + f"eval_tracking_dummy_logs_{subdir}/{self.log_id}"

    def add_obj(self, o: TrackedObjRec, ts_ns: int) -> None:
        """Record object `o` under timestamp `ts_ns` (nanoseconds)."""
        record = {
            "center": {"x": o.cx, "y": o.cy, "z": o.cz},
            "rotation": {"x": o.qx, "y": o.qy, "z": o.qz, "w": o.qw},
            "length": o.l,
            "width": o.w,
            "height": o.h,
            "track_label_uuid": o.track_id,
            "timestamp": ts_ns,
            "label_class": o.label_class,
        }
        self.ts_to_trackedlabels_dict[ts_ns].append(record)

    def save_to_disk(self) -> None:
        """Write one `tracked_object_labels_<ts>.json` file per timestamp,
        e.g. `tracked_object_labels_315969629019741000.json`."""
        for ts_ns, labels in self.ts_to_trackedlabels_dict.items():
            dirpath = f"{self.log_dir}/per_sweep_annotations_amodal/"
            check_mkdir(dirpath)
            save_json_dict(dirpath + f"tracked_object_labels_{ts_ns}.json", labels)
def dump_1obj_scenario_json(
    centers: List[Tuple[int, int, int]],
    yaw_angles: List[float],
    log_id: str,
    is_gt: bool,
) -> None:
    """Serialize a single-object trajectory ("obj_a", class VEHICLE) to disk.

    Each (center, yaw) pair becomes one timestamp (sequential nanosecond
    indices), with a fixed 2 x 2 x 1 box.
    """
    t_objs = TrackedObjects(log_id=log_id, is_gt=is_gt)
    length, width, height = 2, 2, 1
    track_id = "obj_a"
    label_class = "VEHICLE"
    for ts_ns, ((cx, cy, cz), yaw) in enumerate(zip(centers, yaw_angles)):
        qx, qy, qz, qw = yaw_to_quaternion3d(yaw=yaw)
        rec = TrackedObjRec(length, width, height, qx, qy, qz, qw, cx, cy, cz, track_id, label_class)
        t_objs.add_obj(rec, ts_ns=ts_ns)
    t_objs.save_to_disk()
def run_eval(exp_name: str) -> Mapping[str, Any]:
    """Run the tracker evaluation on the dummy logs and return parsed metrics.

    Writes the raw metric line to `<_ROOT>/test_data/<exp_name>.txt`, parses
    it into a dict, then deletes the dummy pred/gt log directories.
    """
    pred_log_dir = f"{_ROOT}/test_data/eval_tracking_dummy_logs_pred"
    gt_log_dir = f"{_ROOT}/test_data/eval_tracking_dummy_logs_gt"
    out_fpath = f"{_ROOT}/test_data/{exp_name}.txt"
    # Context manager guarantees the handle is flushed/closed even if
    # eval_tracks raises (the original left the file open on error).
    with open(out_fpath, "w") as out_file:
        eval_tracks(
            path_tracker_output_root=pred_log_dir,
            path_dataset_root=gt_log_dir,
            d_min=0,
            d_max=100,
            out_file=out_file,
            centroid_method="average",
            diffatt=None,
            category="VEHICLE",
        )
    with open(out_fpath, "r") as f:
        result_lines = f.readlines()
    # Single space-separated line: filename followed by 12 metric fields.
    result_vals = result_lines[0].strip().split(" ")
    fn, num_frames, mota, motp_c, motp_o, motp_i, idf1 = result_vals[:7]
    most_track, most_lost, num_fp, num_miss, num_sw, num_frag = result_vals[7:]
    result_dict = {
        "filename": fn,
        "num_frames": int(num_frames),
        "mota": float(mota),
        "motp_c": float(motp_c),
        "motp_o": float(motp_o),
        "motp_i": float(motp_i),
        "idf1": float(idf1),
        "most_track": float(most_track),
        "most_lost": float(most_lost),
        "num_fp": int(num_fp),
        "num_miss": int(num_miss),
        "num_sw": int(num_sw),
        "num_frag": int(num_frag),
    }
    shutil.rmtree(pred_log_dir)
    shutil.rmtree(gt_log_dir)
    return result_dict
def get_1obj_gt_scenario() -> Tuple[List[Tuple[int, int, int]], List[float]]:
    """Ground truth for one object driving past a stationary egovehicle.

    The object moves along +x at constant y = 2, z = 0, from (-3, 2, 0) to
    (3, 2, 0) over four timestamps with zero yaw throughout; the egovehicle
    sits at the origin (0, 0, 0).
    """
    centers = [(-3, 2, 0), (-1, 2, 0), (1, 2, 0), (3, 2, 0)]
    yaw_angles = [0.0] * 4
    return centers, yaw_angles
def test_1obj_perfect() -> None:
    """Predictions identical to ground truth must yield perfect metrics."""
    log_id = "1obj_perfect"
    gt_centers, gt_yaw_angles = get_1obj_gt_scenario()
    # Dump the ground truth first, then the (identical) predictions.
    dump_1obj_scenario_json(gt_centers, gt_yaw_angles, log_id, is_gt=True)
    dump_1obj_scenario_json(gt_centers, gt_yaw_angles, log_id, is_gt=False)
    result_dict = run_eval(exp_name=log_id)
    expected = {
        "num_frames": 4,
        "mota": 100.0,
        "motp_c": 0.0,
        "motp_o": 0.0,
        "motp_i": 0.0,
        "idf1": 1.0,
        "most_track": 1.0,
        "most_lost": 0.0,
        "num_fp": 0,
        "num_miss": 0,
        "num_sw": 0,
        "num_frag": 0,
    }
    for metric, value in expected.items():
        assert result_dict[metric] == value
def test_1obj_offset_translation() -> None:
    """Predictions offset by (-1, +1) from GT in every frame: perfect MOTA,
    but a centroid error of sqrt(2) per frame."""
    log_id = "1obj_offset_translation"
    centers = [(-4, 3, 0), (-2, 3, 0), (0, 3, 0), (2, 3, 0)]
    yaw_angles = [0.0] * 4
    gt_centers, gt_yaw_angles = get_1obj_gt_scenario()
    # Dump the ground truth first.
    dump_1obj_scenario_json(gt_centers, gt_yaw_angles, log_id, is_gt=True)
    dump_1obj_scenario_json(centers, yaw_angles, log_id, is_gt=False)
    result_dict = run_eval(exp_name=log_id)
    # Centroids will be (1, 1) away from the true centroid each time.
    assert np.allclose(result_dict["motp_c"], np.sqrt(2), atol=0.01)
    expected = {
        "num_frames": 4,
        "mota": 100.0,
        "motp_o": 0.0,
        "motp_i": 0.0,
        "idf1": 1.0,
        "most_track": 1.0,
        "most_lost": 0.0,
        "num_fp": 0,
        "num_miss": 0,
        "num_sw": 0,
        "num_frag": 0,
    }
    for metric, value in expected.items():
        assert result_dict[metric] == value
def test_1obj_poor_translation() -> None:
    """
    Miss in the 1st frame, TP in the 2nd, lost in the 3rd, re-tracked as a TP
    in the 4th: yields 1 fragmentation and prec = recall = F1 = 0.5.

    A GT track is "mostly tracked" only when covered for at least 80% of its
    life span, and "mostly lost" when recovered for less than 20% of its total
    length — this track is neither.
    """
    log_id = "1obj_poor_translation"
    centers = [(-5, 4, 0), (-2, 3, 0), (1, 4, 0), (4, 3, 0)]
    yaw_angles = [0.0] * 4
    gt_centers, gt_yaw_angles = get_1obj_gt_scenario()
    # Dump the ground truth first.
    dump_1obj_scenario_json(gt_centers, gt_yaw_angles, log_id, is_gt=True)
    dump_1obj_scenario_json(centers, yaw_angles, log_id, is_gt=False)
    result_dict = run_eval(exp_name=log_id)
    assert result_dict["num_frames"] == 4
    mota = 1 - ((2 + 2 + 0) / 4)  # 1 - (FN + FP + SW) / #GT
    assert mota == 0.0
    assert result_dict["mota"] == 0.0
    # TP frames are (1, 1) away from the true centroid.
    assert np.allclose(result_dict["motp_c"], np.sqrt(2), atol=0.01)
    assert result_dict["motp_o"] == 0.0
    assert result_dict["motp_i"] == 0.0
    prec = recall = 0.5
    f1 = 2 * prec * recall / (prec + recall)
    assert f1 == 0.5
    assert result_dict["idf1"] == 0.5
    expected = {
        "most_track": 0.0,
        "most_lost": 0.0,
        "num_fp": 2,
        "num_miss": 2,  # false negatives
        "num_sw": 0,
        "num_frag": 1,
    }
    for metric, value in expected.items():
        assert result_dict[metric] == value
def test_1obj_poor_orientation() -> None:
    """Centers match GT exactly, but yaw alternates +/-0.25 rad per frame:
    perfect MOTA/centroid metrics, nonzero orientation error."""
    log_id = "1obj_poor_orientation"
    centers = [(-3, 2, 0), (-1, 2, 0), (1, 2, 0), (3, 2, 0)]
    yaw_angles = [0.25, -0.25, 0.25, -0.25]
    gt_centers, gt_yaw_angles = get_1obj_gt_scenario()
    # Dump the ground truth first.
    dump_1obj_scenario_json(gt_centers, gt_yaw_angles, log_id, is_gt=True)
    dump_1obj_scenario_json(centers, yaw_angles, log_id, is_gt=False)
    result_dict = run_eval(exp_name=log_id)
    # 0.25 rad is about 14.32 degrees of orientation error in every frame.
    assert np.allclose(result_dict["motp_o"], 14.32, atol=0.01)
    expected = {
        "num_frames": 4,
        "mota": 100.0,
        "motp_c": 0,
        "motp_i": 0.0,
        "idf1": 1.0,
        "most_track": 1.0,
        "most_lost": 0.0,
        "num_fp": 0,
        "num_miss": 0,
        "num_sw": 0,
        "num_frag": 0,
    }
    for metric, value in expected.items():
        assert result_dict[metric] == value
def test_orientation_error1() -> None:
    """Wrapped yaw difference of 179 deg vs -179 deg is 2 deg."""
    delta = np.deg2rad([179]) - np.deg2rad([-179])
    assert np.allclose(np.rad2deg(wrap_angle(delta)), 2.0, atol=1e-2)
def test_orientation_error2() -> None:
    """Wrapped yaw difference of -179 deg vs 179 deg is 2 deg."""
    delta = np.deg2rad([-179]) - np.deg2rad([179])
    error_deg = np.rad2deg(wrap_angle(delta))
    print(error_deg)
    assert np.allclose(error_deg, 2.0, atol=1e-2)
def test_orientation_error3() -> None:
    """Wrapped yaw difference of 179 deg vs 178 deg is 1 deg."""
    delta = np.deg2rad([179]) - np.deg2rad([178])
    assert np.allclose(np.rad2deg(wrap_angle(delta)), 1.0, atol=1e-2)
def test_orientation_error4() -> None:
    """Wrapped yaw difference of 178 deg vs 179 deg is 1 deg."""
    delta = np.deg2rad([178]) - np.deg2rad([179])
    assert np.allclose(np.rad2deg(wrap_angle(delta)), 1.0, atol=1e-2)
def test_orientation_error5() -> None:
    """Wrapped yaw difference of 3 deg vs -3 deg is 6 deg."""
    delta = np.deg2rad([3]) - np.deg2rad([-3])
    assert np.allclose(np.rad2deg(wrap_angle(delta)), 6.0, atol=1e-2)
def test_orientation_error6() -> None:
    """Wrapped yaw difference of -3 deg vs 3 deg is 6 deg."""
    delta = np.deg2rad([-3]) - np.deg2rad([3])
    assert np.allclose(np.rad2deg(wrap_angle(delta)), 6.0, atol=1e-2)
def test_orientation_error7() -> None:
    """Wrapped yaw difference of -177 deg vs -179 deg is 2 deg."""
    delta = np.deg2rad([-177]) - np.deg2rad([-179])
    assert np.allclose(np.rad2deg(wrap_angle(delta)), 2.0, atol=1e-2)
def test_orientation_error8() -> None:
    """Wrapped yaw difference of -179 deg vs -177 deg is 2 deg."""
    delta = np.deg2rad([-179]) - np.deg2rad([-177])
    assert np.allclose(np.rad2deg(wrap_angle(delta)), 2.0, atol=1e-2)
def get_mot16_scenario_a() -> Tuple[List[Tuple[int, int, int]], List[float]]:
    """Single-object GT trajectory from scenario (a) of the MOT16 paper
    (https://arxiv.org/pdf/1603.00831.pdf): six timestamps, zero yaw."""
    centers = [(0, -1, 0), (2, 1, 0), (4, 1, 0), (6, 0, 0), (8, -1, 0), (10, 0, 0)]
    yaw_angles = [0.0] * 6
    return centers, yaw_angles
def test_mot16_scenario_a() -> None:
    """Scenario (a), page 8 of the MOT16 paper
    (https://arxiv.org/pdf/1603.00831.pdf): one GT object covered by a red
    track (ts 0-4) and a blue track (ts 2-5)."""
    log_id = "mot16_scenario_a"
    gt_centers, gt_yaw_angles = get_mot16_scenario_a()
    dump_1obj_scenario_json(gt_centers, gt_yaw_angles, log_id, is_gt=True)
    t_objs = TrackedObjects(log_id=log_id, is_gt=False)
    length, width, height = 2, 2, 1
    label_class = "VEHICLE"
    qx, qy, qz, qw = yaw_to_quaternion3d(yaw=0)
    # (timestamp, center) per predicted track; insertion order matches the
    # original hand-written sequence (red fully, then blue).
    tracks = [
        ("red_obj", [(0, (0, -3, 0)), (1, (2, 0, 0)), (2, (4, 0, 0)), (3, (6, 1, 0)), (4, (8, 3, 0))]),
        ("blue_obj", [(2, (4, -4, 0)), (3, (6, -2, 0)), (4, (8, 0, 0)), (5, (10, 1, 0))]),
    ]
    for track_id, points in tracks:
        for ts_ns, (cx, cy, cz) in points:
            rec = TrackedObjRec(length, width, height, qx, qy, qz, qw, cx, cy, cz, track_id, label_class)
            t_objs.add_obj(rec, ts_ns=ts_ns)
    t_objs.save_to_disk()
    result_dict = run_eval(exp_name=log_id)
    expected = {
        "num_frames": 6,
        "mota": 0.0,  # 1 - (4+1+1)/6 = 0
        "motp_c": 1,  # off by 1 meter at every matched frame
        "motp_o": 0.0,
        "motp_i": 0.0,  # same-sized box for GT and predictions
        "most_track": 1.0,  # GT obj is tracked for 80% of its lifetime
        "most_lost": 0.0,
        "num_fp": 4,
        "num_miss": 1,  # just 1 false negative
        "num_sw": 1,  # switch from red to blue
        "num_frag": 0,
    }
    for metric, value in expected.items():
        assert result_dict[metric] == value
def test_mot16_scenario_b() -> None:
    """Scenario (b), page 8 of the MOT16 paper
    (https://arxiv.org/pdf/1603.00831.pdf). Shares its ground truth with
    scenario (a); red covers ts 0-2, blue covers ts 3-5."""
    log_id = "mot16_scenario_b"
    gt_centers, gt_yaw_angles = get_mot16_scenario_a()
    dump_1obj_scenario_json(gt_centers, gt_yaw_angles, log_id, is_gt=True)
    t_objs = TrackedObjects(log_id=log_id, is_gt=False)
    length, width, height = 2, 2, 1
    label_class = "VEHICLE"
    qx, qy, qz, qw = yaw_to_quaternion3d(yaw=0)
    tracks = [
        ("red_obj", [(0, (0, -0.5, 0)), (1, (2, 0, 0)), (2, (4, 3, 0))]),
        ("blue_obj", [(3, (6, -2, 0)), (4, (8, -1, 0)), (5, (10, 1, 0))]),
    ]
    for track_id, points in tracks:
        for ts_ns, (cx, cy, cz) in points:
            rec = TrackedObjRec(length, width, height, qx, qy, qz, qw, cx, cy, cz, track_id, label_class)
            t_objs.add_obj(rec, ts_ns=ts_ns)
    t_objs.save_to_disk()
    result_dict = run_eval(exp_name=log_id)
    expected = {
        "num_frames": 6,
        "mota": 16.67,  # 1 - (2+2+1)/6 = 0.1667
        "motp_c": 0.62,  # off by [0.5, 1, 0, 1] -> 0.625 truncated
        "motp_o": 0.0,
        "motp_i": 0.0,  # same-sized box for GT and predictions
        "most_track": 0.0,  # GT obj is tracked for only 67% of its lifetime
        "most_lost": 0.0,
        "num_fp": 2,
        "num_miss": 2,  # 2 false negatives
        "num_sw": 1,  # switch from red to blue
        "num_frag": 1,  # tracked -> untracked -> tracked
    }
    for metric, value in expected.items():
        assert result_dict[metric] == value
"""
try 2 tracks
then try 2 logs
"""
if __name__ == "__main__":
""" """
test_1obj_perfect()
test_1obj_offset_translation()
test_1obj_poor_translation()
test_1obj_poor_orientation()
test_mot16_scenario_a()
test_mot16_scenario_b()
test_orientation_error1()
test_orientation_error2()
test_orientation_error3()
test_orientation_error4()
test_orientation_error5()
test_orientation_error6()
test_orientation_error7()
test_orientation_error8()
|
"""
HTTP interface for retrieving a topology.
Currently this means an XML representation of an NML topology.
Author: Henrik Thostrup Jensen <htj@nordu.net>
Copyright: NORDUnet (2013)
"""
from xml.etree import ElementTree as ET
from opennsa.shared import modifiableresource
from opennsa.topology import nmlxml
class NMLService(object):
    """Serves an NML topology as an XML web resource.

    The XML representation is rendered once at construction time and can be
    refreshed with `update()`.
    """

    def __init__(self, nml_network, can_swap_label):
        self.nml_network = nml_network
        self.can_swap_label = can_swap_label
        self._resource = modifiableresource.ModifiableResource('NMLService', 'application/xml')
        self.update()

    def update(self):
        """Re-render the topology XML and refresh the served representation."""
        topology = nmlxml.topologyXML(self.nml_network, self.can_swap_label)
        self._resource.updateResource(ET.tostring(topology, 'utf-8'))

    def resource(self):
        """Return the underlying web resource."""
        return self._resource
|
"""Delete a single (1-indexed) line from a text file, in place."""
import argparse


def main() -> None:
    """Parse --file/--line and rewrite the file without that line."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--file', type=str, required=True)
    parser.add_argument('--line', type=int, required=True)
    args = parser.parse_args()
    with open(args.file, 'rt') as f:
        lines = f.readlines()
    # Keep every line except the requested one (`--line` is 1-indexed).
    remaining = [text for idx, text in enumerate(lines, start=1) if idx != args.line]
    with open(args.file, 'wt') as f:
        f.writelines(remaining)


# Guard clause: the original ran at import time, which breaks tooling that
# imports the module; behavior when executed as a script is unchanged.
if __name__ == "__main__":
    main()
|
from netmiko import ConnectHandler
class Router(object):
    """Minimal wrapper around a netmiko SSH session to a Cisco-style device.

    Each operation opens a fresh connection via `_connect()`, which also
    caches the handle on `self.net_connect` (as the original did).
    """

    def __init__(self, hostname=None, os=None, device_type='cisco_ios', ip=None, username='cisco', password='cisco', secret='cisco'):
        self.hostname = hostname
        self.os = os
        self.interfaces = 2  # NOTE(review): hard-coded count — presumably a demo placeholder; confirm
        self.device_type = device_type
        self.ip = ip
        self.username = username
        self.password = password
        # Bug fix: `secret` was accepted but silently discarded; keep it so
        # enable-mode credentials are available to callers.
        self.secret = secret

    def _connect(self):
        """Open a netmiko connection, cache it on self.net_connect, return it."""
        self.net_connect = ConnectHandler(device_type=self.device_type, ip=self.ip,
                                          username=self.username, password=self.password)
        return self.net_connect

    def show_interface(self):
        """Return the device's `show ip int brief` output."""
        return self._connect().send_command('show ip int brief')

    def save_config(self):
        """Persist the running configuration (`write memory`)."""
        return self._connect().send_command_expect('write memory')

    def check_cdp(self):
        """Return CDP-related lines from the running configuration."""
        return self._connect().send_command('show run | i cdp')

    def turn_off_cdp(self):
        """Enter enable mode and disable CDP globally; return command output."""
        conn = self._connect()
        conn.enable()
        config_commands = ['no cdp run']
        return conn.send_config_set(config_commands)
if __name__ == "__main__":
r1 = Router(hostname='lax-tor-r1', os='iosv', device_type='cisco_ios', ip='172.16.1.17', username='cisco', password='cisco')
print("router interface count: " + str(r1.interfaces))
output = r1.show_interface()
print(output)
|
from mycroft import MycroftSkill, intent_file_handler
from SPARQLWrapper import SPARQLWrapper, JSON
import os
from mycroft.util.log import LOG
from difflib import SequenceMatcher
def similar(a, b):
    """Return the difflib similarity ratio between `a` and `b` (0.0 .. 1.0)."""
    matcher = SequenceMatcher(None, a, b)
    return matcher.ratio()
def uri_to_str(uri):
    """Return the local name of `uri`: the text after the last '/' and '#'.

    Identifiers starting with 'genid' are rendered as the spoken-friendly
    string 'blank node'.
    """
    # Renamed from `str`, which shadowed the builtin.
    local_name = uri.split("/").pop().split("#").pop()
    if local_name.startswith("genid"):
        return "blank node"
    return local_name
# Base URL of the GraphDB instance hosting the OCWS2019 repository.
GRAPHDB_REPO_URL = 'http://localhost:7200'
#GRAPHDB_REPO_URL = 'http://graphdb.sti2.at'
class BookDialog(MycroftSkill):
    """Mycroft voice skill answering questions about knowledge graphs.

    Queries a GraphDB SPARQL endpoint (repository OCWS2019) using the .rq
    query files shipped in this skill's `sparql/` directory, and offers an
    interactive exploration mode whose navigation history is kept in
    `self.current` (a stack of SPARQL result bindings; last entry = current
    subject).
    """

    def __init__(self):
        MycroftSkill.__init__(self)
        self.wrapper = SPARQLWrapper(GRAPHDB_REPO_URL + '/repositories/OCWS2019')
        # Reasoning is off by default; handlers re-enable it temporarily
        # when rdf:type inference is required.
        self.wrapper.addParameter('infer', 'false')
        # Navigation stack for the exploration intents.
        self.current = []

    @intent_file_handler('explore.new.intent')
    def handle_explore_new(self, message):
        """Search for a subject matching the utterance and make it current."""
        any_str = message.data.get('any')
        sparql = self.read_sparql_file('explore_new.rq').replace('$ANY$', any_str)
        self.wrapper.setQuery(sparql)
        self.wrapper.setReturnFormat(JSON)
        result = self.wrapper.query().convert()
        bindings = result["results"]["bindings"]
        if len(bindings) > 0:
            subj = uri_to_str(bindings[0]["s"]["value"])
            name = "not found"
            rdftype = "not found"
            for b in bindings:
                if b["p"]["value"] == "http://www.w3.org/1999/02/22-rdf-syntax-ns#type":
                    rdftype = uri_to_str(b["o"]["value"])
                if b["p"]["value"] == "http://schema.org/name":
                    name = uri_to_str(b["o"]["value"])
            self.current.append(bindings)
            self.speak("I found something. Subject '{}' of type '{}' with name '{}'".format(subj, rdftype, name))
        else:
            self.speak("I found nothing.")

    @intent_file_handler('explore.properties.intent')
    def handle_explore_properties(self, message):
        """List the distinct properties of the current subject."""
        bindings = self.current[-1]
        if len(bindings) > 0:
            props = set()
            for b in bindings:
                props.add(uri_to_str(b["p"]["value"]))
            answer = "The subject uses {} properties. {}".format(len(props), " . ".join(list(props)))
            self.speak(answer)
        else:
            self.speak("I found nothing.")

    @intent_file_handler('explore.details.intent')
    def handle_explore_details(self, message):
        """Speak every property/value pair of the current subject."""
        bindings = self.current[-1]
        if len(bindings) > 0:
            subj = uri_to_str(bindings[0]["s"]["value"])
            answer = "Details on subject {}. ".format(subj)
            key_val = {}
            for b in bindings:
                prop = uri_to_str(b["p"]["value"])
                if prop not in key_val:
                    key_val[prop] = []
                if "n" in b:
                    # Object is itself a named resource: describe it as navigable.
                    key_val[prop].append("a new {} with name {}".format(
                        "node " + uri_to_str(b["o"]["value"]) if b["o"]["type"] == "uri" else "blank node"
                        , uri_to_str(b["n"]["value"])))
                else:
                    key_val[prop].append(uri_to_str(b["o"]["value"]))
            for key in key_val:
                answer += "{} {} {}. ".format(key, ("is" if len(key_val[key]) == 1 else "are"), " and ".join(key_val[key]))
            self.speak(answer)
        else:
            self.speak("I found nothing.")

    @intent_file_handler('explore.property.intent')
    def handle_explore_property(self, message):
        """Speak the value(s) of the property best matching the utterance."""
        any_str = message.data.get('any')
        bindings = self.current[-1]
        if len(bindings) > 0:
            subj = uri_to_str(bindings[0]["s"]["value"])
            vals = []
            real_prop = ""
            for b in bindings:
                prop = uri_to_str(b["p"]["value"])
                # Fuzzy match tolerates small speech-recognition differences.
                if similar(prop, any_str) > 0.85:
                    real_prop = prop
                    if "n" in b:
                        vals.append("a new {} with name {}".format(
                            "node " + uri_to_str(b["o"]["value"]) if b["o"]["type"] == "uri" else "blank node"
                            , uri_to_str(b["n"]["value"])))
                    else:
                        vals.append(uri_to_str(b["o"]["value"]))
            answer = "I did not find any property called {} on the subject {}".format(any_str, subj)
            if len(vals) > 0:
                answer = "The subject {} has for the property {} the value{}: {}".format(subj, real_prop, ("s" if len(vals) > 1 else ""), " . ".join(vals))
            self.speak(answer)
        else:
            self.speak("I found nothing.")

    @intent_file_handler('explore.back.intent')
    def handle_explore_back(self, message):
        """Pop the navigation stack and announce the previous subject."""
        if len(self.current) > 1:
            self.current.pop()
            bindings = self.current[-1]
            self.speak("Now at subject " + uri_to_str(bindings[0]["s"]["value"]))
        else:
            self.speak("Cannot go back")

    # --- Canned question intents: each runs one .rq file, speaks one field ---

    @intent_file_handler('how.to.create.a.knowledge.graph.intent')
    def handle_how_to_create_a_knowledge_graph(self, message):
        self.handle('how_to_create_a_knowledge_graph.rq', 'text')

    @intent_file_handler('tell.me.chapters.of.knowledge.graphs.methodology.tools.and.selected.use.cases.intent')
    def handle_tell_me_chapters_of_knowledge_graphs_methodology_tools_and_selected_use_cases(self, message):
        self.handle('tell_me_chapters_of_knowledge_graphs_methodology_tools_and_selected_use_cases.rq', 'name')

    @intent_file_handler('tell.me.articles.from.dieter.fensel.intent')
    def handle_tell_me_articles_from_dieter_fensel(self, message):
        self.handle('tell_me_articles_from_dieter_fensel.rq', 'name')

    @intent_file_handler('tell.me.some.open.knowledge.graphs.intent')
    def handle_tell_me_some_open_knowledge_graphs_intent(self, message):
        self.handle('tell_me_some_open_knowledge_graphs.rq', 'name')

    @intent_file_handler('tell.me.some.proprietary.knowledge.graphs.intent')
    def handle_tell_me_some_proprietary_graphs_intent(self, message):
        self.handle('tell_me_some_proprietary_knowledge_graphs.rq', 'name')

    @intent_file_handler('tell.me.sub.types.of.knowledge.graphs.intent')
    def handle_tell_me_sub_types_of_knowledge_graphs(self, message):
        self.handle('tell_me_sub_types_of_knowledge_graphs.rq', 'label')

    @intent_file_handler('what.are.knowledge.graphs.intent')
    def handle_what_are_knowledge_graphs(self, message):
        self.handle('what_are_knowledge_graphs.rq', 'comment')

    @intent_file_handler('what.is.a.graph.intent')
    def handle_what_is_a_graph(self, message):
        self.handle('what_is_a_graph.rq', 'description')

    @intent_file_handler('which.knowledge.graph.has.the.highest.number.of.triples.intent')
    def handle_which_knowledge_graph_has_the_highest_number_of_triples(self, message):
        # Inference important for result (correct rdf:type handling)
        self.wrapper.clearParameter('infer')
        results = self.run_file_query('which_knowledge_graph_has_the_highest_number_of_triples.rq')
        self.wrapper.addParameter('infer', 'false')
        binding = results["results"]["bindings"][0]
        answer = 'The graph {} with {} triples'.format(
            binding["name"]["value"],
            binding["numTriples"]["value"])
        self.speak(answer)

    @intent_file_handler('who.are.the.authors.of.knowledge.graphs.methodology.tools.and.selected.use.cases.intent')
    def handle_who_are_the_authors_of_knowledge_graphs_methodology_tools_and_selected_use_cases(self, message):
        self.handle('who_are_the_authors_of_knowledge_graphs_methodology_tools_and_selected_use_cases.rq', 'name')

    def handle(self, sparql_file_name, value):
        """Run `sparql_file_name` and speak the `value` column of every row."""
        results = self.run_file_query(sparql_file_name)
        answer = self.create_answer(results, value)
        self.speak(answer)

    def run_file_query(self, file_name):
        """Execute the query stored in `file_name`; return the JSON result."""
        sparql = self.read_sparql_file(file_name)
        return self.run_query(sparql)

    @staticmethod
    def read_sparql_file(file_name):
        """Return the text of `sparql/<file_name>` next to this module."""
        dir_path = os.path.dirname(os.path.realpath(__file__))
        # Fix: use a context manager so the handle is closed (the original
        # `open(...).read()` leaked the file object).
        with open(dir_path + "/sparql/" + file_name) as sparql_file:
            return sparql_file.read()

    def run_query(self, sparql):
        """Execute a SPARQL string against the repository, returning JSON."""
        self.wrapper.setQuery(sparql)
        self.wrapper.setReturnFormat(JSON)
        return self.wrapper.query().convert()

    @staticmethod
    def create_answer(results, value):
        """Join the `value` column of all rows; number them when > 1 row."""
        answer = ''
        bindings = results["results"]["bindings"]
        for i, binding in enumerate(bindings, start=1):
            answer += (str(i) + ". " if len(bindings) > 1 else "") + binding[value]["value"] + ".\n"
        return answer
def create_skill():
    """Factory function called by Mycroft core to instantiate this skill."""
    return BookDialog()
|
# -*- coding: utf-8 -*-
from os import getcwd
from time import sleep
import boto3
from api.rdb.config import get, is_test
from api.rdb.utils.cognito import get_cognito_app_client_id
from api.rdb.utils.lambda_logger import lambda_logger
from api.rdb.utils.service_framework import handle_request
# Module-level logger, tagged with the Lambda working directory.
logger = lambda_logger(__name__, getcwd())
def handler(request, context):
    """AWS Lambda entry point for the Cognito user endpoints.

    Dispatches GET/PUT/POST/DELETE to the nested closures below via
    `handle_request`; each closure takes (request_params, request_body)
    dicts and returns a dict.
    """
    cognito_idp_client = boto3.client('cognito-idp')

    # noinspection PyPep8Naming,PyUnusedLocal
    def http_put(request_params, request_body):
        # type: (dict, dict) -> dict
        """Provision (or re-fetch) a Cognito user; return it sans metadata."""
        logger.info("http_put")
        # Don't Email when new user is provisioned in Cognito if is_test()
        message_action = 'SUPPRESS' if is_test() else 'RESEND'
        cognito_user = user_persist(cognito_idp_client,
                                    get('aws_user_pools_id'),
                                    request_body,
                                    True,
                                    ['EMAIL'],
                                    message_action)
        cognito_user = cognito_idp_client.admin_get_user(UserPoolId=get('aws_user_pools_id'),
                                                         Username=cognito_user['Username'])
        return remove_cruft(cognito_user)

    # noinspection PyPep8Naming,PyUnusedLocal,PyBroadException
    def http_delete(request_params, request_body):
        # type: (dict, dict) -> dict
        """Disable and delete a user (only in test mode or with ?force)."""
        logger.info("http_delete")
        if request_params and 'force' in request_params:
            logger.info(request_params['force'])
        # if test, delete the user; if production, de-activate
        # Bug fix: the conditional must be parenthesized — previously the
        # ternary wrapped the already-formatted string, so the production
        # branch logged the bare string "production".
        logger.info("http_delete in %s mode" % ('test' if is_test() else 'production'))
        # disable user in Cognito
        if is_test() or "force" in request_params:
            try:
                cognito_idp_client.admin_disable_user(UserPoolId=get('aws_user_pools_id'),
                                                      Username=request_params['username'])
                # NOTE(review): fixed wait, presumably for the disable to
                # propagate before deletion — confirm whether still needed.
                sleep(2)
            except cognito_idp_client.exceptions.UserNotFoundException:
                return {}
        if is_test() or "force" in request_params:
            logger.info("Deleting user %s for real" % request_params['username'])
            cognito_idp_client.admin_delete_user(UserPoolId=get('aws_user_pools_id'),
                                                 Username=request_params['username'])
            logger.info("Deleted user from Cognito")
        return {}

    # noinspection PyPep8Naming,PyUnusedLocal
    def http_get(request_params, request_body):
        # type: (dict, dict) -> dict
        """Fetch a Cognito user by username; return it sans metadata."""
        logger.info("http_get")
        cognito_user = cognito_idp_client.admin_get_user(UserPoolId=get('aws_user_pools_id'),
                                                         Username=request_params['username'])
        return remove_cruft(cognito_user)

    # noinspection PyPep8Naming,PyUnusedLocal
    def http_post(request_params, request_body):
        # type: (dict, dict) -> dict
        """Handle auth actions; when `newpassword` is set, complete the
        NEW_PASSWORD_REQUIRED challenge for the user."""
        logger.info("http_post")
        cognito_user = cognito_idp_client.admin_get_user(UserPoolId=get('aws_user_pools_id'),
                                                         Username=request_body['username'])
        if "cognito_user_pool_app_client_id" in request_body:
            cognito_app_client_id = request_body['cognito_user_pool_app_client_id']
        else:
            cognito_app_client_id = get_cognito_app_client_id(cognito_idp_client,
                                                              cognito_user_pool_id=get('aws_user_pools_id'))
        # TODO: enable ADMIN_NO_SRP_AUTH and USER_PASSWORD_AUTH auth flows
        if 'newpassword' in request_body:
            # noinspection PyBroadException
            auth_response = cognito_idp_client.admin_initiate_auth(
                UserPoolId=get('aws_user_pools_id'),
                AuthFlow='ADMIN_NO_SRP_AUTH',
                AuthParameters={
                    'USERNAME': cognito_user['Username'],
                    'PASSWORD': request_body['password']
                },
                ClientId=cognito_app_client_id
            )
            # https://github.com/capless/warrant/issues/14
            # Called for its side effect (completes the challenge); the
            # returned tokens are not propagated to the caller.
            cognito_idp_client.respond_to_auth_challenge(
                ClientId=cognito_app_client_id,
                ChallengeName='NEW_PASSWORD_REQUIRED',
                Session=auth_response['Session'],
                ChallengeResponses={
                    'NEW_PASSWORD': request_body['newpassword'],
                    'USERNAME': request_body['username']
                }
            )
            logger.info('newpassword successful, return result tokens')
        cognito_user = remove_cruft(cognito_user)
        return cognito_user

    return handle_request(request, context, http_get=http_get, http_put=http_put, http_delete=http_delete,
                          http_post=http_post)
# https://console.aws.amazon.com/cognito/pool/edit/?region=us-east-1&id=us-east-1:e55c591f-df3e-4161-8d54-0fd7e38dfd91
# http://boto3.readthedocs.io/en/latest/reference/services/cognito-idp.html#CognitoIdentityProvider.Client.admin_create_user
# noinspection PyUnusedLocal
def user_persist(cognito_idp_client, cognito_user_pool_id, request_body, email_verified, delivery_medium,
                 message_action):
    """Return the existing Cognito user for `request_body['username']`, or
    create one (with a User_profile row) if it does not exist.

    In test mode the create call carries `MessageAction=message_action`
    (typically 'SUPPRESS') so no provisioning email is sent; in production
    the temp-password email is delivered via `delivery_medium`.
    """
    from api.rdb.model.table_user_profile import User_profile
    # noinspection PyBroadException
    try:
        # User already exists: ensure a profile row exists once they are in
        # the FORCE_CHANGE_PASSWORD state, then return the user unchanged.
        cognito_user = cognito_idp_client.admin_get_user(UserPoolId=cognito_user_pool_id,
                                                         Username=request_body['username'])
        if 'UserStatus' in cognito_user and cognito_user['UserStatus'] == 'FORCE_CHANGE_PASSWORD':
            with User_profile.atomic():
                User_profile.get_or_create(username=cognito_user['Username'])
        return cognito_user
    except cognito_idp_client.exceptions.UserNotFoundException:
        pass
    # The two original branches differed only in the presence of
    # MessageAction, so build the arguments once and add it conditionally.
    create_kwargs = dict(
        UserPoolId=cognito_user_pool_id,
        Username=request_body["username"],
        UserAttributes=[
            {
                'Name': 'email',
                'Value': request_body["email"]
            },
            {
                'Name': 'email_verified',
                'Value': 'True'
            },
            {
                'Name': 'phone_number',
                'Value': request_body["phone_number"]
            },
            {
                'Name': 'phone_number_verified',
                'Value': 'True'
            }
        ],
        TemporaryPassword=request_body["password"],
        ForceAliasCreation=email_verified,
        DesiredDeliveryMediums=delivery_medium,
    )
    if is_test():
        # Don't Email when new user is provisioned in Cognito
        create_kwargs['MessageAction'] = message_action
    cognito_user = cognito_idp_client.admin_create_user(**create_kwargs)
    with User_profile.atomic():
        profile_row = {'username': cognito_user['User']['Username']}
        User_profile.get_or_create(username=profile_row['username'], defaults=profile_row)
    return cognito_user['User']
def remove_cruft(cognito_user):
    """Strip the AWS transport metadata from a Cognito user dict (in place).

    Returns the same dict for call chaining.
    """
    cognito_user.pop('ResponseMetadata', None)
    return cognito_user
|
'''
Flask app initialization.

Creates the Flask application, loads configuration from the top-level
``config`` module, binds the shared SQLAlchemy handle, and finally imports
the routes module for its route-registration side effects.
'''
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config.from_object('config')  # config module importable from the project root
db = SQLAlchemy(app)
# Imported last (not at the top) so mysite.routes can import `app`/`db` from here.
import mysite.routes
|
from __future__ import absolute_import, division, print_function
import numpy as np
from sklearn.base import BaseEstimator
from SparseGroupLasso.linalg import vec_norm, dot
from .utils import S, norm_non0
__author__ = 'Romain Tavenard romain.tavenard[at]univ-rennes2.fr'
class SSGL(BaseEstimator):
    """A Semi-Sparse Group Lasso model using a blockwise descent solver.
    Implements the methods presented by Noah Simon et. al [1], adding the
    notion of the semi-sparse model, as introduced by Romain Tavenard [2].
    Attributes
    ----------
    ind_sparse : np.ndarray
        Per-feature 0/1 mask: 1 if the feature is pushed towards sparsity.
    groups : np.ndarray
        Per-feature group id (assumed to be integers in 0..max(groups)).
    alpha : float
        alpha parameter provided to constructor
    lambda_ : float
        lambda_ parameter provided to constructor
    max_iter_outer : int
        max_iter_outer parameter provided to constructor
    max_iter_inner : int
        max_iter_inner parameter provided to constructor
    rtol : float
        rtol parameter provided to constructor
    coef_ : None
        Fitted coefficient vector after `fit`; None before fitting.
    .. [1] Noah Simon, Jerome Friedman, Trevor Hastie, Rob Tibshirani,
    "A Sparse-group Lasso," Journal of Computational and Graphical Statistics,
    Vol 22, Issue 2 (2013),
    <http://www.stanford.edu/~hastie/Papers/SGLpaper.pdf>
    .. [2] Romain Tavenard, Sparse-Group Lasso jupyter notebook, Nov 30, 2016
    <https://github.com/rtavenar/Homework/blob/master/pynb/sparse_group_lasso.ipynb>
    See Also
    --------
    subgradients_semisparse : same model, different solver
    """
    def __init__(self, groups, alpha, lambda_, ind_sparse,
                 max_iter_outer=10000, max_iter_inner=100, rtol=1e-6,
                 warm_start=False):
        """
        Parameters
        ----------
        groups : array-like
            Group id for each feature.
        alpha : float
            Mixing between the l1 (sparse) and group-l2 penalties.
        lambda_ : float
            Overall regularization strength.
        ind_sparse : array-like
            0/1 per-feature mask selecting which features get the l1 penalty.
        max_iter_outer : int, optional
            Default: 10000
        max_iter_inner : int, optional
            Default: 100
        rtol : float, optional
            Default: 1e-6
        warm_start : boolean, optional
            If True, use previous value of `coef_` as starting point to fit new data
            Default: False
        """
        self.ind_sparse = np.array(ind_sparse)
        self.groups = np.array(groups)
        self.alpha = alpha
        self.lambda_ = lambda_
        self.max_iter_outer = max_iter_outer
        self.max_iter_inner = max_iter_inner
        self.rtol = rtol
        self.warm_start = warm_start
        self.coef_ = None
    def fit(self, X, y):
        """Fit this SGL model using features X and output y
        Parameters
        ----------
        X : np.ndarray
            Feature matrix used to train this SGL model. Dimensions are n x p,
            where n is the number of samples and p is the number of features
        y : np.ndarray
            Response vector used to train this SGL model. Length is n,
            where n is the number of samples.
        """
        # Assumption: group ids are between 0 and max(groups)
        # Other assumption: ind_sparse is of dimension X.shape[1] and has 0 if
        # the dimension should not be pushed
        # towards sparsity and 1 otherwise
        n_groups = np.max(self.groups) + 1
        n, d = X.shape
        assert d == self.ind_sparse.shape[0]
        alpha_lambda = self.alpha * self.lambda_ * self.ind_sparse
        if not self.warm_start or self.coef_ is None:
            # Random init (no warm start); seeded externally if reproducibility matters.
            self.coef_ = np.random.randn(d)
        # Adaptation of the heuristic (?) from fabianp's code:
        # step size derived from the spectral norm of X.
        t = n / (np.linalg.norm(X, 2) ** 2)
        for iter_outer in range(self.max_iter_outer):
            beta_old = self.coef_.copy()
            for gr in range(n_groups):
                # 1- Should the group be zero-ed out?
                indices_group_k = self.groups == gr
                X_group_t = X[:, indices_group_k].T
                grad_l = self._grad_l(X, X_group_t, y, indices_group_k,
                                      group_zero=True)
                if self.discard_group(grad_l, indices_group_k):
                    self.coef_[indices_group_k] = 0.
                else:
                    # 2- If the group is not zero-ed out,
                    # perform GD for the group:
                    beta_k = self.coef_[indices_group_k]
                    p_l = np.sqrt(np.sum(indices_group_k))
                    for iter_inner in range(self.max_iter_inner):
                        grad_l = self._grad_l(
                            X, X_group_t, y, indices_group_k)
                        # Soft-threshold a gradient step (l1 part of the penalty).
                        tmp = S(beta_k - t * grad_l, t *
                                alpha_lambda[indices_group_k])
                        norm_tmp = vec_norm(tmp)
                        # Equation 12 in Simon paper:
                        step = (1. -
                                (t * (1 - self.alpha) * self.lambda_ * p_l /
                                 norm_tmp))
                        tmp *= max([step, 0.])
                        tmp_beta_k = tmp - beta_k
                        norm_tmp_beta_k = vec_norm(tmp_beta_k)
                        norm_non0_tmp = norm_non0(tmp)
                        # Relative change small enough: inner loop converged.
                        if norm_tmp_beta_k / norm_non0_tmp < self.rtol:
                            self.coef_[indices_group_k] = tmp
                            break
                        beta_k = self.coef_[indices_group_k] = tmp
            beta_old_coef = beta_old - self.coef_
            # Outer convergence: relative change of the whole coefficient vector.
            if (vec_norm(beta_old_coef) / norm_non0(self.coef_) < self.rtol):
                break
        return self
    def _grad_l(self, X, X_group_t, y, indices_group, group_zero=False):
        # Gradient of the (unregularized) RSS loss restricted to one group.
        # With group_zero=True the group's coefficients are treated as zero.
        if group_zero:
            beta = self.coef_.copy()
            beta[indices_group] = 0.
        else:
            beta = self.coef_
        n, d = X.shape
        r = y - np.dot(X, beta)
        return -np.dot(X_group_t, r) / n
    @staticmethod
    def _static_grad_l(X, X_group_t, y, indices_group, beta=None):
        # Same gradient as _grad_l but usable without an instance
        # (beta defaults to the zero vector).
        n, d = X.shape
        if beta is None:
            beta = np.zeros((d, ))
        r = y - np.dot(X, beta)
        return -np.dot(X_group_t, r) / n
    def unregularized_loss(self, X, y):
        """The unregularized loss function (i.e. RSS)
        Returns
        -------
        np.float64
            The unregularized loss
        """
        n, d = X.shape
        r = y - np.dot(X, self.coef_)
        return np.dot(r, r) / (2 * n)
    def loss(self, X, y):
        """Total loss function with regularization
        Returns
        -------
        np.float64
            The regularized loss
        """
        # l1 part, masked by ind_sparse through alpha_lambda.
        alpha_lambda = self.alpha * self.lambda_ * self.ind_sparse
        reg_l1 = np.linalg.norm(alpha_lambda * self.coef_, ord=1)
        s = 0
        n_groups = np.max(self.groups) + 1
        for gr in range(n_groups):
            indices_group_k = self.groups == gr
            # Group norms are weighted by sqrt(group size), as in [1].
            s += (np.sqrt(np.sum(indices_group_k)) *
                  vec_norm(self.coef_[indices_group_k]))
        reg_l2 = (1. - self.alpha) * self.lambda_ * s
        #print(reg_l1, reg_l2, self.unregularized_loss(X, y))
        return self.unregularized_loss(X, y) + reg_l2 + reg_l1
    def discard_group(self, grad_l, ind):
        """Decide whether a whole group of coefficients should be zeroed out.
        Parameters
        ----------
        grad_l : np.ndarray
            Gradient of the unregularized loss restricted to this group,
            computed with the group's coefficients set to zero.
        ind : boolean np.ndarray
            boolean mask for this groups indices
        Returns
        -------
        boolean
            If true, indicates that the coefficients for this group should
            be discarded.
        """
        alpha_lambda = self.alpha * self.lambda_ * self.ind_sparse
        this_S = S(grad_l, alpha_lambda[ind])
        norm_2 = vec_norm(this_S)
        p_l = np.sqrt(np.sum(ind))
        return norm_2 <= (1 - self.alpha) * self.lambda_ * p_l
    def predict(self, X):
        """Predict response vector using the trained coefficients.
        Parameters
        ----------
        X : np.ndarray
            Feature matrix used to train this SGL model. Dimensions are n x p,
            where n is the number of samples and p is the number of features
        Returns
        -------
        yhat : np.ndarray
            Predicted response vector of length n
        """
        return np.dot(X, self.coef_)
    def fit_predict(self, X, y):
        """Fit the model and predict the response vector
        Parameters
        ----------
        X : np.ndarray
            Feature matrix used to train this SGL model. Dimensions are n x p,
            where n is the number of samples and p is the number of features
        y : np.ndarray
            Response vector used to train this SGL model. Length is n,
            where n is the number of samples.
        Returns
        -------
        yhat : np.ndarray
            Predicted response vector of length n
        """
        return self.fit(X, y).predict(X)
    @classmethod
    def lambda_max(cls, X, y, groups, alpha, ind_sparse=None):
        # Smallest lambda for which every group is discarded at beta = 0
        # (so the fitted model is entirely zero).  For each group, the
        # group-discard condition is solved for lambda; the max over groups
        # is returned.
        n, d = X.shape
        n_groups = np.max(groups) + 1
        max_min_lambda = -np.inf
        if ind_sparse is None:
            ind_sparse = np.ones((d, ))
        for gr in range(n_groups):
            indices_group = groups == gr
            sqrt_p_l = np.sqrt(np.sum(indices_group))
            X_group_t = X[:, indices_group].T
            vec_A = np.abs(cls._static_grad_l(X, X_group_t, y, indices_group))
            if alpha > 0.:
                # Piecewise search: the soft-threshold makes the discard
                # condition piecewise-quadratic in lambda; scan breakpoints
                # and solve the quadratic on each segment.
                # NOTE(review): the quadratic-root bookkeeping below is taken
                # on trust from the derivation in [2] -- verify against it.
                min_lambda = np.inf
                breakpoints_lambda = np.unique(vec_A / alpha)
                lower = 0.
                for l in breakpoints_lambda:
                    indices_nonzero = vec_A >= alpha * l
                    indices_nonzero_sparse = np.logical_and(indices_nonzero, ind_sparse[indices_group] > 0)
                    n_nonzero_sparse = np.sum(indices_nonzero_sparse)
                    a = n_nonzero_sparse * alpha ** 2 - (sqrt_p_l * (1. - alpha)) ** 2
                    b = - 2. * alpha * np.sum(vec_A[indices_nonzero_sparse])
                    c = np.sum(vec_A[indices_nonzero] ** 2)
                    delta = b ** 2 - 4 * a * c
                    if delta >= 0.:
                        candidate0 = (- b - np.sqrt(delta)) / (2 * a)
                        candidate1 = (- b + np.sqrt(delta)) / (2 * a)
                        if lower <= candidate0 <= l:
                            min_lambda = candidate0
                            break
                        elif lower <= candidate1 <= l:
                            min_lambda = candidate1
                            break
                    lower = l
            else:
                # alpha == 0: pure group lasso, closed-form threshold.
                min_lambda = np.linalg.norm(np.dot(X[:, indices_group].T, y) / n) / sqrt_p_l
            if min_lambda > max_min_lambda:
                max_min_lambda = min_lambda
        return max_min_lambda
    @classmethod
    def candidate_lambdas(cls, X, y, groups, alpha, ind_sparse=None, n_lambdas=5, lambda_min_ratio=.1):
        # Log-spaced grid of lambdas from lambda_min_ratio * lambda_max up to lambda_max.
        l_max = cls.lambda_max(X, y, groups=groups, alpha=alpha, ind_sparse=ind_sparse)
        return np.logspace(np.log10(lambda_min_ratio * l_max), np.log10(l_max), num=n_lambdas)
class SSGL_LogisticRegression(SSGL):
    """Semi-Sparse Group Lasso with a logistic (binary cross-entropy) loss.

    Reuses the SSGL blockwise-descent solver; only the loss and its gradient
    are overridden.
    """
    # Up to now, we assume that y is 0 or 1
    def unregularized_loss(self, X, y):  # = -1/n * log-likelihood
        n, d = X.shape
        x_beta = np.dot(X, self.coef_)
        y_x_beta = x_beta * y
        # log(1 + exp(x.beta)) - y * x.beta, averaged over samples.
        # NOTE(review): np.exp may overflow for large |x.beta| -- no
        # log-sum-exp stabilization is applied here.
        log_1_e_xb = np.log(1. + np.exp(x_beta))
        return np.sum(log_1_e_xb - y_x_beta, axis=0) / n
    def _grad_l(self, X, X_group_t, y, indices_group, group_zero=False,
                beta_zero=False):
        # Gradient of the logistic loss restricted to one group.
        # group_zero: zero out this group's coefficients first;
        # beta_zero: use an all-zero coefficient vector.
        if beta_zero:
            beta = np.zeros(self.coef_.shape)
        elif group_zero:
            beta = self.coef_.copy()
            beta[indices_group] = 0.
        else:
            beta = self.coef_
        n, d = X.shape
        exp_xb = np.exp(np.dot(X, beta))
        ratio = exp_xb / (1. + exp_xb)
        return np.sum(X_group_t.T * (ratio - y).reshape((n, 1)), axis=0) / n
    @staticmethod
    def _static_grad_l(X, X_group_t, y, indices_group, beta=None):
        # Same as _grad_l with beta=None meaning beta = 0 (sigmoid(0) = 0.5).
        n, d = X.shape
        if beta is None:
            ratio = .5
        else:
            exp_xb = np.exp(np.dot(X, beta))
            ratio = exp_xb / (1. + exp_xb)
        return np.sum(X_group_t.T * (ratio - y).reshape((n, 1)),
                      axis=0) / n
    def predict(self, X):
        # Class 1 iff X.beta >= 0 (i.e. predicted probability >= 0.5).
        y = np.ones((X.shape[0]))
        y[np.exp(np.dot(X, self.coef_)) < 1.] = 0.
        return y
    @staticmethod
    def __logistic(X, beta):
        # Unused private helper; note it computes 1/(1+exp(X.beta)),
        # which is sigmoid(-X.beta).
        return 1. / (1. + np.exp(np.dot(X, beta)))
if __name__ == "__main__":
    # Smoke test: lambda_max should be the smallest lambda that zeroes out
    # every coefficient, so fitting just below/above it flips sparsity.
    n = 1000
    d = 20
    groups = np.array([0] * int(d / 2) + [1] * (d - int(d / 2)))
    alpha = .5
    epsilon = .001
    np.random.seed(0)
    X = np.random.randn(n, d)
    secret_beta = np.random.randn(d)
    ind_sparse = np.zeros((d, ))
    for i in range(d):
        if groups[i] == 0 or i % 2 == 0:
            secret_beta[i] = 0
        if i % 2 != 0:
            ind_sparse[i] = 1
    y = np.dot(X, secret_beta)
    # BUG FIX: the class defined in this module is SSGL, not SGL -- the
    # original raised NameError when run as a script.
    lambda_max = SSGL.lambda_max(X, y, groups=groups, alpha=alpha, ind_sparse=ind_sparse)
    print(lambda_max)
    for l in [lambda_max - epsilon, lambda_max + epsilon]:
        model = SSGL(groups=groups, alpha=alpha, lambda_=l, ind_sparse=ind_sparse)
        model.fit(X, y)
        print(l, model.coef_)
    print(SSGL.candidate_lambdas(X, y, groups=groups, alpha=alpha))
|
# -*- coding: utf-8 -*-
"""Example that runs the checks on all structures in the CoRE MOF"""
import concurrent.futures
from glob import glob
import pandas as pd
from tqdm import tqdm # pylint:disable=import-error
from mofchecker import MOFChecker
from mofchecker.errors import NoMetal
all_structures = glob("2019-11-01-ASR-public_12020/structure_10143/*.cif")
def get_feat_one_structure(cif):
    """Run the MOFChecker checks on a single CIF file.

    Returns the descriptor mapping, or None when the structure contains
    no metal (which MOFChecker signals via the NoMetal exception).
    """
    try:
        checker = MOFChecker.from_cif(cif)
    except NoMetal:
        print("{} has no metal".format(cif))
        return None
    return checker.get_mof_descriptors()
def main():
    """Run the checks over every structure and dump the results to CSV."""
    # Fan the per-structure checks out over a process pool; structures that
    # failed (returned None) are dropped from the output.
    features = []
    with concurrent.futures.ProcessPoolExecutor() as pool:
        results = pool.map(get_feat_one_structure, all_structures)
        for feat in tqdm(results, total=len(all_structures)):
            if feat is not None:
                features.append(feat)
    pd.DataFrame(features).to_csv("mof_feat.csv", index=False)
# Script entry point.
if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
from celery.schedules import crontab
from celery.task import periodic_task
from celery import group
from .models import User
from .subtasks.user import task_check_user
@periodic_task(run_every=crontab(minute=0), ignore_result=True)
def task_start_worker():
    """Hourly: fan out one task_check_user subtask per active user."""
    active_ids = User.objects.filter(is_active=True).values_list('id', flat=True).order_by('id')
    group(task_check_user.s(uid) for uid in active_ids).apply_async()
|
# -*- coding: utf-8 -*
"""FaceNet inference implementation."""
import tensorflow.compat.v1 as tf
class FaceNet:
    """FaceNet inference implementation."""
    def __init__(self, model_path):
        """Class constructor.
        Parameters
        ----------
        model_path : str
            Path to a trained FaceNet model.
        """
        # model loading: deserialize a frozen TF1 GraphDef from disk.
        graph_def = tf.GraphDef()
        with open(model_path, 'rb') as f:
            graph_def.ParseFromString(f.read())
        with tf.Graph().as_default() as graph:
            tf.import_graph_def(graph_def, name='')
        # session: allow_growth avoids grabbing all GPU memory up front.
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self._sess = tf.Session(graph=graph, config=config)
        # graph nodes: input placeholder, train/eval phase flag, embedding output.
        self._img_ph = graph.get_tensor_by_name('input:0')
        self._is_training_ph = graph.get_tensor_by_name('phase_train:0')
        self._emb_op = graph.get_tensor_by_name('embeddings:0')
    def calc_embeddings(self, imgs_batch):
        """Calculate embeddings for given images.
        Parameters
        ----------
        imgs_batch : tf.EagerTensor
            TF EagerTensor with resized to (160, 160) faces.
        Returns
        -------
        list
            A list with calculated embeddings for each image.
        """
        # Feed the batch through the frozen graph in inference mode.
        np_imgs_batch = imgs_batch.numpy()
        embeddings = self._sess.run(self._emb_op,
                                    feed_dict={self._img_ph: np_imgs_batch,
                                               self._is_training_ph: False})
        return embeddings.tolist()
|
class UserGroup:
    """A named group of members that can split a shared expense."""

    def __init__(self, group_name, members):
        self.group_name = group_name
        self.members = members
        # Cached at construction time; assumes the member list is not
        # mutated afterwards.
        self.members_count = len(self.members)

    def update_group_expense(self, lender, amount):
        """Split `amount` paid by `lender` evenly among the other members.

        Each non-lender member gets an "owes" entry and the lender gets a
        matching "loaned" entry.
        """
        # The lender does not owe themselves a share.
        share_count = self.members_count - 1 if lender in self.members else self.members_count
        for member in self.members:
            if lender == member:
                continue
            share = amount / share_count
            member.add_entry(lender, -share)   # member owes the lender
            lender.add_entry(member, share)    # lender is owed by the member
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: hack2020team/headposeservice/pose_service.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='hack2020team/headposeservice/pose_service.proto',
package='youlearn.headpose.v1',
syntax='proto3',
serialized_options=b'\n\020com.youlearn.apiB\rHeadPoseProtoP\001',
serialized_pb=b'\n/hack2020team/headposeservice/pose_service.proto\x12\x14youlearn.headpose.v1\"T\n\x05\x46rame\x12\x18\n\x10\x66rame_identifier\x18\x01 \x01(\x04\x12\x0e\n\x06height\x18\x02 \x01(\x04\x12\r\n\x05width\x18\x03 \x01(\x04\x12\x12\n\nframe_data\x18\x04 \x01(\x0c\"6\n\x0cPoseResponse\x12\x18\n\x10\x66rame_identifier\x18\x01 \x01(\x04\x12\x0c\n\x04pose\x18\x02 \x03(\x02\x32[\n\x0bHeadPoseApi\x12L\n\x07GetPose\x12\x1b.youlearn.headpose.v1.Frame\x1a\".youlearn.headpose.v1.PoseResponse\"\x00\x42#\n\x10\x63om.youlearn.apiB\rHeadPoseProtoP\x01\x62\x06proto3'
)
_FRAME = _descriptor.Descriptor(
name='Frame',
full_name='youlearn.headpose.v1.Frame',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='frame_identifier', full_name='youlearn.headpose.v1.Frame.frame_identifier', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='height', full_name='youlearn.headpose.v1.Frame.height', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='width', full_name='youlearn.headpose.v1.Frame.width', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='frame_data', full_name='youlearn.headpose.v1.Frame.frame_data', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=73,
serialized_end=157,
)
_POSERESPONSE = _descriptor.Descriptor(
name='PoseResponse',
full_name='youlearn.headpose.v1.PoseResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='frame_identifier', full_name='youlearn.headpose.v1.PoseResponse.frame_identifier', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pose', full_name='youlearn.headpose.v1.PoseResponse.pose', index=1,
number=2, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=159,
serialized_end=213,
)
DESCRIPTOR.message_types_by_name['Frame'] = _FRAME
DESCRIPTOR.message_types_by_name['PoseResponse'] = _POSERESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Frame = _reflection.GeneratedProtocolMessageType('Frame', (_message.Message,), {
'DESCRIPTOR' : _FRAME,
'__module__' : 'hack2020team.headposeservice.pose_service_pb2'
# @@protoc_insertion_point(class_scope:youlearn.headpose.v1.Frame)
})
_sym_db.RegisterMessage(Frame)
PoseResponse = _reflection.GeneratedProtocolMessageType('PoseResponse', (_message.Message,), {
'DESCRIPTOR' : _POSERESPONSE,
'__module__' : 'hack2020team.headposeservice.pose_service_pb2'
# @@protoc_insertion_point(class_scope:youlearn.headpose.v1.PoseResponse)
})
_sym_db.RegisterMessage(PoseResponse)
DESCRIPTOR._options = None
_HEADPOSEAPI = _descriptor.ServiceDescriptor(
name='HeadPoseApi',
full_name='youlearn.headpose.v1.HeadPoseApi',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=215,
serialized_end=306,
methods=[
_descriptor.MethodDescriptor(
name='GetPose',
full_name='youlearn.headpose.v1.HeadPoseApi.GetPose',
index=0,
containing_service=None,
input_type=_FRAME,
output_type=_POSERESPONSE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_HEADPOSEAPI)
DESCRIPTOR.services_by_name['HeadPoseApi'] = _HEADPOSEAPI
# @@protoc_insertion_point(module_scope)
|
#!/usr/bin/env python
# Writes the read-length distribution of a fastq file to an output file.
import os
import sys
import fastq
import generalUtils
import argparse
# CLI: -i input fastq, -o output path, -l read length of interest.
parser = argparse.ArgumentParser(description='writes sequence length distribution')
parser.add_argument('-i', required= True, help='input')
parser.add_argument('-o', required= True, help='output')
parser.add_argument('-l', required= True, help='sequence length of interest')
args = parser.parse_args()
fastqFile = args.i
output = args.o
seqLength = int(args.l)
# Compute the distribution and persist it as a simple key/value dump.
myFastq = fastq.fastq(fastqFile)
lengthDistro = myFastq.getLengthDistribution(seqLength)
generalUtils.writeDict(lengthDistro, output)
|
import re
import requests
import weather_dao
from requests.exceptions import RequestException
def get_one_page(url, headers):
    """Fetch a URL and return its body decoded as UTF-8.

    Returns None on any request failure or non-200 status.
    """
    try:
        resp = requests.get(url, headers=headers)
    except RequestException:
        return None
    if resp.status_code != 200:
        return None
    resp.encoding = 'utf-8'
    return resp.text
def parse_weather(idd):
    """Scrape today's weather for one city from weather.com.cn and store it.

    `idd` is a row id that is mapped to a city code via weather_dao.
    Note: exits the whole process (SystemExit) when the page cannot be
    fetched; callers are expected to guard against that.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/604.4.7 (KHTML, like Gecko) Version/11.0.2 Safari/604.4.7'}
    ccnumber = weather_dao.find_city_number(idd)
    city_number = str(ccnumber)
    html = get_one_page('http://www.weather.com.cn/weather1d/' + city_number + '.shtml', headers)
    if not html:
        print("城市代码有误!")
        exit(1)
    city_name = weather_dao.find_city_name(city_number)
    # Scrape date/time/weather/temperature from a hidden title field, plus
    # the UV / sports / clothing index tiles.  These regexes are tied to the
    # exact page markup and will silently return [] if it changes, making
    # the aim[0][...] lookups below raise IndexError.
    aim = re.findall('<input type="hidden" id="hidden_title" value="(.*?)月(.*?)日(.*?)时(.*?) (.*?) (.*?) (.*?)"',
                     html,re.S)
    lightdata = re.findall('<li class="li1 hot">\n<i></i>\n<span>(.*?)</span>\n<em>(.*?)</em>\n<p>(.*?)</p>\n</li>',
                           html, re.S)
    colddata = re.findall('<li class="li2 hot">\n(.*?)</span>\n<em>(.*?)</em>\n<p>(.*?)</p>', html, re.S)
    weardata = re.findall(
        '<li class="li3 hot" id="chuanyi">\n(.*?)<span>(.*?)</span>\n<em>(.*?)</em>\n<p>(.*?)</p>',
        html, re.S)
    print(city_name)
    # Format the scraped fields (Chinese labels) and print them.
    today_time = "当前日期:%s月%s日,%s" % (aim[0][0], aim[0][1], aim[0][4])
    print(today_time)
    update_time = "更新时间:%s:00" % aim[0][2]
    print(update_time)
    today_wether = "当前天气:%s" % aim[0][5]
    print(today_wether)
    today_temperature = "今日温度:%s" % aim[0][6]
    print(today_temperature)
    today_ziwaixian = "%s:%s %s" % (lightdata[0][1], lightdata[0][0], lightdata[0][2])
    print(today_ziwaixian)
    today_yundong = "%s:%s" % (colddata[0][1], colddata[0][2])
    print(today_yundong)
    today_wear = "%s:%s %s" % (weardata[0][2], weardata[0][1], weardata[0][3])
    print(today_wear)
    print("--" * 40)
    # Persist everything for this city in one DAO call.
    weather_dao.update_weather_spider(city_number, today_time, update_time, today_wether, today_temperature,
                                      today_ziwaixian,
                                      today_yundong, today_wear)
def mistaken():
    """Log that the current entry is skipped and processing continues.

    BUG FIX: the original wrapped these prints in ``try/except: mistaken()``,
    i.e. a bare except that recursed into itself -- a repeated failure would
    recurse without bound, and the bare except hid real errors.  The body
    only prints, so no exception handling is needed at all.
    """
    print('*****循环跳过,本页无内容*****')
    ###### collection code placeholder ##########
    print('——————————正常运行——————————')
if __name__ == '__main__':
    # City ids are assumed to be 1..447; failures are logged and skipped.
    for idd in range(1, 448):
        try:
            parse_weather(idd)
            # time.sleep(random.randint(1, 5))
        # SystemExit must be caught too: parse_weather calls exit(1) on a bad
        # page (bare except did this implicitly in the original).
        except (Exception, SystemExit):
            mistaken()
            # BUG FIX: the original did `idd += 1` before logging, so the
            # failing city was recorded with the wrong (next) id; the
            # reassignment had no effect on the for-loop either.
            # Raw string keeps the same path value ("\p" etc. were never
            # real escapes) while avoiding invalid-escape warnings; `with`
            # guarantees the handle is closed.
            with open(r"D:\phython\dataaaaa\mistakes.txt", "a") as fo:
                fo.write(f"第{idd}条对应的城市代码有问题\n")
|
def fun():
    """Read one integer n from stdin and print the derived ratio.

    Computes floor(((n+1)^2 * ((n+1)^2 - 1) / 12) / (sum of squares 1..n))
    and prints it as an int.
    """
    n = int(input())
    # 1^2 + 2^2 + ... + n^2
    square_sum = (n * (n + 1) * (2 * n + 1)) / 6
    # avoid shadowing the builtin `sum`
    numerator = (n + 1) * (n + 1)
    numerator = (numerator * (numerator - 1)) / 12
    print(int(numerator // square_sum))
# Read the number of test cases, then run `fun` once per case.
n=int(input())
while n!=0:
    n-=1
    fun()
|
import torch
import torch.nn as nn
import os
from collections import OrderedDict
from functools import partial
import colossalai
import colossalai.nn as col_nn
import torch
import torch.nn as nn
from colossalai.builder import build_pipeline_model
from colossalai.engine.schedule import (InterleavedPipelineSchedule,
PipelineSchedule)
from colossalai.logging import disable_existing_loggers, get_dist_logger
from colossalai.trainer import Trainer, hooks
from colossalai.utils import MultiTimer, get_dataloader
from torchvision import transforms
from torchvision.datasets import CIFAR10
import glob
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.nn import Accuracy, CrossEntropyLoss
from colossalai.nn.lr_scheduler import CosineAnnealingWarmupLR
from colossalai.trainer import Trainer, hooks
from colossalai.utils import MultiTimer, is_using_pp
from colossalai.utils.model.pipelinable import PipelinableContext
class MlpBlock(nn.Module):
    """Two-layer feed-forward block: hidden_dim -> mlp_dim -> hidden_dim with GELU."""

    def __init__(self, hidden_dim, mlp_dim):
        super(MlpBlock, self).__init__()
        # Attribute name `mlp` kept so state_dict keys stay compatible.
        self.mlp = nn.Sequential(
            nn.Linear(hidden_dim, mlp_dim),
            nn.GELU(),
            nn.Linear(mlp_dim, hidden_dim),
        )

    def forward(self, x):
        """Apply the MLP along the last dimension of `x`."""
        return self.mlp(x)
class MixerBlock(nn.Module):
    """One MLP-Mixer layer: token mixing then channel mixing, each with
    pre-LayerNorm and a residual connection."""

    def __init__(self, num_tokens, hidden_dim, tokens_mlp_dim, channels_mlp_dim):
        super(MixerBlock, self).__init__()
        # Attribute names kept so state_dict keys stay compatible.
        self.ln_token = nn.LayerNorm(hidden_dim)
        self.token_mix = MlpBlock(num_tokens, tokens_mlp_dim)
        self.ln_channel = nn.LayerNorm(hidden_dim)
        self.channel_mix = MlpBlock(hidden_dim, channels_mlp_dim)

    def forward(self, x):
        # Token mixing acts across the sequence axis, hence the transposes
        # around the MLP.
        mixed_tokens = self.token_mix(self.ln_token(x).transpose(1, 2)).transpose(1, 2)
        x = x + mixed_tokens
        # Channel mixing acts on the feature axis directly.
        return x + self.channel_mix(self.ln_channel(x))
# class MlpMixer(nn.Module):
# def __init__(self, num_classes, num_blocks, patch_size, hidden_dim, tokens_mlp_dim, channels_mlp_dim, image_size=224):
# super(MlpMixer, self).__init__()
# num_tokens = (image_size // patch_size)**2
#
# self.patch_emb = nn.Conv2d(3, hidden_dim, kernel_size=patch_size, stride=patch_size, bias=False)
#
# self.mlp = nn.Sequential(*[MixerBlock(num_tokens, hidden_dim, tokens_mlp_dim, channels_mlp_dim) for _ in range(num_blocks)])
# self.ln = nn.LayerNorm(hidden_dim)
# self.fc = nn.Linear(hidden_dim, num_classes)
#
# self.mlpm = nn.Sequential(
# self.patch_emb,
# data_flatten(),
# self.mlp,
# data_mean(),
# self.ln,
# self.fc)
#
# def forward(self, x):
# return self.mlpm(x)
class data_flatten(nn.Module):
    """(N, C, H, W) -> (N, H*W, C): flatten spatial dims into tokens,
    move channels last."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        tokens = x.flatten(2)          # (N, C, H*W)
        return tokens.transpose(1, 2)  # (N, H*W, C)
class data_mean(nn.Module):
    """Global average pool over the token axis: (N, T, C) -> (N, C)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return x.mean(dim=1)
def MlpMixer(num_classes, num_blocks, patch_size, hidden_dim, tokens_mlp_dim, channels_mlp_dim, image_size=224):
    """Assemble an MLP-Mixer as a flat nn.Sequential.

    Stages: conv patch embedding -> flatten to tokens -> mixer blocks ->
    LayerNorm -> mean over tokens -> linear classifier head.
    """
    num_tokens = (image_size // patch_size) ** 2
    stem = nn.Conv2d(3, hidden_dim, kernel_size=patch_size, stride=patch_size, bias=False)
    mixer_layers = nn.Sequential(
        *[MixerBlock(num_tokens, hidden_dim, tokens_mlp_dim, channels_mlp_dim) for _ in range(num_blocks)])
    head_norm = nn.LayerNorm(hidden_dim)
    head_fc = nn.Linear(hidden_dim, num_classes)
    # NOTE(review): LayerNorm is applied BEFORE the token-mean here, while the
    # commented-out reference implementation above averaged first -- confirm
    # this ordering is intentional.
    return nn.Sequential(stem, data_flatten(), mixer_layers, head_norm, data_mean(), head_fc)
# def data_flatten(x):
# return x.flatten(2).transpose(1, 2)
#
# def data_mean(x):
# return x.mean(dim=1)
# def mixer_s32(num_classes=10, image_size=32, patch_size=4,**kwargs):
# return MlpMixer(num_classes, 8, patch_size, 512, 256, 2048, image_size)
def mixer_s32(num_classes=1000, image_size=224, patch_size=32, **kwargs):
    """Mixer-S/32 configuration: 8 blocks, hidden 512, token-MLP 256, channel-MLP 2048.

    NOTE(review): MlpMixer's signature accepts no extra keyword arguments, so
    passing any **kwargs here raises TypeError -- confirm intended.
    """
    return MlpMixer(num_classes, 8, patch_size, 512, 256, 2048, image_size, **kwargs)
def build_cifar(batch_size):
    """Build CIFAR-10 train/test dataloaders.

    Requires the DATA environment variable to point at the dataset root
    (raises KeyError otherwise).  Training uses random crop + AutoAugment;
    test only resizes.  Returns (train_dataloader, test_dataloader).
    """
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, pad_if_needed=True),
        transforms.AutoAugment(policy=transforms.AutoAugmentPolicy.CIFAR10),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.Resize(32),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    train_dataset = CIFAR10(root=os.environ['DATA'], train=True, download=True, transform=transform_train)
    test_dataset = CIFAR10(root=os.environ['DATA'], train=False, transform=transform_test)
    # get_dataloader is colossalai's distributed-aware DataLoader factory.
    train_dataloader = get_dataloader(dataset=train_dataset, shuffle=True, batch_size=batch_size, pin_memory=True)
    test_dataloader = get_dataloader(dataset=test_dataset, batch_size=batch_size, pin_memory=True)
    return train_dataloader, test_dataloader
# Train -- global hyper-parameters for the training script below.
BATCH_SIZE = 128
NUM_EPOCHS = 2
NUM_CHUNKS = 1
# Pipeline-parallel config: 2 pipeline stages, 4 micro-batches per step.
CONFIG = dict(NUM_MICRO_BATCHES=4, parallel=dict(pipeline=2))
# CIFAR-10 model settings (used by the mixer_s32 calls in train()).
num_classes = 10
image_size = 32
patch_size = 4
def train():
    """Train an MLP-Mixer on CIFAR-10 under colossalai, optionally with
    pipeline parallelism.

    Expects a colossalai config file via the standard --config CLI argument;
    reads LEARNING_RATE, WEIGHT_DECAY, NUM_EPOCHS and WARMUP_EPOCHS from it.
    """
    # initialize distributed setting
    parser = colossalai.get_default_parser()
    args = parser.parse_args()
    # launch from torch (torchrun-style environment variables)
    colossalai.launch_from_torch(config=args.config)
    # get logger
    logger = get_dist_logger()
    logger.info("initialized distributed environment", ranks=[0])
    # Optionally mirror logs to a file, created by rank 0 only.
    if hasattr(gpc.config, 'LOG_PATH'):
        if gpc.get_global_rank() == 0:
            log_path = gpc.config.LOG_PATH
            if not os.path.exists(log_path):
                os.mkdir(log_path)
            logger.log_to_file(log_path)
    use_pipeline = is_using_pp()
    # pipelinable = PipelinableContext()
    # with pipelinable:
    #     model = mixer_s32(num_classes,image_size,patch_size)
    # pipelinable.to_layer_list()
    # pipelinable.load_policy("uniform")
    # model = pipelinable.partition(1, gpc.pipeline_parallel_size, gpc.get_local_rank(ParallelMode.PIPELINE))
    if use_pipeline:
        # Build the model inside the pipelinable context so colossalai can
        # split its layers uniformly across pipeline stages.
        pipelinable = PipelinableContext()
        with pipelinable:
            model = mixer_s32(num_classes,image_size,patch_size)
        pipelinable.to_layer_list()
        pipelinable.load_policy("uniform")
        model = pipelinable.partition(1, gpc.pipeline_parallel_size, gpc.get_local_rank(ParallelMode.PIPELINE))
    else:
        model = mixer_s32(num_classes,image_size,patch_size)
    # count number of parameters (on this stage only, when pipelined)
    total_numel = 0
    for p in model.parameters():
        total_numel += p.numel()
    if not gpc.is_initialized(ParallelMode.PIPELINE):
        pipeline_stage = 0
    else:
        pipeline_stage = gpc.get_local_rank(ParallelMode.PIPELINE)
    logger.info(f"number of parameters: {total_numel} on pipeline stage {pipeline_stage}")
    # create dataloaders
    train_dataloader, test_dataloader = build_cifar(BATCH_SIZE)
    # create loss function
    criterion = CrossEntropyLoss(label_smoothing=0.1)
    # create optimizer
    optimizer = torch.optim.AdamW(model.parameters(), lr=gpc.config.LEARNING_RATE, weight_decay=gpc.config.WEIGHT_DECAY)
    # create lr scheduler
    lr_scheduler = CosineAnnealingWarmupLR(optimizer=optimizer,
                                           total_steps=gpc.config.NUM_EPOCHS,
                                           warmup_steps=gpc.config.WARMUP_EPOCHS)
    # initialize the training engine (wraps model/optimizer/criterion)
    engine, train_dataloader, test_dataloader, _ = colossalai.initialize(model=model,
                                                                         optimizer=optimizer,
                                                                         criterion=criterion,
                                                                         train_dataloader=train_dataloader,
                                                                         test_dataloader=test_dataloader)
    logger.info("Engine is built", ranks=[0])
    # create timer
    timer = MultiTimer()
    # create trainer
    trainer = Trainer(engine=engine, logger=logger, timer=timer)
    logger.info("Trainer is built", ranks=[0])
    # create a list of useful hooks (metrics, accuracy, loss, throughput, LR)
    hook_list = [
        hooks.LogMetricByEpochHook(logger=logger),
        hooks.LogMetricByStepHook(),
        hooks.AccuracyHook(accuracy_func=Accuracy()),
        hooks.LossHook(),
        hooks.ThroughputHook(),
        hooks.LRSchedulerHook(lr_scheduler=lr_scheduler, by_epoch=True)
    ]
    # start training, evaluating on the test set every epoch
    logger.info("Train start", ranks=[0])
    trainer.fit(
        train_dataloader=train_dataloader,
        test_dataloader=test_dataloader,
        epochs=gpc.config.NUM_EPOCHS,
        hooks=hook_list,
        display_progress=True,
        test_interval=1,
    )
# Script entry point (expected to be launched via torchrun / colossalai).
if __name__ == '__main__':
    train()
|
# Post a fixed message to a hard-coded private Slack channel.
import os
from slackclient import SlackClient
# Token comes from the environment; fails fast (KeyError) if unset.
slack_token = os.environ["SLACK_API_TOKEN"]
sc = SlackClient(slack_token)
# Get the group channels with the following
# for group in sc.api_call("groups.list").get("groups"):
#     print("{}-{}".format(group['id'],group['name']))
sc.api_call(
    "chat.postMessage",
    channel="GDZULG3U0",
    text="Hello from Python! :tada:"
)
|
#!/usr/bin/env python3
# coding: utf8
# Author: Lenz Furrer, 2017
'''
Iteration utilities.
'''
import json
import itertools as it
def iter_chunks(iterable, chunksize):
    '''
    Iterate over chunks of fixed size.

    Each yielded chunk is itself an iterator over at most `chunksize`
    items; iteration stops when the underlying iterable is exhausted.
    '''
    source = iter(iterable)  # make sure items are consumed by islice()
    while True:
        chunk = peekaheaditer(it.islice(source, chunksize))
        try:
            # Peek: consumes the duplicated first element, or raises
            # StopIteration when the source is exhausted.
            next(chunk)
        except StopIteration:
            return
        yield chunk
def peekaheaditer(iterator):
    '''
    Iterator wrapper for yielding the first element twice.
    '''
    sentinel = object()
    first = next(iterator, sentinel)
    if first is sentinel:
        return  # empty input: yield nothing
    yield first
    yield first
    yield from iterator
class CacheOneIter:
    '''
    An iterator which provides a method for repeating the last item.
    '''
    def __init__(self, iterable):
        self._base = iter(iterable)
        self._current = None       # last item handed out
        self._repeat_next = False  # True -> re-yield the cached item once
    def __iter__(self):
        return self
    def __next__(self):
        if not self._repeat_next:
            self._current = next(self._base)
        self._repeat_next = False
        return self._current
    def repeat(self):
        '''
        In the next iteration, yield the same item again.
        If this is called before the first call to __next__,
        the first item will be None.
        '''
        self._repeat_next = True
def json_iterencode(o, check_circular=False, indent=2, **kwargs):
    '''
    Iterate over chunks of serialised JSON.
    Iterators are supported.
    '''
    encoder = json.JSONEncoder(default=jsonable_iterator,
                               check_circular=check_circular,
                               indent=indent,
                               **kwargs)
    return encoder.iterencode(o)
def jsonable_iterator(o):
    '''
    Default function for encoding iterators in JSON.
    Warning: Relies on some implementation details about how
    lists/tuples are serialised.
    '''
    # The JSON encoder handles a list by (1) truth-testing it and
    # (2) iterating it once.  _PhonyList satisfies exactly that contract,
    # so a non-empty iterator can masquerade as a list; an exhausted one
    # is serialised as an empty tuple.
    try:
        head = next(o)
    except AttributeError:
        raise TypeError("{!r} is not JSON serializable".format(o))
    except StopIteration:
        return ()
    return _PhonyList(it.chain([head], o))
class _PhonyList(list):
'''
A wrapper for an iterator claiming to be a list.
'''
def __init__(self, idata):
super().__init__()
self.idata = idata
def __iter__(self):
for elem in self.idata:
yield elem
def __bool__(self):
return True
|
import os
import random
import discord
from discord.ext import commands
from dotenv import load_dotenv
load_dotenv()  # read DISCORD_TOKEN / DISCORD_GUILD from a local .env file
token = os.getenv('DISCORD_TOKEN')  # bot token; None if the env var is missing
GUILD = os.getenv('DISCORD_GUILD')  # name of the guild reported in on_ready
bot = commands.Bot(command_prefix='!')  # commands are invoked as "!<name>"
@bot.event
async def on_ready():
    """Announce which configured guild the bot connected to."""
    # NOTE(review): discord.utils.get returns None when no guild named
    # GUILD is found, in which case guild.name raises — confirm GUILD
    # is always set and correct.
    guild = discord.utils.get(bot.guilds, name=GUILD)
    announcement = (
        f'{bot.user} is connected to the following guild:\n'
        f'{guild.name}(id: {guild.id})'
    )
    print(announcement)
@bot.command(name="bold")
async def bold_one(ctx):
    """Handle the !bold command: log and reply in the invoking channel."""
    print("bold triggered")
    await ctx.send("You are a bold one.")
@bot.event
async def on_message(message):
    """Reply to greetings, ignoring the bot's own messages.

    Fixes: the original line used `||` (not Python) and `str.contains`
    (strings have no such method), which made the file fail to parse.
    """
    if message.author == bot.user:
        return
    content = message.content.lower()
    if "hello there" in content or "hello, there" in content:
        await message.channel.send("General Kenobi. You are a bold one.")
    # Overriding on_message suppresses command dispatch; re-dispatch so
    # commands like !bold still work (see discord.py FAQ).
    await bot.process_commands(message)
bot.run(token)
|
import numpy as np
import scipy.stats
import pandas
class EvalIRModel(object):
    """Evaluate a set of IR models against a set of metrics over queries.

    dictModels: {model name: model with getRanking(text)}
    dictMetrics: {metric name: metric with evalQuery(ranking, query)}
    queries: {query id: query with getTexte()}
    """
    def __init__(self, dictModels, dictMetrics, queries):
        self.dictModels = dictModels
        self.dictMetrics = dictMetrics
        self.queries = queries
    def evalSimple(self, idq, mod, met):
        """Score model `mod` with metric `met` on query `idq`."""
        query = self.queries[idq]
        ranking = self.dictModels[mod].getRanking(query.getTexte())
        return self.dictMetrics[met].evalQuery(ranking, query)
    def evalSingleQuery(self, idq):
        """All scores for one query, as {metric: {model: score}}."""
        return {
            met: {mod: self.evalSimple(idq, mod, met) for mod in self.dictModels}
            for met in self.dictMetrics
        }
    def evalSingleComb(self, mod, met):
        """(mean, std) of one model/metric pair over every query."""
        scores = np.array([self.evalSimple(idq, mod, met) for idq in self.queries])
        return (np.mean(scores), np.std(scores))
    def evalAll(self):
        """(mean%, std%) rounded to 3 decimals for every metric/model pair."""
        results = {}
        for met in self.dictMetrics:
            results[met] = {}
            for mod in self.dictModels:
                scores = np.array(
                    [self.evalSimple(idq, mod, met) for idq in self.queries])
                results[met][mod] = (np.round(np.mean(scores) * 100, 3),
                                     np.round(np.std(scores) * 100, 3))
        return results
    def tTest(self, X, Y, alpha):
        """Two-sample t-test for equal-size samples.

        Returns (statistic, critical value, |statistic| <= critical),
        i.e. the last element is True when the difference is NOT
        significant at level `alpha`.
        """
        n = len(X)
        meanX = np.mean(X)
        meanY = np.mean(Y)
        varX = np.sum((X - meanX) ** 2) / (n - 1)
        varY = np.sum((Y - meanY) ** 2) / (n - 1)
        z = (meanX - meanY) / (np.sqrt((varX + varY) / n))
        df = 2 * n - 2
        critical = scipy.stats.t.ppf(1.0 - alpha / 2, df)
        return z, critical, np.abs(z) <= critical
    def finalEv(self):
        """Print and return evalAll() as a pandas DataFrame."""
        table = pandas.DataFrame.from_dict(self.evalAll())
        print(table)
        return table
from kitsune.karma.actions import KarmaAction
from kitsune.karma.manager import KarmaManager
class TestAction1(KarmaAction):
    """A test action for testing!"""
    action_type = 'test-action-1'  # identifier string for this action type
    default_points = 3  # point value carried by this fixture action
class TestAction2(KarmaAction):
    """Another test action for testing!"""
    action_type = 'test-action-2'  # identifier string for this action type
    default_points = 7  # point value carried by this fixture action
# Reset the manager's registry so tests only see the two fixtures above.
KarmaManager.action_types = {}  # Clear them out for tests.
KarmaManager.register(TestAction1)
KarmaManager.register(TestAction2)
|
from __future__ import absolute_import
import pytest
from sentry.auth.exceptions import IdentityNotValid
from sentry.models import AuthIdentity, AuthProvider
from sentry.testutils import TestCase
from sentry_auth_chy.constants import DATA_VERSION
class ChyOAuth2ProviderTest(TestCase):
    """Tests for the Chy OAuth2 auth provider."""
    def setUp(self):
        # Fix: create the test user before the organization. The original
        # created the org first, so owner=self.user resolved to the base
        # fixture's default user rather than 'foo@example.com'.
        self.user = self.create_user('foo@example.com')
        self.org = self.create_organization(owner=self.user)
        self.auth_provider = AuthProvider.objects.create(
            provider='chy',
            organization=self.org,
        )
        super(ChyOAuth2ProviderTest, self).setUp()
    def test_refresh_identity_without_refresh_token(self):
        """Refreshing an identity that has no refresh_token must fail."""
        auth_identity = AuthIdentity.objects.create(
            auth_provider=self.auth_provider,
            user=self.user,
            data={
                'access_token': 'access_token',
            }
        )
        provider = self.auth_provider.get_provider()
        with pytest.raises(IdentityNotValid):
            provider.refresh_identity(auth_identity)
    def test_handles_multiple_domains(self):
        """A 'domains' list in config is exposed as-is."""
        self.auth_provider.update(
            config={'domains': ['example.com']},
        )
        provider = self.auth_provider.get_provider()
        assert provider.domains == ['example.com']
    def test_handles_legacy_single_domain(self):
        """A legacy singular 'domain' key is normalised to a list."""
        self.auth_provider.update(
            config={'domain': 'example.com'},
        )
        provider = self.auth_provider.get_provider()
        assert provider.domains == ['example.com']
    def test_build_config(self):
        """build_config extracts the domain and stamps the data version."""
        provider = self.auth_provider.get_provider()
        state = {
            'domain': 'example.com',
            'user': {
                'iss': 'accounts.chy.com',
                'at_hash': 'HK6E_P6Dh8Y93mRNtsDB1Q',
                'email_verified': 'true',
                'sub': '10769150350006150715113082367',
                'azp': '1234987819200.apps.chyusercontent.com',
                'email': 'jsmith@example.com',
                'aud': '1234987819200.apps.chyusercontent.com',
                'iat': 1353601026,
                'exp': 1353604926,
                'hd': 'example.com'
            },
        }
        result = provider.build_config(state)
        assert result == {
            'domains': ['example.com'],
            'version': DATA_VERSION,
        }
|
"""Common utils for the project"""
import csv
from datetime import datetime
from functools import wraps
# IATA airport code -> metro-area city code and ISO 3166 country code.
# Satellite airports map to their main city (e.g. DMK -> BKK, HHN -> FRA).
# Fixed corrupted entries: AMS (was 'NL'/'BKK'), BKK (was 'FRA'/'DE'),
# LIS (was 'BKK'/'TH').
AIRPORTS = {
    'AMS': {
        'city_code': 'AMS',
        'country': 'NL'
    },
    'BKK': {
        'city_code': 'BKK',
        'country': 'TH'
    },
    'BUR': {
        'city_code': 'BUR',
        'country': 'US'
    },
    'CNX': {
        'city_code': 'BKK',
        'country': 'TH'
    },
    'CPT': {
        'city_code': 'CPT',
        'country': 'ZA'
    },
    'CGN': {
        'city_code': 'CGN',
        'country': 'DE'
    },
    'CHQ': {
        'city_code': 'HER',
        'country': 'GR'
    },
    'CUN': {
        'city_code': 'CUN',
        'country': 'MX'
    },
    'DMK': {
        'city_code': 'BKK',
        'country': 'TH'
    },
    'DUS': {
        'city_code': 'DUS',
        'country': 'DE'
    },
    'FRA': {
        'city_code': 'FRA',
        'country': 'DE'
    },
    'HAJ': {
        'city_code': 'HAJ',
        'country': 'DE'
    },
    'HAM': {
        'city_code': 'HAM',
        'country': 'DE'
    },
    'HAV': {
        'city_code': 'HAV',
        'country': 'CU'
    },
    'HER': {
        'city_code': 'HER',
        'country': 'GR'
    },
    'HHN': {
        'city_code': 'FRA',
        'country': 'DE'
    },
    'HKT': {
        'city_code': 'HKT',
        'country': 'TH'
    },
    'LAS': {
        'city_code': 'LAS',
        'country': 'US'
    },
    'LIS': {
        'city_code': 'LIS',
        'country': 'PT'
    },
    'LPA': {
        'city_code': 'LPA',
        'country': 'ES'
    },
    'MAD': {
        'city_code': 'MAD',
        'country': 'ES'
    },
    'MAN': {
        'city_code': 'MAN',
        'country': 'UK'
    },
    'MRU': {
        'city_code': 'MRU',
        'country': 'MU'
    },
    'MUC': {
        'city_code': 'MUC',
        'country': 'DE'
    },
    'NRN': {
        'city_code': 'DUS',
        'country': 'DE'
    },
    'OAK': {
        'city_code': 'OAK',
        'country': 'US'
    },
    'PMI': {
        'city_code': 'PMI',
        'country': 'ES'
    },
    'SJO': {
        'city_code': 'SJO',
        'country': 'CR'
    },
    'STR': {
        'city_code': 'STR',
        'country': 'DE'
    },
    'SYQ': {
        'city_code': 'SJO',
        'country': 'CR'
    },
    'WDH': {
        'city_code': 'WDH',
        'country': 'NA'
    }
}
def get_names_values_from_csv(csv_path):
    """Return a tuple of two elements - names and values of csv"""
    with open(csv_path) as handle:
        reader = csv.reader(handle, delimiter=',')
        rows = list(reader)
    # First row is the header; everything else is data.
    return rows[0], rows[1:]
def get_pairs_list_from_names_values(names, values):
    """Pair each row's values with the column names.

    Fixed docstring: the result is a list of per-row pair lists,
    not a single flat list.
    e.g. names = ['a', 'b', 'c']
         values = [[1, 2, 3], [4, 5, 6]]
    return: [[('a', 1), ('b', 2), ('c', 3)],
             [('a', 4), ('b', 5), ('c', 6)]]
    """
    return [list(zip(names, row)) for row in values]
def get_numerical_value(value):
    """Map a raw value onto a number.

    Numeric strings become floats, date strings become timestamps,
    and anything else falls back to its hash.
    """
    if is_number(value):
        return float(value)
    return timestamp_from_date(value) if is_date(value) else hash(value)
def is_date(value):
    """Return if the string value is a date (MM/DD/YY)."""
    try:
        datetime_from_csv_col(value)
    except (ValueError, IndexError, AttributeError):
        # Narrowed from a bare `except:` — parsing fails with ValueError
        # (non-numeric fields / invalid date), IndexError (too few '/'
        # fields) or AttributeError (non-string input). A bare except
        # also swallowed KeyboardInterrupt/SystemExit.
        return False
    return True
def timestamp_from_date(value):
    """Convert a MM/DD/YY string into a POSIX timestamp."""
    parsed = datetime_from_csv_col(value)
    return parsed.timestamp()
def is_number(value):
    """Return if the passed value can be parsed to float."""
    try:
        float(value)
    except (ValueError, TypeError):
        # TypeError added: float(None) or float([]) previously raised
        # through this helper instead of returning False.
        return False
    return True
def get_value_by_key_in_pairs_list(pairs_list, key):
    """Return the value paired with `key`, e.g. [('a', 4), ('b', 3)], 'b' -> 3.

    Raises ValueError when the key is not present.
    """
    for name, value in pairs_list:
        if name == key:
            return value
    raise ValueError('Attribute not found: {}'.format(key))
def empty_string_on_empty_input(func):
    """Decorator to return '' on empty string input.

    Used for columns where data might be missing.
    Only positional arguments are checked; they are assumed to be strings.
    """
    @wraps(func)
    def inner(*args, **kwargs):
        if any(arg.strip() == '' for arg in args):
            return ''
        return func(*args, **kwargs)
    return inner
@empty_string_on_empty_input
def month_day_from_date(date):
    """Day-of-month (as a string) for a MM/DD/YY date from the csv."""
    parsed = datetime_from_csv_col(date)
    return str(parsed.day)
@empty_string_on_empty_input
def weekday_from_date(date):
    """Weekday (string '0'..'6', Monday=0) for a MM/DD/YY date from the csv."""
    parsed = datetime_from_csv_col(date)
    return str(parsed.weekday())
@empty_string_on_empty_input
def city_code_from_airport(airport):
    """Metro-area city code for an IATA airport code."""
    entry = AIRPORTS[airport]
    return entry['city_code']
@empty_string_on_empty_input
def country_from_airport(airport):
    """Country code for an IATA airport code."""
    entry = AIRPORTS[airport]
    return entry['country']
@empty_string_on_empty_input
def days_in_range(start_date, end_date):
    """Number of days (as a string) between two MM/DD/YY csv columns."""
    delta = datetime_from_csv_col(end_date) - datetime_from_csv_col(start_date)
    return str(delta.days)
def datetime_from_csv_col(col):
    """Return datetime from passed csv col in format MM/DD/YY.

    The two-digit year is always interpreted as 20YY.
    """
    fields = col.split('/')
    # Indexing (rather than unpacking) keeps the original tolerance for
    # extra '/'-separated fields.
    return datetime(int("20" + fields[2]), int(fields[0]), int(fields[1]))
def get_relative_error(approximation, real):
    """Relative error |approximation - real| / real."""
    difference = approximation - real
    return abs(difference) / real
def get_relative_error_success_count(relative_errors, threshold=0.05):
    """Count how many errors are at or below `threshold`."""
    return sum(1 for err in relative_errors if err <= threshold)
def get_median_of_list(lst):
    """Return the median element of an already-sorted, non-empty list."""
    size = len(lst)
    middle = size // 2
    if size % 2:
        return lst[middle]
    return (lst[middle - 1] + lst[middle]) / 2
def get_avg_of_list(lst):
    """Arithmetic mean of a non-empty list."""
    total = sum(lst)
    return total / len(lst)
def print_comparable_flights(f1, f2):
    """Prints flights with attributes next to each other.

    Reads the Flight class's name-mangled private pairs list.
    """
    pairs1 = f1._Flight__pairs_list
    pairs2 = f2._Flight__pairs_list
    print('F1 ---- F2')
    for idx, (attr, value1) in enumerate(pairs1):
        print('{}: {} ---- {}'.format(attr, value1, pairs2[idx][1]))
|
"""shmap.py
A class for working with imported shmeppy .json maps
"""
import json
from datetime import datetime
import shmobjs
# Token-related operation types mapped to the keys each operation carries.
TOKEN_OPS = {
    'CreateToken': ['color', 'position', 'tokenId'],
    'MoveToken': ['position', 'tokenId'],
    'UpdateTokenLabel': ['label', 'tokenId'],
    'DeleteToken': ['tokenId'],
    'ResizeToken': ['width', 'height', 'tokenId']
}
class Shmap:
    """represents a shmeppy map
    does not include fog of war

    Holds the raw operations list plus derived lookups:
    tokens ({tokenId: Token}), edges_l / edges_t ({(x, y): color})
    and fills ({(x, y): color}).
    """
    def __init__(self, name, map_dict=None, operations=None):
        # Adopt a raw exported dict wholesale as instance state, or start
        # a fresh map from an (optional) operations list.
        # NOTE(review): assumes map_dict carries the exported-map keys
        # ('exportFormatVersion', 'operations') — confirm against importer.
        if map_dict:
            self.__dict__ = map_dict
        else:
            self.exportFormatVersion = 1
            self.operations = operations if operations else []
        self.name = name
        print(f"New Map: {self.name}")
        # Derive the lookup dicts by replaying the operations once each.
        self.tokens = self.make_tokens(self.operations)
        print("Tokens Loaded")
        self.edges_l, self.edges_t = self.make_edges(self.operations)
        print("Edges Loaded")
        self.fills = self.make_fills(self.operations)
        print("Fills Loaded")
        self.ops_lists = [self.tokens, self.edges_l, self.edges_t, self.fills]
    def json_format(self):
        """Shape the map as a shmeppy-export dict (version + operations)."""
        return {"exportFormatVersion": self.exportFormatVersion, "operations": self.operations}
    def get_bb_dimensions(self):
        """returns absolute dimensions of bounding box"""
        # NOTE(review): requires set_bounding_box() to have run first,
        # otherwise self.bb does not exist.
        (ux, uy), (lx, ly) = self.bb
        return lx-ux+1, ly-uy+1
    def set_bounding_box(self):
        """returns bounding box of a map in squares x,y"""
        print(" -------------------------")
        print("   Getting Bounding Box   ")
        print("  Position and Dimensions ")
        print(" -------------------------")
        x_list, y_list = [], []
        cells = []
        # Collect every occupied coordinate: fills, both edge maps, tokens.
        cells.extend(self.fills.keys())
        cells.extend(self.edges_l.keys())
        cells.extend(self.edges_t.keys())
        for cell_pos in cells:
            xs, ys = cell_pos
            x_list += [xs]
            y_list += [ys]
        for token in self.tokens.values():
            xs, ys = token.get_xys()
            x_list += xs
            y_list += ys
        ul_corner = min(x_list), min(y_list)
        lr_corner = max(x_list), max(y_list)
        self.bb = ul_corner, lr_corner
        return self.bb
    def as_ops(self):
        """returns tokens, edges, fills as a combined list of operations"""
        ops = []
        # add tokens as separate operations
        for token in self.tokens.values():
            ops.append(token.as_op())
        # add edges as single op
        edges_op = {
            "id": "2",
            "type": "UpdateCellEdges",
            "left": self.dict_to_cells(self.edges_l),
            "top": self.dict_to_cells(self.edges_t)
        }
        ops.append(edges_op)
        # add fills as single op
        fills_op = {
            "id": "1",
            "type": "FillCells",
            "cellFills": self.dict_to_cells(self.fills)
        }
        ops.append(fills_op)
        return ops
    def dict_to_cells(self, dict):
        """converts {position: color} dict to list cells [[x,y], color]"""
        # NOTE(review): the parameter shadows the builtin `dict`; renaming
        # would be nicer but could break keyword callers.
        cells = []
        for pos, color in dict.items():
            cell = [list(pos), color]
            cells.append(cell)
        return cells
    def offset_ops(self, offset):
        """offset positions of all operations by offset (x,y)"""
        x, y = offset
        # Tokens offset themselves; the three position-keyed dicts are
        # rebuilt with shifted keys.
        for id, token in self.tokens.items():
            token.update(x=x, y=y)
        new_dict = {}
        for pos in self.edges_l:
            new_pos = pos[0]+x, pos[1]+y
            new_dict.update({new_pos: self.edges_l[pos]})
        self.edges_l = new_dict
        new_dict = {}
        for pos in self.edges_t:
            new_pos = pos[0]+x, pos[1]+y
            new_dict.update({new_pos: self.edges_t[pos]})
        self.edges_t = new_dict
        new_dict = {}
        for pos in self.fills:
            new_pos = pos[0]+x, pos[1]+y
            new_dict.update({new_pos: self.fills[pos]})
        self.fills = new_dict
    def make_tokens(self, op_list, debug=False):
        """returns a dict of {tokenId: token_obj} from op_list
        Uses final position of token, and omits deleted tokens
        """
        token_dict = {}
        for op in op_list:
            if op['type'] == 'CreateToken':
                if debug: print(f"+CREATED TOKEN {op['tokenId']}")
                token_dict.update({op['tokenId']: shmobjs.Token(**op)})
            elif op['type'] == 'DeleteToken':
                token_dict.pop(op['tokenId'])
                if debug: print(f"-DELETED TOKEN {op['tokenId']}")
            elif op['type'] in TOKEN_OPS.keys():
                # Any other token op mutates the already-created token.
                token_obj = token_dict[op['tokenId']]
                if debug: print(f"=TOKEN OP {op['type']}")
                if debug:
                    print(f'  Token properties before:\n  {token_obj.__dict__}')
                token_obj.update(**op)
        return token_dict
    def make_edges(self, op_list):
        """returns edge dict {posiiton:color} from op_list
        Later operations overwrite earlier ones at the same position.
        """
        left_edge_dict = {}
        top_edge_dict = {}
        for op in op_list:
            if op['type'] == 'UpdateCellEdges':
                if op['left'] != []:
                    for position, color in op['left']:
                        left_edge_dict.update({tuple(position): color})
                if op['top'] != []:
                    for position, color in op['top']:
                        top_edge_dict.update({tuple(position): color})
        return left_edge_dict, top_edge_dict
    def make_fills(self, op_list):
        """returns a dict of {position: color} from op_list
        Later fills overwrite earlier ones at the same position.
        """
        cells_dict = {}  # {position: color}
        for op in op_list:
            if op['type'] == 'FillCells':
                for position, color in op['cellFills']:
                    cells_dict.update({tuple(position): color})
        return cells_dict
    def export_to(self, outpath, filename=None, filenamestem=None):
        """exports map to outpath

        Builds a timestamped filename when none is given; returns a
        human-readable success/failure message.
        """
        print(f"\nAttempting Export to:\n  {outpath}\n")
        if not filename:
            stem = filenamestem if filenamestem else self.name
            ts = str(datetime.now())[:-7]
            ts = ts.replace(':', '').replace('-', '').replace(' ', '_')
            filename = f"{stem}_{ts}.json"
        outpath = (outpath / filename).resolve()
        print(f"Full Export Path:\n  {outpath}")
        try:
            with outpath.open(mode='w') as j_file:
                json.dump(self.json_format(), j_file, indent=2)
            result = f"SUCCESS, exported {outpath.name} to:\n  {outpath}"
        except FileNotFoundError:
            result = "Export failed, please enter a valid output destination."
        except SyntaxError as e:
            # NOTE(review): SyntaxError is an odd thing to catch for a
            # filesystem write — OSError seems more likely intended; confirm.
            result = f"Export failed, check that you have entered a valid path name.\n  {e}"
        except PermissionError as e:
            result = f"Export failed due to {e}. Usually this is because you did not specify a filename."
        return result
|
#!/usr/bin/env python3
import asyncio
import requests
import sys
from concurrent.futures import ThreadPoolExecutor
import pyfiglet
#ascii_banner = pyfiglet.figlet_format("S3LURKARN")
url = 'https://s3.amazonaws.com/'  # base S3 endpoint
name = sys.argv[1].strip()  # target bucket name stem (first CLI argument)
# NOTE(review): 'connectors' is unused — separators are hard-coded in
# get_data_asynchronous below.
connectors = ['-','_','']
url = url + name  # NOTE(review): unused after this; probes build their own URLs
def fetch(session, com):
    """Probe one candidate bucket URL; report it when it answers HTTP 200.

    Other status codes (403 secure, 404 missing, 301 redirect, ...) are
    silently ignored.
    """
    with session.get(com) as response:
        if response.status_code == 200:
            print("Public", com)
async def get_data_asynchronous():
    """Build candidate bucket URLs from common suffixes and probe them all.

    Candidates combine the CLI `name` with each word via '-', '.' and '_'
    (both orders), plus the bare name; probes run on a 25-worker pool.
    """
    common = [
        'test','dev','bucket',
        'files','upload','uploads',
        '123','000','1','2',
        'store','001','s3',
        'aws','prd','prod',
        'pub','public','production',
        'development','testing',
        'archive','backup','backups','bak','web',
        'devops','sec','secure',
        'hidden','secret','staging',
        'static','download',
        'admin','administrator','app',
        'assets','common','contract',
        'corp','corperate','directory',
        'docker','dynamo','dynamodb',
        'ec2','export','fileshare',
        'git','github','gitlab',
        'graphql','infra',
        'internal','internal-tools',
        'jira','kubernetes','ldap',
        'mysql','mariadb','packages',
        'postgres','private','share',
        'terrafrom','deploy',
        'www','keys','web'
    ]
    # Same candidate order as before: per separator, name-first then
    # name-last, finally the bare bucket name.
    candidates = []
    for sep in ('-', '.', '_'):
        candidates += ["http://" + name + sep + word + ".s3.amazonaws.com" for word in common]
        candidates += ["http://" + word + sep + name + ".s3.amazonaws.com" for word in common]
    candidates.append("http://" + name + ".s3.amazonaws.com")
    with ThreadPoolExecutor(max_workers=25) as executor:
        with requests.Session() as session:
            loop = asyncio.get_event_loop()
            tasks = [
                loop.run_in_executor(executor, fetch, session, com)
                for com in candidates
            ]
            await asyncio.gather(*tasks)
def main():
    # Drive the async scan to completion on the default event loop.
    # NOTE(review): get_event_loop()/ensure_future is the legacy pattern;
    # asyncio.run(get_data_asynchronous()) is the modern equivalent.
    loop = asyncio.get_event_loop()
    future = asyncio.ensure_future(get_data_asynchronous())
    loop.run_until_complete(future)
main()
|
from django import forms
from django.contrib.contenttypes.models import ContentType
class VoteForm(forms.Form):
    """Hidden-field form carrying a generic vote: the target's content
    type, its object id, and the integer vote value."""
    content_type = forms.ModelChoiceField(widget=forms.HiddenInput, queryset=ContentType.objects.all())
    object_id = forms.IntegerField(widget=forms.HiddenInput)
    vote = forms.IntegerField(widget=forms.HiddenInput)
    # NOTE(review): the disabled clean() below validated that
    # (content_type, object_id) points at a real object — confirm the
    # consuming view performs this check instead.
    # def clean(self):
    #     content_type = self.cleaned_data['content_type']
    #     Model = content_type.model_class()
    #     id = self.cleaned_data['object_id']
    #     try:
    #         obj = Model.objects.get(id=id)
    #     except Model.DoesNotExist:
    #         raise forms.ValidationError("No such %s object with id %s" % (Model, id))
    #     self.cleaned_data['object'] = obj
    #     return self.cleaned_data
|
load("@bazel_gazelle//:deps.bzl", "go_repository")
# Run this to update:
# bazel run //:gazelle -- update-repos -from_file=go.mod -to_macro=deps.bzl%go_dependencies
def go_dependencies():
    """Declares external Go module repositories.

    Generated by `bazel run //:gazelle -- update-repos` (see comment
    above); edit go.mod and regenerate rather than editing by hand.
    """
    go_repository(
        name = "co_honnef_go_tools",
        importpath = "honnef.co/go/tools",
        sum = "h1:W18jzjh8mfPez+AwGLxmOImucz/IFjpNlrKVnaj2YVc=",
        version = "v0.0.1-2020.1.6",
    )
    go_repository(
        name = "com_github_burntsushi_toml",
        importpath = "github.com/BurntSushi/toml",
        sum = "h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=",
        version = "v0.3.1",
    )
    go_repository(
        name = "com_github_google_renameio",
        importpath = "github.com/google/renameio",
        sum = "h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=",
        version = "v0.1.0",
    )
    go_repository(
        name = "com_github_kisielk_gotool",
        importpath = "github.com/kisielk/gotool",
        sum = "h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=",
        version = "v1.0.0",
    )
    go_repository(
        name = "com_github_kr_pretty",
        importpath = "github.com/kr/pretty",
        sum = "h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=",
        version = "v0.1.0",
    )
    go_repository(
        name = "com_github_kr_pty",
        importpath = "github.com/kr/pty",
        sum = "h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=",
        version = "v1.1.1",
    )
    go_repository(
        name = "com_github_kr_text",
        importpath = "github.com/kr/text",
        sum = "h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=",
        version = "v0.1.0",
    )
    go_repository(
        name = "com_github_rogpeppe_go_internal",
        importpath = "github.com/rogpeppe/go-internal",
        sum = "h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk=",
        version = "v1.3.0",
    )
    go_repository(
        name = "com_github_yuin_goldmark",
        importpath = "github.com/yuin/goldmark",
        sum = "h1:ruQGxdhGHe7FWOJPT0mKs5+pD2Xs1Bm/kdGlHO04FmM=",
        version = "v1.2.1",
    )
    go_repository(
        name = "in_gopkg_check_v1",
        importpath = "gopkg.in/check.v1",
        sum = "h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=",
        version = "v1.0.0-20180628173108-788fd7840127",
    )
    go_repository(
        name = "in_gopkg_errgo_v2",
        importpath = "gopkg.in/errgo.v2",
        sum = "h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=",
        version = "v2.1.0",
    )
    go_repository(
        name = "org_golang_x_crypto",
        importpath = "golang.org/x/crypto",
        sum = "h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=",
        version = "v0.0.0-20200622213623-75b288015ac9",
    )
    go_repository(
        name = "org_golang_x_mod",
        importpath = "golang.org/x/mod",
        sum = "h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=",
        version = "v0.3.0",
    )
    go_repository(
        name = "org_golang_x_net",
        importpath = "golang.org/x/net",
        sum = "h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI=",
        version = "v0.0.0-20201021035429-f5854403a974",
    )
    go_repository(
        name = "org_golang_x_sync",
        importpath = "golang.org/x/sync",
        sum = "h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=",
        version = "v0.0.0-20201020160332-67f06af15bc9",
    )
    go_repository(
        name = "org_golang_x_sys",
        importpath = "golang.org/x/sys",
        sum = "h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA=",
        version = "v0.0.0-20200930185726-fdedc70b468f",
    )
    go_repository(
        name = "org_golang_x_text",
        importpath = "golang.org/x/text",
        sum = "h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=",
        version = "v0.3.3",
    )
    go_repository(
        name = "org_golang_x_tools",
        importpath = "golang.org/x/tools",
        sum = "h1:rbvTkL9AkFts1cgI78+gG6Yu1pwaqX6hjSJAatB78E4=",
        version = "v0.0.0-20201023174141-c8cfbd0f21e6",
    )
    go_repository(
        name = "org_golang_x_xerrors",
        importpath = "golang.org/x/xerrors",
        sum = "h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=",
        version = "v0.0.0-20200804184101-5ec99f83aff1",
    )
|
import numpy as np
from PuzzleLib.Backend import gpuarray
from PuzzleLib.Containers.Sequential import Sequential
from PuzzleLib.Modules.Conv2D import Conv2D
from PuzzleLib.Modules.Activation import Activation, relu
from PuzzleLib.Modules.MaxPool2D import MaxPool2D
from PuzzleLib.Modules.AvgPool2D import AvgPool2D
from PuzzleLib.Modules.Flatten import Flatten
from PuzzleLib.Modules.SoftMax import SoftMax
def loadNiNImageNet(modelpath, poolmode="max", actInplace=False, initscheme="none", name="CaffeNet"):
	"""Build the Network-in-Network ImageNet model; optionally load weights.

	modelpath: path to saved weights, or None to leave the net untrained.
	poolmode: "max" or "avg" pooling between the conv stacks.
	actInplace: whether ReLU activations run in place.
	initscheme: weight-init scheme passed to every Conv2D.
	name: name of the Sequential container.
	NOTE(review): the default name "CaffeNet" looks copy-pasted from
	another loader — "NiN" seems intended, but callers may rely on it.

	Raises ValueError for an unsupported poolmode.
	"""
	if poolmode == "avg":
		pool = AvgPool2D
	elif poolmode == "max":
		pool = MaxPool2D
	else:
		raise ValueError("Unsupported pool mode")
	net = Sequential(name=name)
	# Stack 1: 11x11 conv + two 1x1 "cccp" convs (the NiN mlpconv idea).
	net.append(Conv2D(3, 96, 11, stride=4, initscheme=initscheme, name="conv1"))
	net.append(Activation(relu, inplace=actInplace, name="relu0"))
	net.append(Conv2D(96, 96, 1, stride=1, initscheme=initscheme, name="cccp1"))
	net.append(Activation(relu, inplace=actInplace, name="relu1"))
	net.append(Conv2D(96, 96, 1, stride=1, initscheme=initscheme, name="cccp2"))
	net.append(Activation(relu, inplace=actInplace, name="relu2"))
	net.append(pool(3, 2, name="pool1"))
	# Stack 2.
	net.append(Conv2D(96, 256, 5, stride=1, pad=2, initscheme=initscheme, name="conv2"))
	net.append(Activation(relu, inplace=actInplace, name="relu3"))
	net.append(Conv2D(256, 256, 1, stride=1, initscheme=initscheme, name="cccp3"))
	net.append(Activation(relu, inplace=actInplace, name="relu4"))
	net.append(Conv2D(256, 256, 1, stride=1, initscheme=initscheme, name="cccp4"))
	net.append(Activation(relu, inplace=actInplace, name="relu5"))
	net.append(pool(3, 2, name="pool2"))
	# Stack 3.
	net.append(Conv2D(256, 384, 3, stride=1, pad=1, initscheme=initscheme, name="conv3"))
	net.append(Activation(relu, inplace=actInplace, name="relu6"))
	net.append(Conv2D(384, 384, 1, stride=1, initscheme=initscheme, name="cccp5"))
	net.append(Activation(relu, inplace=actInplace, name="relu7"))
	net.append(Conv2D(384, 384, 1, stride=1, initscheme=initscheme, name="cccp6"))
	net.append(Activation(relu, inplace=actInplace, name="relu8"))
	net.append(pool(3, 2, name="pool3"))
	# Stack 4: 1000-way 1x1 conv + global average pooling replaces FC layers.
	net.append(Conv2D(384, 1024, 3, stride=1, pad=1, initscheme=initscheme, name="conv4-1024"))
	net.append(Activation(relu, inplace=actInplace, name="relu9"))
	net.append(Conv2D(1024, 1024, 1, stride=1, initscheme=initscheme, name="cccp7-1024"))
	net.append(Activation(relu, inplace=actInplace, name="relu10"))
	net.append(Conv2D(1024, 1000, 1, stride=1, initscheme=initscheme, name="cccp8-1024"))
	net.append(Activation(relu, inplace=actInplace, name="relu11"))
	net.append(AvgPool2D(5, 1, name="pool4"))
	net.append(Flatten())
	net.append(SoftMax())
	if modelpath is not None:
		net.load(modelpath)
	return net
def unittest():
	"""Smoke test: build an untrained NiN and run one forward pass.

	NOTE(review): the name shadows the stdlib `unittest` module.
	"""
	nin = loadNiNImageNet(None, initscheme="gaussian")
	data = gpuarray.to_gpu(np.random.randn(1, 3, 224, 224).astype(np.float32))
	nin(data)
	del nin
	gpuarray.memoryPool.freeHeld()
if __name__ == "__main__":
	unittest()
|
from __future__ import unicode_literals
import json
import mock
import time
from base64 import b64encode
from copy import deepcopy
from unittest import TestCase
from oauthlib.common import urlencode
from oauthlib.oauth2 import TokenExpiredError, OAuth2Error
from oauthlib.oauth2 import MismatchingStateError
from oauthlib.oauth2 import WebApplicationClient, MobileApplicationClient
from oauthlib.oauth2 import LegacyApplicationClient, BackendApplicationClient
from requests_oauthlib import OAuth2Session, TokenUpdated
# Frozen timestamp so token-expiry comparisons are deterministic in tests.
fake_time = time.time()


def fake_token(token):
    """Build a fake `Session.send` replacement that always answers with
    `token` serialised as the response JSON body."""
    def fake_send(r, **kwargs):
        response = mock.MagicMock()
        response.text = json.dumps(token)
        return response
    return fake_send
class OAuth2SessionTest(TestCase):
def setUp(self):
# For python 2.6
if not hasattr(self, 'assertIn'):
self.assertIn = lambda a, b: self.assertTrue(a in b)
self.token = {
'token_type': 'Bearer',
'access_token': 'asdfoiw37850234lkjsdfsdf',
'refresh_token': 'sldvafkjw34509s8dfsdf',
'expires_in': '3600',
'expires_at': fake_time + 3600,
}
self.client_id = 'foo'
self.clients = [
WebApplicationClient(self.client_id, code='asdf345xdf'),
LegacyApplicationClient(self.client_id),
BackendApplicationClient(self.client_id),
]
self.all_clients = self.clients + [MobileApplicationClient(self.client_id)]
def test_add_token(self):
token = 'Bearer ' + self.token['access_token']
def verifier(r, **kwargs):
auth_header = r.headers.get(str('Authorization'), None)
self.assertEqual(auth_header, token)
resp = mock.MagicMock()
resp.cookes = []
return resp
for client in self.all_clients:
auth = OAuth2Session(client=client, token=self.token)
auth.send = verifier
auth.get('https://i.b')
def test_authorization_url(self):
url = 'https://example.com/authorize?foo=bar'
web = WebApplicationClient(self.client_id)
s = OAuth2Session(client=web)
auth_url, state = s.authorization_url(url)
self.assertIn(state, auth_url)
self.assertIn(self.client_id, auth_url)
self.assertIn('response_type=code', auth_url)
mobile = MobileApplicationClient(self.client_id)
s = OAuth2Session(client=mobile)
auth_url, state = s.authorization_url(url)
self.assertIn(state, auth_url)
self.assertIn(self.client_id, auth_url)
self.assertIn('response_type=token', auth_url)
@mock.patch("time.time", new=lambda: fake_time)
def test_refresh_token_request(self):
self.expired_token = dict(self.token)
self.expired_token['expires_in'] = '-1'
del self.expired_token['expires_at']
def fake_refresh(r, **kwargs):
if "/refresh" in r.url:
self.assertNotIn("Authorization", r.headers)
resp = mock.MagicMock()
resp.text = json.dumps(self.token)
return resp
# No auto refresh setup
for client in self.clients:
auth = OAuth2Session(client=client, token=self.expired_token)
self.assertRaises(TokenExpiredError, auth.get, 'https://i.b')
# Auto refresh but no auto update
for client in self.clients:
auth = OAuth2Session(client=client, token=self.expired_token,
auto_refresh_url='https://i.b/refresh')
auth.send = fake_refresh
self.assertRaises(TokenUpdated, auth.get, 'https://i.b')
# Auto refresh and auto update
def token_updater(token):
self.assertEqual(token, self.token)
for client in self.clients:
auth = OAuth2Session(client=client, token=self.expired_token,
auto_refresh_url='https://i.b/refresh',
token_updater=token_updater)
auth.send = fake_refresh
auth.get('https://i.b')
def fake_refresh_with_auth(r, **kwargs):
if "/refresh" in r.url:
self.assertIn("Authorization", r.headers)
encoded = b64encode(b"foo:bar")
content = (b"Basic " + encoded).decode('latin1')
self.assertEqual(r.headers["Authorization"], content)
resp = mock.MagicMock()
resp.text = json.dumps(self.token)
return resp
for client in self.clients:
auth = OAuth2Session(client=client, token=self.expired_token,
auto_refresh_url='https://i.b/refresh',
token_updater=token_updater)
auth.send = fake_refresh_with_auth
auth.get('https://i.b', client_id='foo', client_secret='bar')
@mock.patch("time.time", new=lambda: fake_time)
def test_token_from_fragment(self):
mobile = MobileApplicationClient(self.client_id)
response_url = 'https://i.b/callback#' + urlencode(self.token.items())
auth = OAuth2Session(client=mobile)
self.assertEqual(auth.token_from_fragment(response_url), self.token)
@mock.patch("time.time", new=lambda: fake_time)
def test_fetch_token(self):
url = 'https://example.com/token'
for client in self.clients:
auth = OAuth2Session(client=client, token=self.token)
auth.send = fake_token(self.token)
self.assertEqual(auth.fetch_token(url), self.token)
error = {'error': 'invalid_request'}
for client in self.clients:
auth = OAuth2Session(client=client, token=self.token)
auth.send = fake_token(error)
self.assertRaises(OAuth2Error, auth.fetch_token, url)
def test_cleans_previous_token_before_fetching_new_one(self):
"""Makes sure the previous token is cleaned before fetching a new one.
The reason behind it is that, if the previous token is expired, this
method shouldn't fail with a TokenExpiredError, since it's attempting
to get a new one (which shouldn't be expired).
"""
new_token = deepcopy(self.token)
past = time.time() - 7200
now = time.time()
self.token['expires_at'] = past
new_token['expires_at'] = now + 3600
url = 'https://example.com/token'
with mock.patch('time.time', lambda: now):
for client in self.clients:
auth = OAuth2Session(client=client, token=self.token)
auth.send = fake_token(new_token)
self.assertEqual(auth.fetch_token(url), new_token)
def test_web_app_fetch_token(self):
# Ensure the state parameter is used, see issue #105.
client = OAuth2Session('foo', state='somestate')
self.assertRaises(MismatchingStateError, client.fetch_token,
'https://i.b/token',
authorization_response='https://i.b/no-state?code=abc')
def test_client_id_proxy(self):
sess = OAuth2Session('test-id')
self.assertEqual(sess.client_id, 'test-id')
sess.client_id = 'different-id'
self.assertEqual(sess.client_id, 'different-id')
sess._client.client_id = 'something-else'
self.assertEqual(sess.client_id, 'something-else')
del sess.client_id
self.assertIsNone(sess.client_id)
def test_access_token_proxy(self):
sess = OAuth2Session('test-id')
self.assertIsNone(sess.access_token)
sess.access_token = 'test-token'
self.assertEqual(sess.access_token, 'test-token')
sess._client.access_token = 'different-token'
self.assertEqual(sess.access_token, 'different-token')
del sess.access_token
self.assertIsNone(sess.access_token)
def test_token_proxy(self):
token = {
'access_token': 'test-access',
}
sess = OAuth2Session('test-id', token=token)
self.assertEqual(sess.access_token, 'test-access')
self.assertEqual(sess.token, token)
token['access_token'] = 'something-else'
sess.token = token
self.assertEqual(sess.access_token, 'something-else')
self.assertEqual(sess.token, token)
sess._client.access_token = 'different-token'
token['access_token'] = 'different-token'
self.assertEqual(sess.access_token, 'different-token')
self.assertEqual(sess.token, token)
# can't delete token attribute
with self.assertRaises(AttributeError):
del sess.token
def test_authorized_false(self):
    """A freshly created session without a token is not authorized."""
    session = OAuth2Session('foo')
    self.assertFalse(session.authorized)
# `fake_time` is a module-level constant (defined outside this view —
# presumably a fixed epoch value) so token-expiry arithmetic is deterministic.
@mock.patch("time.time", new=lambda: fake_time)
def test_authorized_true(self):
    """Fetching a token flips `authorized` from False to True for every client."""
    def fake_token(token):
        # Build a fake transport `send` that always answers with `token`
        # serialized as JSON, so no real HTTP request is made.
        def fake_send(r, **kwargs):
            resp = mock.MagicMock()
            resp.text = json.dumps(token)
            return resp
        return fake_send
    url = 'https://example.com/token'
    for client in self.clients:
        sess = OAuth2Session(client=client)
        sess.send = fake_token(self.token)  # bypass the network layer
        self.assertFalse(sess.authorized)
        sess.fetch_token(url)
        self.assertTrue(sess.authorized)
|
# Copyright (c) Ye Liu. All rights reserved.
from .io import dump, dumps, list_from_file, load, loads, open
__all__ = ['dump', 'dumps', 'list_from_file', 'load', 'loads', 'open']
|
"""SELU Activation Function"""
from torch.nn import SELU as _SELU
from neuralpy.utils import CustomLayer
class SELU(CustomLayer):
    """Scaled Exponential Linear Unit activation layer.

    Thin NeuralPy wrapper around ``torch.nn.SELU``.  For the underlying
    operation see https://pytorch.org/docs/stable/nn.html#selu

    Supported Arguments
        name=None: (String) Name of the activation function layer,
        if not provided then automatically calculates a unique name for the layer
    """

    def __init__(self, name=None):
        """Create the SELU activation layer.

        Supported Arguments
            name=None: (String) Optional layer name; when omitted a unique
            name is generated automatically by the framework
        """
        super().__init__(_SELU, "SELU", layer_name=name)

    def set_input_dim(self, prev_input_dim, layer_type):
        """Receive the previous layer's output shape (unused here).

        Invoked by NeuralPy model builders; activation layers are
        shape-agnostic, so nothing is stored and None is returned.
        No need to call this method manually.
        """
        # Activations do not require n_input, hence None.
        return None

    def get_layer(self):
        """Return the layer-details mapping consumed by NeuralPy model builders."""
        # No input dimension; SELU is applied out-of-place.
        return self._get_layer_details(None, {"inplace": False})
|
import datetime as dt  # BUG FIX: `dt` was used below but never imported

import pandas as pd

df = pd.read_csv('CFR_WorldData.csv')

# Convert from columns:
#   1. Entity
#   2. Code
#   3. Date
#   4. Total confirmed deaths due to COVID-19 (deaths)
#   5. Total confirmed cases of COVID-19 (cases)
# into per-country tables of:
#   1. date
#   2. total cases
#   3. total deaths
filt = df['code'] == 'AFG'
# BUG FIX: reset the index after filtering — the original `d['date'][i]`
# used label-based lookup with positional i, which only works by accident
# when the filtered rows happen to start at label 0.
d = df[filt].reset_index(drop=True)
tot = []
cfr = []
startday = dt.datetime(year=2020, month=1, day=1)
for i in range(len(d)):
    # Dates in the CSV look like "Jan 1, 2020" (format string below).
    date = dt.datetime.strptime(d['date'].iloc[i], '%b %d, %Y')
    # tot.append(d['cases'].iloc[i])
    # cfr.append(d['deaths'].iloc[i] / max(1, d['cases'].iloc[i]))
|
import unittest
import os
import foowise.test
def additional_tests():
    """Discover and return every Test_*.py suite located next to this file."""
    suite_dir = os.path.dirname(os.path.abspath(__file__))
    return unittest.TestLoader().discover(suite_dir, pattern='Test_*.py')
|
import binascii;
"""
def bin2hex(bin_prog):
hex_prog = "";
j = 0;
for i in range(int(len(bin_prog)/8)):
hex_prog = hex_prog + (binascii.b2a_hex(bin_prog[(i*8)+4]));
hex_prog = hex_prog + (binascii.b2a_hex(bin_prog[(i*8)+5]));
hex_prog = hex_prog + (binascii.b2a_hex(bin_prog[(i*8)+6]));
hex_prog = hex_prog + (binascii.b2a_hex(bin_prog[(i*8)+7]));
hex_prog = hex_prog + (binascii.b2a_hex(bin_prog[(i*8)+0]));
hex_prog = hex_prog + (binascii.b2a_hex(bin_prog[(i*8)+1]));
hex_prog = hex_prog + (binascii.b2a_hex(bin_prog[(i*8)+2]));
hex_prog = hex_prog + (binascii.b2a_hex(bin_prog[(i*8)+3]));
hex_prog = hex_prog + "\n";
j = j + 1;
if((len(bin_prog)%8 != 0)):
hex_prog = hex_prog + "00";
hex_prog = hex_prog + "00";
hex_prog = hex_prog + "00";
hex_prog = hex_prog + "00";
hex_prog = hex_prog + (binascii.b2a_hex(bin_prog[(j*8)+0]));
hex_prog = hex_prog + (binascii.b2a_hex(bin_prog[(j*8)+1]));
hex_prog = hex_prog + (binascii.b2a_hex(bin_prog[(j*8)+2]));
hex_prog = hex_prog + (binascii.b2a_hex(bin_prog[(j*8)+3]));
hex_prog = hex_prog + "\n";
return hex_prog;
"""
def bin2hex(bin_prog):
    """Convert a binary program image to hex text, one 64-bit word per line.

    Each 8-byte word is emitted byte-reversed (the original wrote bytes
    7,6,...,0), giving 16 hex digits per line.  A trailing partial word is
    zero-padded up to 8 bytes before reversal, so missing bytes render as
    '00' in the high positions, matching the original's "00000000" padding.

    BUG FIX: the original concatenated ``binascii.b2a_hex`` results (bytes
    in Python 3) onto a str, which raises TypeError; it also indexed 4 fixed
    tail bytes and crashed (IndexError) on tails shorter than 4 bytes.

    Args:
        bin_prog: bytes-like program image.

    Returns:
        str: newline-terminated hex lines ('' for empty input).
    """
    bin_prog = bytes(bin_prog)
    lines = []
    n_words = len(bin_prog) // 8
    for i in range(n_words):
        word = bin_prog[i * 8:(i + 1) * 8]
        lines.append(binascii.hexlify(word[::-1]).decode('ascii'))
    rem = len(bin_prog) % 8
    if rem != 0:
        # Pad the tail to a full word; zeros end up in the high byte slots
        # after reversal.
        tail = bin_prog[n_words * 8:] + b'\x00' * (8 - rem)
        lines.append(binascii.hexlify(tail[::-1]).decode('ascii'))
    return ''.join(line + '\n' for line in lines)
|
import cv2
import time
import imutils
cam = cv2.VideoCapture(0)
time.sleep(1)  # give the camera a moment to warm up

firstFrame = None  # blurred grayscale reference frame (static background)
area = 500         # minimum contour area (px) to count as motion

while True:
    _, img = cam.read()
    text = "Normal"
    img = imutils.resize(img, width=500)
    grayImg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Blur to suppress per-pixel sensor noise before differencing.
    gaussianImg = cv2.GaussianBlur(grayImg, (21, 21), 0)
    if firstFrame is None:
        # First iteration: remember the background and skip detection.
        firstFrame = gaussianImg
        continue
    # BUG FIX: diff the blurred current frame against the blurred reference.
    # The original compared `firstFrame` (blurred) against `grayImg`
    # (un-blurred), reintroducing the noise the Gaussian blur removes.
    imgDiff = cv2.absdiff(firstFrame, gaussianImg)
    threshImg = cv2.threshold(imgDiff, 25, 255, cv2.THRESH_BINARY)[1]
    threshImg = cv2.dilate(threshImg, None, iterations=2)
    cnts = cv2.findContours(threshImg.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    for c in cnts:
        if cv2.contourArea(c) < area:
            continue
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text = "Moving Object detected"
        print(text)
    cv2.putText(img, text, (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.imshow("cameraFeed", img)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break

cam.release()
cv2.destroyAllWindows()
|
from algorithms import *
from utils import DataUtils
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # Load base-station coordinates and the daily usage table.
    data = DataUtils('data/基站经纬度.csv', 'data/上网信息输出表(日表)6月15号之后.csv')
    # Run the MIP-based placement and report both objectives.
    placer = MIPServerPlacer(data.base_stations, data.distances)
    placer.place_server(10, 2)
    print(placer.objective_latency(), placer.objective_workload())
    # Alternative strategies, kept for comparison runs:
    # kmeans_placer = KMeansServerPlacement(data.base_stations, data.distances)
    # kmeans_placer.place_server(300, 30)
    # print(kmeans_placer.objective_latency(), kmeans_placer.objective_workload())
    # top_k_placer = TopKServerPlacement(data.base_stations, data.distances)
    # top_k_placer.place_server(300, 30)
    # print(top_k_placer.objective_latency(), top_k_placer.objective_workload())
    # random_placer = RandomServerPlacement(data.base_stations, data.distances)
    # random_placer.place_server(300, 30)
    # print(random_placer.objective_latency(), random_placer.objective_workload())
|
#!/usr/bin/env python
"""Package metadata for the OWLS-Autonomy distribution."""
# BUG FIX: use setuptools.setup instead of distutils.core.setup —
# distutils silently ignores setuptools-only keywords such as
# install_requires, python_requires and long_description_content_type.
from setuptools import find_packages, setup

# Execute src/version.py in an isolated namespace to pick up __version__
# without importing the package (which may have unmet dependencies).
version = {}
with open('src/version.py', 'r') as fp:
    exec(fp.read(), version)

with open('requirements.txt', 'r') as fp:
    required = fp.read().splitlines()

with open('README.md', 'r') as fh:
    long_description = fh.read()

setup(name='OWLS-Autonomy',
      version=version['__version__'],
      description='Autonomy tools for the Ocean Worlds Life Surveyor (OWLS) instrument suite.',
      long_description=long_description,
      long_description_content_type="text/markdown",
      author='Machine Learning and Instrument Autonomy (MLIA) group at JPL',
      author_email='',
      url='https://github.com/JPLMLIA/OWLS-Autonomy',
      package_dir={'': 'src'},
      packages=find_packages(),
      install_requires=required,
      python_requires='>=3.6',
      )
from convs.condconv import *
from convs.cc_inf import *
from convs.dyconv import *
from convs.dyres_conv import *
from convs.dyres_inf import *
from convs.ddsnet import *
from convs.dds_exp import *
from convs.dychannel import *
from flops_counter import get_model_complexity_info
import torch
x = torch.randn(1, 16, 32, 32)
channels = x.size(1)

# (label, constructor, extra kwargs) for every convolution variant measured.
specs = [
    ('--CondConv', CondConv, {}),
    ('--DyConv', DyConv, {}),
    ('--DyResA', DyResConv_Inf, {'mode': 'A'}),
    ('--DyResB', DyResConv_Inf, {'mode': 'B'}),
    # DDSConv is skipped for now, only the experimental variant is measured.
    ('--DDS', DDSConv_Exp, {}),
]

# Build and measure each variant in turn, printing its FLOPs and parameters.
for label, conv_cls, extra in specs:
    net = conv_cls(channels, channels, 3, num_experts=4, **extra)
    flops, params = get_model_complexity_info(net, (channels, 32, 32), as_strings=True, print_per_layer_stat=False)
    print('{}\nFlops: {}\nParams: {}'.format(label, flops, params))
from pyroot_easiroc.MuonTrackReconstructor import MuonTrackReconstructor
from tqdm import tqdm
# Input ROOT file for one EASIROC run and a uniform per-channel threshold
# list (64 channels at 1200 — presumably ADC counts; confirm against
# MuonTrackReconstructor's expectations).
rootfile_path = "/data/hamada/easiroc_data/run017.root"
threshold_s = [1200] * 64
mtr = MuonTrackReconstructor(rootfile_path, threshold_s)
# NOTE(review): the pipeline below calls underscore-prefixed (private)
# methods of MuonTrackReconstructor before the public reconstruction step;
# verify this is the intended calling sequence.
mtr._pre_cut_threshold_layer()
mtr._multi_hit()
mtr._under_layer_limit()
mtr.hit_muon_straight()
# Display the first reconstructed muon event.
i_event = 0
mtr.show(mtr._hit_muon_index[i_event])
import argparse
import os
import anndata
import numpy as np
import pandas as pd
import scanpy as sc
from scipy import sparse
import trvae
# Run from the tests/ directory so the relative ../data and ../models
# paths used throughout this script resolve correctly.
if not os.getcwd().endswith("tests"):
    os.chdir("./tests")
from matplotlib import pyplot as plt
FASHION_MNIST_CLASS_DICT = {
0: "T-shirt or top",
1: "Trouser",
2: "Pullover",
3: "Dress",
4: "Coat",
5: "Sandal",
6: "Shirt",
7: "Sneaker",
8: "Bag",
9: "Ankle boot"
}
DATASETS = {
"CelebA": {"name": 'celeba', "gender": "Male", 'attribute': "Smiling", 'source_key': -1, "target_key": 1,
"width": 64, 'height': 64, "n_channels": 3},
"MNIST": {"name": 'mnist', "source_key": 1, "target_key": 7,
"train_digits": [], "test_digits": [],
"width": 28, 'height': 28, "n_channels": 1},
"ThinMNIST": {"name": 'thin_mnist', "source_key": "normal", "target_key": "thin",
'train_digits': [1, 3, 6, 7], 'test_digits': [0, 2, 4, 5, 8, 9],
"width": 28, 'height': 28,
"n_channels": 1},
"ThickMNIST": {"name": 'thick_mnist', "source_key": "normal", "target_key": "thick",
'train_digits': [1, 3, 6, 7], 'test_digits': [0, 2, 4, 5, 8, 9],
"width": 28, 'height': 28,
"n_channels": 1},
"FashionMNIST": {"name": "fashion_mnist", "source_key": FASHION_MNIST_CLASS_DICT[0],
"target_key": FASHION_MNIST_CLASS_DICT[1],
"width": 28, 'height': 28, "n_channels": 1},
# "Horse2Zebra": {"name": "h2z", "source_key": "horse", "target_key": "zebra", "size": 256, "n_channels": 3,
# "resize": 64},
# "Apple2Orange": {"name": "a2o", "source_key": "apple", "target_key": "orange", "size": 256, "n_channels": 3,
# "resize": 64}
}
def train_network(data_dict=None,
                  z_dim=100,
                  mmd_dimension=256,
                  alpha=0.001,
                  beta=100,
                  gamma=1.0,
                  kernel='multi-scale-rbf',
                  n_epochs=500,
                  batch_size=512,
                  dropout_rate=0.2,
                  arch_style=1,
                  preprocess=True,
                  learning_rate=0.001,
                  gpus=1,
                  max_size=50000,
                  early_stopping_limit=50,
                  ):
    """Train a DCtrVAE on the image dataset described by ``data_dict``.

    Loads the dataset (CelebA via the trvae helper, otherwise an .h5ad file),
    flattens source/target condition images into one AnnData with a binary
    ``condition`` column (0 = source, 1 = target), splits 85/15 into
    train/validation, optionally holds out digits/attributes, then trains
    and saves the network under ../models/RCCVAE/.

    Args:
        data_dict: one entry of DATASETS (name, keys, image geometry, ...).
        z_dim, mmd_dimension, alpha, beta, gamma, kernel, arch_style,
        dropout_rate, learning_rate: model/loss hyper-parameters passed
            through to trvae.archs.DCtrVAE.
        n_epochs, batch_size, early_stopping_limit: training schedule.
        preprocess: when True, scale pixel values into [0, 1] (divide by 255).
        gpus: number of GPUs handed to the model.
        max_size: cap on the number of CelebA images loaded.
    """
    data_name = data_dict['name']
    source_key = data_dict.get('source_key', None)
    target_key = data_dict.get('target_key', None)
    img_width = data_dict.get("width", None)
    img_height = data_dict.get("height", None)
    n_channels = data_dict.get("n_channels", None)
    train_digits = data_dict.get("train_digits", None)
    test_digits = data_dict.get("test_digits", None)
    attribute = data_dict.get('attribute', None)
    if data_name == "celeba":
        # CelebA is loaded through the trvae helper (restores a cached copy).
        gender = data_dict.get('gender', None)
        data = trvae.prepare_and_load_celeba(file_path="../data/celeba/img_align_celeba.zip",
                                             attr_path="../data/celeba/list_attr_celeba.txt",
                                             landmark_path="../data/celeba/list_landmarks_align_celeba.txt",
                                             gender=gender,
                                             attribute=attribute,
                                             max_n_images=max_size,
                                             img_width=img_width,
                                             img_height=img_height,
                                             restore=True,
                                             save=True)
        if sparse.issparse(data.X):
            data.X = data.X.A  # densify for reshaping below
        source_images = data.copy()[data.obs['condition'] == source_key].X
        target_images = data.copy()[data.obs['condition'] == target_key].X
        source_images = np.reshape(source_images, (-1, img_width, img_height, n_channels))
        target_images = np.reshape(target_images, (-1, img_width, img_height, n_channels))
        if preprocess:
            source_images /= 255.0
            target_images /= 255.0
    else:
        # Every other dataset ships as a preprocessed AnnData file.
        data = sc.read(f"../data/{data_name}/{data_name}.h5ad")
        source_images = data.copy()[data.obs["condition"] == source_key].X
        target_images = data.copy()[data.obs["condition"] == target_key].X
        source_images = np.reshape(source_images, (-1, img_width, img_height, n_channels))
        target_images = np.reshape(target_images, (-1, img_width, img_height, n_channels))
        if preprocess:
            source_images /= 255.0
            target_images /= 255.0
    # Stack source (label 0) and target (label 1) into one flat matrix.
    source_labels = np.zeros(shape=source_images.shape[0])
    target_labels = np.ones(shape=target_images.shape[0])
    train_labels = np.concatenate([source_labels, target_labels], axis=0)
    train_images = np.concatenate([source_images, target_images], axis=0)
    train_images = np.reshape(train_images, (-1, np.prod(source_images.shape[1:])))
    if data_name.__contains__('mnist'):
        preprocessed_data = anndata.AnnData(X=train_images)
        preprocessed_data.obs["condition"] = train_labels
        preprocessed_data.obs['labels'] = data.obs['labels'].values
        data = preprocessed_data.copy()
    else:
        preprocessed_data = anndata.AnnData(X=train_images)
        preprocessed_data.obs['condition'] = train_labels
        if data.obs.columns.__contains__('labels'):
            # NOTE(review): 'labels' is filled from the *condition* column
            # here, unlike the mnist branch above — looks like a bug; confirm.
            preprocessed_data.obs['labels'] = data.obs['condition'].values
        data = preprocessed_data.copy()
    # Random 85/15 train/validation split.
    train_size = int(data.shape[0] * 0.85)
    indices = np.arange(data.shape[0])
    np.random.shuffle(indices)
    train_idx = indices[:train_size]
    test_idx = indices[train_size:]
    data_train = data[train_idx, :]
    data_valid = data[test_idx, :]
    print(data_train.shape, data_valid.shape)
    if train_digits is not None:
        # Hold out the target-condition samples of the test digits.
        train_data = data_train.copy()[
            ~((data_train.obs['labels'].isin(test_digits)) & (data_train.obs['condition'] == 1))]
        valid_data = data_valid.copy()[
            ~((data_valid.obs['labels'].isin(test_digits)) & (data_valid.obs['condition'] == 1))]
    elif data_name == "celeba":
        # Hold out female (labels == -1) target-condition samples.
        train_data = data_train.copy()[
            ~((data_train.obs['labels'] == -1) & (data_train.obs['condition'] == target_key))]
        valid_data = data_valid.copy()[
            ~((data_valid.obs['labels'] == -1) & (data_valid.obs['condition'] == target_key))]
    else:
        train_data = data_train.copy()
        valid_data = data_valid.copy()
    network = trvae.archs.DCtrVAE(x_dimension=source_images.shape[1:],
                                  z_dimension=z_dim,
                                  mmd_dimension=mmd_dimension,
                                  alpha=alpha,
                                  beta=beta,
                                  gamma=gamma,
                                  kernel=kernel,
                                  arch_style=arch_style,
                                  train_with_fake_labels=False,
                                  learning_rate=learning_rate,
                                  model_path=f"../models/RCCVAE/{data_name}-{img_width}x{img_height}-{preprocess}/{arch_style}-{z_dim}/",
                                  gpus=gpus,
                                  dropout_rate=dropout_rate)
    print(train_data.shape, valid_data.shape)
    network.train(train_data,
                  use_validation=True,
                  valid_adata=valid_data,
                  n_epochs=n_epochs,
                  batch_size=batch_size,
                  verbose=2,
                  early_stop_limit=early_stopping_limit,
                  shuffle=True,
                  save=True)
    print("Model has been trained")
def evaluate_network(data_dict=None, z_dim=100, n_files=5, k=5, arch_style=1, preprocess=True, max_size=80000):
    """Sample a trained DCtrVAE and save source/target/prediction image grids.

    Restores the model trained by ``train_network`` for the same
    ``data_dict``/``z_dim``/``arch_style``/``preprocess`` combination,
    draws ``k`` samples per figure, predicts the target-condition images
    from source-condition inputs, and writes ``n_files`` PDF grids into
    ../results/RCCVAE/.../train/ and .../valid/.

    Args:
        data_dict: one entry of DATASETS.
        z_dim, arch_style, preprocess: must match the training run.
        n_files: number of sample figures to produce.
        k: samples per figure (overridden by len(test/train_digits) when set).
        max_size: cap on CelebA images loaded.
    """
    data_name = data_dict['name']
    source_key = data_dict.get('source_key', None)
    target_key = data_dict.get('target_key', None)
    img_width = data_dict.get("width", None)
    img_height = data_dict.get("height", None)
    n_channels = data_dict.get('n_channels', None)
    train_digits = data_dict.get('train_digits', None)
    test_digits = data_dict.get('test_digits', None)
    attribute = data_dict.get('attribute', None)
    if data_name == "celeba":
        gender = data_dict.get('gender', None)
        data = trvae.prepare_and_load_celeba(file_path="../data/celeba/img_align_celeba.zip",
                                             attr_path="../data/celeba/list_attr_celeba.txt",
                                             landmark_path="../data/celeba/list_landmarks_align_celeba.txt",
                                             gender=gender,
                                             attribute=attribute,
                                             max_n_images=max_size,
                                             img_width=img_width,
                                             img_height=img_height,
                                             restore=True,
                                             save=False)
        valid_data = data.copy()[data.obs['labels'] == -1]  # get females (Male = -1)
        train_data = data.copy()[data.obs['labels'] == +1]  # get males (Male = 1)
        if sparse.issparse(valid_data.X):
            valid_data.X = valid_data.X.A
        source_images_train = train_data[train_data.obs["condition"] == source_key].X
        source_images_valid = valid_data[valid_data.obs["condition"] == source_key].X
        source_images_train = np.reshape(source_images_train, (-1, img_width, img_height, n_channels))
        source_images_valid = np.reshape(source_images_valid, (-1, img_width, img_height, n_channels))
        if preprocess:
            source_images_train /= 255.0
            source_images_valid /= 255.0
    else:
        data = sc.read(f"../data/{data_name}/{data_name}.h5ad")
        if train_digits is not None:
            # Split by digit identity: held-out digits form the validation set.
            train_data = data[data.obs['labels'].isin(train_digits)]
            valid_data = data[data.obs['labels'].isin(test_digits)]
        else:
            train_data = data.copy()
            valid_data = data.copy()
        source_images_train = train_data[train_data.obs["condition"] == source_key].X
        target_images_train = train_data[train_data.obs["condition"] == target_key].X
        source_images_train = np.reshape(source_images_train, (-1, img_width, img_height, n_channels))
        target_images_train = np.reshape(target_images_train, (-1, img_width, img_height, n_channels))
        source_images_valid = valid_data[valid_data.obs["condition"] == source_key].X
        target_images_valid = valid_data[valid_data.obs["condition"] == target_key].X
        source_images_valid = np.reshape(source_images_valid, (-1, img_width, img_height, n_channels))
        target_images_valid = np.reshape(target_images_valid, (-1, img_width, img_height, n_channels))
        if preprocess:
            source_images_train /= 255.0
            source_images_valid /= 255.0
            target_images_train /= 255.0
            target_images_valid /= 255.0
    image_shape = (img_width, img_height, n_channels)
    # Flatten to (n, W*H*C) so AnnData can hold the images.
    source_images_train = np.reshape(source_images_train, (-1, np.prod(image_shape)))
    source_images_valid = np.reshape(source_images_valid, (-1, np.prod(image_shape)))
    source_data_train = anndata.AnnData(X=source_images_train)
    source_data_valid = anndata.AnnData(X=source_images_valid)
    # Restore the previously trained model (path must match train_network).
    network = trvae.DCtrVAE(x_dimension=image_shape,
                            z_dimension=z_dim,
                            arch_style=arch_style,
                            model_path=f"../models/RCCVAE/{data_name}-{img_width}x{img_height}-{preprocess}/{arch_style}-{z_dim}/")
    network.restore_model()
    results_path_train = f"../results/RCCVAE/{data_name}-{img_width}x{img_height}-{preprocess}/{arch_style}-{z_dim}/{source_key} to {target_key}/train/"
    results_path_valid = f"../results/RCCVAE/{data_name}-{img_width}x{img_height}-{preprocess}/{arch_style}-{z_dim}/{source_key} to {target_key}/valid/"
    os.makedirs(results_path_train, exist_ok=True)
    os.makedirs(results_path_valid, exist_ok=True)
    if sparse.issparse(valid_data.X):
        valid_data.X = valid_data.X.A
    if test_digits is not None:
        k = len(test_digits)  # one row per held-out digit
    for j in range(n_files):
        if test_digits is not None:
            # Digit datasets: pick one random sample per digit.
            source_sample_train = []
            source_sample_valid = []
            target_sample_train = []
            target_sample_valid = []
            for digit in test_digits:
                source_images_digit_valid = valid_data[
                    (valid_data.obs['labels'] == digit) & (valid_data.obs['condition'] == source_key)]
                target_images_digit_valid = valid_data[
                    (valid_data.obs['labels'] == digit) & (valid_data.obs['condition'] == target_key)]
                if j == 0:
                    # NOTE(review): in-place scaling on the first pass only —
                    # mutates valid_data's backing array; confirm intended.
                    source_images_digit_valid.X /= 255.0
                random_samples = np.random.choice(source_images_digit_valid.shape[0], 1, replace=False)
                source_sample_valid.append(source_images_digit_valid.X[random_samples])
                target_sample_valid.append(target_images_digit_valid.X[random_samples])
            for digit in train_digits:
                source_images_digit_train = train_data[
                    (train_data.obs['labels'] == digit) & (train_data.obs['condition'] == source_key)]
                target_images_digit_train = train_data[
                    (train_data.obs['labels'] == digit) & (train_data.obs['condition'] == target_key)]
                if j == 0:
                    source_images_digit_train.X /= 255.0
                random_samples = np.random.choice(source_images_digit_train.shape[0], 1, replace=False)
                source_sample_train.append(source_images_digit_train.X[random_samples])
                target_sample_train.append(target_images_digit_train.X[random_samples])
        else:
            # Non-digit datasets: draw k random source samples directly.
            random_samples_train = np.random.choice(source_data_train.shape[0], k, replace=False)
            random_samples_valid = np.random.choice(source_data_valid.shape[0], k, replace=False)
            source_sample_train = source_data_train.X[random_samples_train]
            source_sample_valid = source_data_valid.X[random_samples_valid]
        source_sample_train = np.array(source_sample_train)
        source_sample_valid = np.array(source_sample_valid)
        # if data_name.__contains__("mnist"):
        #     target_sample = np.array(target_sample)
        #     target_sample_reshaped = np.reshape(target_sample, (-1, *image_shape))
        # Keep both a flat copy (for prediction) and an image-shaped copy
        # (for plotting) of every sample batch.
        source_sample_train = np.reshape(source_sample_train, (-1, np.prod(image_shape)))
        source_sample_train_reshaped = np.reshape(source_sample_train, (-1, *image_shape))
        if data_name.__contains__("mnist"):
            target_sample_train = np.reshape(target_sample_train, (-1, np.prod(image_shape)))
            target_sample_train_reshaped = np.reshape(target_sample_train, (-1, *image_shape))
            target_sample_valid = np.reshape(target_sample_valid, (-1, np.prod(image_shape)))
            target_sample_valid_reshaped = np.reshape(target_sample_valid, (-1, *image_shape))
        source_sample_valid = np.reshape(source_sample_valid, (-1, np.prod(image_shape)))
        source_sample_valid_reshaped = np.reshape(source_sample_valid, (-1, *image_shape))
        source_sample_train = anndata.AnnData(X=source_sample_train)
        source_sample_valid = anndata.AnnData(X=source_sample_valid)
        # Predict target-condition images (encoder sees 0s, decoder sees 1s).
        pred_sample_train = network.predict(adata=source_sample_train,
                                            encoder_labels=np.zeros((k, 1)),
                                            decoder_labels=np.ones((k, 1)))
        pred_sample_train = np.reshape(pred_sample_train, newshape=(-1, *image_shape))
        pred_sample_valid = network.predict(adata=source_sample_valid,
                                            encoder_labels=np.zeros((k, 1)),
                                            decoder_labels=np.ones((k, 1)))
        pred_sample_valid = np.reshape(pred_sample_valid, newshape=(-1, *image_shape))
        print(source_sample_train.shape, source_sample_train_reshaped.shape, pred_sample_train.shape)
        plt.close("all")
        if train_digits is not None:
            k = len(train_digits)
        # NOTE(review): for an mnist-named dataset without train_digits
        # (e.g. fashion_mnist) len(train_digits) below would fail — confirm.
        if data_name.__contains__("mnist"):
            fig, ax = plt.subplots(len(train_digits), 3, figsize=(k * 1, 6))
        else:
            fig, ax = plt.subplots(k, 2, figsize=(k * 1, 6))
        for i in range(k):
            # Column 0: source image; column 1: prediction (or ground truth
            # for grayscale mnist, with the prediction in column 2).
            ax[i, 0].axis('off')
            if source_sample_train_reshaped.shape[-1] > 1:
                ax[i, 0].imshow(source_sample_train_reshaped[i])
            else:
                ax[i, 0].imshow(source_sample_train_reshaped[i, :, :, 0], cmap='Greys')
            ax[i, 1].axis('off')
            if data_name.__contains__("mnist"):
                ax[i, 2].axis('off')
            if pred_sample_train.shape[-1] > 1:
                ax[i, 1].imshow(pred_sample_train[i])
            else:
                ax[i, 1].imshow(target_sample_train_reshaped[i, :, :, 0], cmap='Greys')
                ax[i, 2].imshow(pred_sample_train[i, :, :, 0], cmap='Greys')
        plt.savefig(os.path.join(results_path_train, f"sample_images_{j}.pdf"))
        print(source_sample_valid.shape, source_sample_valid_reshaped.shape, pred_sample_valid.shape)
        plt.close("all")
        if test_digits is not None:
            k = len(test_digits)
        if data_name.__contains__("mnist"):
            fig, ax = plt.subplots(k, 3, figsize=(k * 1, 6))
        else:
            fig, ax = plt.subplots(k, 2, figsize=(k * 1, 6))
        for i in range(k):
            ax[i, 0].axis('off')
            if source_sample_valid_reshaped.shape[-1] > 1:
                ax[i, 0].imshow(source_sample_valid_reshaped[i])
            else:
                ax[i, 0].imshow(source_sample_valid_reshaped[i, :, :, 0], cmap='Greys')
            ax[i, 1].axis('off')
            if data_name.__contains__("mnist"):
                ax[i, 2].axis('off')
            if pred_sample_valid.shape[-1] > 1:
                ax[i, 1].imshow(pred_sample_valid[i])
            else:
                ax[i, 1].imshow(target_sample_valid_reshaped[i, :, :, 0], cmap='Greys')
                ax[i, 2].imshow(pred_sample_valid[i, :, :, 0], cmap='Greys')
        plt.savefig(os.path.join(results_path_valid, f"./sample_images_{j}.pdf"))
def visualize_trained_network_results(data_dict, z_dim=100, arch_style=1, preprocess=True, max_size=80000):
    """Compute and save UMAP plots of the data and the model's latent spaces.

    Restores the trained DCtrVAE, embeds the full dataset with true and fake
    condition labels (both the z latent and the MMD layer), and writes UMAP
    figures to ../results/RCCVAE/.../UMAPs/.

    Args:
        data_dict: one entry of DATASETS.
        z_dim, arch_style, preprocess: must match the training run.
        max_size: cap on CelebA images loaded.
    """
    plt.close("all")
    data_name = data_dict.get('name', None)
    source_key = data_dict.get('source_key', None)
    target_key = data_dict.get('target_key', None)
    img_width = data_dict.get('width', None)
    img_height = data_dict.get('height', None)
    n_channels = data_dict.get('n_channels', None)
    train_digits = data_dict.get('train_digits', None)
    test_digits = data_dict.get('test_digits', None)
    attribute = data_dict.get('attribute', None)
    path_to_save = f"../results/RCCVAE/{data_name}-{img_width}x{img_height}-{preprocess}/{arch_style}-{z_dim}/{source_key} to {target_key}/UMAPs/"
    os.makedirs(path_to_save, exist_ok=True)
    sc.settings.figdir = os.path.abspath(path_to_save)  # scanpy saves figures here
    if data_name == "celeba":
        gender = data_dict.get('gender', None)
        data = trvae.prepare_and_load_celeba(file_path="../data/celeba/img_align_celeba.zip",
                                             attr_path="../data/celeba/list_attr_celeba.txt",
                                             landmark_path="../data/celeba/list_landmarks_align_celeba.txt",
                                             gender=gender,
                                             attribute=attribute,
                                             max_n_images=max_size,
                                             img_width=img_width,
                                             img_height=img_height,
                                             restore=True,
                                             save=False)
        if sparse.issparse(data.X):
            data.X = data.X.A
        train_images = data.X
        # NOTE(review): AnnData(X=data) passes an AnnData as the matrix;
        # elsewhere this script uses AnnData(X=data.X) — confirm intended.
        train_data = anndata.AnnData(X=data)
        # Replace numeric condition/label codes with readable strings.
        train_data.obs['condition'] = data.obs['condition'].values
        train_data.obs.loc[train_data.obs['condition'] == 1, 'condition'] = f'with {attribute}'
        train_data.obs.loc[train_data.obs['condition'] == -1, 'condition'] = f'without {attribute}'
        train_data.obs['labels'] = data.obs['labels'].values
        train_data.obs.loc[train_data.obs['labels'] == 1, 'labels'] = f'Male'
        train_data.obs.loc[train_data.obs['labels'] == -1, 'labels'] = f'Female'
        if preprocess:
            train_images /= 255.0
    else:
        train_data = sc.read(f"../data/{data_name}/{data_name}.h5ad")
        train_images = np.reshape(train_data.X, (-1, img_width, img_height, n_channels))
        if preprocess:
            train_images /= 255.0
    # Encode the true condition labels; fake labels are all ones (target).
    train_labels, _ = trvae.label_encoder(train_data)
    fake_labels = np.ones(train_labels.shape)
    network = trvae.DCtrVAE(x_dimension=(img_width, img_height, n_channels),
                            z_dimension=z_dim,
                            arch_style=arch_style,
                            model_path=f"../models/RCCVAE/{data_name}-{img_width}x{img_height}-{preprocess}/{arch_style}-{z_dim}/", )
    network.restore_model()
    train_data_feed = np.reshape(train_images, (-1, img_width, img_height, n_channels))
    # Embed with both true and fake condition labels, in both latent spaces.
    latent_with_true_labels = network.to_z_latent(train_data_feed, train_labels)
    latent_with_fake_labels = network.to_z_latent(train_data_feed, fake_labels)
    # NOTE(review): to_mmd_layer receives `network` explicitly in addition to
    # being called as a method — verify this matches the trvae API.
    mmd_latent_with_true_labels = network.to_mmd_layer(network, train_data_feed, train_labels, feed_fake=False)
    mmd_latent_with_fake_labels = network.to_mmd_layer(network, train_data_feed, train_labels, feed_fake=True)
    latent_with_true_labels = sc.AnnData(X=latent_with_true_labels)
    latent_with_true_labels.obs['condition'] = pd.Categorical(train_data.obs['condition'].values)
    latent_with_fake_labels = sc.AnnData(X=latent_with_fake_labels)
    latent_with_fake_labels.obs['condition'] = pd.Categorical(train_data.obs['condition'].values)
    mmd_latent_with_true_labels = sc.AnnData(X=mmd_latent_with_true_labels)
    mmd_latent_with_true_labels.obs['condition'] = train_data.obs['condition'].values
    mmd_latent_with_fake_labels = sc.AnnData(X=mmd_latent_with_fake_labels)
    mmd_latent_with_fake_labels.obs['condition'] = train_data.obs['condition'].values
    if data_name.__contains__("mnist") or data_name == "celeba":
        # These datasets carry per-sample class labels worth coloring by.
        latent_with_true_labels.obs['labels'] = pd.Categorical(train_data.obs['labels'].values)
        latent_with_fake_labels.obs['labels'] = pd.Categorical(train_data.obs['labels'].values)
        mmd_latent_with_true_labels.obs['labels'] = pd.Categorical(train_data.obs['labels'].values)
        mmd_latent_with_fake_labels.obs['labels'] = pd.Categorical(train_data.obs['labels'].values)
        color = ['condition', 'labels']
    else:
        color = ['condition']
    if train_digits is not None:
        # Tag each sample as training vs held-out for the type-colored UMAP.
        # NOTE(review): the source/test_digits combination is also tagged
        # 'training' while target/test_digits is 'heldout' — confirm the
        # second assignment below should not be 'heldout'.
        train_data.obs.loc[(train_data.obs['condition'] == source_key) & (
            train_data.obs['labels'].isin(train_digits)), 'type'] = 'training'
        train_data.obs.loc[
            (train_data.obs['condition'] == source_key) & (
                train_data.obs['labels'].isin(test_digits)), 'type'] = 'training'
        train_data.obs.loc[(train_data.obs['condition'] == target_key) & (
            train_data.obs['labels'].isin(train_digits)), 'type'] = 'training'
        train_data.obs.loc[
            (train_data.obs['condition'] == target_key) & (
                train_data.obs['labels'].isin(test_digits)), 'type'] = 'heldout'
    sc.pp.neighbors(train_data)
    sc.tl.umap(train_data)
    sc.pl.umap(train_data, color=color,
               save=f'_{data_name}_train_data.png',
               show=False,
               wspace=0.5)
    if train_digits is not None:
        sc.tl.umap(train_data)
        sc.pl.umap(train_data, color=['type', 'labels'],
                   save=f'_{data_name}_data_type.png',
                   show=False)
    sc.pp.neighbors(latent_with_true_labels)
    sc.tl.umap(latent_with_true_labels)
    sc.pl.umap(latent_with_true_labels, color=color,
               save=f"_{data_name}_latent_with_true_labels.png",
               wspace=0.5,
               show=False)
    sc.pp.neighbors(latent_with_fake_labels)
    sc.tl.umap(latent_with_fake_labels)
    sc.pl.umap(latent_with_fake_labels, color=color,
               save=f"_{data_name}_latent_with_fake_labels.png",
               wspace=0.5,
               show=False)
    sc.pp.neighbors(mmd_latent_with_true_labels)
    sc.tl.umap(mmd_latent_with_true_labels)
    sc.pl.umap(mmd_latent_with_true_labels, color=color,
               save=f"_{data_name}_mmd_latent_with_true_labels.png",
               wspace=0.5,
               show=False)
    sc.pp.neighbors(mmd_latent_with_fake_labels)
    sc.tl.umap(mmd_latent_with_fake_labels)
    sc.pl.umap(mmd_latent_with_fake_labels, color=color,
               save=f"_{data_name}_mmd_latent_with_fake_labels.png",
               wspace=0.5,
               show=False)
    plt.close("all")
if __name__ == '__main__':
    # Parse CLI hyper-parameters, then train and evaluate on the chosen
    # dataset.  BUG FIX: the help strings for -g, -y and -f were copy-paste
    # duplicates ('Learning Rate for Optimizer' / 'do train the network').
    parser = argparse.ArgumentParser(description='Sample a trained autoencoder.')
    arguments_group = parser.add_argument_group("Parameters")
    arguments_group.add_argument('-d', '--data', type=str, required=True,
                                 help='name of dataset you want to train')
    arguments_group.add_argument('-z', '--z_dim', type=int, default=20, required=False,
                                 help='latent space dimension')
    arguments_group.add_argument('-m', '--mmd_dimension', type=int, default=128, required=False,
                                 help='MMD Layer dimension')
    arguments_group.add_argument('-a', '--alpha', type=float, default=0.005, required=False,
                                 help='Alpha coeff in loss term')
    arguments_group.add_argument('-b', '--beta', type=float, default=100, required=False,
                                 help='Beta coeff in loss term')
    arguments_group.add_argument('-k', '--kernel', type=str, default='multi-scale-rbf', required=False,
                                 help='Kernel type')
    arguments_group.add_argument('-n', '--n_epochs', type=int, default=5000, required=False,
                                 help='Maximum Number of epochs for training')
    arguments_group.add_argument('-c', '--batch_size', type=int, default=512, required=False,
                                 help='Batch Size')
    arguments_group.add_argument('-s', '--arch_style', type=int, default=1, required=False,
                                 help='Model Architecture Style')
    arguments_group.add_argument('-r', '--dropout_rate', type=float, default=0.4, required=False,
                                 help='Dropout ratio')
    arguments_group.add_argument('-w', '--width', type=int, default=0, required=False,
                                 help='Image Width to be resize')
    arguments_group.add_argument('-e', '--height', type=int, default=0, required=False,
                                 help='Image Height to be resize')
    arguments_group.add_argument('-p', '--preprocess', type=int, default=1, required=False,
                                 help='do preprocess images (0 disables)')
    arguments_group.add_argument('-l', '--learning_rate', type=float, default=0.001, required=False,
                                 help='Learning Rate for Optimizer')
    arguments_group.add_argument('-g', '--gpus', type=int, default=1, required=False,
                                 help='Number of GPUs to train with')
    arguments_group.add_argument('-x', '--max_size', type=int, default=50000, required=False,
                                 help='Max Size for CelebA')
    arguments_group.add_argument('-t', '--do_train', type=int, default=1, required=False,
                                 help='do train the network')
    arguments_group.add_argument('-y', '--early_stopping_limit', type=int, default=50, required=False,
                                 help='early stopping patience (epochs without improvement)')
    arguments_group.add_argument('-f', '--gamma', type=float, default=1.0, required=False,
                                 help='Gamma coeff in loss term')
    args = vars(parser.parse_args())

    data_dict = DATASETS[args['data']]
    # Optional image resize override from the command line.
    if args['width'] > 0 and args['height'] > 0:
        data_dict['width'] = args['width']
        data_dict['height'] = args['height']
    args['preprocess'] = args['preprocess'] != 0
    if args['max_size'] == 0:
        args['max_size'] = None
    # Drop keys that train_network does not accept as keyword arguments.
    del args['data']
    del args['width']
    del args['height']
    if args['do_train'] > 0:
        del args['do_train']
        train_network(data_dict=data_dict, **args)
        evaluate_network(data_dict,
                         z_dim=args['z_dim'],
                         n_files=30,
                         arch_style=args['arch_style'],
                         max_size=args['max_size'],
                         k=4)
        # visualize_trained_network_results(data_dict,
        #                                   z_dim=args['z_dim'],
        #                                   arch_style=args['arch_style'],
        #                                   max_size=args['max_size'],
        #                                   preprocess=args['preprocess'])
    print(f"Model for {data_dict['name']} has been trained and sample results are ready!")
|
"""Qt windows for camera controlled labeling
Colin Dietrich 2019
"""
from functools import partial
from PyQt5 import QtGui, QtCore
from PyQt5.QtWidgets import QWidget, QApplication
# import everything, since it's hard to remember what class does what
from PyQt5.QtWidgets import QMainWindow,\
QApplication, QLabel, QLineEdit, QPushButton,\
QHBoxLayout, QVBoxLayout, QGroupBox, QFrame, QGridLayout, QAction
import config
from camera_capture import CAM
# Single shared camera instance, configured from config.py; every button press
# writes a capture into config.data_dir via this object.
cam = CAM(cam_id=config.camera_id, description=config.description)
cam.save_directory = config.data_dir
class Window(QWidget):
    """Vertical stack of push buttons; clicking one captures a camera frame.

    `mapping` maps button label -> Qt stylesheet string for that button
    (per the setStyleSheet(value) call below).
    """
    def __init__(self, mapping):
        QWidget.__init__(self)
        layout = QVBoxLayout(self)
        self.buttons = []
        for key, value in mapping.items():
            #text_label, image = value[0], value[1]
            q_push_button = QPushButton(key, self)
            #q_push_button.setStyleSheet("background-image: url({});".format(value))
            q_push_button.setStyleSheet(value)
            self.buttons.append(q_push_button)
            # Bind the label as the `data` kwarg so the module-level
            # handleButton knows which button fired.
            self.buttons[-1].clicked.connect(partial(handleButton, data=key))
            layout.addWidget(self.buttons[-1])
def handleButton(self, data="\n"):
    """Slot fired by a labeled button: save a camera frame tagged with *data*.

    NOTE(review): this is a module-level function, so Qt's clicked(bool)
    signal delivers the `checked` flag into the unused `self` parameter;
    consider renaming it to `checked` — confirm no other callers first.
    """
    cam.write_image_file(data, verbose=True)
    print(data)
|
'''
Created on Feb 8, 2016
@author: hanhanwu
'''
import math
import load_match_data
import preprocess_data
def sumsqures(v):
    """Return the sum of squared components of vector *v* (0 for empty)."""
    total = 0
    for component in v:
        total += component ** 2
    return total
# Radial-basis (Gaussian) kernel: yields the best linear separation for a
# given data set once gamma is tuned appropriately.
def rbf(v1, v2, gamma=20):
    """Return exp(-gamma * ||v1 - v2||^2); 1.0 for identical vectors."""
    squared_dist = sum((v1[i] - v2[i]) ** 2 for i in range(len(v1)))
    return math.e ** (-gamma * squared_dist)
# With a kernel we never compute coordinates in the transformed space, so we
# cannot average the transformed points directly.  Averaging the kernel values
# between the new point and each class's members gives the same decision.
def nlclassify(new_v, rs, offset, gamma=10):
    """Classify *new_v* as 0 or 1 from the average kernel similarity to each class."""
    kernel_sums = {0: 0.0, 1: 0.0}
    counts = {0: 0.0, 1: 0.0}
    for row in rs:
        label = row.match
        if label in (0, 1):
            kernel_sums[label] += rbf(new_v, row.data, gamma)
            counts[label] += 1
    y = kernel_sums[0] / counts[0] - kernel_sums[1] / counts[1] + offset
    return 0 if y > 0 else 1
def get_offset(rs, offset=None, gamma=10):
    """Compute the classification bias for `nlclassify`.

    Averages the pairwise kernel values inside each class and returns the
    difference (class 1 mean minus class 0 mean).

    :param rs: iterable of rows with `.match` (0 or 1) and `.data` attributes
    :param gamma: kernel width passed through to `rbf`
    """
    # The original kept count0/count1 accumulators that were never used;
    # the class sizes come from len() below instead.
    r0 = [r.data for r in rs if r.match == 0]
    r1 = [r.data for r in rs if r.match == 1]
    sum0 = sum(sum(rbf(v1, v2, gamma) for v1 in r0) for v2 in r0)
    sum1 = sum(sum(rbf(v1, v2, gamma) for v1 in r1) for v2 in r1)
    return sum1 / (len(r1) ** 2) - sum0 / (len(r0) ** 2)
def main():
    """Demo driver: train the kernel classifier on two CSV data sets and
    print predictions for a few hand-picked samples.

    Converted from Python 2: `print` statements became calls and the removed
    builtin `file()` became `open()` inside a `with` block (the original
    also leaked the file handle).
    """
    print('use agesonly.csv to predict:')
    agesonly_path = '[your agesonly.csv path]'  # change to your agesonly.csv path
    agesonly_rows = load_match_data.load_csv(agesonly_path, True)
    offset1 = get_offset(agesonly_rows)
    print(nlclassify([27, 30], agesonly_rows, offset1))
    print(nlclassify([30, 27], agesonly_rows, offset1))
    print('use scaled matchmaker.csv to predict:')
    matchmaker_path = '[your matchmaker.csv path]'  # change to your matchmaker.csv path
    with open(matchmaker_path) as ls:
        numerical_rows = preprocess_data.to_numerical(ls)
    rescaled_data = preprocess_data.rescale_data(numerical_rows)
    offset2 = get_offset(rescaled_data)
    new_p1 = [28.0, -1, -1, 26.0, -1, 1, 2, 0.8]  # man doesn't want children, women wants
    new_p2 = [28.0, -1, 1, 26.0, -1, 1, 2, 0.8]  # both want children
    print(nlclassify(new_p1, rescaled_data, offset2))
    print(nlclassify(new_p2, rescaled_data, offset2))

if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3
from collections import namedtuple
import contextlib
import io
import json
import logging
import os
from pathlib import Path
import shlex
import signal
import subprocess
import sys
import tempfile
import uuid
import zmq
from .simplesetup import RemoteTeamPlayer, SimpleController, SimplePublisher, SimpleServer
# Module-level logger and platform flag used throughout this module.
_logger = logging.getLogger(__name__)
_mswindows = (sys.platform == "win32")
# TeamSpec: a team's module (None for remote teams) and the address it talks to.
TeamSpec = namedtuple("TeamSpec", ["module", "address"])
# ModuleSpec: optional runner prefix ("py"/"bin") plus the module path itself.
ModuleSpec = namedtuple("ModuleSpec", ["prefix", "module"])
def get_python_process():
    """Return the path of the currently running Python interpreter.

    Raises
    ------
    RuntimeError
        If ``sys.executable`` is empty or ``None``.
    """
    interpreter = sys.executable
    if interpreter:
        return interpreter
    raise RuntimeError("Cannot retrieve current Python executable.")
def shlex_unsplit(cmd):
    """
    Translates a list of command arguments into bash-like 'human' readable form.
    Pseudo-reverses shlex.split()

    Example
    -------
    >>> shlex_unsplit(["command", "-f", "Hello World"])
    "command -f 'Hello World'"

    Parameters
    ----------
    cmd : list of string
        command + parameter list

    Returns
    -------
    string
    """
    quoted_parts = [shlex.quote(part) for part in cmd]
    return " ".join(quoted_parts)
def firstNN(*args):
    """
    Return the first argument that is not None, or None if every argument
    is None (or no arguments were given).

    Example
    -------
    >>> firstNN(None, False, True)
    False
    >>> firstNN(None, 2, True)
    2
    >>> firstNN()
    """
    for candidate in args:
        if candidate is not None:
            return candidate
    return None
def start_logging(filename):
    """Attach a DEBUG-level handler to the 'pelita' logger.

    Logs to stderr when *filename* is falsy or '-'; otherwise truncates and
    writes *filename*.
    """
    if filename and filename != '-':
        handler = logging.FileHandler(filename, mode='w')
    else:
        handler = logging.StreamHandler()
    log_format = '[%(relativeCreated)06d %(name)s:%(levelname).1s][%(funcName)s] %(message)s'
    handler.setFormatter(logging.Formatter(log_format))
    pelita_logger = logging.getLogger('pelita')
    pelita_logger.addHandler(handler)
    pelita_logger.setLevel(logging.DEBUG)
class ModuleRunner:
    """Base class for runner strategies; stores the team specification."""

    def __init__(self, team_spec):
        # Module path or binary that implements the team.
        self.team_spec = team_spec
class DefaultRunner(ModuleRunner):
    """Runs a Python team module through pelita.scripts.pelita_player."""

    def call_args(self, addr):
        """Return the argv list that launches the player subprocess.

        NOTE: `self.color` is assigned externally (see call_pelita_player)
        before this method is invoked.
        """
        return [
            get_python_process(),
            '-m',
            'pelita.scripts.pelita_player',
            self.team_spec,
            addr,
            '--color',
            self.color,
        ]
class BinRunner(ModuleRunner):
    """Runs a team implemented as a standalone executable."""

    def call_args(self, addr):
        """Return the argv list: the binary itself plus the server address."""
        return [self.team_spec, addr]
@contextlib.contextmanager
def _call_pelita_player(module_spec, address, color='', dump=None):
    """ Context manager version of `call_pelita_player`.

    Runs `call_pelita_player` as long as the `with` statement is executed
    and automatically terminates it afterwards. This is useful, if one
    just needs to send a few commands to a player.
    """
    # Initialize all three names before the try block: if call_pelita_player
    # itself raises, the finally clause would otherwise hit a NameError on
    # stdout/stderr (bug in the original, which only pre-set `proc`).
    proc = None
    stdout = None
    stderr = None
    try:
        proc, stdout, stderr = call_pelita_player(module_spec, address, color, dump)
        yield proc
    except KeyboardInterrupt:
        pass
    finally:
        # we close stdout, stderr before terminating
        # this hopefully means that it will do some flushing
        if stdout:
            stdout.close()
        if stderr:
            stderr.close()
        if proc is None:
            print("Problem running pelita player.")
        else:
            _logger.debug("Terminating proc %r", proc)
            proc.terminate()
            proc.wait()
            _logger.debug("%r terminated.", proc)
def call_pelita_player(module_spec, address, color='', dump=None):
    """ Starts another process with the same Python executable,
    the same start script (pelitagame) and runs `module_spec`
    as a standalone client on URL `address`.

    Returns a tuple (process, stdout, stderr) where stdout/stderr are the
    opened dump file handles, or None when no dump was requested.
    """
    defined_runners = {
        "py": DefaultRunner,
        "bin": BinRunner,
    }
    if module_spec.prefix is not None:
        try:
            runner = defined_runners[module_spec.prefix]
        except KeyError:
            raise ValueError("Unknown runner: {}:".format(module_spec.prefix))
    else:
        runner = DefaultRunner
    runner_inst = runner(module_spec.module)
    runner_inst.color = color
    call_args = runner_inst.call_args(address)
    _logger.debug("Executing: %r", call_args)
    if dump:
        # Fall back to the module *name* when no color is given: the original
        # used `(color or module_spec)`, which would try to concatenate the
        # ModuleSpec namedtuple itself with str and raise a TypeError.
        tag = color or module_spec.module
        stdout = Path(dump + '.' + tag + '.out').open('w')
        stderr = Path(dump + '.' + tag + '.err').open('w')
        return (subprocess.Popen(call_args, stdout=stdout, stderr=stderr), stdout, stderr)
    else:
        return (subprocess.Popen(call_args), None, None)
@contextlib.contextmanager
def run_and_terminate_process(args, **kwargs):
    """ This serves as a contextmanager around `subprocess.Popen`, ensuring that
    after the body of the with-clause has finished, the process itself (and the
    process’s children) terminates as well.

    On Unix we try sending a SIGTERM before killing the process group but as
    afterwards we only wait on the first child, this means that the grand children
    do not get the chance to properly terminate.
    In cases where the first child has children that should properly close, the
    first child should catch SIGTERM with a signal handler and wait on its children.

    On Windows we send a CTRL_BREAK_EVENT to the whole process group and
    hope for the best. :)
    """
    _logger.debug("Executing: {}".format(shlex_unsplit(args)))
    try:
        if _mswindows:
            # New process group so CTRL_BREAK_EVENT reaches the child and not us.
            p = subprocess.Popen(args, **kwargs, creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
        else:
            # setsid puts the child into its own session/process group so the
            # whole group can be signalled below.
            p = subprocess.Popen(args, **kwargs, preexec_fn=os.setsid)
        yield p
    finally:
        if _mswindows:
            _logger.debug("Sending CTRL_BREAK_EVENT to {proc} with pid {pid}.".format(proc=p, pid=p.pid))
            os.kill(p.pid, signal.CTRL_BREAK_EVENT)
        else:
            try:
                pgid = os.getpgid(p.pid)
                _logger.debug("Sending SIGTERM to pgid {pgid}.".format(pgid=pgid))
                os.killpg(pgid, signal.SIGTERM) # send sigterm, or ...
                try:
                    # It would be nicer to wait with os.waitid on the process group
                    # but that does not seem to exist on macOS.
                    p.wait(3)
                except subprocess.TimeoutExpired:
                    _logger.debug("Sending SIGKILL to pgid {pgid}.".format(pgid=pgid))
                    os.killpg(pgid, signal.SIGKILL) # send sigkill, or ...
            except ProcessLookupError:
                # did our process group vanish?
                # we try killing only the child process then
                _logger.debug("Sending SIGTERM to pid {pid}.".format(pid=p.pid))
                p.terminate()
                try:
                    p.wait(3)
                except subprocess.TimeoutExpired:
                    _logger.debug("Sending SIGKILL to pid {pid}.".format(pid=p.pid))
                    p.kill()
def call_pelita(team_specs, *, rounds, filter, viewer, dump, seed):
    """ Starts a new process with the given command line arguments and waits until finished.

    The subprocess reports the final game state back over a zmq PAIR socket
    while its stdout/stderr are captured into in-memory buffers.

    Returns
    =======
    tuple of (game_state, stdout, stderr)
    """
    if _mswindows:
        raise RuntimeError("call_pelita is currently unavailable on Windows")
    team1, team2 = team_specs
    ctx = zmq.Context()
    reply_sock = ctx.socket(zmq.PAIR)
    # On POSIX use a filesystem ipc:// socket in the temp dir; otherwise fall
    # back to a random TCP port on localhost.
    if os.name.upper() == 'POSIX':
        filename = 'pelita-reply.{uuid}'.format(pid=os.getpid(), uuid=uuid.uuid4())
        path = os.path.join(tempfile.gettempdir(), filename)
        reply_addr = 'ipc://' + path
        reply_sock.bind(reply_addr)
    else:
        addr = 'tcp://127.0.0.1'
        reply_port = reply_sock.bind_to_random_port(addr)
        reply_addr = 'tcp://127.0.0.1' + ':' + str(reply_port)
    # Rebind each keyword argument to the argv fragment it contributes.
    rounds = ['--rounds', str(rounds)] if rounds else []
    filter = ['--filter', filter] if filter else []
    viewer = ['--' + viewer] if viewer else []
    dump = ['--dump', dump] if dump else []
    seed = ['--seed', seed] if seed else []
    cmd = [get_python_process(), '-m', 'pelita.scripts.pelita_main',
           team1, team2,
           '--reply-to', reply_addr,
           *seed,
           *dump,
           *filter,
           *rounds,
           *viewer]
    # We use the environment variable PYTHONUNBUFFERED here to retrieve stdout without buffering
    with run_and_terminate_process(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                   universal_newlines=True,
                                   env=dict(os.environ, PYTHONUNBUFFERED='x')) as proc:
        #if ARGS.dry_run:
        #    print("Would run: {cmd}".format(cmd=cmd))
        #    print("Choosing winner at random.")
        #    return random.choice([0, 1, 2])
        # Multiplex the reply socket and the child's stdout/stderr pipes.
        poll = zmq.Poller()
        poll.register(reply_sock, zmq.POLLIN)
        poll.register(proc.stdout.fileno(), zmq.POLLIN)
        poll.register(proc.stderr.fileno(), zmq.POLLIN)
        with io.StringIO() as stdout_buf, io.StringIO() as stderr_buf:
            final_game_state = None
            while True:
                evts = dict(poll.poll(1000))
                if not evts and proc.poll() is not None:
                    # no more events and proc has finished.
                    # we give up
                    break
                stdout_ready = (not proc.stdout.closed) and evts.get(proc.stdout.fileno(), False)
                if stdout_ready:
                    line = proc.stdout.readline()
                    if line:
                        print(line, end='', file=stdout_buf)
                    else:
                        # EOF on the pipe: stop polling it.
                        poll.unregister(proc.stdout.fileno())
                        proc.stdout.close()
                stderr_ready = (not proc.stderr.closed) and evts.get(proc.stderr.fileno(), False)
                if stderr_ready:
                    line = proc.stderr.readline()
                    if line:
                        print(line, end='', file=stderr_buf)
                    else:
                        poll.unregister(proc.stderr.fileno())
                        proc.stderr.close()
                socket_ready = evts.get(reply_sock, False)
                if socket_ready:
                    try:
                        # Status messages arrive as JSON; keep the last state
                        # once the subprocess reports the game as finished.
                        pelita_status = json.loads(reply_sock.recv_string())
                        game_state = pelita_status['__data__']['game_state']
                        finished = game_state.get("finished", None)
                        team_wins = game_state.get("team_wins", None)
                        game_draw = game_state.get("game_draw", None)
                        if finished:
                            final_game_state = game_state
                            break
                    except ValueError:  # JSONDecodeError
                        pass
                    except KeyError:
                        pass
            return (final_game_state, stdout_buf.getvalue(), stderr_buf.getvalue())
def check_team(team_spec):
    """Connect to (or spawn) the player described by *team_spec* and return its team name."""
    ctx = zmq.Context()
    socket = ctx.socket(zmq.PAIR)
    if team_spec.module is None:
        # Remote team: it will connect to the given address itself.
        _logger.info("Binding zmq.PAIR to %s", team_spec.address)
        socket.bind(team_spec.address)
    else:
        # Local module: bind a random port and record the full address.
        _logger.info("Binding zmq.PAIR to %s", team_spec.address)
        socket_port = socket.bind_to_random_port(team_spec.address)
        team_spec = team_spec._replace(address="%s:%d" % (team_spec.address, socket_port))
    team_player = RemoteTeamPlayer(socket)
    if team_spec.module:
        # Spawn the player process only for the duration of the name query.
        with _call_pelita_player(team_spec.module, team_spec.address):
            name = team_player.team_name()
    else:
        name = team_player.team_name()
    return name
def strip_module_prefix(module):
    """Split an optional 'prefix@module' specification into a ModuleSpec.

    Raises ValueError when more than one '@' is present.
    """
    if "@" not in module:
        return ModuleSpec(prefix=None, module=module)
    try:
        prefix, bare_module = module.split("@")
    except ValueError:
        raise ValueError("Bad module definition: {}.".format(module))
    return ModuleSpec(prefix=prefix, module=bare_module)
def prepare_team(team_spec):
    """Turn a team specification string into a TeamSpec.

    A spec containing '://' is an address that a remote player will connect
    to; anything else is a (possibly prefixed) module path served locally.
    """
    is_remote_address = "://" in team_spec
    if is_remote_address:
        return TeamSpec(None, team_spec)
    return TeamSpec(strip_module_prefix(team_spec), "tcp://127.0.0.1")
def run_game(team_specs, game_config, viewers=None, controller=None):
    """Set up the server, spawn local players, run the game and return its state.

    :param team_specs: iterable of team specification strings (see prepare_team)
    :param game_config: dict with layout/rounds/timeouts/seed/dump and optional
        'publisher' entries
    :param viewers: extra viewer objects registered with the game master
    :param controller: optional controller that drives the game loop instead
        of the server itself
    """
    if viewers is None:
        viewers = []
    teams = [prepare_team(team_spec) for team_spec in team_specs]
    server = SimpleServer(layout_string=game_config["layout_string"],
                          rounds=game_config["rounds"],
                          bind_addrs=[team.address for team in teams],
                          max_timeouts=game_config["max_timeouts"],
                          timeout_length=game_config["timeout_length"],
                          layout_name=game_config["layout_name"],
                          seed=game_config["seed"])
    # Update our teams with the bound addresses
    teams = [
        team._replace(address=address)
        for team, address in zip(teams, server.bind_addresses)
    ]
    # First team is Blue, second is Red; any further teams get no color.
    color = {}
    for idx, team in enumerate(teams):
        if idx == 0:
            color[team] = 'Blue'
        elif idx == 1:
            color[team] = 'Red'
        else:
            color[team] = ''
        if team.module is None:
            print("Waiting for external team %d to connect to %s." % (idx, team.address))
    # Spawn a subprocess for every locally-served team module.
    external_players = [
        call_pelita_player(team.module, team.address, color[team], dump=game_config['dump'])
        for team in teams
        if team.module
    ]
    for viewer in viewers:
        server.game_master.register_viewer(viewer)
    if game_config.get("publisher"):
        server.game_master.register_viewer(game_config["publisher"])
    # Ensure the player subprocesses are cleaned up however the game ends.
    with autoclose_subprocesses(external_players):
        if controller is not None:
            if controller.game_master is None:
                controller.game_master = server.game_master
            controller.run()
            server.exit_teams()
        else:
            server.run()
        return server.game_master.game_state
@contextlib.contextmanager
def tk_viewer(publish_to=None, geometry=None, delay=None):
    """Set up a publisher/controller pair and launch the external Tk viewer.

    Yields a dict with the 'publisher' and 'controller' so the caller can
    merge them into the game configuration.
    """
    if publish_to is None:
        publish_to = "tcp://127.0.0.1:*"
    publisher = SimplePublisher(publish_to)
    controller = SimpleController(None, "tcp://127.0.0.1:*")
    # `stop_after` is a required positional parameter of run_external_viewer;
    # the original call omitted it and raised a TypeError.  None means the
    # viewer runs until it is closed.
    viewer = run_external_viewer(publisher.socket_addr, controller.socket_addr,
                                 geometry=geometry, delay=delay, stop_after=None)
    yield { "publisher": publisher, "controller": controller }
@contextlib.contextmanager
def channel_setup(publish_to=None, reply_to=None):
    """Yield a publisher/controller pair without starting any viewer."""
    publish_address = publish_to if publish_to is not None else "tcp://127.0.0.1:*"
    publisher = SimplePublisher(publish_address)
    controller = SimpleController(None, "tcp://127.0.0.1:*", reply_to=reply_to)
    yield { "publisher": publisher, "controller": controller }
def run_external_viewer(subscribe_sock, controller, geometry, delay, stop_after):
    """Launch the Tk viewer in a separate process and return its Popen object."""
    # Something on OS X prevents Tk from running in a forked process.
    # Therefore we cannot use multiprocessing here. subprocess works, though.
    viewer_args = [str(subscribe_sock)]
    if controller:
        viewer_args.extend(["--controller-address", str(controller)])
    if geometry:
        viewer_args.extend(["--geometry", "{0}x{1}".format(*geometry)])
    if delay:
        viewer_args.extend(["--delay", str(delay)])
    if stop_after is not None:
        viewer_args.extend(["--stop-after", str(stop_after)])
    tkviewer = 'pelita.scripts.pelita_tkviewer'
    external_call = [get_python_process(), '-m', tkviewer] + viewer_args
    _logger.debug("Executing: %r", external_call)
    # os.setsid will keep the viewer from closing when the main process exits
    # a better solution might be to decouple the viewer from the main process
    if _mswindows:
        return subprocess.Popen(external_call, creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
    return subprocess.Popen(external_call, preexec_fn=os.setsid)
@contextlib.contextmanager
def autoclose_subprocesses(subprocesses):
    """
    Automatically close subprocesses when the context ends.
    This needs to be done to shut down misbehaving bots
    when the main program finishes.

    :param subprocesses: iterable of (process, stdout, stderr) triples, as
        returned by call_pelita_player.
    """
    try:
        yield
    except KeyboardInterrupt:
        pass
    finally:
        # we close stdout, stderr before terminating
        # this hopefully means that it will do some flushing
        for (sp, stdout, stderr) in subprocesses:
            if stdout:
                stdout.close()
            if stderr:
                stderr.close()
        # kill all client processes. NOW!
        # (is this too early?)
        for (sp, stdout, stderr) in subprocesses:
            _logger.debug("Attempting to terminate %r.", sp)
            sp.terminate()
        # Reap in a second pass so slow shutdowns overlap instead of serializing.
        for (sp, stdout, stderr) in subprocesses:
            sp.wait()
            _logger.debug("%r terminated.", sp)
|
# Minimal interactive demo: prompt for three pieces of personal information
# and echo them back.  (Prompts intentionally carry no trailing space.)
name = input("What's your name?")
old = input("How old are u?")
live = input("Where are u from?")
print("This is:", name)
print("It is:", old)
print("She(he) lives in:", live)
|
from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class SystemRestfulInteractionCode(GenericTypeCode):
    """
    SystemRestfulInteraction
    From: http://hl7.org/fhir/ValueSet/system-restful-interaction in valuesets.xml
    Operations supported by REST at the system level.
    """
    def __init__(self, value: AutoMapperTextInputType):
        # Storage and validation of the raw code value is delegated to the base class.
        super().__init__(value=value)

    """
    http://hl7.org/fhir/restful-interaction
    """
    # Canonical URI of the code system that the codes in this class belong to.
    codeset: FhirUri = "http://hl7.org/fhir/restful-interaction"
# Auto-generated constants: one class attribute per code in the value set.
class SystemRestfulInteractionCodeValues:
    """
    From: http://hl7.org/fhir/ValueSet/system-restful-interaction in valuesets.xml
    """
    Transaction = SystemRestfulInteractionCode("transaction")
    """
    From: http://hl7.org/fhir/ValueSet/system-restful-interaction in valuesets.xml
    """
    Batch = SystemRestfulInteractionCode("batch")
    """
    From: http://hl7.org/fhir/ValueSet/system-restful-interaction in valuesets.xml
    """
    Search_system = SystemRestfulInteractionCode("search-system")
    """
    From: http://hl7.org/fhir/ValueSet/system-restful-interaction in valuesets.xml
    """
    History_system = SystemRestfulInteractionCode("history-system")
|
import sys
from PyQt5 import QtWidgets, QtGui
def window():
    """Show a small PyQt5 widget with a text label and an image label."""
    app = QtWidgets.QApplication(sys.argv)
    w = QtWidgets.QWidget()
    l1 = QtWidgets.QLabel(w)
    l2 = QtWidgets.QLabel(w)
    l1.setText('this is a test')
    # NOTE(review): relative path — assumes the script is run from its own
    # directory; verify the resource exists.
    l2.setPixmap(QtGui.QPixmap('../resources/lab-badge.png'))
    w.setWindowTitle('ImageAlign')
    w.setGeometry(100, 100, 300, 300)  # x, y, width, height
    l1.move(100, 20)
    l2.move(10, 90)
    w.show()
    # exec_() blocks until the window closes; forward its exit code.
    sys.exit(app.exec_())
if __name__ == '__main__':
    window()
|
import os, sys
import yaml
import logging
import click
from voluptuous import Schema
from .defaults import settings
from .validators import SchemaCheck
from .config_utils import process_config
from .exceptions import *
from .utils import *
from .indexlist import IndexList
from .snapshotlist import SnapshotList
from .actions import *
from ._version import __version__
# Maps the `action` name used in curator action files to the class (imported
# from .actions) that implements it.
CLASS_MAP = {
    'alias' : Alias,
    'allocation' : Allocation,
    'close' : Close,
    'cluster_routing' : ClusterRouting,
    'create_index' : CreateIndex,
    'delete_indices' : DeleteIndices,
    'delete_snapshots' : DeleteSnapshots,
    'forcemerge' : ForceMerge,
    'index_settings' : IndexSettings,
    'open' : Open,
    'reindex' : Reindex,
    'replicas' : Replicas,
    'restore' : Restore,
    'rollover' : Rollover,
    'snapshot' : Snapshot,
    'shrink' : Shrink,
}
def process_action(client, config, **kwargs):
    """
    Do the `action` in the configuration dictionary, using the associated args.
    Other necessary args may be passed as keyword arguments

    :arg client: client object handed through to the action/list classes.
    :arg config: An `action` dictionary.
    """
    logger = logging.getLogger(__name__)
    # Make some placeholder variables here for readability
    logger.debug('Configuration dictionary: {0}'.format(config))
    logger.debug('kwargs: {0}'.format(kwargs))
    action = config['action']
    # This will always have some defaults now, so no need to do the if...
    # # OLD WAY: opts = config['options'] if 'options' in config else {}
    opts = config['options']
    logger.debug('opts: {0}'.format(opts))
    mykwargs = {}
    action_class = CLASS_MAP[action]
    # Add some settings to mykwargs...
    if action == 'delete_indices':
        mykwargs['master_timeout'] = (
            kwargs['master_timeout'] if 'master_timeout' in kwargs else 30)
    ### Update the defaults with whatever came with opts, minus any Nones
    mykwargs.update(prune_nones(opts))
    logger.debug('Action kwargs: {0}'.format(mykwargs))
    ### Set up the action ###
    if action == 'alias':
        # Special behavior for this action, as it has 2 index lists
        logger.debug('Running "{0}" action'.format(action.upper()))
        action_obj = action_class(**mykwargs)
        if 'add' in config:
            logger.debug('Adding indices to alias "{0}"'.format(opts['name']))
            adds = IndexList(client)
            adds.iterate_filters(config['add'])
            action_obj.add(adds, warn_if_no_indices=opts['warn_if_no_indices'])
        if 'remove' in config:
            logger.debug(
                'Removing indices from alias "{0}"'.format(opts['name']))
            removes = IndexList(client)
            removes.iterate_filters(config['remove'])
            action_obj.remove(
                removes, warn_if_no_indices= opts['warn_if_no_indices'])
    elif action in [ 'cluster_routing', 'create_index', 'rollover']:
        # These actions operate on the cluster directly, not an index list.
        action_obj = action_class(client, **mykwargs)
    elif action == 'delete_snapshots' or action == 'restore':
        logger.debug('Running "{0}"'.format(action))
        slo = SnapshotList(client, repository=opts['repository'])
        slo.iterate_filters(config)
        # We don't need to send this value to the action
        mykwargs.pop('repository')
        action_obj = action_class(slo, **mykwargs)
    else:
        # Everything else acts on a filtered index list.
        logger.debug('Running "{0}"'.format(action.upper()))
        ilo = IndexList(client)
        ilo.iterate_filters(config)
        action_obj = action_class(ilo, **mykwargs)
    ### Do the action
    if 'dry_run' in kwargs and kwargs['dry_run'] == True:
        action_obj.do_dry_run()
    else:
        logger.debug('Doing the action here.')
        action_obj.do_action()
def run(config, action_file, dry_run=False):
    """
    Actually run.

    :arg config: path to the curator configuration file
    :arg action_file: path to the YAML action file to execute
    :arg dry_run: when True, actions only report what they would do
    """
    client_args = process_config(config)
    logger = logging.getLogger(__name__)
    logger.debug('Client and logging options validated.')
    # Extract this and save it for later, in case there's no timeout_override.
    default_timeout = client_args.pop('timeout')
    logger.debug('default_timeout = {0}'.format(default_timeout))
    #########################################
    ### Start working on the actions here ###
    #########################################
    logger.debug('action_file: {0}'.format(action_file))
    action_config = get_yaml(action_file)
    logger.debug('action_config: {0}'.format(action_config))
    action_dict = validate_actions(action_config)
    actions = action_dict['actions']
    logger.debug('Full list of actions: {0}'.format(actions))
    # Action IDs are executed in sorted order.
    action_keys = sorted(list(actions.keys()))
    for idx in action_keys:
        action = actions[idx]['action']
        # Pop the meta-options so they are not forwarded to the action class.
        action_disabled = actions[idx]['options'].pop('disable_action')
        logger.debug('action_disabled = {0}'.format(action_disabled))
        continue_if_exception = (
            actions[idx]['options'].pop('continue_if_exception'))
        logger.debug(
            'continue_if_exception = {0}'.format(continue_if_exception))
        timeout_override = actions[idx]['options'].pop('timeout_override')
        logger.debug('timeout_override = {0}'.format(timeout_override))
        ignore_empty_list = actions[idx]['options'].pop('ignore_empty_list')
        logger.debug('ignore_empty_list = {0}'.format(ignore_empty_list))
        ### Skip to next action if 'disabled'
        if action_disabled:
            logger.info(
                'Action ID: {0}: "{1}" not performed because "disable_action" '
                'is set to True'.format(idx, action)
            )
            continue
        else:
            logger.info('Preparing Action ID: {0}, "{1}"'.format(idx, action))
        # Override the timeout, if specified, otherwise use the default.
        if isinstance(timeout_override, int):
            client_args['timeout'] = timeout_override
        else:
            client_args['timeout'] = default_timeout
        # Set up action kwargs
        kwargs = {}
        # master_timeout is capped at 300 seconds.
        kwargs['master_timeout'] = (
            client_args['timeout'] if client_args['timeout'] <= 300 else 300)
        kwargs['dry_run'] = dry_run
        # Create a client object for each action...
        client = get_client(**client_args)
        logger.debug('client is {0}'.format(type(client)))
        ##########################
        ### Process the action ###
        ##########################
        try:
            logger.info('Trying Action ID: {0}, "{1}": '
                '{2}'.format(idx, action, actions[idx]['description'])
            )
            process_action(client, actions[idx], **kwargs)
        except Exception as e:
            # Empty-list "failures" can be tolerated via ignore_empty_list;
            # everything else either continues or aborts the whole run.
            if isinstance(e, NoIndices) or isinstance(e, NoSnapshots):
                if ignore_empty_list:
                    logger.info(
                        'Skipping action "{0}" due to empty list: '
                        '{1}'.format(action, type(e))
                    )
                else:
                    logger.error(
                        'Unable to complete action "{0}". No actionable items '
                        'in list: {1}'.format(action, type(e))
                    )
                    sys.exit(1)
            else:
                logger.error(
                    'Failed to complete action: {0}. {1}: '
                    '{2}'.format(action, type(e), e)
                )
                if continue_if_exception:
                    logger.info(
                        'Continuing execution with next action because '
                        '"continue_if_exception" is set to True for action '
                        '{0}'.format(action)
                    )
                else:
                    sys.exit(1)
        logger.info('Action ID: {0}, "{1}" completed.'.format(idx, action))
    logger.info('Job completed.')
# Click entry point: parses --config/--dry-run/ACTION_FILE and delegates to run().
@click.command()
@click.option('--config',
    help="Path to configuration file. Default: ~/.curator/curator.yml",
    type=click.Path(exists=True), default=settings.config_file()
)
@click.option('--dry-run', is_flag=True, help='Do not perform any changes.')
@click.argument('action_file', type=click.Path(exists=True), nargs=1)
@click.version_option(version=__version__)
def cli(config, dry_run, action_file):
    """
    Curator for Elasticsearch indices.

    See http://elastic.co/guide/en/elasticsearch/client/curator/current
    """
    run(config, action_file, dry_run)
#
# PySNMP MIB module JUNIPER-JS-DNS-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/JUNIPER-JS-DNS-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:59:32 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion")
jnxJsDnsRoot, = mibBuilder.importSymbols("JUNIPER-JS-SMI", "jnxJsDnsRoot")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
ModuleIdentity, iso, Bits, IpAddress, Counter64, Counter32, Gauge32, ObjectIdentity, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, Integer32, Unsigned32, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "iso", "Bits", "IpAddress", "Counter64", "Counter32", "Gauge32", "ObjectIdentity", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "Integer32", "Unsigned32", "TimeTicks")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
jnxJsDns = ModuleIdentity((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 10, 1))
if mibBuilder.loadTexts: jnxJsDns.setLastUpdated('200704141245Z')
if mibBuilder.loadTexts: jnxJsDns.setOrganization('Juniper Networks, Inc.')
if mibBuilder.loadTexts: jnxJsDns.setContactInfo(' Juniper Technical Assistance Center Juniper Networks, Inc. 1194 N. Mathilda Avenue Sunnyvale, CA 94089 E-mail: support@juniper.net')
if mibBuilder.loadTexts: jnxJsDns.setDescription('This MIB provides collated statistics for the Domain Name System (DNS) proxy collected over all interfaces on which it is configured to serve')
jnxJsDnsProxyDataObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 10, 1, 1))
jnxJsDNSProxyQueriesReceived = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 10, 1, 1, 1), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsDNSProxyQueriesReceived.setStatus('current')
if mibBuilder.loadTexts: jnxJsDNSProxyQueriesReceived.setDescription('Count of total number of DNS queries received by the DNS Proxy.')
jnxJsDnsProxyResponsesSent = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 10, 1, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsDnsProxyResponsesSent.setStatus('current')
if mibBuilder.loadTexts: jnxJsDnsProxyResponsesSent.setDescription('Count of DNS queries answered sent by the DNS Proxy. This includes DNS cache hits and misses that were answered.')
jnxJsDnsProxyQueriesForwarded = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 10, 1, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsDnsProxyQueriesForwarded.setStatus('current')
if mibBuilder.loadTexts: jnxJsDnsProxyQueriesForwarded.setDescription('Count of DNS queries forwarded to other DNS server. This is number of queries that have been proxied due to cache miss.')
jnxJsDnsProxyNegativeResponses = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 10, 1, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsDnsProxyNegativeResponses.setStatus('current')
if mibBuilder.loadTexts: jnxJsDnsProxyNegativeResponses.setDescription('Count of Negative DNS query responses. This is the count of DNS queries that the Proxy could not obtain answers for.')
jnxJsDnsProxyRetryRequests = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 10, 1, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsDnsProxyRetryRequests.setStatus('current')
if mibBuilder.loadTexts: jnxJsDnsProxyRetryRequests.setDescription('Count of DNS retry queries that this proxy received.')
jnxJsDnsProxyPendingRequests = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 10, 1, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsDnsProxyPendingRequests.setStatus('current')
if mibBuilder.loadTexts: jnxJsDnsProxyPendingRequests.setDescription('Count of DNS requests yet to be answered.')
jnxJsDnsProxyServerFailures = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 10, 1, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsDnsProxyServerFailures.setStatus('current')
if mibBuilder.loadTexts: jnxJsDnsProxyServerFailures.setDescription('Count of DNS Proxy Failures.')
mibBuilder.exportSymbols("JUNIPER-JS-DNS-MIB", jnxJsDnsProxyNegativeResponses=jnxJsDnsProxyNegativeResponses, jnxJsDnsProxyServerFailures=jnxJsDnsProxyServerFailures, jnxJsDnsProxyPendingRequests=jnxJsDnsProxyPendingRequests, jnxJsDnsProxyQueriesForwarded=jnxJsDnsProxyQueriesForwarded, jnxJsDnsProxyDataObjects=jnxJsDnsProxyDataObjects, PYSNMP_MODULE_ID=jnxJsDns, jnxJsDns=jnxJsDns, jnxJsDnsProxyResponsesSent=jnxJsDnsProxyResponsesSent, jnxJsDnsProxyRetryRequests=jnxJsDnsProxyRetryRequests, jnxJsDNSProxyQueriesReceived=jnxJsDNSProxyQueriesReceived)
|
from sklearn.feature_extraction.text import TfidfVectorizer
from mosestokenizer import MosesDetokenizer
from nltk.corpus import stopwords
# Compute per-word TF-IDF weights for two documents after stop-word removal.
# The original left both file handles open; `with` guarantees they are closed.
with open('/home/shuchita-rahman/Documents/thesis/f1', 'r') as f1:
    mainData = f1.read()
with open('/home/shuchita-rahman/Documents/thesis/f2', 'r') as f2:
    checkSimilarityData = f2.read()
# remove stop words from the data (am, an etc.)
stop = set(stopwords.words('english'))
mainData = [word for word in mainData.split() if word not in stop]
checkSimilarityData = [word for word in checkSimilarityData.split() if word not in stop]
# The data is now tokenized; to vectorize it we must detokenize it back into a string.
detokenize = MosesDetokenizer('en')
mainData = detokenize(mainData)
checkSimilarityData = detokenize(checkSimilarityData)
# print the data, which is now a plain string
print(mainData)
print(checkSimilarityData)
# changing sentence to vector
tfidf = TfidfVectorizer()
fitDataOne = tfidf.fit_transform([mainData])
feature_names = tfidf.get_feature_names()
# for every word, print its TF-IDF weight
for col in fitDataOne.nonzero()[1]:
    print(feature_names[col], ' - ', fitDataOne[0, col])
# NOTE(review): fit_transform here refits the vocabulary on the second
# document, so the two weight lists are not directly comparable — confirm
# whether a shared fit was intended.
fitDataTwo = tfidf.fit_transform([checkSimilarityData])
feature_names = tfidf.get_feature_names()
print("\n-------------\n")
for col in fitDataTwo.nonzero()[1]:
    print(feature_names[col], ' - ', fitDataTwo[0, col])
|
# Read a sentence, normalise it, and report statistics about the letter 'A'.
frase = input('Digite uma frase: ').strip().upper()
# Pre-compute the three values before printing (positions are 1-based).
total_a = frase.count('A')
primeira_pos = frase.find('A') + 1
ultima_pos = frase.rfind('A') + 1
print('Sua frase tem {} letras A'.format(total_a))
print('A primeira letra A está na posição: {}'.format(primeira_pos))
print('A última letra A está na posição: {}'.format(ultima_pos))
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cloudevents.sdk.event import base, opt
class Event(base.BaseEvent):
    """CloudEvents v0.3 event implementation."""
    # Context attributes required by the CloudEvents v0.3 specification.
    _ce_required_fields = {"id", "source", "type", "specversion"}
    # Context attributes the v0.3 specification marks as optional.
    _ce_optional_fields = {
        "datacontentencoding",
        "datacontenttype",
        "schemaurl",
        "subject",
        "time",
    }
    def __init__(self):
        # Each attribute is wrapped in opt.Option(name, default, required);
        # the base class resolves attributes through these ce__* slots.
        self.ce__specversion = opt.Option("specversion", "0.3", True)
        self.ce__id = opt.Option("id", None, True)
        self.ce__source = opt.Option("source", None, True)
        self.ce__type = opt.Option("type", None, True)
        self.ce__datacontenttype = opt.Option("datacontenttype", None, False)
        self.ce__datacontentencoding = opt.Option(
            "datacontentencoding", None, False
        )
        self.ce__subject = opt.Option("subject", None, False)
        self.ce__time = opt.Option("time", None, False)
        self.ce__schemaurl = opt.Option("schemaurl", None, False)
        self.ce__data = opt.Option("data", None, False)
        self.ce__extensions = opt.Option("extensions", dict(), False)
    # --- Accessors: return the current value of each context attribute. ---
    def CloudEventVersion(self) -> str:
        return self.ce__specversion.get()
    def EventType(self) -> str:
        return self.ce__type.get()
    def Source(self) -> str:
        return self.ce__source.get()
    def EventID(self) -> str:
        return self.ce__id.get()
    def EventTime(self) -> str:
        return self.ce__time.get()
    def Subject(self) -> str:
        return self.ce__subject.get()
    def SchemaURL(self) -> str:
        return self.ce__schemaurl.get()
    def Data(self) -> object:
        return self.ce__data.get()
    def Extensions(self) -> dict:
        return self.ce__extensions.get()
    def ContentType(self) -> str:
        return self.ce__datacontenttype.get()
    def ContentEncoding(self) -> str:
        return self.ce__datacontentencoding.get()
    # --- Mutators: fluent setters that return self for chaining. ---
    def SetEventType(self, eventType: str) -> base.BaseEvent:
        self.Set("type", eventType)
        return self
    def SetSource(self, source: str) -> base.BaseEvent:
        self.Set("source", source)
        return self
    def SetEventID(self, eventID: str) -> base.BaseEvent:
        self.Set("id", eventID)
        return self
    def SetEventTime(self, eventTime: str) -> base.BaseEvent:
        self.Set("time", eventTime)
        return self
    def SetSubject(self, subject: str) -> base.BaseEvent:
        self.Set("subject", subject)
        return self
    def SetSchemaURL(self, schemaURL: str) -> base.BaseEvent:
        self.Set("schemaurl", schemaURL)
        return self
    def SetData(self, data: object) -> base.BaseEvent:
        self.Set("data", data)
        return self
    def SetExtensions(self, extensions: dict) -> base.BaseEvent:
        self.Set("extensions", extensions)
        return self
    def SetContentType(self, contentType: str) -> base.BaseEvent:
        self.Set("datacontenttype", contentType)
        return self
    def SetContentEncoding(self, contentEncoding: str) -> base.BaseEvent:
        self.Set("datacontentencoding", contentEncoding)
        return self
    # --- Pythonic property aliases over the Go-style accessors above. ---
    @property
    def datacontentencoding(self):
        return self.ContentEncoding()
    @datacontentencoding.setter
    def datacontentencoding(self, value: str):
        self.SetContentEncoding(value)
    @property
    def subject(self) -> str:
        return self.Subject()
    @subject.setter
    def subject(self, value: str):
        self.SetSubject(value)
    @property
    def schema_url(self) -> str:
        return self.SchemaURL()
    @schema_url.setter
    def schema_url(self, value: str):
        self.SetSchemaURL(value)
|
# Exercise: transform string1..string3 so their combination equals `answer`.
answer = "3/29은 NEXT 세션하는 날!"
# Chain the transformations directly on the literals.
string1 = "2021-03-29은 ".replace("2021-0", "").replace("-", "/")  # -> "3/29은 "
string2 = "next".upper()                                           # -> "NEXT"
string3 = " 세션하는 날! ".strip()
string = string1 + string2 + " " + string3
if string == answer:
    print("정답입니다!")
else:
    print("틀렸습니다.")
    print(f"정답 : {answer}")
    print(f'입력값 : {string}')
|
import socket
from six.moves.urllib.parse import urlparse
from frappe import get_conf
# Site configuration, loaded once at import time.
config = get_conf()
# Config keys whose values are the redis connection URLs to health-check.
REDIS_KEYS = ('redis_cache', 'redis_queue', 'redis_socketio')
def is_open(ip, port, timeout=10):
    """Return True if a TCP connection to (ip, port) succeeds within timeout."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(timeout)
    try:
        sock.connect((ip, int(port)))
        # Tear the connection down politely before closing.
        sock.shutdown(socket.SHUT_RDWR)
    except socket.error:
        # Covers refused/timed-out connections and name-resolution failures.
        return False
    else:
        return True
    finally:
        sock.close()
def check_database():
    """Check that the configured database accepts TCP connections.

    Returns a one-entry dict mapping the db type ("mariadb" or
    "postgres") to True/False reachability.
    """
    db_type = config.get("db_type", "mariadb")
    db_host = config.get("db_host", "localhost")
    # Default ports: 3306 for MariaDB, 5432 for PostgreSQL.
    # (Fixed: the original defaulted to 5342, a transposition typo —
    # PostgreSQL listens on 5432.)
    db_port = config.get("db_port", 3306 if db_type == "mariadb" else 5432)
    return {db_type: is_open(db_host, db_port)}
def check_redis(redis_services=None):
    """Check TCP reachability of each configured redis service.

    Returns a dict mapping service name to True/False.
    """
    status = {}
    for service in (redis_services or REDIS_KEYS):
        # Config value is a URL such as redis://localhost:13000.
        netloc = urlparse(config.get(service)).netloc
        host, port = netloc.split(":")
        status[service] = is_open(host, port)
    return status
def check_connection(redis_services=None):
    """Aggregate database and redis reachability into one status dict."""
    status = {}
    for partial in (check_database(), check_redis(redis_services)):
        status.update(partial)
    return status
from torch import nn
from pystiche.image import CaffePreprocessing, TorchPreprocessing
# Registry mapping a framework name to its preprocessing transform class.
PREPROCESSORS = {"torch": TorchPreprocessing, "caffe": CaffePreprocessing}
__all__ = ["get_preprocessor"]
def get_preprocessor(framework: str) -> nn.Module:
    """Instantiate the preprocessing module registered for *framework*.

    Raises KeyError if *framework* is not a key of PREPROCESSORS.
    """
    preprocessor_cls = PREPROCESSORS[framework]
    return preprocessor_cls()
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the compute RPC API.
"""
from nova import exception
from nova import flags
from nova.openstack.common import rpc
from nova.openstack.common.rpc import common as rpc_common
import nova.openstack.common.rpc.proxy
# Global nova flag/configuration registry.
FLAGS = flags.FLAGS
def _compute_topic(topic, ctxt, host, instance):
    '''Get the topic to use for a message.

    :param topic: the base topic
    :param ctxt: request context
    :param host: explicit host to send the message to.
    :param instance: If an explicit host was not specified, use
                     instance['host']

    :returns: A topic string
    '''
    # Guard-clause style: an explicit host wins outright.
    if host:
        return rpc.queue_get_for(ctxt, topic, host)
    if not instance:
        raise exception.NovaException(_('No compute host specified'))
    instance_host = instance['host']
    if not instance_host:
        raise exception.NovaException(_('Unable to find host for '
                                        'Instance %s') % instance['uuid'])
    return rpc.queue_get_for(ctxt, topic, instance_host)
class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
    '''Client side of the compute rpc API.
    API version history:
        1.0 - Initial version.
        1.1 - Adds get_host_uptime()
        1.2 - Adds check_can_live_migrate_[destination|source]
        1.3 - Adds change_instance_metadata()
    '''
    # Methods below use self.cast (fire-and-forget) or self.call (blocks
    # for a reply); message kwargs are part of the RPC wire contract.
    BASE_RPC_API_VERSION = '1.0'
    def __init__(self):
        super(ComputeAPI, self).__init__(
                topic=FLAGS.compute_topic,
                default_version=self.BASE_RPC_API_VERSION)
    def add_aggregate_host(self, ctxt, aggregate_id, host_param, host):
        '''Add aggregate host.
        :param ctxt: request context
        :param aggregate_id:
        :param host_param: This value is placed in the message to be the 'host'
                           parameter for the remote method.
        :param host: This is the host to send the message to.
        '''
        self.cast(ctxt, self.make_msg('add_aggregate_host',
                aggregate_id=aggregate_id, host=host_param),
                topic=_compute_topic(self.topic, ctxt, host, None))
    def add_fixed_ip_to_instance(self, ctxt, instance, network_id):
        self.cast(ctxt, self.make_msg('add_fixed_ip_to_instance',
                instance_uuid=instance['uuid'], network_id=network_id),
                topic=_compute_topic(self.topic, ctxt, None, instance))
    def attach_volume(self, ctxt, instance, volume_id, mountpoint):
        self.cast(ctxt, self.make_msg('attach_volume',
                instance_uuid=instance['uuid'], volume_id=volume_id,
                mountpoint=mountpoint),
                topic=_compute_topic(self.topic, ctxt, None, instance))
    def check_can_live_migrate_destination(self, ctxt, instance, destination,
                                           block_migration, disk_over_commit):
        # Added in RPC API version 1.2.
        self.call(ctxt, self.make_msg('check_can_live_migrate_destination',
                instance_id=instance['id'],
                block_migration=block_migration,
                disk_over_commit=disk_over_commit),
                topic=_compute_topic(self.topic, ctxt, destination, None),
                version='1.2')
    def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
        # Added in RPC API version 1.2.
        self.call(ctxt, self.make_msg('check_can_live_migrate_source',
                instance_id=instance['id'],
                dest_check_data=dest_check_data),
                topic=_compute_topic(self.topic, ctxt, None, instance),
                version='1.2')
    # The next four RPCs were superseded by check_can_live_migrate_* in
    # version 1.2 and now always raise when invoked.
    def check_shared_storage_test_file(self, ctxt, filename, host):
        raise rpc_common.RPCException(message=_('Deprecated from version 1.2'))
    def cleanup_shared_storage_test_file(self, ctxt, filename, host):
        raise rpc_common.RPCException(message=_('Deprecated from version 1.2'))
    def compare_cpu(self, ctxt, cpu_info, host):
        raise rpc_common.RPCException(message=_('Deprecated from version 1.2'))
    def create_shared_storage_test_file(self, ctxt, host):
        raise rpc_common.RPCException(message=_('Deprecated from version 1.2'))
    def confirm_resize(self, ctxt, instance, migration_id, host,
            cast=True):
        # cast=True -> asynchronous; cast=False -> wait for the result.
        rpc_method = self.cast if cast else self.call
        return rpc_method(ctxt, self.make_msg('confirm_resize',
                instance_uuid=instance['uuid'], migration_id=migration_id),
                topic=_compute_topic(self.topic, ctxt, host, instance))
    def detach_volume(self, ctxt, instance, volume_id):
        self.cast(ctxt, self.make_msg('detach_volume',
                instance_uuid=instance['uuid'], volume_id=volume_id),
                topic=_compute_topic(self.topic, ctxt, None, instance))
    def finish_resize(self, ctxt, instance, migration_id, image, disk_info,
            host):
        self.cast(ctxt, self.make_msg('finish_resize',
                instance_uuid=instance['uuid'], migration_id=migration_id,
                image=image, disk_info=disk_info),
                topic=_compute_topic(self.topic, ctxt, host, None))
    def finish_revert_resize(self, ctxt, instance, migration_id, host):
        self.cast(ctxt, self.make_msg('finish_revert_resize',
                instance_uuid=instance['uuid'], migration_id=migration_id),
                topic=_compute_topic(self.topic, ctxt, host, None))
    def get_console_output(self, ctxt, instance, tail_length):
        return self.call(ctxt, self.make_msg('get_console_output',
                instance_uuid=instance['uuid'], tail_length=tail_length),
                topic=_compute_topic(self.topic, ctxt, None, instance))
    def get_console_pool_info(self, ctxt, console_type, host):
        return self.call(ctxt, self.make_msg('get_console_pool_info',
                console_type=console_type),
                topic=_compute_topic(self.topic, ctxt, host, None))
    def get_console_topic(self, ctxt, host):
        return self.call(ctxt, self.make_msg('get_console_topic'),
                topic=_compute_topic(self.topic, ctxt, host, None))
    def get_diagnostics(self, ctxt, instance):
        return self.call(ctxt, self.make_msg('get_diagnostics',
                instance_uuid=instance['uuid']),
                topic=_compute_topic(self.topic, ctxt, None, instance))
    def get_instance_disk_info(self, ctxt, instance):
        return self.call(ctxt, self.make_msg('get_instance_disk_info',
                instance_name=instance['name']),
                topic=_compute_topic(self.topic, ctxt, None, instance))
    def get_vnc_console(self, ctxt, instance, console_type):
        return self.call(ctxt, self.make_msg('get_vnc_console',
                instance_uuid=instance['uuid'], console_type=console_type),
                topic=_compute_topic(self.topic, ctxt, None, instance))
    def host_maintenance_mode(self, ctxt, host_param, mode, host):
        '''Set host maintenance mode
        :param ctxt: request context
        :param host_param: This value is placed in the message to be the 'host'
                           parameter for the remote method.
        :param mode:
        :param host: This is the host to send the message to.
        '''
        return self.call(ctxt, self.make_msg('host_maintenance_mode',
                host=host_param, mode=mode),
                topic=_compute_topic(self.topic, ctxt, host, None))
    def host_power_action(self, ctxt, action, host):
        topic = _compute_topic(self.topic, ctxt, host, None)
        return self.call(ctxt, self.make_msg('host_power_action',
                action=action), topic)
    def inject_file(self, ctxt, instance, path, file_contents):
        self.cast(ctxt, self.make_msg('inject_file',
                instance_uuid=instance['uuid'], path=path,
                file_contents=file_contents),
                topic=_compute_topic(self.topic, ctxt, None, instance))
    def inject_network_info(self, ctxt, instance):
        self.cast(ctxt, self.make_msg('inject_network_info',
                instance_uuid=instance['uuid']),
                topic=_compute_topic(self.topic, ctxt, None, instance))
    def lock_instance(self, ctxt, instance):
        self.cast(ctxt, self.make_msg('lock_instance',
                instance_uuid=instance['uuid']),
                topic=_compute_topic(self.topic, ctxt, None, instance))
    def post_live_migration_at_destination(self, ctxt, instance,
            block_migration, host):
        return self.call(ctxt,
                self.make_msg('post_live_migration_at_destination',
                instance_id=instance['id'], block_migration=block_migration),
                _compute_topic(self.topic, ctxt, host, None))
    def pause_instance(self, ctxt, instance):
        self.cast(ctxt, self.make_msg('pause_instance',
                instance_uuid=instance['uuid']),
                topic=_compute_topic(self.topic, ctxt, None, instance))
    def power_off_instance(self, ctxt, instance):
        self.cast(ctxt, self.make_msg('power_off_instance',
                instance_uuid=instance['uuid']),
                topic=_compute_topic(self.topic, ctxt, None, instance))
    def power_on_instance(self, ctxt, instance):
        self.cast(ctxt, self.make_msg('power_on_instance',
                instance_uuid=instance['uuid']),
                topic=_compute_topic(self.topic, ctxt, None, instance))
    def pre_live_migration(self, ctxt, instance, block_migration, disk,
            host):
        return self.call(ctxt, self.make_msg('pre_live_migration',
                instance_id=instance['id'], block_migration=block_migration,
                disk=disk), _compute_topic(self.topic, ctxt, host, None))
    def reboot_instance(self, ctxt, instance, reboot_type):
        self.cast(ctxt, self.make_msg('reboot_instance',
                instance_uuid=instance['uuid'], reboot_type=reboot_type),
                topic=_compute_topic(self.topic, ctxt, None, instance))
    def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
            image_ref, orig_image_ref):
        self.cast(ctxt, self.make_msg('rebuild_instance',
                instance_uuid=instance['uuid'], new_pass=new_pass,
                injected_files=injected_files, image_ref=image_ref,
                orig_image_ref=orig_image_ref),
                topic=_compute_topic(self.topic, ctxt, None, instance))
    def refresh_provider_fw_rules(self, ctxt, host):
        self.cast(ctxt, self.make_msg('refresh_provider_fw_rules'),
                _compute_topic(self.topic, ctxt, host, None))
    def refresh_security_group_rules(self, ctxt, security_group_id, host):
        self.cast(ctxt, self.make_msg('refresh_security_group_rules',
                security_group_id=security_group_id),
                topic=_compute_topic(self.topic, ctxt, host, None))
    def refresh_security_group_members(self, ctxt, security_group_id,
            host):
        self.cast(ctxt, self.make_msg('refresh_security_group_members',
                security_group_id=security_group_id),
                topic=_compute_topic(self.topic, ctxt, host, None))
    def remove_aggregate_host(self, ctxt, aggregate_id, host_param, host):
        '''Remove aggregate host.
        :param ctxt: request context
        :param aggregate_id:
        :param host_param: This value is placed in the message to be the 'host'
                           parameter for the remote method.
        :param host: This is the host to send the message to.
        '''
        self.cast(ctxt, self.make_msg('remove_aggregate_host',
                aggregate_id=aggregate_id, host=host_param),
                topic=_compute_topic(self.topic, ctxt, host, None))
    def remove_fixed_ip_from_instance(self, ctxt, instance, address):
        self.cast(ctxt, self.make_msg('remove_fixed_ip_from_instance',
                instance_uuid=instance['uuid'], address=address),
                topic=_compute_topic(self.topic, ctxt, None, instance))
    def remove_volume_connection(self, ctxt, instance, volume_id, host):
        return self.call(ctxt, self.make_msg('remove_volume_connection',
                instance_id=instance['id'], volume_id=volume_id),
                topic=_compute_topic(self.topic, ctxt, host, None))
    def rescue_instance(self, ctxt, instance, rescue_password):
        self.cast(ctxt, self.make_msg('rescue_instance',
                instance_uuid=instance['uuid'],
                rescue_password=rescue_password),
                topic=_compute_topic(self.topic, ctxt, None, instance))
    def reset_network(self, ctxt, instance):
        self.cast(ctxt, self.make_msg('reset_network',
                instance_uuid=instance['uuid']),
                topic=_compute_topic(self.topic, ctxt, None, instance))
    def resize_instance(self, ctxt, instance, migration_id, image):
        topic = _compute_topic(self.topic, ctxt, None, instance)
        self.cast(ctxt, self.make_msg('resize_instance',
                instance_uuid=instance['uuid'], migration_id=migration_id,
                image=image), topic)
    def resume_instance(self, ctxt, instance):
        self.cast(ctxt, self.make_msg('resume_instance',
                instance_uuid=instance['uuid']),
                topic=_compute_topic(self.topic, ctxt, None, instance))
    def revert_resize(self, ctxt, instance, migration_id, host):
        self.cast(ctxt, self.make_msg('revert_resize',
                instance_uuid=instance['uuid'], migration_id=migration_id),
                topic=_compute_topic(self.topic, ctxt, host, instance))
    def rollback_live_migration_at_destination(self, ctxt, instance, host):
        self.cast(ctxt, self.make_msg('rollback_live_migration_at_destination',
                instance_id=instance['id']),
                topic=_compute_topic(self.topic, ctxt, host, None))
    def set_admin_password(self, ctxt, instance, new_pass):
        self.cast(ctxt, self.make_msg('set_admin_password',
                instance_uuid=instance['uuid'], new_pass=new_pass),
                topic=_compute_topic(self.topic, ctxt, None, instance))
    def set_host_enabled(self, ctxt, enabled, host):
        topic = _compute_topic(self.topic, ctxt, host, None)
        return self.call(ctxt, self.make_msg('set_host_enabled',
                enabled=enabled), topic)
    def get_host_uptime(self, ctxt, host):
        # Added in RPC API version 1.1.
        topic = _compute_topic(self.topic, ctxt, host, None)
        return self.call(ctxt, self.make_msg('get_host_uptime'), topic,
                version='1.1')
    def snapshot_instance(self, ctxt, instance, image_id, image_type,
            backup_type, rotation):
        self.cast(ctxt, self.make_msg('snapshot_instance',
                instance_uuid=instance['uuid'], image_id=image_id,
                image_type=image_type, backup_type=backup_type,
                rotation=rotation),
                topic=_compute_topic(self.topic, ctxt, None, instance))
    def start_instance(self, ctxt, instance):
        self.cast(ctxt, self.make_msg('start_instance',
                instance_uuid=instance['uuid']),
                topic=_compute_topic(self.topic, ctxt, None, instance))
    def stop_instance(self, ctxt, instance, cast=True):
        # cast=True -> asynchronous; cast=False -> wait for the result.
        rpc_method = self.cast if cast else self.call
        return rpc_method(ctxt, self.make_msg('stop_instance',
                instance_uuid=instance['uuid']),
                topic=_compute_topic(self.topic, ctxt, None, instance))
    def suspend_instance(self, ctxt, instance):
        self.cast(ctxt, self.make_msg('suspend_instance',
                instance_uuid=instance['uuid']),
                topic=_compute_topic(self.topic, ctxt, None, instance))
    def terminate_instance(self, ctxt, instance):
        self.cast(ctxt, self.make_msg('terminate_instance',
                instance_uuid=instance['uuid']),
                topic=_compute_topic(self.topic, ctxt, None, instance))
    def unlock_instance(self, ctxt, instance):
        self.cast(ctxt, self.make_msg('unlock_instance',
                instance_uuid=instance['uuid']),
                topic=_compute_topic(self.topic, ctxt, None, instance))
    def unpause_instance(self, ctxt, instance):
        self.cast(ctxt, self.make_msg('unpause_instance',
                instance_uuid=instance['uuid']),
                topic=_compute_topic(self.topic, ctxt, None, instance))
    def unrescue_instance(self, ctxt, instance):
        self.cast(ctxt, self.make_msg('unrescue_instance',
                instance_uuid=instance['uuid']),
                topic=_compute_topic(self.topic, ctxt, None, instance))
    def change_instance_metadata(self, ctxt, instance, diff):
        # Added in RPC API version 1.3.
        self.cast(ctxt, self.make_msg('change_instance_metadata',
                instance_uuid=instance['uuid'], diff=diff),
                topic=_compute_topic(self.topic, ctxt, None, instance),
                version='1.3')
class SecurityGroupAPI(nova.openstack.common.rpc.proxy.RpcProxy):
    '''Client side of the security group rpc API.
    API version history:
        1.0 - Initial version.
    '''
    BASE_RPC_API_VERSION = '1.0'
    def __init__(self):
        super(SecurityGroupAPI, self).__init__(
                topic=FLAGS.compute_topic,
                default_version=self.BASE_RPC_API_VERSION)
    def refresh_security_group_rules(self, ctxt, security_group_id, host):
        # Fire-and-forget: ask the target compute host to reload its rules.
        msg = self.make_msg('refresh_security_group_rules',
                            security_group_id=security_group_id)
        topic = _compute_topic(self.topic, ctxt, host, None)
        self.cast(ctxt, msg, topic=topic)
    def refresh_security_group_members(self, ctxt, security_group_id,
            host):
        # Fire-and-forget: ask the target compute host to reload its members.
        msg = self.make_msg('refresh_security_group_members',
                            security_group_id=security_group_id)
        topic = _compute_topic(self.topic, ctxt, host, None)
        self.cast(ctxt, msg, topic=topic)
|
import sys
from io import StringIO
from contextlib import contextmanager
@contextmanager
def capture_output():
    '''
    Context manager to capture stdout and stderr.

    Redirects sys.stdout and sys.stderr into two StringIO buffers
    for the duration of the ``with`` block, restoring the originals
    afterwards (even if the block raises).

    Example::

        with capture_output() as (out, err):
            routine_that_prints_lots()

    *out.getvalue()* will return a string with whatever was printed
    on stdout; *err.getvalue()* will return the same for stderr.
    Nothing will appear on screen.
    '''
    saved = sys.stdout, sys.stderr
    sys.stdout, sys.stderr = StringIO(), StringIO()
    try:
        yield sys.stdout, sys.stderr
    finally:
        sys.stdout, sys.stderr = saved
|
def file(path: str, operation: str = 'r'):
    """Return the full contents of *path* opened with mode *operation*.

    The handle is closed before returning.
    """
    with open(path, operation) as handle:
        return handle.read()
|
import pygame
import sys
from definitions import *
from gameParameters import backgroundImage, gameDisplay, display_width, \
display_height, clock
class Button:
    """Class for rectangular buttons
    Args:
        location (tuple, int): Defines self.x, self.y
        width (int, default=80): Defines self._width
        height (int, default=30): Defines self._height
        message (str, default=None): Defines self._message
        color1 (tuple, int, default=green): Defines self._color1
        color2 (tuple, int, default=bright_green): Defines self._color2
        action (func or str, default=None): Defines self._action
        font (default="Comic Sans MS"): Defines self._font
        font_size (int, default=20): Defines self.font_size
        message_color (tuple, int, default=black): Defines self._message_color
        linked (bool, default=False): Defines self.linked
    Attributes:
        _mouse: Tracks mouse position
        _click: Tracks mouse button clicking
        _message (str): The message written on center of button
        x, y (tuple, int): Location of upper left corner of button rectangle
        _width (int): Width of button rectangle
        _height (int): Height of button rectangle
        _color1 (tuple, int): Color of button while mouse not hovering
        _color2 (tuple, int): Color of button while mouse is hovering
        _action (function or str): Calls _action() as a function if function,
            else returns _action if string
        _font: Font object for button text
        _message_color (tuple, int): Color of text on button
        linked (bool): If True, indicates button should expect other buttons
            linked to it, for selection interaction
        selected (bool): If True, makes button selected_color until a
            different, linked button is pressed
        selected_color (tuple, int): Assigns color to selected button in a
            set of linked buttons
        selected_text_color (tuple, int): Assigns text color to selected button
            in a set of linked buttons
        clickable (bool): If True, button can be pressed
        unclickable_timer (int): Timer for length of unclickable duration
        unclickable_countdown (int): Counts down from unclickable_timer to 0
    Methods:
        draw: Draws button with overlaid text, listens for hover and
            clicks, changing color on hover and calling function or
            returning string on click. Can link to other buttons.
        set_text: Overlays text on top of button if text exists
    """
    def __init__(self, location, width=80, height=30,
                 message=None, color1=green, color2=bright_green,
                 action=None, font="Comic Sans MS", font_size=20,
                 message_color=black, linked=False):
        self._mouse = None
        self._click = None
        self._message = message
        self.x, self.y = location
        self._width = width
        self._height = height
        self._color1 = color1
        self._color2 = color2
        self._action = action
        self._font = pygame.font.SysFont(font, font_size)
        self._message_color = message_color
        self.linked = linked
        self.selected = False
        self.selected_color = purple
        self.selected_text_color = white
        # Start unclickable so a held click from a previous screen does not
        # immediately trigger this button; the countdown re-enables it.
        self.clickable = False
        self.unclickable_timer = int(0.3 * seconds)
        self.unclickable_countdown = int(0.3 * seconds)
    def draw(self, *args):
        """Draws the buttons and accepts hover and click input to perform tasks
        Draws button with overlaid text, listens for hover and
        clicks, changing color on hover and calling function or
        returning string on click. Can link to other buttons.
        Args:
            *args: Other Button class instances
        Returns:
            self._action (optional): Only returned if it is a string
        """
        # Called once per frame: poll input, then draw in this order —
        # rectangle, text, debounce bookkeeping.
        self._mouse = pygame.mouse.get_pos()
        self._click = pygame.mouse.get_pressed()
        # If mouse hovering button
        if (self.x < self._mouse[0] < self.x + self._width
                and self.y <
                self._mouse[1] <
                self.y + self._height):
            if self.selected:
                pygame.draw.rect(
                    gameDisplay, self.selected_color,
                    (self.x, self.y, self._width, self._height))
            else:
                pygame.draw.rect(
                    gameDisplay, self._color2,
                    (self.x, self.y, self._width, self._height))
            if self._click[0] == 1 and self._action is not None:
                if self.clickable:
                    # Debounce: lock the button again until the countdown
                    # (run at the bottom of draw) releases it.
                    self.clickable = False
                    self.unclickable_countdown = self.unclickable_timer
                    if self.linked:
                        # Radio-button behavior: select self, deselect the
                        # linked buttons passed in *args.
                        self.selected = True
                        for arg in args:
                            arg.selected = False
                    if isinstance(self._action, str):
                        return self._action
                    else:
                        self._action()
        # If mouse not hovering over button
        else:
            if self.selected:
                pygame.draw.rect(
                    gameDisplay, self.selected_color,
                    (self.x, self.y, self._width, self._height))
            else:
                pygame.draw.rect(
                    gameDisplay, self._color1,
                    (self.x, self.y, self._width, self._height))
        if self._message:
            self.set_text()
        # Can button be clicked? Frame-based countdown re-enables clicking.
        if self.unclickable_countdown > 0:
            self.unclickable_countdown -= 1
            self.clickable = False
        else:
            self.clickable = True
    def set_text(self):
        """Overlays text on top of button, if text exists"""
        if self.selected:
            text_surface = self._font.render(
                self._message, True, self.selected_text_color)
        else:
            text_surface = self._font.render(
                self._message, True, self._message_color)
        # Center the rendered text inside the button rectangle.
        text_rect = text_surface.get_rect()
        text_rect.center = ((self.x + self._width // 2),
                            (self.y + self._height // 2))
        gameDisplay.blit(text_surface, text_rect)
class Tracker:
    """Keeps track of a statistic for player and displays it on screen
    Args:
        location (tuple, int): Defines self.x, self.y
        start_stat (int): Defines self.stat
        width (int, default=120): Defines self._width
        height (int, default=30): Defines self._height
        background_color (tuple, int, default=black):
            Defines self._background_color
        font (str, default="Comic Sans MS"): Defines font-type for self._font
        font_size (int, default=20): Defines font-size for self._font
        text_color (str, default=white): Defines self.text_color
        prefix (str, default=None): Defines self.prefix
        special (str, default=None): Defines self.special
    Attributes:
        x, y (tuple, int): Coordinates for upper-left corner of stat display
        stat (int): Tracks player's stat
        _width (int): Width of background rectangle for stat display
        _height (int): Height of background rectangle for stat display
        _background_color (tuple, int): Stat display rectangle background color
        _font (obj): Font for stat display, defined by font and font_size
        text_color (tuple, int): Color of stat display text
        background (bool): If True, displays background for sta display
        prefix (str): String to display in front of stat
        special (str): Defines special tracker type for unique display
    Methods:
        draw: Draws background of stat display, calls set_text
        set_text: Writes text to stat display
        update_stat: Alters copied stat for display on screen
        adjust: Adds stat to self.stat (can use with negative number)
    """
    def __init__(self, location, start_stat, width=100, height=30,
                 background_color=black, front_color=None, font="Comic Sans MS",
                 font_size=20, text_color=white, prefix=None, special=None):
        self.x, self.y = location
        self.start_stat = start_stat
        self.stat = start_stat
        self.displayed_stat = start_stat
        self._width = width
        self._height = height
        self._background_color = background_color
        # When set, draw() renders a front bar sized as stat/start_stat
        # (e.g. a health bar) on top of the background bar.
        self._front_color = front_color
        self._font = pygame.font.SysFont(font, font_size)
        self.text_color = text_color
        self.background = True
        self.prefix = prefix
        self.game_over = False
        self.special = special
    def draw(self):
        """Draws background of stat display, calls set_text"""
        # If front_color defined, draw front bar as percentage of back bar
        if self._front_color and self.background:
            if self.stat > 0:
                # Front bar width is proportional to the remaining stat.
                stat_width = int(self._width * self.stat // self.start_stat)
            else:
                stat_width = 0
            pygame.draw.rect(
                gameDisplay, self._background_color,
                (self.x, self.y, self._width, self._height))
            pygame.draw.rect(
                gameDisplay, self._front_color,
                (self.x, self.y, stat_width, self._height))
        # If no background color
        elif self.background and not self._front_color:
            pygame.draw.rect(
                gameDisplay, self._background_color,
                (self.x, self.y, self._width, self._height))
        # Write text
        self.set_text()
    def set_text(self):
        """Writes text to stat display"""
        # Refresh the text that will be displayed
        self.update_stat()
        # Add prefix if there is one and then render text surface
        if self.prefix:
            text_surface = self._font.render(
                self.prefix + self.displayed_stat,
                True, self.text_color)
        else:
            text_surface = self._font.render(
                self.displayed_stat, True, self.text_color)
        # Get rectangle for text and center text in it
        text_rect = text_surface.get_rect()
        text_rect.center = ((self.x + self._width // 2),
                            (self.y + self._height // 2))
        # Display text on screen
        gameDisplay.blit(text_surface, text_rect)
    def update_stat(self):
        """Alters copied stat for display on screen"""
        if self.special == "clock":
            # stat counts frames; convert to M:SS using the frame-rate
            # constants from definitions.
            minutes_elapsed = self.stat // minutes
            remaining_seconds = (self.stat % minutes) // seconds
            self.displayed_stat = \
                "Time: {0}:{1:02}".format(minutes_elapsed, remaining_seconds)
        elif self.special == "castle":
            self.displayed_stat = \
                "Castle: {}/{}".format(self.stat, self.start_stat)
            # Castle health reaching zero ends the game.
            if self.stat <= 0:
                self.game_over = True
        else:
            self.displayed_stat = str(self.stat)
    def adjust(self, amount):
        """Adds stat to self.stat (can use with negative number)"""
        self.stat += amount
class EndScreen:
    """Creates an end of game screen for winning or losing game
    No Args
    Attributes:
        center_x (int): Gets the horizontal center of the screen
        game_y (int): Y coordinate for 'Victory' or 'Defeat'
        score_y (int): Y coordinate for end of game score
        time_y (int): Y coordinate for end of game time
        score: Players points obtained for display
        time_elapsed (int): Games frames used to approx. time
        game_font (obj): font for 'Victory/Defeat' display
        score_font (obj): font for 'score' display
        time_font (obj): font for 'time' display
        text_color (obj): color of all displays' screen text
        play_button (obj): Button for playing new game
        quit_button (obj): Button for quitting game
        main_button (obj): Button for returning to intro loop
    Methods:
        draw: Draws all message displays and buttons, calls set_text()
        set_text: Displays texts for message displays
    """
    def __init__(self):
        self.center_x = display_width // 2
        self.game_y = 100
        self.score_y = 250
        self.time_y = 325
        self.score = 0
        self.time_elapsed = 0  # Game time approximation, based on frames (slow)
        self.game_font = pygame.font.SysFont("Comic Sans MS", 120)
        self.score_font = pygame.font.SysFont("Comic Sans MS", 80)
        self.time_font = pygame.font.SysFont("Comic Sans MS", 40)
        self.text_color = black
        # Three buttons return strings ("play"/"main") or quit the program.
        self.play_button = Button(
            (75, display_height - 320), message="Play", action="play",
            font_size=40, width=200, height=60, color1=green,
            color2=bright_green)
        self.quit_button = Button(
            (325, display_height - 320), message="Quit", action=sys.exit,
            font_size=40, width=200, height=60, color1=red, color2=bright_red)
        self.main_button = Button(
            (575, display_height - 320), message="Main menu", action="main",
            font_size=40, width=200, height=60, color1=yellow,
            color2=bright_yellow)
    def draw(self, win_loss):
        """Draws all message displays and buttons, calls set_text()"""
        # Swap background music depending on the outcome, fading out
        # whatever was playing during the game.
        pygame.mixer.music.fadeout(750)
        if win_loss == "lose":
            pygame.mixer.music.load('music/Hero_Down.mp3')
            pygame.mixer.music.play(-1, start=1.5)
        if win_loss == "win":
            pygame.mixer.music.load('music/Amazing_Plan_Silent_Film_Dark.mp3')
            pygame.mixer.music.play(-1)
        # Define time for display
        minutes_elapsed = self.time_elapsed // minutes
        remaining_seconds = (self.time_elapsed % minutes) // seconds
        # Blocking loop: runs until a button returns "play"/"main" or the
        # window is closed / Quit is clicked (sys.exit).
        while True:
            # Check for quit
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    sys.exit()
            # Show background
            gameDisplay.blit(backgroundImage.image, backgroundImage.rect)
            # Draw "Defeat/Victory" text
            if win_loss == "lose":
                self.set_text(self.center_x, self.game_y, "Defeat!",
                              self.game_font)
            if win_loss == "win":
                self.set_text(self.center_x, self.game_y, "Victory!!",
                              self.game_font)
            # Draw "Score" text
            self.set_text(self.center_x, self.score_y,
                          "score: {}".format(self.score), self.score_font)
            # Draw "Time elapsed" text
            self.set_text(self.center_x, self.time_y, "Time: {0}:{1:02}".format(
                minutes_elapsed, remaining_seconds), self.time_font)
            # Draw quit button
            self.quit_button.draw()
            # Draw play button
            play = self.play_button.draw()
            if play == "play":
                return play
            # Draw main button
            main = self.main_button.draw()
            if main == "main":
                return main
            # Update game
            pygame.display.update()
            clock.tick(30)
    def set_text(self, x, y, message, font):
        """Displays texts for message displays"""
        text_surface = font.render(message, True, self.text_color)
        text_rect = text_surface.get_rect()
        # Center the text on the given (x, y) point.
        text_rect.center = (x, y)
        gameDisplay.blit(text_surface, text_rect)
class Settings:
    """Holds the tunable difficulty parameters for a game session.

    Attributes:
        spawn_rate (int): Frames between enemy spawns (6 in-game seconds)
        starting_gold (int): Money available at the start of a game
        gold_generation (int): Frames between passive gold ticks (1 second)
        difficulty (int): Scales how quickly enemies ramp up (default 1)
    """
    def __init__(self):
        # Independent assignments; `seconds` is the module-level frames/second.
        self.difficulty = 1
        self.starting_gold = 1200
        self.spawn_rate = int(6 * seconds)
        self.gold_generation = int(1 * seconds)
|
from dataclasses import dataclass
from typing import List
from omegaconf import DictConfig
@dataclass
class Data:
    """Dataset section of the typed config schema."""
    name: str  # dataset identifier
    num_classes: int  # number of target classes
    root: str  # filesystem path to the dataset root
@dataclass
class Logger:
    """Logger section of the typed config schema."""
    name: str  # logger/backend name
    save_dir: str  # directory where logs are written
@dataclass
class Preprocess:
    """Data-augmentation section of the typed config schema."""
    horizontal_flip: bool  # enable horizontal flipping
    horizontal_flip_rate: float  # probability of a flip
    random_rotation: bool  # enable random rotation
    random_rotation_degrees: int  # max rotation magnitude in degrees
@dataclass
class Train:
    """Training section of the typed config schema."""
    batch_size: int
    gpus: List[int]  # GPU device indices to train on
    model: str  # model architecture name
    num_epochs: int
    num_workers: int  # dataloader worker processes
    pre_trained: bool  # start from pretrained weights
    tags: List[str]  # experiment tags for the logger
    train_rate: float  # fraction of data used for training (rest for validation)
@dataclass
class Config(DictConfig):
    """Top-level typed schema mirroring the OmegaConf config tree."""
    data: Data
    logger: Logger
    preprocess: Preprocess
    train: Train
|
import logging
import sys
import warnings
import numpy as np
import pytest
import rasterio
from rasterio.enums import MaskFlags
from rasterio.errors import NodataShadowWarning, RasterioDeprecationWarning
@pytest.fixture(scope='function')
def tiffs(tmpdir):
    """Create three derived copies of RGB.byte.tif in tmpdir:

    - shadowed.tif: 4-band file keeping the source nodata value, whose 4th
      band "shadows" the nodata mask.
    - no-nodata.tif: same data with the nodata tag removed.
    - sidecar-masked.tif: no nodata, plus an all-zero .msk sidecar mask.
    Returns the tmpdir handle so tests can join() the filenames.
    """
    with rasterio.open('tests/data/RGB.byte.tif') as src:
        profile = src.profile
        shadowed_profile = profile.copy()
        shadowed_profile['count'] = 4
        with rasterio.open(
                str(tmpdir.join('shadowed.tif')), 'w',
                **shadowed_profile) as dst:
            # NOTE(review): band 4 is rewritten on every loop iteration, so it
            # ends up a copy of the last (3rd) band — confirm intended.
            for i, band in enumerate(src.read(masked=False), 1):
                dst.write(band, i)
                dst.write(band, 4)
        del profile['nodata']
        with rasterio.open(
                str(tmpdir.join('no-nodata.tif')), 'w',
                **profile) as dst:
            dst.write(src.read(masked=False))
        with rasterio.open(
                str(tmpdir.join('sidecar-masked.tif')), 'w',
                **profile) as dst:
            dst.write(src.read(masked=False))
            # All-zero mask: every pixel is flagged invalid in the sidecar.
            mask = np.zeros(src.shape, dtype='uint8')
            dst.write_mask(mask)
    return tmpdir
def test_mask_flags():
    """RGB.byte.tif advertises nodata-derived, per-band masks only."""
    with rasterio.open('tests/data/RGB.byte.tif') as src:
        for band_flags in src.mask_flag_enums:
            assert MaskFlags.nodata in band_flags
            assert MaskFlags.per_dataset not in band_flags
            assert MaskFlags.alpha not in band_flags
def test_mask_flags_rgba():
    """RGBA: color bands are alpha-masked per dataset; alpha band is all-valid."""
    with rasterio.open('tests/data/RGBA.byte.tif') as src:
        color_band_flags = src.mask_flag_enums[:3]
        for band_flags in color_band_flags:
            assert MaskFlags.alpha in band_flags
            assert MaskFlags.per_dataset in band_flags
            assert MaskFlags.nodata not in band_flags
        assert src.mask_flag_enums[3] == [MaskFlags.all_valid]
def test_mask_flags_sidecar(tiffs):
    """A .msk sidecar file yields per-dataset mask flags on every band."""
    path = str(tiffs.join('sidecar-masked.tif'))
    with rasterio.open(path) as src:
        for band_flags in src.mask_flag_enums:
            assert MaskFlags.per_dataset in band_flags
            assert MaskFlags.nodata not in band_flags
            assert MaskFlags.alpha not in band_flags
def test_mask_flags_shadow(tiffs):
    """The shadowed file keeps plain nodata-based per-band masks."""
    path = str(tiffs.join('shadowed.tif'))
    with rasterio.open(path) as src:
        for band_flags in src.mask_flag_enums:
            assert MaskFlags.nodata in band_flags
            assert MaskFlags.per_dataset not in band_flags
            assert MaskFlags.alpha not in band_flags
def test_warning_no():
    """No shadow warning is raised.

    BUG FIX: the original wrapped read_masks() in
    ``try/except NodataShadowWarning`` — but warnings are *emitted*, not
    raised, so the except clause could never fire and the test passed
    vacuously. Record warnings instead and assert none of the shadow
    category were emitted.
    """
    with rasterio.open('tests/data/RGB.byte.tif') as src:
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter("always")
            src.read_masks()
        assert not any(
            issubclass(w.category, NodataShadowWarning) for w in caught), \
            "Unexpected NodataShadowWarning raised"
def test_warning_shadow(tiffs):
    """Reading masks of the shadowed file emits NodataShadowWarning."""
    path = str(tiffs.join('shadowed.tif'))
    with rasterio.open(path) as src:
        with pytest.warns(NodataShadowWarning):
            src.read_masks()
def test_masks():
    """Mask value 0 marks only nodata pixels in each band."""
    with rasterio.open('tests/data/RGB.byte.tif') as src:
        band_masks = src.read_masks()
        bands = src.read(masked=False)
    for band, band_mask in zip(bands, band_masks):
        # Wherever the mask is zero, the band data must be nodata (zero).
        assert not band[band_mask == 0].any()
def test_masked_true():
    """read(masked=True) gives masks that invert read_masks()."""
    with rasterio.open('tests/data/RGB.byte.tif') as src:
        bands = src.read(masked=True)
        band_masks = src.read_masks()
    for band, band_mask in zip(bands, band_masks):
        assert (band.mask == ~band_mask.astype('bool')).all()
def test_masked_none():
    # NOTE(review): this body is byte-for-byte identical to test_masked_true
    # and passes masked=True despite the name — looks like a copy-paste;
    # presumably it was meant to exercise the default/None masked behavior.
    # Confirm against upstream before changing.
    with rasterio.open('tests/data/RGB.byte.tif') as src:
        r, g, b = src.read(masked=True)
        rm, gm, bm = src.read_masks()
        assert (r.mask == ~rm.astype('bool')).all()
        assert (g.mask == ~gm.astype('bool')).all()
        assert (b.mask == ~bm.astype('bool')).all()
def test_masking_no_nodata(tiffs):
    # if the dataset has no defined nodata values, all data is
    # considered valid data. The GDAL masks bands are arrays of
    # 255 values. ``read()`` returns masked arrays where `mask`
    # is False.
    filename = str(tiffs.join('no-nodata.tif'))
    with rasterio.open(filename) as src:
        for flags in src.mask_flag_enums:
            assert MaskFlags.all_valid in flags
            assert MaskFlags.alpha not in flags
            assert MaskFlags.nodata not in flags
        rgb = src.read(masked=False)
        assert not hasattr(rgb, 'mask')
        r = src.read(1, masked=False)
        assert not hasattr(r, 'mask')
        rgb = src.read(masked=True)
        assert hasattr(rgb, 'mask')
        assert not rgb.mask.any()
        r = src.read(1, masked=True)
        assert hasattr(r, 'mask')
        assert not r.mask.any()
        # NOTE(review): the next four lines largely repeat the checks above,
        # and the assertion after re-reading `rgb` inspects `r.mask`, not
        # `rgb.mask` — likely a copy-paste slip; confirm intent upstream.
        rgb = src.read(masked=True)
        assert hasattr(rgb, 'mask')
        assert not r.mask.any()
        r = src.read(1, masked=True)
        assert not r.mask.any()
        masks = src.read_masks()
        assert masks.all()
def test_masking_sidecar_mask(tiffs):
    """With a .msk sidecar, every mask derives from that file.

    The fixture's sidecar is all zeros, so every pixel reads as masked.
    """
    path = str(tiffs.join('sidecar-masked.tif'))
    with rasterio.open(path) as src:
        for band_flags in src.mask_flag_enums:
            assert MaskFlags.per_dataset in band_flags
            assert MaskFlags.alpha not in band_flags
            assert MaskFlags.nodata not in band_flags
        rgb = src.read(masked=True)
        assert rgb.mask.all()
        first_band = src.read(1, masked=True)
        assert first_band.mask.all()
        assert not src.read_masks().any()
|
import tensorflow as tf
from sklearn.utils import shuffle
import pickle as pkl
import datetime
import numpy as np
import os
from utils import initialize_uninitialized, batch_retrieve
class BNN(object):
    """Anchored Bayesian neural network (TF1 graph mode).

    Blends `num_models` candidate model predictions with softmax weights
    predicted from the remaining ("space-time") input columns, adds a learned
    bias field, and predicts heteroscedastic observation noise. The loss is a
    Gaussian negative log-likelihood plus an "anchor" penalty that ties the
    weights to their random initial values (anchored-ensembles style
    regularisation).
    """
    def __init__(self, sess, checkpoint_dir, log_dir,
                 x_dim, y_dim, num_models, n_data,
                 hidden_size, learning_rate, lambda_anchor,
                 init_std_1w, init_std_1b, init_std_2w,
                 init_std_2b, init_std_biasw, init_std_noisew,
                 load_from_epoch=False):
        # sess: live tf.Session used for every graph execution below.
        self.sess = sess
        self.checkpoint_dir = checkpoint_dir
        self.log_dir = log_dir
        if not os.path.exists(self.log_dir):
            os.makedirs(self.log_dir)
        self.writer = tf.summary.FileWriter(self.log_dir)
        self.model_name = 'BNN'
        self.x_dim = x_dim
        self.y_dim = y_dim
        self.n_data = n_data
        # Per-parameter-group anchoring strengths; indexed 0..5 in build_model.
        self.lambda_anchor = lambda_anchor
        self.hidden_size = hidden_size
        self.learning_rate = learning_rate
        self.n_mdls = num_models
        # First n_mdls input columns are candidate model predictions; the
        # remaining columns are the space-time coordinates fed to the network.
        self.inputs = tf.placeholder(tf.float32, [None, x_dim], name='inputs')
        self.modelpred = self.inputs[:, :self.n_mdls]
        self.spacetime = self.inputs[:, self.n_mdls:]
        self.y_target = tf.placeholder(tf.float32, [None, y_dim], name='target')
        # Hidden layer over space-time features; init std-devs are arguments
        # because the anchor penalty is calibrated to these priors.
        self.layer1 = tf.layers.Dense(hidden_size,
                                      activation=tf.nn.tanh,
                                      name='layer1',
                                      kernel_initializer=tf.random_normal_initializer(mean=0.,
                                                                                      stddev=init_std_1w),
                                      bias_initializer=tf.random_normal_initializer(mean=0.,
                                                                                    stddev=init_std_1b))
        self.layer1_out = self.layer1(self.spacetime)
        self.layer2 = tf.layers.Dense(num_models,
                                      activation=None,
                                      name='layer2',
                                      kernel_initializer=tf.random_normal_initializer(mean=0.,
                                                                                      stddev=init_std_2w),
                                      bias_initializer=tf.random_normal_initializer(mean=0.,
                                                                                    stddev=init_std_2b))
        self.layer2_out = self.layer2(self.layer1_out)
        # Softmax turns the per-model logits into convex combination weights.
        self.model_coeffs = tf.nn.softmax(self.layer2_out)
        self.modelbias_layer = tf.layers.Dense(y_dim,
                                               activation=None,
                                               name='layer-bias',
                                               use_bias=False,
                                               kernel_initializer=tf.random_normal_initializer(mean=0.,
                                                                                               stddev=init_std_biasw))
        self.modelbias = self.modelbias_layer(self.layer1_out)
        # Prediction = weighted model average + learned bias field.
        self.output = tf.reduce_sum(self.model_coeffs * self.modelpred, axis=1) + tf.reshape(self.modelbias, [-1])
        self.noise_layer = tf.layers.Dense(self.y_dim,
                                           activation=tf.nn.sigmoid,
                                           name='layer-noise',
                                           use_bias=False,
                                           kernel_initializer=tf.random_normal_initializer(mean=0.,
                                                                                           stddev=init_std_noisew))
        # Sigmoid output scaled so predicted noise std lies in (0, 0.06).
        self.noise_pred = 0.06 * self.noise_layer(self.layer1_out)
        self.opt_method = tf.train.AdamOptimizer(self.learning_rate)
        # 1e-6 floor keeps the NLL division/log numerically stable.
        self.noise_sq = tf.square(self.noise_pred)[:,0] + 1e-6
        self.err_sq = tf.reshape(tf.square(self.y_target[:,0] - self.output), [-1])
        num_data_inv = tf.cast(tf.divide(1, tf.shape(self.inputs)[0]), dtype=tf.float32)
        self.mse_ = num_data_inv * tf.reduce_sum(self.err_sq)
        # Gaussian NLL (up to constants): err^2/sigma^2 + log(sigma^2).
        self.loss_ = num_data_inv * (tf.reduce_sum(tf.divide(self.err_sq, self.noise_sq)) + tf.reduce_sum(tf.log(self.noise_sq)))
        # This optimizer is replaced by build_model() once the anchor term
        # has been added to loss_.
        self.optimizer = self.opt_method.minimize(self.loss_)
        # Summary stats
        self.mse_sum = tf.summary.scalar("mse", self.mse_)
        self.loss_sum = tf.summary.scalar("loss", self.loss_)
        self.build_model()
        self.saver = tf.train.Saver(max_to_keep=100)
        if self.load_model(load_from_epoch):
            print('Loading from pre-existing model')
        else:
            print('Initialising new model')
        self.writer.add_graph(self.sess.graph)
        return
    def build_model(self):
        """ The model is regularised around its initial parameters"""
        # Initialise parameters
        initialize_uninitialized(self.sess)
        # Get initial vars
        ops = [self.layer1.kernel,
               self.layer1.bias,
               self.layer2.kernel,
               self.layer2.bias,
               self.modelbias_layer.kernel,
               self.noise_layer.kernel]
        w1, b1, w2, b2, wbias, wnoise = self.sess.run(ops)
        # Anchor the model: snapshot the random initial values as constants.
        self.w1_init, self.b1_init, self.w2_init, self.b2_init, self.wbias_init, self.wnoise_init = w1, b1, w2, b2, wbias, wnoise
        loss_anchor = self.lambda_anchor[0]*tf.reduce_sum(tf.square(self.w1_init - self.layer1.kernel))
        loss_anchor += self.lambda_anchor[1]*tf.reduce_sum(tf.square(self.b1_init - self.layer1.bias))
        loss_anchor += self.lambda_anchor[2]*tf.reduce_sum(tf.square(self.w2_init - self.layer2.kernel))
        loss_anchor += self.lambda_anchor[3]*tf.reduce_sum(tf.square(self.b2_init - self.layer2.bias))
        loss_anchor += self.lambda_anchor[4]*tf.reduce_sum(tf.square(self.wbias_init - self.modelbias_layer.kernel))
        loss_anchor += self.lambda_anchor[5]*tf.reduce_sum(tf.square(self.wnoise_init - self.noise_layer.kernel)) # new param
        # NOTE(review): loss_anchor is scaled by 1/n_data here AND again when
        # added to loss_ two lines below, i.e. the anchor term ends up scaled
        # by 1/n_data**2 — confirm whether the double scaling is intended.
        self.loss_anchor = tf.cast(1.0/self.n_data, dtype=tf.float32) * loss_anchor
        self.loss_anc_sum = tf.summary.scalar("loss_anchor", self.loss_anchor)
        # combine with original loss
        self.loss_ = self.loss_ + tf.cast(1.0/self.n_data, dtype=tf.float32) * self.loss_anchor
        self.optimizer = self.opt_method.minimize(self.loss_)
        return
    def predict(self, X):
        """Run the blended prediction op over X (batched via batch_retrieve)."""
        return batch_retrieve(self, self.output, X)
    def save_model(self, epoch):
        """Checkpoint the session under checkpoint_dir, tagged with `epoch`."""
        if not os.path.exists(self.checkpoint_dir):
            os.makedirs(self.checkpoint_dir)
        self.saver.save(self.sess,
                        os.path.join(self.checkpoint_dir, self.model_name),
                        global_step=epoch)
        return
    def load_model(self, load_from_epoch):
        """Restore weights; a specific epoch if given, else the latest checkpoint.

        Returns True when a checkpoint was restored, False otherwise.
        """
        print("Reading checkpoints...")
        if load_from_epoch:
            ckpt_name = self.model_name + '-{}'.format(load_from_epoch)
            print('Loading from checkpoint: {}'.format(ckpt_name))
            print(os.path.join(self.checkpoint_dir, ckpt_name))
            self.saver.restore(self.sess, os.path.join(self.checkpoint_dir, ckpt_name))
            return True
        else:
            ckpt = tf.train.get_checkpoint_state(self.checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                self.saver.restore(self.sess, ckpt.model_checkpoint_path)
                print('Loading from checkpoint: {}'.format(ckpt))
                return True
            else:
                return False
    def train(self, n_epochs, ep_0, X_train, y_train, shuff=50, batch_size=1000, print_freq=100, save_freq=250):
        """Minibatch-train for n_epochs starting at epoch ep_0.

        Writes summaries every print_freq epochs, checkpoints every
        save_freq epochs, and reshuffles the data every `shuff` epochs.
        """
        self.summary = tf.summary.merge([self.mse_sum, self.loss_sum, self.loss_anc_sum])
        ep_ = ep_0
        while ep_ < n_epochs + ep_0:
            ep_ += 1
            # Train in batches; any remainder < batch_size is dropped.
            j_max = int(X_train.shape[0]/batch_size)
            for j in range(int(X_train.shape[0]/batch_size)):
                feed_b = {}
                feed_b[self.inputs] = X_train[j*batch_size:(j+1)*batch_size, :]
                feed_b[self.y_target] = y_train[j*batch_size:(j+1)*batch_size, :]
                blank = self.sess.run(self.optimizer, feed_dict=feed_b)
            if (ep_ % print_freq) == 0:
                # Summaries are computed on the full training set.
                feed_b = {}
                feed_b[self.inputs] = X_train
                feed_b[self.y_target] = y_train
                summary_str = self.sess.run(self.summary, feed_dict=feed_b)
                self.writer.add_summary(summary_str, ep_)
                # loss_mse = self.sess.run(self.mse_, feed_dict=feed_b)
                # loss_anch = self.sess.run(self.loss_, feed_dict=feed_b)
                # loss_anch_term, summary_str = self.sess.run([self.loss_anchor, self.summary], feed_dict=feed_b)
                # print('epoch:' + str(ep_) + ' at ' + str(datetime.datetime.now()))
                # print(', rmse_', np.round(np.sqrt(loss_mse),5), ', loss_anch', np.round(loss_anch,5), ', anch_term', np.round(loss_anch_term,5))
            if (ep_ % save_freq) == 0:
                self.save_model(ep_)
            # Shuffle
            if (ep_ % shuff == 0):
                X_train, y_train = shuffle(X_train, y_train, random_state=ep_)
        return
|
from empire.python.typings import *
from empire.enums.base_enum import BaseEnum
class TemperatureStates(BaseEnum):
    """Enumerates the temperature severity states the system can report."""
    NORMAL: Final[str] = 'normal'
    HIGH: Final[str] = 'high'
    CRITICAL: Final[str] = 'critical'
    UNKNOWN: Final[str] = 'unknown'  # state could not be determined
|
from .nodes import Node, Producer, Consumer, SynchronousConsumer
from .utils import connect, start, stop
from .queues import Queue
|
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2020, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import json
from pgadmin.utils.route import BaseTestGenerator
import config
class MasterPasswordTestCase(BaseTestGenerator):
    """
    This class validates the change password functionality
    by defining change password scenarios; where dict of
    parameters describes the scenario appended by test name.

    Fix: replaced the deprecated ``assertEquals`` alias (removed in
    Python 3.12) with ``assertEqual`` throughout.
    """

    scenarios = [
        # This testcase validates invalid confirmation password
        ('TestCase for Create master password dialog', dict(
            password="",
            content=(
                "Set Master Password",
                [
                    "Please set a master password for pgAdmin.",
                    "This will be used to secure and later unlock saved "
                    "passwords and other credentials."
                ]
            )
        )),
        ('TestCase for Setting Master Password', dict(
            password="masterpasstest",
            check_if_set=True,
        )),
        ('TestCase for Resetting Master Password', dict(
            reset=True,
            password="",
            content=(
                "Set Master Password",
                [
                    "Please set a master password for pgAdmin.",
                    "This will be used to secure and later unlock saved "
                    "passwords and other credentials."
                ]
            )
        )),
    ]

    def setUp(self):
        # Each scenario runs with the master-password requirement enabled.
        config.MASTER_PASSWORD_REQUIRED = True

    def runTest(self):
        """This function will check change password functionality."""
        # Build the request payload only from attributes the scenario defines.
        req_data = dict()
        if hasattr(self, 'password'):
            req_data['password'] = self.password
        if hasattr(self, 'restart'):
            req_data['restart'] = self.restart
        if hasattr(self, 'reset'):
            req_data['reset'] = self.reset
        if config.SERVER_MODE:
            # In server mode the master password endpoint only reports presence.
            response = self.tester.post(
                '/browser/master_password',
                data=json.dumps(req_data),
            )
            self.assertEqual(response.json['data']['present'], True)
        else:
            if 'reset' in req_data:
                response = self.tester.delete(
                    '/browser/master_password'
                )
                self.assertEqual(response.status_code, 200)
                self.assertEqual(response.json['data'], False)
            else:
                response = self.tester.post(
                    '/browser/master_password',
                    data=json.dumps(req_data),
                )
                self.assertEqual(response.status_code, 200)
            if hasattr(self, 'content'):
                # Verify the dialog title and every expected content fragment.
                self.assertEqual(response.json['data']['title'],
                                 self.content[0])
                for text in self.content[1]:
                    self.assertIn(text, response.json['data']['content'])
            if hasattr(self, 'check_if_set'):
                response = self.tester.get(
                    '/browser/master_password'
                )
                self.assertEqual(response.status_code, 200)
                self.assertEqual(response.json['data'], True)

    def tearDown(self):
        config.MASTER_PASSWORD_REQUIRED = False
|
# encoding = utf-8
ERROR_TEMPLATE = '''{"error_code": "%s", "error_msg": "%s", "reason": "%s"}'''
class TraderError(Exception):
    '''
    Base class for trading errors; defines the error code/message contract.
    '''
    ERROR_CODE = '0'
    ERROR_MSG = 'Success'
    def __init__(self, reason):
        # Render code, message, and the caller-supplied reason as a JSON-style
        # string via ERROR_TEMPLATE so consumers can parse the exception text.
        super(TraderError, self).__init__(ERROR_TEMPLATE % (self.ERROR_CODE, self.ERROR_MSG, reason))
class VerifyCodeError(TraderError):
    '''
    Verification-code (captcha) error.
    '''
    ERROR_CODE = '10001'
    ERROR_MSG = 'Trader: Wrong Verify Code Error'
class LoginFailedError(TraderError):
    '''
    Login failed.
    '''
    ERROR_CODE = '10002'
    ERROR_MSG = 'Trader: Login Failed'
class TraderNetworkError(TraderError):
    '''
    Network connection failure.
    '''
    ERROR_CODE = '10004'
    ERROR_MSG = 'Trader: Network Error'
class TraderAPIError(TraderError):
    '''
    Trade API call failed.
    '''
    ERROR_CODE = '10005'
    ERROR_MSG = 'Trader: Trade API Error'
class NotSupportAPIError(TraderError):
    '''
    The broker does not support this API.
    '''
    ERROR_CODE = '10006'
    ERROR_MSG = 'Trader: Broker not support API'
class BrokerAttributeError(TraderError):
    '''
    Invalid API/broker attribute (parameter) error.
    '''
    ERROR_CODE = '10007'
    # NOTE(review): message duplicated from NotSupportAPIError (10006) —
    # presumably meant to describe an attribute/parameter error; confirm.
    ERROR_MSG = 'Trader: Broker not support API'
class TraderUnkownError(TraderError):
    '''
    Unknown error. (Class name spelling kept as-is: renaming would break
    existing callers that catch TraderUnkownError.)
    '''
    ERROR_CODE = '-1'
    ERROR_MSG = 'Unkown APIException Error.'
|
from .dataset import Dataset
from .dataset_group import DatasetGroup
from .image import DockerImage
from .image_config import DockerImageConfig
from .image_relationship import DockerImageRelationship
from .iteration import Iteration
from .model import Model
from .model_group import ModelGroup
from .project import Project
from .role import Role
from .task import Task
from .user import User
|
import csv
import os
from flask import Flask, request, send_file, render_template, flash, redirect, url_for
from io import BytesIO, StringIO
from vpg.VoucherPrint import VoucherPrint
import subprocess
import mysql.connector
from mysql.connector import errorcode
import tempfile
import itertools
PDFJAM_BIN = "/usr/bin/pdfjam"
VOUCHER_PRIVATE_KEY = os.environ['VOUCHER_PRIVATE_KEY']
VOUCHER_CFG = os.environ['VOUCHER_CFG']
VOUCHER_BIN = os.environ['VOUCHER_BIN']
FLASK_SECRET = os.environ['FLASK_SECRET']
app = Flask(__name__)
app.secret_key = FLASK_SECRET
def create_app():
    """Application factory hook: return the module-level Flask app."""
    return app
@app.route('/', methods=['GET'])
def home():
    """Render the landing page."""
    return render_template('home.html')
@app.route('/pdf/step', methods=['POST'])
def pdf_step():
    """Show the confirmation step for the posted roll/count before generating."""
    roll = int(request.form['roll'])
    count = int(request.form['count'])
    return render_template('pdf/step.html', roll=roll, count=count)
def generate_vouchers(roll, count):
    """Run the external voucher generator and return the voucher codes.

    Invokes VOUCHER_BIN for the given roll/count, parses its
    semicolon-separated CSV output, drops '#'-prefixed comment rows, and
    returns the stripped code from the first column of each remaining row.
    """
    raw_output = subprocess.check_output(
        [VOUCHER_BIN, '-c', VOUCHER_CFG, '-p', VOUCHER_PRIVATE_KEY, str(roll), str(count)])
    rows = csv.reader(StringIO(raw_output.decode('utf-8')), delimiter=';', quotechar='"')
    return [row[0].strip() for row in rows if not row[0].startswith('#')]
def generate_buffer_pdf(roll, count):
    """Render the vouchers for (roll, count) into an in-memory PDF.

    Returns:
        (BytesIO, int): the PDF buffer, rewound to position 0, and the
        number of vouchers it contains.
    """
    vouchers = generate_vouchers(roll, count)
    voucher_buffer = BytesIO()
    report = VoucherPrint(voucher_buffer, vouchers)
    report.print_vouchers()
    # Rewind so callers can read the buffer from the start.
    voucher_buffer.seek(0)
    return voucher_buffer, len(vouchers)
def buffer_pdf_to_2x2(voucher_buffer):
    """Arrange the voucher PDF four-per-page (2x2 n-up) with pdfjam.

    Args:
        voucher_buffer (BytesIO): one-voucher-per-page PDF data.
    Returns:
        bytes: the 2x2 n-up PDF, or None when pdfjam fails.

    BUG FIX: the original piped stderr into stdout, which (a) meant
    ``communicate()`` always returned stderrdata=None so the error branch was
    dead, (b) mixed pdfjam's log output into the PDF bytes on stdout, and
    (c) returned a 2-tuple on error but a single value on success. We now
    silence stderr, check the process exit status, and always return a
    single value.
    """
    process = subprocess.Popen(
        [PDFJAM_BIN, "--nup", "2x2", "--outfile", "/dev/stdout", "--"],
        shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
        stderr=subprocess.DEVNULL)
    stdoutdata, _ = process.communicate(input=voucher_buffer.getvalue())
    if process.returncode != 0:
        return None
    return stdoutdata
def shuffle_ads(ads_file_path, voucher_pdf_path, voucher_pdf_pages):
    """Interleave one ads page after every voucher page using pdfjam.

    Args:
        ads_file_path (str): path to the ads PDF (page 1 is used).
        voucher_pdf_path (str): path to the n-up voucher PDF.
        voucher_pdf_pages (int): number of voucher pages to interleave.
    Returns:
        bytes: the combined PDF, or None when pdfjam fails.

    BUG FIX: with stderr piped into stdout, ``communicate()`` always returned
    stderrdata=None, so the original error check could never fire — and the
    stderr chatter was mixed into the PDF bytes. Silence stderr and check the
    exit status instead.
    """
    page_with_ads = 1
    # Build pdfjam's interleaved page list: voucher page i, then the ads page.
    pdfjam_pages = [
        [voucher_pdf_path, str(i + 1), ads_file_path,
         str(page_with_ads)] for i in range(voucher_pdf_pages)]
    process = subprocess.Popen(
        [PDFJAM_BIN, *list(itertools.chain(*pdfjam_pages)), "--outfile", "/dev/stdout", "--paper", "a4paper",
         "--rotateoversize",
         "false"],
        shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
        stderr=subprocess.DEVNULL)
    stdoutdata, _ = process.communicate()
    if process.returncode != 0:
        return None
    return stdoutdata
@app.route('/pdf/generate', methods=['POST'])
def pdf_generate():
    """Generate the final printable voucher PDF (2x2 n-up, ads interleaved)."""
    roll = int(request.form['roll'])
    count = int(request.form['count'])
    ads_file = request.files['ads_pdf']
    if ads_file.filename == '':
        flash("Error: Please provide an Ads file!")
        return redirect(url_for('home'))
    voucher_buffer, voucher_count = generate_buffer_pdf(roll, count)
    # NOTE(review): generate_buffer_pdf never returns None for the buffer, so
    # this guard appears dead — presumably a leftover from an older error path.
    if voucher_buffer is None:
        flash("Error: Failed to generate 2x2 pdf!")
        return redirect(url_for('home'))
    # Temp files are needed because pdfjam operates on paths, not buffers.
    with tempfile.NamedTemporaryFile(mode='wb', delete=True, suffix=".pdf") as ads_file_output:
        ads_file.save(ads_file_output)
        ads_file_output.flush()
        with tempfile.NamedTemporaryFile(mode='wb', delete=True, suffix=".pdf") as output:
            output.write(buffer_pdf_to_2x2(voucher_buffer))
            output.flush()
            final_pdf = shuffle_ads(ads_file_output.name, output.name, int(voucher_count / 4))  # /4 Because of 2x2 nup
            if final_pdf is None:
                flash("Error: Failed to shuffle ads!")
                return redirect(url_for('home'))
            return send_file(BytesIO(final_pdf),
                             mimetype='application/pdf',
                             as_attachment=True,
                             attachment_filename="vouchers_tatdf_roll%s.csv.pdf" % roll)
def activate_vouchers(cursor, vouchers):
    """Insert RADIUS activation rows for every voucher code.

    Aborts and returns False (without touching further vouchers) as soon as a
    code already exists in radcheck; returns True when every voucher was
    inserted. The caller owns commit/rollback.
    """
    for code in vouchers:
        cursor.execute(
            "SELECT username FROM radcheck WHERE radcheck.username = %s;",
            (code,)
        )
        if cursor.fetchall():
            # Voucher already present — signal the caller to roll back.
            return False
        cursor.execute(
            "INSERT INTO radcheck (username, attribute, op, value) VALUES (%s,'Max-All-Session',':=', %s);",
            (code, str(36 * 24 * 60 * 60))
        )
        cursor.execute(
            "INSERT INTO radcheck (username, attribute, op, value) VALUES (%s,'Cleartext-Password', ':=', 'dummy');",
            (code,)
        )
    return True
@app.route('/activation/step', methods=['POST'])
def activate_step():
    """Activate a batch of vouchers in the RADIUS database.

    Regenerates the voucher codes for the posted roll/count, then inserts the
    activation rows inside one explicit transaction: commit on success,
    rollback when any voucher already exists.

    BUG FIX: the original cleanup closed the connection *before* the cursor,
    which can raise in mysql.connector because the cursor is then closed
    against a dead connection. Close the cursor first.
    """
    roll = int(request.form['roll'])
    count = int(request.form['count'])
    vouchers = generate_vouchers(roll, count)
    connection = None
    cursor = None
    try:
        connection = mysql.connector.connect(
            host=os.environ.get("MYSQL_HOST"),
            user=os.environ.get("MYSQL_USER"),
            passwd=os.environ.get("MYSQL_PASSWORD"),
            option_files=[os.environ.get("MYSQL_OPTION_FILE")],
            database="radius"
        )
        # Explicit transaction so the whole batch is all-or-nothing.
        connection.autocommit = False
        cursor = connection.cursor()
        if activate_vouchers(cursor, vouchers):
            connection.commit()
            flash("Successfully activated vouchers")
        else:
            flash("Error: Voucher already existed. Aborting!")
            connection.rollback()
    except mysql.connector.Error as err:
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            flash("Something is wrong with the database user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            flash("Database does not exist")
        else:
            flash("Something went wrong")
    finally:
        # Release resources in reverse order of acquisition.
        if cursor is not None:
            cursor.close()
        if connection is not None and connection.is_connected():
            connection.close()
    return render_template('activation/step.html')
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.portable.python_driver_operator."""
from typing import Any, Dict, List, Text
import tensorflow as tf
from tfx import types
from tfx.orchestration.portable import base_driver
from tfx.orchestration.portable import python_driver_operator
from tfx.proto.orchestration import driver_output_pb2
from tfx.proto.orchestration import executable_spec_pb2
_DEFAULT_DRIVER_OUTPUT = driver_output_pb2.DriverOutput()
class _FakeNoopDriver(base_driver.BaseDriver):
    """Test double: a driver whose run() ignores inputs and returns a fixed output."""
    def run(self, input_dict: Dict[Text, List[types.Artifact]],
            output_dict: Dict[Text, List[types.Artifact]],
            exec_properties: Dict[Text, Any]) -> driver_output_pb2.DriverOutput:
        # Return the module-level singleton so tests can compare by equality.
        return _DEFAULT_DRIVER_OUTPUT
class PythonDriverOperatorTest(tf.test.TestCase):
    """Verifies PythonDriverOperator dispatches to the configured driver class."""

    def test_succeed(self):
        # BUG FIX: this method was named `succeed`, so the test runner never
        # discovered or executed it. Renamed to `test_succeed` so it runs.
        custom_driver_spec = (executable_spec_pb2.PythonClassExecutableSpec())
        custom_driver_spec.class_path = 'tfx.orchestration.portable.python_driver_operator._FakeNoopDriver'
        driver_operator = python_driver_operator.PythonDriverOperator(
            custom_driver_spec, None, None, None)
        driver_output = driver_operator.run_driver(None, None, None)
        self.assertEqual(driver_output, _DEFAULT_DRIVER_OUTPUT)
if __name__ == '__main__':
    # Run the test suite when this module is executed directly.
    tf.test.main()
|
from time import sleep
from userbot import CMD_HELP
from userbot.events import register
@register(outgoing=True, pattern='^.yins(?: |$)(.*)')
async def typewriter(typew):
    """Replace an outgoing `.yins` message with a timed self-introduction."""
    typew.pattern_match.group(1)  # NOTE(review): result unused — presumably just consumes the match
    sleep(3)  # time.sleep in an async handler — blocks the event loop while waiting
    await typew.edit("`𝙃𝙖𝙞 𝙋𝙚𝙧𝙠𝙚𝙣𝙖𝙡𝙠𝙖𝙣 𝙉𝙖𝙢𝙖 𝙂𝙪𝙖 𝘼𝙮𝙞𝙞𝙣`")
    sleep(3)
    await typew.edit("`23 𝙏𝙖𝙝𝙪𝙣`")
    sleep(1)
    await typew.edit("`𝙏𝙞𝙣𝙜𝙜𝙖𝙡 𝘿𝙞 𝘽𝙖𝙡𝙞, 𝙎𝙖𝙡𝙖𝙢 𝙆𝙚𝙣𝙖𝙡 😁`")
# Create by myself @Contoldisini
@register(outgoing=True, pattern='^.sayang(?: |$)(.*)')
async def typewriter(typew):
    """Replace an outgoing `.sayang` message with a timed affection sequence."""
    typew.pattern_match.group(1)  # NOTE(review): result unused — presumably just consumes the match
    sleep(3)  # time.sleep blocks the event loop; kept as in the original
    await typew.edit("`𝘼𝙠𝙪 𝘾𝙪𝙢𝙖 𝙈𝙖𝙪 𝘽𝙞𝙡𝙖𝙣𝙜 👉👈`")
    sleep(3)
    await typew.edit("`𝘼𝙠𝙪 𝙎𝙖𝙮𝙖𝙣𝙜 𝙆𝙖𝙢𝙪 😘`")
    sleep(1)
    await typew.edit("`𝙈𝙪𝙖𝙖𝙘𝙘𝙝𝙝𝙝 😘💕`")
# Create by myself @Contoldisini
@register(outgoing=True, pattern='^.semangat(?: |$)(.*)')
async def typewriter(typew):
    """Replace an outgoing `.semangat` message with a timed encouragement."""
    typew.pattern_match.group(1)  # NOTE(review): result unused — presumably just consumes the match
    sleep(3)  # time.sleep blocks the event loop; kept as in the original
    await typew.edit("`𝘼𝙥𝙖𝙥𝙪𝙣 𝙔𝙖𝙣𝙜 𝙏𝙚𝙧𝙟𝙖𝙙𝙞`")
    sleep(3)
    await typew.edit("`𝙏𝙚𝙩𝙖𝙥𝙡𝙖𝙝 𝘽𝙚𝙧𝙣𝙖𝙥𝙖𝙨`")
    sleep(1)
    await typew.edit("`𝘿𝙖𝙣 𝙎𝙚𝙡𝙖𝙡𝙪 𝘽𝙚𝙧𝙨𝙮𝙪𝙠𝙪𝙧`")
# Create by myself @Contoldisini
@register(outgoing=True, pattern='^.mengeluh(?: |$)(.*)')
async def typewriter(typew):
    """Replace an outgoing `.mengeluh` message with a timed (sarcastic) sequence."""
    typew.pattern_match.group(1)  # NOTE(review): result unused — presumably just consumes the match
    sleep(3)  # time.sleep blocks the event loop; kept as in the original
    await typew.edit("`𝘼𝙥𝙖𝙥𝙪𝙣 𝙔𝙖𝙣𝙜 𝙏𝙚𝙧𝙟𝙖𝙙𝙞`")
    sleep(3)
    await typew.edit("`𝙏𝙚𝙩𝙖𝙥𝙡𝙖𝙝 𝙈𝙚𝙣𝙜𝙚𝙡𝙪𝙝`")
    sleep(1)
    await typew.edit("`𝘿𝙖𝙣 𝙎𝙚𝙡𝙖𝙡𝙪 𝙋𝙪𝙩𝙪𝙨 𝘼𝙨𝙖`")
# Create by myself @Contoldisini
# Register the help text for these commands.
# BUG FIX: the `.mengeluh` line started with a stray `n\n` (missing
# backslash), corrupting the rendered help; also added the leading dot to
# `.yins` for consistency with the sibling command names.
CMD_HELP.update({
    "oi": "𝘾𝙤𝙢𝙢𝙖𝙣𝙙: `.yins`\
    \n↳ : perkenalan yins\
    \n\n𝘾𝙤𝙢𝙢𝙖𝙣𝙙: `.sayang`\
    \n↳ : Gombalan maut`\
    \n\n𝘾𝙤𝙢𝙢𝙖𝙣𝙙: `.semangat`\
    \n↳ : Jan Lupa Semangat`\
    \n\n𝘾𝙤𝙢𝙢𝙖𝙣𝙙: `.mengeluh`\
    \n↳ : Jan Lupa Mengeluh."
})
|
from typing import Any, List
import torch.nn as nn
from torch import Tensor
class BasicBlock(nn.Module):
    """Conv2d -> (optional BatchNorm2d) -> ReLU building block."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int,
        padding: int,
        use_batch_norm: bool = False
    ) -> None:
        super().__init__()
        self.use_batch_norm = use_batch_norm
        self.conv = nn.Conv2d(
            in_channels, out_channels, kernel_size, stride, padding)
        self.relu = nn.ReLU(inplace=True)
        # BatchNorm is created only when requested, so non-BN blocks carry
        # no unused parameters.
        if self.use_batch_norm:
            self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x: Tensor) -> Tensor:
        """Apply conv, optional batch norm, then ReLU."""
        features = self.conv(x)
        if self.use_batch_norm:
            features = self.bn(features)
        return self.relu(features)
class Backbone(nn.Module):
    """CRNN convolutional feature extractor (VGG-style).

    Halves height/width twice, then pools height only twice more, so a
    height-32 input ends with a final valid 2x2 conv producing height-1
    feature maps readable as a width-wise sequence of 512-dim features.
    """

    def __init__(self, in_channels: int):
        super().__init__()
        self.layer1 = BasicBlock(in_channels, 64, 3, 1, 1, False)
        self.maxpool1 = nn.MaxPool2d(kernel_size=(2, 2))
        self.layer2 = BasicBlock(64, 128, 3, 1, 1, False)
        self.maxpool2 = nn.MaxPool2d(kernel_size=(2, 2))
        self.layer3 = BasicBlock(128, 256, 3, 1, 1, False)
        self.layer4 = BasicBlock(256, 256, 3, 1, 1, False)
        self.maxpool3 = nn.MaxPool2d(kernel_size=(2, 1))
        self.layer5 = BasicBlock(256, 512, 3, 1, 1, True)
        self.layer6 = BasicBlock(512, 512, 3, 1, 1, True)
        self.maxpool4 = nn.MaxPool2d(kernel_size=(2, 1))
        self.layer7 = BasicBlock(512, 512, 2, 1, 0, False)

    def forward(self, x: Tensor) -> Tensor:
        out = self.layer1(x)
        out = self.maxpool1(out)
        # BUG FIX: previously this fed `x` (the raw input) into layer2,
        # silently discarding layer1/maxpool1 and violating layer2's
        # 64-input-channel contract (a runtime crash for in_channels != 64).
        out = self.layer2(out)
        out = self.maxpool2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.maxpool3(out)
        out = self.layer5(out)
        out = self.layer6(out)
        out = self.maxpool4(out)
        out = self.layer7(out)
        return out
class CRNN(nn.Module):
    """Convolutional recurrent network for sequence (text-line) recognition.

    CNN backbone -> linear projection to a sequence embedding -> two stacked
    bidirectional LSTMs -> per-timestep class logits.
    """
    def __init__(
        self,
        img_channel: int,
        img_height: int,  # assumed divisible by 16 so the backbone's height math works — TODO confirm
        img_width: int,
        num_class: int,
        map_to_seq_hidden: int = 64,
        rnn_hidden: int = 256
    ):
        super().__init__()
        self.backbone = Backbone(img_channel)
        # Backbone leaves height img_height//16 - 1 with 512 channels; the
        # flattened channel*height vector is projected to the RNN input size.
        self.map_to_seq = nn.Linear(
            512 * (img_height // 16 - 1), map_to_seq_hidden)
        self.rnn1 = nn.LSTM(map_to_seq_hidden, rnn_hidden, bidirectional=True)
        # Bidirectional output doubles the feature size, hence 2 * rnn_hidden.
        self.rnn2 = nn.LSTM(2 * rnn_hidden, rnn_hidden, bidirectional=True)
        self.dense = nn.Linear(2 * rnn_hidden, num_class)
    def forward(self, x):
        # (batch, C, H, W) -> (batch, C*H, W) -> (W, batch, C*H): width
        # becomes the sequence (time) dimension expected by nn.LSTM.
        out = self.backbone(x)
        batch, channel, height, width = out.size()
        out = out.view(batch, channel * height, width)
        out = out.permute(2, 0, 1)
        seq = self.map_to_seq(out)
        out, _ = self.rnn1(seq)
        out, _ = self.rnn2(out)
        out = self.dense(out)
        return out
if __name__ == '__main__':
    # Smoke check: build a CRNN (3 channels, 128x256 input, 1 class)
    # and print its architecture.
    model = CRNN(3, 128, 256, 1)
    print(model)
|
# The MIT License
#
# Copyright (c) 2017 Transact Pro.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
class DataValidator:
    """Recursively checks that a request dict contains required keys with the
    expected types, yielding the remaining (unsatisfied) requirements."""

    # Keys that should not count as satisfying a requirement.
    EXCLUDED_DATA_KEYS = [
        'general-data'
    ]
    def validate_request_data(self, required_data, request_data):
        """
        Validate all dict (json) structure with given dict of keys and class types
        Args:
            required_data (dict): Money amount in minor units
                Example {'Key1': int, 'tiny_key': str}
            request_data (dict): Data set for validation
                Example {'Key1': int, 'Other_dor': {'tiny_key': str}}
        Returns (dict): Invalid data
        """
        result = {}
        yielded_list = list(self.__search_and_validate(req_struct=required_data, source_struct=request_data))
        try:
            # The last yielded snapshot holds the requirements still unmet
            # after the full traversal.
            if len(yielded_list[-1]) > 0:
                result = yielded_list[-1]
        except IndexError:
            if len(yielded_list) > 0:
                result = yielded_list[0]
        except:
            raise
        return result
    def __search_and_validate(self, req_struct, source_struct):
        """
        Generator that walks source_struct and deletes satisfied requirements
        from req_struct, yielding the dict after each match.
        Args:
            req_struct (dict): Required keys with data types
                Example {'Key1': int, 'tiny_key': str}
            source_struct (dict): Dict data whose must be validated with req_struct
                Example {'Key1': int, 'Other_dor': {'tiny_key': str}}
        """
        # NOTE(review): this copy is never used below — deletions go straight
        # to the shared req_struct; confirm whether the copy was meant to be
        # iterated/mutated instead.
        __required_params = req_struct.copy()
        if hasattr(source_struct, 'items'):
            for key, value in source_struct.items():
                for data_key, data_value_type in __required_params.items():
                    for excluded_key in self.EXCLUDED_DATA_KEYS:
                        # NOTE(review): this `continue` only advances the
                        # excluded-key loop, so it does NOT skip validation of
                        # an excluded `key` below — confirm intended.
                        if key == excluded_key:
                            continue
                        if key == data_key:
                            if type(value) is data_value_type:
                                del req_struct[key]
                                yield req_struct
                # Make recursion if contains nested structures
                if isinstance(value, dict):
                    nested_dict = value
                    for result in self.__search_and_validate(req_struct, nested_dict):
                        yield result
                elif isinstance(value, list):
                    nested_list = value
                    for nested_object in nested_list:
                        for result in self.__search_and_validate(req_struct, nested_object):
                            yield result
|
from smtplib import SMTPException
from typing import Tuple, Optional, Dict
from urllib.parse import urlencode
from django import forms
from django.utils.translation import gettext as _
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.urls import reverse
from polaris import settings
from polaris.models import Transaction
from polaris.utils import getLogger
from ..forms import KYCForm
from .. import settings as server_settings
from ..models import PolarisUser, PolarisStellarAccount, PolarisUserTransaction
CONFIRM_EMAIL_PAGE_TITLE = _("Confirm Email")
logger = getLogger(__name__)
def send_confirmation_email(user: PolarisUser, account: PolarisStellarAccount):
    """
    Email a confirmation link for ``account`` to ``user.email``.

    In a real production deployment you would never send email inside the
    request/response cycle; a job queue service such as Celery is the right
    tool. This reference server handles little traffic, so an exception is
    made here. SMTP failures are logged rather than propagated.
    """
    query = urlencode({"token": account.confirmation_token, "email": user.email})
    confirmation_url = f"{settings.HOST_URL}{reverse('confirm_email')}?{query}"
    # Plain-text fallback body, shown if the HTML is not rendered.
    plain_body = (
        _("Confirm your email by pasting this URL in your browser: %s")
        % confirmation_url
    )
    html_body = render_to_string(
        "confirmation_email.html",
        {"first_name": user.first_name, "confirmation_url": confirmation_url},
    )
    try:
        send_mail(
            _("Reference Anchor Server: Confirm Email"),
            plain_body,
            server_settings.EMAIL_HOST_USER,
            [user.email],
            html_message=html_body,
        )
    except SMTPException as e:
        logger.error(f"Unable to send email to {user.email}: {e}")
class SEP24KYC:
    """KYC helpers used by the SEP-24 interactive deposit/withdraw flow."""

    @staticmethod
    def track_user_activity(form: forms.Form, transaction: Transaction):
        """
        Record this transaction against a person's account.

        When ``form`` is a KYCForm, a PolarisStellarAccount (and, if one does
        not exist yet, a PolarisUser) is created from the submitted data;
        otherwise the account must already exist, or RuntimeError is raised.
        Finally a PolarisUserTransaction row is written (at most once per
        transaction) so there is an accurate record of the person's activity.
        """
        if isinstance(form, KYCForm):
            data = form.cleaned_data
            user = PolarisUser.objects.filter(email=data.get("email")).first()
            if user is None:
                user = PolarisUser.objects.create(
                    first_name=data.get("first_name"),
                    last_name=data.get("last_name"),
                    email=data.get("email"),
                )
            # ID-type memo only makes sense when a memo is actually present.
            memo_type = (
                Transaction.MEMO_TYPES.id if transaction.account_memo else None
            )
            account = PolarisStellarAccount.objects.create(
                user=user,
                account=transaction.stellar_account,
                muxed_account=transaction.muxed_account,
                memo=transaction.account_memo,
                memo_type=memo_type,
            )
            if server_settings.EMAIL_HOST_USER:
                # this would be where a confirmation email is sent
                pass
        else:
            try:
                account = PolarisStellarAccount.objects.get(
                    account=transaction.stellar_account,
                    muxed_account=transaction.muxed_account,
                    memo=transaction.account_memo,
                )
            except ObjectDoesNotExist:
                raise RuntimeError(
                    f"Unknown address: {transaction.stellar_account}, KYC required."
                )
        already_tracked = PolarisUserTransaction.objects.filter(
            transaction_id=transaction.id
        ).exists()
        if not already_tracked:
            PolarisUserTransaction.objects.create(
                user=account.user, account=account, transaction_id=transaction.id
            )

    @staticmethod
    def check_kyc(
        transaction: Transaction, post_data=None
    ) -> Tuple[Optional[forms.Form], Optional[Dict]]:
        """
        Return (form, context) for the KYC page when this stellar account is
        unknown, (None, context) when email confirmation is still pending,
        and (None, None) when no further KYC action is needed.
        """
        account = PolarisStellarAccount.objects.filter(
            account=transaction.stellar_account,
            muxed_account=transaction.muxed_account,
            memo=transaction.account_memo,
        ).first()
        if account is None:  # Unknown stellar account, get KYC info
            form = KYCForm(post_data) if post_data else KYCForm()
            context = {
                "icon_label": _("Stellar Development Foundation"),
                "title": _("Polaris KYC Information"),
                "guidance": (
                    _(
                        "We're legally required to know our customers. "
                        "Please enter the information requested."
                    )
                ),
            }
            return form, context
        if settings.LOCAL_MODE:
            # When in local mode, request session's are not authenticated,
            # which means account confirmation cannot be skipped. So we'll
            # return None instead of returning the confirm email page.
            return None, None
        if server_settings.EMAIL_HOST_USER and not account.confirmed:
            context = {
                "title": CONFIRM_EMAIL_PAGE_TITLE,
                "guidance": _(
                    "We sent you a confirmation email. Once confirmed, "
                    "continue on this page."
                ),
                "icon_label": _("Stellar Development Foundation"),
            }
            return None, context
        return None, None
|
""" Sonde check attempts to read each sonde file with netCDF4, error
files are sent to a new directory. """
import glob
import os
import shutil
import stat
import netCDF4
# Destination for corrupt soundings; hoisted out of the loop since it is
# loop-invariant (the directory itself is still created lazily, on the
# first corrupt file found, to preserve the original behavior).
CORRUPT_DIR = ('/lustre/or-hydra/cades-arm/proj-shared/'
               + 'sgpsondewnpnC1.b1/corrupt_soundings/')

files = glob.glob('/lustre/or-hydra/cades-arm/proj-shared/sgpsondewnpnC1.b1/*')
for file in files:
    try:
        # Merely opening (then closing) the file detects truncated or
        # otherwise unreadable netCDF data.
        sonde = netCDF4.Dataset(file)
        sonde.close()
    except OSError:
        print(file + ' is corrupt!')
        if not os.path.exists(CORRUPT_DIR):
            os.makedirs(CORRUPT_DIR)
            # rwx for user and group so other project members can manage
            # the quarantined files.
            os.chmod(
                CORRUPT_DIR,
                stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
                stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP)
        shutil.move(file, CORRUPT_DIR)
|
import ai
from detect import detectar_resultado
# 3x3 tic-tac-toe board stored as a flat list of 9 cells (' ' = empty),
# row-major: cell (linha, coluna) lives at index linha*3 + coluna.
tabuleiro = [' ', ' ', ' ',
             ' ', ' ', ' ',
             ' ', ' ', ' '
             ]
class JogadaInvalida(RuntimeError):
    """Raised when a move targets a board cell that is already occupied."""
    pass
def coordenada_esta_no_limite(c, texto):
    """Return c once it lies in 0..2, re-prompting the user until it does."""
    while not (0 <= c <= 2):
        c = int(input('Digite o valor da '+ str(texto) + ': ' ))
    return c
def entrada_do_usuario():
    """Prompt for a row and a column (each validated to 0..2) and return [linha, coluna]."""
    linha = coordenada_esta_no_limite(
        int(input('Digite o valor da linha: ')), 'linha')
    coluna = coordenada_esta_no_limite(
        int(input('Digite o valor da coluna: ')), 'coluna')
    return [linha, coluna]
def humano_jogar(posicao, tabuleiro, simbolo): # Esta função representa a jogada do usuário.
    """Place simbolo at board position posicao and return the board.

    posicao is a [linha, coluna] pair (each coordinate 0..2, already
    validated by the caller); a bare index is also accepted unchanged.
    Raises JogadaInvalida when the target cell is already occupied.
    """
    if isinstance(posicao, list):
        # Row-major mapping (i, j) -> i*3 + j; replaces the original
        # nine-branch lookup table and yields the same index for every
        # valid coordinate pair.
        posicao = posicao[0] * 3 + posicao[1]
    if tabuleiro[posicao] != " " :
        raise JogadaInvalida("Jogada já feita.") #trata objeto como exceção
        #("Jogada já feita") é o objeto da classe JogadaInvalida
    tabuleiro[posicao] = simbolo
    return tabuleiro
def mostrar_gui(tabuleiro):
    """Print the board to the console, one row per line with separators."""
    separador = '+'.join(['---', '---', '---'])
    for inicio in (0, 3, 6):
        print(' ' + ' | '.join(tabuleiro[inicio:inicio + 3]))
        if inicio != 6:
            print(separador)
    print("="*12)
# Main game loop: alternate human ("X") and AI ("O") turns until
# detectar_resultado reports a result for the current game.
fim_de_jogo = False
simbolo = "X"
while fim_de_jogo == False:
    try :
        if simbolo == "X" :
            jogada = entrada_do_usuario()
            tabuleiro = humano_jogar(jogada, tabuleiro, simbolo)
        else:
            # AI turn; fazer_jogada presumably returns the updated board —
            # TODO confirm against the ai module.
            tabuleiro = ai.fazer_jogada(tabuleiro, simbolo)
        # presumably falsy while the game continues and truthy once it is
        # over — verify against detect.detectar_resultado
        fim_de_jogo = detectar_resultado (tabuleiro)
        mostrar_gui(tabuleiro)
        if simbolo == "X" :
            simbolo = "O"
        else:
            simbolo = "X"
    except JogadaInvalida as e:
        # Invalid move: report it and retry without switching players.
        print(str(e))
# NOTE(review): this assumes fim_de_jogo ends up holding the winner's
# symbol as a str; a boolean or draw result would make the concatenation
# fail with TypeError — confirm detectar_resultado's contract.
print("Jogador " + fim_de_jogo + " ganhou.")
"""Abstract graphs in ReGraph.
This module contains abstract classes for graph objects in ReGraph. Such
graph objects represent simple graphs with dictionary-like attributes
on nodes and edges.
"""
import json
import os
import warnings
from abc import ABC, abstractmethod
from regraph.exceptions import (ReGraphError,
GraphError,
GraphAttrsWarning,
)
from regraph.utils import (load_nodes_from_json,
load_edges_from_json,
generate_new_id,
normalize_attrs,
safe_deepcopy_dict,
set_attrs,
add_attrs,
remove_attrs,
merge_attributes,
keys_by_value,
)
class Graph(ABC):
    """Abstract class for graph objects in ReGraph."""

    @abstractmethod
    def nodes(self, data=False):
        """Return the list of nodes."""
        pass

    @abstractmethod
    def edges(self, data=False):
        """Return the list of edges."""
        pass

    @abstractmethod
    def get_node(self, n):
        """Get node attributes.

        Parameters
        ----------
        n : hashable
            Node id.
        """
        pass

    @abstractmethod
    def get_edge(self, s, t):
        """Get edge attributes.

        Parameters
        ----------
        s : hashable
            Source node id.
        t : hashable
            Target node id.
        """
        pass

    @abstractmethod
    def add_node(self, node_id, attrs=None):
        """Abstract method for adding a node.

        Parameters
        ----------
        node_id : hashable
            Id of the node to add.
        attrs : dict, optional
            Node attributes.
        """
        pass

    @abstractmethod
    def remove_node(self, node_id):
        """Remove node.

        Parameters
        ----------
        node_id : hashable
            Node to remove.
        """
        pass

    @abstractmethod
    def add_edge(self, s, t, attrs=None, **attr):
        """Add an edge to a graph.

        Parameters
        ----------
        s : hashable
            Source node id.
        t : hashable
            Target node id.
        attrs : dict
            Edge attributes.
        """
        pass

    @abstractmethod
    def remove_edge(self, source_id, target_id):
        """Remove edge from the graph.

        Parameters
        ----------
        source_id : hashable
            Source node id.
        target_id : hashable
            Target node id.
        """
        pass

    @abstractmethod
    def update_node_attrs(self, node_id, attrs, normalize=True):
        """Update attributes of a node.

        Parameters
        ----------
        node_id : hashable
            Node to update.
        attrs : dict
            New attributes to assign to the node
        """
        pass

    @abstractmethod
    def update_edge_attrs(self, s, t, attrs, normalize=True):
        """Update attributes of an edge.

        Parameters
        ----------
        s : hashable
            Source node of the edge to update.
        t : hashable
            Target node of the edge to update.
        attrs : dict
            New attributes to assign to the edge
        """
        pass

    @abstractmethod
    def successors(self, node_id):
        """Return the set of successors."""
        pass

    @abstractmethod
    def predecessors(self, node_id):
        """Return the set of predecessors."""
        pass

    @abstractmethod
    def find_matching(self, pattern, nodes=None):
        """Find matching of a pattern in a graph."""
        pass

    def print_graph(self):
        """Pretty-print the graph."""
        print("\nNodes:\n")
        for n in self.nodes():
            print(n, " : ", self.get_node(n))
        print("\nEdges:\n")
        for (n1, n2) in self.edges():
            print(n1, '->', n2, ' : ', self.get_edge(n1, n2))
        return

    def __str__(self):
        """String representation of the graph."""
        return "Graph({} nodes, {} edges)".format(
            len(self.nodes()), len(self.edges()))

    def __eq__(self, graph):
        """Equality operator.

        Parameters
        ----------
        graph : regraph.Graph
            Another graph object

        Returns
        -------
        bool
            True if two graphs are equal, False otherwise.
        """
        if set(self.nodes()) != set(graph.nodes()):
            return False
        if set(self.edges()) != set(graph.edges()):
            return False
        for node in self.nodes():
            if self.get_node(node) != graph.get_node(node):
                return False
        for s, t in self.edges():
            if self.get_edge(s, t) != graph.get_edge(s, t):
                return False
        return True

    def __ne__(self, graph):
        """Non-equality operator."""
        return not (self == graph)

    def get_node_attrs(self, n):
        """Get node attributes.

        Parameters
        ----------
        n : hashable
            Node id.
        """
        return self.get_node(n)

    def get_edge_attrs(self, s, t):
        """Get edge attributes.

        Parameters
        ----------
        s : hashable, source node id.
        t : hashable, target node id.
        """
        return self.get_edge(s, t)

    def in_edges(self, node_id):
        """Return the list of in-coming edges."""
        return [(p, node_id) for p in self.predecessors(node_id)]

    def out_edges(self, node_id):
        """Return the list of out-going edges."""
        return [(node_id, s) for s in self.successors(node_id)]

    def add_nodes_from(self, node_list):
        """Add nodes from a node list.

        Parameters
        ----------
        node_list : iterable
            Iterable containing a collection of nodes, optionally,
            with their attributes
        """
        for n in node_list:
            if type(n) != str:
                try:
                    node_id, node_attrs = n
                    self.add_node(node_id, node_attrs)
                except (TypeError, ValueError):
                    self.add_node(n)
            else:
                self.add_node(n)

    def add_edges_from(self, edge_list):
        """Add edges from an edge list.

        Parameters
        ----------
        edge_list : iterable
            Iterable containing a collection of edges, optionally,
            with their attributes
        """
        for e in edge_list:
            if len(e) == 2:
                self.add_edge(e[0], e[1])
            elif len(e) == 3:
                self.add_edge(e[0], e[1], e[2])
            else:
                raise ReGraphError(
                    "Was expecting 2 or 3 elements per tuple, got %s." %
                    str(len(e))
                )

    def exists_edge(self, s, t):
        """Check if an edge exists.

        Parameters
        ----------
        s : hashable
            Source node id.
        t : hashable
            Target node id.
        """
        return((s, t) in self.edges())

    def set_node_attrs(self, node_id, attrs, normalize=True, update=True):
        """Set node attrs.

        Parameters
        ----------
        node_id : hashable
            Id of the node to update
        attrs : dict
            Dictionary with new attributes to set
        normalize : bool, optional
            Flag, when set to True attributes are normalized to be set-valued.
            True by default
        update : bool, optional
            Flag, when set to True attributes whose keys are not present
            in attrs are removed, True by default

        Raises
        ------
        GraphError
            If a node `node_id` does not exist.
        """
        if node_id not in self.nodes():
            raise GraphError("Node '{}' does not exist!".format(node_id))
        node_attrs = safe_deepcopy_dict(self.get_node(node_id))
        set_attrs(node_attrs, attrs, normalize, update)
        self.update_node_attrs(node_id, node_attrs, normalize)

    def add_node_attrs(self, node, attrs):
        """Add new attributes to a node.

        Parameters
        ----------
        node : hashable
            Id of a node to add attributes to.
        attrs : dict
            Attributes to add.

        Raises
        ------
        GraphError
            If a node `node_id` does not exist.
        """
        if node not in self.nodes():
            raise GraphError("Node '{}' does not exist!".format(node))
        node_attrs = safe_deepcopy_dict(self.get_node(node))
        add_attrs(node_attrs, attrs, normalize=True)
        self.update_node_attrs(node, node_attrs)

    def remove_node_attrs(self, node_id, attrs):
        """Remove attrs of a node specified by attrs_dict.

        Parameters
        ----------
        node_id : hashable
            Node whose attributes to remove.
        attrs : dict
            Dictionary with attributes to remove.

        Raises
        ------
        GraphError
            If a node with the specified id does not exist.
        """
        if node_id not in self.nodes():
            raise GraphError("Node '%s' does not exist!" % str(node_id))
        elif attrs is None:
            warnings.warn(
                "You want to remove attrs from '{}' with an empty attrs_dict!".format(
                    node_id), GraphAttrsWarning
            )
        node_attrs = safe_deepcopy_dict(self.get_node(node_id))
        remove_attrs(node_attrs, attrs, normalize=True)
        self.update_node_attrs(node_id, node_attrs)

    def set_edge_attrs(self, s, t, attrs, normalize=True, update=True):
        """Set edge attrs.

        Parameters
        ----------
        attrs : dict
            Dictionary with new attributes to set
        normalize : bool, optional
            Flag, when set to True attributes are normalized to be set-valued.
            True by default
        update : bool, optional
            Flag, when set to True attributes whose keys are not present
            in attrs are removed, True by default

        Raises
        ------
        GraphError
            If an edge between `s` and `t` does not exist.
        """
        if not self.exists_edge(s, t):
            raise GraphError(
                "Edge {}->{} does not exist".format(s, t))
        edge_attrs = safe_deepcopy_dict(self.get_edge(s, t))
        set_attrs(edge_attrs, attrs, normalize, update)
        self.update_edge_attrs(s, t, edge_attrs, normalize=normalize)

    def set_edge(self, s, t, attrs, normalize=True, update=True):
        """Set edge attrs.

        Parameters
        ----------
        s : hashable
            Source node id.
        t : hashable
            Target node id.
        attrs : dictionary
            Dictionary with attributes to set.

        Raises
        ------
        GraphError
            If an edge between `s` and `t` does not exist.
        """
        self.set_edge_attrs(s, t, attrs, normalize, update)

    def add_edge_attrs(self, s, t, attrs):
        """Add attributes of an edge in a graph.

        Parameters
        ----------
        s : hashable
            Source node id.
        t : hashable
            Target node id.
        attrs : dict
            Dictionary with attributes to add.

        Raises
        ------
        GraphError
            If an edge between `s` and `t` does not exist.
        """
        if not self.exists_edge(s, t):
            raise GraphError(
                "Edge {}->{} does not exist".format(s, t))
        edge_attrs = safe_deepcopy_dict(self.get_edge(s, t))
        add_attrs(edge_attrs, attrs, normalize=True)
        self.update_edge_attrs(s, t, edge_attrs)

    def remove_edge_attrs(self, s, t, attrs):
        """Remove attrs of an edge specified by attrs.

        Parameters
        ----------
        s : hashable
            Source node id.
        t : hashable
            Target node id.
        attrs : dict
            Dictionary with attributes to remove.

        Raises
        ------
        GraphError
            If an edge between `s` and `t` does not exist.
        """
        if not self.exists_edge(s, t):
            raise GraphError(
                "Edge {}->{} does not exist".format(s, t))
        edge_attrs = safe_deepcopy_dict(self.get_edge(s, t))
        remove_attrs(edge_attrs, attrs, normalize=True)
        self.update_edge_attrs(s, t, edge_attrs)

    def clone_node(self, node_id, name=None):
        """Clone node.

        Create a new node, a copy of a node with `node_id`, and reconnect it
        with all the adjacent nodes of `node_id`.

        Parameters
        ----------
        node_id : hashable,
            Id of a node to clone.
        name : hashable, optional
            Id for the clone, if is not specified, new id will be generated.

        Returns
        -------
        new_node : hashable
            Id of the new node corresponding to the clone

        Raises
        ------
        GraphError
            If node with `node_id` does not exists or a node with
            `name` (clone's name) already exists.
        """
        if node_id not in self.nodes():
            raise GraphError("Node '{}' does not exist!".format(node_id))
        # generate new name for a clone
        if name is None:
            i = 1
            new_node = str(node_id) + str(i)
            while new_node in self.nodes():
                i += 1
                new_node = str(node_id) + str(i)
        else:
            if name in self.nodes():
                raise GraphError("Node '{}' already exists!".format(name))
            else:
                new_node = name
        self.add_node(new_node, self.get_node(node_id))
        # Connect all the edges
        self.add_edges_from(
            set([(n, new_node) for n, _ in self.in_edges(node_id)
                 if (n, new_node) not in self.edges()]))
        self.add_edges_from(
            set([(new_node, n) for _, n in self.out_edges(node_id)
                 if (new_node, n) not in self.edges()]))
        # Copy the attributes of the edges
        for s, t in self.in_edges(node_id):
            self.set_edge(
                s, new_node,
                safe_deepcopy_dict(self.get_edge(s, t)))
        for s, t in self.out_edges(node_id):
            self.set_edge(
                new_node, t,
                safe_deepcopy_dict(self.get_edge(s, t)))
        return new_node

    def relabel_node(self, node_id, new_id):
        """Relabel a node in the graph.

        Parameters
        ----------
        node_id : hashable
            Id of the node to relabel.
        new_id : hashable
            New label of a node.
        """
        if new_id in self.nodes():
            raise ReGraphError(
                "Cannot relabel '{}' to '{}', '{}' ".format(
                    node_id, new_id, new_id) +
                "already exists in the graph")
        self.clone_node(node_id, new_id)
        self.remove_node(node_id)

    def merge_nodes(self, nodes, node_id=None, method="union",
                    edge_method="union"):
        """Merge a list of nodes.

        Parameters
        ----------
        nodes : iterable
            Collection of node id's to merge.
        node_id : hashable, optional
            Id of a new node corresponding to the result of merge.
        method : optional
            Method of node attributes merge: if `"union"` the resulting node
            will contain the union of all attributes of the merged nodes,
            if `"intersection"`, the resulting node will contain their
            intersection. Default value is `"union"`.
        edge_method : optional
            Method of edge attributes merge: if `"union"` the edges that were
            merged will contain the union of all attributes,
            if `"intersection"` -- their intersection.
            Default value is `"union"`.
        """
        if len(nodes) > 1:
            if method is None:
                method = "union"
            if edge_method is None:
                # BUG FIX: previously assigned to `method`, leaving
                # `edge_method` equal to None.
                edge_method = "union"

            # Generate name for new node
            if node_id is None:
                node_id = "_".join(sorted([str(n) for n in nodes]))
                if node_id in self.nodes():
                    node_id = self.generate_new_node_id(node_id)
            elif node_id in self.nodes() and (node_id not in nodes):
                raise GraphError(
                    "New name for merged node is not valid: "
                    "node with name '%s' already exists!" % node_id
                )
            # Merge data attached to node according to the method specified
            # restore proper connectivity
            if method == "union":
                attr_accumulator = {}
            elif method == "intersection":
                attr_accumulator = safe_deepcopy_dict(
                    self.get_node(nodes[0]))
            else:
                raise ReGraphError("Merging method '{}' is not defined!".format(
                    method))
            self_loop = False
            self_loop_attrs = {}
            source_nodes = set()
            target_nodes = set()
            source_dict = {}
            target_dict = {}
            for node in nodes:
                attr_accumulator = merge_attributes(
                    attr_accumulator, self.get_node(node), method)
                in_edges = self.in_edges(node)
                out_edges = self.out_edges(node)
                # manage self loops
                for s, t in in_edges:
                    if s in nodes:
                        self_loop = True
                        if len(self_loop_attrs) == 0:
                            self_loop_attrs = self.get_edge(s, t)
                        else:
                            self_loop_attrs = merge_attributes(
                                self_loop_attrs,
                                self.get_edge(s, t),
                                edge_method)
                for s, t in out_edges:
                    if t in nodes:
                        self_loop = True
                        if len(self_loop_attrs) == 0:
                            self_loop_attrs = self.get_edge(s, t)
                        else:
                            self_loop_attrs = merge_attributes(
                                self_loop_attrs,
                                self.get_edge(s, t),
                                edge_method)
                source_nodes.update(
                    [n if n not in nodes else node_id
                     for n, _ in in_edges])
                target_nodes.update(
                    [n if n not in nodes else node_id
                     for _, n in out_edges])
                for edge in in_edges:
                    if not edge[0] in source_dict.keys():
                        attrs = self.get_edge(edge[0], edge[1])
                        source_dict.update({edge[0]: attrs})
                    else:
                        attrs = merge_attributes(
                            source_dict[edge[0]],
                            self.get_edge(edge[0], edge[1]),
                            edge_method)
                        source_dict.update({edge[0]: attrs})
                for edge in out_edges:
                    if not edge[1] in target_dict.keys():
                        attrs = self.get_edge(edge[0], edge[1])
                        target_dict.update({edge[1]: attrs})
                    else:
                        attrs = merge_attributes(
                            target_dict[edge[1]],
                            self.get_edge(edge[0], edge[1]),
                            edge_method)
                        target_dict.update({edge[1]: attrs})
                self.remove_node(node)
            self.add_node(node_id, attr_accumulator)
            if self_loop:
                self.add_edges_from([(node_id, node_id)])
                self.set_edge(node_id, node_id, self_loop_attrs)
            for n in source_nodes:
                if not self.exists_edge(n, node_id):
                    self.add_edge(n, node_id)
            for n in target_nodes:
                if not self.exists_edge(node_id, n):
                    self.add_edge(node_id, n)
            # Attach accumulated attributes to edges
            for node, attrs in source_dict.items():
                if node not in nodes:
                    self.set_edge(node, node_id, attrs)
            for node, attrs in target_dict.items():
                if node not in nodes:
                    self.set_edge(node_id, node, attrs)
            return node_id
        else:
            # BUG FIX: the guard requires at least two nodes (len > 1), so the
            # message said the opposite of the actual condition.
            raise ReGraphError(
                "At least two nodes should be specified for merging!")

    def copy_node(self, node_id, copy_id=None):
        """Copy node.

        Create a copy of a node in a graph. A new id for the copy is
        generated by regraph.primitives.unique_node_id.

        Parameters
        ----------
        node_id : hashable
            Node to copy.

        Returns
        -------
        new_name
            Id of the copy node.
        """
        if copy_id is None:
            copy_id = self.generate_new_node_id(node_id)
        if copy_id in self.nodes():
            raise ReGraphError(
                "Cannot create a copy of '{}' with id '{}', ".format(
                    node_id, copy_id) +
                "node '{}' already exists in the graph".format(copy_id))
        attrs = self.get_node(node_id)
        self.add_node(copy_id, attrs)
        return copy_id

    def relabel_nodes(self, mapping):
        """Relabel graph nodes inplace given a mapping.

        Similar to networkx.relabel.relabel_nodes:
        https://networkx.github.io/documentation/development/_modules/networkx/relabel.html

        Parameters
        ----------
        mapping: dict
            A dictionary with keys being old node ids and their values
            being new id's of the respective nodes.

        Raises
        ------
        ReGraphError
            If new id's do not define a set of distinct node id's.
        """
        unique_names = set(mapping.values())
        # BUG FIX: uniqueness of the new ids must be checked against the
        # size of the mapping itself, not the total number of nodes —
        # the old check rejected any partial (non-total) relabeling.
        if len(unique_names) != len(mapping):
            raise ReGraphError(
                "Attempt to relabel nodes failed: the IDs are not unique!")
        temp_names = {}
        # Relabeling of the nodes: if at some point new ID conflicts
        # with already existing ID - assign temp ID
        for key, value in mapping.items():
            if key != value:
                if value not in self.nodes():
                    new_name = value
                else:
                    new_name = self.generate_new_node_id(value)
                    temp_names[new_name] = value
                self.relabel_node(key, new_name)
        # Relabeling the nodes with the temp ID to their new IDs
        for key, value in temp_names.items():
            if key != value:
                self.relabel_node(key, value)
        return

    def generate_new_node_id(self, basename):
        """Generate new unique node identifier."""
        return generate_new_id(self.nodes(), basename)

    def filter_edges_by_attributes(self, attr_key, attr_cond):
        """Filter graph edges by attributes.

        Removes all the edges of the graph (inplace) that do not
        satisfy `attr_cond`.

        Parameters
        ----------
        attr_key : hashable
            Attribute key
        attr_cond : callable
            Condition for an attribute to satisfy: callable that returns
            `True` if condition is satisfied, `False` otherwise.
        """
        for (s, t) in self.edges():
            edge_attrs = self.get_edge(s, t)
            if (attr_key not in edge_attrs.keys() or
                    not attr_cond(edge_attrs[attr_key])):
                self.remove_edge(s, t)

    def to_json(self):
        """Create a JSON representation of a graph."""
        j_data = {"edges": [], "nodes": []}
        # dump nodes
        for node in self.nodes():
            node_data = {}
            node_data["id"] = node
            node_attrs = self.get_node(node)
            if node_attrs is not None:
                attrs = {}
                for key, value in node_attrs.items():
                    attrs[key] = value.to_json()
                node_data["attrs"] = attrs
            j_data["nodes"].append(node_data)
        # dump edges
        for s, t in self.edges():
            edge_data = {}
            edge_data["from"] = s
            edge_data["to"] = t
            edge_attrs = self.get_edge(s, t)
            if edge_attrs is not None:
                attrs = {}
                for key, value in edge_attrs.items():
                    attrs[key] = value.to_json()
                edge_data["attrs"] = attrs
            j_data["edges"].append(edge_data)
        return j_data

    def to_d3_json(self,
                   attrs=True,
                   node_attrs_to_attach=None,
                   edge_attrs_to_attach=None,
                   nodes=None):
        """Create a JSON representation of a graph."""
        j_data = {"links": [], "nodes": []}
        if nodes is None:
            nodes = self.nodes()
        # dump nodes
        for node in nodes:
            node_data = {}
            node_data["id"] = node
            if attrs:
                node_attrs = self.get_node(node)
                normalize_attrs(node_attrs)
                attrs_json = dict()
                for key, value in node_attrs.items():
                    attrs_json[key] = value.to_json()
                node_data["attrs"] = attrs_json
            else:
                node_attrs = self.get_node(node)
                if node_attrs_to_attach is not None:
                    for key in node_attrs_to_attach:
                        if key in node_attrs.keys():
                            node_data[key] = list(node_attrs[key])
            j_data["nodes"].append(node_data)
        # dump edges (only those between the selected nodes)
        for s, t in self.edges():
            if s in nodes and t in nodes:
                edge_data = {}
                edge_data["source"] = s
                edge_data["target"] = t
                if attrs:
                    edge_attrs = self.get_edge(s, t)
                    normalize_attrs(edge_attrs)
                    attrs_json = dict()
                    for key, value in edge_attrs.items():
                        attrs_json[key] = value.to_json()
                    edge_data["attrs"] = attrs_json
                else:
                    if edge_attrs_to_attach is not None:
                        for key in edge_attrs_to_attach:
                            edge_attrs = self.get_edge(s, t)
                            if key in edge_attrs.keys():
                                edge_data[key] = list(edge_attrs[key])
                j_data["links"].append(edge_data)
        return j_data

    def export(self, filename):
        """Export graph to JSON file.

        Parameters
        ----------
        filename : str
            Name of the file to save the json serialization of the graph
        """
        with open(filename, 'w') as f:
            j_data = self.to_json()
            json.dump(j_data, f)
        return

    @classmethod
    def from_json(cls, json_data):
        """Create a graph object from a json-like dictionary.

        Parameters
        ----------
        json_data : dict
            JSON-like dictionary with graph representation
        """
        graph = cls()
        graph.add_nodes_from(load_nodes_from_json(json_data))
        graph.add_edges_from(load_edges_from_json(json_data))
        return graph

    @classmethod
    def load(cls, filename):
        """Load a graph from a JSON file.

        Create a graph object from a JSON representation stored in a file.

        Parameters
        ----------
        filename : str
            Name of the file to load the json serialization of the graph

        Returns
        -------
        Graph object

        Raises
        ------
        ReGraphError
            If was not able to load the file
        """
        if os.path.isfile(filename):
            with open(filename, "r+") as f:
                j_data = json.loads(f.read())
                return cls.from_json(j_data)
        else:
            raise ReGraphError(
                "Error loading graph: file '{}' does not exist!".format(
                    filename)
            )

    def rewrite(self, rule, instance=None):
        """Perform SqPO rewiting of the graph with a rule.

        Parameters
        ----------
        rule : regraph.Rule
            SqPO rewriting rule
        instance : dict, optional
            Instance of the input rule. If not specified,
            the identity map of the rule's left-hand side
            is used
        """
        if instance is None:
            # BUG FIX: the default identity instance maps the *rule's*
            # left-hand-side nodes; `self` (the graph) has no `lhs`.
            instance = {
                n: n for n in rule.lhs.nodes()
            }
        # Restrictive phase
        p_g = dict()
        cloned_lhs_nodes = set()
        # Clone nodes
        for lhs, p_nodes in rule.cloned_nodes().items():
            for i, p in enumerate(p_nodes):
                if i == 0:
                    p_g[p] = instance[lhs]
                    cloned_lhs_nodes.add(lhs)
                else:
                    clone_id = self.clone_node(instance[lhs])
                    p_g[p] = clone_id
        # Delete nodes and add preserved nodes to p_g dictionary
        removed_nodes = rule.removed_nodes()
        for n in rule.lhs.nodes():
            if n in removed_nodes:
                self.remove_node(instance[n])
            elif n not in cloned_lhs_nodes:
                p_g[keys_by_value(rule.p_lhs, n)[0]] =\
                    instance[n]
        # Delete edges
        for u, v in rule.removed_edges():
            self.remove_edge(p_g[u], p_g[v])
        # Remove node attributes
        for p_node, attrs in rule.removed_node_attrs().items():
            self.remove_node_attrs(
                p_g[p_node],
                attrs)
        # Remove edge attributes
        for (u, v), attrs in rule.removed_edge_attrs().items():
            self.remove_edge_attrs(p_g[u], p_g[v], attrs)
        # Expansive phase
        rhs_g = dict()
        merged_nodes = set()
        # Merge nodes
        for rhs, p_nodes in rule.merged_nodes().items():
            merge_id = self.merge_nodes(
                [p_g[p] for p in p_nodes])
            merged_nodes.add(rhs)
            rhs_g[rhs] = merge_id
        # Add nodes and add preserved nodes to rhs_g dictionary
        added_nodes = rule.added_nodes()
        for n in rule.rhs.nodes():
            if n in added_nodes:
                if n in self.nodes():
                    new_id = self.generate_new_node_id(n)
                else:
                    new_id = n
                # BUG FIX: do not overwrite `new_id` with the return value
                # of `add_node` — its return is implementation-defined and
                # may be None, which would corrupt rhs_g.
                self.add_node(new_id)
                rhs_g[n] = new_id
            elif n not in merged_nodes:
                rhs_g[n] = p_g[keys_by_value(rule.p_rhs, n)[0]]
        # Add edges
        for u, v in rule.added_edges():
            if (rhs_g[u], rhs_g[v]) not in self.edges():
                self.add_edge(rhs_g[u], rhs_g[v])
        # Add node attributes
        for rhs_node, attrs in rule.added_node_attrs().items():
            self.add_node_attrs(
                rhs_g[rhs_node], attrs)
        # Add edge attributes
        for (u, v), attrs in rule.added_edge_attrs().items():
            self.add_edge_attrs(
                rhs_g[u], rhs_g[v], attrs)
        return rhs_g

    def number_of_edges(self, u, v):
        """Return number of directed edges from u to v.

        Always 1: this abstraction models simple graphs (no multi-edges).
        """
        return 1

    def ancestors(self, t):
        """Return the set of ancestors."""
        current_level = set(self.predecessors(t))
        visited = set()
        while len(current_level) > 0:
            next_level = set()
            for el in current_level:
                if el not in visited:
                    visited.add(el)
                    next_level.update([
                        p
                        for p in self.predecessors(el)
                        if p not in visited])
            current_level = next_level
        return visited

    def descendants(self, s):
        """Return the set of descendants."""
        current_level = set(self.successors(s))
        visited = set()
        while len(current_level) > 0:
            next_level = set()
            for el in current_level:
                if el not in visited:
                    visited.add(el)
                    next_level.update([
                        p
                        for p in self.successors(el)
                        if p not in visited])
            current_level = next_level
        return visited
|
#对比ecl prt中trans结果与cloud trans结果,NNC要实用 'ALLNNC'
##参数
PRTPATH = r"C:\Users\shixi\Documents\newLandProj\zaoyuan\SimCases\C_project\C\C.PRT"
CLOUDTRANSPATH=r"C:\Users\shixi\Desktop\trans.test"
NX=71
NY=140
NZ=42
##PRTPATH = r"C:\Users\shixi\Desktop\cases\ZhanJiang\BASE_newC\BASE_newC.PRT"
##CLOUDTRANSPATH=r"C:\Users\shixi\Desktop\trans.test"
##NX=72
##NY=32
##NZ=112
#CLOUDTRANSPATH=r"C:\Users\shixi\Desktop\Cloud_VS2017_SC\CLOUD2017\transCloud.txt"
#PRTPATH = r"C:\Users\shixi\Desktop\cases\big\KZ0_ecl\KZ0_ECL.PRT"
#NX=171
#NY=77
#NZ=12
##全局变量
IJK_SEC_FLAG=r'(I, J, K)'
PROP_END_FLAG=r'1 **********************************************************************'
import matplotlib.pyplot as plt
####################### One IJK Sec Read Function ##########
def readIJKSection(ijksec,IIDX,JIDX,KIDX,VAL,offset):
    r"""Parse one '(I, J, K)  I= ...' section of a .PRT property dump.

    ijksec is the list of file lines starting at the '(I, J, K)' header and
    running to the end of the section; parsed I, J, K indices and values are
    written into IIDX/JIDX/KIDX/VAL starting at position `offset`.
    Returns the number of entries read. Dashed placeholders ('--') are
    stored as 0.
    """
    # Split the header into the fixed-width label and the I-index columns.
    ijksec[0] = [ijksec[0][:15],ijksec[0][15:].split()]
    # Each data line: first 12 chars hold '(_,J,K)', the rest are values.
    ijksec[1:] = [[ll[:12],ll[12:].split()] for ll in ijksec[1:]]
    # assumes at most 15 I-columns per section — TODO confirm for all decks
    IRange=[0 for i in range(15)]
    idx=0
    nCol=0
    #print(ijksec[0])
    #print(ijksec[1])
    for i in ijksec[0][1]: # parse the I indices (one per value column)
        IRange[nCol]=int(i)
        nCol+=1
    #print(nCol)
    # Total entries: columns x data rows (row 1 is a spacer, hence -2).
    nN = nCol*(len(ijksec)-2)
    #print(nN)
    idx=offset
    for ll in ijksec[2:]: # each data row carries its own J,K indices
        jj = int(ll[0].split(',')[1])
        kk = int(ll[0].split(',')[2][:-1])
        nCol=0
        for i in ll[1]:
            if '--' in i:
                #VAL[idx]=float('nan')
                VAL[idx]=0
            else:
                VAL[idx]=float(i)
            JIDX[idx]=jj
            KIDX[idx]=kk
            IIDX[idx]=IRange[nCol]
            nCol+=1
            idx+=1
    #print(idx)
    return nN
####################### Function End ############
####################### One Property multi-IJK Read Function ##########
def readProperty(LineList,PropStartFlag,IIDX,JIDX,KIDX,VAL):
    r"""Read one property dump from the .PRT file lines into IIDX/JIDX/KIDX/VAL.

    PropStartFlag must be unique within LineList; it locates the start of the
    property, which is then read section by section (see readIJKSection).
    Returns 0 on success, -1 when the flag is not found.
    """
    # Locate the property header.
    lineNum=0
    for ll in LineList:
        if PropStartFlag in ll:
            break
        else:
            lineNum+=1
    if lineNum==len(LineList) :
        print("Make sure ",PropStartFlag," is in the .RPT file!")
        return -1
    lineNum+=2
    # Scan forward collecting [ijkSl, ijkEl) spans, one per IJK section.
    ijkSl=0
    ijkEl=0
    findStartIJK=False
    findEndIJK=False
    breakflag=False
    offset=0
    while lineNum<len(LineList) and breakflag==False:
        lineNum+=1
        if lineNum==len(LineList):
            # End of file: close the section still being collected.
            if findStartIJK==True and findEndIJK==False:
                # BUG FIX: this was `findEndIJK==True` (a comparison, not an
                # assignment), which silently dropped the final section.
                findEndIJK=True
                ijkEl=lineNum
        else:
            ll=LineList[lineNum]
            if PROP_END_FLAG in ll:
                # End of the whole property dump.
                if findStartIJK==True and findEndIJK==False:
                    findEndIJK=True
                    ijkEl=lineNum
                breakflag=True
            elif IJK_SEC_FLAG in ll and findStartIJK==False:
                # A new '(I, J, K)' section header.
                ijkSl=lineNum
                findStartIJK=True
                lineNum+=1
            elif ll[0]=='\n' and findStartIJK==True and findEndIJK==False:
                # Blank line terminates the current section.
                ijkEl=lineNum
                findEndIJK=True
        if findStartIJK==True and findEndIJK==True:
            # Parse the completed section and advance the output offset.
            findStartIJK=False
            findEndIJK=False
            tmpi = readIJKSection(LineList[ijkSl:ijkEl],IIDX,JIDX,KIDX,VAL,offset)
            offset+=tmpi
            #print(' ',tmpi,' entries read. ',offset,' entries read in total.')
    return 0
####################### Function End ############
########## ============NNC Reading ==========###########
def readNNC(LineList):
    """Read the NNC (non-neighbor connection) table from the .RPT lines.

    The table body starts 12 lines below the NNC header flag and ends at the
    first line containing '---'.  Each body line holds
    "I1 J1 K1 I2 J2 K2 trans"; cell coordinates are converted to zero-based
    linear indices using the module-level grid dimensions NX and NY.

    Returns (IJK1, IJK2, VAL), or (0, 0, 0) when the flag is absent.
    """
    nnc_flag = 'ALL NNCS AT 0.00 DAYS'
    nnc_end_flag = r'---'
    # Find the header flag; `start` ends up at its index (or len if absent).
    start = 0
    for line in LineList:
        if nnc_flag in line:
            break
        start += 1
    if start == len(LineList):
        print("Make sure ", nnc_flag, " is in the .RPT file!")
        return 0, 0, 0
    # The table body begins 12 lines below the flag line.
    start += 12
    end = start
    for line in LineList[start:]:
        if nnc_end_flag in line:
            break
        end += 1
    IJK1 = []
    IJK2 = []
    VAL = []
    for line in LineList[start:end]:
        tok = line.split()
        i1, j1, k1 = int(tok[0]), int(tok[1]), int(tok[2])
        i2, j2, k2 = int(tok[3]), int(tok[4]), int(tok[5])
        VAL.append(float(tok[6]))
        IJK1.append((k1 - 1) * NX * NY + (j1 - 1) * NX + (i1 - 1))
        IJK2.append((k2 - 1) * NX * NY + (j2 - 1) * NX + (i2 - 1))
    return IJK1, IJK2, VAL
####################### Function End ############
## TRANS XYZ READ#####
def DirTransRead(RPTF,tranDir,IIDX,JIDX,KIDX,VAL,IJKIDX):
    """Read the TRANX/TRANY/TRANZ property for direction tranDir ('X'/'Y'/'Z')
    from the .RPT lines RPTF into IIDX/JIDX/KIDX/VAL, and fill IJKIDX with
    zero-based linear cell indices computed from the global grid size NX/NY.

    Exits the process on an unknown direction or a failed read.
    Returns 1 on success.
    """
    if tranDir=='X':
        PropFlag = "TRANX AT 0.00 DAYS"
    elif tranDir=='Y':
        PropFlag = "TRANY AT 0.00 DAYS"
    elif tranDir=='Z':
        PropFlag = "TRANZ AT 0.00 DAYS"
    else:
        # Bug fix: exit with a nonzero status on error (was exit(0), which
        # signals success), and fix the 'dirction' typo in the message.
        print('Trans direction error')
        exit(1)
    flag = readProperty(RPTF,PropFlag,IIDX,JIDX,KIDX,VAL)
    if flag<0:
        exit(1)  # readProperty already printed the reason
    # Linear index: (K-1)*NX*NY + (J-1)*NX + (I-1), matching readNNC.
    for i in range(NALLCELL):
        IJKIDX[i]=(KIDX[i]-1)*NX*NY+(JIDX[i]-1)*NX+(IIDX[i]-1)
    print('TRANS ',tranDir,' RPT Read Complete')
    return 1
### DIFF PLOT ######
def DiffPlot(picHandle,p2,tranDir,CloudTransDic,IJKIDX,VAL,IJKIDX2=0):
    """Plot differences between ECL and cloud transmissibilities.

    For each ECL trans value, the matching cloud connection is looked up in
    CloudTransDic (both key orders are tried).  For the X/Y/Z directions the
    second cell of a connection is IJKIDX[i] plus a direction offset; for
    NNCs (any other tranDir) it comes from IJKIDX2.

    picHandle -- axes for the relative-difference histogram
    p2 -- axes for the mismatch histogram (+1 cloud-only, -1 ecl-only)
    tranDir -- 'X', 'Y' or 'Z' for neighbor trans; anything else means NNC
    CloudTransDic -- dict mapping (cellA, cellB) -> transmissibility
    IJKIDX, VAL -- linear cell indices and ECL trans values
    IJKIDX2 -- second-cell indices, used only in the NNC case

    Returns 1.
    """
    # Linear-index offset to the neighboring cell for each direction;
    # a negative di flags the NNC case (pairs come from IJKIDX2).
    if tranDir=='X':
        di=1
    elif tranDir=='Y':
        di=NX
    elif tranDir=='Z':
        di = NX*NY
    else:
        di = -9999
    tmpidx=0
    difflist=[0 for i in IJKIDX]   # relative differences (tcloud-tecl)/tecl
    tmpidx2=0
    difflist2=[0 for i in IJKIDX]  # +1: cloud-only connection, -1: ecl-only
    for ijk in range(0,len(IJKIDX)):
        if ijk%10000==0:
            print('.',end='')  # progress indicator
        tecl=VAL[ijk]
        if di>=0:
            keyi = (IJKIDX[ijk],IJKIDX[ijk]+di)
            keyi2 = (IJKIDX[ijk]+di,IJKIDX[ijk])
        else:
            keyi = (IJKIDX[ijk],IJKIDX2[ijk])
            keyi2 = (IJKIDX2[ijk],IJKIDX[ijk])
        if keyi in CloudTransDic:
            tcloud=CloudTransDic[keyi]
        elif keyi2 in CloudTransDic:
            tcloud=CloudTransDic[keyi2]
        else:
            tcloud=0
        if tecl<1e-5 :
            if tcloud<1e-5:
                continue  # both effectively zero: no difference to record
            else:
                difflist2[tmpidx2]=1   # cloud has a connection ECL lacks
                tmpidx2+=1
                continue
        elif tcloud<1e-5 :
            difflist2[tmpidx2]=-1      # ECL has a connection cloud lacks
            tmpidx2+=1
            continue
        else:
            difflist[tmpidx]=(tcloud-tecl)/tecl
            # Clamp outliers to +/- maxDiff.  The per-direction maxDiff
            # branches in the original were dead code (every branch set 5
            # and the value was then unconditionally overwritten with 0.5).
            maxDiff=0.5
            if abs(difflist[tmpidx])>maxDiff :
                difflist[tmpidx]=difflist[tmpidx]/abs(difflist[tmpidx])*maxDiff
            tmpidx+=1
    print('Diff Calculating End')
    # Histogram of relative differences.
    picHandle.set_ylabel('NUMBER')
    if tranDir=='X':
        picHandle.set_xlabel('TRANX RELDIFF(+ means C>E)')
    elif tranDir=='Y':
        picHandle.set_xlabel('TRANY RELDIFF(+ means C>E)')
    elif tranDir=='Z':
        picHandle.set_xlabel('TRANZ RELDIFF(+ means C>E)')
    else:
        picHandle.set_xlabel('TRAN NNC RELDIFF')
    picHandle.hist(
        difflist[:tmpidx],
        bins=[-0.5,-0.45 ,-0.4, -0.35, -0.3, -0.25, -0.2, -0.15, -0.1, -0.05, -0.01, 0, 0.01, 0.05, 0.1, 0.15, 0.2 ,0.25, 0.3, 0.35, 0.4, 0.45,0.5],
        histtype='bar',
        rwidth=0.8
        )
    # Histogram of one-sided mismatches (-1 ecl-only, +1 cloud-only).
    p2.set_ylabel('NUMBER')
    if tranDir=='X':
        p2.set_xlabel('TRANX DISMATCH(+ means C>E)')
    elif tranDir=='Y':
        p2.set_xlabel('TRANY DISMATCH(+ means C>E)')
    elif tranDir=='Z':
        p2.set_xlabel('TRANZ DISMATCH(+ means C>E)')
    else:
        p2.set_xlabel('TRAN NNC DISMATCH')
    p2.set_xticks([-1,1])
    p2.hist(
        difflist2[:tmpidx2],
        bins=[-1.5,0,1.5],
        histtype='bar',
        rwidth=0.8
        )
    return 1
###==========MAIN LOOP============
# Read the cloud connection list: one "cellA cellB trans" triple per line.
# (fix: use context managers so the file handles are always closed; the
# PRT handle was previously never closed at all)
with open(CLOUDTRANSPATH) as ftmp:
    flines = [ll.split() for ll in ftmp.readlines()]
# Map (cellA, cellB) -> transmissibility.
CloudTransDic = {(int(ll[0]), int(ll[1])): float(ll[2]) for ll in flines}
print('Cloud connList Read Complete')
# Read the whole .RPT file into memory.
with open(PRTPATH) as ftmp:
    flines = ftmp.readlines()
# Working arrays for one property (X, Y or Z trans) over the whole grid.
NALLCELL=NX*NY*NZ
IIDX=[-1 for i in range(NALLCELL)]
JIDX=[-1 for i in range(NALLCELL)]
KIDX=[-1 for i in range(NALLCELL)]
VAL=[-1 for i in range(NALLCELL)]
IJKIDX=[0 for i in range(NALLCELL)]
fig,axes = plt.subplots(2,2)
fig1,axes1 = plt.subplots(2,2)
# Collected ECL connections in cloud-readable form.
# (fix: grow with append instead of indexing into a fixed NALLCELL*4 buffer,
# which could be overrun when the NNC count exceeds NALLCELL)
writeOutList=[]
# X trans: read, plot diffs, collect "cell cell+1 trans" connections.
DirTransRead(flines,'X',IIDX,JIDX,KIDX,VAL,IJKIDX)
DiffPlot(axes[0,0],axes1[0,0],'X',CloudTransDic,IJKIDX,VAL)
for i in range(NALLCELL):
    if VAL[i]>0:
        writeOutList.append(str(IJKIDX[i])+' '+str(IJKIDX[i]+1)+' '+str(VAL[i])+'\n')
# Y trans: neighbor is one row away (offset NX).
DirTransRead(flines,'Y',IIDX,JIDX,KIDX,VAL,IJKIDX)
DiffPlot(axes[0,1],axes1[0,1],'Y',CloudTransDic,IJKIDX,VAL)
for i in range(NALLCELL):
    if VAL[i]>0:
        writeOutList.append(str(IJKIDX[i])+' '+str(IJKIDX[i]+NX)+' '+str(VAL[i])+'\n')
# Z trans: neighbor is one layer away (offset NX*NY).
DirTransRead(flines,'Z',IIDX,JIDX,KIDX,VAL,IJKIDX)
DiffPlot(axes[1,0],axes1[1,0],'Z',CloudTransDic,IJKIDX,VAL)
for i in range(NALLCELL):
    if VAL[i]>0:
        writeOutList.append(str(IJKIDX[i])+' '+str(IJKIDX[i]+NX*NY)+' '+str(VAL[i])+'\n')
# NNC trans (non-neighbor connections); readNNC returns 0,0,0 when absent.
IJKIDX,IJKIDX2,VALNNC = readNNC(flines)
if IJKIDX==0 and IJKIDX2==0 and VALNNC==0:
    print("NO nnc\n")
else:
    DiffPlot(axes[1,1],axes1[1,1],'N',CloudTransDic,IJKIDX,VALNNC,IJKIDX2)
    for i in range(len(IJKIDX)):
        if VALNNC[i]>0:
            writeOutList.append(str(IJKIDX[i])+' '+str(IJKIDX2[i])+' '+str(VALNNC[i])+'\n')
nConns=len(writeOutList)
# Write the ECL connections as a CONN file the cloud simulator can read.
with open(r"ECL_CONN",'w') as fconnOut:
    fconnOut.write("CONN\n")
    fconnOut.write(str(nConns)+" 2\n")  # 2 marks conns imported from ecl
    fconnOut.writelines(writeOutList)
    fconnOut.write(r"/")
fig.tight_layout()
fig1.tight_layout()
plt.show()
print('END')
|
"""
A test for the buttons on the mini-PiTFT display from adafruit.
The code is based on an example on adafruit.com
Note:
Inside the while-loop, a sleep function is used to avoid 100% CPU usage.
This is the because the script is run very fast and the loop is repeating
continuously. Sleeping for 0.01s make not visible difference for the
responsiveness of the display.
"""
import time
import digitalio
import board
from adafruit_rgb_display.rgb import color565
import adafruit_rgb_display.st7789 as st7789
# Configuration for CS and DC pins for Raspberry Pi
cs_pin = digitalio.DigitalInOut(board.CE0)
dc_pin = digitalio.DigitalInOut(board.D25)
reset_pin = None
BAUDRATE = 64000000 # The pi can be very fast!
# Create the ST7789 display:
display = st7789.ST7789(board.SPI(), cs=cs_pin, dc=dc_pin, rst=reset_pin, baudrate=BAUDRATE,
width=135, height=240, x_offset=53, y_offset=40)
backlight = digitalio.DigitalInOut(board.D22)
backlight.switch_to_output()
backlight.value = True
buttonA = digitalio.DigitalInOut(board.D23)
buttonB = digitalio.DigitalInOut(board.D24)
buttonA.switch_to_input()
buttonB.switch_to_input()
# Main loop:
while True:
if buttonA.value and buttonB.value:
backlight.value = False # turn off backlight
else:
backlight.value = True # turn on backlight
if buttonB.value and not buttonA.value: # just button A pressed
display.fill(color565(255, 0, 0)) # red
if buttonA.value and not buttonB.value: # just button B pressed
display.fill(color565(0, 0, 255)) # blue
if not buttonA.value and not buttonB.value: # none pressed
display.fill(color565(0, 255, 0)) # green
time.sleep(0.01)
|
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects for Wipeout."""
from __future__ import absolute_import
from __future__ import unicode_literals
from core import utils
from core.platform import models
# Status codes reported by the user-deletion job.
USER_DELETION_SUCCESS = 'SUCCESS'
USER_DELETION_ALREADY_DONE = 'ALREADY DONE'
# Status codes reported by the deletion-verification job.
USER_VERIFICATION_NOT_DELETED = 'NOT DELETED'
USER_VERIFICATION_SUCCESS = 'SUCCESS'
USER_VERIFICATION_FAILURE = 'FAILURE'
class PendingDeletionRequest:
    """Represents one user's pending account-deletion request."""

    def __init__(
            self,
            user_id,
            email,
            normalized_long_term_username,
            deletion_complete,
            pseudonymizable_entity_mappings):
        """Initializes a PendingDeletionRequest domain object.

        Args:
            user_id: str. ID of the user being deleted.
            email: str. Email address of the user being deleted.
            normalized_long_term_username: str|None. Normalized username of
                the user being deleted; None when the account existed only
                briefly and no long-term username was established.
            deletion_complete: bool. Whether the deletion has finished.
            pseudonymizable_entity_mappings: dict(str, str). Maps entity IDs
                to pseudonymized user IDs.
        """
        self.user_id = user_id
        self.email = email
        self.normalized_long_term_username = normalized_long_term_username
        self.deletion_complete = deletion_complete
        self.pseudonymizable_entity_mappings = pseudonymizable_entity_mappings

    @classmethod
    def create_default(
            cls, user_id, email, normalized_long_term_username=None):
        """Builds a pending deletion request with default field values.

        Args:
            user_id: str. ID of the user being deleted.
            email: str. Email address of the user being deleted.
            normalized_long_term_username: str|None. Normalized username of
                the user being deleted, if established.

        Returns:
            PendingDeletionRequest. A request with deletion_complete set to
            False and empty entity mappings.
        """
        return cls(
            user_id, email, normalized_long_term_username, False, {})

    def validate(self):
        """Checks that the domain object is valid.

        Raises:
            ValidationError. A key of pseudonymizable_entity_mappings is not
                one of the known model module names.
        """
        valid_keys = [name.value for name in models.NAMES]
        for key in self.pseudonymizable_entity_mappings:
            if key not in valid_keys:
                raise utils.ValidationError(
                    'pseudonymizable_entity_mappings contain wrong key')
|
class ContextError(Exception):
    """
    Top-level error covering every run-context problem.
    """
    def __init__(self):
        super().__init__('General run context error')
class ContextRegistrationError(RuntimeError):
    """Base error for problems registering or looking up run contexts."""
class ContextAlreadyRegisteredError(ContextRegistrationError):
    """A context name was registered a second time with a different type."""
    def __init__(self, name: str, existing_class: type, requested_class: type):
        # Bug fix: the message previously filled the "as type" slot with the
        # existing class and the "already registered as" slot with the
        # requested class -- the two were swapped.
        ContextRegistrationError.__init__(
            self,
            'Attempted to register context `{0}` as type {1}, but it is already registered as {2}'.format(
                name, requested_class.__name__, existing_class.__name__
            )
        )
        self.name = name
        self.existing_class = existing_class
        self.requested_class = requested_class
class NoSuchContextError(ContextRegistrationError):
    """Lookup failed: no context was registered under the given name."""
    def __init__(self, name):
        message = 'No such registered context named `{0}`'.format(name)
        ContextRegistrationError.__init__(self, message)
class InvalidContextEnterError(ContextError):
    """Base error raised when entering a child run context fails."""
    def __init__(self, parent_context, child_context, name):
        # Bug fix: this previously called InvalidContextEnterError.__init__
        # (self-recursive and with the wrong argument count, so construction
        # always failed); the intent -- mirroring InvalidContextExitError --
        # is to initialize the ContextError base class.
        ContextError.__init__(self)
        self.parent_context = parent_context
        self.child_context = child_context
        self.requested_context_name = name
class InvalidParentContextError(InvalidContextEnterError):
    """
    The current parent context does not match the active context.
    """
    def __init__(self, parent_context, child_context):
        InvalidContextEnterError.__init__(
            self, parent_context, child_context, child_context.name)

    def __str__(self):
        template = 'Current run context ({0}) is not the parent of new context ({1})'
        return template.format(self.parent_context, self.child_context)
class InvalidChildContextNameError(InvalidContextEnterError):
    """
    The child context didn't set the name right.
    """
    def __init__(self, parent_context, child_context, name):
        InvalidContextEnterError.__init__(self, parent_context, child_context, name)
    def __str__(self):
        # Bug fix: removed a stray unbalanced ')' at the end of the message.
        return 'Expected child context named `{0}`, but found {1}'.format(
            self.requested_context_name, self.child_context.name
        )
class InvalidContextExitError(ContextError):
    """The context being exited is not the context currently active."""
    def __init__(self, parent_context, child_context):
        ContextError.__init__(self)
        self.parent_context = parent_context
        self.child_context = child_context

    def __str__(self):
        template = 'Current run context ({0}) does not match the run context just left ({1})'
        return template.format(self.parent_context, self.child_context)
class NestedContextError(ContextError):
    """
    The child context could not be created due to parent context restrictions.
    """
    def __init__(self, parent_context, context_args):
        ContextError.__init__(self)
        self.parent_context = parent_context
        self.context_args = context_args

    def __str__(self):
        template = 'Parent run context ({0}) refused entering a child run context as {1}'
        return template.format(self.parent_context, repr(self.context_args))
class ExpandedPermissionContextError(NestedContextError):
    """
    The child context expected wider permissions than the parent permitted.
    """
    def __init__(self, parent_context, parent_permissions, requested_permissions):
        context_args = {
            'parentPermissions': parent_permissions,
            'requestedPermissions': requested_permissions
        }
        NestedContextError.__init__(self, parent_context, context_args)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from sepia.DataContainer import DataContainer
# Apply seaborn's default plotting theme globally (module-import side effect).
sns.set()
class SepiaData(object):
"""
Data object used for SepiaModel, containing potentially both `sim_data` and `obs_data` objects of type `sepia.DataContainer`.
:var numpy.ndarray/NoneType x_sim: controllable inputs/experimental conditions, shape (n, p) or None
:var numpy.ndarray/NoneType t_sim: non-controllable inputs, shape (n, q) or None
:var numpy.ndarray y_sim: simulation outputs, shape (n, ell_sim)
:var numpy.ndarray/NoneType y_ind_sim: indices for multivariate y, shape (ell_sim, ), required if ell_sim > 1
:var numpy.ndarray/NoneType x_obs: controllable inputs for observation data, shape (m, p) or None
:var numpy.ndarray/list/NoneType y_obs: observed outputs, shape (m, ell_obs), or list length m of 1D arrays (for ragged y_ind_obs), or None
:var numpy.ndarray/list/NoneType y_ind_obs: vector of indices for multivariate y, shape (l_obs, ), or list length m of 1D arrays (for ragged y_ind_obs), or None
:var bool sim_only: is it simulation-only data?
:var bool scalar_out: is the output y scalar?
:var bool ragged_obs: do the observations have ragged (non-shared) multivariate indices across instances?
:var numpy.ndarray/list x_cat_ind: indices of x that are categorical (0 = not cat, int > 0 = how many categories)
:var numpy.ndarray/list t_cat_ind: indices of t that are categorical (0 = not cat, int > 0 = how many categories)
:var numpy.ndarray/list/NoneType xt_sim_sep: for separable design, list of kronecker composable matrices
:var bool dummy_x: is there a dummy x? (used in problems where no x is provided)
:var bool sep_design: is there a Kronecker separable design?
"""
    def __init__(self, x_sim=None, t_sim=None, y_sim=None, y_ind_sim=None, x_obs=None, y_obs=None, Sigy=None, y_ind_obs=None,
                 x_cat_ind=None, t_cat_ind=None, xt_sim_sep=None):
        """
        Create SepiaData object. Many arguments are optional depending on the type of model.
        Users should instantiate with all data needed for the desired model. See documentation pages for more detail.

        :param numpy.ndarray/NoneType x_sim: controllable inputs/experimental conditions, shape (n, p), or None
        :param numpy.ndarray/NoneType t_sim: non-controllable inputs, shape (n, q), or None
        :param numpy.ndarray y_sim: simulation outputs, shape (n, ell_sim)
        :param numpy.ndarray/NoneType y_ind_sim: indices for multivariate y, shape (ell_sim, ), required if ell_sim > 1
        :param numpy.ndarray/NoneType x_obs: controllable inputs for observation data, shape (m, p) or None
        :param numpy.ndarray/list/NoneType y_obs: observed outputs, shape (m, ell_obs), or list length m of 1D arrays (for ragged y_ind_obs), or None
        :param numpy.ndarray/list/NoneType y_ind_obs: vector of indices for multivariate y, shape (l_obs, ), or list length m of 1D arrays (for ragged y_ind_obs), or None
        :param numpy.ndarray/NoneType Sigy: optional observation covariance matrix (default is identity)
        :param numpy.ndarray/list/NoneType x_cat_ind: indices of x that are categorical (0 = not cat, int > 0 = how many categories), or None
        :param numpy.ndarray/list/NoneType t_cat_ind: indices of t that are categorical (0 = not cat, int > 0 = how many categories), or None
        :param numpy.ndarray/list/NoneType xt_sim_sep: for separable design, list of kronecker composable matrices; it is a list of 2 or
            more design components that, through Kronecker expansion, produce the full input space (`x` and `t`) for the simulations.
        :raises: TypeError if shapes not conformal or required data missing.

        .. note: At least one of x_sim and t_sim must be provided, and y_sim must always be provided.
        """
        # Model-shape flags derived from which arguments were supplied.
        self.sep_design = xt_sim_sep is not None
        self.dummy_x = (not self.sep_design and x_sim is None) or \
                       (self.sep_design and y_obs is not None and x_obs is None)
        self.sim_only = y_obs is None
        # Initial Checks
        if y_sim is None:
            raise TypeError('y_sim is required to set up model.')
        if not self.sep_design:
            if y_obs is not None and ((x_obs is None and x_sim is not None) or (x_obs is not None and x_sim is None)):
                raise ValueError('x_sim and x_obs must both be either not None or None (which is the no-x model case)')
            if x_sim is None and t_sim is None:
                raise TypeError('At least one of x_sim or t_sim is required to set up model.')
        if self.dummy_x:
            # No real x provided: substitute a constant 0.5 dummy column.
            if y_obs is not None:
                x_obs = 0.5 * np.ones((len(y_obs), 1)) # sets up dummy x_obs
            if not self.sep_design: # set up dummy_x in x_sim, or delays until sep/kron processing just below
                x_sim = 0.5 * np.ones((t_sim.shape[0], 1))
        if self.sep_design:
            if x_sim is not None or t_sim is not None:
                raise ValueError('Cannot specify x_sim or t_sim if separable design is supplied')
            if self.dummy_x: # augment the composed design with dummy_x column
                xt_sim_sep.insert(0,np.array([0.5]).reshape(1,1))
            # Expand out the design from the components by kronecker product into x_sim and t_sim (as needed)
            temp_des=xt_sim_sep[-1]
            for ndes in reversed(xt_sim_sep[:-1]):
                # meshgrid index pairs enumerate all row combinations of the
                # two components; hstack builds the expanded design rows.
                r1,r2=np.meshgrid(np.arange(ndes.shape[0]),np.arange(temp_des.shape[0]))
                temp_des=np.hstack((ndes[r1.reshape(-1,order='F'),:],temp_des[r2.reshape(-1,order='F'),:]))
            # separate the composed design into x and t components
            if self.sim_only: # Emulator-only model
                x_sim=temp_des # the design can only be attributed to x's
            else: # extract the shape
                p=x_obs.shape[1]
                x_sim=temp_des[:,:p]
                t_sim=temp_des[:,p:]
        # At this point, dummy_x should be place if needed
        # if it's a separable design, that's composed and split into x_sim and t_sim appropriately
        # the separable design components will be used in logLik and predict, nobody else needs to worry about it now
        # (except carrying it along in SetupModel
        self.sim_data = DataContainer(x=x_sim, y=y_sim, t=t_sim, y_ind=y_ind_sim, xt_sep_design=xt_sim_sep)
        self.scalar_out = (self.sim_data.y.shape[1] == 1)
        self.mean_basis = None
        if self.sim_only:
            self.obs_data = None
        else:
            if x_sim.shape[1] != x_obs.shape[1]:
                raise TypeError('x_sim and x_obs do not contain the same number of variables/columns.')
            self.obs_data = DataContainer(x=x_obs, y=y_obs, y_ind=y_ind_obs, Sigy=Sigy)
            self.sim_only = False
            self.ragged_obs = isinstance(y_obs, list)
        # Set up Sigy - now done in scaling code
        # Process categorical indices
        if x_cat_ind is not None:
            if len(x_cat_ind) != x_sim.shape[1]:
                raise TypeError('x_cat_ind length should equal p.')
            for i, ci in enumerate(x_cat_ind):
                # Categories are assumed coded 1..ci, so ci must equal the max.
                if ci > 0 and ci != np.max(x_sim[:, i]):
                    raise TypeError('Nonzero values of x_cat_ind should equal number of categories.')
        else:
            x_cat_ind = np.zeros(x_sim.shape[1])
        self.x_cat_ind = x_cat_ind
        if t_cat_ind is not None:
            if t_sim is None:
                raise TypeError('Cannot use t_cat_ind if t_sim is not provided.')
            if len(t_cat_ind) != t_sim.shape[1]:
                raise TypeError('t_cat_ind length should equal p.')
            for i, ci in enumerate(t_cat_ind):
                if ci > 0 and ci != np.max(t_sim[:, i]):
                    raise TypeError('Nonzero values of t_cat_ind should equal number of categories.')
        else:
            if t_sim is None:
                t_cat_ind = []
            else:
                t_cat_ind = np.zeros(t_sim.shape[1])
        self.t_cat_ind = t_cat_ind
    # Prints pretty representation of the SepiaData object for users to check their setup.
    def __str__(self):
        """Return a human-readable summary of the model setup implied by this data."""
        res = ''
        res += 'This SepiaData instance implies the following:\n'
        if self.sim_only:
            res += 'This is a simulator (eta)-only model, y dimension %d\n' % self.sim_data.y.shape[1]
            res += 'm  = %5d (number of simulated data)\n' % self.sim_data.x.shape[0]
            res += 'p  = %5d (number of inputs)\n' % self.sim_data.x.shape[1]
            if self.sim_data.t is not None:
                res += 'q  = %5d (number of additional simulation inputs)\n' % self.sim_data.t.shape[1]
            if self.scalar_out:
                res += 'pu =     1 (univariate response dimension)\n'
            elif self.sim_data.K is not None:
                res += 'pu = %5d (transformed response dimension)\n' % self.sim_data.K.shape[0]
            else:
                res += 'pu NOT SET (transformed response dimension); call method create_K_basis \n'
        else:
            if self.ragged_obs:
                res += 'This is a simulator and obs model, sim y dimension %d, obs y dimension ragged\n' % self.sim_data.y.shape[1]
            else:
                res += 'This is a simulator and obs model, sim y dimension %d, obs y dimension %d\n' % (self.sim_data.y.shape[1], self.obs_data.y.shape[1])
            res += 'n  = %5d (number of observed data)\n' % self.obs_data.x.shape[0]
            res += 'm  = %5d (number of simulated data)\n' % self.sim_data.x.shape[0]
            res += 'p  = %5d (number of inputs)\n' % self.sim_data.x.shape[1]
            res += 'q  = %5d (number of additional simulation inputs to calibrate)\n' % self.sim_data.t.shape[1]
            if self.scalar_out:
                res += 'pu =     1 (univariate response dimension)'
            else:
                if self.sim_data.K is not None and self.obs_data.K is not None:
                    res += 'pu = %5d (transformed response dimension)\n' % self.sim_data.K.shape[0]
                else:
                    res += 'pu NOT SET (transformed response dimension); call method create_K_basis\n'
                if self.obs_data.D is not None:
                    if self.ragged_obs:
                        res += 'pv = %5d (transformed discrepancy dimension)\n' % self.obs_data.D[0].shape[0]
                    else:
                        res += 'pv = %5d (transformed discrepancy dimension)\n' % self.obs_data.D.shape[0]
                else:
                    res += 'pv not set, indicating (unusual case of) no discrepancy; call method create_D_basis to fix \n'
        # Info on separable design, if that's in place.
        if self.sep_design:
            res += 'This is a separable simulation design with components: \n'
            for ii in range(len(self.sim_data.xt_sep_design)):
                res += '   x component %d has m = %5d (simulated data design size) \n' % (
                    ii, self.sim_data.xt_sep_design[ii].shape[0])
                res += '   x component %d has p = %5d (number of inputs) \n' % (
                    ii, self.sim_data.xt_sep_design[ii].shape[1])
        # Print info on categorical variables
        if np.any(np.array(self.x_cat_ind) > 0):
            res += 'Categorical x input variables:\n'
            for i, ci in enumerate(self.x_cat_ind):
                if ci > 0:
                    res += 'x index %d with %d categories\n' % (i, ci)
        if np.any(np.array(self.t_cat_ind) > 0):
            res += 'Categorical t input variables:\n'
            for i, ci in enumerate(self.t_cat_ind):
                if ci > 0:
                    res += 't index %d with %d categories\n' % (i, ci)
        return res
    def transform_xt(self, x_notrans=None, t_notrans=None, x_range=None, t_range=None, x=None, t=None, native=False):
        """
        Transforms sim_data x and t and obs_data x to lie in [0, 1], columnwise, or applies
        same transformation to new x and t.

        :param list/NoneType x_notrans: column indices of x that should not be transformed or None
        :param list/NoneType t_notrans: column indices of t that should not be transformed or None
        :param numpy.ndarray/NoneType x: new x values to transform to [0, 1] using same rules as original x data or None
        :param numpy.ndarray/NoneType t: new t values to transform to [0, 1] using same rules as original t data or None
        :param numpy.ndarray/NoneType x_range: user specified data ranges, first row is min, second row is max for each variable
        :param numpy.ndarray/NoneType t_range: user specified data ranges, first row is min, second row is max for each variable
        :param bool native: boolean for reverse transformation on x,t from [0, 1] to native scale
        :returns: tuple of x_trans, t_trans if x and t arguments provided; otherwise returns (None, None)

        .. note:: A column is not transformed if min/max of the column values are equal, if the column is categorical,
            or if the user specifies no transformation using x_notrans or t_notrans arguments.
        """
        x_trans, t_trans = None, None
        if x_notrans is None:
            x_notrans = []
        if t_notrans is None:
            t_notrans = []
        if x_notrans is True:
            # True means "do not transform any x column".
            x_notrans = np.arange(self.sim_data.x.shape[1])
        # making notes to transform the separable design elements, if needed
        transform_sep = False
        # Transform x to unit hypercube or user-specified ranges
        # if not computed, compute orig x min and orig x max, accounting for notrans_x, all equal x, and categorical x
        if self.sim_data.orig_x_min is None or self.sim_data.orig_x_max is None or self.sim_data.x_trans is None:
            if self.sep_design:
                transform_sep=True
            nx = self.sim_data.x.shape[1]
            if x_range is None:
                orig_x_min = np.min(self.sim_data.x, 0, keepdims=True)
                orig_x_max = np.max(self.sim_data.x, 0, keepdims=True)
            else:
                if x_range.shape[0] !=2:
                    raise ValueError('user-specified ranges are first row min, second row max')
                if x_range.shape[1] != nx:
                    raise ValueError('user-specified ranges must be given for every x variable')
                orig_x_min = x_range[[0],:].reshape((1,-1))
                orig_x_max = x_range[[1],:].reshape((1,-1))
            # If any xmin/xmax are equal, don't transform
            xmm = orig_x_max - orig_x_min
            x_notrans = list(set(x_notrans) | set([i for i in range(nx) if xmm[:, i] == 0]))
            # If there are cat inds, do not transform
            if self.x_cat_ind is not None:
                x_notrans = list(set(x_notrans) | set([i for i in range(nx) if self.x_cat_ind[i] > 0]))
            # Setting min=0/max=1 makes the transform an identity for
            # columns that must not be rescaled.
            orig_x_min[:, x_notrans] = 0
            orig_x_max[:, x_notrans] = 1
            self.sim_data.x_trans = (self.sim_data.x - orig_x_min) / (orig_x_max - orig_x_min)
            self.sim_data.orig_x_min = orig_x_min
            self.sim_data.orig_x_max = orig_x_max
            if not self.sim_only:
                self.obs_data.orig_x_min = orig_x_min
                self.obs_data.orig_x_max = orig_x_max
                self.obs_data.x_trans = (self.obs_data.x - orig_x_min) / (orig_x_max - orig_x_min)
        # If a new x was passed in, transform it
        if x is not None and not native:
            x_trans = (x - self.sim_data.orig_x_min) / (self.sim_data.orig_x_max - self.sim_data.orig_x_min)
        if x is not None and native:
            # native=True applies the inverse map: [0, 1] back to data scale.
            x_trans = (x * (self.sim_data.orig_x_max - self.sim_data.orig_x_min)) + self.sim_data.orig_x_min
        # Transform t to unit hypercube or user-specified ranges
        if self.sim_data.t is not None:
            if t_notrans is True:
                t_notrans = np.arange(self.sim_data.t.shape[1])
            # if not computed, compute orig t min and orig t max, accounting for notrans_t, all equal t, and categorical t
            if self.sim_data.orig_t_min is None or self.sim_data.orig_t_max is None or self.sim_data.t_trans is None:
                nt = self.sim_data.t.shape[1]
                if t_range is None:
                    orig_t_min = np.min(self.sim_data.t, 0, keepdims=True)
                    orig_t_max = np.max(self.sim_data.t, 0, keepdims=True)
                else:
                    if t_range.shape[0] != 2:
                        raise ValueError('user-specified ranges are first row min, second row max')
                    if t_range.shape[1] != nt:
                        raise ValueError('user-specified ranges must be given for every t variable')
                    orig_t_min = t_range[0, :].reshape((1,-1))
                    orig_t_max = t_range[1, :].reshape((1,-1))
                # If any tmin/tmax are equal, don't transform
                tmm = orig_t_max - orig_t_min
                t_notrans = list(set(t_notrans) | set([i for i in range(nt) if tmm[:, i] == 0]))
                # If there are cat inds, do not transform
                if self.t_cat_ind is not None:
                    t_notrans = list(set(t_notrans) | set([i for i in range(nt) if self.t_cat_ind[i] > 0]))
                orig_t_min[:, t_notrans] = 0
                orig_t_max[:, t_notrans] = 1
                self.sim_data.t_trans = (self.sim_data.t - orig_t_min) / (orig_t_max - orig_t_min)
                self.sim_data.orig_t_min = orig_t_min
                self.sim_data.orig_t_max = orig_t_max
                if not self.sim_only:
                    self.obs_data.orig_t_min = orig_t_min
                    self.obs_data.orig_t_max = orig_t_max
            # If a new t was passed in, transform it
            if t is not None and not native:
                t_trans = (t - self.sim_data.orig_t_min) / (self.sim_data.orig_t_max - self.sim_data.orig_t_min)
            if t is not None and native:
                t_trans = (t * (self.sim_data.orig_t_max - self.sim_data.orig_t_min)) + self.sim_data.orig_t_min
        if transform_sep:
            # Rescale the separable design components in place with the same
            # per-column min/max, keeping a copy of the originals.
            self.sim_data.xt_sep_design_orig = self.sim_data.xt_sep_design.copy()
            if self.sim_data.orig_t_min is not None:
                sep_min = np.hstack((self.sim_data.orig_x_min, self.sim_data.orig_t_min))
                sep_max = np.hstack((self.sim_data.orig_x_max, self.sim_data.orig_t_max))
            else:
                sep_min = self.sim_data.orig_x_min
                sep_max = self.sim_data.orig_x_max
            tind=0
            for ii,dele in enumerate(self.sim_data.xt_sep_design):
                dlen = dele.shape[1]
                self.sim_data.xt_sep_design[ii] = \
                    (dele - sep_min[0,tind:tind+dlen]) / (sep_max[0,tind:tind+dlen] - sep_min[0,tind:tind+dlen])
                tind = tind + dlen
        return x_trans, t_trans
    def standardize_y(self, center=True, scale='scalar', y_mean=None, y_sd=None):
        """
        Standardizes both `sim_data` and `obs_data` outputs y based on sim_data.y mean/SD.

        :param bool center: subtract simulation mean (across observations)?
        :param string/bool scale: how to rescale: 'scalar': single SD over all demeaned data, 'columnwise': SD for each column of demeaned data, False: no rescaling
        :param numpy.ndarray/float/NoneType y_mean: y_mean for sim; optional, should match length of y_ind_sim or be scalar
        :param numpy.ndarray/float/NoneType y_sd: y_sd for sim; optional, should match length of y_ind_sim or be scalar
        """
        # Simulation mean (or user-supplied / zero when center=False).
        if center:
            if y_mean is None:
                self.sim_data.orig_y_mean = np.mean(self.sim_data.y, 0)
            else:
                self.sim_data.orig_y_mean = y_mean
        else:
            self.sim_data.orig_y_mean = 0.
        y_dm = self.sim_data.y - self.sim_data.orig_y_mean
        # Simulation SD per the `scale` option (or user-supplied y_sd).
        if y_sd is not None:
            self.sim_data.orig_y_sd = y_sd
        else:
            if scale == 'scalar':
                self.sim_data.orig_y_sd = np.std(y_dm, ddof=1)
            elif scale == 'columnwise':
                self.sim_data.orig_y_sd = np.std(y_dm, ddof=1, axis=0)
            elif scale is False:
                self.sim_data.orig_y_sd = 1.
            else:
                raise ValueError('standardize_y: invalid value for scale parameter, allowed are {''scalar'',''columnwise'',False}')
        self.sim_data.y_std = y_dm/self.sim_data.orig_y_sd
        if not self.sim_only:
            # Carry the sim mean over to the obs grid, interpolating when the
            # mean is per-index (multivariate) rather than scalar.
            if not self.scalar_out and not np.isscalar(self.sim_data.orig_y_mean):
                if self.ragged_obs:
                    orig_y_mean = []
                    for i in range(len(self.obs_data.y)):
                        orig_y_mean.append(np.interp(self.obs_data.y_ind[i], self.sim_data.y_ind.squeeze(), self.sim_data.orig_y_mean))
                else:
                    orig_y_mean = np.interp(self.obs_data.y_ind.squeeze(), self.sim_data.y_ind.squeeze(), self.sim_data.orig_y_mean)
                self.obs_data.orig_y_mean = orig_y_mean
            else:
                if self.ragged_obs:
                    self.obs_data.orig_y_mean = [self.sim_data.orig_y_mean for i in range(len(self.obs_data.y))]
                else:
                    self.obs_data.orig_y_mean = self.sim_data.orig_y_mean
            # Same for the SD when it is per-column ('columnwise').
            if not self.scalar_out and not np.isscalar(self.sim_data.orig_y_sd):
                if self.ragged_obs:
                    orig_y_sd = []
                    for i in range(len(self.obs_data.y)):
                        orig_y_sd.append(np.interp(self.obs_data.y_ind[i], self.sim_data.y_ind.squeeze(), self.sim_data.orig_y_sd))
                else:
                    # NOTE(review): unlike the mean branch above, y_ind is not
                    # .squeeze()'d here -- looks inconsistent when y_ind is
                    # stored as (l, 1); confirm intended shapes.
                    orig_y_sd = np.interp(self.obs_data.y_ind, self.sim_data.y_ind, self.sim_data.orig_y_sd)
                self.obs_data.orig_y_sd = orig_y_sd
            else:
                if self.ragged_obs:
                    self.obs_data.orig_y_sd = [self.sim_data.orig_y_sd for i in range(len(self.obs_data.y))]
                else:
                    self.obs_data.orig_y_sd = self.sim_data.orig_y_sd
            # Outer product (or square) of the SD, used to rescale Sigy.
            def cov_norm(ysd):
                if np.isscalar(ysd):
                    return ysd**2
                ysd=ysd.reshape((1,-1))
                return(ysd.T @ ysd)
            # Standardize obs y and its covariance (identity when Sigy absent).
            if self.ragged_obs:
                ty_std=[]; tSigy_std=[]
                for i in range(len(self.obs_data.y)):
                    ty_std.append( (self.obs_data.y[i] - self.obs_data.orig_y_mean[i]) / self.obs_data.orig_y_sd[i] )
                    if self.obs_data.Sigy is None:
                        tSigy_std.append(np.atleast_2d(np.diag(np.ones(self.obs_data.y[i].shape))))
                    else:
                        tSigy_std.append(self.obs_data.Sigy[i] / cov_norm(self.obs_data.orig_y_sd[i]) )
            else:
                ty_std = (self.obs_data.y - self.obs_data.orig_y_mean) / self.obs_data.orig_y_sd
                if self.obs_data.Sigy is None:
                    tSigy_std = np.diag(np.ones(self.obs_data.y.shape[1]))
                else:
                    tSigy_std = self.obs_data.Sigy / cov_norm(self.obs_data.orig_y_sd)
            self.obs_data.y_std = ty_std
            self.obs_data.Sigy_std=tSigy_std
def set_mean_basis(self, basis_type='linear'):
    """
    Sets a mean basis (H) for a scalar response model.

    :param str basis_type: name of basis to be used ('constant' or 'linear')
    :raises RuntimeError: if model is not scalar output, or transform_xt() has not been called
    :raises ValueError: if basis_type is not 'constant' or 'linear'
    """
    if not self.scalar_out:
        # Fixed error message: closing paren was missing in the original text.
        raise RuntimeError('Cannot specify an emulator mean basis unless model is scalar output \n'
                           '(which is based on y passed to SepiaData having y.shape[1]==1)')
    if basis_type not in ['constant', 'linear']:
        raise ValueError('basis_type options are constant or linear (default)')
    if self.sim_data.x_trans is None and self.sim_data.t_trans is None:
        raise RuntimeError('Scale data i.e. transform_xt() before requesting basis')
    self.mean_basis = basis_type
    # The w-related basis: pick which transformed inputs the sim basis lives on.
    if self.dummy_x:
        # living on just t's
        sim_indeps = self.sim_data.t_trans
    elif self.sim_only:
        # living on just x's
        sim_indeps = self.sim_data.x_trans
    else:
        # living on x's and t's
        sim_indeps = np.hstack((self.sim_data.x_trans, self.sim_data.t_trans))
    self.sim_data.H = self.make_mean_basis(sim_indeps)
    # The u-related basis (only when observed data are present).
    if not self.sim_only:
        if self.dummy_x:
            # waiting for t's; theta columns are appended later (make_obs_mean_basis)
            self.obs_data.H = self.make_mean_basis(np.array([[]]))
        else:
            # living on x's (t's come later)
            obs_indeps = self.obs_data.x_trans
            self.obs_data.H = self.make_mean_basis(x=obs_indeps)
def make_mean_basis(self, x=None):
    """
    Internal helper: build the mean basis matrix H for inputs x.

    :param numpy.ndarray x: 2D input array of shape (n, p)
    :return: H of shape (n, 1) for 'constant', or (n, 1+p) for 'linear'
    """
    if self.mean_basis == 'constant':
        # Bug fix: original used np.ones((x[0], 1)) -- x[0] is the first row
        # of x, not the number of rows; the row count is x.shape[0].
        H = np.ones((x.shape[0], 1))
    elif self.mean_basis == 'linear':
        # Intercept column followed by the inputs themselves.
        H = np.hstack((np.ones((x.shape[0], 1)), x))
    return H
def make_obs_mean_basis(self, theta=None):
    """
    Internal helper: return the mean basis for obs, given a specified theta.

    :param numpy.ndarray theta: row of calibration parameter values to append (linear basis only)
    :return: basis matrix for the observed data
    """
    if self.mean_basis == 'constant':
        # A constant basis does not depend on theta, so the stored obs H is
        # already complete. (Original fell through with Haug unbound, raising
        # UnboundLocalError at return.)
        Haug = self.obs_data.H
    elif self.mean_basis == 'linear':
        # Append theta, repeated for every obs row, to the stored basis.
        Haug = np.hstack((self.obs_data.H, np.tile(theta, (self.obs_data.H.shape[0], 1))))
    return Haug
def create_K_basis(self, n_pc=0.995, K=None):
    """
    Creates `K_sim` and `K_obs` basis functions using PCA on sim_data.y_std, or using given `K_sim` matrix.

    :param float/int n_pc: proportion in [0, 1] of variance, or an integer number of components
    :param numpy.ndarray/None K: a basis matrix on sim indices of shape (n_basis_elements, ell_sim) or None

    .. note:: if standardize_y() method has not been called first, it will be called automatically by this method.
    """
    if self.scalar_out:
        if n_pc == 1:
            # Special case: treat scalar output with a single zero basis element
            # and stop considering the model scalar from here on.
            print('Scalar output, using pu = 1 basis.')
            self.sim_data.K = np.zeros((n_pc, 1))
            self.scalar_out = False
            return
        else:
            print('Scalar output, no basis used.')
            return
    if K is not None:
        # User-supplied basis: validate type and shape, then use it as-is.
        if not isinstance(K,np.ndarray):
            raise TypeError('create_K_basis: K specified must be a numpy ndarray')
        if len(K.shape)!=2 or K.shape[1]!=self.sim_data.y.shape[1]:
            raise ValueError('create_K_basis: must be 2D, and K and y_sim must have the same second dimension')
        self.sim_data.K = K
    else:
        # No K given: compute a PCA basis from standardized sim outputs.
        self.compute_sim_PCA_basis(n_pc)
    # interpolate PC basis to observed, if present
    if not self.sim_only:
        pu = self.sim_data.K.shape[0]
        if self.ragged_obs:
            # One interpolated basis matrix per (ragged) observation.
            K_obs = []
            for ki in range(len(self.obs_data.y)):
                K_obs_tmp = np.zeros((pu, self.obs_data.y_ind[ki].shape[0]))
                for i in range(pu):
                    K_obs_tmp[i, :] = np.interp(self.obs_data.y_ind[ki], self.sim_data.y_ind, self.sim_data.K[i, :])
                K_obs.append(K_obs_tmp)
        else:
            # Single obs index grid: interpolate each PC onto it.
            K_obs = np.zeros((pu, self.obs_data.y_ind.shape[0]))
            for i in range(pu):
                K_obs[i, :] = np.interp(self.obs_data.y_ind, self.sim_data.y_ind, self.sim_data.K[i, :])
        self.obs_data.K = K_obs
def compute_sim_PCA_basis(self, n_pc):
    """
    Internal helper for create_K_basis: does PCA on sim_data.y_std and sets sim_data.K.

    :param float/int n_pc: an integer number of components, or a proportion of
        variance explained in [0, 1).
    """
    y_std = self.sim_data.y_std
    if y_std is None:
        print('WARNING: y not standardized, applying default standardization before PCA...')
        self.standardize_y()
        # Bug fix: re-read the freshly standardized data; the local `y_std`
        # was still None and the SVD below would crash.
        y_std = self.sim_data.y_std
    U, s, V = np.linalg.svd(y_std.T, full_matrices=False)
    s2 = np.square(s)
    if n_pc < 1:
        # Smallest number of PCs whose cumulative variance fraction reaches n_pc.
        cum_var = s2 / np.sum(s2)
        pu = np.sum(np.cumsum(cum_var) < n_pc) + 1
    else:
        pu = int(n_pc)
    # Basis: leading singular vectors scaled by singular values, normalized by sqrt(m).
    self.sim_data.K = np.transpose(np.dot(U[:, :pu], np.diag(s[:pu])) / np.sqrt(y_std.shape[0]))
def create_D_basis(self, D_type='constant', D_obs=None, D_sim=None, norm=True):
    """
    Create `D_obs`, `D_sim` discrepancy bases. Can specify a type of default basis (constant/linear) or provide matrices.

    :param string D_type: 'constant' or 'linear' to set up constant or linear D_sim and D_obs
    :param numpy.ndarray/list/NoneType D_obs: a basis matrix on obs indices of shape (n_basis_elements, ell_obs),
                                              or list of matrices for ragged observations.
    :param numpy.ndarray/NoneType D_sim: a basis matrix on sim indices of shape (n_basis_elements, sim_obs).
    :param bool norm: normalize D basis?

    .. note:: `D_type` parameter is ignored if `D_obs` and `D_sim` are provided.
    """
    # Return early if sim only or univariate output
    if self.sim_only:
        print('Model only has simulation data, skipping discrepancy...')
        return
    if self.scalar_out:
        print('Model has univariate output, skipping discrepancy...')
        return
    # Check if passed in D_sim/D_obs are correct shape and if so, set them into objects
    if D_sim is not None:
        if not D_sim.shape[1] == self.sim_data.y.shape[1]:
            raise TypeError('D_sim basis shape incorrect; second dim should match ell_sim')
        self.sim_data.D = D_sim
    if D_obs is not None:
        if self.ragged_obs:
            # Ragged obs: each D_obs[i] must match the length of obs i.
            for i in range(len(D_obs)):
                if not D_obs[i].shape[1] == (self.obs_data.y[i].shape[1] if self.obs_data.y[i].ndim == 2 else self.obs_data.y[i].shape[0]):
                    raise TypeError('D basis shape incorrect; second dim should match ell_obs')
        else:
            if not D_obs.shape[1] == self.obs_data.y.shape[1]:
                raise TypeError('D_obs basis shape incorrect; second dim should match ell_obs')
        self.obs_data.D = D_obs
    elif D_type == 'constant':
        # Default constant basis: a single all-ones element.
        if self.ragged_obs:
            self.obs_data.D = [np.ones((1, self.obs_data.y[i].shape[0])) for i in range(len(self.obs_data.y))]
        else:
            self.obs_data.D = np.ones((1, self.obs_data.y.shape[1]))
        self.sim_data.D = np.ones((1, self.sim_data.y.shape[1]))
    elif D_type == 'linear':
        # Default linear basis: intercept row stacked with the output index.
        self.sim_data.D = np.vstack([np.ones(self.sim_data.y.shape[1]), self.sim_data.y_ind])
        if self.ragged_obs:
            self.obs_data.D = [np.vstack([np.ones(self.obs_data.y[i].shape[0]), self.obs_data.y_ind[i]]) for i in range(len(self.obs_data.y))]
        else:
            self.obs_data.D = np.vstack([np.ones(self.obs_data.y.shape[1]), self.obs_data.y_ind])
    # Normalize D to match priors
    if norm:
        if D_sim is not None:
            # Scale both sim and obs D by the sim D's max self-inner-product.
            norm_scl = np.sqrt(np.max(np.dot(self.sim_data.D, self.sim_data.D.T)))
            self.sim_data.D /= norm_scl
            if self.ragged_obs:
                for i in range(len(self.obs_data.D)):
                    self.obs_data.D[i] /= norm_scl
            else:
                self.obs_data.D /= norm_scl
        else:
            # NOTE(review): when D was built from D_type (D_sim is None), only
            # the obs-side D is normalized here; sim_data.D keeps its raw
            # scale -- confirm this asymmetry is intended.
            if self.ragged_obs:
                norm_scl = np.sqrt(np.max(np.dot(self.obs_data.D[0], self.obs_data.D[0].T)))
                for i in range(len(self.obs_data.D)):
                    self.obs_data.D[i] /= norm_scl
            else:
                norm_scl = np.sqrt(np.max(np.dot(self.obs_data.D, self.obs_data.D.T)))
                self.obs_data.D /= norm_scl
|
import sys
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.core.management.base import BaseCommand
from core.utils import create_temporary_password
from ...utils import ensure_group_exists, create_user, ensure_user_is_member_of_group, CrowdError
def dot(ch='.'):
    """Emit one progress character to stdout immediately, without a newline."""
    print(ch, end='', flush=True)
class Command(BaseCommand):
    """Sync local users and groups into Crowd and ensure memberships."""

    args = ''
    help = 'Make sure all users belong to their respective labour groups'

    # Bug fix: `handle` is an instance method and must declare `self`
    # explicitly (the original relied on self being swallowed by *args).
    def handle(self, *args, **options):
        User = get_user_model()

        # Make sure every local group exists remotely.
        for group in Group.objects.all():
            ensure_group_exists(group.name)
            dot()

        for user in User.objects.filter(person__isnull=False):
            # Best-effort user creation; '+' marks a user whose creation
            # failed in Crowd (NOTE(review): presumably "already exists" --
            # confirm against CrowdError semantics).
            try:
                create_user(user, create_temporary_password())
                dot()
            except CrowdError:
                dot('+')

            # Ensure remote membership for each of the user's local groups.
            for group in user.groups.all():
                ensure_user_is_member_of_group(user, group.name)
                dot()
|
import numpy as np
from helper_class1.data_reader import DataReader
from helper_class1.hyper_parameters import HyperParameters
from helper_class1.neural_net import NeuralNet
from matplotlib import pyplot as plt
# Path to the bundled sample data for chapter 4 (relative to this script).
file_name = '../../../ai-edu/A-基础教程/A2-神经网络基本原理简明教程/data/ch04.npz'
# Module-level reader shared by train() and show_result().
data_reader = DataReader(file_name)
data_reader.read_data()
def train(eta, max_epoch, batch_size, eps):
    """Build a 1-input/1-output network with the given hyper-parameters,
    train it on the module-level data_reader, and plot the resulting fit."""
    params = HyperParameters(1, 1, eta=eta, max_epoch=max_epoch, batch_size=batch_size, eps=eps)
    model = NeuralNet(params)
    model.train(data_reader)
    show_result(model)
def show_result(net):
    """Scatter the training samples and overlay the network's prediction line."""
    sample_x, sample_y = data_reader.get_whole_train_samples()
    # training data as blue dots
    plt.plot(sample_x, sample_y, "b.")
    # network prediction over [0, 1] as a red line
    grid = np.linspace(0, 1, 5).reshape(5, 1)
    predicted = net.inference(grid)
    plt.plot(grid, predicted, "r")
    plt.title("Air Conditioner Power")
    plt.xlabel("Number of Servers(K)")
    plt.ylabel("Power of Air Conditioner(KW)")
    plt.show()
if __name__ == "__main__":
    # eta=0.5, max_epoch=1000, batch_size=-1, eps=0.001
    # NOTE(review): batch_size=-1 presumably means full-batch gradient
    # descent -- confirm in DataReader/NeuralNet.
    train(0.5, 1000, -1, 0.001)
|
#!/usr/bin/env python
from sys import stdin
def main():
    # Python 2 script. Domino Eulerian-trail solver: reads m dominoes as
    # pip pairs (j, k) with pips 0..6, then prints each domino's input index
    # and orientation ('+' as given, '-' reversed) along a trail using every
    # domino once, or 'No' if no such trail exists.
    m = int(stdin.readline())
    # dom[(j, k)]: list of 1-based input indices of dominoes read as "j k".
    # NOTE(review): doubles (j == k) are never added as keys, so an input
    # containing a double raises KeyError below -- confirm inputs exclude them.
    dom = {}
    count = [0]*7       # degree of each pip value in the domino multigraph
    for i in range(7):
        for j in range(7):
            if i != j: dom[i, j] = []
    for i in range(m):
        j, k = stdin.readline().split()
        j, k = int(j), int(k)
        count[j] += 1
        count[k] += 1
        dom[j, k].append(i+1)
    odd = 0
    n = 0
    # An Eulerian trail exists iff exactly 0 or 2 vertices have odd degree;
    # with 2, the trail must start at an odd-degree vertex.
    for i in range(7):
        if count[i]%2:
            start = i
            odd += 1
    if odd != 0 and odd != 2:
        print 'No'
        return
    # NOTE(review): if odd == 0, `start` is never assigned and the next line
    # raises NameError -- the all-even case needs a start vertex chosen
    # (e.g. any pip with count > 0). Confirm intended inputs.
    stack = [(start,)]
    # Iterative trail construction: extend along any unused domino from the
    # current vertex ('+' forward, '-' reversed); backtrack and emit when stuck.
    while stack:
        sign = ''
        for i in range(7):
            if (stack[-1][0], i) in dom and dom[stack[-1][0], i]:
                d = dom[stack[-1][0], i]
                sign = '+'
                break
            if (i, stack[-1][0]) in dom and dom[i, stack[-1][0]]:
                d = dom[i, stack[-1][0]]
                sign = '-'
                break
        # NOTE(review): debug print left in -- dumps internal state every
        # iteration and pollutes the expected output.
        print sign, stack
        if sign:
            stack.append((i, '%d %s' % (d.pop(), sign)))
        else:
            d = stack.pop()
            if (len(d) > 1): print d[1]
if __name__ == '__main__':
    main()
|
from rest_framework.permissions import BasePermission
from cride.circles.models import Membership
class isCircleAdmin(BasePermission):
    """Permission allowing access only to active admin members of the circle."""

    def has_object_permission(self, request, view, obj):
        """Verify the requesting user has an active admin membership in the circle.

        Args:
            request: incoming request; request.user is checked for membership.
            view: the view being accessed (unused).
            obj: the Circle instance being operated on.

        Returns:
            bool: True if an active admin Membership exists, False otherwise.
        """
        # exists() replaces get()/DoesNotExist: same result, no exception
        # handling needed, and it cannot blow up with MultipleObjectsReturned
        # if duplicate memberships exist.
        return Membership.objects.filter(
            user=request.user,
            circle=obj,
            is_admin=True,
            is_active=True,
        ).exists()
from csrv.model import actions
from csrv.model import timing_phases
from csrv.model.cards import asset
from csrv.model.cards import card_info
class YesCard01087(actions.Action):
    """Action: give the runner tags based on this card's advancement tokens."""

    DESCRIPTION = 'Give the runner tags'

    def __init__(self, game, player, advancements):
        # Bug fix: the parent __init__ was never called, so self.game (used in
        # resolve) was never set. Sibling NoCard01087 is constructed as
        # NoCard01087(game, player), which grounds this parent signature --
        # TODO confirm actions.Action.__init__ takes (game, player).
        actions.Action.__init__(self, game, player)
        self.advancements = advancements

    def resolve(self, response=None):
        # Queue a TakeTags phase for the runner, sized by the advancements.
        self.game.insert_next_phase(
            timing_phases.TakeTags(self.game, self.game.runner, self.advancements))
class NoCard01087(actions.Action):
    """Action: decline to give the runner tags (no-op choice)."""
    DESCRIPTION = 'Do not give the runner tags'
class DecideCard01087(timing_phases.ActivateAbilityChoice):
    """Choose whether to use Card01087."""
    def __init__(self, game, player, advancements):
        # Offer the corp a yes/no choice; `advancements` sizes the tag
        # penalty the yes-action imposes. Final None is the card argument
        # of ActivateAbilityChoice -- TODO confirm its meaning.
        timing_phases.ActivateAbilityChoice.__init__(
            self, game, player,
            YesCard01087(game, player, advancements),
            NoCard01087(game, player),
            None)
class Card01087(asset.Asset):
    """Advanceable ambush asset: on access while installed and advanced, the
    corp may choose to give the runner tags (see DecideCard01087)."""
    NAME = u'Card01087'
    SET = card_info.CORE
    NUMBER = 87
    SIDE = card_info.CORP
    FACTION = card_info.NEWSCORP
    INFLUENCE = 1
    UNIQUE = False
    KEYWORDS = set([
        card_info.AMBUSH,
        card_info.FACILITY,
    ])
    COST = 0
    ADVANCEABLE = True
    IMAGE_SRC = '01087.png'
    TRASH_COST = 0
    # During the corp's action window this card contributes the choices
    # returned by installed_actions().
    WHEN_INSTALLED_PROVIDES_CHOICES_FOR = {
        timing_phases.CorpTurnActions: 'installed_actions',
    }
    def build_actions(self):
        asset.Asset.build_actions(self)
        # Reusable "advance this card" action offered while installed.
        self._advance_action = actions.AdvanceCard(self.game, self.player, self)
    def installed_actions(self):
        # The only extra action while installed: advance the card.
        return [self._advance_action]
    def on_access(self):
        asset.Asset.on_access(self)
        # Ambush trigger: only fires if installed and advanced at least once.
        if self.is_installed and self.advancement_tokens:
            phase = DecideCard01087(
                self.game, self.player, self.advancement_tokens)
            self.game.insert_next_phase(phase)
            phase.begin()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.