text stringlengths 8 6.05M |
|---|
from _typeshed import Incomplete
from collections.abc import Generator
# Type stub for the ISMAGS subgraph-isomorphism matcher.  Attribute and
# return types are not yet precisely annotated ("Incomplete" placeholders).
class ISMAGS:
    graph: Incomplete
    subgraph: Incomplete
    node_equality: Incomplete
    edge_equality: Incomplete
    # node_match/edge_match are optional node/edge comparison callbacks;
    # cache is an optional pre-computed cache object.
    def __init__(
        self,
        graph,
        subgraph,
        node_match: Incomplete | None = None,
        edge_match: Incomplete | None = None,
        cache: Incomplete | None = None,
    ) -> None: ...
    # Generator of subgraph-to-graph node mappings.
    def find_isomorphisms(
        self, symmetry: bool = True
    ) -> Generator[Incomplete, Incomplete, Incomplete]: ...
    def largest_common_subgraph(
        self, symmetry: bool = True
    ) -> Generator[Incomplete, Incomplete, None]: ...
    def analyze_symmetry(self, graph, node_partitions, edge_colors): ...
    # Boolean queries; note these default symmetry=False, unlike the iterators.
    def is_isomorphic(self, symmetry: bool = False): ...
    def subgraph_is_isomorphic(self, symmetry: bool = False): ...
    def isomorphisms_iter(
        self, symmetry: bool = True
    ) -> Generator[Incomplete, Incomplete, None]: ...
    def subgraph_isomorphisms_iter(self, symmetry: bool = True): ...
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
#############################################################################
# #
# Scripts de configuracion para Server tomar los datos de las #
# peticiones enviadas desde la pagina por medio de un formulario #
# fue construido utilizando varios codigos de python desde los foros y #
# wikis aun esta en desarrollo. #
# Copyright (C) 2013 linuxknow@gmail.com #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/> #
# #
#############################################################################
"""
import os
import sys
import cgi
import BaseHTTPServer
import subprocess
from urlparse import urlparse, parse_qs
class RequestHandler (BaseHTTPServer.BaseHTTPRequestHandler):
    """HTTP handler that collects ISO-build parameters from GET/POST requests
    and (on POST) launches a shell build script.

    NOTE: Python 2 code (print statements, BaseHTTPServer, dict.iteritems).
    """

    def do_POST(self):
        """Read the POST form parameters using the cgi module."""
        form = cgi.FieldStorage(
            fp=self.rfile,
            headers=self.headers,
            environ={'REQUEST_METHOD':'POST',
                     'CONTENT_TYPE':self.headers['Content-Type'],
                     })
        # Defaults for every expected form field.
        arch=""
        di=""
        distro=""
        nombreiso=""
        filesystem=""
        area=""
        repo=""
        desktop=""
        # Begin the response
        self.send_response(200)
        self.end_headers()
        self.wfile.write(self._post_status())
        # Copy each recognised field into its local variable (prints are debug).
        for field in form.keys():
            if field == "arch":
                arch=form[field].value
                print arch
            elif field== "di":
                di=form[field].value
                print di
            elif field == "distro":
                distro=form[field].value
                print distro
            elif field=="area":
                area=form[field].value
                print area
            elif field=="repoextra":
                repo=form[field].value
                print repo
            elif field=="desktop":
                desktop=form[field].value
                print desktop
            elif field=="nombreiso":
                nombreiso=form[field].value
                print nombreiso
            elif field=="filesystem":
                filesystem=form[field].value
                print filesystem
            print "clave=%s valor=%s" % (field, form[field].value)
        # SECURITY(review): raw form values are interpolated into a shell
        # command executed with shell=True -- command-injection risk.  The
        # name 'str' also shadows the builtin.  The argument order here
        # (arch, nombreiso, repo, ...) should be confirmed against
        # configLihuen.sh's expected positional parameters.
        str="cd /usr/src/lihuen/Lihuen/testing;./configLihuen.sh %s %s http://%s/debian %s %s %s %s %s" % (arch,nombreiso,repo,desktop,filesystem,di,area,distro)
        p = subprocess.Popen(str, stdout=subprocess.PIPE, shell=True)
        (output, err) = p.communicate()
        print output
        return

    def do_GET (self):
        """Read the request parameters from the GET query string."""
        parametros_from_get=parse_qs(urlparse(self.path).query)
        # Flatten single-element value lists so each key maps to a plain string.
        """ Los parametros los pasamos a un diccionario para tener un string simple"""
        parametros_to_string=dict((clave,valor if len(valor)>1 else valor[0] )
            for clave,valor in parametros_from_get.iteritems() )
        # Debug dump of the received parameters.
        """ Imprimimos los valores para debug"""
        for clave, valor in parametros_to_string.items():
            print "%s=%s" % (clave, valor)
        # Reply 200 OK with the current load-average status line.
        """ Enviamos un 200 ok"""
        self.send_response(200)
        self.end_headers()
        self.wfile.write(self._get_status())
        return

    def _post_status (self):
        """Status line returned to POST clients (current load average)."""
        return "Procesamiento en proceso: %s" % ("%01.2f, %01.2f, %01.2f" % os.getloadavg())

    def _get_status (self):
        """Multi-line status body returned to GET clients (load average)."""
        return "Status:\n" \
            "--\n" \
            "Load average: %s\n" % \
            ("%01.2f, %01.2f, %01.2f" % \
            os.getloadavg())
def main (args):
    """Serve build requests on localhost:8000 until interrupted."""
    server = BaseHTTPServer.HTTPServer(('localhost', 8000), RequestHandler)
    server.serve_forever()

if __name__ == "__main__":
    sys.exit(main(sys.argv))
|
import scraperwiki
import lxml.html
import datetime
"""Scrapes Yahoo Finance Stock Page and builds data set for daily \
stock information. Modify HTML as needed for different stock pages."""
#Website definition
html = scraperwiki.scrape("http://finance.yahoo.com/q?s=NYX")
root = lxml.html.fromstring(html)
def storeData(websiteData):
    """Persist one scraped data row into the 'storage' sqlite table.

    The table is created on first use; on later runs creation fails and the
    row is simply appended.  (The original also defined an unused local
    'lastNum', removed here.)
    """
    try:
        scraperwiki.sqlite.execute("""\
create table storage (id INTEGER PRIMARY KEY AUTOINCREMENT)""")
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed; print() form works on Python 2 and 3.
        print("Table already exists.")
    scraperwiki.sqlite.save(unique_keys=[],
                            data=websiteData,
                            table_name='storage')
def parenthesisCutter(string):
    """Split a value like '1.50 (2.00%)' into ('1.50 ', '2.00%').

    Leading spaces are stripped; part1 is everything before the first '(',
    part2 is the text between the first '(' and the first ')'.
    """
    # lstrip replaces the original character-by-character while-loop, and is
    # also safe on an empty string (the loop raised IndexError on '').
    string = string.lstrip(' ')
    firstplace = string.find('(')
    secondplace = string.find(')')
    part1 = string[0:firstplace]
    part2 = string[firstplace + 1:secondplace]
    return (part1, part2)
def remX(string):
    """Split a bid/ask value like '12.50 x 300' into ('12.50 ', '300').

    part1 is everything before the first 'x'; part2 starts two characters
    after it (skipping the 'x' and the following space).
    """
    # lstrip replaces the manual while-loop strip and is safe on ''.
    string = string.lstrip(' ')
    firstplace = string.find('x')
    part1 = string[0:firstplace]
    part2 = string[firstplace + 2:]
    return (part1, part2)
def remsign(string):
    """Split a range like '52.30 - 57.10' into ('52.30 ', '57.10').

    part1 is everything before the first '-'; part2 starts two characters
    after it (skipping the '-' and the following space).
    NOTE: assumes the separator is ' - ' and that the first number is not
    itself negative (find() locates the first '-').
    """
    # lstrip replaces the manual while-loop strip and is safe on ''.
    string = string.lstrip(' ')
    firstplace = string.find('-')
    part1 = string[0:firstplace]
    part2 = string[firstplace + 2:]
    return (part1, part2)
# Data: assemble one record of current quote data read positionally from the
# Yahoo Finance quote table (td.yfnc_tabledata1 cells).
info = {}
info['exchange'] = 'NYSE'
info['ticker'] = 'NYX'
info['accessTime'] = datetime.datetime.now()
# Last trade price.
info['close'] = float(root.cssselect("span.time_rtq_ticker")[0].text_content())
# Change is displayed as "<abs> (<pct>)".
hold_change = parenthesisCutter(str(root.cssselect("span.up_g")[0].text_content()))
info['change'] = float(hold_change[0])
info['percentChange'] = hold_change[1]
info['prevClose'] = float(root.cssselect("td.yfnc_tabledata1")[0].text_content())
info['open'] = float(root.cssselect("td.yfnc_tabledata1")[1].text_content())
# Bid/ask are displayed as "<price> x <size>".
hold_bid = remX(str(root.cssselect("td.yfnc_tabledata1")[2].text_content()))
hold_ask = remX(str(root.cssselect("td.yfnc_tabledata1")[3].text_content()))
info['bid'] = float(hold_bid[0])
info['bid_quant'] = int(hold_bid[1])
info['ask'] = float(hold_ask[0])
info['ask_quant'] = int(hold_ask[1])
info['1yrTarEst'] = float(str(root.cssselect("td.yfnc_tabledata1")[4].text_content()))
# NOTE(review): 'beta' reads cell [4] again -- the same cell as 1yrTarEst.
# A different index was probably intended; confirm against the page layout.
info['beta'] = float(str(root.cssselect("td.yfnc_tabledata1")[4].text_content()))
# Day's range "<low> - <high>".
hold_spread = remsign(str(root.cssselect("td.yfnc_tabledata1")[7].text_content()))
info['spreadMax'] = float(hold_spread[0])
info['spreadMin'] = float(hold_spread[1])
# NOTE(review): the 52-week range below OVERWRITES the spreadMax/spreadMin
# values just set -- distinct keys (e.g. '52wkMax'/'52wkMin') look intended.
hold_52wk = remsign(str(root.cssselect("td.yfnc_tabledata1")[8].text_content()))
info['spreadMax'] = float(hold_52wk[0])
info['spreadMin'] = float(hold_52wk[1])
# Volumes contain thousands separators; strip the commas before int().
info['volume'] = int(root.cssselect("td.yfnc_tabledata1")[9].text_content().replace(",", ""))
info['avVolume'] = int(root.cssselect("td.yfnc_tabledata1")[10].text_content().replace(",", ""))
info['markcap'] = str(root.cssselect("td.yfnc_tabledata1")[11].text_content())
info['pe'] = float(root.cssselect("td.yfnc_tabledata1")[12].text_content())
info['eps'] = float(root.cssselect("td.yfnc_tabledata1")[13].text_content())
# Dividend/yield is displayed as "<div> (<yield>)".
hold_divyld = parenthesisCutter(str(root.cssselect("td.yfnc_tabledata1")[14].text_content()))
info['div'] = float(hold_divyld[0])
info['yield'] = hold_divyld[1]
print info
storeData(info)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# tadretangulo.py
import tadreta
import tadponto
def criarVtx(xsupE, ysupE, xinfD, yinfD):
    """Create a rectangle TAD from its top-left and bottom-right corners.

    Returns [[xsupE, ysupE], [xinfD, yinfD]].  (The original computed the
    width and height into unused locals; they are removed here.)
    """
    return [[xsupE, ysupE], [xinfD, yinfD]]
#
def criarDim(xsupE, ysupE, larg, alt):
    """Create a rectangle TAD from its top-left corner plus width and height."""
    canto_superior = [xsupE, ysupE]
    canto_inferior = [xsupE + larg, ysupE + alt]
    return [canto_superior, canto_inferior]
#
# Receives a rectangle TAD and returns a list of point TADs with the
# top-left and bottom-right corners, respectively.
def getCantos(paramTadRet):
    """Return [top-left, bottom-right] as tadponto points."""
    # NOTE(review): indexes [0] and [3] assume a 4-corner representation,
    # but criarVtx/criarDim build 2-corner lists (indexes 0 and 1 only) --
    # confirm which representation callers pass.
    cse = paramTadRet[0]
    cid = paramTadRet[3]
    return [tadponto.criar(cse[0], cse[1]), tadponto.criar(cid[0], cid[1])]
#
def _getLados(paramTadRet):
    """Return (base, altura): distances from corner 0 to corners 1 and 2.

    NOTE(review): indexing [1] and [2] assumes a 4-corner representation;
    the 2-corner rectangles built by criarVtx only have indexes 0 and 1 --
    confirm against callers.
    """
    base = tadponto.distancia(paramTadRet[0], paramTadRet[1])
    altura = tadponto.distancia(paramTadRet[0], paramTadRet[2])
    return base, altura
#
# Returns a float with the perimeter value.
def perimetro(paramTadRet):
    """Perimeter: twice the base plus twice the height."""
    lados = _getLados(paramTadRet)
    return lados[0] * 2 + lados[1] * 2
#
def area(paramTadRet):
    """Return the area of the rectangle.

    Bug fix: the original returned (base * altura) / 2, which is the area
    of a *triangle*; a rectangle's area is simply base * altura.
    """
    base, altura = _getLados(paramTadRet)
    return base * altura
#
def igual(paramTadRetA, paramTadRetB):
    """True when every corner of A equals the corresponding corner of B.

    Iterates over A's indexes, so B may have extra trailing corners that are
    ignored (matching the original behaviour).
    """
    return all(
        paramTadRetA[i] == paramTadRetB[i]
        for i in range(len(paramTadRetA))
    )
#
# Translates the rectangle (its origin) by dx on the x coordinate and
# dy on the y coordinate, in place.
def move(paramTadRet, dx, dy):
    """Shift both stored corners of the rectangle by (dx, dy)."""
    for canto in (paramTadRet[0], paramTadRet[1]):
        canto[0] += dx
        canto[1] += dy
#
# Checks whether a given point lies strictly inside a rectangle.
def ponto_in_ret(retangulo, ponto):
    """True when ponto is strictly between the rectangle's two corners
    (points on the border are NOT inside)."""
    cse, cid = retangulo[0], retangulo[1]
    x, y = ponto[0], ponto[1]
    return cse[0] < x < cid[0] and cse[1] < y < cid[1]
#
#
def intersec(retA, retB):
    """Intersection of rectangles A and B as [top-left, bottom-right].

    Works by case analysis on which corners of B fall strictly inside A
    (via the supEsq/supDir/infEsq/infDir helpers).  Falls through and
    implicitly returns None when no corner of B is inside A -- e.g.
    disjoint rectangles, shared borders, or B fully containing A.
    """
    # Derive all four corners of B from its two stored corners:
    # top-left, top-right, bottom-left, bottom-right.
    cseB = retB[0]
    csdB = [retB[1][0], retB[0][1]]
    cieB = [retB[0][0], retB[1][1]]
    cidB = retB[1]
    # Same for A.
    cseA = retA[0]
    csdA = [retA[1][0], retA[0][1]]
    cieA = [retA[0][0], retA[1][1]]
    cidA = retA[1]
    if supEsq(retA, cseB) and supDir(retA, csdB) and infEsq(retA, cieB) and infDir(retA, cidB):
        # All four corners of B inside A: B itself is the intersection.
        return retB
    elif infDir(retA, cidB) and infEsq(retA, cieB):
        # Both bottom corners of B inside A.
        return [[cseB[0], cseA[1]], cidB]
    elif supEsq(retA, cseB) and supDir(retA, csdB):
        # Both top corners of B inside A.
        return [cseB, [csdB[0], cieB[1]]]
    elif supEsq(retA, cseB) and infEsq(retA, cieB):
        # Both left corners of B inside A.
        return [cseB, [csdA[0], cieB[1]]]
    elif supDir(retA, csdB) and infDir(retA, cidB):
        # Both right corners of B inside A.
        return [[cseA[0], csdB[1]], cidB]
    elif supEsq(retA, cseB):
        # Single corner overlaps: B's top-left inside A.
        return [cseB, cidA]
    elif supDir(retA, csdB):
        # B's top-right inside A.
        return [[cseA[0], csdB[1]],[csdB[0], cidA[1]]]
    elif infDir(retA, cidB):
        # B's bottom-right inside A.
        return [cseA, cidB]
    elif infEsq(retA, cieB):
        # B's bottom-left inside A.
        return [[cseB[0], cseA[1]],[csdA[0], cieB[1]]]
#
#
#~ def intersec(paramTadRetA, paramTadRetB):
#~ cseA = paramTadRetA[0]; cseB = paramTadRetB[0]
#~ csdA = paramTadRetA[1]; csdB = paramTadRetB[1]
#~ cieA = paramTadRetA[2]; cieB = paramTadRetB[2]
#~ cidA = paramTadRetA[3]; cidB = paramTadRetB[3]
#~ tp = 0
#~
#~ if (supEsq(paramTadRetA, paramTadRetB) and supDir(paramTadRetA, paramTadRetB)
#~ and infEsq(paramTadRetA, paramTadRetB) and infDir(paramTadRetA, paramTadRetB)):
#~ #print(cseA)
#~ tp = paramTadRetB
#~ print("1 o/")
#~ elif (infDir(paramTadRetA, paramTadRetB) and not supDir(paramTadRetA, paramTadRetB) and not infEsq(paramTadRetA, paramTadRetB)
#~ and not supEsq(paramTadRetA, paramTadRetB)):
#~ tp = criarVtx(cseA[0], cseA[1], cidB[0], cidB[1]) # (cidB, cseA)
#~ print("2")
#~ elif (infEsq(paramTadRetA, paramTadRetB) and not infDir(paramTadRetA, paramTadRetB) and not supDir(paramTadRetA, paramTadRetB)
#~ and not supEsq(paramTadRetA, paramTadRetB)):
#~ xsupE = cieB[0]
#~ ysupE = csdA[1]
#~
#~ larg = csdA[0] - xsupE; alt = cieB[1] - ysupE
#~
#~ xinfD = larg + xsupE
#~ yinfD = alt + ysupE
#~
#~ tp = criarVtx(xsupE, ysupE, xinfD, yinfD) #(cieB, csdA)
#~ print("3")
#~ elif (supDir(paramTadRetA, paramTadRetB) and not infEsq(paramTadRetA, paramTadRetB) and not infDir(paramTadRetA, paramTadRetB)
#~ and not supEsq(paramTadRetA, paramTadRetB)):
#~ xsupE = cieA[0]
#~ ysupE = csdB[1]
#~
#~ larg = csdB[0] - xsupE; alt = cieA[1] - ysupE
#~
#~ xinfD = larg + xsupE
#~ yinfD = alt + ysupE
#~
#~ tp = criarVtx(xsupE, ysupE, xinfD, yinfD) #(csdB, cieA)
#~ print("4")
#~ elif (supEsq(paramTadRetA, paramTadRetB) and not infEsq(paramTadRetA, paramTadRetB) and not infDir(paramTadRetA, paramTadRetB)
#~ and not supDir(paramTadRetA, paramTadRetB)):
#~ tp = criarVtx(cseB[0], cseB[1], cidA[0], cidA[1]) #(cseB, cieA)
#~ print("5")
#~ elif supEsq(paramTadRetA, paramTadRetB) and not infDir(paramTadRetA, paramTadRetB) and not supDir(paramTadRetA, paramTadRetB):
#~ tp = criarVtx(cseB[0], cseB[1], cidA[0], cidB[1])
#~ #print(tp)
#~ tp = criarVtx(cseB[0], cseB[1], csdA[0], cieB[1])
#~ print("HAHAHAHAH")
#~ elif supDir(paramTadRetA, paramTadRetB) and infDir(paramTadRetA, paramTadRetB): # and not infEsq(paramTadRetA, paramTadRetB):
#~ tp = criarVtx(cseA[0], csdB[1], cidB[0], cidB[1])
#~ print("LALALAL")
#~ return tp
#~ #
#~ def intersec(paramTadRetA, paramTadRetB):
#~ cseA = paramTadRetA[0]; cseB = paramTadRetB[0]
#~ csdA = paramTadRetA[1]; csdB = paramTadRetB[1]
#~ cieA = paramTadRetA[2]; cieB = paramTadRetB[2]
#~ cidA = paramTadRetA[3]; cidB = paramTadRetB[3]
#~ tp = -1
#~
#~ if (supEsq(paramTadRetA, paramTadRetB) and supDir(paramTadRetA, paramTadRetB)
#~ and infEsq(paramTadRetA, paramTadRetB) and infDir(paramTadRetA, paramTadRetB)):
#~ print(cseA)
#~ print("1 o/")
#~ return [paramTadRetB[0], paramTadRetB[3]]
#~ elif (infDir(paramTadRetA, paramTadRetB) and not infEsq(paramTadRetA, paramTadRetB) and not supEsq(paramTadRetA, paramTadRetB)
#~ and not supDir(paramTadRetA, paramTadRetB)):
#~ print("2")
#~ return [paramTadRetA[0], paramTadRetB[3]] # (cidB, cseA)
#~ elif (infEsq(paramTadRetA, paramTadRetB) and not infDir(paramTadRetA, paramTadRetB) and not supEsq(paramTadRetA, paramTadRetB)
#~ and not supDir(paramTadRetA, paramTadRetB)):
#~ xsupE = cieB[0]
#~ ysupE = csdA[1]
#~
#~ larg = csdA[0] - xsupE; alt = cieB[1] - ysupE
#~
#~ xinfD = larg + xsupE
#~ yinfD = alt + ysupE
#~
#~ return [paramTadRetB[2], paramTadRetA[1]] #(cieB, csdA)
#~ print("3")
#~ elif (cieA[0] < csdB[0] and cieA[1] > csdB[1]) and (cieB[0] < cieA[0] and cieB[1] > cieA[1]):
#~ xsupE = cieA[0]
#~ ysupE = csdB[1]
#~
#~ larg = csdB[0] - xsupE; alt = cieA[1] - ysupE
#~
#~ xinfD = larg + xsupE
#~ yinfD = alt + ysupE
#~
#~ return criarVtx(xsupE, ysupE, xinfD, yinfD) #(csdB, cieA)
#~ print("4")
#~ elif (cidA[0] > cseB[0] and cidA[1] > cseB[1]) and (cidA[0] < cidB[0] and cidA[1] < cidB[1]):
#~ return criarVtx(cseB[0], cseB[1], cidA[0], cidA[1]) #(cseB, cieA)
#~ print("5")
#~ elif supEsq(paramTadRetA, paramTadRetB) and not infDir(paramTadRetA, paramTadRetB) and not supDir(paramTadRetA, paramTadRetB):
#~ tp = criarVtx(cseB[0], cseB[1], cidA[0], cidB[1])
#~ #print(tp)
#~ return criarVtx(cseB[0], cseB[1], csdA[0], cieB[1])
#~ print("HAHAHAHAH")
#~ elif supDir(paramTadRetA, paramTadRetB) and infDir(paramTadRetA, paramTadRetB): # and not infEsq(paramTadRetA, paramTadRetB):
#~ return criarVtx(cseA[0], csdB[1], cidB[0], cidB[1])
#~ print("LALALAL")
#~ else:
#~ return None
#
#
# Checks whether B's TOP-LEFT corner lies inside rectangle A.
def supEsq(retA, ponto):
    # Thin alias of ponto_in_ret; the name records which corner is tested.
    return ponto_in_ret(retA, ponto)
#
# Checks whether B's TOP-RIGHT corner lies inside rectangle A.
def supDir(retA, ponto):
    # Thin alias of ponto_in_ret; the name records which corner is tested.
    return ponto_in_ret(retA, ponto)
#
# Checks whether B's BOTTOM-LEFT corner lies inside rectangle A.
def infEsq(retA, ponto):
    # Thin alias of ponto_in_ret; the name records which corner is tested.
    return ponto_in_ret(retA, ponto)
#
# Checks whether B's BOTTOM-RIGHT corner lies inside rectangle A.
def infDir(retA, ponto):
    # Thin alias of ponto_in_ret; the name records which corner is tested.
    return ponto_in_ret(retA, ponto)
#
def main():
    """Ad-hoc manual test: build two overlapping rectangles and print the
    intersection, perimeter and area.

    NOTE(review): perimetro/area go through _getLados, which indexes
    paramTadRet[2]; the 2-corner lists built by criarVtx only have indexes
    0 and 1 -- confirm which representation these functions expect.
    """
    ret = criarVtx(10,10,20,20)
    ret2 = criarVtx(0,0,15,15)
    #~ print(getCantos(ret))
    #~ print(igual(ret, ret2))
    #~ print((30,30) > (20,30))
    #~ print(ret)
    #~ print(ret2)
    print(intersec(ret, ret2))
    print("Perímetro: ", perimetro(ret))
    print("Área: ", area(ret))
    return 0

if __name__ == '__main__':
    main()
|
'''
Toy script: pick one random five-digit number, then announce it repeatedly
with a random pause (original note: "will be randomly generated and spoken").
'''
import time
import random as r

t = 0
# One fixed 5-digit number for the whole run.
# NOTE(review): if a fresh number per iteration was intended (as the module
# note hints), this call would need to move inside the loop -- confirm.
rNumber = r.randint(10000, 99999)
while t < 1000000:
    t = t + 1
    # Sleep a random 0-60 seconds between announcements.
    rWait = r.randint(0, 60)
    print('Number is', rNumber)
    time.sleep(rWait)
|
import cv2
import numpy as np
from matplotlib import pyplot as plt##optional
from imglib import *#optional
from motion import *#optional
# Template-match digit images against the grid photo and fill in cell values.
def detectCellVal(img_gray, grid_map):
    """For each digit template 0..1, find all matches in img_gray and write
    the digit into the corresponding cell of grid_map; returns grid_map.

    NOTE(review): relies on globals m, n and getcoor(), presumably provided
    by the star imports from imglib/motion -- confirm.  Also draws a
    rectangle over each match, mutating img_gray in place.
    """
    for i in range(0,2):
        # print i
        # Templates are stored as digits/<i>.jpg.
        imgname='digits/'+str(i)+'.jpg'
        template = cv2.imread(imgname)
        temp_gray = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
        w, h = temp_gray.shape[::-1]
        # print w,h
        res = cv2.matchTemplate(img_gray,temp_gray,cv2.TM_CCOEFF_NORMED)
        # Empirically tuned match threshold ("change this to match").
        threshold = 0.400
        # if i==1:
        #     threshold=0.3500
        loc = np.where( res >= threshold)
        for pt in zip(*loc[::-1]):
            # print pt
            # Map the match centre pixel to grid coordinates (x, y).
            x,y=getcoor(pt[0]+w/2,pt[1]+h/2,m,n)
            grid_map[y][x]=i;
            cv2.rectangle(img_gray, pt, (pt[0] + w, pt[1] + h), (0,0,255), 2)
    # cv2.imshow('num',img_gray)
    # cv2.waitKey()
    # plus sign detection
    return grid_map
|
from sisyphus import *
import gzip
from recipe.lib.corpus import Corpus
class BlissExtractRawText(Job):
    """
    Extract the text from a Bliss corpus into a raw gzipped text file.
    """

    def __init__(self, corpus, segments=None, segment_key_only=True):
        """
        :param corpus: path to the bliss corpus file
        :param segments: optional path to a (possibly gzipped) segment list;
            when given, only the listed segments are extracted
        :param segment_key_only: stored but not used in run() --
            TODO(review) confirm intended use
        """
        self.corpus_path = corpus
        self.out = self.output_path("text.gz")
        self.segments_file_path = segments
        self.segment_key_only = segment_key_only

    def tasks(self):
        yield Task('run', mini_task=True)

    def run(self):
        import gzip
        corpus = Corpus()
        corpus.load(tk.uncached_path(self.corpus_path))

        # Optional whitelist of full segment keys "corpus/recording/segment".
        segments = None
        if self.segments_file_path:
            seg_path = tk.uncached_path(self.segments_file_path)
            # Bug fix: the original opened the plain file in text mode and then
            # called line.decode(), which fails on str.  Open both variants in
            # text mode and strip directly.
            if seg_path.endswith("gz"):
                segment_file = gzip.open(seg_path, "rt")
            else:
                segment_file = open(seg_path, "rt")
            # A set gives O(1) membership tests in the loop below.
            segments = set(line.strip() for line in segment_file)
            segment_file.close()

        outfile = gzip.open(tk.uncached_path(self.out), "wt")
        for recording in corpus.all_recordings():
            for segment in recording.segments:
                full_segment_key = "/".join([corpus.name, recording.name, segment.name])
                # An empty/absent whitelist means "keep everything"
                # (matching the original truthiness check).
                if segments and full_segment_key not in segments:
                    continue
                outfile.write(segment.orth.strip() + "\n")
        outfile.close()
import sunspec2.mb as mb
import pytest
def test_create_unimpl_value():
    """create_unimpl_value encodes each type's 'not implemented' marker."""
    # Missing type, and 'string' without an explicit length, are rejected.
    with pytest.raises(ValueError):
        mb.create_unimpl_value(None)
    with pytest.raises(ValueError):
        mb.create_unimpl_value('string')
    # Strings and addresses are zero-filled; signed ints mark with the
    # minimum value (0x8000...); unsigned/enum/bitfield with all-ones;
    # accumulators with zero.
    assert mb.create_unimpl_value('string', len=8) == b'\x00\x00\x00\x00\x00\x00\x00\x00'
    assert mb.create_unimpl_value('ipv6addr') == b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
    assert mb.create_unimpl_value('int16') == b'\x80\x00'
    assert mb.create_unimpl_value('uint16') == b'\xff\xff'
    assert mb.create_unimpl_value('acc16') == b'\x00\x00'
    assert mb.create_unimpl_value('enum16') == b'\xff\xff'
    assert mb.create_unimpl_value('bitfield16') == b'\xff\xff'
    assert mb.create_unimpl_value('int32') == b'\x80\x00\x00\x00'
    assert mb.create_unimpl_value('uint32') == b'\xff\xff\xff\xff'
    assert mb.create_unimpl_value('acc32') == b'\x00\x00\x00\x00'
    assert mb.create_unimpl_value('enum32') == b'\xff\xff\xff\xff'
    assert mb.create_unimpl_value('bitfield32') == b'\xff\xff\xff\xff'
    assert mb.create_unimpl_value('ipaddr') == b'\x00\x00\x00\x00'
    assert mb.create_unimpl_value('int64') == b'\x80\x00\x00\x00\x00\x00\x00\x00'
    assert mb.create_unimpl_value('uint64') == b'\xff\xff\xff\xff\xff\xff\xff\xff'
    assert mb.create_unimpl_value('acc64') == b'\x00\x00\x00\x00\x00\x00\x00\x00'
    assert mb.create_unimpl_value('float32') == b'N\xff\x80\x00'
    assert mb.create_unimpl_value('sunssf') == b'\x80\x00'
    assert mb.create_unimpl_value('eui48') == b'\x00\x00\xff\xff\xff\xff\xff\xff'
    assert mb.create_unimpl_value('pad') == b'\x00\x00'
# --- Round-trip codec tests: big-endian register data <-> Python values ---

def test_data_to_s16():
    assert mb.data_to_s16(b'\x13\x88') == 5000

def test_data_to_u16():
    assert mb.data_to_u16(b'\x27\x10') == 10000

def test_data_to_s32():
    # Positive and negative (two's complement) cases.
    assert mb.data_to_s32(b'\x12\x34\x56\x78') == 305419896
    assert mb.data_to_s32(b'\xED\xCB\xA9\x88') == -305419896

def test_data_to_u32():
    assert mb.data_to_u32(b'\x12\x34\x56\x78') == 305419896

def test_data_to_s64():
    assert mb.data_to_s64(b'\x12\x34\x56\x78\x12\x34\x56\x78') == 1311768465173141112
    assert mb.data_to_s64(b'\xED\xCB\xA9\x87\xED\xCB\xA9\x88') == -1311768465173141112

def test_data_to_u64():
    assert mb.data_to_u64(b'\xff\xff\xff\xff\xff\xff\xff\xff') == 18446744073709551615

def test_data_to_ipv6addr():
    # 16 raw bytes render as four colon-separated 32-bit hex groups.
    assert mb.data_to_ipv6addr(b'\x20\x01\x0d\xb8\x85\xa3\x00\x00\x00\x00\x8a\x2e\x03\x70\x73\x34') == '20010DB8:85A30000:00008A2E:03707334'

def test_data_to_eui48():
    # need test to test for python 2
    # The leading two pad bytes are dropped from the MAC representation.
    assert mb.data_to_eui48(b'\x00\x00\x12\x34\x56\x78\x90\xAB') == '12:34:56:78:90:AB'

def test_data_to_f64():
    assert mb.data_to_f64(b'\x44\x9a\x43\xf3\x00\x00\x00\x00') == 3.1008742600725133e+22

def test_data_to_str():
    assert mb.data_to_str(b'test') == 'test'
    assert mb.data_to_str(b'444444') == '444444'

def test_s16_to_data():
    assert mb.s16_to_data(5000) == b'\x13\x88'

def test_u16_to_data():
    assert mb.u16_to_data(10000) == b'\x27\x10'

def test_s32_to_data():
    assert mb.s32_to_data(305419896) == b'\x12\x34\x56\x78'
    assert mb.s32_to_data(-305419896) == b'\xED\xCB\xA9\x88'

def test_u32_to_data():
    assert mb.u32_to_data(305419896) == b'\x12\x34\x56\x78'

def test_s64_to_data():
    assert mb.s64_to_data(1311768465173141112) == b'\x12\x34\x56\x78\x12\x34\x56\x78'
    assert mb.s64_to_data(-1311768465173141112) == b'\xED\xCB\xA9\x87\xED\xCB\xA9\x88'

def test_u64_to_data():
    assert mb.u64_to_data(18446744073709551615) == b'\xff\xff\xff\xff\xff\xff\xff\xff'

def test_ipv6addr_to_data():
    assert mb.ipv6addr_to_data('20010DB8:85A30000:00008A2E:03707334') == \
        b'\x20\x01\x0d\xb8\x85\xa3\x00\x00\x00\x00\x8a\x2e\x03\x70\x73\x34'
    # need additional test to test for python 2

def test_f32_to_data():
    assert mb.f32_to_data(32500.43359375) == b'F\xfd\xe8\xde'

def test_f64_to_data():
    assert mb.f64_to_data(3.1008742600725133e+22) == b'\x44\x9a\x43\xf3\x00\x00\x00\x00'

def test_str_to_data():
    assert mb.str_to_data('test') == b'test'
    assert mb.str_to_data('444444') == b'444444'
    # An explicit length pads the encoded string with NUL bytes.
    assert mb.str_to_data('test', 5) == b'test\x00'

def test_eui48_to_data():
    assert mb.eui48_to_data('12:34:56:78:90:AB') == b'\x00\x00\x12\x34\x56\x78\x90\xAB'
# --- is_impl_* tests: each type's "unimplemented" sentinel is rejected,
# --- ordinary values are accepted ---

def test_is_impl_int16():
    assert not mb.is_impl_int16(-32768)
    assert mb.is_impl_int16(1111)
    # NOTE(review): asserting that None counts as *implemented* looks odd
    # compared with the other types -- confirm this is the intended contract.
    assert mb.is_impl_int16(None)

def test_is_impl_uint16():
    assert not mb.is_impl_uint16(0xffff)
    assert mb.is_impl_uint16(0x1111)

def test_is_impl_acc16():
    # Accumulators use zero as the unimplemented marker.
    assert not mb.is_impl_acc16(0)
    assert mb.is_impl_acc16(1111)

def test_is_impl_enum16():
    assert not mb.is_impl_enum16(0xffff)
    assert mb.is_impl_enum16(0x1111)

def test_is_impl_bitfield16():
    assert not mb.is_impl_bitfield16(0xffff)
    assert mb.is_impl_bitfield16(0x1111)

def test_is_impl_int32():
    assert not mb.is_impl_int32(-2147483648)
    assert mb.is_impl_int32(1111111)

def test_is_impl_uint32():
    assert not mb.is_impl_uint32(0xffffffff)
    assert mb.is_impl_uint32(0x11111111)

def test_is_impl_acc32():
    assert not mb.is_impl_acc32(0)
    assert mb.is_impl_acc32(1)

def test_is_impl_enum32():
    assert not mb.is_impl_enum32(0xffffffff)
    assert mb.is_impl_enum32(0x11111111)

def test_is_impl_bitfield32():
    assert not mb.is_impl_bitfield32(0xffffffff)
    assert mb.is_impl_bitfield32(0x11111111)

def test_is_impl_ipaddr():
    assert not mb.is_impl_ipaddr(0)
    assert mb.is_impl_ipaddr('192.168.0.1')

def test_is_impl_int64():
    assert not mb.is_impl_int64(-9223372036854775808)
    assert mb.is_impl_int64(111111111111111)

def test_is_impl_uint64():
    assert not mb.is_impl_uint64(0xffffffffffffffff)
    assert mb.is_impl_uint64(0x1111111111111111)

def test_is_impl_acc64():
    assert not mb.is_impl_acc64(0)
    assert mb.is_impl_acc64(1)

def test_is_impl_ipv6addr():
    # A NUL string marks an unimplemented ipv6addr.
    assert not mb.is_impl_ipv6addr('\0')
    assert mb.is_impl_ipv6addr(b'\x20\x01\x0d\xb8\x85\xa3\x00\x00\x00\x00\x8a\x2e\x03\x70\x73\x34')

def test_is_impl_float32():
    assert not mb.is_impl_float32(None)
    assert mb.is_impl_float32(0x123456)

def test_is_impl_string():
    assert not mb.is_impl_string('\0')
    assert mb.is_impl_string(b'\x74\x65\x73\x74')

def test_is_impl_sunssf():
    assert not mb.is_impl_sunssf(-32768)
    assert mb.is_impl_sunssf(30000)

def test_is_impl_eui48():
    # All-ones MAC is the unimplemented marker; all-zeros counts as set.
    assert not mb.is_impl_eui48('FF:FF:FF:FF:FF:FF')
    assert mb.is_impl_eui48('00:00:00:00:00:00')
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013-2015 Marcos Organizador de Negocios SRL http://marcos.do
# Write by Eneldo Serrata (eneldo@marcos.do)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
class rim_stock_history(osv.osv):
    """Read-only stock history (one row per quant entering/leaving an
    internal or transit location on inter-company moves), with inventory
    valuation computed from the quant cost ('real' costing) or from the
    product's historical standard price."""
    _name = 'rim.stock.history'
    # Backed by the SQL view created in init(), not an ORM-managed table.
    _auto = False
    _order = 'date asc'

    def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
        """Extend read_group so grouped rows also aggregate the
        'inventory_value' function field, which SQL cannot sum itself.

        Valuation date is taken from context['history_date'] when present.
        """
        res = super(rim_stock_history, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby, lazy=lazy)
        if context is None:
            context = {}
        date = context.get('history_date')
        # Cache of product_id -> historical price, shared across groups.
        prod_dict = {}
        if 'inventory_value' in fields:
            for line in res:
                if '__domain' in line:
                    # Re-query the lines of this group and sum their value.
                    lines = self.search(cr, uid, line['__domain'], context=context)
                    inv_value = 0.0
                    product_tmpl_obj = self.pool.get("product.template")
                    lines_rec = self.browse(cr, uid, lines, context=context)
                    for line_rec in lines_rec:
                        if line_rec.product_id.cost_method == 'real':
                            # Real costing: use the quant's recorded cost.
                            price = line_rec.price_unit_on_quant
                        else:
                            # Otherwise use the (cached) historical price.
                            if not line_rec.product_id.id in prod_dict:
                                prod_dict[line_rec.product_id.id] = product_tmpl_obj.get_history_price(cr, uid, line_rec.product_id.product_tmpl_id.id, line_rec.company_id.id, date=date, context=context)
                            price = prod_dict[line_rec.product_id.id]
                        inv_value += price * line_rec.quantity
                    line['inventory_value'] = inv_value
        return res

    def _get_inventory_value(self, cr, uid, ids, name, attr, context=None):
        """Function-field getter: value = quantity * unit price (quant cost
        for 'real' costing, historical standard price otherwise)."""
        if context is None:
            context = {}
        date = context.get('history_date')
        product_tmpl_obj = self.pool.get("product.template")
        res = {}
        for line in self.browse(cr, uid, ids, context=context):
            if line.product_id.cost_method == 'real':
                res[line.id] = line.quantity * line.price_unit_on_quant
            else:
                res[line.id] = line.quantity * product_tmpl_obj.get_history_price(cr, uid, line.product_id.product_tmpl_id.id, line.company_id.id, date=date, context=context)
        return res

    _columns = {
        'move_id': fields.many2one('stock.move', 'Stock Move', required=True),
        'location_id': fields.many2one('stock.location', 'Location', required=True),
        'company_id': fields.many2one('res.company', 'Company'),
        'product_id': fields.many2one('product.product', 'Product', required=True),
        'product_categ_id': fields.many2one('product.category', 'Product Category', required=True),
        'quantity': fields.float('Product Quantity'),
        'date': fields.datetime('Operation Date'),
        'price_unit_on_quant': fields.float('Value'),
        'inventory_value': fields.function(_get_inventory_value, string="Inventory Value", type='float', readonly=True),
        'source': fields.char('Source')
    }

    def init(self, cr):
        """(Re)create the backing SQL view: a +quantity row per quant moved
        into an internal/transit location and a -quantity row per quant moved
        out, restricted to moves crossing a company boundary."""
        tools.drop_view_if_exists(cr, 'rim_stock_history')
        cr.execute("""
            CREATE OR REPLACE VIEW rim_stock_history AS (
              SELECT MIN(id) as id,
                move_id,
                location_id,
                company_id,
                product_id,
                product_categ_id,
                SUM(quantity) as quantity,
                date,
                price_unit_on_quant,
                source
                FROM
                ((SELECT
                    stock_move.id::text || '-' || quant.id::text AS id,
                    quant.id AS quant_id,
                    stock_move.id AS move_id,
                    dest_location.id AS location_id,
                    dest_location.company_id AS company_id,
                    stock_move.product_id AS product_id,
                    product_template.categ_id AS product_categ_id,
                    quant.qty AS quantity,
                    stock_move.date AS date,
                    quant.cost as price_unit_on_quant,
                    stock_move.origin AS source
                FROM
                    stock_quant as quant, stock_quant_move_rel, stock_move
                LEFT JOIN
                    stock_location dest_location ON stock_move.location_dest_id = dest_location.id
                LEFT JOIN
                    stock_location source_location ON stock_move.location_id = source_location.id
                LEFT JOIN
                    product_product ON product_product.id = stock_move.product_id
                LEFT JOIN
                    product_template ON product_template.id = product_product.product_tmpl_id
                WHERE stock_move.state = 'done' AND dest_location.usage in ('internal', 'transit') AND stock_quant_move_rel.quant_id = quant.id
                  AND stock_quant_move_rel.move_id = stock_move.id AND ((source_location.company_id is null and dest_location.company_id is not null) or
                  (source_location.company_id is not null and dest_location.company_id is null) or source_location.company_id != dest_location.company_id)
                ) UNION
                (SELECT
                    '-' || stock_move.id::text || '-' || quant.id::text AS id,
                    quant.id AS quant_id,
                    stock_move.id AS move_id,
                    source_location.id AS location_id,
                    source_location.company_id AS company_id,
                    stock_move.product_id AS product_id,
                    product_template.categ_id AS product_categ_id,
                    - quant.qty AS quantity,
                    stock_move.date AS date,
                    quant.cost as price_unit_on_quant,
                    stock_move.origin AS source
                FROM
                    stock_quant as quant, stock_quant_move_rel, stock_move
                LEFT JOIN
                    stock_location source_location ON stock_move.location_id = source_location.id
                LEFT JOIN
                    stock_location dest_location ON stock_move.location_dest_id = dest_location.id
                LEFT JOIN
                    product_product ON product_product.id = stock_move.product_id
                LEFT JOIN
                    product_template ON product_template.id = product_product.product_tmpl_id
                WHERE stock_move.state = 'done' AND source_location.usage in ('internal', 'transit') AND stock_quant_move_rel.quant_id = quant.id
                  AND stock_quant_move_rel.move_id = stock_move.id AND ((dest_location.company_id is null and source_location.company_id is not null) or
                  (dest_location.company_id is not null and source_location.company_id is null) or dest_location.company_id != source_location.company_id)
                ))
                AS foo
                GROUP BY move_id, location_id, company_id, product_id, product_categ_id, date, price_unit_on_quant, source
            )""")
|
#%% [markdown]
# # Recommender systems
#%% [markdown]
# ***Описание задачи***
#
# Небольшой интернет-магазин попросил вас добавить ранжирование товаров в блок "Смотрели ранее" - в нём теперь надо показывать не последние просмотренные пользователем товары, а те товары из просмотренных, которые он наиболее вероятно купит. Качество вашего решения будет оцениваться по количеству покупок в сравнении с прошлым решением в ходе А/В теста, т.к. по доходу от продаж статзначимость будет достигаться дольше из-за разброса цен. Таким образом, ничего заранее не зная про корреляцию оффлайновых и онлайновых метрик качества, в начале проекта вы можете лишь постараться оптимизировать recall@k и precision@k.
#
# Это задание посвящено построению простых бейзлайнов для этой задачи: ранжирование просмотренных товаров по частоте просмотров и по частоте покупок. Эти бейзлайны, с одной стороны, могут помочь вам грубо оценить возможный эффект от ранжирования товаров в блоке - например, чтобы вписать какие-то числа в коммерческое предложение заказчику, а с другой стороны, могут оказаться самым хорошим вариантом, если данных очень мало (недостаточно для обучения даже простых моделей).
#%% [markdown]
# ***Входные данные***
#
# Вам дается две выборки с пользовательскими сессиями - id-шниками просмотренных и id-шниками купленных товаров. Одна выборка будет использоваться для обучения (оценки популярностей товаров), а другая - для теста.
#
# В файлах записаны сессии по одной в каждой строке. Формат сессии: id просмотренных товаров через , затем идёт ; после чего следуют id купленных товаров (если такие имеются), разделённые запятой. Например, 1,2,3,4; или 1,2,3,4;5,6.
#
# Гарантируется, что среди id купленных товаров все различные.
#%% [markdown]
# ***Важно:***
#
# Сессии, в которых пользователь ничего не купил, исключаем из оценки качества.
# Если товар не встречался в обучающей выборке, его популярность равна 0.
# Рекомендуем разные товары. И их число должно быть не больше, чем количество различных просмотренных пользователем товаров.
# Рекомендаций всегда не больше, чем минимум из двух чисел: количество просмотренных пользователем товаров и k в recall@k / precision@k.
#%% [markdown]
# ***Задание***
#
# 1. На обучении постройте частоты появления id в просмотренных и в купленных (id может несколько раз появляться в просмотренных, все появления надо учитывать)
# 2. Реализуйте два алгоритма рекомендаций:
# * сортировка просмотренных id по популярности (частота появления в просмотренных),
# * сортировка просмотренных id по покупаемости (частота появления в покупках).
# 3. Для данных алгоритмов выпишите через пробел AverageRecall@1, AveragePrecision@1, AverageRecall@5, AveragePrecision@5 на обучающей и тестовых выборках, округляя до 2 знака после запятой. Это будут ваши ответы в этом задании. Посмотрите, как они соотносятся друг с другом. Где качество получилось выше? Значимо ли это различие? Обратите внимание на различие качества на обучающей и тестовой выборке в случае рекомендаций по частотам покупки.
#
# Если частота одинаковая, то сортировать нужно по возрастанию момента просмотра (чем раньше появился в просмотренных, тем больше приоритет)
#%%
from __future__ import division, print_function
import numpy as np
import pandas as pd
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
#%% [markdown]
# #### 1. Reading sessions train and test datasets.
#%%
import os
# Show where we are running from, to help debug the absolute paths below.
cwd = os.getcwd()   # Get the current working directory (cwd)
files = os.listdir(cwd)   # Get all the files in that directory
print("Files in '%s': %s" % (cwd, files))
#%%
# Reading train and test data
# NOTE(review): paths are hard-coded to one machine's home directory —
# presumably fine for this homework notebook, but not portable.
with open('/home/hq/git/ML_Yandex_MIPT/5 Data analysis applications/Homework/Week 4/coursera_sessions_train.txt', 'r') as file:
    sess_train = file.read().splitlines()
with open('/home/hq/git/ML_Yandex_MIPT/5 Data analysis applications/Homework/Week 4/coursera_sessions_test.txt', 'r') as file:
    sess_test = file.read().splitlines()
#%% [markdown]
# #### 2. Split datasets by looks and purchases.
#%%
# Parse raw session strings into [viewed_ids, purchased_ids] pairs of int
# lists. Session format: "v1,v2,...;p1,p2,..." — ids before ';' were viewed,
# ids after ';' (possibly none) were purchased.
#
# FIX: the original stored bare map() objects; under Python 3 those are
# one-shot iterators with no len()/indexing, which silently breaks the
# metric loops below (they use sess_l[j] and len(...)). Materialize lists.
def _parse_session(sess):
    """Split one 'views;purchases' line into [list_of_view_ids, list_of_purchase_ids]."""
    look_part, pur_part = sess.split(';')
    look_items = list(map(int, look_part.split(',')))
    # An empty purchase part means the user bought nothing in this session.
    pur_items = list(map(int, pur_part.split(','))) if pur_part else []
    return [look_items, pur_items]

# Create train array splitted by looks (look_items) and purchases (pur_items)
sess_train_lp = [_parse_session(sess) for sess in sess_train]
# Create test array splitted by looks (look_items) and purchases (pur_items)
sess_test_lp = [_parse_session(sess) for sess in sess_test]
#%% [markdown]
# #### 3. Create and sort arrays of unique ids counters for looks and purchases for train dataset.
#%%
# Array of looks
sess_train_l = [row[0] for row in sess_train_lp]
# Flatten all viewed ids across sessions (duplicates kept — each view counts).
sess_train_l_np = np.array( [id_n for sess in sess_train_l for id_n in sess] )
# Array of unique ids and looks in train data
# Shape (n, 2): column 0 = item id, column 1 = number of views.
sess_train_l_cnt = np.transpose(np.unique(sess_train_l_np, return_counts=True))
#%%
sess_train_l_cnt
#%%
# Array of purchases
sess_train_p = [row[1] for row in sess_train_lp]
sess_train_p_np = np.array( [id_n for sess in sess_train_p for id_n in sess] )
# Array of unique ids and purchases in train dataset
sess_train_p_cnt = np.transpose(np.unique(sess_train_p_np, return_counts=True))
#%%
sess_train_p_cnt
#%%
# Sorting arrays of looks and purchases by counts
# NOTE(review): argsort ascending then [::-1] reverses equal-count runs too,
# so ties end up ordered by descending id, not by earliest appearance —
# verify against the stated tie-break rule.
sess_train_l_cnt = sess_train_l_cnt[sess_train_l_cnt[:,1].argsort()][::-1]
sess_train_p_cnt = sess_train_p_cnt[sess_train_p_cnt[:,1].argsort()][::-1]
#%% [markdown]
# #### 4. Calculating metrics for train dataset with suggestions based on looks.
#%%
def prec_rec_metrics(session, reccomendations, k):
    """Score a recommendation list against a session's purchased ids.

    Returns (precision@k, recall@k): hits / k and hits / len(session),
    where a hit is a recommended id that appears among the purchases.
    """
    hits = sum(1 for item in reccomendations if item in session)
    return (hits / k, hits / len(session))
#%%
# Calculate metrics for train dataset, suggestions based on looks
# For each purchasing session: rank its viewed ids by global view count
# (via their row index in the count table), then score at k=1 and k=5.
prec_at_1_tr_l, rec_at_1_tr_l = [], []
prec_at_5_tr_l, rec_at_5_tr_l = [], []
k1, k5 = 1, 5
for i, sess_p in enumerate(sess_train_p):
    # skip sessions without purchases
    if sess_p == []: continue
    # looks ids
    sess_l = sess_train_l[i]
    # sorted looks ids indices in sess_train_l_cnt array
    # sort in accordance with looks counts
    l_ind_sess = []
    for j in range(len(list(sess_l))):
        l_ind_sess.append(np.where(sess_train_l_cnt[:,0] == sess_l[j])[0][0])
    # np.unique deduplicates AND sorts ascending; since the count table is
    # sorted by count descending, a smaller index means more views.
    # NOTE(review): equal-count ids are therefore tie-broken by table order,
    # not by earliest view in the session — confirm against the task rule.
    l_ind_sess_sorted = np.unique(l_ind_sess)
    # k1 recommendations
    num_of_recs_k1 = min(k1, len(list(sess_l)))
    if num_of_recs_k1 == 0: continue
    recs_k1 = sess_train_l_cnt[l_ind_sess_sorted[:num_of_recs_k1],0]
    # k1 metrics
    prec_1, rec_1 = prec_rec_metrics(sess_p, recs_k1, k1)
    prec_at_1_tr_l.append(prec_1)
    rec_at_1_tr_l.append(rec_1)
    # k5 recommendations
    num_of_recs_k5 = min(k5, len(list(sess_l)))
    if num_of_recs_k5 == 0: continue
    recs_k5 = sess_train_l_cnt[l_ind_sess_sorted[:num_of_recs_k5],0]
    # k5 metrics
    prec_5, rec_5 = prec_rec_metrics(sess_p, recs_k5, k5)
    prec_at_5_tr_l.append(prec_5)
    rec_at_5_tr_l.append(rec_5)
#%%
# Average the per-session metrics over all sessions with purchases.
avg_prec_at_1_tr_l = np.mean(prec_at_1_tr_l)
avg_rec_at_1_tr_l = np.mean(rec_at_1_tr_l)
avg_prec_at_5_tr_l = np.mean(prec_at_5_tr_l)
avg_rec_at_5_tr_l = np.mean(rec_at_5_tr_l)
#%%
# Answer 1: "recall@1 precision@1 recall@5 precision@5", 2 decimal places.
with open('ans1.txt', 'w') as file:
    r1 = '%.2f' % round(avg_rec_at_1_tr_l, 2)
    p1 = '%.2f' % round(avg_prec_at_1_tr_l, 2)
    r5 = '%.2f' % round(avg_rec_at_5_tr_l, 2)
    p5 = '%.2f' % round(avg_prec_at_5_tr_l, 2)
    ans1 = ' '.join([r1, p1, r5, p5])
    print('Answer 1:', ans1)
    file.write(ans1)
#%% [markdown]
# #### 5. Calculating metrics for train dataset with suggestions based on purchases.
#%%
# Calculate metrics for train dataset, suggestions based on purchases
# Same scheme as above, but viewed ids are ranked by how often they were
# BOUGHT; viewed ids never bought in train are skipped entirely.
prec_at_1_tr_p, rec_at_1_tr_p = [], []
prec_at_5_tr_p, rec_at_5_tr_p = [], []
k1, k5 = 1, 5
for i, sess_p in enumerate(sess_train_p):
    # skip sessions without purchases
    if sess_p == []: continue
    # looks ids
    sess_l = sess_train_l[i]
    # sorted looks ids indices in sess_train_p_cnt array
    # sort in accordance with purchases counts
    l_ind_sess = []
    for j in range(len(sess_l)):
        # ids absent from the purchase table have popularity 0 — skipped here.
        if sess_l[j] not in sess_train_p_cnt[:,0]: continue
        l_ind_sess.append(np.where(sess_train_p_cnt[:,0] == sess_l[j])[0][0])
    l_ind_sess_sorted = np.unique(l_ind_sess)
    # k1 recommendations
    num_of_recs_k1 = min(k1, len(sess_l), len(l_ind_sess_sorted))
    if num_of_recs_k1 == 0: continue
    recs_k1 = sess_train_p_cnt[l_ind_sess_sorted[:num_of_recs_k1],0]
    # k1 metrics
    prec_1, rec_1 = prec_rec_metrics(sess_p, recs_k1, k1)
    prec_at_1_tr_p.append(prec_1)
    rec_at_1_tr_p.append(rec_1)
    # k5 recommendations
    num_of_recs_k5 = min(k5, len(sess_l), len(l_ind_sess_sorted))
    if num_of_recs_k5 == 0: continue
    recs_k5 = sess_train_p_cnt[l_ind_sess_sorted[:num_of_recs_k5],0]
    # k5 metrics
    prec_5, rec_5 = prec_rec_metrics(sess_p, recs_k5, k5)
    prec_at_5_tr_p.append(prec_5)
    rec_at_5_tr_p.append(rec_5)
#%%
avg_prec_at_1_tr_p = np.mean(prec_at_1_tr_p)
avg_rec_at_1_tr_p = np.mean(rec_at_1_tr_p)
avg_prec_at_5_tr_p = np.mean(prec_at_5_tr_p)
avg_rec_at_5_tr_p = np.mean(rec_at_5_tr_p)
#%%
# Answer 2: same metric order as answer 1, purchase-popularity ranking.
with open('ans2.txt', 'w') as file:
    r1 = '%.2f' % round(avg_rec_at_1_tr_p, 2)
    p1 = '%.2f' % round(avg_prec_at_1_tr_p, 2)
    r5 = '%.2f' % round(avg_rec_at_5_tr_p, 2)
    p5 = '%.2f' % round(avg_prec_at_5_tr_p, 2)
    ans2 = ' '.join([r1, p1, r5, p5])
    print('Answer 2:', ans2)
    file.write(ans2)
#%% [markdown]
# #### 6. Create and sort arrays of unique ids counters for looks and purchases for test dataset.
#%%
# Array of looks
# Test-set popularities are deliberately NOT computed: rankings always come
# from the TRAIN count tables, so only the flattened id arrays are kept.
sess_test_l = [row[0] for row in sess_test_lp]
sess_test_l_np = np.array( [id_n for sess in sess_test_l for id_n in sess] )
# Array of unique ids and looks in train data
#sess_test_l_cnt = np.transpose(np.unique(sess_test_l_np, return_counts=True))
#%%
sess_test_l_np
#sess_test_l_cnt
#%%
# Array of purchases
sess_test_p = [row[1] for row in sess_test_lp]
sess_test_p_np = np.array( [id_n for sess in sess_test_p for id_n in sess] )
# Array of unique ids and purchases in train dataset
#sess_test_p_cnt = np.transpose(np.unique(sess_test_p_np, return_counts=True))
#%%
sess_test_p_np
#sess_test_p_cnt
#%%
# Sorting arrays of looks and purchases by counts
# (already done above for the train tables; kept here as a disabled reminder)
#sess_train_l_cnt = sess_train_l_cnt[sess_train_l_cnt[:,1].argsort()][::-1]
#sess_train_p_cnt = sess_train_p_cnt[sess_train_p_cnt[:,1].argsort()][::-1]
#%% [markdown]
# #### 7. Calculating metrics for test dataset with suggestions based on looks.
#%%
# Calculate metrics for test dataset, suggestions based on looks
# Rank each test session's viewed ids by their TRAIN view counts; ids unseen
# in train (popularity 0) are appended after all known ids.
prec_at_1_tst_l, rec_at_1_tst_l = [], []
prec_at_5_tst_l, rec_at_5_tst_l = [], []
k1, k5 = 1, 5
for i, sess_p in enumerate(sess_test_p):
    # skip sessions without purchases
    if sess_p == []: continue
    # looks ids
    sess_l = sess_test_l[i]
    # sorted looks ids indices in sess_train_l_cnt array
    # sort in accordance with looks counts
    l_ind_sess = []
    new_ids = []
    for j in range(len(sess_l)):
        if sess_l[j] not in sess_train_l_cnt[:,0]:
            new_ids.append(sess_l[j])
            continue
        l_ind_sess.append(np.where(sess_train_l_cnt[:,0] == sess_l[j])[0][0])
    l_ind_sess_sorted = np.unique(l_ind_sess)
    # k1 recommendations
    num_of_recs_k1 = min(k1, len(sess_l))
    if num_of_recs_k1 == 0: continue
    if l_ind_sess != []:
        recs_k1 = sess_train_l_cnt[l_ind_sess_sorted[:num_of_recs_k1],0]
    else:
        recs_k1 = []
    # Pad with unseen ids; NOTE(review): np.unique sorts them by id, not by
    # first appearance in the session — verify against the tie-break rule.
    recs_k1 = np.concatenate((np.array(recs_k1, dtype='int64'), np.unique(np.array(new_ids, dtype='int64'))))[:num_of_recs_k1]
    #recs_k1
    # k1 metrics
    prec_1, rec_1 = prec_rec_metrics(sess_p, recs_k1, k1)
    prec_at_1_tst_l.append(prec_1)
    rec_at_1_tst_l.append(rec_1)
    # k5 recommendations
    num_of_recs_k5 = min(k5, len(sess_l))
    if num_of_recs_k5 == 0: continue
    if l_ind_sess != []:
        recs_k5 = sess_train_l_cnt[l_ind_sess_sorted[:num_of_recs_k5],0]
    else:
        recs_k5 = []
    recs_k5 = np.concatenate((np.array(recs_k5, dtype='int64'), np.unique(np.array(new_ids, dtype='int64'))))[:num_of_recs_k5]
    #recs_k5
    # k5 metrics
    prec_5, rec_5 = prec_rec_metrics(sess_p, recs_k5, k5)
    prec_at_5_tst_l.append(prec_5)
    rec_at_5_tst_l.append(rec_5)
#%%
avg_prec_at_1_tst_l = np.mean(prec_at_1_tst_l)
avg_rec_at_1_tst_l = np.mean(rec_at_1_tst_l)
avg_prec_at_5_tst_l = np.mean(prec_at_5_tst_l)
avg_rec_at_5_tst_l = np.mean(rec_at_5_tst_l)
#%%
# Answer 3: view-popularity ranking evaluated on the TEST sessions.
with open('ans3.txt', 'w') as file:
    r1 = '%.2f' % round(avg_rec_at_1_tst_l, 2)
    p1 = '%.2f' % round(avg_prec_at_1_tst_l, 2)
    r5 = '%.2f' % round(avg_rec_at_5_tst_l, 2)
    p5 = '%.2f' % round(avg_prec_at_5_tst_l, 2)
    ans3 = ' '.join([r1, p1, r5, p5])
    print('Answer 3:', ans3)
    file.write(ans3)
#%% [markdown]
# #### 8. Calculating metrics for test dataset with suggestions based on purchases.
#%%
def uniquifier(seq):
    """Drop duplicate elements of `seq`, keeping first occurrences in order."""
    # dict preserves insertion order (Python 3.7+), so fromkeys dedupes stably.
    return list(dict.fromkeys(seq))
#%%
# Calculate metrics for test dataset, suggestions based on purchases
# Rank each test session's viewed ids by TRAIN purchase counts; ids never
# bought in train are appended afterwards in first-seen order (uniquifier).
prec_at_1_tst_p, rec_at_1_tst_p = [], []
prec_at_5_tst_p, rec_at_5_tst_p = [], []
k1, k5 = 1, 5
for i, sess_p in enumerate(sess_test_p):
    # skip sessions without purchases
    if sess_p == []: continue
    # looks ids
    sess_l = sess_test_l[i]
    # sorted looks ids indices in sess_train_p_cnt array
    # sort in accordance with purchases counts
    l_ind_sess = []
    new_ids = []
    for j in range(len(sess_l)):
        if sess_l[j] not in sess_train_p_cnt[:,0]:
            new_ids.append(sess_l[j])
            continue
        l_ind_sess.append(np.where(sess_train_p_cnt[:,0] == sess_l[j])[0][0])
    l_ind_sess_sorted = np.unique(l_ind_sess)
    # k1 recommendations
    num_of_recs_k1 = min(k1, len(sess_l))
    if num_of_recs_k1 == 0: continue
    if l_ind_sess != []:
        recs_k1 = sess_train_p_cnt[l_ind_sess_sorted[:num_of_recs_k1],0]
    else:
        recs_k1 = []
    # uniquifier keeps first-seen order for the zero-popularity padding ids.
    recs_k1 = np.concatenate((np.array(recs_k1, dtype='int64'), np.array(uniquifier(np.array(new_ids, dtype='int64')))))[:num_of_recs_k1]
    # k1 metrics
    prec_1, rec_1 = prec_rec_metrics(sess_p, recs_k1, k1)
    prec_at_1_tst_p.append(prec_1)
    rec_at_1_tst_p.append(rec_1)
    # k5 recommendations
    num_of_recs_k5 = min(k5, len(sess_l))
    if num_of_recs_k5 == 0: continue
    if l_ind_sess != []:
        recs_k5 = sess_train_p_cnt[l_ind_sess_sorted[:num_of_recs_k5],0]
    else:
        recs_k5 = []
    recs_k5 = np.concatenate((np.array(recs_k5, dtype='int64'), np.array(uniquifier(np.array(new_ids, dtype='int64')))))[:num_of_recs_k5]
    # k5 metrics
    prec_5, rec_5 = prec_rec_metrics(sess_p, recs_k5, k5)
    prec_at_5_tst_p.append(prec_5)
    rec_at_5_tst_p.append(rec_5)
#%%
avg_prec_at_1_tst_p = np.mean(prec_at_1_tst_p)
avg_rec_at_1_tst_p = np.mean(rec_at_1_tst_p)
avg_prec_at_5_tst_p = np.mean(prec_at_5_tst_p)
avg_rec_at_5_tst_p = np.mean(rec_at_5_tst_p)
#%%
# Answer 4: purchase-popularity ranking evaluated on the TEST sessions.
with open('ans4.txt', 'w') as file:
    r1 = '%.2f' % round(avg_rec_at_1_tst_p, 2)
    p1 = '%.2f' % round(avg_prec_at_1_tst_p, 2)
    r5 = '%.2f' % round(avg_rec_at_5_tst_p, 2)
    p5 = '%.2f' % round(avg_prec_at_5_tst_p, 2)
    ans4 = ' '.join([r1, p1, r5, p5])
    print('Answer 4:', ans4)
    file.write(ans4)
|
import random
def throw_die():
# random.randrange(a, b) geeft een getal tussen a (inclusief) en b (exclusief)
return random.randrange(1, 7)
def probability_of_sum_higher_than(dice_count, minimum_sum, samples):
raise NotYetImplemented()
|
from sqlalchemy.dialects.mysql import json
import json
from app import app
import requests
from flask_babel import _
def translate(text, source_language, dest_language):
    """Translate `text` via the Microsoft Translator HTTP API.

    Returns the translated string, or a localized error message when the
    service is unconfigured or the HTTP call fails.
    """
    # FIX: replaced the original triple-negated membership test
    # (`not (not (key not in config) and config[key])`) with the equivalent,
    # readable check: key missing OR its value falsy.
    if not app.config.get('MS_TRANSLATOR_KEY'):
        return _('Error: the translation service is not configured.')
    auth = {'Ocp-Apim-Subscription-Key': app.config['MS_TRANSLATOR_KEY']}
    r = requests.get('https://api.microsofttranslator.com/v2/Ajax.svc'
                     '/Translate?text={}&from={}&to={}'.format(text, source_language, dest_language), headers=auth)
    if r.status_code != 200:
        return _('Error: the translation service failed.')
    # utf-8-sig strips the BOM the service prepends to its JSON payload.
    return json.loads(r.content.decode('utf-8-sig'))
|
"""
Fixes duplicates on the DLLs files
"""
import argparse
import pandas as pd
def fixer_func(dlls):
    """Deduplicate and sort a ';'-separated dll list.

    Non-string values (e.g. NaN for rows without dlls) pass through
    unchanged.
    """
    # isinstance is the idiomatic (and subclass-safe) type check,
    # replacing `type(dlls) is str`.
    if isinstance(dlls, str):
        return ';'.join(sorted(set(dlls.split(';'))))
    return dlls
def dll_fixer(input_csv, output_csv):
    """Read `input_csv`, normalize every row's dll list, and write the
    result as a gzip-compressed CSV indexed by 'link'."""
    frame = pd.read_csv(input_csv).set_index('link')
    # Normalize each dlls cell (dedupe + sort) via the shared fixer.
    frame.dlls = frame.dlls.map(fixer_func)
    frame.to_csv(output_csv, compression='gzip')
if __name__ == '__main__':
    # CLI: dll_fixer.py INPUT_CSV OUTPUT_CSV
    parser = argparse.ArgumentParser()
    parser.add_argument('input_csv')
    parser.add_argument('output_csv')
    args = parser.parse_args()
    dll_fixer(args.input_csv, args.output_csv)
# Generated by Django 3.0.3 on 2020-10-02 13:30
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Diabetes and Heart prediction-record tables."""

    initial = True

    dependencies = [
    ]

    # NOTE(review): every clinical value is stored as CharField(max_length=50),
    # presumably to keep raw form input as text — confirm before adding math
    # over these columns.
    operations = [
        migrations.CreateModel(
            name='Diabetes',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('age', models.CharField(max_length=50)),
                ('bp', models.CharField(max_length=50)),
                ('glucose', models.CharField(max_length=50)),
                ('skin', models.CharField(max_length=50)),
                ('bmi', models.CharField(max_length=50)),
                ('pregnancies', models.CharField(max_length=50)),
                ('insulin', models.CharField(max_length=50)),
                ('diab', models.CharField(max_length=50)),
                ('probability', models.CharField(max_length=50)),
                ('result', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Heart',
            fields=[
                ('sno', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=50)),
                ('age', models.CharField(max_length=50)),
                ('bp', models.CharField(max_length=50)),
                ('chol', models.CharField(max_length=50)),
                ('beat', models.CharField(max_length=50)),
                ('gender', models.CharField(max_length=50)),
                ('cp', models.CharField(max_length=50)),
                ('fbs', models.CharField(max_length=50)),
                ('restecg', models.CharField(max_length=50)),
                ('exang', models.CharField(max_length=50)),
                ('oldpeak', models.CharField(max_length=50)),
                ('slope', models.CharField(max_length=50)),
                ('ca', models.CharField(max_length=50)),
                ('thal', models.CharField(max_length=50)),
                ('probability', models.CharField(max_length=50)),
                ('result', models.CharField(max_length=50)),
            ],
        ),
    ]
|
#!/usr/bin/python
"""
Output lines selected randomly from a file
Copyright 2005, 2007 Paul Eggert.
Copyright 2010 Darrell Benjamin Carbajal.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Please see <http://www.gnu.org/licenses/> for a copy of the license.
$Id: randline.py,v 1.4 2010/04/05 20:04:43 eggert Exp $
"""
import random, sys
from optparse import OptionParser
class randline:
    """Hold the lines of a text file and hand them out at random."""

    def __init__(self, filename):
        # FIX: 'with' guarantees the handle is closed even if readlines()
        # raises (the original leaked it on error).
        with open(filename, 'r') as f:
            self.lines = f.readlines()

    def makeUnique(self):
        """Collapse duplicate lines so each distinct line is equally likely."""
        self.lines = list(set(self.lines))

    def nLines(self):
        """Return the number of lines currently available."""
        return len(self.lines)

    def chooseline(self, notReplaceable):
        """Return a random line; if notReplaceable == 1, also remove it
        (sampling without replacement)."""
        tempChoice = random.choice(self.lines)
        if (notReplaceable == 1):
            self.lines.remove(tempChoice)
        return tempChoice
def main():
    """Parse command-line options, concatenate the operand files into
    temp.txt (ensuring no two files' lines fuse), and print randomly
    chosen lines from the combined contents."""
    version_msg = "%prog 2.0"
    usage_msg = """%prog [OPTION]... FILE...
Output randomly selected lines from selected FILE(s)."""

    parser = OptionParser(version=version_msg,
                          usage=usage_msg)
    parser.add_option("-n", "--numlines",
                      action="store", dest="numlines", default=1,
                      help="output NUMLINES lines (default 1)")
    parser.add_option("-u", "--unique",
                      action="store_true", dest="unique", default=False,
                      help="allows for each different line to have same prob")
    parser.add_option("-w", "--without-replacement",
                      action="store_true", dest="without", default=False,
                      help="outputs without replacing the string in the list")
    options, args = parser.parse_args(sys.argv[1:])
    try:
        numlines = int(options.numlines)
    except ValueError:
        # FIX: was a bare 'except:', which also swallowed SystemExit et al.
        parser.error("invalid NUMLINES: {0}".
                     format(options.numlines))
    if numlines < 0:
        parser.error("negative count: {0}".
                     format(numlines))
    if len(args) < 1:
        parser.error("wrong number of operands")

    # Validate every operand is readable before touching temp.txt.
    for n in args:
        try:
            randline(n)
        except IOError as halp:
            parser.error("I/O error({0}): {1}".
                         format(halp.args[0], halp.args[1]))

    # Concatenate all inputs into temp.txt; force a trailing newline on each
    # file so the last line of one file never fuses with the next file.
    with open("temp.txt", 'w') as setup:
        setup.write("")
    for x in args:
        with open(x, 'r') as tempFile:  # FIX: original never closed this handle
            tempLines = tempFile.readlines()
        if not tempLines:
            continue  # FIX: an empty operand file used to crash on [-1]
        if tempLines[-1][-1] != '\n':
            tempLines[-1] = tempLines[-1] + '\n'
        with open("temp.txt", 'a') as newFile:
            newFile.writelines(tempLines)

    input_file = "temp.txt"
    try:
        generator = randline(input_file)
        if options.unique:
            generator.makeUnique()
        if numlines > generator.nLines():
            parser.error("Not enough lines in file")
        for index in range(numlines):
            if options.without:
                sys.stdout.write(generator.chooseline(1))
            else:
                sys.stdout.write(generator.chooseline(0))
    except IOError as halp:
        parser.error("I/O error({0}): {1}".
                     format(halp.args[0], halp.args[1]))
if __name__ == "__main__":
main()
|
import logging
import requests
class DoubanAPI:
    """Thin client for a few public Douban (douban.com) HTTP endpoints."""

    logger = logging.getLogger(__name__)

    @classmethod
    def get_user_info(cls, uid):
        """Fetch a user's basic profile and return the parsed JSON dict."""
        url = "https://api.douban.com/v2/user/{}".format(uid)
        r = requests.get(url, timeout=10)
        return r.json()

    @classmethod
    def get_life_stream(cls, uid, count=100, raw=False):
        """Fetch a user's activity stream ("life stream").

        Returns the raw API payload when raw=True; otherwise a simplified
        list of event dicts covering subject marks and status posts.
        """
        params = {
            'slice': 'recent-2018-10',
            'hot': 'false',
            'count': count,
            'ck': '2oMs',
            'for_mobile': 1
        }
        # Mobile-site headers; presumably required for the rexxar endpoint
        # to answer with JSON rather than rejecting the request — TODO confirm.
        headers = {
            'Accept': 'application/json',
            'Referer': 'https://m.douban.com/people/{}/'.format(uid),
            'X-Requested-With': 'XMLHttpRequest',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36',
            'Host': 'm.douban.com'
        }
        url = "https://m.douban.com/rexxar/api/v2/user/{}/lifestream".format(uid)
        r = requests.get(url, params=params, headers=headers, timeout=10)
        raw_result = r.json()
        if raw:
            return raw_result
        else:
            events = []
            for item in raw_result["items"]:
                if item["type"] == "subjects":
                    # Marked several subjects in one action
                    for subject in item["content"]["subjects"]:
                        event = {
                            'time': item['time'],
                            'activity_type': 'mark',  # marked a book/movie/music item
                            'activity': item['activity'],  # e.g. reading / read / want to read
                            'url': subject['url'],  # link to the subject page
                            'title': subject['title'],  # subject title
                            'rating': "%.1f分" % subject['rating']['value'],
                            'pic': subject['pic']['normal'],
                            'type': subject['type']  # book, movie, tv, music
                        }
                        events.append(event)
                elif item['type'] == 'card':
                    # Marked a single subject
                    content = item["content"]
                    event = {
                        'time': item['time'],
                        'activity_type': 'mark',  # marked a book/movie/music item
                        'activity': item['activity'],  # e.g. reading / read / want to read
                        'url': item['url'],  # link to the subject page
                        'title': content['title'],  # subject title
                        'rating': content['description'].split("/")[0],
                        'pic': content['cover_url'],
                        'type': content.get("type", "unknown")  # book, movie, tv, music
                    }
                    events.append(event)
                elif item['type'] == 'status':
                    # Posted a status update
                    content = item['content']
                    event = {
                        'time': item['time'],
                        'activity_type': 'status',  # status post
                        'activity': item['activity'],  # posting action label
                        'text': content['text'],  # status text content
                        'images': [i["normal"]["url"] for i in content["images"]]
                    }
                    events.append(event)
            return events
|
import csv
from flask import Flask, render_template
app = Flask(__name__)
@app.route("/")
def hello():
    """Render the dnsmasq DHCP leases table on the index page."""
    # FIX: under Python 3 csv.reader requires a TEXT-mode file ('rb' yields
    # bytes and raises); newline='' is the csv module's documented open mode.
    with open('/var/lib/misc/dnsmasq.leases', 'r', newline='') as leasesFile:
        leasesReader = csv.reader(leasesFile, delimiter=' ')
        return render_template('default.html', leases=leasesReader)
if __name__ == "__main__":
    #app.debug = True
    # Listen on all interfaces on port 80 (binding it requires root).
    app.run('0.0.0.0',80)
|
from socket import *
from os import listdir
from os.path import isfile, join
# Simple UDP file server: replies to 'ls' with the serveable file names,
# to a known file name with that file's contents, otherwise with an error.
# The ./files listing is snapshotted once at startup.
onlyfiles = [f for f in listdir('./files') if isfile(join('./files', f))]
serverAddress = ("127.0.0.1",12000) # a tuple containing the IP Address of the Server and Socket ID (Port)
serverSocket = socket(AF_INET,SOCK_DGRAM) # Datagram because we're using UDP
serverSocket.bind(serverAddress)
print('starting up on %s port %s' % serverAddress) # debug
while True:
    # Block until a datagram arrives; 2048 bytes caps the request size.
    message, clientAddress = serverSocket.recvfrom(2048)
    modifiedMessasge = message.decode()
    print(modifiedMessasge+" from %s port %s"%clientAddress) # debug
    if modifiedMessasge == 'ls':
        serverSocket.sendto("\n".join(onlyfiles).encode(),clientAddress)
    elif modifiedMessasge in onlyfiles:
        data=''
        with open('./files/'+modifiedMessasge, 'r') as file:
            data = file.read()
        serverSocket.sendto(data.encode(),clientAddress)
    else:
        serverSocket.sendto("File not found".encode(),clientAddress)
# NOTE(review): unreachable — the loop above never exits; looks like leftover code.
modifiedMessasge = message
'''• 2! = 2.1 = 2
• 3! = 3.2.1 = 3.2! = 3.2 = 6
• 4! = 4.3.2.1 = 4.3! = 4.3.2! = 4.3.2 = 24
• 5! = 5.4.3.2.1 = 5.4! = 5.4.3! = 5.4.6 = 20.6 = 120
• 6! = 6.5.4.3.2.1 = 6.5! = 6.120 = 720
• 7! = 7.6.5.4.3.2.1 = 7.6! = 7.720 = 5040 '''
def factorialz(number):
    """Return number! for a non-negative integer.

    Iterative instead of recursive, so large inputs cannot overflow the
    call stack.

    Raises:
        ValueError: if number is negative (the original recursion would
        recurse past zero forever and die with RecursionError).
    """
    if number < 0:
        raise ValueError("factorial is undefined for negative numbers")
    result = 1
    for factor in range(2, number + 1):
        result *= factor
    return result
print(factorialz(5))
#print("Another method")
'''
import math
math.factorial(n)
'''
|
#!/usr/bin/python
"""
The main script of gess, the generator for synthetic streams
of financial transactions.
@author: Michael Hausenblas, http://mhausenblas.info/#i
@since: 2013-11-07
@status: init
"""
import logging
import os
from fintrans import FinTransSource
DEBUG = False
CONFIG_FILE = 'gess.conf'
if DEBUG:
FORMAT = '%(asctime)-0s %(levelname)s %(message)s [at line %(lineno)d]'
logging.basicConfig(level=logging.DEBUG, format=FORMAT, datefmt='%Y-%m-%dT%I:%M:%S')
else:
FORMAT = '%(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
# expecting the config file 'gess.conf' in the gess root directory.
# parses out the ATM location CSV file pathes to be used in the generator and
# returns a list of absolute pathes to the CSV files specified.
def read_config():
    """Resolve the list of ATM-location CSV files to feed the generator.

    Reads gess.conf when present (one path per line; blank lines and '#'
    comments ignored) and returns the absolute paths listed there;
    otherwise falls back to the bundled default CSV.
    """
    cf = os.path.abspath(CONFIG_FILE)
    if not os.path.exists(cf):
        logging.info('No gess config file found, using default source (data/osm-atm-garmin.csv)')
        return [os.path.abspath('data/osm-atm-garmin.csv')]
    logging.info('Using config file %s, parsing ATM location sources to be used' %cf)
    atm_loc_sources = []
    with open(CONFIG_FILE, 'r') as handle:
        for raw_line in handle:
            entry = raw_line.strip()
            if not entry or entry.startswith('#'):
                continue  # skip blanks and comments
            atm_loc_source = os.path.abspath(entry)
            atm_loc_sources.append(atm_loc_source)
            logging.debug(' -> added %s as a source' %atm_loc_source)
    return atm_loc_sources
################################################################################
## Main script
if __name__ == '__main__':
    # Build the transaction source from the configured ATM-location CSVs
    # and start generating the synthetic stream.
    fns = FinTransSource(read_config())
    fns.run()
# -*- coding: utf-8 -*-
#
# This file is part of Python-ASN1. Python-ASN1 is free software that is
# made available under the MIT license. Consult the file "LICENSE" that
# is distributed together with this file for the exact licensing terms.
#
# Python-ASN1 is copyright (c) 2007-2016 by the Python-ASN1 authors. See the
# file "AUTHORS" for a complete overview.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import base64
from builtins import int
import pytest
import asn1
class TestEncoder(object):
"""Test suite for ASN1 Encoder."""
def test_boolean(self):
enc = asn1.Encoder()
enc.start()
enc.write(True, asn1.Numbers.Boolean)
res = enc.output()
assert res == b'\x01\x01\xff'
def test_integer(self):
enc = asn1.Encoder()
enc.start()
enc.write(1)
res = enc.output()
assert res == b'\x02\x01\x01'
def test_long_integer(self):
enc = asn1.Encoder()
enc.start()
enc.write(0x0102030405060708090a0b0c0d0e0f)
res = enc.output()
assert res == b'\x02\x0f\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f'
def test_negative_integer(self):
enc = asn1.Encoder()
enc.start()
enc.write(-1)
res = enc.output()
assert res == b'\x02\x01\xff'
def test_long_negative_integer(self):
enc = asn1.Encoder()
enc.start()
enc.write(-0x0102030405060708090a0b0c0d0e0f)
res = enc.output()
assert res == b'\x02\x0f\xfe\xfd\xfc\xfb\xfa\xf9\xf8\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf1'
def test_twos_complement_boundaries(self):
enc = asn1.Encoder()
enc.start()
enc.write(0)
res = enc.output()
assert res == b'\x02\x01\x00'
enc = asn1.Encoder()
enc.start()
enc.write(1)
res = enc.output()
assert res == b'\x02\x01\x01'
enc = asn1.Encoder()
enc.start()
enc.write(-0)
res = enc.output()
assert res == b'\x02\x01\x00'
enc = asn1.Encoder()
enc.start()
enc.write(-1)
res = enc.output()
assert res == b'\x02\x01\xff'
enc = asn1.Encoder()
enc.start()
enc.write(127)
res = enc.output()
assert res == b'\x02\x01\x7f'
enc.start()
enc.write(128)
res = enc.output()
assert res == b'\x02\x02\x00\x80'
enc.start()
enc.write(-127)
res = enc.output()
assert res == b'\x02\x01\x81'
enc.start()
enc.write(-128)
res = enc.output()
assert res == b'\x02\x01\x80'
enc.start()
enc.write(-129)
res = enc.output()
assert res == b'\x02\x02\xff\x7f'
enc.start()
enc.write(32767)
res = enc.output()
assert res == b'\x02\x02\x7f\xff'
enc.start()
enc.write(32768)
res = enc.output()
assert res == b'\x02\x03\x00\x80\x00'
enc.start()
enc.write(32769)
res = enc.output()
assert res == b'\x02\x03\x00\x80\x01'
enc.start()
enc.write(-32767)
res = enc.output()
assert res == b'\x02\x02\x80\x01'
enc.start()
enc.write(-32768)
res = enc.output()
assert res == b'\x02\x02\x80\x00'
enc.start()
enc.write(-32769)
res = enc.output()
assert res == b'\x02\x03\xff\x7f\xff'
def test_octet_string(self):
enc = asn1.Encoder()
enc.start()
enc.write(b'foo')
res = enc.output()
assert res == b'\x04\x03foo'
def test_bitstring(self):
enc = asn1.Encoder()
enc.start()
enc.write(b'\x12\x34\x56', asn1.Numbers.BitString)
res = enc.output()
assert res == b'\x03\x04\x00\x12\x34\x56'
def test_printable_string(self):
enc = asn1.Encoder()
enc.start()
enc.write(u'foo', nr=asn1.Numbers.PrintableString)
res = enc.output()
assert res == b'\x13\x03foo'
def test_unicode_octet_string(self):
enc = asn1.Encoder()
enc.start()
enc.write(u'fooé', nr=asn1.Numbers.OctetString)
res = enc.output()
assert res == b'\x04\x05\x66\x6f\x6f\xc3\xa9'
def test_unicode_printable_string(self):
enc = asn1.Encoder()
enc.start()
enc.write(u'fooé', nr=asn1.Numbers.PrintableString)
res = enc.output()
assert res == b'\x13\x05\x66\x6f\x6f\xc3\xa9'
def test_null(self):
enc = asn1.Encoder()
enc.start()
enc.write(None)
res = enc.output()
assert res == b'\x05\x00'
def test_object_identifier(self):
enc = asn1.Encoder()
enc.start()
enc.write('1.2.3', asn1.Numbers.ObjectIdentifier)
res = enc.output()
assert res == b'\x06\x02\x2a\x03'
def test_long_object_identifier(self):
enc = asn1.Encoder()
enc.start()
enc.write('39.2.3', asn1.Numbers.ObjectIdentifier)
res = enc.output()
assert res == b'\x06\x03\x8c\x1a\x03'
enc.start()
enc.write('1.39.3', asn1.Numbers.ObjectIdentifier)
res = enc.output()
assert res == b'\x06\x02\x4f\x03'
enc.start()
enc.write('1.2.300000', asn1.Numbers.ObjectIdentifier)
res = enc.output()
assert res == b'\x06\x04\x2a\x92\xa7\x60'
def test_real_object_identifier(self):
enc = asn1.Encoder()
enc.start()
enc.write('1.2.840.113554.1.2.1.1', asn1.Numbers.ObjectIdentifier)
res = enc.output()
assert res == b'\x06\x0a\x2a\x86\x48\x86\xf7\x12\x01\x02\x01\x01'
def test_enumerated(self):
enc = asn1.Encoder()
enc.start()
enc.write(1, asn1.Numbers.Enumerated)
res = enc.output()
assert res == b'\x0a\x01\x01'
def test_sequence(self):
enc = asn1.Encoder()
enc.start()
enc.enter(asn1.Numbers.Sequence)
enc.write(1)
enc.write(b'foo')
enc.leave()
res = enc.output()
assert res == b'\x30\x08\x02\x01\x01\x04\x03foo'
def test_sequence_of(self):
enc = asn1.Encoder()
enc.start()
enc.enter(asn1.Numbers.Sequence)
enc.write(1)
enc.write(2)
enc.leave()
res = enc.output()
assert res == b'\x30\x06\x02\x01\x01\x02\x01\x02'
def test_set(self):
enc = asn1.Encoder()
enc.start()
enc.enter(asn1.Numbers.Set)
enc.write(1)
enc.write(b'foo')
enc.leave()
res = enc.output()
assert res == b'\x31\x08\x02\x01\x01\x04\x03foo'
def test_set_of(self):
enc = asn1.Encoder()
enc.start()
enc.enter(asn1.Numbers.Set)
enc.write(1)
enc.write(2)
enc.leave()
res = enc.output()
assert res == b'\x31\x06\x02\x01\x01\x02\x01\x02'
def test_context(self):
enc = asn1.Encoder()
enc.start()
enc.enter(1, asn1.Classes.Context)
enc.write(1)
enc.leave()
res = enc.output()
assert res == b'\xa1\x03\x02\x01\x01'
def test_application(self):
enc = asn1.Encoder()
enc.start()
enc.enter(1, asn1.Classes.Application)
enc.write(1)
enc.leave()
res = enc.output()
assert res == b'\x61\x03\x02\x01\x01'
def test_private(self):
enc = asn1.Encoder()
enc.start()
enc.enter(1, asn1.Classes.Private)
enc.write(1)
enc.leave()
res = enc.output()
assert res == b'\xe1\x03\x02\x01\x01'
def test_long_tag_id(self):
enc = asn1.Encoder()
enc.start()
enc.enter(0xffff)
enc.write(1)
enc.leave()
res = enc.output()
assert res == b'\x3f\x83\xff\x7f\x03\x02\x01\x01'
def test_contextmanager_construct(self):
enc = asn1.Encoder()
enc.start()
with enc.construct(asn1.Numbers.Sequence):
enc.write(1)
enc.write(b'foo')
res = enc.output()
assert res == b'\x30\x08\x02\x01\x01\x04\x03foo'
def test_contextmanager_calls_enter(self):
class TestEncoder(asn1.Encoder):
def enter(self, nr, cls=None):
raise RuntimeError()
enc = TestEncoder()
enc.start()
with pytest.raises(RuntimeError):
with enc.construct(asn1.Numbers.Sequence):
enc.write(1)
def test_contextmanager_calls_leave(self):
class TestEncoder(asn1.Encoder):
def leave(self):
raise RuntimeError()
enc = TestEncoder()
enc.start()
with pytest.raises(RuntimeError):
with enc.construct(asn1.Numbers.Sequence):
enc.write(1)
def test_long_tag_length(self):
enc = asn1.Encoder()
enc.start()
enc.write(b'x' * 0xffff)
res = enc.output()
assert res == b'\x04\x82\xff\xff' + b'x' * 0xffff
def test_error_init(self):
enc = asn1.Encoder()
pytest.raises(asn1.Error, enc.enter, asn1.Numbers.Sequence)
pytest.raises(asn1.Error, enc.leave)
pytest.raises(asn1.Error, enc.write, 1)
pytest.raises(asn1.Error, enc.output)
def test_error_stack(self):
enc = asn1.Encoder()
enc.start()
pytest.raises(asn1.Error, enc.leave)
enc.enter(asn1.Numbers.Sequence)
pytest.raises(asn1.Error, enc.output)
enc.leave()
pytest.raises(asn1.Error, enc.leave)
def test_error_object_identifier(self):
enc = asn1.Encoder()
enc.start()
pytest.raises(asn1.Error, enc.write, '1', asn1.Numbers.ObjectIdentifier)
pytest.raises(asn1.Error, enc.write, '40.2.3', asn1.Numbers.ObjectIdentifier)
pytest.raises(asn1.Error, enc.write, '1.40.3', asn1.Numbers.ObjectIdentifier)
pytest.raises(asn1.Error, enc.write, '1.2.3.', asn1.Numbers.ObjectIdentifier)
pytest.raises(asn1.Error, enc.write, '.1.2.3', asn1.Numbers.ObjectIdentifier)
pytest.raises(asn1.Error, enc.write, 'foo', asn1.Numbers.ObjectIdentifier)
pytest.raises(asn1.Error, enc.write, 'foo.bar', asn1.Numbers.ObjectIdentifier)
def test_default_encoding(self):
" Check that the encoder implicitly chooses the correct asn1 type "
def check_defaults(value, number):
default, explicit = asn1.Encoder(), asn1.Encoder()
default.start()
explicit.start()
default.write(value)
explicit.write(value, number)
assert default.output() == explicit.output(), \
"default asn1 type for '{}' should be {!r}".format(type(value).__name__, number)
check_defaults(True, asn1.Numbers.Boolean)
check_defaults(12345, asn1.Numbers.Integer)
check_defaults(b"byte string \x00\xff\xba\xdd", asn1.Numbers.OctetString)
check_defaults(u"unicode string \U0001f4a9", asn1.Numbers.PrintableString)
check_defaults(None, asn1.Numbers.Null)
def test_context_no_tag_number(self):
enc = asn1.Encoder()
enc.start()
with pytest.raises(asn1.Error):
enc.write(b'\x00\x01\x02\x03\x04', typ=asn1.Types.Primitive, cls=asn1.Classes.Context)
def test_context_with_tag_number_10(self):
enc = asn1.Encoder()
enc.start()
enc.write(b'\x00\x01\x02\x03\x04', nr=10, typ=asn1.Types.Primitive, cls=asn1.Classes.Context)
res = enc.output()
assert res == b'\x8a\x05\x00\x01\x02\x03\x04'
class TestDecoder(object):
    """Test suite for ASN1 Decoder."""
    # Each test feeds a hand-crafted BER/DER byte string to the decoder and
    # checks the (tag, value) pairs that come back out.
    def test_boolean(self):
        buf = b'\x01\x01\xff'
        dec = asn1.Decoder()
        dec.start(buf)
        tag = dec.peek()
        assert tag == (asn1.Numbers.Boolean, asn1.Types.Primitive, asn1.Classes.Universal)
        tag, val = dec.read()
        assert isinstance(val, int)
        assert val
        # Any non-zero content octet decodes as true.
        buf = b'\x01\x01\x01'
        dec.start(buf)
        tag, val = dec.read()
        assert isinstance(val, int)
        assert val
        buf = b'\x01\x01\x00'
        dec.start(buf)
        tag, val = dec.read()
        assert isinstance(val, int)
        assert not val
    def test_integer(self):
        buf = b'\x02\x01\x01'
        dec = asn1.Decoder()
        dec.start(buf)
        tag = dec.peek()
        assert tag == (asn1.Numbers.Integer, asn1.Types.Primitive, asn1.Classes.Universal)
        tag, val = dec.read()
        assert isinstance(val, int)
        assert val == 1
    def test_long_integer(self):
        buf = b'\x02\x0f\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f'
        dec = asn1.Decoder()
        dec.start(buf)
        tag, val = dec.read()
        assert val == 0x0102030405060708090a0b0c0d0e0f
    def test_negative_integer(self):
        # Integers are two's complement: 0xff is -1.
        buf = b'\x02\x01\xff'
        dec = asn1.Decoder()
        dec.start(buf)
        tag, val = dec.read()
        assert val == -1
    def test_long_negative_integer(self):
        buf = b'\x02\x0f\xfe\xfd\xfc\xfb\xfa\xf9\xf8\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf1'
        dec = asn1.Decoder()
        dec.start(buf)
        tag, val = dec.read()
        assert val == -0x0102030405060708090a0b0c0d0e0f
    def test_twos_complement_boundaries(self):
        # 127 fits in one octet; 128 needs a leading 0x00; -128 fits in
        # one octet; -129 needs two octets.
        buf = b'\x02\x01\x7f'
        dec = asn1.Decoder()
        dec.start(buf)
        tag, val = dec.read()
        assert val == 127
        buf = b'\x02\x02\x00\x80'
        dec.start(buf)
        tag, val = dec.read()
        assert val == 128
        buf = b'\x02\x01\x80'
        dec.start(buf)
        tag, val = dec.read()
        assert val == -128
        buf = b'\x02\x02\xff\x7f'
        dec.start(buf)
        tag, val = dec.read()
        assert val == -129
    def test_octet_string(self):
        buf = b'\x04\x03foo'
        dec = asn1.Decoder()
        dec.start(buf)
        tag = dec.peek()
        assert tag == (asn1.Numbers.OctetString, asn1.Types.Primitive, asn1.Classes.Universal)
        tag, val = dec.read()
        assert val == b'foo'
    def test_printable_string(self):
        buf = b'\x13\x03foo'
        dec = asn1.Decoder()
        dec.start(buf)
        tag = dec.peek()
        assert tag == (asn1.Numbers.PrintableString, asn1.Types.Primitive, asn1.Classes.Universal)
        tag, val = dec.read()
        assert val == u'foo'
    def test_bitstring(self):
        # First content octet is the count of unused bits (0 here).
        buf = b'\x03\x04\x00\x12\x34\x56'
        dec = asn1.Decoder()
        dec.start(buf)
        tag = dec.peek()
        assert tag == (asn1.Numbers.BitString, asn1.Types.Primitive, asn1.Classes.Universal)
        tag, val = dec.read()
        assert val == b'\x12\x34\x56'
    def test_bitstring_unused_bits(self):
        # 4 unused bits: the decoder shifts the value right by 4.
        buf = b'\x03\x04\x04\x12\x34\x50'
        dec = asn1.Decoder()
        dec.start(buf)
        tag = dec.peek()
        assert tag == (asn1.Numbers.BitString, asn1.Types.Primitive, asn1.Classes.Universal)
        tag, val = dec.read()
        assert val == b'\x01\x23\x45'
    def test_unicode_printable_string(self):
        # UTF-8 payload decodes to a unicode string.
        buf = b'\x13\x05\x66\x6f\x6f\xc3\xa9'
        dec = asn1.Decoder()
        dec.start(buf)
        tag = dec.peek()
        assert tag == (asn1.Numbers.PrintableString, asn1.Types.Primitive, asn1.Classes.Universal)
        tag, val = dec.read()
        assert val == u'fooé'
    def test_null(self):
        buf = b'\x05\x00'
        dec = asn1.Decoder()
        dec.start(buf)
        tag = dec.peek()
        assert tag == (asn1.Numbers.Null, asn1.Types.Primitive, asn1.Classes.Universal)
        tag, val = dec.read()
        assert val is None
    def test_object_identifier(self):
        dec = asn1.Decoder()
        # 0x2a = 40*1 + 2: the first two arcs share one octet.
        buf = b'\x06\x02\x2a\x03'
        dec.start(buf)
        tag = dec.peek()
        assert tag == (asn1.Numbers.ObjectIdentifier, asn1.Types.Primitive,
                       asn1.Classes.Universal)
        tag, val = dec.read()
        assert val == u'1.2.3'
    def test_long_object_identifier(self):
        dec = asn1.Decoder()
        buf = b'\x06\x03\x8c\x1a\x03'
        dec.start(buf)
        tag, val = dec.read()
        assert val == u'39.2.3'
        buf = b'\x06\x02\x4f\x03'
        dec.start(buf)
        tag, val = dec.read()
        assert val == u'1.39.3'
        # Multi-octet base-128 sub-identifier.
        buf = b'\x06\x04\x2a\x92\xa7\x60'
        dec.start(buf)
        tag, val = dec.read()
        assert val == u'1.2.300000'
    def test_real_object_identifier(self):
        # A real-world OID (Kerberos V5 GSS-API mechanism).
        dec = asn1.Decoder()
        buf = b'\x06\x0a\x2a\x86\x48\x86\xf7\x12\x01\x02\x01\x01'
        dec.start(buf)
        tag, val = dec.read()
        assert val == u'1.2.840.113554.1.2.1.1'
    def test_enumerated(self):
        buf = b'\x0a\x01\x01'
        dec = asn1.Decoder()
        dec.start(buf)
        tag = dec.peek()
        assert tag == (asn1.Numbers.Enumerated, asn1.Types.Primitive, asn1.Classes.Universal)
        tag, val = dec.read()
        assert isinstance(val, int)
        assert val == 1
    def test_sequence(self):
        buf = b'\x30\x08\x02\x01\x01\x04\x03foo'
        dec = asn1.Decoder()
        dec.start(buf)
        tag = dec.peek()
        assert tag == (asn1.Numbers.Sequence, asn1.Types.Constructed, asn1.Classes.Universal)
        dec.enter()
        tag, val = dec.read()
        assert val == 1
        tag, val = dec.read()
        assert val == b'foo'
    def test_sequence_of(self):
        buf = b'\x30\x06\x02\x01\x01\x02\x01\x02'
        dec = asn1.Decoder()
        dec.start(buf)
        tag = dec.peek()
        assert tag == (asn1.Numbers.Sequence, asn1.Types.Constructed, asn1.Classes.Universal)
        dec.enter()
        tag, val = dec.read()
        assert val == 1
        tag, val = dec.read()
        assert val == 2
    def test_set(self):
        buf = b'\x31\x08\x02\x01\x01\x04\x03foo'
        dec = asn1.Decoder()
        dec.start(buf)
        tag = dec.peek()
        assert tag == (asn1.Numbers.Set, asn1.Types.Constructed, asn1.Classes.Universal)
        dec.enter()
        tag, val = dec.read()
        assert val == 1
        tag, val = dec.read()
        assert val == b'foo'
    def test_set_of(self):
        buf = b'\x31\x06\x02\x01\x01\x02\x01\x02'
        dec = asn1.Decoder()
        dec.start(buf)
        tag = dec.peek()
        assert tag == (asn1.Numbers.Set, asn1.Types.Constructed, asn1.Classes.Universal)
        dec.enter()
        tag, val = dec.read()
        assert val == 1
        tag, val = dec.read()
        assert val == 2
    def test_context(self):
        buf = b'\xa1\x03\x02\x01\x01'
        dec = asn1.Decoder()
        dec.start(buf)
        tag = dec.peek()
        assert tag == (1, asn1.Types.Constructed, asn1.Classes.Context)
        dec.enter()
        tag, val = dec.read()
        assert val == 1
    def test_application(self):
        buf = b'\x61\x03\x02\x01\x01'
        dec = asn1.Decoder()
        dec.start(buf)
        tag = dec.peek()
        assert tag == (1, asn1.Types.Constructed, asn1.Classes.Application)
        dec.enter()
        tag, val = dec.read()
        assert val == 1
    def test_private(self):
        buf = b'\xe1\x03\x02\x01\x01'
        dec = asn1.Decoder()
        dec.start(buf)
        tag = dec.peek()
        assert tag == (1, asn1.Types.Constructed, asn1.Classes.Private)
        dec.enter()
        tag, val = dec.read()
        assert val == 1
    def test_long_tag_id(self):
        # Tag number 0xffff requires the multi-octet (high-tag) form.
        buf = b'\x3f\x83\xff\x7f\x03\x02\x01\x01'
        dec = asn1.Decoder()
        dec.start(buf)
        tag = dec.peek()
        assert tag == (0xffff, asn1.Types.Constructed, asn1.Classes.Universal)
        dec.enter()
        tag, val = dec.read()
        assert val == 1
    def test_long_tag_length(self):
        # 0x82 marks a two-octet length field (0xffff bytes of payload).
        buf = b'\x04\x82\xff\xff' + b'x' * 0xffff
        dec = asn1.Decoder()
        dec.start(buf)
        tag, val = dec.read()
        assert val == b'x' * 0xffff
    def test_read_multiple(self):
        buf = b'\x02\x01\x01\x02\x01\x02'
        dec = asn1.Decoder()
        dec.start(buf)
        tag, val = dec.read()
        assert val == 1
        tag, val = dec.read()
        assert val == 2
        assert dec.eof()
    def test_skip_primitive(self):
        buf = b'\x02\x01\x01\x02\x01\x02'
        dec = asn1.Decoder()
        dec.start(buf)
        dec.read()
        tag, val = dec.read()
        assert val == 2
        assert dec.eof()
    def test_skip_constructed(self):
        # read() without enter() skips the whole constructed value.
        buf = b'\x30\x06\x02\x01\x01\x02\x01\x02\x02\x01\x03'
        dec = asn1.Decoder()
        dec.start(buf)
        dec.read()
        tag, val = dec.read()
        assert val == 3
        assert dec.eof()
    def test_error_init(self):
        # All decoder operations must fail before start() is called.
        dec = asn1.Decoder()
        pytest.raises(asn1.Error, dec.peek)
        pytest.raises(asn1.Error, dec.read)
        pytest.raises(asn1.Error, dec.enter)
        pytest.raises(asn1.Error, dec.leave)
    def test_error_stack(self):
        buf = b'\x30\x08\x02\x01\x01\x04\x03foo'
        dec = asn1.Decoder()
        dec.start(buf)
        pytest.raises(asn1.Error, dec.leave)
        dec.enter()
        dec.leave()
        pytest.raises(asn1.Error, dec.leave)
    def test_no_input(self):
        dec = asn1.Decoder()
        dec.start(b'')
        tag = dec.peek()
        assert tag is None
    def test_error_missing_tag_bytes(self):
        # Truncated high-tag-number form.
        buf = b'\x3f'
        dec = asn1.Decoder()
        dec.start(buf)
        pytest.raises(asn1.Error, dec.peek)
        buf = b'\x3f\x83'
        dec.start(buf)
        pytest.raises(asn1.Error, dec.peek)
    def test_error_no_length_bytes(self):
        buf = b'\x02'
        dec = asn1.Decoder()
        dec.start(buf)
        pytest.raises(asn1.Error, dec.read)
    def test_error_missing_length_bytes(self):
        buf = b'\x04\x82\xff'
        dec = asn1.Decoder()
        dec.start(buf)
        pytest.raises(asn1.Error, dec.read)
    def test_error_too_many_length_bytes(self):
        # 0xff announces 127 length octets, which is not a valid encoding.
        buf = b'\x04\xff' + b'\xff' * 0x7f
        dec = asn1.Decoder()
        dec.start(buf)
        pytest.raises(asn1.Error, dec.read)
    def test_error_no_value_bytes(self):
        buf = b'\x02\x01'
        dec = asn1.Decoder()
        dec.start(buf)
        pytest.raises(asn1.Error, dec.read)
    def test_error_missing_value_bytes(self):
        buf = b'\x02\x02\x01'
        dec = asn1.Decoder()
        dec.start(buf)
        pytest.raises(asn1.Error, dec.read)
    def test_error_non_normalized_positive_integer(self):
        # Redundant leading 0x00 octet is rejected.
        buf = b'\x02\x02\x00\x01'
        dec = asn1.Decoder()
        dec.start(buf)
        pytest.raises(asn1.Error, dec.read)
    def test_error_non_normalized_negative_integer(self):
        # Redundant leading 0xff octet is rejected.
        buf = b'\x02\x02\xff\x80'
        dec = asn1.Decoder()
        dec.start(buf)
        pytest.raises(asn1.Error, dec.read)
    def test_error_non_normalised_object_identifier(self):
        # A sub-identifier may not start with a padding octet (0x80).
        buf = b'\x06\x02\x80\x01'
        dec = asn1.Decoder()
        dec.start(buf)
        pytest.raises(asn1.Error, dec.read)
    def test_error_object_identifier_with_too_large_first_component(self):
        buf = b'\x06\x02\x8c\x40'
        dec = asn1.Decoder()
        dec.start(buf)
        pytest.raises(asn1.Error, dec.read)
    def test_error_bitstring_with_too_many_unused_bits(self):
        # The unused-bit count must be 0..7.
        buf = b'\x03\x04\x08\x12\x34\x50'
        dec = asn1.Decoder()
        dec.start(buf)
        pytest.raises(asn1.Error, dec.read)
    def test_big_negative_integer(self):
        buf = b'\x02\x10\xff\x7f\x2b\x3a\x4d\xea\x48\x1e\x1f\x37\x7b\xa8\xbd\x7f\xb0\x16'
        dec = asn1.Decoder()
        dec.start(buf)
        tag, val = dec.read()
        assert val == -668929531791034950848739021124816874
        assert dec.eof()
    def test_mix_context_universal(self):
        # Walk a real-world record (base64-encoded) mixing Context-class and
        # Universal-class tags at several nesting levels.
        encoded = 'tYHKgAETgwgBgDgJAGMS9aQGgAQBAAAChQUAh7Mfc6YGgAQBAAABhwx0ZXN0LnRlc3Quc2WIAgEhqQigBoAECtiCBIsBAawuM' \
                  'CyCDAIjYh+TlkBYdGMQQIMBAIQBAIUBAoYJFwkVAClUKwAAiAgAIvIQAG0Yj40JFwkUIylUKwAAjgIOEI8BAJEBAZIJRENQMk' \
                  'dHU04xlQEAlgmRI3cAUGBTA/CXAgAAmAEAmwMi8hCdCFOTKXBYgkMQngECnx8CgAGfIAgAIvIQAG0Yjw=='
        buf = base64.b64decode(encoded)
        dec = asn1.Decoder()
        dec.start(buf)
        tag = dec.peek()
        assert tag.typ == asn1.Types.Constructed
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 21
        dec.enter()
        tag, value = dec.read()
        assert tag.typ == asn1.Types.Primitive
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 0
        assert value == b'\x13'
        tag, value = dec.read()
        assert tag.typ == asn1.Types.Primitive
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 3
        assert value == b'\x01\x80\x38\x09\x00\x63\x12\xf5'
        tag = dec.peek()
        assert tag.typ == asn1.Types.Constructed
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 4
        dec.enter()
        tag, value = dec.read()
        assert tag.typ == asn1.Types.Primitive
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 0
        assert value == b'\x01\x00\x00\x02'
        dec.leave()
        tag, value = dec.read()
        assert tag.typ == asn1.Types.Primitive
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 5
        assert value == b'\x00\x87\xB3\x1F\x73'
        tag = dec.peek()
        assert tag.typ == asn1.Types.Constructed
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 6
        dec.enter()
        tag, value = dec.read()
        assert tag.typ == asn1.Types.Primitive
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 0
        assert value == b'\x01\x00\x00\x01'
        dec.leave()
        tag, value = dec.read()
        assert tag.typ == asn1.Types.Primitive
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 7
        assert value == b'test.test.se'
        tag, value = dec.read()
        assert tag.typ == asn1.Types.Primitive
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 8
        assert value == b'\x01\x21'
        tag = dec.peek()
        assert tag.typ == asn1.Types.Constructed
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 9
        dec.enter()
        tag = dec.peek()
        assert tag.typ == asn1.Types.Constructed
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 0
        dec.enter()
        tag, value = dec.read()
        assert tag.typ == asn1.Types.Primitive
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 0
        assert value == b'\x0A\xD8\x82\x04'
        dec.leave()
        dec.leave()
        tag, value = dec.read()
        assert tag.typ == asn1.Types.Primitive
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 11
        assert value == b'\x01'
        tag = dec.peek()
        assert tag.typ == asn1.Types.Constructed
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 12
        dec.enter()
        tag = dec.peek()
        # Nested Universal SEQUENCE (tag 16) inside a Context wrapper.
        assert tag.typ == asn1.Types.Constructed
        assert tag.cls == asn1.Classes.Universal
        assert tag.nr == 16
        dec.enter()
        tag, value = dec.read()
        assert tag.typ == asn1.Types.Primitive
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 2
        assert value == b'\x02\x23\x62\x1F\x93\x96\x40\x58\x74\x63\x10\x40'
        tag, value = dec.read()
        assert tag.typ == asn1.Types.Primitive
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 3
        assert value == b'\x00'
        tag, value = dec.read()
        assert tag.typ == asn1.Types.Primitive
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 4
        assert value == b'\x00'
        tag, value = dec.read()
        assert tag.typ == asn1.Types.Primitive
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 5
        assert value == b'\x02'
        tag, value = dec.read()
        assert tag.typ == asn1.Types.Primitive
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 6
        assert value == b'\x17\x09\x15\x00\x29\x54\x2B\x00\x00'
        tag, value = dec.read()
        assert tag.typ == asn1.Types.Primitive
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 8
        assert value == b'\x00\x22\xF2\x10\x00\x6D\x18\x8F'
        dec.leave()
        dec.leave()
        tag, value = dec.read()
        assert tag.typ == asn1.Types.Primitive
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 13
        assert value == b'\x17\x09\x14\x23\x29\x54\x2B\x00\x00'
        tag, value = dec.read()
        assert tag.typ == asn1.Types.Primitive
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 14
        assert value == b'\x0E\x10'
        tag, value = dec.read()
        assert tag.typ == asn1.Types.Primitive
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 15
        assert value == b'\x00'
        tag, value = dec.read()
        assert tag.typ == asn1.Types.Primitive
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 17
        assert value == b'\x01'
        tag, value = dec.read()
        assert tag.typ == asn1.Types.Primitive
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 18
        assert value == b'DCP2GGSN1'
        tag, value = dec.read()
        assert tag.typ == asn1.Types.Primitive
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 21
        assert value == b'\x00'
        tag, value = dec.read()
        assert tag.typ == asn1.Types.Primitive
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 22
        assert value == b'\x91\x23\x77\x00\x50\x60\x53\x03\xF0'
        tag, value = dec.read()
        assert tag.typ == asn1.Types.Primitive
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 23
        assert value == b'\x00\x00'
        tag, value = dec.read()
        assert tag.typ == asn1.Types.Primitive
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 24
        assert value == b'\x00'
        tag, value = dec.read()
        assert tag.typ == asn1.Types.Primitive
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 27
        assert value == b'\x22\xF2\x10'
        tag, value = dec.read()
        assert tag.typ == asn1.Types.Primitive
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 29
        assert value == b'\x53\x93\x29\x70\x58\x82\x43\x10'
        tag, value = dec.read()
        assert tag.typ == asn1.Types.Primitive
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 30
        assert value == b'\x02'
        tag, value = dec.read()
        assert tag.typ == asn1.Types.Primitive
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 31
        assert value == b'\x80\x01'
        tag, value = dec.read()
        assert tag.typ == asn1.Types.Primitive
        assert tag.cls == asn1.Classes.Context
        assert tag.nr == 32
        assert value == b'\x00\x22\xF2\x10\x00\x6D\x18\x8F'
        # End of input: peek() returns None rather than raising.
        assert dec.peek() is None
class TestEncoderDecoder(object):
    """Test suite for ASN1 Encoder and Decoder."""

    @staticmethod
    def assert_encode_decode(v, t):
        """Encode *v* as ASN.1 type *t*, decode it back, and compare."""
        encoder = asn1.Encoder()
        encoder.start()
        encoder.write(v, t)
        encoded_bytes = encoder.output()
        decoder = asn1.Decoder()
        decoder.start(encoded_bytes)
        tag, value = decoder.read()
        assert value == v

    def test_boolean(self):
        for flag in (True, False):
            TestEncoderDecoder.assert_encode_decode(flag, asn1.Numbers.Boolean)

    def test_big_numbers(self):
        samples = (
            668929531791034950848739021124816874,
            667441897913742713771034596334288035,
            664674827807729028941298133900846368,
            666811959353093594446621165172641478,
        )
        for number in samples:
            TestEncoderDecoder.assert_encode_decode(number, asn1.Numbers.Integer)

    def test_big_negative_numbers(self):
        samples = (
            -668929531791034950848739021124816874,
            -667441897913742713771034596334288035,
            -664674827807729028941298133900846368,
            -666811959353093594446621165172641478,
        )
        for number in samples:
            TestEncoderDecoder.assert_encode_decode(number, asn1.Numbers.Integer)

    def test_bitstring(self):
        for bits in (b'\x12\x34\x56', b'\x01', b''):
            TestEncoderDecoder.assert_encode_decode(bits, asn1.Numbers.BitString)

    def test_octet_string(self):
        for octets in (b'foo', b'', b'A' * 257):
            TestEncoderDecoder.assert_encode_decode(octets, asn1.Numbers.OctetString)

    def test_null(self):
        TestEncoderDecoder.assert_encode_decode(None, asn1.Numbers.Null)

    def test_object_identifier(self):
        TestEncoderDecoder.assert_encode_decode(
            '1.2.840.113554.1.2.1.1',
            asn1.Numbers.ObjectIdentifier
        )

    def test_enumerated(self):
        for member in (1, 2, 42):
            TestEncoderDecoder.assert_encode_decode(member, asn1.Numbers.Enumerated)

    def test_utf8_string(self):
        for text in ('foo', u'fooé'):
            TestEncoderDecoder.assert_encode_decode(text, asn1.Numbers.UTF8String)

    def test_printable_string(self):
        for text in ('foo', u'fooé'):
            TestEncoderDecoder.assert_encode_decode(text, asn1.Numbers.PrintableString)

    def test_ia5_string(self):
        TestEncoderDecoder.assert_encode_decode('foo', asn1.Numbers.IA5String)

    def test_utc_time(self):
        for stamp in ('920521000000Z', '920622123421Z', '920722132100Z'):
            TestEncoderDecoder.assert_encode_decode(stamp, asn1.Numbers.UTCTime)

    def test_generalized_time(self):
        stamps = (
            '19920521000000Z',
            '19920622123421.123Z',
            '20920722132100-0500',
            '20920722132100+0200',
            '20920722132100.123-0500',
            '20920722132100.123+0200',
        )
        for stamp in stamps:
            TestEncoderDecoder.assert_encode_decode(stamp, asn1.Numbers.GeneralizedTime)

    def test_unicode_string(self):
        for octets in (b'foo', u'fooé'.encode('utf-8')):
            TestEncoderDecoder.assert_encode_decode(octets, asn1.Numbers.UnicodeString)
|
import os
import sys
import read_tree
sys.path.insert(0, 'tools/trees')
"""
This function assumes that both input trees have the exact
same rooted topology, and only differ by their branch lengths
and internal node labels.
It loads tree_to_relabel_path, changes its internal node
labels to match the ones from ref_tree_path and returns the
new tree
"""
def load_relabeled(tree_to_relabel_path, ref_tree_path):
    """Return the tree at *tree_to_relabel_path* with its internal-node
    labels replaced by those of the reference tree at *ref_tree_path*.

    Assumes both trees have the exact same rooted topology and node names,
    differing only in branch lengths and internal labels.
    """
    reference = read_tree.read_tree(ref_tree_path)
    # Map every non-root node of the reference tree by its name.
    label_map = {}
    for ref_node in reference.traverse():
        if not ref_node.is_root():
            label_map[ref_node.name] = ref_node
    relabeled = read_tree.read_tree(tree_to_relabel_path)
    # For each non-root node, copy its parent's label from the matching
    # reference node's parent.
    for node in relabeled.traverse("postorder"):
        if not node.is_root():
            node.up.name = label_map[node.name].up.name
    return relabeled
if __name__ == "__main__":
    # Expect exactly three positional arguments after the script name.
    if len(sys.argv) < 4:
        # Fixed: the original message concatenated the script name directly
        # onto the first argument name (no separating space), printing e.g.
        # "python relabel.pytree_to_relabel_path, ...".
        print("Syntax: python " + os.path.basename(__file__) +
              " tree_to_relabel_path ref_tree_path output")
        sys.exit(1)
    tree_to_relabel_path = sys.argv[1]
    ref_tree_path = sys.argv[2]
    output = sys.argv[3]
    tree = load_relabeled(tree_to_relabel_path, ref_tree_path)
    # Newick format 1 preserves internal node names in the output file.
    tree.write(outfile=output, format=1)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:hua
from flask import Flask
from Machine_Learning.KNN import KNN_blue
from Machine_Learning.test import test_blue
app = Flask(__name__)
# Register the blueprints on the application.
app.register_blueprint(KNN_blue, url_prefix='/KNNS')
app.register_blueprint(test_blue, url_prefix='/test')
if __name__ == '__main__':
    # print(app.url_map)
    app.run(debug=True)
    # Enable multithreading:
    # app.run(debug=False,threaded=True)
    # Enable multiple processes; on Windows the process count cannot exceed
    # 1, otherwise an error is raised:
    # app.run(processes=1)
from statistics import mean, pstdev
# Trading parameters shared by all Bollinger instances.
amount = 10  # lot size passed to open_trade()
loss = 2  # stop distance, as a percentage of the band width
profit = 8  # limit distance, as a percentage of the band width
class Bollinger:
    """Bollinger-band trading strategy for a single instrument.

    Computes 20-period Bollinger bands (mean +/- 2 standard deviations) on
    1-minute candles, for both bid and ask close prices, and uses them to
    open positions and trail stops via the *fx* connection object.
    """
    def __init__(self, name, fx):
        # name: instrument symbol; fx: broker connection providing
        # get_candles(), get_last_price() and open_trade().
        self.name = name
        self.fx = fx
        self.arr5 = fx.get_candles(instrument=self.name, period='m1', number=20)
        self.arr15 = fx.get_candles(instrument=self.name, period='m5', number=20)
        # Bid-side bands: mid = mean, top/bot = mean +/- 2 population stdev.
        deviation = pstdev(self.arr5['bidclose'])
        self.mid = mean(self.arr5['bidclose'])
        self.top = self.mid + 2 * deviation
        self.bot = self.mid - 2 * deviation
        # Ask-side bands (prefixed with 'a').
        deviation = pstdev(self.arr5['askclose'])
        self.amid = mean(self.arr5['askclose'])
        self.atop = self.amid + 2 * deviation
        self.abot = self.amid - 2 * deviation
        # Band widths, used to size stop/limit distances.
        self.diff = self.top - self.bot
        self.adiff = self.atop - self.abot
    def calculate_price(self, percentage, _type):
        # Price at *percentage* of the way up the band: bid bands for
        # "buy", ask bands for "sell". Returns None for any other _type.
        if (_type == "buy"):
            return self.bot + self.diff * (percentage / 100)
        if (_type == "sell"):
            return self.abot + self.adiff * (percentage / 100)
    def calculate_margin(self, last_values, rate_type, _type):
        # Stop/limit rate relative to the current Ask (sell) or Bid (buy),
        # offset by the loss/profit percentage of the band width.
        if _type == "sell":
            if rate_type == "stop":
                return last_values['Ask'] + self.adiff * (loss / 100)
            if rate_type == "limit":
                return last_values['Ask'] - self.adiff * (profit / 100)
        if _type == "buy":
            if rate_type == "stop":
                return last_values['Bid'] - self.diff * (loss / 100)
            if rate_type == "limit":
                return last_values['Bid'] + self.diff * (profit / 100)
    def check_buy(self):
        # Open a position when both the m1 and m5 series move in the same
        # direction and the current price sits inside the 20%-80% band.
        last_values = self.fx.get_last_price(self.name)  # [bid, ask, high, low]
        if self.arr5['askclose'][19] < self.arr5['askclose'][18] and self.arr15['askclose'][19] < self.arr15['askclose'][18]\
                and last_values['Ask'] < self.calculate_price(80, "sell") and last_values['Ask'] > self.calculate_price(20, "sell"):
            stop = self.calculate_margin(last_values, "stop", "sell")
            limit = self.calculate_margin(last_values, "limit", "sell")
            print("Open a sell position for " + self.name)
            print("open = {:.6}\tstop = {:.6}\tlimit = {:.6}".format(last_values['Ask'], stop, limit))
            self.fx.open_trade(symbol=self.name, is_buy=False, amount=str(amount), time_in_force='GTC', order_type="AtMarket", is_in_pips=False, stop=str(stop), limit=str(limit))
            return {
                "type": "sell",
                "open": last_values['Ask'],
                "stop": stop,
                "limit": limit
            }
        if self.arr5['bidclose'][19] > self.arr5['bidclose'][18] and self.arr15['bidclose'][19] > self.arr15['bidclose'][18]\
                and last_values['Bid'] < self.calculate_price(80, "buy") and last_values['Bid'] > self.calculate_price(20, "buy"):
            stop = self.calculate_margin(last_values, "stop", "buy")
            limit = self.calculate_margin(last_values, "limit", "buy")
            print("Open a buy position for " + self.name)
            print("open = {:.6}\tstop = {:.6}\tlimit = {:.6}".format(last_values['Bid'], stop, limit))
            self.fx.open_trade(symbol=self.name, is_buy=True, amount=str(amount), time_in_force='GTC', order_type="AtMarket", is_in_pips=False, stop=str(stop), limit=str(limit))
            return {
                "type": "buy",
                "open": last_values['Bid'],
                "stop": stop,
                "limit": limit
            }
        # No signal: return an empty position record.
        return {
            "type": "",
            "open": 0.0,
            "stop": 0.0,
            "limit": 0.0
        }
    def check_PL(self, tradeId, position, l):
        # Trail the stop of an open *position* when the trend continues.
        # Parameters tradeId and l are currently unused.
        if position["type"] == "sell" and self.arr5['askclose'][19] < self.arr5['askclose'][18] and self.arr15['askclose'][19] < self.arr15['askclose'][18]:
            last_values = self.fx.get_last_price(self.name)  # [bid, ask, high, low]
            # NOTE(review): this uses the "buy" margin formula for a sell
            # position (Bid - diff*loss%), unlike check_buy which used the
            # "sell" formula to open it -- confirm this is intentional.
            stop = self.calculate_margin(last_values, "stop", "buy")
            # limit = self.calculate_margin(last_values, "limit", "buy")
            if stop < position['stop']:
                print("Change stop rate for " + self.name)
                return {
                    "type": position['type'],
                    "open": position['open'],
                    "stop": stop,
                    "limit": position['limit']
                }
        if position["type"] == "buy" and self.arr5['bidclose'][19] > self.arr5['bidclose'][18] and self.arr15['bidclose'][19] > self.arr15['bidclose'][18]:
            last_values = self.fx.get_last_price(self.name)  # [bid, ask, high, low]
            stop = self.calculate_margin(last_values, "stop", "buy")
            # limit = self.calculate_margin(last_values, "limit", "buy")
            if stop > position['stop']:
                print("Change stop rate for " + self.name)
                return {
                    "type": position['type'],
                    "open": position['open'],
                    "stop": stop,
                    "limit": position['limit']
                }
        # No adjustment: return the position unchanged.
        return position
|
import warnings
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from time import time
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_score, recall_score, accuracy_score
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation
from tensorflow.keras.layers import Embedding, LSTM, SpatialDropout1D
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.optimizers import Adam
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
warnings.filterwarnings("ignore") # Hide messy Numpy warnings
class LSTM_model:
    """Binary text classifier ("funny or not") built on a Keras LSTM.

    Wraps tokenization, train/test splitting, model construction, training
    and evaluation/plotting helpers around a Keras Sequential model.
    """
    def __init__(self):
        self.model = None      # compiled keras model, set by build_model1/2
        self.history = []      # keras History object, set by fit()
    def tokenize(self, MAX_NB_WORDS, MAX_SEQUENCE_LENGTH, df):
        """Tokenize df.text and return padded sequences X and labels y."""
        # Raw string avoids the invalid '\]' escape warning; byte value of
        # the filter set is unchanged.
        tokenizer = Tokenizer(num_words=MAX_NB_WORDS, filters=r'!"#$%&()*+,-./:;<=>?@[\]^_`{|}~')
        tokenizer.fit_on_texts(df.text.values)
        word_index = tokenizer.word_index
        print('Found %s unique tokens.' % len(word_index))
        X = tokenizer.texts_to_sequences(df.text.values)
        X = pad_sequences(X, maxlen=MAX_SEQUENCE_LENGTH)
        print('Shape of data tensor:', X.shape)
        y = df['funny or not'].values
        print('Shape of label tensor:', y.shape)
        return X, y
    def split_train_test(self, X, y, test_size=0.2):
        """Split into train/test with a fixed random seed for reproducibility."""
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42)
        print('Shape of X and y train:', X_train.shape, y_train.shape)
        print('Shape of X and y test:', X_test.shape, y_test.shape)
        return X_train, X_test, y_train, y_test
    def balance_train_undersample(self, X_train, y_train):
        """Balance classes by undersampling the majority (not funny) class."""
        X_train_funny = pd.DataFrame(X_train[y_train == 1])
        X_train_not_funny = pd.DataFrame(X_train[y_train == 0]).sample(n=len(X_train_funny), random_state=42)
        y_train_funny = pd.DataFrame(y_train[y_train == 1])
        y_train_not_funny = pd.DataFrame(y_train[y_train == 0]).sample(n=len(y_train_funny), random_state=42)
        X_train_balanced = pd.concat([X_train_funny, X_train_not_funny])
        y_train_balanced = pd.concat([y_train_funny, y_train_not_funny])
        print('Shape of balanced X and y train:', X_train_balanced.shape, y_train_balanced.shape)
        return X_train_balanced, y_train_balanced
    def build_model1(self, X, MAX_NB_WORDS, EMBEDDING_DIM, LSTM_OUT, optimizer):
        """Build and compile a single-layer LSTM model; stores and returns it."""
        print('>> Building model...')
        model = Sequential()
        model.add(Embedding(MAX_NB_WORDS, EMBEDDING_DIM, input_length=X.shape[1]))
        model.add(SpatialDropout1D(0.4))
        model.add(LSTM(LSTM_OUT, dropout=0.2, recurrent_dropout=0.2))
        model.add(Dense(1, activation='sigmoid'))
        model.compile(loss='binary_crossentropy', optimizer=optimizer,
                      metrics=['accuracy', tf.keras.metrics.Precision(name='precision'),
                               tf.keras.metrics.Recall(name='recall')])
        print('>> Compiled')
        self.model = model
        return self.model
    def build_model2(self, X, MAX_NB_WORDS, EMBEDDING_DIM, LSTM_OUT_1, LSTM_OUT_2, optimizer):
        """Build and compile a stacked two-layer LSTM model; stores and returns it."""
        print('>> Building model...')
        model = Sequential()
        model.add(Embedding(MAX_NB_WORDS, EMBEDDING_DIM, input_length=X.shape[1]))
        model.add(SpatialDropout1D(0.2))
        model.add(LSTM(LSTM_OUT_1, dropout=0.2, recurrent_dropout=0.2, return_sequences=True))
        model.add(LSTM(LSTM_OUT_2, dropout=0.2, recurrent_dropout=0.2))
        model.add(Dense(1, activation='sigmoid'))
        # Fixed: the optimizer parameter was ignored (hard-coded 'adam'),
        # making it inconsistent with build_model1.
        model.compile(loss='binary_crossentropy', optimizer=optimizer,
                      metrics=['accuracy', tf.keras.metrics.Precision(name='precision'),
                               tf.keras.metrics.Recall(name='recall')])
        print('>> Compiled')
        self.model = model
        return self.model
    def fit(self, X_train, y_train, batch_size=64, epochs=3, validation_split=0.20):
        """Train the stored model with early stopping; returns the model."""
        start_time = time()
        print('>> Fitting model...')
        self.history = self.model.fit(X_train, y_train,
                                      epochs=epochs,
                                      batch_size=batch_size,
                                      validation_split=validation_split,
                                      verbose=1,
                                      callbacks=[EarlyStopping(monitor='val_loss', patience=3, min_delta=0.0001)])
        print(self.model.summary())
        elapsed_time = time() - start_time
        print('>> Completed')
        print('>> Training duration (s): {0}'.format(elapsed_time))
        return self.model
    def evaluate(self, X_test, y_test):
        """Print loss/accuracy/precision/recall on the test set."""
        scores = self.model.evaluate(X_test, y_test)
        print('Test set\n  Loss: {:0.4f}\n  Accuracy: {:0.4f}\n Precision: {:0.4}\n Recall: {:0.4f}'.format(scores[0],scores[1], scores[2], scores[3]))
    def plot_cm(self, X_test, y_test):
        """Print metrics and plot the confusion matrix on the test set."""
        # NOTE(review): Sequential.predict_classes was removed in TF >= 2.6;
        # on newer versions use (model.predict(X) > 0.5).astype('int32').
        y_pred = self.model.predict_classes(X_test)
        accuracy = accuracy_score(y_test, y_pred)
        precision = precision_score(y_test, y_pred)
        recall = recall_score(y_test, y_pred)
        print('Accuracy: {:0.4f}\nPrecision: {:0.4}\nRecall: {:0.4f}'.format(accuracy, precision, recall))
        cm = confusion_matrix(y_test, y_pred)
        # plot confusion matrix
        plt.style.use('default')
        disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=['funny','not funny'])
        disp.plot(cmap='Spectral_r', values_format='')
        plt.title('Confusion Matrix of Funny vs Not Funny')
        plt.tight_layout()
        #plt.savefig('confusion_matrix.png')
        plt.show()
    def plot_recall(self):
        """Plot train/validation recall per epoch from the last fit()."""
        plt.title('Recall')
        plt.plot(self.history.history['recall'], label='train')
        plt.plot(self.history.history['val_recall'], label='test')
        plt.ylabel('recall')
        plt.xlabel('epoch')
        plt.legend()
        #plt.savefig('model_recall.png')
        plt.show()
# -*- coding: utf-8 -*-
from sefaria.model import *
from sefaria.tracker import add
import urllib
import urllib2
from urllib2 import URLError, HTTPError
import json
def post_index(index):
    # POST an index record to the Sefaria API (Python 2 code: urllib2,
    # print statements, comma-style except clauses).
    # SECURITY NOTE(review): the API key is hard-coded in source; it should
    # be loaded from the environment or a config file instead.
    url = 'http://www.sefaria.org/api/index/' + index["title"].replace(" ", "_")
    indexJSON = json.dumps(index)
    print indexJSON
    values = {
        'json': indexJSON,
        'apikey': 'F4J2j3RF6fHWHLtmAtOTeZHE3MOIcsgvcgtYSwMzHtM'
    }
    data = urllib.urlencode(values)
    req = urllib2.Request(url, data)
    try:
        response = urllib2.urlopen(req)
        print response.read()
    except HTTPError, e:
        # Only HTTP errors are handled; URLError would propagate.
        print 'Error code: ', e.code
# Build the Sefaria content schema for "Netivot Olam": an introduction node
# followed by one two-level (Chapter/Paragraph) node per netiv.
root = SchemaNode()
root.add_title("Netivot Olam", "en", primary=True)
root.add_title(u"נתיבות עולם", "he", primary=True)
root.key = "netivot"
# Introduction: a flat list of paragraphs.
part1 = JaggedArrayNode()
part1.add_title(u"הקדמה", "he", primary=True)
part1.add_title("Introduction to Netivot Olam", "en", primary=True)
part1.depth = 1
part1.sectionNames = ["Paragraph"]
part1.addressTypes = ["Integer"]
part1.key = "intro"
root.append(part1)
# Each entry: [english title, chapter count, hebrew title].
subsections = [
    ["Netiv Hatorah", 18, u"נתיב התורה"],
    ["Netiv Ha'Avodah", 19, u"נתיב העבודה"],
    ["Netiv Gmilut Chasadim", 5, u"נתיב גמילות חסדים"],
    ["Netiv Hatzdaka", 6, u"נתיב הצדקה"],
    ["Netiv Hadin", 2, u"נתיב הדין"],
    ["Netiv Haemet", 3, u"נתיב האמת"],
    ["Netiv Haemuna", 2, u"נתיב האמונה"],
    ["Netiv Hashalom", 3, u"נתיב השלום"],
    ["Netiv Ahavat Reia", 3, u"נתיב אהבת ריע"],
    ["Netiv Haanava", 8, u"נתיב הענוה"],
    ["Netiv Yirat Hashem", 6, u"נתיב יראת השם"],
    ["Netiv Halashon", 11, u"נתיב הלשון"],
    ["Netiv Ahavat Hashem", 2, u"נתיב אהבת השם"],
    ["Netiv Hashtika", 1, u"נתיב השתיקה"],
    ["Netiv Haprishut", 3, u"נתיב הפּרישות"],
    ["Netiv Hatzniut", 4, u"נתיב הצניעות"],
    ["Netiv Koach Hayeitzer", 4, u"נתיב כח היצר"],
    ["Netiv Hatzedek", 3, u"נתיב הצדק"],
    ["Netiv Hatshuva", 8, u"נתיב התשובה"],
    ["Netiv Hayisurin", 3, u"נתיב היסורין"],
    ["Netiv Hazrizut", 2, u"נתיב הזריזות"],
    ["Netiv Hatochacha", 3, u"נתיב התוכחה"],
    ["Netiv Habusha", 2, u"נתיב הבושה"],
    ["Netiv Hatmimut", 2, u"נתיב התמימות"],
    ["Netiv Lev Tov", 1, u"נתיב לב טוב"],
    ["Netiv Ayin Tov", 1, u"נתיב עין טוב"],
    ["Netiv Haleitzanut", 2, u"נתיב הליצנות"],
    ["Netiv Haosher", 2, u"נתיב העושר"],
    ["Netiv Habitachon", 1, u"נתיב הבטחון"],
    ["Netiv Hakaas", 2, u"נתיב הכעס"],
    ["Netiv Hanedivut", 1, u"נתיב הנדיבות"],
    ["Netiv Sheim Tov", 1, u"נתיב שם טוב"],
    ["Netiv Derech Eretz", 1, u"נתיב דרך ארץ"],
]
for sub in subsections:
    n = JaggedArrayNode()
    n.key = sub[0]
    n.add_title(sub[0], "en", primary=True)
    n.add_title(sub[2], "he", primary=True)
    n.depth = 2
    n.lengths = [sub[1]]
    n.sectionNames = ["Chapter", "Paragraph"]
    n.addressTypes = ["Integer", "Integer"]
    root.append(n)
# Validate locally, push to the remote API, then save locally.
root.validate()
index = {
    "title": "Netivot Olam",
    "categories": ["Philosophy", "Maharal"],
    "schema": root.serialize()
}
post_index(index)
Index(index).save()
|
import boto3
def create_instance(instance_name, avail_zone, image_name, os_name, bundle):
    """Create an AWS Lightsail instance in region ap-south-1.

    Parameters:
        instance_name: name for the new instance
        avail_zone:    availability zone, e.g. 'ap-south-1a'
        image_name:    custom image name passed through to Lightsail
        os_name:       blueprint id (OS image), e.g. 'ubuntu_18_04'
        bundle:        bundle id (instance size), e.g. 'nano_2_1'
    """
    conn = boto3.client('lightsail', region_name='ap-south-1')
    # Validate the requested blueprint (OS image) id against the live list.
    blueprint_ids = [b['blueprintId'] for b in conn.get_blueprints()['blueprints']]
    if os_name not in blueprint_ids:
        print("Wrong Id")
        # Exit non-zero so shells/scripts can detect the failure
        # (previously exited with 0, signalling success on error).
        exit(1)
    # Validate the requested bundle (size) id the same way.
    bundle_ids = [b['bundleId'] for b in conn.get_bundles()['bundles']]
    if bundle not in bundle_ids:
        print("Wrong bundle")
        exit(1)
    # Both ids are valid -- create the lightsail instance.
    response = conn.create_instances(
        instanceNames=[instance_name],
        availabilityZone=avail_zone,
        customImageName=image_name,
        blueprintId=os_name,
        bundleId=bundle
    )
    print(response)
# Example invocation: provision a nano Ubuntu 18.04 instance.
create_instance('instance1', 'ap-south-1a', 'ubuntu1', 'ubuntu_18_04', 'nano_2_1')
|
#!/usr/bin/env python
import sys, re
from unidecode import unidecode
import bibtexparser
from bibtexparser.bwriter import BibTexWriter
import http.client as httplib
import urllib
# Search for the DOI given a title; e.g. "computation in Noisy Radio Networks"
# Credit to user13348, slight modifications
# http://tex.stackexchange.com/questions/6810/automatically-adding-doi-fields-to-a-hand-made-bibliography
def searchdoi(title, author):
    """POST a guest query to crossref.org for *title*/*author* and return a
    regex match whose group(1) is the DOI, or None when no DOI link is found.
    """
    form = {
        "titlesearch": "titlesearch",
        "auth2": author,
        "atitle2": title,
        "multi_hit": "on",
        "article_title_search": "Search",
        "queryType": "author-title",
    }
    body = urllib.parse.urlencode(form)
    headers = {"User-Agent": "Mozilla/5.0", "Accept": "text/html", "Content-Type": "application/x-www-form-urlencoded", "Host": "www.crossref.org"}
    # Crossref now requires HTTPS for this endpoint (plain HTTP stopped working).
    conn = httplib.HTTPSConnection("www.crossref.org")
    conn.request("POST", "/guestquery/", body, headers)
    reply = conn.getresponse()
    payload = reply.read()
    conn.close()
    # First doi.org link in the returned HTML, if any.
    return re.search(r'doi\.org/([^"^<^>]+)', str(payload))
def normalize(string):
    """Return *string* as plain ASCII with LaTeX markup stripped."""
    # Drop grouping/escape characters first, then whole math expressions.
    without_markup = re.sub(r'[{}\\\'"^]', "", string)
    without_math = re.sub(r"\$.*?\$", "", without_markup)
    return unidecode(without_math)
def get_authors(entry):
    """Get a list of authors' or editors' last names for a bibtex *entry*.

    Falls back to the ``editor`` field when ``author`` is absent; a KeyError
    from that fallback propagates to the caller.
    """
    def get_last_name(authors):
        # Yield the surname of each normalized author string.
        for author in authors:
            author = author.strip(" ")
            if "," in author:
                yield author.split(",")[0]
            elif " " in author:
                yield author.split(" ")[-1]
            else:
                yield author

    try:
        authors = entry["author"]
    except KeyError:
        authors = entry["editor"]
    # BibTeX separates authors with the word "and" surrounded by whitespace.
    # Splitting on the bare substring "and" (as before) also split inside
    # names such as "Anderson" or "Alexander"; require whitespace around it.
    authors = re.split(r"\s+and\s+", normalize(authors))
    return list(get_last_name(authors))
# --- Script body: load the bibliography named on the command line, look up
# --- a DOI for every entry lacking one, and write an annotated copy.
print("Reading Bibliography...")
with open(sys.argv[1]) as bibtex_file:
    bibliography = bibtexparser.load(bibtex_file)
print("Looking for Dois...")
before = 0  # entries that already had a DOI
new = 0     # entries for which a DOI was found
total = len(bibliography.entries)
for i,entry in enumerate(bibliography.entries):
    print("\r{i}/{total} entries processed, please wait...".format(i=i,total=total),flush=True,end="")
    try:
        if "doi" not in entry or entry["doi"].isspace():
            title = entry["title"]
            authors = get_authors(entry)
            # Try each author in turn; the last successful match wins.
            for author in authors:
                doi_match = searchdoi(title,author)
                if doi_match:
                    doi = doi_match.groups()[0]
                    entry["doi"] = doi
                    new += 1
        else:
            before += 1
    except:
        # NOTE(review): this bare except hides network errors and missing
        # 'title'/'author' keys alike -- narrowing it would aid debugging.
        pass
print("")
template="We added {new} DOIs !\nBefore: {before}/{total} entries had DOI\nNow: {after}/{total} entries have DOI"
print(template.format(new=new,before=before,after=before+new,total=total))
outfile = sys.argv[1]+"_doi.bib"
print("Writing result to ",outfile)
writer = BibTexWriter()
writer.indent = ' '  # indent entries with a single space
with open(outfile, 'w') as bibfile:
    bibfile.write(writer.write(bibliography))
|
import logging
import os
import sqlalchemy as sa
from sqlalchemy.orm import scoped_session, sessionmaker
from settings import *
from models import Base, ChatMessage
logger = logging.getLogger('chat_message_parser')
def create_mysql_pool():
    """Create a SQLAlchemy engine bound to MYSQL_DB, creating the database
    on the server first when it does not exist yet.

    Returns the final engine (connections select the database via the URL).
    """
    mysql_host = MYSQL_HOST
    # NOTE(review): mysql_port is read but never used in either URL.
    mysql_port = MYSQL_PORT
    mysql_db = MYSQL_DB
    mysql_user = MYSQL_USER
    mysql_passwd = MYSQL_PASSWD
    # max_connections default for mysql = 100
    # set mysql connections to 90 and 5 for sqlalchemy buffer
    mysql_pool = sa.create_engine(
        'mysql://{user}:{passwd}@{host}'.format(
            user=mysql_user,
            passwd=mysql_passwd,
            host=mysql_host
        ),
        pool_size=10,
        max_overflow=5,
        pool_recycle=3600,
    )
    # Probe for the database; create it when the USE statement fails.
    try:
        mysql_pool.execute("USE {db}".format(
            db=mysql_db)
        )
    except sa.exc.OperationalError:
        logger.info('DATABASE {db} DOES NOT EXIST. CREATING...'.format(
            db=mysql_db)
        )
        mysql_pool.execute("CREATE DATABASE {db}".format(
            db=mysql_db)
        )
        mysql_pool.execute("USE {db}".format(
            db=mysql_db)
        )
    # Rebuild the engine with the database in the URL so every pooled
    # connection starts with the right schema selected.
    mysql_pool = sa.create_engine(
        'mysql://{user}:{passwd}@{host}/{db}'.format(
            user=mysql_user,
            passwd=mysql_passwd,
            host=mysql_host,
            db=mysql_db
        ),
        pool_size=10,
        pool_recycle=3600,
    )
    return mysql_pool
# Create all model tables on import (first run), then release the pool.
init_mysql_pool = create_mysql_pool()
Base.metadata.create_all(init_mysql_pool, checkfirst=True)
init_mysql_pool.dispose()
def batch_insert(session, result):
    """
    Transactionally insert the ChatMessage objects in *result*, skipping any
    whose primary key already exists in the database.

    Always closes the session; returns True (kept for existing callers).
    """
    # Collect only the rows not yet persisted.  Renamed so the list no
    # longer shadows this function's own name.
    pending = []
    for batch_object in result:
        # Query.get() takes the primary-key value itself; the previous code
        # passed a one-element set literal, which is not a valid identifier.
        record = session.query(ChatMessage).get(batch_object.id)
        if record:
            #logger.info('duplicate entry in db {0}'.format(batch_object.id))
            continue
        pending.append(batch_object)
    if pending:
        try:
            session.add_all(pending)
            session.commit()
        except sa.exc.IntegrityError:
            logger.info('something HORRIBLE has happened')
            session.rollback()
    session.close()
    return True
|
from django.conf.urls import url, include
from rest_framework import routers
from . import views, api_views, serializers
# DRF router exposing the REST endpoints under /rest-api/.
router = routers.DefaultRouter()
router.register(r'users', api_views.UserViewSet)
router.register(r'groups', api_views.GroupViewSet)
# NOTE(review): CategoryViewSet is imported from serializers -- confirm it
# shouldn't live in api_views like the other viewsets.
router.register(r'category', serializers.CategoryViewSet)
router.register(r'artist', api_views.ArtistViewSet)

urlpatterns = [
    url(r'^rest-api/', include(router.urls)),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^album/', views.album_page, name="album"),
    url(r'^album_search/', views.album_search_page, name="albumSearch"),
    url(r'^track/', views.track_page, name="track"),
    url(r'^video/', views.video_page, name="video"),
    url(r'^carousel/', views.carousel_page, name="carousel"),
    url(r'^category/', views.category_page, name="category"),
    url(r'^artist/', views.artist_page, name="artist"),
    # Anchor at both ends: the bare r'$' matched *every* path (the resolver
    # searches, and every string has an end), turning the welcome view into
    # an unintended catch-all instead of only matching the site root.
    url(r'^$', views.welcome, name="welcome"),
]
|
#!/usr/bin/python
"""
check_updates - A tool for checking whether a local repository is in-sync with
a remote repository.
The script assumes the classic NVR format for the remote repository and NVrR
format for local. NVrR format is where the upstream release is tagged on to the
end of the version. This allows a local package to have its own local release
while still maintaining the full version information of the upstream project(s).
For example:
mypkg-1.2.3-4 upstream would become mypkg-1.2.3r4-X locally (where X is the
local release number).
The docstrings in this module use the following terminology:
Package list - "A list of tuples,
each tuple containing the package (Name, Ver, Rel)"
Package dictionary - "A dictionary of tuples, the key is package name,
the value is the (Ver, Rel)"
"""
import os
import re
import sys
import rpm
import urllib
# Regexes for extracting fields from spec files.  SPEC_VERS_RE captures the
# NVrR format: upstream version in group(1), upstream release in group(2).
SPEC_NAME_RE = "Name:\s+(.*)"
SPEC_VERS_RE = "Version:\s+(.*)r([0-9]+)"
# There is no way to write this REGEXP any shorter
#pylint: disable=line-too-long
# SRPM file-name pattern: groups = (name, version, release, disttag?, src marker)
SRPM_LINK_RE = "([a-zA-Z0-9._+-]+)-([0-9][a-zA-Z0-9.-]*)-([0-9]+)(.[a-zA-Z0-9.-]+)?.(src.|s)rpm"
# Remote repositories scanned for upstream source RPMs.
UPSTREAM_REPOS = [
    "http://emisoft.web.cern.ch/emisoft/dist/EMI/3/sl6/SRPMS/base/",
    "http://emisoft.web.cern.ch/emisoft/dist/EMI/3/sl6/SRPMS/updates/",
]
def load_urls(urllist):
    """ Takes a list of URLs and returns a package list.
    This loads the URLs one-by-one and applies SRPM_LINK_RE to each line.
    If a match is found, it's added on to the return list. The return list
    ordering is undefined. The packages should be in NVR format.
    """
    pkg_list = []
    matcher = re.compile(SRPM_LINK_RE)
    for url in urllist:
        # NOTE: urllib.urlopen is the Python 2 API (this script targets py2).
        handle = urllib.urlopen(url)
        for data in handle:
            match = matcher.search(data)
            if not match:
                continue
            # (name, version, release) parsed from the SRPM link text
            pkg_list.append((match.group(1), match.group(2), match.group(3)))
        handle.close()
    return pkg_list
def find_specs(dirname):
    """Walk the tree rooted at *dirname* and return the paths of every
    ``.spec`` file found anywhere beneath it."""
    return [
        os.path.join(parent, filename)
        for parent, _, filenames in os.walk(dirname)
        for filename in filenames
        if filename.endswith(".spec")
    ]
def load_specs(speclist):
    """Parse each spec file in *speclist* and return a package list of
    (name, version, release) tuples built from its Name:/Version: lines.

    Return ordering is undefined; spec/RPM macros are NOT expanded.
    The last matching line in a file wins.
    """
    name_re = re.compile(SPEC_NAME_RE)
    vers_re = re.compile(SPEC_VERS_RE)
    parsed = []
    for path in speclist:
        pkg_name = None
        pkg_vers = None
        pkg_rel = None
        with open(path, 'r') as spec_file:
            for line in spec_file:
                got_name = name_re.match(line)
                if got_name:
                    pkg_name = got_name.group(1)
                got_vers = vers_re.match(line)
                if got_vers:
                    pkg_vers = got_vers.group(1)
                    pkg_rel = got_vers.group(2)
        parsed.append((pkg_name, pkg_vers, pkg_rel))
    return parsed
def find_newest(pkg_list, pkg_name):
    """ Find the newest version of pkg_name from the given package list,
    returns the (N, V, R) tuple for the newest package.
    If the package can't be found (pkg_name, '0', '0') will be returned.
    """
    best_ver = '0'
    best_rel = '0'
    for name, ver, rel in pkg_list:
        if name == pkg_name:
            # labelCompare returns -1/0/1 (older/equal/newer).  The previous
            # truthiness test also accepted -1, so an *older* candidate
            # could replace the current best; require strictly newer.
            if rpm.labelCompare(('1', ver, rel), ('1', best_ver, best_rel)) > 0:
                best_ver = ver
                best_rel = rel
    return (pkg_name, best_ver, best_rel)
def filter_newest(pkg_list):
    """ Takes a package list and returns a package dictionary containing
    only the details of the newest version of each package.

    Returns an empty dict for an empty package list (the previous
    ``zip(*pkg_list)[0]`` raised IndexError on empty input and does not
    work on Python 3, where zip() is not subscriptable).
    """
    newest_pkgs = {}
    for pkg in set(name for name, _, _ in pkg_list):
        newest = find_newest(pkg_list, pkg)
        newest_pkgs[newest[0]] = (newest[1], newest[2])
    return newest_pkgs
def print_diffs(upstream_pkgs, local_pkgs):
    """ Prints out all the differents between two package dictionaries in
    a user friendly format.
    """
    # Compare the two and print summaries of mismatches
    upstream_names = set(upstream_pkgs.keys())
    local_names = set(local_pkgs.keys())
    if len(upstream_names - local_names):
        # There are missing packages from the local repo
        missing_pkgs = list(upstream_names - local_names)
        missing_pkgs.sort()
        num_missing = "%d" % len(missing_pkgs)
        print ""
        print "%s Missing Packages" % num_missing
        # Underline is padded to match the heading width above.
        print "%s=================" % ("=" * len(num_missing))
        for pkg in missing_pkgs:
            ver, rel = upstream_pkgs[pkg]
            print "%s-%s-%s" % (pkg, ver, rel)
        print ""
    if len(local_names - upstream_names):
        # There are extra packages in the local repo?
        extra_pkgs = list(local_names - upstream_names)
        extra_pkgs.sort()
        num_extra = "%d" % len(extra_pkgs)
        print ""
        print "%s Extra Packages" % num_extra
        print "%s===============" % ("=" * len(num_extra))
        for pkg in extra_pkgs:
            ver, rel = local_pkgs[pkg]
            print "%s-%s-%s" % (pkg, ver, rel)
        print ""
    # Now just compare the versions of everything in both
    old_pkgs = []
    for pkg in local_names & upstream_names:
        upstream_ver, upstream_rel = upstream_pkgs[pkg]
        local_ver, local_rel = local_pkgs[pkg]
        # Truthy labelCompare means *any* difference (newer or older) is
        # reported -- consistent with the "Local != Remote" heading below.
        if rpm.labelCompare(('1', upstream_ver, upstream_rel),
                            ('1', local_ver, local_rel)):
            old_pkgs.append(pkg)
    if len(old_pkgs):
        old_pkgs.sort()
        num_old = "%d" % len(old_pkgs)
        print ""
        print "%s Outdated Packages (Local != Remote)" % num_old
        print "%s====================================" % ("=" * len(num_old))
        for pkg in old_pkgs:
            upstream_ver, upstream_rel = upstream_pkgs[pkg]
            local_ver, local_rel = local_pkgs[pkg]
            print "%s-%s-%s != %s-%s-%s" % (pkg, local_ver, local_rel,
                                            pkg, upstream_ver, upstream_rel)
        print ""
    return
def usage():
    """ Print the application usage information. """
    print ""
    print "Usage: check_updates <directory>"
    print "       Directory is local repo containing spec files."
    print ""
    return
def main():
    """ The main entry point for the application. """
    # Exactly one argument is expected: the local repo directory.
    if len(sys.argv) != 2:
        usage()
        return
    pkg_path = sys.argv[1]
    # Get & filter the remote package list
    upstream_pkgs_all = load_urls(UPSTREAM_REPOS)
    if not len(upstream_pkgs_all):
        print "ERROR: Failed to find any upstream packages."
        return
    upstream_pkgs = filter_newest(upstream_pkgs_all)
    # Get & filter the local package list
    local_pkgs_all = load_specs(find_specs(pkg_path))
    if not len(local_pkgs_all):
        print "ERROR: Failed to find any local packages."
        return
    local_pkgs = filter_newest(local_pkgs_all)
    # Print the comparison
    print_diffs(upstream_pkgs, local_pkgs)
    return

if __name__ == '__main__':
    main()
|
# Copyright 2020 Pulser Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Channel subclasses."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Optional
from pulser.channels.base_channel import Channel, Literal
from pulser.channels.eom import RydbergEOM
@dataclass(init=True, repr=False, frozen=True)
class Raman(Channel):
    """Raman beam channel.
    Channel targeting the transition between the hyperfine ground states, in
    which the 'digital' basis is encoded. See base class.
    """

    @property
    def basis(self) -> Literal["digital"]:
        """The addressed basis name."""
        # Fixed for this channel type; other Channel subclasses differ.
        return "digital"
@dataclass(init=True, repr=False, frozen=True)
class Rydberg(Channel):
    """Rydberg beam channel.
    Channel targeting the transition between the ground and rydberg states,
    thus encoding the 'ground-rydberg' basis. See base class.
    """

    # Optional EOM configuration; None means the channel has no EOM.
    eom_config: Optional[RydbergEOM] = None

    def __post_init__(self) -> None:
        super().__post_init__()
        # Validate the EOM configuration type eagerly at construction time.
        if self.eom_config is not None and not isinstance(
            self.eom_config, RydbergEOM
        ):
            raise TypeError(
                "When defined, 'eom_config' must be a valid 'RydbergEOM'"
                f" instance, not {type(self.eom_config)}."
            )

    @property
    def basis(self) -> Literal["ground-rydberg"]:
        """The addressed basis name."""
        return "ground-rydberg"
@dataclass(init=True, repr=False, frozen=True)
class Microwave(Channel):
    """Microwave addressing channel.
    Channel targeting the transition between two rydberg states, thus encoding
    the 'XY' basis. See base class.
    """

    @property
    def basis(self) -> Literal["XY"]:
        """The addressed basis name."""
        return "XY"

    def default_id(self) -> str:
        """Generates the default ID for indexing this channel in a Device."""
        # e.g. "mw_global" / "mw_local", from the inherited addressing field.
        return f"mw_{self.addressing.lower()}"
|
import unittest
from katas.beta.super_duper_easy import problem
class ProblemTestCase(unittest.TestCase):
    """Unit tests for katas.beta.super_duper_easy.problem."""

    def test_equals(self):
        # Non-numeric input is rejected with the literal string 'Error'.
        self.assertEqual(problem('hello'), 'Error')

    def test_equals_2(self):
        # Numeric input is transformed (1 -> 56).
        self.assertEqual(problem(1), 56)
|
def calculateState(inputConnections, recurrenceConnection):
    """
    Performs state calculation given a list of |inputConnections| and an
    optional |recurrenceConnection|, which can be provided as None.
    """
    # Accumulate every input connection's contribution.
    total = 0
    for connection in inputConnections:
        total += connection.calculate()
    # Add the recurrent contribution when present; a None recurrence has no
    # .calculate attribute, which the except quietly absorbs.
    try:
        total += recurrenceConnection.calculate()
    except AttributeError:
        pass
    return total
def calculateProjectedError(derivative, projectedConnections):
    """
    Calculates the projected error for a neuron given
    a list of |projectedConnections|.
    """
    # e = f'j(sj) * E (ek * gkj * wkj)
    weighted_errors = 0
    for conn in projectedConnections:
        weighted_errors += conn.outputNeuron.error * conn.getGateValue() * conn.weight
    return derivative * weighted_errors
def calculateGateError(derivative, gatedConnections):
    """
    Calculates the gate error for a given |hostNeuron|.

    |gatedConnections| maps each output neuron to the connections it gates;
    each output contributes error * sum(weight * input) to the total.
    """
    total = 0
    for output in gatedConnections:
        inner = sum(c.weight * c.getInputValue() for c in gatedConnections[output])
        total += output.error * inner
    return derivative * total
def calculateEligibility(connection, targetRecurrence, oldEligibility):
    """
    Calculates this connection's eligibility trace, and extended
    eligibility traces.
    eji = (gjj * wjj * e^ji) + (gji * yi)
    """
    # Base term: gji * yi
    trace = connection.getGateValue() * connection.getInputValue()
    # Recurrent term gjj * e^ji (wjj is 1 for a recurrent connection);
    # skipped entirely when targetRecurrence is None (no .getGateValue).
    try:
        trace += targetRecurrence.getGateValue() * oldEligibility
    except AttributeError:
        pass
    return trace
def calculateExtendedEligibility(derivative, eligibility, targetRecurrence,
                                 gatedConnections, oldExtendedEligibility):
    """
    Calculate the extended eligibility trace for a gated neuron.
    ejik = ( gkk * wkk * e^jik ) +
           ( f'j(sj) * eji *
             [((dgkk / dyj) * wkk * s^k) + E a=/=k(wka * ya)] )
    """
    weighted_inputs = sum(c.weight * c.getInputValue() for c in gatedConnections)
    trace = derivative * eligibility * weighted_inputs
    try:
        trace += targetRecurrence.getGateValue() * oldExtendedEligibility
    except AttributeError:
        # The gated neuron doesn't have a recurrent connection.
        pass
    except KeyError:
        # The key doesn't exist yet. It will be set below (by the caller).
        pass
    return trace
|
class OrderedLinkedList(object):
    """Singly linked list that keeps its elements in ascending order.

    The list is anchored by a dummy head node (data 0); real elements
    start at ``self.head.next``.
    """

    class _Node(object):
        """One cell of the list: a data value plus the next cell (or None)."""

        def __init__(self, data, next=None):
            self.data = data
            self.next = next

    def __init__(self, lst=()):
        # Dummy sentinel simplifies insertion at the front.
        self.head = OrderedLinkedList._Node(0)
        # The original accepted an initial list but silently ignored it (and
        # used a shared mutable [] default); insert its values now.
        for value in lst:
            self.add(value)

    def add(self, value):
        """Insert *value* while preserving ascending order."""

        def _add(node, value):
            # Recursive insertion: walk until a larger element (or the end).
            # The original referenced undefined names (`val`, `node.val`);
            # nodes store their payload in `.data`.
            if node is None:
                return OrderedLinkedList._Node(value)
            if value < node.data:
                return OrderedLinkedList._Node(value, node)
            node.next = _add(node.next, value)
            return node

        self.head.next = _add(self.head.next, value)
|
# a simple script used for webscrapping chosen funds data from a website for a
# specific day and save/append it to a json file
# script can be set up in windows scheduler to run everyday automatically
from bs4 import BeautifulSoup
import requests
import pandas as pd
import json
# website for scraping (daily fund quotes)
url = "https://www.bankier.pl/fundusze/notowania/wszystkie"
# getting the entire website as lxml
html_content = requests.get(url)
# change encoding to process polish signs
html_content.encoding = 'utf-8'
# get text
content = html_content.text
soup = BeautifulSoup(content, 'lxml')
# finding specific table in website
fund_table = soup.find("table", attrs = {"class": "sortTableMixedData floatingHeaderTable"})
# finding all tr in table head
fund_table_data = fund_table.thead.find_all("tr")
# getting headers for table columns
t_headers = []
for th in fund_table_data[0].find_all("th"):
    t_headers.append(th.text.replace("\n",' ').strip())
# getting values from table/ rows, one {header: cell-text} dict per row
table_data = []
for tr in fund_table.tbody.find_all("tr"): # each row in tbody of table is tr
    t_row = {}
    for td, th in zip(tr.find_all("td"), t_headers): # each cell in row is td
        t_row[th] = td.text.replace('\n',' ').strip()
    table_data.append(t_row)
# import list of funds available for me
funds_list = pd.read_csv('funds_list.csv', encoding='utf-8', index_col=0)
# convert to list
funds_list = list(funds_list['0'])
# create dictionary for funds from the list:
# {fund name: {date: rate}} for every scraped row matching a tracked fund
# (the comma decimal separator is converted to a dot before float()).
table_dict = {}
for ii in table_data:
    for jj in funds_list:
        if jj in ii["Nazwa funduszu AD"]:
            table_dict[ii["Nazwa funduszu AD"]]={ii["Data AD"]:float(ii["Kurs AD"].replace(',','.'))}
def merge(old, new, path=None):
    """Recursively merge dict-of-dicts *new* into *old*, mutating *old*.

    Equal leaf values are tolerated; differing leaves raise an Exception
    naming the dotted path of the conflict.  Returns *old*.
    """
    if path is None:
        # Outermost call: start with an empty breadcrumb trail.
        path = []
    for key, incoming in new.items():
        if key not in old:
            # Key only exists in new: adopt the incoming value wholesale.
            old[key] = incoming
        elif isinstance(old[key], dict) and isinstance(incoming, dict):
            # Both sides are dicts: descend and merge them in place.
            merge(old[key], incoming, path + [str(key)])
        elif old[key] == incoming:
            # Same leaf value on both sides: nothing to do.
            pass
        else:
            raise Exception('Conflict at %s' % '.'.join(path + [str(key)]))
    return old
try:
    # open json file to dict
    with open("C:\\Users\\Pawel\\Documents\\Github\\projects\\personal_invest_dash\\data\\funds_rates.json", 'r', encoding='utf-8') as f_saved:
        funds_file = json.load(f_saved)
    # update dict
    merge(funds_file, table_dict)
    # save back to file/overwrite the existing file
    with open("C:\\Users\\Pawel\\Documents\\Github\\projects\\personal_invest_dash\\data\\funds_rates.json", 'w', encoding='utf-8') as f_save:
        json.dump(funds_file, f_save, ensure_ascii=False)
except:
    # NOTE(review): the bare except treats *any* failure (missing file,
    # corrupt JSON, merge Conflict) as "first run" and overwrites the file;
    # narrowing to FileNotFoundError would be safer.
    # save dict to json
    with open("C:\\Users\\Pawel\\Documents\\Github\\projects\\personal_invest_dash\\data\\funds_rates.json", "w", encoding='utf-8') as f_save:
        json.dump(table_dict, f_save, ensure_ascii=False)
import numpy as np
import tensorflow.keras as keras
import pickle
class DataGenerator(keras.utils.Sequence):
    """Keras Sequence that loads pickled sample arrays in batches.

    file_list : paths of pickled per-sample arrays
    y_list    : per-sample label rows, indexable as y_list[i,] (5 values each)
    shape     : per-sample array shape after transposition -- TODO confirm
    batch_size: samples per batch (a trailing partial batch is dropped)
    """

    def __init__(self, file_list, y_list, shape=(53, 63, 52, 53), batch_size=32):
        self.file_list = file_list
        self.y_list = y_list
        self.batch_size = batch_size
        self.dim = shape
        self.indexes = np.arange(len(self.file_list))

    def __len__(self):
        # Number of complete batches per epoch.
        return int(np.floor(len(self.file_list) / self.batch_size))

    def __getitem__(self, index):
        # Global sample indexes belonging to batch *index*.
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        x, y = self.__data_generation(indexes)
        return x, y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.file_list))

    def __data_generation(self, indexes):
        'Generates data containing batch_size samples'  # X : (n_samples, *dim)
        # Initialization
        X = np.zeros(shape=(self.batch_size, *self.dim), dtype=np.float32)
        y = np.zeros(shape=(self.batch_size, 5), dtype=np.float32)
        # BUG FIX: the batch arrays must be filled at slots 0..batch_size-1.
        # The previous code indexed X/y with the *global* sample index,
        # which raised IndexError for every batch after the first.
        for slot, sample_idx in enumerate(indexes):
            # Load one pickled sample.
            with open(self.file_list[sample_idx], 'rb') as fh:
                x = pickle.load(fh)
            # Standardize each leading slice, skipping zero-variance slices.
            for j in range(x.shape[0]):
                mean = np.mean(x[j,])
                std = np.std(x[j,])
                if std != 0.0:
                    x[j,] = (x[j,] - mean) / std
            x = x.transpose()
            X[slot,] = x
            y[slot,] = self.y_list[sample_idx,]
        return X, y
import asciinema
import sys
from setuptools import setup
# Refuse to install on Python 2 -- the package is Python 3 only.
if sys.version_info[0] < 3:
    sys.exit('Python < 3 is unsupported.')

# GitHub source tarball URL for a given release version.
url_template = 'https://github.com/asciinema/asciinema/archive/v%s.tar.gz'
requirements = []  # no runtime dependencies beyond the standard library

setup(
    name='asciinema',
    version=asciinema.__version__,
    packages=['asciinema', 'asciinema.commands'],
    license='GNU GPLv3',
    description='Terminal session recorder',
    author=asciinema.__author__,
    author_email='m@ku1ik.com',
    url='https://asciinema.org',
    download_url=(url_template % asciinema.__version__),
    entry_points={
        'console_scripts': [
            'asciinema = asciinema.__main__:main',
        ],
    },
    install_requires=requirements,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
        'Natural Language :: English',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: System :: Shells',
        'Topic :: Terminals',
        'Topic :: Utilities'
    ],
)
|
import getpass
import json
import requests
import shutil
import subprocess
import sys
import os
def checkFlaky(slugs, pnumber):
    """Clone each repo in *slugs* and grep its full git history for
    flakiness-related commit messages, writing one report file per keyword.

    pnumber is the search-results page number, used to namespace output
    files as <page>.<index>.<repo>.<keyword>.txt next to the clone dir.
    NOTE(review): repo names are interpolated straight into shell commands
    via os.system -- acceptable for trusted input, but subprocess.run with
    an argument list would be safer.
    """
    # NOTE(review): `projects` is never used.
    projects = []
    idx = 0
    for project in slugs:
        gitlink = 'https://github.com/' + project + '.git'
        # print("gitlink", gitlink)
        p = project.split('/')[1]
        rootPath = "/mnt2/majid/project/flaky/"
        folder = rootPath+p
        print("folder", folder)
        os.system("git clone "+gitlink)
        os.chdir(folder)
        # Skip the grep when this repo's report for the page already exists.
        if(not os.path.isfile("../"+str(pnumber)+"."+str(idx)+ "." +p+".flaky.txt")):
            os.system("git log --all --grep='flaky' > ../"+str(pnumber)+"."+str(idx)+ "." +p+".flaky.txt")
            os.system("git log --all --grep='flakey' > ../"+str(pnumber)+"."+str(idx)+ "."+ p+".flakey.txt")
            os.system("git log --all --grep='intermittent' > ../"+str(pnumber)+"."+str(idx)+ "."+ p+".intermittent.txt")
        os.chdir(rootPath)
        # Remove the clone once its history has been scanned.
        shutil.rmtree(folder)
        idx = idx+1
def main(args):
    """Crawl the most-starred C++ repos on GitHub and mine their histories
    for flaky-test related commit messages.

    args[1] : GitHub username (the password is prompted interactively)
    args[2] : output file name (currently unused)
    """
    uname = args[1]  # Username
    out_file = args[2]
    passwd = getpass.getpass()
    # Get all the Cpp projects on GitHub, 100 per page.
    total_projects = 0
    slugs = []
    url = 'https://api.github.com/search/repositories?q=language:cpp&sort=stars&order=desc&per_page=100'
    # GitHub's search API pages are 1-based; page=0 and page=1 return the
    # same results, so the previous range(0, 10) processed page 1 twice.
    for i in range(1, 11):
        print("page#", i)
        suffix = '&page=' + str(i)
        request = url + suffix
        response = requests.get(request, auth=(uname, passwd))
        if response.ok:
            data = json.loads(response.text)
            for k in data['items']:
                slugs.append(k['full_name'])
        else:
            print("No response!", response)
            break
        print("Digging into the page", i)
        checkFlaky(slugs, i)
        # Accumulate before clearing -- the old code cleared slugs per page
        # and then printed its length, so the final count was always 0.
        total_projects += len(slugs)
        slugs = []
    print('ALL PROJECTS:', total_projects)

if __name__ == '__main__':
    main(sys.argv)
|
/Users/daniel/anaconda/lib/python3.6/_bootlocale.py |
import re
import json
import pandas as pd
import matplotlib.pyplot as plt
# Load raw tweets (one JSON object per line); skip lines that fail to parse.
tweets_data_path = 'twittermi.txt'
tweets_data = []
with open(tweets_data_path, 'r') as tweets_file:
    for line in tweets_file:
        try:
            tweets_data.append(json.loads(line))
        except ValueError:
            continue
print(len(tweets_data))
tweets = pd.DataFrame()

def word_in_text(word, text):
    """Return True when *word* occurs in *text*, case-insensitively."""
    if text is None:
        return False
    return re.search(word.lower(), text.lower()) is not None

tweets['text'] = [tweet['text'] for tweet in tweets_data]
# BUG FIX: the javascript and ruby columns previously re-used the search
# term 'python' (copy/paste), so all three counts were identical.
tweets['python'] = tweets['text'].apply(lambda tweet: word_in_text('python', tweet))
tweets['javascript'] = tweets['text'].apply(lambda tweet: word_in_text('javascript', tweet))
tweets['ruby'] = tweets['text'].apply(lambda tweet: word_in_text('ruby', tweet))
print(tweets['python'].value_counts()[True])
print(tweets['javascript'].value_counts()[True])
print(tweets['ruby'].value_counts()[True])

# Bar chart of mention counts per language.
prg_langs = ['python', 'javascript', 'ruby']
tweets_by_prg_lang = [tweets['python'].value_counts()[True],
                      tweets['javascript'].value_counts()[True],
                      tweets['ruby'].value_counts()[True]]
x_pos = list(range(len(prg_langs)))
width = 0.8
fig, ax = plt.subplots()
plt.bar(x_pos, tweets_by_prg_lang, width, alpha=1, color='g')
# axis labels and styling
ax.set_ylabel('Number of tweets', fontsize=15)
ax.set_title('Ranking: python vs. javascript vs. ruby (Raw data)', fontsize=10, fontweight='bold')
ax.set_xticks([p + 0.1 * width for p in x_pos])
ax.set_xticklabels(prg_langs)
plt.grid()
# BUG FIX: plt.show is a function; without parentheses the plot never shows.
plt.show()
|
import json

i = 0  # index of the embassy record to inspect
# Load mood.json and print the e-mail address of the i-th embassy entry.
with open('mood.json') as data:
    data = json.load(data)
    email = data['embassies'][i]['email']
print(email)
# Given two arrays arr1 and arr2, the elements of arr2 are distinct,
# and all elements in arr2 are also in arr1.
#
# Sort the elements of arr1 such that the relative ordering of items
# in arr1 are the same as in arr2. Elements that don't appear in arr2
# should be placed at the end of arr1 in ascending order.
class Solution:
    def relativeSortArray(self, arr1, arr2):
        """Sort arr1 so its elements follow arr2's relative ordering;
        values absent from arr2 go last, in ascending order."""
        # Position of each known value within arr2.
        rank = {value: position for position, value in enumerate(arr2)}
        # Known values sort by their rank (bucket 0); unknown values come
        # afterwards (bucket 1), ordered by the value itself.
        return sorted(arr1, key=lambda v: (0, rank[v]) if v in rank else (1, v))
if __name__ == "__main__":
    testinput1 = [2, 3, 1, 3, 2, 4, 6, 7, 9, 2, 19]
    testinput2 = [2, 1, 4, 3, 9, 6]
    # Calls through the class with Solution itself passed as `self`.
    print(Solution.relativeSortArray(Solution, testinput1, testinput2))
|
# Deliberate ZeroDivisionError used to demonstrate the pdb debugger.
s = '0'
i = int(s)
print(10 / i)
# Run with: python -m pdb <file>.py
# then press 'n' to step through one line at a time.
# In this program we define a class and its attributes.
# We will call the class from the file import_test.py.
class importTest:
    """Small demo class; its method is exercised from import_test.py."""

    def test(self, name):
        """Store *name* on the instance and return it."""
        self.name = name
        return self.name
# Read n (queue length, unused beyond input format) and t (time steps).
# Each step, every boy (B) immediately in front of a girl (G) swaps with her.
n, t = map(int, input().split())
sequence = input()
for i in range(t):
    sequence = sequence.replace('BG', 'GB')
print(sequence)
|
from django.db import models
# Create your models here.
# Fields: number, title, writer, registration date, view count, body, attachment
class Notice(models.Model):
    """A notice-board post."""
    #num=models.IntegerField() # explicit number column; model.id suffices
    title=models.CharField(max_length=100) # title
    writer=models.CharField(max_length=25) # author
    pub_date=models.DateField(auto_now=True) # registration date
    hit=models.PositiveIntegerField(default=0) # view count
    body=models.CharField(max_length=500) # body text
    #file=models.FileField(null=True,default='') # attachment

    def __str__(self):
        return self.title

    @property # increments the view counter
    def hit_counter(self):
        # NOTE(review): a property with a database-write side effect is
        # surprising -- every access bumps and saves the counter.
        self.hit+=1
        self.save()
        return self.hit

    class Meta:
        # Newest posts first.
        ordering=['-id']
class Comment(models.Model):
    """A comment attached to a Notice (removed when the notice is deleted)."""
    notice=models.ForeignKey(Notice,on_delete=models.CASCADE,null=True,related_name='comments')
    comment_date=models.DateTimeField(auto_now_add=True)
    comment_contents=models.CharField(max_length=200)
    comment_writer=models.CharField(max_length=200,default="admin")

    class Meta:
        # Newest comments first.
        ordering=['-id']
from flask import g, session
class Auth(object):
    """
    This is just a simple app extension to handle basic authentication as
    used by this specific application.
    """

    def __init__(self, app=None):
        # Supports both direct construction and the app-factory pattern.
        self.app = app
        if self.app is not None:
            self.init_app(app)

    def init_app(self, app):
        """Register the template context processor and the per-request
        login hook on *app*."""
        app.context_processor(self.auth_processor)
        app.before_request(self.before_request)
        self.app = app

    def auth_processor(self):
        """
        Inject some simple account status flags into each page.
        """
        result = {
            'is_logged_in': False,
            'is_admin': False}
        if hasattr(g, 'user') and g.user is not None:
            result['is_logged_in'] = True
            result['is_admin'] = g.user.is_admin
        return result

    def before_request(self):
        """
        Handle login information and also allow for a fake login to be made
        during initial development.
        """
        # Imported lazily to avoid a circular import at module load time.
        from .. import db, models
        if 'FAKE_LOGIN' in self.app.config and 'openid' not in session:
            session['openid'] = self.app.config['FAKE_LOGIN']
        if 'openid' in session:
            # Resolve the session's OpenID to the matching User row.
            g.user = db.session.query(models.User)\
                .join(models.OpenID)\
                .filter(models.OpenID.id == session['openid']).first()
        else:
            g.user = None
|
# For each of n test cases, read three side lengths and report whether they
# form a right triangle (Pythagorean check; sorting puts the hypotenuse last).
n = int(input())
for i in range(n):
    li = sorted(map(int, input().split()))
    if(pow(li[0], 2) + pow (li[1], 2) == pow(li[2], 2)):
        print('YES')
    else:
        print('NO')
#RDC algorithym simulation
import numpy as np
from scipy import signal
from fxpmath import Fxp
import matplotlib.pyplot as plt
from functools import reduce
# fxpmath fixed-point format templates used throughout the simulation.
# 'Sm.n' = signed with m integer bits (sign incl.) and n fractional bits;
# 'Um.n' = unsigned.  overflow='wrap' makes values wrap like hardware regs.
RAW = Fxp(None,dtype='S1.15')
DATA = Fxp(None,dtype='S1.15')
ANGLE = Fxp(None,dtype='S4.20')
PANGLE = Fxp(None,dtype='S1.23',overflow='wrap')
CONST = Fxp(None,dtype='U0.18')
ADDER = Fxp(None,dtype='S4.24')
PADDER = Fxp(None,dtype='S1.23',overflow='wrap')
MULTER = Fxp(None,dtype='S1.15')
COMEGA = Fxp(None,dtype='U0.3',overflow='wrap')  # used for omega = fc/fs in PLL
CANGLE = Fxp(None,dtype='S4.6',overflow='wrap')
UANGLE = Fxp(None,dtype='S1.9',overflow='wrap')
INV_PI_CONST = Fxp(1/(2*np.pi)).like(CONST)  # 1/(2*pi) in the CONST format
def AGWN(Ps,snr):
    """Return one additive white Gaussian noise sample whose power follows
    from signal power *Ps* and the desired *snr* in dB."""
    linear_snr = 10 ** (snr / 10)
    noise_power = Ps / linear_snr
    # Scale a standard-normal draw to the requested noise power.
    return np.random.randn(1)[0] * (noise_power ** 0.5)
def my_fft(din,fft_size):
    """Return the single-sided amplitude spectrum of the first *fft_size*
    samples of *din*."""
    frame = din[:fft_size]
    spectrum = np.fft.rfft(frame) / fft_size
    # Double the normalized magnitudes to get single-sided amplitudes.
    return np.abs(spectrum) * 2
class my_filter:
    """Streaming Butterworth filter that carries its state between calls."""

    def __init__(self,N_ord,filt_zone=[0.1],filt_type='lowpass'):
        # Design the filter once; keep (b, a) plus the running delay line.
        self.b, self.a = signal.butter(N_ord, filt_zone, filt_type)
        self.z = np.zeros(max(len(self.a), len(self.b)) - 1)

    def filt(self,din):
        """Filter chunk *din*, updating the internal state for the next call."""
        dout, self.z = signal.lfilter(self.b, self.a, din, zi=self.z)
        return dout
class LoopFilter(object):
    """Proportional-integral loop filter for the PLL.

    kp/ki are derived from the loop gain, noise bandwidth Bn and damping
    zeta.  opf selects real ('R') or fixed-point quantized ('Q') arithmetic.
    """

    def __init__(self, gain, Bn, zeta,opf='R'):
        # PI coefficients computed from bandwidth and damping.
        self.kp = (1/gain)*(4*zeta/(zeta+1/(4*zeta)))*Bn
        self.ki = (1/gain)*(4/(zeta+1/(4*zeta))**2)*(Bn**2)
        self.integrater = 0
        self.lf_out = 0
        self.opf = opf
        if opf=='Q':
            # Quantize coefficients and accumulators to the module-level
            # fixed-point templates.
            self.kp = Fxp(self.kp).like(CONST)
            self.ki = Fxp(self.ki).like(CONST)
            self.integrater = Fxp(self.integrater).like(ADDER)
            self.lf_out = Fxp(self.lf_out).like(ADDER)
        print("kp:%f, ki:%f" %(self.kp,self.ki))

    def advance_filter(self, phase_difference):
        """Advance the integrator one step and return the filter output."""
        if self.opf=='R':
            self.integrater += self.ki*phase_difference
            self.lf_out = self.integrater + self.kp*phase_difference
        else:
            # Calling an Fxp object assigns the new value in place.
            self.integrater(self.integrater + self.ki*phase_difference)
            self.lf_out(self.integrater + self.kp*phase_difference)
        return self.lf_out
class PhaseDetector(object):
    """Phase detector supporting 'pll' and 'costas' loop variants.

    opf selects real ('R') or fixed-point quantized ('Q') arithmetic; the
    quantized path mirrors the real one using the module's Fxp templates.
    """

    def __init__(self,mode = 'pll',opf='R'):
        self.phase_difference = 0
        self.temp = 0
        if opf=='Q':
            self.phase_difference = Fxp(self.phase_difference).like(DATA)
            self.temp = Fxp(self.temp).like(MULTER)
        self.mode = mode;
        self.monitor = 0
        self.pdf0 = 0
        self.pdf1 = 0
        # Low-pass filters feeding the costas-mode lock monitor.
        self.lpf0 = my_filter(3,[0.08])
        self.lpf1 = my_filter(3,[0.08])
        self.mon = 0
        self.opf = opf

    def pd(self, d_iq, vco):
        """Return the phase difference between input *d_iq* and *vco*."""
        if(self.mode == 'costas'):
            if self.opf=='R':
                # Costas detector: product of the two arms of vco * I-input.
                self.temp = vco*d_iq.real
                self.phase_difference = self.temp.real * self.temp.imag
            else:
                self.temp(vco*Fxp(d_iq.real).like(RAW))
                self.phase_difference(self.temp.real * self.temp.imag)
            # Lock monitor: filtered products, squared and summed.
            self.pdf0 = 2*self.lpf0.filt([vco.imag*d_iq.real])
            self.pdf1 = 2*self.lpf1.filt([vco.imag*d_iq.imag])
            self.mon = self.pdf0**2 + self.pdf1**2
        elif(self.mode == 'pll'):
            if self.opf=='R':
                # Classic PLL detector: imag(conj(input) * vco).
                self.phase_difference = (np.conjugate(d_iq)*vco).imag
            else:
                self.phase_difference((Fxp(np.conjugate(d_iq)).like(RAW)*vco).imag)
        else:
            # Unknown mode falls back to the PLL detector.
            if self.opf=='R':
                self.phase_difference = (np.conjugate(d_iq)*vco).imag
            else:
                self.phase_difference((Fxp(np.conjugate(d_iq)).like(RAW)*vco).imag)
        return self.phase_difference
class PLL(object):
    """Second-order phase-locked loop built from PhaseDetector + LoopFilter.

    `loop_reg` models a pipeline delay of `lf_delay` samples between the
    loop-filter output and the phase accumulator, as in the HDL design.
    """
    def __init__(self,fs,fc, lf_gain, lf_bandwidth, lf_damping,lf_delay = 1,pd_type='pll',opf='R'):
        self.delay = lf_delay
        self.fs = fs
        self.fc = fc
        self.phase_estimate = 0.0
        self.phase_difference = 0.0
        self.vco = np.exp(0j)
        # per-sample phase increment of the free-running NCO
        self.omega_const = self.fc/self.fs
        self.omega = self.fc/self.fs
        self.loop_reg = np.zeros(32)
        if opf == 'Q':
            # quantize loop state to the hardware fixed-point formats
            self.phase_difference = Fxp(self.phase_difference).like(DATA)
            self.phase_estimate = Fxp(self.phase_estimate).like(PANGLE)
            self.vco = Fxp(self.vco).like(DATA)
            self.omega_const = Fxp(self.omega).like(COMEGA)
            self.omega = Fxp(self.omega).like(COMEGA)
            self.ph_sum = Fxp(self.omega).like(UANGLE)
        self.loop_filter = LoopFilter(lf_gain, lf_bandwidth, lf_damping,opf=opf)
        self.phase_detector = PhaseDetector(pd_type,opf=opf)
        self.opf = opf
        print("%s omega:%f" %(pd_type,self.omega))
    def update_phase_estimate(self):
        """Integrate the delayed filter output and advance the VCO."""
        if self.opf == 'R':
            self.phase_estimate += self.loop_reg[self.delay]
            self.omega += self.omega_const
            self.vco = np.exp(1j*(2*np.pi*self.omega + self.phase_estimate))
        else:
            # Q mode keeps phase normalized to turns; Fxp wrap models hardware
            self.phase_estimate(self.phase_estimate + self.loop_reg[self.delay]*INV_PI_CONST)
            self.omega(self.omega+self.omega_const)
            self.ph_sum(self.omega+self.phase_estimate)
            self.vco(np.exp(1j*(2*np.pi*(self.ph_sum.get_val()))))
        # shift the pipeline-delay register; newest filter output enters at [0]
        self.loop_reg[1:] = self.loop_reg[:-1]
        self.loop_reg[0] = self.loop_filter.lf_out
    def step(self, d_iq):
        # Takes an instantaneous sample of a signal and updates the PLL's inner state
        if self.opf == 'R':
            self.phase_difference = self.phase_detector.pd(d_iq,self.vco)
        else:
            self.phase_difference(self.phase_detector.pd(d_iq,self.vco))
        self.loop_filter.advance_filter(self.phase_difference)
        self.update_phase_estimate()
class COMB(object):
    """Combined demodulator: an inner Costas loop that tracks the excitation
    carrier plus an outer angle-tracking loop that recovers the motor angle.
    """
    def __init__(self,fs,fc,fm, lf_gain, lf_bandwidth, lf_damping, lf_delay=1,opf='R'):
        # inner Costas loop locked to the carrier fc
        self.costas = PLL(fs,fc,0.5, 0.02, 0.707,lf_delay,pd_type='costas',opf=opf)
        self.delay = lf_delay
        self.demod = 0
        self.monitor = 0
        self.phase_detector = PhaseDetector('pll',opf)
        self.fs = fs
        self.fm = fm
        self.phase_estimate = 0.0
        # recovered angle in radians (derived from phase_estimate)
        self.rad = 0.0
        self.vco = np.exp(0j)
        self.phase_difference = 0.0
        if opf == 'Q':
            # quantize loop state to the hardware fixed-point formats
            self.phase_difference = Fxp(self.phase_difference).like(ANGLE)
            self.phase_estimate = Fxp(self.phase_estimate).like(PADDER)
            self.vco = Fxp(self.vco).like(DATA)
            self.demod = Fxp(self.demod).like(ANGLE)
        self.loop_filter = LoopFilter(lf_gain, lf_bandwidth, lf_damping,opf=opf)
        self.loop_reg = np.zeros(32)
        self.opf = opf
    def update_phase_estimate(self):
        """Integrate the delayed filter output into the angle estimate."""
        if self.opf == 'R':
            self.phase_estimate += self.loop_reg[self.delay]
            self.rad = self.phase_estimate
        else:
            # Q mode keeps the estimate normalized to turns
            self.phase_estimate(self.phase_estimate + self.loop_reg[self.delay]*INV_PI_CONST)
            self.rad = 2*np.pi*self.phase_estimate.get_val()
        if self.opf == 'R':
            self.vco = np.exp(1j*self.phase_estimate)
        else:
            self.vco(np.exp(1j*2*np.pi*self.phase_estimate.get_val()))
        # shift the pipeline-delay register; newest filter output enters at [0]
        self.loop_reg[1:] = self.loop_reg[:-1]
        self.loop_reg[0] = self.loop_filter.lf_out
    def step(self, d_iq):
        """Advance both loops by one input sample d_iq."""
        if self.opf == 'R':
            self.phase_difference = self.phase_detector.pd(d_iq,self.vco)
        else:
            self.phase_difference(self.phase_detector.pd(d_iq,self.vco))
        self.costas.step(d_iq)
        # demodulate: recovered carrier (real part) times the phase error
        if self.opf == 'R':
            self.demod = self.costas.vco.real*self.phase_difference
        else:
            self.demod(self.costas.vco.real*self.phase_difference)
        self.loop_filter.advance_filter(self.demod)
        self.update_phase_estimate()
# ---- Simulation parameters ----
# max motor angle freq (Hz)
fmax = 4000
# motor angle frequency (Hz)
fm = 2000
# acceleration (0 disables the frequency ramp); the original `00000`
# leading-zero literal is replaced by a plain 0 for clarity
a = 0
# frequency deviation of the excitation carrier
fd = 0.0
# dc offset added to the IQ input
dc = 0.0*(1+1j)
# gain error between I and Q channels
ac = 0.00
# differential phase shift
ps = 0.0
# RDC sample frequency (Hz)
fs = 160000
# RDC stimulus carrier frequency (Hz)
fc = 20000
# simulation time stamps (number of samples)
N = 8000
# equivalent quantization noise (SNR in dB)
# SNR = 120.76
SNR = 61.76
# SNR = 73.76
# SNR = 80.0
Tf = 0.001
# loop gain
Kd = 1
# equivalent noise bandwidth
Bn = 0.02
# damping ratio
Zeta = 0.707
# time axis for the N-1 simulated samples
ts = np.arange(N-1)/fs
# 'R' = floating-point model, 'Q' = fixed-point (Fxp) model
usr_opf = 'R'
def costas_tb():
    """Testbench: run a Costas loop on an AM-modulated carrier and plot the
    detector/filter/phase traces.

    NOTE(review): alpha/beta/gamma are declared but never appended to, so the
    'phase_error' plot and the rms/enob statistics below operate on empty
    arrays (NaN results) — confirm whether this testbench is still maintained.
    """
    pll = PLL(fs,fc,0.5, 0.02, 0.707,lf_delay = 1,pd_type='costas')
    phi = np.pi*(0)
    sig_fc = []
    out = []
    ed = []
    ef = []
    pe = []
    mod = []
    alpha = []
    beta = []
    gamma = []
    for i in range(0, N - 1):
        sig_fc.append(np.cos(2*np.pi*fc/fs*i + phi))
        # carrier modulated by a fm-rate cosine
        in_sig = np.cos(2*np.pi*fc/fs*i + phi)*np.cos(2*np.pi*fm/fs*i)
        pll.step(in_sig)
        mod.append(in_sig)
        out.append(pll.vco.imag)
        ed.append(pll.phase_difference)
        ef.append(pll.loop_filter.lf_out)
        pe.append(pll.phase_estimate)
    plt.close('all')
    plt.figure()
    ax = plt.subplot(411)
    # ax.plot(ref,label='sig_in')
    ax.plot(ts,sig_fc,label='carrier')
    ax.plot(ts,out,label='out')
    ax.set_ylabel('V')
    plt.legend()
    ax = plt.subplot(412)
    ax.plot(ts,ed,label='ed')
    plt.legend()
    ax = plt.subplot(413)
    ax.plot(ts,ef,label='ef')
    plt.legend()
    ax = plt.subplot(414)
    ax.plot(ts,pe,label='pe')
    plt.legend()
    plt.show()
    plt.figure()
    ax = plt.subplot(111)
    # wrap the phase error into (-pi, pi]
    ax.plot(ts,list(map(lambda x: x+2*np.pi if x<-np.pi else (x-2*np.pi if x>np.pi else x),np.array(beta)-np.array(gamma))),label='phase_error')
    # ax.plot(np.array(beta)-np.array(gamma),label='phase_error')
    plt.grid(1)
    plt.show()
    err_a = list(map(lambda x: x+2*np.pi if x<-np.pi else (x-2*np.pi if x>np.pi else x),np.array(beta)-np.array(gamma)))
    # statistics over the settled tail (last third) of the run
    err_arr = np.array(err_a[int(N*0.67):])
    std = np.sqrt(np.sum(np.square(err_arr-np.mean(err_arr)))/err_arr.size)
    print("rms: ",std)
    print("enob: ",np.log(1/std)/np.log(2)-1.76)
def pll_tb():
    """Testbench: lock a plain PLL to a complex tone at fm with an initial
    phase offset, then plot the input, recovered output and filter trace."""
    pll = PLL(fs,fm,0.5, 0.05, 0.707)
    # initial phase offset of the input tone
    phi = np.pi/1.1
    ref = []
    diff = []
    demod = []
    for i in range(0, N - 1):
        d_iq = np.exp(1j*(2*np.pi*fm/fs*i+phi))
        pll.step(d_iq)
        ref.append(d_iq.imag)
        demod.append(pll.vco.imag)
        diff.append(pll.loop_filter.lf_out)
    plt.figure()
    ax = plt.subplot(311)
    ax.plot(ref,label='sig_in')
    ax.plot(demod,label='demod')
    plt.legend()
    ax = plt.subplot(312)
    ax.plot(diff,label='diff')
    plt.legend()
    ax = plt.subplot(313)
    ax.plot(demod,label='demod')
    plt.legend()
    plt.show()
def comb_tb():
    """Testbench: run the full COMB resolver-to-digital demodulator on a
    noisy, amplitude/phase-impaired IQ input and report phase/velocity
    statistics.

    NOTE(review): `at` is initialized to 0 and never updated, so the
    "end angle freq" print below always reports 0 — confirm intent.
    """
    print("data mode:",usr_opf)
    print("freq deviation:",fd)
    print("acceleration:",a)
    print("simulation points:",N)
    print("AGWN:%fdB" %(SNR))
    print("======================================================")
    pll = COMB(fs,fc,fm,Kd, Bn, Zeta,lf_delay=1,opf=usr_opf)
    # pll = COMB(fs,fc,fm,0.2, 0.01, 0.707,lf_delay=1)
    #phase delay in range
    # phi = 0.1*np.pi
    #phase delay out of range
    phi = 0*np.pi
    # initial motor angle
    theta = 1.3*np.pi
    raw = []
    iq_dr = []
    iq_d = []
    mix = []
    pll_vco = []
    costas_vco = []
    ed = []
    ed_c = []
    ef = []
    ef_c = []
    pe = []
    mo0 = []
    mo1 = []
    mon = []
    pe_c = []
    alpha = []
    beta = []
    gamma = []
    at = 0
    at_reg = []
    k=0
    for i in range(0, N - 1):
        # ramp the motor frequency with acceleration `a`, clamped at fmax
        if(fm+a/fs*i)<fmax:
            k = i
            d_iq = np.exp(1j*(2*np.pi*(fm/fs*i+0.5*a/fs/fs*i*i)+theta))
        else:
            d_iq = np.exp(1j*(2*np.pi*(fmax/fs*(i-k)+fm/fs*k+0.5*a/fs/fs*k*k)+theta))
        # clockwise rotation
        # if(i<(N/2)):
        #     d_iq = np.exp(1j*(2*np.pi*(fm+0.5*0*i)/fs*i+theta))
        # else:
        #     d_iq = np.exp(1j*(2*np.pi*(fm+0.5*a*i)/fs*i+theta))
        # d_iq = np.exp(1j*(2*np.pi*(fm+0.5*a*i)/fs*i+theta))
        # anti-clockwise rotation
        #d_iq = complex(d_iq.imag,d_iq.real)
        raw.append(d_iq)
        # apply gain error, additive noise and dc offset to the ideal IQ
        tt = complex(d_iq.real*(1-ac),d_iq.imag)
        iq_dr.append(tt+complex(AGWN(0.5,SNR),AGWN(0.5,SNR))+dc)
        alpha.append(np.arctan2(iq_dr[-1].imag,iq_dr[-1].real))
        gamma.append(np.arctan2(raw[-1].imag,raw[-1].real))
        # recovered angle wrapped into (-pi, pi]
        beta.append((pll.rad+np.pi)%(2*np.pi)-np.pi)
        # modulate onto the (possibly frequency-deviated) carrier
        mix.append(np.sin(2*np.pi*fc/fs*(1+fd)*i + phi)*iq_dr[-1])
        pll.step(mix[-1])
        iq_d.append(d_iq)
        pll_vco.append(pll.vco.get_val() if usr_opf=='Q' else pll.vco)
        costas_vco.append(pll.costas.vco.get_val() if usr_opf=='Q' else pll.costas.vco)
        # print(costas_vco)
        # print(pll.costas.vco)
        ed.append(pll.phase_difference)
        ed_c.append(pll.costas.vco.real)
        ef.append(pll.loop_filter.lf_out.get_val() if usr_opf=='Q' else pll.loop_filter.lf_out)
        ef_c.append(pll.demod.get_val() if usr_opf=='Q' else pll.demod)
        pe.append(pll.phase_estimate.get_val() if usr_opf=='Q' else pll.phase_estimate)
        pe_c.append(pll.costas.phase_estimate.get_val() if usr_opf=='Q' else pll.costas.phase_estimate)
        # mo.append(pll.phase_detector.monitor)
        mo0.append(pll.costas.phase_detector.pdf0)
        mo1.append(pll.costas.phase_detector.pdf1)
        mon.append(pll.costas.phase_detector.mon)
    print("end angle freq:%f,last freq:%f" %(at,at))
    plt.close('all')
    plt.figure()
    ax = plt.subplot(311)
    ax.plot(ts,list(map(lambda x:x.real,mix)),label='I')
    ax.plot(ts,list(map(lambda x:x.imag,mix)),label='Q')
    ax.set_ylabel('Amplitude')
    ax.set_title('input signal')
    plt.legend()
    # ax = plt.subplot(412)
    # ax.plot(ts,list(map(lambda x:x.imag,costas_vco)),label='costas_q')
    # # print("costas_q:",costas_vco[:10])
    # #ax.plot(list(map(lambda x:x.real,costas_vco)),label='costas_i')
    # ax.plot(ts,list(map(lambda x:x.imag,iq_dr)),label='input q')
    # ax.plot(ts,list(map(lambda x:x.imag,pll_vco)),label='recovered q')
    # #ax.plot(list(map(lambda x:x.real,pll_vco)),label='pll_i')
    # ax.set_title('VCO')
    # plt.legend()
    ax = plt.subplot(312)
    ax.plot(ts,(np.array(pe_c)+np.pi)%(2*np.pi)-np.pi,label='phase_c')
    ax.plot(ts,ef_c,label='accelleration')
    ax.plot(ts,ef,label='velocity')
    # ax.plot(ts,np.array(ef)*fs/(2*np.pi),label='ef')
    ax.set_ylabel('velocity(rps)')
    # ax.plot(ts,at_reg,label='at')
    # ax.plot(ts,pe,label='pe')
    ax.set_title('phase error')
    plt.legend()
    plt.grid(1)
    ax = plt.subplot(313)
    ax.plot(ts,alpha,label='input data')
    ax.plot(ts,beta,label='recovered')
    ax.set_ylabel('angle(rad)')
    ax.set_title('phase compare')
    plt.legend()
    plt.show()
    print("ef:%f,pe_c:%f,pe:%f" %(ef[-1],pe_c[-1]/(np.pi),pe[-1]))
    # theoretical settling-time estimate
    print("ts:",-np.log(Tf*np.sqrt(1-Zeta**2))/(Zeta*Bn*fs*np.pi))
    print("angular freq:%f(rps)" %(ef[-1]*fs/(2*np.pi)))
    print("costas phase(scaled):",pe_c[-1]/(np.pi))
    plt.figure()
    ax = plt.subplot(211)
    # wrap the tracking error into (-pi, pi]
    ax.plot(ts,list(map(lambda x: x+2*np.pi if x<-np.pi else (x-2*np.pi if x>np.pi else x),np.array(beta)-np.array(gamma))),label='phase_error')
    # ax.plot(np.array(beta)-np.array(gamma),label='phase_error')
    # ax.plot(ts,np.unwrap(np.array(beta)-np.array(gamma)),label='phase_error')
    plt.grid(1)
    ax = plt.subplot(212)
    # m0 = list(map(lambda x:x[0],mo))SNR
    # m1 = list(map(lambda x:x[1],mo))
    ax.plot(ts,mo0,label='cos')
    ax.plot(ts,mo1,label='sin')
    ax.plot(ts,mon,label='mon')
    # ax.plot(ts,mo[1,:],label='monitor')
    plt.legend()
    plt.show()
    # statistics over the settled second half of the run
    err_a = list(map(lambda x: x+2*np.pi if x<-np.pi else (x-2*np.pi if x>np.pi else x),np.array(beta)-np.array(gamma)))
    err_arr = np.array(err_a[int(N*0.5):])
    std = np.sqrt(np.sum(np.square(err_arr-np.mean(err_arr)))/err_arr.size)
    print("phase indicators:")
    print("mean: %f,rms: %f,range:%f" %(np.mean(err_arr),std,np.ptp(err_arr)))
    print("enob: ",np.log(2*np.pi/std)/np.log(2)-1.76)
    err_arr = np.array(ef[int(N*0.5):])
    std = np.sqrt(np.sum(np.square(err_arr-np.mean(err_arr)))/err_arr.size)
    v_mean = np.mean(err_arr)*fs/(2*np.pi)
    print("velocity indicators:")
    print("mean: %f,rms: %f,range:%f" %(v_mean,std*fs/(2*np.pi),np.ptp(err_arr)*fs/(2*np.pi)))
    print("enob: ",np.log(1/std)/np.log(2)-1.76)
    # print("std: ",np.std(err_arr))
    # plt.figure()
    # plt.plot(pe_c,label='pe_c')
    # plt.plot(pe,label='pe')
    # plt.legend()
    # plt.figure()
    # hs_costas = my_fft(costas_vc,N-1)
    # hs_pll = my_fft(pll_vc,N-1)
    # hs_id = my_fft(i_d,N-1)
    # # print(costas_vc)
    # ax = plt.subplot(111)
    # ax.plot(hs_costas,label='carrier')
    # ax.plot(hs_pll,label='velocity')
    # ax.plot(hs_id,label='velocity modulation')
    # plt.legend()
    # ax = plt.subplot(212)
    # ax.plot(hs_id,label='velocity modulation')
    # # ax.plot(hs_id)
    # plt.legend()
    # ax = plt.subplot(212)
    # ax.plot(i_dr,label='i')
    # ax.plot(q_dr,label='q')
    # plt.legend()
    # return i_d
# Testbench entry: uncomment to run the carrier-only or PLL-only benches.
# costas_tb()
# pll_tb()
# NOTE(review): runs the full simulation (and opens plot windows) on import —
# consider guarding with `if __name__ == "__main__":`.
comb_tb()
|
from MulLayer import *
# Computational-graph demo: price = orange * orange_num * tax, then
# back-propagate d(price)/d(each input) through the two multiply layers.
orange = 1000
orange_num = 3
tax = 1.5
mul_orange_layer = MulLayer()
mul_tax_layer = MulLayer()
# forward
orange_price = mul_orange_layer.forward(orange, orange_num)
price = mul_tax_layer.forward(orange_price, tax)
# backward (seed the gradient with d(price)/d(price) = 1)
dprice = 1
dorange_price, dtax = mul_tax_layer.backward(dprice)
dorange, dorange_num = mul_orange_layer.backward(dorange_price)
print("price:", int(price))
print("dOrange:", dorange)
print("dOrange_num:", int(dorange_num))
print("dTax:", dtax)
|
import numpy as np
import argparse
import imutils
import pickle
import cv2
import os
# Face-recognition CLI: detect faces in one image with an SSD detector,
# embed each face with an OpenFace Torch model, classify the embedding,
# and draw labelled bounding boxes.
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
                help="path to input image")
ap.add_argument("-d", "--detector", required=True,
                help="path to OpenCV's deep learning face detector")
ap.add_argument("-m", "--embedding-model", required=True,
                help="path to OpenCV's deep learning face embedding model")
ap.add_argument("-r", "--recognizer", required=True,
                help="path to model trained to recognize faces")
ap.add_argument("-l", "--le", required=True,
                help="path to label encoder")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
                help="minimum probability to filter weak detections")
args = vars(ap.parse_args())
# loading serialized face_detector (Caffe SSD)
print('Loading Face detector...')
prototype_path = os.path.sep.join([args['detector'], 'deploy.prototxt'])
model_path = os.path.sep.join([args["detector"],"res10_300x300_ssd_iter_140000.caffemodel"])
detector = cv2.dnn.readNetFromCaffe(prototype_path, model_path)
# loading serialized face_embedding_model
print('loading face recognizer...')
embedder = cv2.dnn.readNetFromTorch(args["embedding_model"])
# load the actual face recognition model along with label encoder
# NOTE(review): pickle.loads executes arbitrary code — only load model files
# from trusted sources
recognizer = pickle.loads(open(args["recognizer"], "rb").read())
le = pickle.loads(open(args["le"], "rb").read())
'''
load the image, resize it to have a width of 600 pixels while maintaining
the aspect ratio and then grab the image dimensions
'''
image = cv2.imread(args['image'])
image = imutils.resize(image, width=600)
(h, w) = image.shape[:2]
# construct a blob from the image
image_blob = cv2.dnn.blobFromImage(
    cv2.resize(image, (300, 300)), 1.0, (300, 300),
    (104.0, 177.0, 123.0), swapRB=False, crop=False)
'''
apply OpenCV's deep learning-based face detector to localize
faces in the input image
'''
detector.setInput(image_blob)
detections = detector.forward()
# loop over the detections
for i in range(0, detections.shape[2]):
    # extract the confidence/probability
    confidence = detections[0, 0, i, 2]
    # filter out weak detections
    if confidence > args["confidence"]:
        # scale the normalized box back to image coordinates
        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
        (startX, startY, endX, endY) = box.astype("int")
        # extract the face ROI
        face = image[startY:endY, startX:endX]
        (fH, fW) = face.shape[:2]
        # ensure the face width and height are sufficiently large
        if fW < 20 or fH < 20:
            continue
        '''
        construct a blob for the face ROI, then pass the blob
        through our face embedding model to obtain the 128-d
        quantification of the face
        '''
        face_blob = cv2.dnn.blobFromImage(face, 1.0 / 255, (96, 96),
                                          (0, 0, 0), swapRB=True, crop=False)
        embedder.setInput(face_blob)
        vec = embedder.forward()
        # perform classifications to recognize the face
        preds = recognizer.predict_proba(vec)[0]
        j = np.argmax(preds)
        proba = preds[j]
        name = le.classes_[j]
        # draw the bounding box of the face along with the associated probability
        text = '{}: {:.2f}%'.format(name, proba*100)
        y = startY - 10 if startY - 10 > 10 else startY + 10
        cv2.rectangle(image, (startX, startY), (endX, endY), (0, 0, 255), 2)
        cv2.putText(image, text, (startX, y),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
# show the image
cv2.imshow("Image", image)
cv2.waitKey(0)
|
from . import home_user
from flask import render_template, request, redirect, session, flash, jsonify, url_for, current_app
from .forms import UserBaseForm, ModifyPassowrd, UserImg
from app.utils.response_code import RET
from app.utils.qiniu.image_storage import storage
from app import constants
# /user
# default page of the user blueprint
@home_user.route('/')
def index():
    """Render the profile page for the currently logged-in user."""
    from app.models import User
    # NOTE(review): raises KeyError when not logged in — presumably guaranteed
    # by the before_request interceptor below; confirm.
    id = session["user_id"]
    try:
        user = User.query.filter_by(id=id).first()
    except Exception as e:
        current_app.logger.exception('%s', e)
        return render_template("news/user.html", user=None)
    return render_template("news/user.html", user=user)
@home_user.route('/logout')
def logout():
    """Drop the login marker from the session and return to the home page."""
    if "user_id" in session:
        del session["user_id"]
    return redirect("/index/")
# basic profile info
@home_user.route('/user_base/', methods=["GET", "POST"])
def user_base():
    """Show (GET) or update (POST) the current user's basic profile fields."""
    user = getUser()
    user_id = user.id
    if request.method == "GET":
        form = UserBaseForm()
    else:
        form = UserBaseForm(formdata=request.form)
        if form.validate_on_submit():
            data = form.data
            user.signature = data["signature"]
            user.nick_name = data["nick_name"]
            user.gender = data["gender"]
            from app import db
            try:
                db.session.commit()
            except Exception as e:
                # roll back so the session stays usable after a failed commit
                db.session.rollback()
                current_app.logger.exception('%s', "数据库commit异常")
                flash("修改失败! ", "error")
            else:
                # bug fix: previously flashed success even when commit failed
                flash("修改成功! ", "ok")
    return render_template("news/user_base_info.html", form=form, user=user)
# avatar settings
@home_user.route('/user_pic_info/', methods=["POST", "GET"])
def user_pic_info():
    """Upload a new avatar image to Qiniu and store its URL on the user."""
    form = UserImg()
    if request.method == "POST":
        if form.validate_on_submit():
            try:
                file = request.files.get('url').read()
                key = storage(file)
                user = getUser()
                url_str = "http://pv875q204.bkt.clouddn.com/" + key
                user.avatar_url = url_str
                from app import db
                db.session.commit()
                # bug fix: the success path used to flash "修改失败" (failure)
                flash("修改成功! ", "ok")
            except Exception as e:
                current_app.logger.exception('%s', "头像上传失败")
                flash("修改失败! ", "error")
    return render_template('news/user_pic_info.html', form=form, user=getUser())
# my followed users
@home_user.route('/user_follow/<int:page>')
def user_follow(page=None):
    """Paginated list of users the current user follows, with per-user
    news counts and follower counts."""
    from app.models import User, News
    if page is None:
        page = 1
    # current user
    user = getUser()
    # followers ordered by ascending id: first arg page, second arg page size
    # page_data = user.followers.order_by(User.id.asc()).paginate(page=page, per_page=4)
    # all followed users, 4 per page
    page_data = user.followed.order_by(User.id.asc()).paginate(page=page, per_page=4)
    # followed users on this page
    user_list = []
    # number of news items published by each followed user
    news_count = []
    # follower count of each followed user
    attention_count = []
    for v in page_data.items:
        user_list.append(v)
        # news count for this single followed user
        count = News.query.filter(News.user_id == v.id).count()
        news_count.append(count)
        # re-fetch the followed user to read the follower relationship
        attention = User.query.filter(User.id == v.id).first()
        # follower count of the followed user
        count = attention.followers.count()
        attention_count.append(count)
    return render_template('news/user_follow.html', user=getUser(), user_list=user_list, news_count=news_count,
                           attention_count=attention_count, page_data=page_data)
# change password
@home_user.route('/user_pass_info/', methods=["GET", "POST"])
def user_pass_info():
    """Change the current user's password after verifying the old one."""
    from app.models import User
    if request.method == "GET":
        form = ModifyPassowrd()
    else:
        form = ModifyPassowrd(formdata=request.form)
        if form.validate_on_submit():
            user = getUser()
            # verify the old password before accepting the new one
            if user.check_password(form.data["oldPassword"]):
                user.password = form.data["newPassword"]
                from app import db
                # NOTE(review): commit is not wrapped in try/except here,
                # unlike the other handlers — confirm intended.
                db.session.commit()
                flash("提交成功", "ok")
            else:
                flash("密码错误", "error")
    return render_template('news/user_pass_info.html', form=form)
# my collected news
@home_user.route('/user_collection/<int:page>')
def user_collection(page=None):
    """Paginated list of the news items the current user has collected."""
    from app.models import News
    if page is None:
        page = 1
    user = getUser()
    # collected news, at most 6 items per page
    page_data = user.collection_news.order_by(News.id.asc()).paginate(page=page, per_page=6)
    return render_template('news/user_collection.html', page_data=page_data)
def getUser():
    """Return the currently logged-in User, or None if the query fails."""
    from app.models import User
    uid = session["user_id"]
    try:
        return User.query.filter_by(id=uid).first()
    except Exception as e:
        current_app.logger.exception('%s', e)
        return None
# unfollow
@home_user.route('/unattention/<name>', methods=["GET", "POST"])
def unattention(name):
    """Remove the current user from `name`'s follower list."""
    from app.models import User
    # the user doing the unfollowing
    usr = getUser()
    # the user being unfollowed
    user = User.query.filter(User.nick_name == name).first()
    user.followers.remove(usr)
    from app import db
    db.session.commit()
    return jsonify({"msg": "true"})
# follow
@home_user.route('/attention/<name>', methods=["POST", "GET"])
def attention(name):
    """Add the current user to `name`'s follower list."""
    from app.models import User
    # the user doing the following
    usr = getUser()
    # the user being followed
    user = User.query.filter(User.nick_name == name).first()
    user.followers.append(usr)
    from app import db
    db.session.commit()
    return jsonify({"msg": "true"})
# view another user's profile page
@home_user.route('/atnuser/<name>,<int:page>')
def atnuser(name, page):
    """Render another user's profile page with a paginated news list and a
    flag saying whether the current user already follows them."""
    from app.models import User, News
    # the profiled user's info
    idol = User.query.filter(User.nick_name == name).first()
    # renamed from `list` — the original shadowed the builtin
    follow_rel = None
    if "user_id" in session:  # viewing while logged in
        user = getUser()
        # check whether the current user already follows this profile
        follow_rel = idol.followers.filter(User.id == user.id).first()
    else:  # viewing anonymously
        user = None
    # template expects the strings 'True'/'False', not booleans
    if follow_rel is not None:
        is_attention = 'True'
    else:
        is_attention = "False"
    # the profiled user's articles, 6 per page
    page_data = idol.news_list.order_by(News.id.asc()).paginate(page=page, per_page=6)
    return render_template("news/other.html", user=user, idol=idol, page_data=page_data, is_attention=is_attention)
# interceptor: require login for every route in this blueprint except
# the public "view another user" page
@home_user.before_request
def before_request():
    import re
    # pattern matching the public profile-view route
    pattern = re.compile('/user/atnuser/(.*),[0-9]*')
    if "user_id" in session or pattern.match(request.path):
        pass
    else:
        return redirect("/index")
@home_user.route('/examine_news_list/<int:page>')
def examine_news_list(page):
    """Paginated list of the current user's own news with review status."""
    from app.models import User, News
    # 1. get parameters
    examine_id = session["user_id"]
    # 2. validate parameters
    try:
        page = int(page)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.PARAMERR, errmsg="参数错误")
    try:
        examine = User.query.get(examine_id)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="数据查询失败")
    if not examine:
        return jsonify(errno=RET.NODATA, errmsg="当前用户不存在")
    try:
        # legacy positional paginate(page, per_page, error_out) signature
        paginate = examine.news_list.order_by(News.create_time.desc()).paginate(page,
                                                                                constants.USER_COLLECTION_MAX_NEWS,
                                                                                False)
        news_li = paginate.items
        current_page = paginate.page
        total_page = paginate.pages
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="数据查询失败")
    news_dict_li = []
    for news_li_item in news_li:
        news_dict_li.append(news_li_item.to_review_dict())
    return render_template('news/user_news_list.html', news_list=news_dict_li, total_page=total_page,
                           current_page=current_page)
@home_user.route('/news_release', methods=["GET", "POST"])
def user_news_release():
    """GET: render the news-release form; POST: create a pending news item.

    Improvements over the original: the required-field check now runs
    *before* the remote Qiniu upload (the original validated afterwards,
    wasting an upload on invalid submissions), and the category list is
    guarded against being empty before dropping the first entry.
    """
    from app.models import Category, News
    from app.utils.qiniu.image_storage import storage
    from app import db
    if request.method == "GET":
        # load the news categories for the form
        categories = []
        try:
            categories = Category.query.all()
        except Exception as e:
            current_app.logger.error(e)
        category_dict_li = [category.to_dict() for category in categories]
        # drop the "latest" pseudo-category (first entry) if present
        if category_dict_li:
            category_dict_li.pop(0)
        return render_template('news/user_news_release.html', data={"categories": category_dict_li})
    # POST: collect the submitted fields
    user_id = session["user_id"]
    source = "个人发布"  # source tag for user-submitted news
    title = request.form.get("title")
    category_id = request.form.get("category_id")
    digest = request.form.get("digest")
    index_image = request.files.get("index_image")
    content = request.form.get("content")
    # validate before doing any remote work
    if not all([title, category_id, digest, index_image, content]):
        return jsonify(errno=RET.PARAMERR, errmsg="参数错误")
    try:
        category_id = int(category_id)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="参数有误")
    # upload the cover image to Qiniu
    try:
        index_image_data = index_image.read()
        key = storage(index_image_data)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="参数有误")
    new = News()
    new.title = title
    new.digest = digest
    new.source = source
    new.content = content
    new.index_image_url = "http://pv875q204.bkt.clouddn.com/" + key
    new.category_id = category_id
    new.user_id = user_id
    # status 1 = pending review
    new.status = 1
    try:
        # insert and commit
        db.session.add(new)
        db.session.commit()
    except Exception as e:
        # roll back so the session stays usable
        db.session.rollback()
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="数据库操作有误")
    return jsonify(errno=RET.OK, errmsg="OK")
@home_user.route('/news_release1/<int:new_id>', methods=["GET", "POST"])
def user_news_release1(new_id):
    """Render the edit form for an existing news item (GET only).

    NOTE(review): the route accepts POST but only the GET branch returns a
    response — a POST request falls through and returns None, which Flask
    turns into a 500. Confirm whether POST should be removed or handled.
    """
    from app.models import User, Category, News
    from app.utils.qiniu.image_storage import storage
    from app import db
    if request.method == "GET":
        # load the news item and remember which one is being edited
        news = News.query.get(new_id)
        session["new_id"] = new_id
        category = Category.query.get(news.category_id)
        print(category)
        data = {
            "news": news.to_dict(),
            "category": category.to_dict()
        }
        return render_template('news/user_news_release1.html', data=data)
@home_user.route('/news_release2', methods=["GET", "POST"])
def user_news_release2():
    """Apply the edit submitted for the news item stored in session["new_id"]."""
    from app.models import User, Category, News
    from app.utils.qiniu.image_storage import storage
    from app import db
    # the id of the news item being edited, saved by user_news_release1
    new_id = session.get("new_id")
    news = News.query.get(new_id)
    user_id = session["user_id"]
    # 1. collect the submitted fields
    source = "个人发布"  # source tag for user-submitted news
    title = request.form.get("title")
    category_id = request.form.get("category_id")
    digest = request.form.get("digest")
    index_image = request.files.get("index_image")
    content = request.form.get("content")
    # convert the category id
    try:
        category_id = int(category_id)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="参数误")
    # upload a new cover image only when one was provided
    if index_image:
        index_image_data = index_image.read()
        key = storage(index_image_data)
    # # 2. validate parameters
    # if not all([title, category_id, digest, index_image, content]):
    #     return jsonify(errno=RET.PARAMERR, errmsg="参数错误")
    news.title = title
    news.digest = digest
    news.source = source
    news.content = content
    if not index_image:
        # NOTE(review): self-assignment — keeps the existing image URL; the
        # branch is a no-op and could be inverted to a single `if index_image:`
        news.index_image_url = news.index_image_url
    else:
        news.index_image_url = "http://pv875q204.bkt.clouddn.com/" + key
    news.category_id = category_id
    news.user_id = user_id
    # status 1 = pending review
    news.status = 1
    try:
        db.session.commit()
    except Exception as e:
        # roll back so the session stays usable
        db.session.rollback()
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="数据库操作有误")
    return jsonify(errno=RET.OK, errmsg="OK")
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 19 22:17:08 2015
@author: lenovo
"""
"""
if 问题足够简单:
直接解决问题
返回解
else:
将问题分解为与原问题同构的一个或多个更小的问题
逐个解决这些更小的问题
将结果组合为,获得最终的解
返回解
"""
##循环的方式#
#def p(n):
# x=1
# i=1
# while i <= n:
# x=x*i
# i=i+1
# return x
#
##递归的方式#
##掐头去尾留中间#
#def p(n):
# if n == 1 or n ==0:
# return 1
# else:
# return n*p(n-1)
"""
斐波拉契数列
"""
def fib(n):
    """Return the n-th Fibonacci number (1-indexed: fib(1) == fib(2) == 1).

    Iterative implementation: the original naive double recursion was
    exponential in n and recursed without bound for n < 1.

    :param n: 1-based index, must be >= 1
    :raises ValueError: if n < 1 (the original hit the recursion limit)
    """
    if n < 1:
        raise ValueError("n must be >= 1")
    a, b = 1, 1
    for _ in range(n - 2):
        a, b = b, a + b
    return b
# parenthesized call form works on both Python 2 and 3
# (the original `print fib(15)` statement is Python-2-only syntax)
print(fib(15))
import json
# read one line of JSON from stdin: a list of {"name": ..., "parents": [...]}
a = input()
dict_json = json.loads(a)
# maps class name -> set of descendants (filled transitively below)
dict_result = {}
def add_parents(key_glob, key, d):
    """Recursively add every descendant of `key` into d[key_glob].

    Bug fix: the original took a *shallow* copy of the dict (d.copy()),
    so d_copy[key] was the same set object being mutated by the recursion;
    when key == key_glob the iteration raised "Set changed size during
    iteration" as grandchildren were added. Iterating a snapshot tuple of
    the child set avoids that while producing the same transitive closure.

    NOTE(review): like the original, this recurses forever on cyclic
    hierarchies — confirm the input is guaranteed acyclic.
    """
    for child in tuple(d[key]):
        d[key_glob].add(child)
        add_parents(key_glob, child, d)
# first pass: record the direct children of every class mentioned
for obj in dict_json:
    for parent in obj["parents"]:
        if not dict_result.get(parent):
            dict_result[parent] = {obj["name"]}
        else:
            dict_result[parent].add(obj["name"])
    # make sure every named class has an entry, even with no children
    if not dict_result.get(obj["name"]):
        dict_result[obj["name"]] = set()
# second pass: expand each child set into the full transitive closure
for i in dict_result:
    add_parents(i, i, dict_result)
# print each class with its subclass count (+1 counts the class itself)
for key in sorted(dict_result.keys()):
    print(key, ":", len(dict_result[key]) + 1)
# NOTE(review): looks like leftover debug output — confirm before removing
print(dict_result)
|
import time
def countdown(func):
    """Decorator: count 1..3 aloud (one second apart), then run `func`.

    Bug fix: the original ended with `return some_time()`, which *called*
    the wrapper at decoration time (sleeping 3 seconds on import) and
    rebound the decorated name to None. A decorator must return the
    wrapper function itself. The wrapper now also propagates func's
    return value instead of discarding it.
    """
    def some_time():
        for i in range(1, 4):
            time.sleep(1)
            print(i)
        return func()
    return some_time

@countdown
def what_time_is_it_now():
    print(time.strftime('%H:%M'))
"""
Description of program
"""
import numpy as np
from astropy.table import Table as table
from astropy.io import fits
import matplotlib.pyplot as plt
# ==============================================================================
# Define variables
# ==============================================================================
# luminosity class determines marker size (keys are the numeric codes
# produced by simbad_spt_to_num_spt: giants=100, main seq=0, subdwarfs=-100)
lumorder = [-100, 0, 100]
lumfmt = {100: 10, 0: 5, -100: 2}
lumstr = {100: 'Giants', 0: 'Main Sequence', -100: 'Subdwarfs'}
# spectral subclass determines marker shape (pairs of subclasses share one)
subtypeorder = [0, 2, 4, 6, 8]
sptsubfmt = {0: 'o', 1: 'o', 2: 's', 3: 's', 4: 'd', 5: 'd',
             6: '^', 7: '^', 8: 'v', 9: 'v'}
sptsubstr = {0: r'$X0-X2$', 2: r'$X2-X4$', 4: r'$X4-X6$', 6: r'$X6-X8$',
             8: r'$X8+$'}
# later subclasses are drawn more opaque
sptsubalp = {0: 0.40, 1: 0.40, 2: 0.55, 3: 0.55, 4: 0.70, 5: 0.70,
             6: 0.85, 7: 0.85, 8: 1.00, 9: 1.00}
# spectral class determines colour (hot O stars red ... cool Y dwarfs gray)
spts_inorder = ['O', 'B', 'A', 'F', 'G', 'K', 'M', 'L', 'T', 'Y']
sptclassfmt = {'O': 'red', 'B': 'crimson', 'A': 'darkorange',
               'F': 'darkgreen', 'G': 'lawngreen',
               'K': 'palegreen', 'M': 'blue', 'L': 'saddlebrown',
               'T': 'silver', 'Y': 'gray'}
# ==============================================================================
# Define functions
# ==============================================================================
def simbad_spt_to_num_spt(sptcol):
    """
    Converts SIMBAD spectral type to numerical spectral type (M0.0 = 0.0) and
    luminosity class and provides any binary flags

    :param sptcol: list or array containing string of SIMBAD spectral types

    :return: numerical spectral type
                 - M0.0 --> +00.0
                 - L4.5 --> +14.5
                 - G0.0 --> -20.0
                 - K7.0 --> -03.0
             luminosity class, array of float:
                 - giant star (I, II, III, IV...) = 100
                 - main sequence star (V) = 0
                 - sub dwarf star (sd) = -100
             binary flag, array of boolean (whether flagged as binary)
                 - binary is identified by a '+' in the string
                 - the numerical spectral type and luminosity class will
                   only be calculated for the first spectral type found

    luminosity classes and spectral type information from:
        http://simbad.u-strasbg.fr/simbad/sim-display?data=sptypes
    """
    # check sptcol for length and string
    if not hasattr(sptcol, '__len__') or type(sptcol) == str:
        if type(sptcol) == str:
            sptcol = [sptcol]
        else:
            sptcol = [str(sptcol)]
    # check that all elements are strings
    for si in range(len(sptcol)):
        if type(sptcol[si]) != str:
            # bug fix: original was str(si), which stored the string of the
            # INDEX instead of the stringified element
            sptcol[si] = str(sptcol[si])
    # get spcs
    spckey, spcval, spckey1, spcval1 = get_spcs()
    # get lum classes
    lcckey, lccval = get_lum_classes()
    # get cont
    cont = get_cont()
    # don't edit original spt col
    strspts = np.array(sptcol)
    numspts = np.repeat(np.nan, len(sptcol))
    lumclass = np.repeat(np.nan, len(sptcol))
    binary = np.zeros(len(sptcol), dtype=bool)
    # loop round the spt rows
    for row in range(len(sptcol)):
        rawspt = strspts[row].replace(' ', '').upper()
        # if empty then skip
        if len(rawspt) == 0:
            continue
        # remove the cont
        for crow in cont:
            rawspt = rawspt.replace(crow.upper(), '')
        # deal with binaries (i.e. a +) assume spectral type is primary
        # but flag as binary
        if '+' in rawspt:
            rawspt = rawspt.split('+')[0]
            binary[row] = True
        # deal with '...' (substrings that need to be split)
        for char in ['...']:
            if char in rawspt:
                rawspt = rawspt.split(char)[0]
        # strip and save luminosity class
        for lrow in range(len(lcckey)):
            if lcckey[lrow].upper() in rawspt:
                rawspt = rawspt.replace(lcckey[lrow].upper(), '')
                lumclass[row] = lccval[lrow]
        # remove any characters that aren't numbers or in spckey or spckey1
        for char in range(len(rawspt)):
            cond1 = rawspt[char].isdigit()
            cond2 = rawspt[char] in spckey
            cond3 = rawspt[char] in ['.', '/', '-']
            if (not cond1) and (not cond2) and (not cond3):
                rawspt = rawspt.replace(rawspt[char], ' ')
        # finally we should be able to extract a spectral type
        found = False
        # first need to take out annoying X-Y and X/Y --> assume these are the
        # second spectral type (i.e. Y)
        for srow in range(len(spckey1)):
            sp0, sp1, sp2 = spckey1[srow].upper()
            if sp0 in rawspt and sp1 in rawspt and sp2 in rawspt:
                rawspt = rawspt.split(sp1)[-1]
                rawspt = rawspt.replace(sp2.upper(), '')
                # if we are only given a letter assume it is X0.0
                if rawspt.replace(' ', '') == '':
                    numspts[row] = float(spcval1[srow])
                else:
                    numspts[row] = float(rawspt) + float(spcval1[srow])
                found = True
        # now remove all "/" and "-" left (should not be in here after last
        # step)
        if '/' in rawspt:
            rawspt = rawspt.split('/')[0]
        if '-' in rawspt:
            rawspt = rawspt.split('-')[0]
        # if we haven't yet found spectral try finding it for single objects
        if found is False:
            for srow in range(len(spckey)):
                if spckey[srow].upper() in rawspt:
                    rawspt = rawspt.split(spckey[srow].upper())[-1]
                    # may still be other spt chars in
                    if np.sum(np.in1d(list(rawspt), spckey)) > 0:
                        continue
                    # if we are only given a letter assume it is X0.0
                    if rawspt.replace(' ', '') == '':
                        numspts[row] = float(spcval[srow])
                    else:
                        numspts[row] = float(rawspt) + float(spcval[srow])
    # finally return the numerical spectral types and the luminosity class
    return numspts, lumclass, binary
def get_spcs():
    """
    Define the SIMBAD spectral classes and return them sorted longest
    string first (so longer keys are matched before their substrings).

    from http://simbad.u-strasbg.fr/simbad/sim-display?data=sptypes

    :return: tuple of four numpy arrays: single-class keys, their numeric
             values, uncertain (two-class) keys, their numeric values
    """
    # single-class string-to-number conversion, i.e. M0.0 --> 0.0
    keys = np.array(['O', 'B', 'A', 'F', 'G', 'K', 'M', 'L', 'T', 'Y'])
    vals = np.array([-60, -50, -40, -30, -20, -10, 0, 10, 20, 30])
    # uncertain types (X-Y or X/Y) are mapped to the lower spectral type
    keys1 = np.array(['o-b', 'o/b', 'b-a', 'b/a', 'a-f', 'a/f', 'f-g', 'f/g',
                      'g-k', 'g/k', 'k-m', 'k/m', 'm-l', 'm/l', 'l-t', 'l/t',
                      't-y', 't/y'])
    vals1 = np.array([-50, -50, -40, -40, -30, -30, -20, -20, -10, -10,
                      0, 0, 10, 10, 20, 20, 30, 30])
    # masks that order each key table longest string first
    order = sort_str_array_by_length(keys)
    order1 = sort_str_array_by_length(keys1)
    return keys[order], vals[order], keys1[order1], vals1[order1]
def get_lum_classes():
    """
    Defines the SIMBAD luminosity classes and then sorts them largest to
    smallest (longest string first, so e.g. 'III-IV' is matched before 'I').

    from http://simbad.u-strasbg.fr/simbad/sim-display?data=sptypes

    :return: tuple (lcckey, lccval) of numpy arrays, sorted longest key first
    """
    # define luminosity classes; values collapse each class onto a coarse
    # group (100 = giants/supergiants, 0 = dwarfs, -100 = subdwarfs)
    # NOTE: 'esd ' and 'IIb ' deliberately keep their trailing space
    lcckey = ['Ia0', 'II-III', 'VI', 'Iab-Ib', 'IIb-IIIa', 'Ia-0', 'IIIa', 'I',
              'Iab/b', 'III/I', 'Ia0-Ia', 'III', 'I-II', 'Iab/II', 'III-IIIa',
              'Ia-0/Ia', 'IIIb', 'I/II', 'Ib/III', 'III-IIIb', 'Ia', 'III-IV',
              'I-III', 'Ib-IIIa', 'III/III-IV', 'Ia-ab', 'IVa', 'I/III', 'Ib/V',
              'III/IV', 'Ia/Iab', 'IV', 'III-V', 'II-IIb', 'IV-V/V', 'Iab',
              'IVb', 'III/V', 'II/II-III', 'V/III', 'Iab/Ib', 'IVa/V', '0-Ia',
              'II/III', 'c', 'Ib', 'IV-V', 'I/V', 'II-III/III', 'Ib-II',
              'IV/V', 'Ia/ab', 'II-IV', 'd', 'Ib/II', 'Va', 'Ia-Iab', 'II/IV',
              'sd', 'IIa', 'V', 'Ia-Ib', 'II/V', 'esd ', 'II', 'Vb', 'Ia/Ib',
              'IIab-IIb', 'usd', 'IIb ', 'V-VI', 'Iab-b', 'IIb-III']
    lccval = [100, 100, -100, 100, 100, 100, 100, 100,
              100, 100, 100, 100, 100, 100, 100,
              100, 100, 100, 100, 100, 100, 100,
              100, 100, 100, 100, 100, 100, 100,
              100, 100, 100, 100, 100, 100, 100,
              100, 100, 100, 0, 100, 100, 100, 100,
              100, 100, 100, 100, 100, 100,
              100, 100, 100, 0, 100, 0, 100, 100,
              -100, 100, 0, 100, 100, -100, 100, 0, 100,
              100, -100, 100, 0, 100, 100]
    # additional not in SIMBAD ref list
    lcckey += ['Vk', 'V(k)']
    lccval += [0, 0]
    lcckey, lccval = np.array(lcckey), np.array(lccval)
    # longest-first ordering so substring keys never shadow longer ones
    mask = sort_str_array_by_length(lcckey)
    return lcckey[mask], lccval[mask]
def get_cont():
    """
    Defines the SIMBAD spectral types currently deemed contamination and
    then sorts them largest to smallest (longest string first).

    from http://simbad.u-strasbg.fr/simbad/sim-display?data=sptypes

    :return: numpy array of contamination spectral-type strings
    """
    # remove these as contamination (Wolf-Rayet, white dwarfs, supernovae, ...)
    cont = ['OB+', 'OB', 'OB-', 'Of', 'Of*', 'Of+', 'O(f)', 'O(f+)', 'O((f*))',
            'O((f+))', 'O((f))', 'WN', 'WNE', 'WR', 'WC', 'WC+WN', 'R', 'N',
            'CH', 'C', 'DA', 'WD', 'DB', 'DO', 'DC', 'PG1159', 'DQ', 'DZ',
            'DX', 'SN', 'SN.I', 'SN.Ia', 'SN.Ia/c', 'SN.Ib', 'SN.Ib', 'SN.II',
            'SN.II/Ib', 'SN.II/Ic', 'SN.II/IIb', 'SN.IIn', 'SN.IIb', 'SN.IIL',
            'SN.IIP']
    # additional not in SIMBAD ref list
    cont += ['Fe', 'Am+', 'h', '_CN0.5']
    cont = np.array(cont)
    # longest-first so longer patterns are stripped before their substrings
    mask = sort_str_array_by_length(cont)
    return cont[mask]
def sort_str_array_by_length(array, first='longest'):
    """
    Return a mask (index array) that sorts `array` by element length.

    :param array: list or array of string objects (or any object with length)
    :param first: if 'shortest' the returned mask has shortest first,
                  else longest first
    :return: numpy index array (argsort of the lengths)
    """
    # build the length array in one shot; the original grew it with
    # np.append inside a loop, which reallocates on every iteration
    lengths = np.array([len(row) for row in array])
    order = np.argsort(lengths)
    if first == 'shortest':
        return order
    # reverse for longest-to-shortest
    return order[::-1]
def get_spt_class_subclass(x):
    """
    Turn a numerical spectral type into a class and subclass
    i.e. 0.0 --> M0.0    -22.5 --> F7.5    22.5 --> T2.5

    :param x: float, numerical spectral type
    :return: tuple (spectral class string, numeric subclass)
    """
    # conversion tables (only the single-class table is used here)
    keys, vals, _keys1, _vals1 = get_spcs()
    lookup = dict(zip(vals, keys))
    # round down to the nearest multiple of 10 to find the class boundary
    base = np.floor(x / 10.0) * 10
    # the remainder within the class is the subclass
    return lookup[base], x - base
# ==============================================================================
# Start of code
# ==============================================================================
# Smoke tests for simbad_spt_to_num_spt covering: a plain dwarf, a giant,
# a binary with a luminosity class, and a non-string input.
# NOTE(review): the print statements below are Python 2 syntax; this file
# will not run under Python 3 without converting them to print() calls.
if __name__ == '__main__':
    # Test 1: simple M dwarf
    test1 = 'M4.0V'
    spt, lumt, b = simbad_spt_to_num_spt(test1)
    args = [test1, spt[0], lumt[0], b[0]]
    print 'String = {0}\tSpt = {1}\tLtype = {2}\tbinary={3}'.format(*args)
    # Test 2: K giant (luminosity class II)
    test2 = 'K7.5II'
    spt, lumt, b = simbad_spt_to_num_spt(test2)
    args = [test2, spt[0], lumt[0], b[0]]
    print 'String = {0}\tSpt = {1}\tLtype = {2}\tbinary={3}'.format(*args)
    # Test 3: binary ('+') with an extreme-subdwarf class
    test3 = 'L4.5 esd + T8.0'
    spt, lumt, b = simbad_spt_to_num_spt(test3)
    args = [test3, spt[0], lumt[0], b[0]]
    print 'String = {0}\tSpt = {1}\tLtype = {2}\tbinary={3}'.format(*args)
    # Test 4: non-string input
    test4 = 4
    spt, lumt, b = simbad_spt_to_num_spt(test4)
    args = [test4, spt[0], lumt[0], b[0]]
    print 'String = {0}\tSpt = {1}\tLtype = {2}\tbinary={3}'.format(*args)
# ------------------------------------------------------------------------------
# ==============================================================================
# End of code
# ==============================================================================
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-01-09 15:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ``Election.suggested_status``: a CharField restricted to the
    three moderation choices, defaulting to "suggested".

    Auto-generated by Django; the choices/default here must stay in sync
    with the model, so do not hand-edit the field definition.
    """

    dependencies = [("elections", "0015_rename_mayor_type")]

    operations = [
        migrations.AlterField(
            model_name="election",
            name="suggested_status",
            field=models.CharField(
                choices=[
                    ("rejected", "Rejected"),
                    ("suggested", "Suggested"),
                    ("accepted", "Accepted"),
                ],
                default="suggested",
                max_length=255,
            ),
        )
    ]
|
# Print every word that contains the letter 'o'.
# The original iterated over the characters of each word and printed the
# word once per matching character -- i.e. duplicates for a word such as
# 'foo'. A membership test prints each word at most once and states the
# intent directly. print(x) with a single argument works on Python 2 and 3.
l = ['hello', 'world', 'my', 'name', 'is', 'Anna']
for word in l:
    if 'o' in word:
        print(word)

# Same search, with the target character held in a variable.
l = ['hello', 'world', 'my', 'name', 'is', 'Anna']
char = 'o'
for word in l:
    if char in word:
        print(word)
|
import numpy
import pytest
from helpers import *
from tigger.elementwise import Elementwise
import tigger.cluda.dtypes as dtypes
def test_errors(ctx):
    """Prepare and run an Elementwise kernel computing
    out[i] = a[i] + a[i + N] + param over a double-length input array,
    then compare against the host-side computation.

    (The original also built an ``argtypes`` dict that was never used;
    it has been removed.)
    """
    argnames = (('output',), ('input',), ('param',))
    elw = Elementwise(ctx).set_argnames(*argnames)
    code = dict(kernel="""
        ${input.ctype} a1 = ${input.load}(idx);
        ${input.ctype} a2 = ${input.load}(idx + ${size});
        ${output.store}(idx, a1 + a2 + ${param});
        """)
    N = 1000
    a = get_test_array(N * 2, numpy.float32)
    a_dev = ctx.to_device(a)
    b_dev = ctx.allocate(N, numpy.float32)
    param = 1
    elw.prepare_for(b_dev, a_dev, numpy.float32(param), code=code)
    elw(b_dev, a_dev, param)
    assert diff_is_negligible(ctx.from_device(b_dev), a[:N] + a[N:] + param)
def test_nontrivial_code(ctx):
    """Check that an Elementwise kernel can call a helper function supplied
    through the ``functions`` part of the code dict: the kernel computes
    out[i] = a[i] + test(a[i + N], param) where test(v, p) = v + p.
    """
    argnames = (('output',), ('input',), ('param',))
    elw = Elementwise(ctx).set_argnames(*argnames)
    # kernel body plus a WITHIN_KERNEL helper; both are Mako templates
    code = dict(
        kernel="""
        ${input.ctype} a1 = ${input.load}(idx);
        ${input.ctype} a2 = ${input.load}(idx + ${size});
        ${output.store}(idx, a1 + test(a2, ${param}));
        """,
        functions="""
        WITHIN_KERNEL ${output.ctype} test(${input.ctype} val, ${param.ctype} param)
        {
        return val + param;
        }
        """)
    N = 1000
    a = get_test_array(N * 2, numpy.float32)
    a_dev = ctx.to_device(a)
    b_dev = ctx.allocate(N, numpy.float32)
    param = 1
    elw.prepare_for(b_dev, a_dev, numpy.float32(param), code=code)
    elw(b_dev, a_dev, param)
    # effectively the same reduction as test_errors since test() just adds
    assert diff_is_negligible(ctx.from_device(b_dev), a[:N] + a[N:] + param)
|
import json
import time

import tornado.gen
import tornado.web
from tornado.httpclient import AsyncHTTPClient
from tornado.web import RequestHandler
class StaticFileHandler(tornado.web.StaticFileHandler):
    """Static file handler that touches the XSRF token on every request so
    the ``_xsrf`` cookie gets set even when only static pages are served."""

    def __init__(self, *args, **kwargs):
        super(StaticFileHandler, self).__init__(*args, **kwargs)
        # accessing the property generates the token and sets the cookie
        self.xsrf_token
class Students1Handler(RequestHandler):
    """Fetch a remote JSON feed with the callback-style async API."""

    def on_response(self, response):
        # callback invoked by AsyncHTTPClient when the fetch completes
        if response.error:
            self.send_error(500)
        else:
            data = json.loads(response.body)
            self.write(data)
        # in asynchronous mode the request must be finished explicitly
        self.finish()

    @tornado.web.asynchronous  # keep the connection open until finish()
    def get(self, *args, **kwargs):
        # fetch all students' information (demo: budejie topic feed)
        # time.sleep(30)
        url = "http://s.budejie.com/topic/tag-topic/64/hot/budejie-android-6.6.9/0-20.json?market=xiaomi&ver=6.6.9&visiting=&os=7.1.1&appname=baisibudejie&client=android&udid=863254032906009&mac=02%3A00%3A00%3A00%3A00%3A00"
        # create the async HTTP client
        client = AsyncHTTPClient()
        client.fetch(url, self.on_response)
class HomeHandler(RequestHandler):
    """Trivial synchronous handler for the home page."""

    def get(self, *args, **kwargs):
        self.write("home")
class Students2Handler(RequestHandler):
    """Same fetch as Students1Handler, written as a coroutine instead of
    an explicit callback."""

    @tornado.gen.coroutine
    def get(self, *args, **kwargs):
        url = "http://s.budejie.com/topic/tag-topic/64/hot/budejie-android-6.6.9/0-20.json?market=xiaomi&ver=6.6.9&visiting=&os=7.1.1&appname=baisibudejie&client=android&udid=863254032906009&mac=02%3A00%3A00%3A00%3A00%3A00"
        client = AsyncHTTPClient()
        # yield suspends the coroutine until the fetch resolves
        res = yield client.fetch(url)
        if res.error:
            self.send_error(500)
        else:
            data = json.loads(res.body)
            self.write(data)
class Students3Handler(RequestHandler):
    """Coroutine handler that delegates the fetch to a helper coroutine."""

    @tornado.gen.coroutine
    def get(self, *args, **kwargs):
        res = yield self.getData()
        self.write(res)

    @tornado.gen.coroutine
    def getData(self):
        """Fetch the feed; yield the parsed dict, or {"ret": 0} on error.

        raise gen.Return(...) is the pre-Python-3.3 way for a generator
        coroutine to return a value.
        """
        url = "http://s.budejie.com/topic/tag-topic/64/hot/budejie-android-6.6.9/0-20.json?market=xiaomi&ver=6.6.9&visiting=&os=7.1.1&appname=baisibudejie&client=android&udid=863254032906009&mac=02%3A00%3A00%3A00%3A00%3A00"
        client = AsyncHTTPClient()
        res = yield client.fetch(url)
        if res.error:
            ret = {"ret":0}
        else:
            ret = json.loads(res.body)
        raise tornado.gen.Return(ret)
from __future__ import print_function ,division
import os
import torch
from skimage import io,transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset,DataLoader
from torchvision import transforms,utils
import warnings
import cv2
import random
warnings.filterwarnings("ignore")
MEAN=[0.410,0.383,0.288]
STD=[0.156,0.126,0.123]
class RoadExtrationDataset(Dataset):
    """ INTRODUCTION:
        https://competitions.codalab.org/competitions/18467
        @InProceedings{DeepGlobe18,
        author = {Demir, Ilke and Koperski, Krzysztof and Lindenbaum,
        David and Pang, Guan and Huang, Jing and Basu, Saikat and Hughes,
        Forest and Tuia, Devis and Raskar, Ramesh},title = {DeepGlobe 2018:
        A Challenge to Parse the Earth Through Satellite Images},booktitle
        = {The IEEE Conference on Computer Vision and Pattern Recognition
        (CVPR) Workshops},month = {June},year = {2018}
        }
        DATA:
        The training data for Road Challenge contains 6226 satellite images
        in RGB, size 1024x1024, paired with <id>_mask.png grayscale road
        masks (white = road, black = background). Mask values may not be
        pure 0/255 and must be binarized at threshold 128.
    """

    def __init__(self, root_dir, n_train, random_seed=32, val=False, transform=None):
        """
        Args:
            root_dir (str): directory with <id>_sat.jpg / <id>_mask.png pairs
            n_train (int): number of samples assigned to the training split
            random_seed (int): seed for the shuffle before the split, so
                train and val splits are reproducible and disjoint
            val (bool): if True serve the validation split (everything
                after the first n_train shuffled samples)
            transform (callable, optional): transform applied to each sample
        """
        self.class_info = []
        self.img_h = 1024
        self.img_w = 1024
        self.new_img_h = 512
        self.new_img_w = 512
        self.transform = transform
        self.val = val
        self.n_train = n_train
        # after sorting, mask and sat files for one id are adjacent, so
        # stepping by 2 visits each image/label pair exactly once
        images = os.listdir(root_dir)
        images.sort()
        for x in range(0, len(images), 2):
            # strip the 9-character suffix ('_mask.png' / '_sat.jpg ' area)
            id = images[x][:-9]
            img_source = id + "_sat.jpg"
            label_source = id + "_mask.png"
            self.class_info.append(
                {"img_source": os.path.join(root_dir, img_source),
                 "label_source": os.path.join(root_dir, label_source)})
        # deterministic shuffle, then split into train/val
        random.seed(random_seed)
        random.shuffle(self.class_info)
        if self.val:
            self.class_info = self.class_info[self.n_train:]
        else:
            self.class_info = self.class_info[:self.n_train]

    def __len__(self):
        """Number of samples in this split."""
        return len(self.class_info)

    def __getitem__(self, idx):
        """Return {'image', 'label'}; label is binary (1 = road, 0 = not).

        BUG FIX: the dataset docs require binarizing the mask at threshold
        128, but the original code only set values > 128 to 1 and tried to
        zero values < 0 (a no-op for unsigned image data), leaving raw
        values in 1..128 in the label. A single boolean threshold fixes it.
        """
        img = io.imread(self.class_info[idx]["img_source"])
        # only the first channel of the mask is needed
        label = io.imread(self.class_info[idx]["label_source"])[:, :, 0]
        label = (label > 128).astype(np.uint8)
        sample = {"image": img, "label": label}
        if self.transform:
            sample = self.transform(sample)
        return sample
class Rescale(object):
    """Rescale the image and label in a sample to a fixed square size.

    Args:
        output_size (tuple or int): Desired output size (the value is used
            for both width and height of the resized sample).
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        self.output_size = output_size

    def __call__(self, sample):
        target = (self.output_size, self.output_size)
        # nearest-neighbour interpolation keeps the label values discrete
        # and keeps image and label geometrically aligned
        resized_image = cv2.resize(sample['image'], target,
                                   interpolation=cv2.INTER_NEAREST)
        resized_label = cv2.resize(sample['label'], target,
                                   interpolation=cv2.INTER_NEAREST)
        return {'image': resized_image, 'label': resized_label}
class RandomFlip(object):
    """Flip image and label together with a randomly chosen axis.

    Draws a flip code from {-1, 0, 1} and passes it straight to
    ``cv2.flip`` (1 = horizontal, 0 = vertical, -1 = both axes); the same
    code is always applied to both image and label.
    """

    def __call__(self, sample):
        code = np.random.randint(low=-1, high=2)
        image, label = sample['image'], sample['label']
        if code in (1, 0, -1):
            image = cv2.flip(image, code)
            label = cv2.flip(label, code)
        return {'image': image, 'label': label}
class RandomScale(object):
    """Randomly rescale image and label by a factor drawn from scale_scope.

    Args:
        scale_scope (tuple): (low, high) range for the uniform scale factor.
    """

    def __init__(self, scale_scope):
        assert isinstance(scale_scope, tuple)
        self.scale_scope = scale_scope

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        # numpy arrays are (rows, cols) = (height, width); the original
        # code had these swapped, which transposed the aspect ratio of
        # non-square inputs (it was only harmless for square images)
        image_h = image.shape[0]
        image_w = image.shape[1]
        ###################################################
        # randomly scale the img and the label:
        ###################################################
        scale = np.random.uniform(low=self.scale_scope[0], high=self.scale_scope[1])
        new_img_h = int(scale * image_h)
        new_img_w = int(scale * image_w)
        # nearest-neighbour so the label stays discrete and stays aligned
        # with the image (cv2.resize takes dsize as (width, height))
        image = cv2.resize(image, (new_img_w, new_img_h),
                           interpolation=cv2.INTER_NEAREST)
        label = cv2.resize(label, (new_img_w, new_img_h),
                           interpolation=cv2.INTER_NEAREST)
        return {'image': image, 'label': label}
class RandomCorp(object):
    """Take a random crop_size x crop_size crop from image and label.

    Args:
        crop_size (int): side length of the square crop.
    """

    def __init__(self, crop_size):
        assert isinstance(crop_size, int)
        self.crop_size = crop_size

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        # numpy arrays are (rows, cols) = (height, width); the original
        # code had these swapped, so for non-square inputs the crop window
        # could extend past the array edge and come back undersized
        image_h = image.shape[0]
        image_w = image.shape[1]
        start_x = np.random.randint(low=0, high=(image_w - self.crop_size))
        end_x = start_x + self.crop_size
        start_y = np.random.randint(low=0, high=(image_h - self.crop_size))
        end_y = start_y + self.crop_size
        image = image[start_y:end_y, start_x:end_x]
        label = label[start_y:end_y, start_x:end_x]
        return {'image': image, 'label': label}
class Normalize(object):
    """Normalize the image channels with a fixed mean and std.

    The image is first scaled from [0, 255] to [0, 1]; each channel c is
    then transformed as (x - mean[c]) / std[c] and the result is cast to
    float32. The label passes through untouched.

    Args:
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.
    """

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, sample):
        label = sample['label']
        # scale to [0, 1], centre on the dataset mean, divide by the std
        normalized = sample['image'] / 255
        normalized = (normalized - np.array(self.mean)) / np.array(self.std)
        return {'image': normalized.astype(np.float32), 'label': label}
class RandomRotate(object):
    """Rotate image and label together by a random multiple of 90 degrees.

    A single draw from {-1, 0, 1, 2} maps to 1, 2, 3 or 0 quarter-turns;
    image and label always receive the same rotation.
    """

    def __call__(self, sample):
        draw = np.random.randint(low=-1, high=3)
        # map the draw onto a number of counter-clockwise quarter turns
        turns = {-1: 1, 0: 2, 1: 3, 2: 0}[draw]
        if turns:
            sat_img = np.rot90(sample['image'], k=turns)
            map_img = np.rot90(sample['label'], k=turns)
        else:
            sat_img = sample['image']
            map_img = sample['label']
        # copy() so the result owns its memory (rot90 returns a view)
        return {'image': sat_img.copy(), 'label': map_img.copy()}
class ToTensor(object):
    """Convert the ndarrays in a sample to torch float32 Tensors.

    The image moves from numpy's H x W x C layout to torch's C x H x W;
    the label gains a leading channel dimension of size 1.
    """

    def __call__(self, sample):
        # numpy image: H x W x C  ->  torch image: C x H x W
        chw = sample['image'].transpose((2, 0, 1))
        image_tensor = torch.from_numpy(chw.astype(np.float32))
        label_tensor = torch.from_numpy(sample['label'].astype(np.float32))
        return {'image': image_tensor,
                'label': torch.unsqueeze(label_tensor, dim=0)}
# Visual smoke test: build the dataset with the augmentation pipeline and
# display the first two image/label pairs side by side.
if __name__=="__main__":
    dataset=RoadExtrationDataset(root_dir="./data/RoadExtraction/train",
                                 n_train = 5000,
                                 random_seed=32,
                                 transform=transforms.Compose([Rescale(512),
                                 RandomFlip(),
                                 RandomScale((0.75,1.2)),
                                 RandomCorp(256),
                                 RandomRotate(),
                                 # Normalize/ToTensor left out so imshow can
                                 # display the raw uint8 arrays directly
                                 #Normalize([0.410,0.383,0.288],[0.156,0.126,0.123]),
                                 #ToTensor(),
                                 ],))
    fig=plt.figure()
    print(len(dataset))
    for i in range(len(dataset)):
        sample=dataset[i]
        print(i,sample["image"].shape,sample['label'].shape)
        # label on the left, image on the right
        ax=plt.subplot(1,2,1)
        plt.tight_layout()
        ax.axis('off')
        plt.imshow(sample["label"])
        ax=plt.subplot(1,2,2)
        plt.tight_layout()
        ax.axis('off')
        plt.imshow(sample["image"])
        plt.show()
        # only preview the first two samples
        if i==1:
            break
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, redirect
from .models import Username
from django.contrib import messages
def index(request):
    """Render the username registration form."""
    return render(request, 'user_validation/index.html')
def create(request):
    """Validate the POSTed username and create it when it is new and
    8-26 characters long; flash a message and redirect accordingly."""
    name = request.POST['name']
    # .exists() asks the database a yes/no question instead of fetching
    # every matching row just to measure len()
    if Username.objects.filter(username=name).exists():
        messages.error(request, 'That username is already registered!')
        return redirect("/")
    if 7 < len(name) < 27:
        Username.objects.create(username=name)
        messages.success(request, 'The username you entered (' + name + ') is valid. Thank you!')
        return redirect("/success")
    messages.error(request, 'Username needs to be between 8 and 26 characters long, buddy.')
    return redirect("/")
def success(request):
    """Render the success page listing every registered username."""
    all_usernames = Username.objects.all()
    return render(request, 'user_validation/success.html',
                  {"usernames": all_usernames})
def delete(request, id):
    """Delete the username with the given primary key, then redirect.

    Like the original, an unknown id raises Username.DoesNotExist: the
    original fetched the row into a ``context`` dict it never used, which
    still had the side effect of raising for a bad id. The explicit
    get-then-delete keeps that behaviour without the dead variable.
    """
    username = Username.objects.get(id=id)
    username.delete()
    messages.success(request, 'Removed the offending name!')
    return redirect("/success")
|
from selenium import webdriver
from lxml import etree
import os
import time
import random
# Output file for the scraped JD.com product comments.
COMMENT_FILE_PATH = '02_jd_commentsBySelenium.txt'
# headed mode (visible browser window)
#driver = webdriver.Firefox()
# headless mode
options = webdriver.FirefoxOptions()
options.add_argument('-headless')
driver = webdriver.Firefox(options=options)
driver.get("https://item.jd.com/42217334747.html#comment")
# randomized pause so the comments panel can render
time.sleep(random.random()*5)
# NOTE(review): the triple-quoted blocks below are disabled alternative
# implementations kept as in-file documentation; they never execute.
'''
# @1,直接选择器
if os.path.exists(COMMENT_FILE_PATH) :
    os.remove(COMMENT_FILE_PATH)
for i in range(1,100) :
    time.sleep(random.random()*5)
    if i != 1 :
        driver.find_element_by_css_selector('a.ui-pager-next').click()
    comments = driver.find_elements_by_css_selector('p.comment-con')
    for eachcomment in comments :
        with open(COMMENT_FILE_PATH,'a+') as file :
            file.write(eachcomment.text+"\n")
        print(eachcomment.text)
'''
'''
# @2,直接XPATH
'''
# Active scraper (approach @2): page through the comment list and pull
# each comment <p> out with a per-element XPath lookup.
if os.path.exists(COMMENT_FILE_PATH):
    os.remove(COMMENT_FILE_PATH)
for page in range(1, 100):
    # randomized pause between pages to let the DOM update
    time.sleep(random.random() * 5)
    if page != 1:
        # advance to the next page of comments
        driver.find_element_by_css_selector('a.ui-pager-next').click()
    # comments live under #comment-0 as div[1]..div[9]; the original
    # guarded this loop with `if i <= 10`, which is always true for
    # range(1, 10), and reused the outer loop variable name `i`
    for i in range(1, 10):
        eachcomment = driver.find_element_by_xpath('//*[@id="comment-0"]/div['+str(i)+']/div[2]/p')
        with open(COMMENT_FILE_PATH, 'a+') as file:
            file.write(eachcomment.text + "\n")
        print(eachcomment.text)
# NOTE(review): the block below (approach @3) is also wrapped in a string
# and never runs. If enabled it would fail: lxml's xpath() returns a list,
# which cannot be concatenated with the "\n" str on the file.write line —
# confirm before reactivating. It also parses page_source only once before
# the pagination loop, so every page would re-read the first page's tree.
'''
# @3,转换为lxml后,XPATH
if os.path.exists(COMMENT_FILE_PATH) :
    os.remove(COMMENT_FILE_PATH)
res = driver.page_source
rs1 = etree.HTML(res)
for i in range(1,100) :
    time.sleep(random.random()*5)
    if i != 1 :
        driver.find_element_by_css_selector('a.ui-pager-next').click()
    for i in range(1,10):
        if i <= 10 :
            with open(COMMENT_FILE_PATH,'a+') as file :
                file.write(rs1.xpath('//*[@id="comment-0"]/div['+str(i)+']/div[2]/p/text()')+"\n")
            print(rs1.xpath('//*[@id="comment-0"]/div['+str(i)+']/div[2]/p/text()'))
'''
'''
# @4 Beautiful, 放弃.
'''
# shut down the browser session
driver.close()
|
from django.conf.urls import url, include
from django.urls import path, re_path
from django.contrib.auth import views as auth_views
from django.contrib.auth import forms as auth_forms
from django.contrib.auth.views import PasswordResetCompleteView
from django.contrib.auth.views import PasswordResetConfirmView
from django.contrib.auth.views import PasswordResetDoneView
from django.contrib.auth.views import PasswordResetForm
from django.contrib.auth.views import PasswordResetView
from . import views
app_name = 'user'
# NOTE(review): r'^$' is registered twice (Accounts_Home and home); Django
# dispatches to the FIRST match, so the second entry never serves a
# request — it is kept because reverse('user:home') still resolves
# through its name. 'index' is also reused as a name three times, so
# reverse('user:index') only ever refers to the last registration.
urlpatterns = [
    re_path(r'^Behzad_test0_Path', views.index, name='index'),  #/user/Behzad_test0_PathABCDEFGHI
    re_path(r'^Behzad_test1_Path/$', views.index, name='index'),  #/user/Behzad_test1_Path/
    re_path(r'^Behzad_test2_Path$', views.index, name='index'),  #/user/Behzad_test2_Path
    re_path(r'^$', views.home, name='Accounts_Home'),
    re_path(r'^about$', views.about, name='Accounts_About'),
    re_path(r'^home$', views.home, name='Accounts_Home'),
    re_path(r'^contact$', views.contact, name='Accounts_Contact'),
    re_path(r'^$', views.index, name='home'),
    re_path(r'^login/$', auth_views.LoginView.as_view(template_name='user/login.html'), name='Accounts_Login'),
    re_path(r'^logout/$', auth_views.LogoutView.as_view(template_name='user/loggedoff.html'), name='Accounts_Loggedoff'),
    re_path(r'^register/$', views.register, name='Accounts_Register'),
    re_path(r'^profile/$', views.profile_view, name='Accounts_ViewProfile'),
    re_path(r'^profile/edit/$', views.profile_edit, name='Accounts_EditProfile'),
    re_path(r'^profile/change-password/$', views.change_password, name='Accounts_ChangePassword'),
    re_path(r'^password-reset/$', auth_views.PasswordResetView.as_view(), name='Accounts_ResetPassword'),
    #re_path('', include('django.contrib.auth.urls'))
]
|
#!/usr/bin/env python
import time, unittest, os, sys
from selenium import webdriver
from utils.function.setup import *
from main.activity.desktop_v3.activity_wishlist import *
class TestWishlist(unittest.TestCase):
    """Selenium regression test for the wishlist flow on the beta site."""

    # test fixture parameters passed to the page activity object
    # NOTE(review): the attribute name shadows the builtin `dict`, and the
    # disabled tests below read `self.data`, which is never defined —
    # they would need `self.dict` (or a rename) before being re-enabled.
    dict = {
        "site" : "beta",
        "loop" : 1,
        "domain_shop" : "alvin",
        "until_page" : 500,
        "shop_keyword": "Toko",
        "product_keyword" : "jaket",
        "catalog_keyword" : "Nokia",
        "min_price" : 5000,
        "max_price" : 1000000,
        "location" : "Wonosobo",
        "email_buyer": "tkpd.qc+13@gmail.com",
        "password_buyer": "1234asdf"
    }

    # setUp function: fresh Chrome session + activity object per test
    def setUp(self):
        self.driver = tsetup("chrome")
        self.activity = WishlistActivity(self.driver)
        self.activity.set_parameter(self.dict)

    def test_1000_do_wishlist(self):
        print("Test 1000 wishlist!")
        self.activity.sequence_click("tutup")

    """
    def test_case_search_product(self):
        print("Test Case Seacrh product with keyword '" + self.data['product_keyword'] + "'")
        self.activity.do_search(self.data['product_keyword'], "search_product")

    def test_case_search_shop(self):
        print("Test Case Search shop with keyword '" + self.data['shop_keyword'] + "'")
        self.activity.do_search(self.data['shop_keyword'], "search_shop")

    def test_case_search_catalog(self):
        print("Test Case Search catalog with keyword '" + self.data['catalog_keyword'] + "'")
        self.activity.do_search(self.data['catalog_keyword'], "search_catalog")
    """

    def tearDown(self):
        # short grace period before closing the browser
        print("Testing akan selesai dalam beberapa saat..")
        time.sleep(5)
        self.driver.quit()
# main: run the unittest suite; warnings are silenced because selenium
# drivers emit noisy ResourceWarnings on quit
if (__name__ == "__main__"):
    unittest.main(warnings='ignore')
|
#!/usr/bin/env python
"""A tiny tool used to test the `convert` plugin. It copies a file and appends
a specified text tag.
"""
import sys
import locale
# From `beets.util`.
def arg_encoding():
    """Return the encoding for command-line arguments: the default
    locale's encoding, falling back to UTF-8 when it is unset or the
    locale environment is malformed. (From `beets.util`.)
    """
    try:
        encoding = locale.getdefaultlocale()[1]
    except ValueError:
        # badly formed locale environment variables
        return 'utf-8'
    return encoding or 'utf-8'
def convert(in_file, out_file, tag):
    """Copy `in_file` to `out_file` and append the string `tag`.

    `tag` may be str (encoded as UTF-8) or bytes.
    """
    if not isinstance(tag, bytes):
        tag = tag.encode('utf-8')
    # open the destination first, matching the original's behaviour of
    # truncating it even when the source turns out to be missing
    with open(out_file, 'wb') as dest, open(in_file, 'rb') as source:
        dest.write(source.read())
        dest.write(tag)
# CLI entry point: convert <in_file> <out_file> <tag>
if __name__ == '__main__':
    convert(sys.argv[1], sys.argv[2], sys.argv[3])
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 15 17:11:46 2020
@author: MERT
"""
from sklearn.datasets import load_files
import os
import nltk
import pandas as pd
import xlrd
import re
from nltk.corpus import stopwords
import sklearn
import pickle
# Turkish product-review sentiment pipeline: load reviews + ratings from an
# Excel workbook, binarize the ratings, vectorize the text and train a
# logistic-regression classifier, then pickle both model and vectorizer.
konumum = os.getcwd()
print("konumun :",konumum)
# Opening the Excel file; normally this would be pd.read_csv("....")
WBook = xlrd.open_workbook("ReviewsAndRatings_all_19_06.xls")
WSheets = WBook.sheets()
ReviewWS,RatingsWS = WSheets
Ratings = RatingsWS.col_values(0)
Reviews = ReviewWS.col_values(0)
Ratings_Normal = RatingsWS.col_values(1)
#pos_sentences = []
#neg_sentences = []
# binarize the ratings: > 2.5 -> positive (1), otherwise negative (0)
mertoRating = []
for i in Ratings_Normal:
    if i >2.5:
        mertoRating.append(1)
    else :
        mertoRating.append(0)
X,y =Reviews,mertoRating
# basic text cleanup: strip non-word chars, lowercase, drop single-letter
# tokens, collapse whitespace
corpus = []
for i in range (0,len(X)):
    review = re.sub(r"\W"," ",str(X[i]))
    review = review.lower()
    review = re.sub(r"\s+[a-z]\s+"," ",review)
    review = re.sub(r"^[a-z]\s+"," ",review)
    review = re.sub(r"\s+"," ",review)
    corpus.append(review)
# NOTE(review): the CountVectorizer + TfidfTransformer features built here
# are discarded — X is overwritten by the TfidfVectorizer fit below, and
# `cv` is rebound too, so only the second pipeline is actually used.
from sklearn.feature_extraction.text import CountVectorizer
cv=CountVectorizer(max_features=5000,min_df=3 , max_df=0.6, stop_words=stopwords.words("turkish"))
X= cv.fit_transform(corpus).toarray()
from sklearn.feature_extraction.text import TfidfTransformer
transformer = TfidfTransformer()
X = transformer.fit_transform(X).toarray()
print(X.shape)
from sklearn.feature_extraction.text import TfidfVectorizer
cv=TfidfVectorizer(max_features=2000,min_df=3 , max_df=0.6, stop_words=stopwords.words("turkish"))
X= cv.fit_transform(corpus).toarray()
from sklearn.model_selection import train_test_split
X_train,X_test ,y_train , y_test = train_test_split(X,y,test_size =0.2,random_state = 1)
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression()
classifier.fit(X_train,y_train)
y_pred = classifier.predict(X_test)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_pred ,y_test)
print("with logistic regression your confusion matrix is ")
print(cm)
from sklearn.metrics import accuracy_score
acc_scpre = accuracy_score(y_pred,y_test,normalize = True)
print(acc_scpre)
# Pickle the classifier and the vectorizer for reuse
with open("classifierim.pickle","wb") as f:
    pickle.dump(classifier,f)
with open("model.pickle","wb") as f:
    pickle.dump(cv,f)
# Unpickle and sanity-check: if this works, the next step is collecting
# Turkish data from Twitter
with open("classifierim.pickle","rb") as f:
    clf = pickle.load(f)
with open("model.pickle","rb") as f:
    vectorizerim = pickle.load(f)
# --> confirmed working; this is the project's classifier
|
# Напишите reducer, который реализует симметричную разность множеств A и B (т.е. оставляет только те элементы, которые есть только в одном из множеств).
# На вход в reducer приходят пары key / value, где key - элемент множества, value - маркер множества (A или B)
# Sample Input:
# 1 A
# 2 A
# 2 B
# 3 B
# Sample Output:
# 1
# 3
import sys

# MapReduce-style reducer for the symmetric difference of sets A and B:
# input arrives on stdin as sorted key<TAB>marker pairs, so records with
# the same key are adjacent; a key is emitted only if it was seen once.
key, prev = '', ''
duplicate = False
for line in sys.stdin:
    key, value = line.strip().split('\t')
    if not prev:
        # first record: nothing to flush yet
        pass
    elif prev != key:
        # key changed: emit the previous key unless it appeared in both sets
        if not duplicate:
            print(prev)
        else:
            duplicate = False
    else:
        # same key seen again -> it is present in both A and B
        duplicate = True
    prev = key
# flush the final key; the empty-input guard fixes the original, which
# printed a blank line when stdin had no records at all
if key and not duplicate:
    print(key)
# -*- coding: utf-8 -*-
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class ChloroformAppConfig(AppConfig):
    """Django app configuration for the chloroform contact-form builder."""

    name = 'chloroform'
    verbose_name = _('Chloroform Contact form builder')

    def ready(self):
        # importing registers the app's system checks with Django
        import chloroform.checks  # noqa
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, numpy as np
try:
from hashlib import md5
except ImportError:
from md5 import md5
from opticks.ana.nbase import array_digest
def test_hello():
    """Digest a short known string and print it (smoke test for md5).

    The original passed a ``str`` straight to ``update()`` and used the
    Python 2 print statement; both fail on Python 3, so the string is
    encoded first and print is called as a function.
    """
    s = 'hello'
    dig = md5()
    dig.update(s.encode('utf-8'))
    print(s, dig.hexdigest())
def test_array_digest():
    """
    digest on the file includes the header, but array_digest
    covers just the data

    Builds a stack of three float32 identity matrices, prints its data
    digest, and saves it under $TMP for comparison with the file digest.
    (The Python 2 print statement was converted to a print() call.)
    """
    i = np.eye(4, dtype=np.float32)
    a = np.vstack([i, i, i]).reshape(-1, 4, 4)
    print(array_digest(a))
    np.save(os.path.expandvars("$TMP/test_array_digest.npy"), a)
# run both digest smoke tests when invoked as a script
if __name__ == '__main__':
    test_hello()
    test_array_digest()
|
import cv2
import numpy as np
from faceplusplus_api import faceplusplus_api
from PIL import Image, ImageDraw
import math
def load_api_data(filepath):
    """Call the Face++ API for `filepath` and extract the facial landmarks.

    Only the first detected face is used.

    :param filepath: path to the image file
    :return: tuple (PIL image, [x, y] left eye, [x, y] right eye,
             [x, y] nose tip, face rectangle area in px^2, image area in px^2)
    """
    data = faceplusplus_api(filepath)
    left_eye = [data['faces'][0]['landmark']['left_eye_center']['x'],
                data['faces'][0]['landmark']['left_eye_center']['y']]
    right_eye = [data['faces'][0]['landmark']['right_eye_center']['x'],
                 data['faces'][0]['landmark']['right_eye_center']['y']]
    nose_tip = [data['faces'][0]['landmark']['nose_tip']['x'], data['faces'][0]['landmark']['nose_tip']['y']]
    face_rectangle = {'width': data['faces'][0]['face_rectangle']['width'],
                      'top': data['faces'][0]['face_rectangle']['top'],
                      'left': data['faces'][0]['face_rectangle']['left'],
                      'height': data['faces'][0]['face_rectangle']['height']}
    face_area = face_rectangle['width'] * face_rectangle['height']
    image = Image.open(filepath)
    image_width, image_height = image.size
    image_area = image_width * image_height
    # debug visualisation of the landmarks, kept for manual inspection:
    # draw = ImageDraw.Draw(image)
    # draw.ellipse([left_eye[0] - 3, left_eye[1] - 3, left_eye[0] + 3, left_eye[1] + 3], fill='red')
    # draw.ellipse([right_eye[0] - 3, right_eye[1] - 3, right_eye[0] + 3, right_eye[1] + 3], fill='red')
    # draw.ellipse([nose_tip[0] - 3, nose_tip[1] - 3, nose_tip[0] + 3, nose_tip[1] + 3], fill='red')
    # draw.rectangle([(face_rectangle['left'], face_rectangle['top']),
    #                 (face_rectangle['left'] + face_rectangle['width'],
    #                  face_rectangle['top'] + face_rectangle['height'])],
    #                outline='red')
    # image.show()
    return image, left_eye, right_eye, nose_tip, face_area, image_area
def resize(filepath):
    """Crop the photo so the face fills roughly 1/3.5 of the frame.

    Centres a 1:1.4 (width:height) crop on the face and returns the
    cropped PIL image. When the face already fills more than ~1/3.5 of
    the image, the original image is returned unchanged — the original
    code raised NameError in that case because `image2` was never bound.

    :param filepath: path to the input photo
    :return: PIL image (cropped, or the original when no crop is needed)
    """
    image, left_eye, right_eye, nose_tip, face_area, image_area = load_api_data(filepath)
    # face centre: the eyes' midpoint averaged with the nose tip
    center_coor = [((left_eye[0] + right_eye[0]) / 2 + nose_tip[0]) / 2,
                   ((left_eye[1] + right_eye[1]) / 2 + nose_tip[1]) / 2]
    # print(face_area, image_area)
    if image_area / face_area <= 3.5:
        # face already dominates the frame; nothing to crop
        return image
    # target crop: area = 3.5 * face area, aspect ratio 1:1.4
    resize_area = face_area * 3.5
    width = math.sqrt(resize_area / 1.4)
    height = width * 1.4
    print(width, height)
    coor1 = [int(center_coor[0] - width / 2), int(center_coor[1] - height / 2)]
    coor2 = [int(center_coor[0] + width / 2), int(center_coor[1] + height / 2)]
    coor = coor1 + coor2
    print(image.size, coor)
    return image.crop(coor)
# Batch mode: crop test photos 2..15 in place.
if __name__ =='__main__':
    for i in range(2, 16):
        filepath = 'data/data_test/' + str(i) + '.png-photo0.png'
        image2 = resize(filepath)
        # image2.show()
        # overwrites the source file with the cropped version
        image2.save(filepath)
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 9 10:23:54 2020
@author: Alex
"""
import numpy as np
import matplotlib.pyplot as plt
#To validate the newton 2D method, hardcode an example function
def f(u):
return -np.exp(-u[0]**3/3 + u[0] -u[1]**2)
#Hardcode the analytic partial derivatives so that the grad vector
# and hessian matrix can be calculated
def d2f_dy2(u):
return f(u)*(((-2*u[1])**2)-2)
def d2f_dx2(u):
return f(u)*(((-u[0]**2+1)**2)-2*u[0])
def d2f_dxdy(u):
return f(u)*(-u[0]**2+1)*(-2*u[1])
def df_dx(u):
return f(u)*(-u[0]**2+1)
def df_dy(u):
return f(u)*(-2*u[1])
#Calculate the hessian matrix/curvature matrix
def hess(u):
return np.array([[d2f_dx2(u), d2f_dxdy(u)], [d2f_dxdy(u), d2f_dy2(u)]])
#Calculate the gradient vector
def grad(u):
return np.array([df_dx(u), df_dy(u)])
#Impliment the newton method algorithm
def Newton(u_in):
u_1 = u_in
#use a residual method to break the loop once we have converged to a value
res = 10
while res > 0:
g = grad(u_1)
h = hess(u_1)
inv = np.linalg.inv(h)
u_2 = u_1 - np.matmul(inv, g)
res = np.linalg.norm(u_2-u_1)
u_1 = u_2
return u_2
# Initial guess for the 2-D Newton iteration
u_in = (0.6, 0.0021172686496694995)
# Run Newton's method from the guess
u = Newton(u_in)
print(" The curvature matrix is ", hess(u))
print(" The function minimum is ", u)
#VISUALISE 2D PARABOLA LIKE EXPONENTIAL FUNCTION
# Extra visualisation imports (pylab pulls matplotlib/numpy names into scope)
from pylab import *
from mpl_toolkits.mplot3d import Axes3D
# Define the meshgrid over which the surface is plotted
x = np.arange(-1.5, 2.5, 0.3)
y = np.arange(-1.5, 2.5, 0.3)
X, Y = np.meshgrid(x, y)
Z = -np.exp(-X**3/3 + X -Y**2)
# Choose a colour map; lighter colours correspond to lower function values
cmap = 'gnuplot2_r'
fig = figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, Z, cmap= cmap)
title(cmap)
# Seed the database with the article and admin fixtures.
from . import article, admin

article.populate()
admin.populate()
# Python program to get the least common multiple (LCM) of two positive integers.
import math

a = int(input("Enter first number: "))
b = int(input("Enter second number: "))
# lcm(a, b) = a*b // gcd(a, b): constant-time, instead of scanning 1..a*b.
m = a * b // math.gcd(a, b)
print("LCM between two no. is: ", m)
import json
from Edition import Edition
from Card import Card
from lib import clean_unicode
### TODO: turn the flattened card list into an Edition of its own; findCard
### could then accept an optional edition argument and fall back to the
### default flattened set when none is given.
class Magic(object):
    """In-memory index of a Magic: the Gathering JSON database.

    Maps edition name -> Edition in ``self.data`` and also builds a
    flattened card index (``self.flatCards``: card name -> Card) across all
    editions, plus a sorted unique name list (``self.flatList``).
    """

    def __init__(self, json_file):
        """Load *json_file* and build the edition and flat card indexes."""
        # `with` guarantees the handle is closed even if json.load raises.
        with open(json_file) as json_data_file:
            json_data = json.load(json_data_file)
        flat_list = []
        flat_cards = {}
        self.data = {}
        # items() works on both Python 2 and 3; iteritems() is Python-2-only.
        for k, v in json_data.items():
            self.data[clean_unicode(v["name"])] = Edition(v)
            for stuff in v["cards"]:
                flat_list.append(clean_unicode(stuff["name"]))
                flat_cards[clean_unicode(stuff["name"])] = Card(stuff)
        self.flatList = sorted(list(set(flat_list)))
        self.flatCards = flat_cards

    def __str__(self):
        """Return one line per set: '<name> <releaseDate>', sorted by date."""
        d = {}
        for k, v in self.data.items():
            d[clean_unicode(v.name)] = v.releaseDate
        sorted_d = sorted(d, key=lambda key: d[key])
        result = ""
        for item in sorted_d:
            result += item + " " + d[item] + '\n'
        return result

    def findCard(self, search):
        """Return the Card named *search* from the flat index, or None."""
        return self.flatCards.get(search)
|
#!/usr/bin/env python3
# import emoji
# print(emoji.emojize('Ola :thumbs_up:'))
# import flask
#
# app = flask.Flask(__name__)
#
# dados = {
# 'acesso':'OK'
# }
# Configurando rotas
# @app.route('/')
# def index():
# return flask.jsonify(dados)
# configurando tipos de requisicao
# @app.route("/api?<any('id','nome'):string>=<int:id>", methods=['POST'])
# def index_get_id(id):
# dados = {'nome':f'nome do id: {id}'}
# return flask.jsonify(dados)
#
# @app.route('/teste/<string:nome>/', methods=['POST','GET'])
# def teste(id):
# return f'Testes retornando {nome}'
#
# @app.route('/teste/<string:nome>/meusarquivos', methods=['POST', 'GET'])
# def teste_teste():
# return f'Testes retornando {nome} meus arquivos'
#
# app.run(host='0.0.0.0',debug=True,port=80) |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-07-29 17:08
from __future__ import unicode_literals
from django.db import migrations, models
import posts.models
class Migration(migrations.Migration):
    """Make userprofile.profile_pic optional, defaulting to 'default.png'."""

    dependencies = [
        ('posts', '0005_auto_20160728_2010'),
    ]
    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='profile_pic',
            field=models.ImageField(blank=True, default='default.png', null=True, upload_to=posts.models.profile_pic_upload),
        ),
    ]
|
# libraries
import numpy as np
import matplotlib.pyplot as plt

# Grouped bar chart of raw topic counts per thematic code (surface web).
# set width of bar
barWidth = 0.17
# set height of bar: one list per topic, one entry per code (ACP..WSAP)
bars1 = [7, 18, 19, 4, 35, 4, 11, 7]
bars2 = [0, 11, 21, 5, 13, 0, 2, 6,]
bars3 = [1, 7, 11, 1, 7, 2, 3, 5]
bars4 = [3, 5, 6, 2, 12, 2, 7, 5]
# Set position of bar on X axis: each topic's bars shifted right by barWidth
r1 = np.arange(len(bars1))
r2 = [x + barWidth for x in r1]
r3 = [x + barWidth for x in r2]
r4 = [x + barWidth for x in r3]
# Make the plot
plt.bar(r1, bars1, color='#000000', width=barWidth, edgecolor='black', label='Topic 1')
plt.bar(r2, bars2, color='#ffffff', width=barWidth, edgecolor='black', label='Topic 2')
plt.bar(r3, bars3, color='#c1c1c1', width=barWidth, edgecolor='black', label='Topic 3')
plt.bar(r4, bars4, color='#777777', width=barWidth, edgecolor='black', label='Topic 4')
#plt.figure(figsize=(20,5))
# Add xticks on the middle of the group bars
plt.xlabel('Coding', fontweight='bold')
plt.xticks([r + barWidth for r in range(len(bars1))], ['ACP', 'CMEASURE', 'CTYPE', 'DEPTH', 'METH', 'SQUAL', 'SRLP', 'WSAP'])
# Create legend & Show graphic
plt.legend()
plt.xticks(rotation=45)
plt.title('Surface Web - Topics distribution',fontsize=20)
plt.show()
#######################
"""VERSIONE 2 """
"""################## SURFACE WEB ###############"""
######################
# libraries
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
import pandas as pd

barWidth = 0.17
# Data: raw topic counts per thematic code (surface web corpus)
raw_data = {'topic1': [7,18,16,4,23,1,10,7], 'topic2': [1,11,20,5,19,6,9,6],'topic3': [2,4,13,2,15,0,3,6],'topic4': [1,8,8,1,10,1,1,4]}
df = pd.DataFrame(raw_data)
# Set position of bar on X axis
r1 = np.arange(len(df))
r2 = [x + barWidth for x in r1]
r3 = [x + barWidth for x in r2]
r4 = [x + barWidth for x in r3]
# From raw value to percentage
totals = [i+j+k+l for i,j,k,l in zip(df['topic1'], df['topic2'], df['topic3'], df['topic4'])]
#print(totals)
totale = sum(totals[0:len(totals)])
#print(totale)
# NOTE(review): each bar is a percentage of the grand total `totale`; the
# per-row totals zipped in as `j` are never used -- confirm this is intended.
topic1 = [i / totale * 100 for i,j in zip(df['topic1'], totals)]
topic2 = [i / totale * 100 for i,j in zip(df['topic2'], totals)]
topic3 = [i / totale * 100 for i,j in zip(df['topic3'], totals)]
topic4 = [i / totale * 100 for i,j in zip(df['topic4'], totals)]
print(topic1, topic2, topic3, topic4)
# Make the plot
plt.bar(r1, topic1, color='#000000', width=barWidth, edgecolor='black', label='Topic 1')
plt.bar(r2, topic2, color='#ffffff', width=barWidth, edgecolor='black', label='Topic 2')
plt.bar(r3, topic3, color='#c1c1c1', width=barWidth, edgecolor='black', label='Topic 3')
plt.bar(r4, topic4, color='#777777', width=barWidth, edgecolor='black', label='Topic 4')
# Add xticks on the middle of the group bars
plt.xlabel('Thematic Coding', fontweight='bold')
plt.xticks([r + barWidth for r in range(len(df))], ['ACP', 'CMEASURE', 'CTYPE', 'DEPTH', 'METH', 'SQUAL', 'SRLP', 'WSAP'])
plt.gca().set_yticklabels(['{:.0f}%'.format(x*1) for x in plt.gca().get_yticks()])
# Create legend & Show graphic
plt.legend()
plt.xticks(rotation=45)
plt.title('Surface Web - Topics distribution',fontsize=20)
plt.show()
###############################################################################
###############################################################################
###############################################################################
###############################################################################
"""################## VERSIONE 2 ###############"""
"""################## DEEP/DARK WEB ###############"""
######################
# libraries
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
import pandas as pd

barWidth = 0.17
# Data: raw topic counts per thematic code (deep/dark web corpus)
raw_data = {'topic1': [0,12,10,4,18,0,2,0],
            'topic2': [3,11,11,3,8,0,1,1],
            'topic3': [0,4,7,1,4,0,3,0],
            'topic4': [2,8,15,2,3,0,0,0]}
df = pd.DataFrame(raw_data)
# Set position of bar on X axis
r1 = np.arange(len(df))
r2 = [x + barWidth for x in r1]
r3 = [x + barWidth for x in r2]
r4 = [x + barWidth for x in r3]
# From raw value to percentage
totals = [i+j+k+l for i,j,k,l in zip(df['topic1'], df['topic2'], df['topic3'], df['topic4'])]
#print(totals)
totale = sum(totals[0:len(totals)])
print(totale)
# NOTE(review): percentages are of the grand total; the zipped per-row
# totals (`j`) are unused -- confirm this is intended.
topic1 = [i / totale * 100 for i,j in zip(df['topic1'], totals)]
topic2 = [i / totale * 100 for i,j in zip(df['topic2'], totals)]
topic3 = [i / totale * 100 for i,j in zip(df['topic3'], totals)]
topic4 = [i / totale * 100 for i,j in zip(df['topic4'], totals)]
print(topic1, topic2, topic3, topic4)
# Make the plot
plt.bar(r1, topic1, color='#000000', width=barWidth, edgecolor='black', label='Topic 1')
plt.bar(r2, topic2, color='#ffffff', width=barWidth, edgecolor='black', label='Topic 2')
plt.bar(r3, topic3, color='#c1c1c1', width=barWidth, edgecolor='black', label='Topic 3')
plt.bar(r4, topic4, color='#777777', width=barWidth, edgecolor='black', label='Topic 4')
# Add xticks on the middle of the group bars
plt.xlabel('Thematic Coding', fontweight='bold')
plt.xticks([r + barWidth for r in range(len(df))], ['ACP', 'CMEASURE', 'CTYPE', 'DEPTH', 'METH', 'SQUAL', 'SRLP', 'WSAP'])
plt.gca().set_yticklabels(['{:.0f}%'.format(x*1) for x in plt.gca().get_yticks()])
# Create legend & Show graphic
plt.legend()
plt.xticks(rotation=45)
plt.title('Deep/Dark Web - Topics distribution',fontsize=20)
plt.show()
###############################################################################
###############################################################################
###############################################################################
###############################################################################
"""################## VERSIONE 2 ###############"""
"""################## DEEP/DARK WEB 5 TOPICS ###############"""
######################
# libraries
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
import pandas as pd

barWidth = 0.17
# Data: raw counts per thematic code for the 5-topic model
raw_data = {'topic1': [2,17,23,8,19,0,3,0], 'topic2': [1,11,9,2,10,0,1,0],'topic3': [0,0,1,0,1,0,2,0],'topic4': [1,1,5,0,0,0,0,0],'topic5': [1,6,5,0,3,0,0,1]}
df = pd.DataFrame(raw_data)
# Set position of bar on X axis
r1 = np.arange(len(df))
r2 = [x + barWidth for x in r1]
r3 = [x + barWidth for x in r2]
r4 = [x + barWidth for x in r3]
r5 = [x + barWidth for x in r4]
# From raw value to percentage
totals = [i+j+k+l+m for i,j,k,l,m in zip(df['topic1'], df['topic2'], df['topic3'], df['topic4'], df['topic5'])]
#print(totals)
totale = sum(totals[0:len(totals)])
print(totale)
# NOTE(review): percentages are of the grand total; `j` is unused.
topic1 = [i / totale * 100 for i,j in zip(df['topic1'], totals)]
topic2 = [i / totale * 100 for i,j in zip(df['topic2'], totals)]
topic3 = [i / totale * 100 for i,j in zip(df['topic3'], totals)]
topic4 = [i / totale * 100 for i,j in zip(df['topic4'], totals)]
topic5 = [i / totale * 100 for i,j in zip(df['topic5'], totals)]
#print(topic1, topic2, topic3, topic4)
# Make the plot
plt.bar(r1, topic1, color='#000000', width=barWidth, edgecolor='black', label='Topic 1')
plt.bar(r2, topic2, color='#ffffff', width=barWidth, edgecolor='black', label='Topic 2')
plt.bar(r3, topic3, color='#c1c1c1', width=barWidth, edgecolor='black', label='Topic 3')
plt.bar(r4, topic4, color='#444444', width=barWidth, edgecolor='black', label='Topic 4')
plt.bar(r5, topic5, color='#777777', width=barWidth, edgecolor='black', label='Topic 5')
# Add xticks on the middle of the group bars
plt.xlabel('Coding', fontweight='bold')
plt.xticks([r + barWidth for r in range(len(df))], ['ACP', 'CMEASURE', 'CTYPE', 'DEPTH', 'METH', 'SQUAL', 'SRLP', 'WSAP'])
plt.gca().set_yticklabels(['{:.0f}%'.format(x*1) for x in plt.gca().get_yticks()])
# Create legend & Show graphic
plt.legend()
plt.xticks(rotation=45)
plt.title('Deep/Dark Web - Topics distribution',fontsize=20)
plt.show()
|
#
# Copyright (C) 2020-2030 Thorium Corp FP <help@thoriumcorp.website>
#
from odoo import models, fields
class ThoriumcorpSpecialty(models.Model):
    """Reference table of medical specialties (unique code and name)."""
    _name = 'thoriumcorp.specialty'
    _description = 'Medical Specialty'
    # Enforce uniqueness of both code and name at the database level.
    _sql_constraints = [
        ('code_uniq', 'UNIQUE(code)', 'Code must be unique!'),
        ('name_uniq', 'UNIQUE(name)', 'Name must be unique!'),
    ]
    code = fields.Char(
        string='Code',
        help='Speciality code',
        size=256,
        required=True,
    )
    name = fields.Char(
        string='Name',
        help='Name of the specialty',
        size=256,
        required=True,
    )
    category = fields.Selection(
        [
            ('clinical', 'Clinical specialties'),
            ('surgical', 'Surgical specialties'),
            ('thoriumcorp', 'Medical-surgical specialties'),
            ('diagnostic', 'Laboratory or diagnostic specialties'),
        ],
        'Category of specialty'
    )
# This is referenced in
# https://es.wikipedia.org/wiki/Especialidades_médicas#Especialidades_clínicas
|
import numpy as np
import pandas as pd
import keycode
FALLBACK_WEIGHT = 0.25  # weight of fallback observations when blending sparse features
M_MIN_FREQUENCY = 3  # min observations per sample before feature fallback kicks in
OUTLIER_DISTANCE = 2  # observations outside mean +/- this many std devs are outliers
OUTLIER_ITERATIONS = 2  # max passes of recursive outlier removal
def transition_digrams(df, distance=1):
    """Pair each keystroke with the one *distance* events later and compute
    two transition latencies per pair.

    Returns (t1, t2) DataFrames with columns user/session/keynames/transition
    where, per pair, t1 = press2 - release1 and t2 = press2 - press1.
    Grouping by (user, session) keeps pairs from crossing session boundaries.
    Assumes df has columns user/session/keyname/timepress/timerelease --
    TODO confirm against caller.
    """
    # a = all events except the last *distance* per session; b = the events
    # *distance* later; concatenating positionally aligns each pair.
    a = df.groupby(['user', 'session']).apply(lambda x: x[:-distance].reset_index())
    b = df.groupby(['user', 'session']).apply(lambda x: x[distance:].reset_index())
    a = a[['user', 'session', 'keyname', 'timepress', 'timerelease']]
    b = b[['keyname', 'timepress', 'timerelease']]
    a.columns = ['user', 'session', 'keyname_1', 'timepress_1', 'timerelease_1']
    b.columns = ['keyname_2', 'timepress_2', 'timerelease_2']
    joined = pd.concat([a, b], join='inner', axis=1)
    cols = ['user', 'session', 'keynames', 'transition']
    # Create columns for each transition type
    t1 = pd.DataFrame({'user': joined['user'],
                       'session': joined['session'],
                       'keynames': joined['keyname_1'] + '__' + joined['keyname_2'],
                       'transition': joined['timepress_2'] - joined['timerelease_1']},
                      columns=cols, index=joined.index)
    t2 = pd.DataFrame({'user': joined['user'],
                       'session': joined['session'],
                       'keynames': joined['keyname_1'] + '__' + joined['keyname_2'],
                       'transition': joined['timepress_2'] - joined['timepress_1']},
                      columns=cols, index=joined.index)
    return t1, t2
def outlier_removal_recursive(df, col, std_distance=OUTLIER_DISTANCE, max_iterations=OUTLIER_ITERATIONS):
    '''
    Repeatedly apply outlier_removal on *col* until a pass removes nothing,
    or until *max_iterations* passes have run (max_iterations <= 0 means
    unbounded).

    Each pass recomputes the mean/std on the surviving rows, so successive
    passes tighten the bounds.  Note: unlike the docstring inherited from
    the per-user variant suggested, this operates on the whole column, not
    per user/key group.
    '''
    prev_len = np.inf
    i = 0
    while prev_len > len(df):
        prev_len = len(df)
        df = outlier_removal(df, col, std_distance=std_distance)
        print('Removed %d observations' % (prev_len - len(df)))
        i += 1
        if max_iterations > 0 and i == max_iterations:
            break
    return df
def outlier_removal(df, col, std_distance=4):
    '''
    Return *df* with rows dropped whose *col* value lies outside
    mean +/- std_distance * std (computed over the whole column).
    Boundary values are excluded (strict comparison).
    '''
    center = df[col].mean()
    spread = df[col].std()
    lower_bound = center - std_distance * spread
    upper_bound = center + std_distance * spread
    values = df[col].values
    keep = (values > lower_bound) & (values < upper_bound)
    return df[keep]
def reverse_tree(features, hierarchy):
    """Invert a parent -> children mapping into a child -> parent mapping.

    *features* is accepted for interface compatibility but not consulted.
    """
    return {child: parent
            for parent, children in hierarchy.items()
            for child in children}
def extract_gaussian_features(df, group_col_name, feature_col_name, features, decisions, feature_name_prefix):
    """Summarise *feature_col_name* as mean/std per named feature group.

    *features* maps feature name -> set of group values (key names or
    digrams).  When a feature has fewer than M_MIN_FREQUENCY observations
    and has a parent in *decisions*, observations from ancestor feature
    sets (resolved as attributes of the keycode module) are blended in
    with weight FALLBACK_WEIGHT.  Returns a pd.Series keyed by
    '<prefix><feature>.mean' and '<prefix><feature>.std'.
    """
    feature_vector = {}
    for feature_name, feature_set in features.items():
        full_feature_name = '%s%s' % (feature_name_prefix, feature_name)
        obs = df.loc[df[group_col_name].isin(feature_set), feature_col_name]
        if len(obs) < M_MIN_FREQUENCY and feature_name in decisions.keys():
            fallback_name = decisions[feature_name]
            fallback_obs = pd.DataFrame()
            # Walk up the fallback tree until enough observations are pooled
            # or the root is reached.
            while len(obs) + len(fallback_obs) < M_MIN_FREQUENCY:
                fallback_set = getattr(keycode, fallback_name)
                fallback_obs = df.loc[df[group_col_name].isin(fallback_set), feature_col_name]
                if fallback_name in decisions.keys():
                    fallback_name = decisions[fallback_name]  # go up the tree
                else:
                    break  # reached the root node
            n = len(obs)
            # Prevent NA values
            if n == 0:
                obs_mean = 0
                obs_std = 0
            elif n == 1:
                obs_mean = obs.mean()
                obs_std = 0
            else:
                obs_mean = obs.mean()
                obs_std = obs.std()
            # Blend the observed and fallback statistics, weighting the
            # fallback by FALLBACK_WEIGHT regardless of its size.
            feature_vector['%s.mean' % full_feature_name] = (n * obs_mean + FALLBACK_WEIGHT * fallback_obs.mean()) / (
                n + FALLBACK_WEIGHT)
            feature_vector['%s.std' % full_feature_name] = (n * obs_std + FALLBACK_WEIGHT * fallback_obs.std()) / (
                n + FALLBACK_WEIGHT)
        else:
            feature_vector['%s.mean' % full_feature_name] = obs.mean()
            feature_vector['%s.std' % full_feature_name] = obs.std()
    return pd.Series(feature_vector)
def keystroke_durations(df):
    """Per-keystroke hold time (release - press), keyed by keyname."""
    held = df['timerelease'].values - df['timepress'].values
    return pd.DataFrame({'keyname': df['keyname'].values, 'duration': held})
def keystroke_transitions(df):
    """Four standard digraph latencies between consecutive keystrokes.

    For each adjacent pair: t1 = press2 - release1, t2 = press2 - press1,
    t3 = release2 - release1, t4 = release2 - press1; keynames is
    'key1__key2'.
    """
    pair_names = df[:-1]['keyname'].values + '__' + df[1:]['keyname'].values
    return pd.DataFrame({
        'keynames': pair_names,
        't1': df[1:]['timepress'].values - df[:-1]['timerelease'].values,
        't2': df['timepress'].diff().dropna().values,
        't3': df['timerelease'].diff().dropna().values,
        't4': df[1:]['timerelease'].values - df[:-1]['timepress'].values,
    })
def clean_features(df):
    """Zero out every non-finite cell (inf, -inf, NaN) in-place; return df."""
    df[~np.isfinite(df)] = 0
    return df
def durations_transitions(df):
    """Compute per-keystroke durations and inter-key transitions.

    *df* is indexed by (user, session); each group is ordered by press time
    before the duration/transition extraction.
    """
    # sort_values() replaces DataFrame.sort(), which was removed in pandas 0.20.
    df = df.groupby(level=[0, 1]).apply(lambda x: x.reset_index().sort_values('timepress')).reset_index(level=2, drop=True)
    d = df.groupby(level=[0, 1]).apply(keystroke_durations).reset_index(level=2, drop=True)
    t = df.groupby(level=[0, 1]).apply(keystroke_transitions).reset_index(level=2, drop=True)
    return d, t
def extract_keystroke_features(df):
    """Build the per-(user, session) Gaussian feature space from raw events.

    Splits keycode.LINGUISTIC_FEATURES into duration features (single keys)
    and transition features (names containing '__'), removes outliers, and
    extracts mean/std features for durations (du_) and the t1/t2 transition
    latencies.
    NOTE(review): if either feature subset is empty, the corresponding
    *_features variable is never assigned and the final pd.concat raises
    NameError -- confirm both subsets are always non-empty in practice.
    """
    d, t = durations_transitions(df)
    features = keycode.LINGUISTIC_FEATURES
    fallback = keycode.LINGUISTIC_FALLBACK
    duration_features = {k: v for k, v in features.items() if '__' not in k}
    transition_features = {k: v for k, v in features.items() if '__' in k}
    decisions = reverse_tree(features, fallback)
    if len(duration_features) > 0:
        du = outlier_removal_recursive(d, 'duration')
        du_features = du.groupby(level=[0, 1]).apply(lambda x:
                                                     extract_gaussian_features(x, feature_col_name='duration',
                                                                               group_col_name='keyname',
                                                                               features=duration_features,
                                                                               decisions=decisions,
                                                                               feature_name_prefix='du_'))
    if len(transition_features) > 0:
        t1 = outlier_removal_recursive(t[['keynames', 't1']], 't1')
        t2 = outlier_removal_recursive(t[['keynames', 't2']], 't2')
        t1_features = t1.groupby(level=[0, 1]).apply(lambda x:
                                                     extract_gaussian_features(x, feature_col_name='t1',
                                                                               group_col_name='keynames',
                                                                               features=transition_features,
                                                                               decisions=decisions,
                                                                               feature_name_prefix='t1_'))
        t2_features = t2.groupby(level=[0, 1]).apply(lambda x:
                                                     extract_gaussian_features(x, feature_col_name='t2',
                                                                               group_col_name='keynames',
                                                                               features=transition_features,
                                                                               decisions=decisions,
                                                                               feature_name_prefix='t2_'))
    fspace = pd.concat([du_features, t1_features, t2_features], axis=1)
    fspace = clean_features(fspace)
    return fspace
|
import support_functions
import vm_functions
import unittest
# Test fixtures: known-good / deliberately-bad VM names, snapshots, files
# and credentials used by the integration tests below.
version_good = "6.1.26r145957"
vm_good = "ws2019"
vm_bad = "bad"
snapshot_good = "live"
snapshot_bad = "bad"
file_good = "./firefox.exe"
file_bad = "./bad.exe"
file_dst = "C:\\windows\\temp\\file.exe"
user_good = "Administrator"
pass_good = "12345678"
user_bad = "bad"
pass_bad = "bad"
ips_good = ["10.0.2.15", "192.168.56.113"]  # expected guest IPs for vm_good
class TestStringMethods(unittest.TestCase):
    """Ordered integration tests for support_functions / vm_functions
    against a live VirtualBox install.

    Each helper returns an (exit_code, stdout, stderr) triple and tests
    assert on all three.  Method names are numbered because several cases
    depend on VM state left by earlier ones (unittest runs methods in
    alphabetical order).
    """
    # vm_functions options
    vm_functions.logging.disable()
    vm_functions.vboxmanage_path = "vboxmanage"
    vm_functions.timeout = 60
    def test01_file_info(self):
        # Known-good sample file: sha256, md5 and byte size.
        result = support_functions.file_info(file_good)
        self.assertEqual(result[0], 0)
        self.assertEqual(result[1],"f2d2638afb528c7476c9ee8e83ddb20e686b0b05f53f2f966fd9eb962427f8aa",)
        self.assertEqual(result[2], "374fb48a959a96ce92ae0e4346763293")
        self.assertEqual(result[3], 1070)
    def test02_file_info_nonexisted(self):
        result = support_functions.file_info(file_bad)
        self.assertEqual(result, 1)
    def test03_virtualbox_version(self):
        result = vm_functions.virtualbox_version()
        self.assertEqual(result[0], 0)
        self.assertEqual(result[1], version_good)
        self.assertEqual(result[2], "")
    def test04_vm_start(self):
        # Ensure the VM is down first so the start is deterministic.
        vm_functions.vm_stop(vm_good)
        result = vm_functions.vm_start(vm_good)
        vm_functions.vm_stop(vm_good)
        self.assertEqual(result[0], 0)
        self.assertRegex(result[1], f'VM "{vm_good}" has been successfully started.')
        self.assertEqual(result[2], "")
    def test05_vm_start_running(self):
        # Starting an already-running VM must fail with a session lock error.
        vm_functions.vm_start(vm_good)
        result = vm_functions.vm_start(vm_good)
        vm_functions.vm_stop(vm_good)
        self.assertEqual(result[0], 1)
        self.assertEqual(result[1], "")
        self.assertRegex(result[2], "is already locked by a session")
    def test06_vm_start_nonexisting(self):
        result = vm_functions.vm_start(vm_bad)
        self.assertEqual(result[0], 1)
        self.assertEqual(result[1], "")
        self.assertRegex(result[2], "Could not find a registered machine")
    def test07_vm_upload(self):
        result = vm_functions.vm_start(vm_good)
        result = vm_functions.vm_upload(vm_good, user_good, pass_good, file_good, file_dst)
        vm_functions.vm_stop(vm_good)
        self.assertEqual(result[0], 0)
        self.assertEqual(result[1], "")
        self.assertEqual(result[2], "")
    def test08_vm_upload_nonexisting_file(self):
        vm_functions.vm_start(vm_good)
        result = vm_functions.vm_upload(vm_good, user_good, pass_good, file_bad, file_dst)
        vm_functions.vm_stop(vm_good)
        self.assertEqual(result[0], 1)
        self.assertEqual(result[1], "")
        self.assertRegex(result[2], "VERR_FILE_NOT_FOUND")
    def test09_vm_upload_incorrect_credentials(self):
        vm_functions.vm_start(vm_good)
        result = vm_functions.vm_upload(vm_good, user_bad, pass_bad, file_good, file_dst)
        vm_functions.vm_stop(vm_good)
        self.assertEqual(result[0], 1)
        self.assertEqual(result[1], "")
        self.assertRegex(result[2], "The specified user was not able to logon on guest")
    def test10_vm_download_incorrect_credentials(self):
        vm_functions.vm_start(vm_good)
        result = vm_functions.vm_download(vm_good, user_good, pass_bad, file_bad, file_dst)
        vm_functions.vm_stop(vm_good)
        self.assertEqual(result[0], 1)
        self.assertEqual(result[1], "")
        self.assertRegex(result[2], "The specified user was not able to logon on guest")
    def test11_vm_download_nonexisting_file(self):
        vm_functions.vm_start(vm_good)
        result = vm_functions.vm_download(vm_good, user_good, pass_good, file_dst, file_bad)
        vm_functions.vm_stop(vm_good)
        self.assertEqual(result[0], 1)
        self.assertEqual(result[1], "")
        self.assertRegex(result[2], "Querying guest file information failed")
    def test12_vm_stop(self):
        vm_functions.vm_start(vm_good)
        result = vm_functions.vm_stop(vm_good)
        self.assertEqual(result[0], 0)
        self.assertEqual(result[1], "")
        self.assertRegex(result[2], "100%")
    def test13_vm_stop_stopped(self):
        # Stopping an already-stopped VM must report an invalid-state error.
        vm_functions.vm_stop(vm_good)
        result = vm_functions.vm_stop(vm_good)
        self.assertEqual(result[0], 1)
        self.assertEqual(result[1], "")
        self.assertRegex(result[2], "VBOX_E_INVALID_VM_STATE")
    def test14_vm_snapshot_restore_good(self):
        result = vm_functions.vm_snapshot_restore(vm_good, snapshot_good)
        self.assertEqual(result[0], 0)
        self.assertRegex(result[1], "Restoring snapshot")
        self.assertRegex(result[2], "100%")
    def test15_vm_snapshot_restore_nonexisting_a(self):
        result = vm_functions.vm_snapshot_restore(vm_good, snapshot_bad)
        self.assertEqual(result[0], 1)
        self.assertEqual(result[1], "")
        self.assertRegex(result[2], "Could not find a snapshot")
    def test16_vm_snapshot_restore_nonexisting_b(self):
        result = vm_functions.vm_snapshot_restore(vm_bad, snapshot_bad)
        self.assertEqual(result[0], 1)
        self.assertEqual(result[1], "")
        self.assertRegex(result[2], "Could not find a registered machine")
    def test17_list_ips(self):
        vm_functions.vm_start(vm_good)
        result = vm_functions.list_ips(vm_good)
        self.assertEqual(result[0], 0)
        self.assertEqual(result[1], ips_good)
        self.assertEqual(result[2], "")
if __name__ == "__main__":
    unittest.main()
|
from PIL import Image

# change as per need
TEST_IMAGE_LOCATION = "test.jpg"
TRAINING_FILE_LOCATION = "training_sheet.txt"
OUTPUT_IMAGE_NAME = "output_image.png"

OUTPUT_IMAGE = []   # one (r, g, b) tuple per pixel: white = skin, black = non-skin
MY_HASH_LIST = {}   # set-like dict of known skin-tone keys from the training sheet

im = Image.open(TEST_IMAGE_LOCATION, "r")  # read image from image location
pix_val = list(im.getdata())  # get all pixel values from image

# Load every training key once; dict membership makes the per-pixel test O(1).
# `with` closes the handle (the original leaked the open file object).
with open(TRAINING_FILE_LOCATION) as training_file:
    for line in training_file:
        MY_HASH_LIST[line.strip()] = ""

for rgb in pix_val:  # classify every pixel of the test image
    r_unmask, g_unmask, b_unmask = rgb
    # NOTE(review): 255*255*r + 255*g + b is not a unique packing of (r, g, b)
    # (256*256*r + 256*g + b would be) -- kept as-is because the training
    # sheet was presumably generated with the same formula; confirm.
    key = str(255*255*r_unmask + 255*g_unmask + b_unmask)
    if key in MY_HASH_LIST:  # key present -> classified as skin
        OUTPUT_IMAGE.append((255, 255, 255))  # add pixel as white
    else:  # else non skin
        OUTPUT_IMAGE.append((0, 0, 0))  # add pixel as black

im.putdata(OUTPUT_IMAGE)
im.save(OUTPUT_IMAGE_NAME)  # save image
# %load q03_linear_regression/build.py
from greyatomlib.linear_regression.q01_load_data.build import load_data
from greyatomlib.linear_regression.q02_data_splitter.build import data_splitter
from sklearn.linear_model import LinearRegression

dataframe = load_data('data/house_prices_multivariate.csv')
X, y = data_splitter(dataframe)


def linear_regression(feature, target):
    """Fit and return an ordinary least squares model on (feature, target).

    The original implementation fit the module-level globals X and y,
    silently ignoring its arguments.
    """
    lm = LinearRegression()
    lm.fit(feature, target)
    return lm


lr = linear_regression(X, y)
print(lr.coef_.shape)
print(lr.coef_[3])
print(lr.coef_[5])
print(lr.coef_[10])
print(lr.coef_[33])
print(lr.intercept_)
|
from django.db import models
from django.utils import timezone
class Customer(models.Model):
    """A shop customer account."""
    customer_id = models.PositiveIntegerField()
    customer_username = models.CharField(max_length=20)
    customer_first = models.CharField(max_length=100)
    customer_last = models.CharField(max_length=100)
    customer_email = models.EmailField()
    customer_address = models.CharField(max_length=200)
    customer_telephone = models.CharField(max_length=10)
    # Pass the callable, not timezone.now(): calling it would freeze the
    # default at the moment the module was first imported.
    customer_join_date = models.DateTimeField(default=timezone.now)
    customer_last_logon = models.DateTimeField(default=timezone.now)
class Product(models.Model):
    """A catalogue item."""
    product_id = models.AutoField(primary_key=True)
    product_name = models.CharField(max_length=100)
    product_description = models.CharField(max_length=500)
    product_price = models.DecimalField(max_digits=8, decimal_places=2)
    product_picture = models.ImageField()
    product_sale_price = models.DecimalField(max_digits=8, decimal_places=2)
    product_sale_true = models.BooleanField(default=False)  # whether the sale price currently applies
    product_stock = models.PositiveIntegerField()
    product_category = models.CharField(max_length=50)
    product_keywords = models.CharField(max_length=500)  # search keywords -- presumably comma-separated; TODO confirm
class Order(models.Model):
    """A customer order; line items are stored as JSON text."""
    order_id = models.AutoField(primary_key=True)
    order_customer_id = models.PositiveIntegerField()
    order_product_id_quantity_json = models.TextField()
    # Pass the callable -- timezone.now() would be evaluated once at import
    # time, stamping every order with the same date.
    order_date = models.DateTimeField(default=timezone.now)
    order_shipped = models.BooleanField(default=False)
    order_delivered = models.BooleanField(default=False)
    order_charged = models.BooleanField(default=False)
    order_payed = models.BooleanField(default=False)
    order_ip = models.GenericIPAddressField()
    order_last_4_credit = models.DecimalField(max_digits=4, decimal_places=0)
def main():
    """Report monthly pay: sales times commission rate, less any advance."""
    sales_total = get_sales()
    advance = get_advanced_pay()
    rate = get_commission_rate(sales_total)
    pay = (sales_total * rate) - advance
    if pay < 0:
        print("You must reimburse the company with an amount of $", abs(pay))
    else:
        print("Your monthly pay is $", pay)
def get_advanced_pay():
    """Prompt until a valid advance in [0, 2000] is entered; return it.

    The original silently re-prompted on negative input with no message;
    both invalid ranges now explain themselves before re-prompting.
    """
    while True:
        advance = float(input("Enter advanced pay: "))
        if advance >= 0 and advance <= 2000:
            break
        elif advance > 2000:
            print("Advance pay cannot be greater than $2000")
        else:
            print("Advance pay cannot be negative")
    return advance
def get_sales():
    """Prompt for and return the monthly sales figure as a float."""
    return float(input("Enter monthly sales: "))
def get_commission_rate(sale):
    """Return the commission rate for a monthly sales amount.

    Brackets: <10000 -> 10%, <15000 -> 12%, <18000 -> 14%, <22000 -> 16%,
    otherwise 18%.  The original used `<= 14999`-style upper bounds, so a
    fractional sale such as 14999.50 fell through every branch into the top
    18% bracket; half-open brackets remove those gaps.
    """
    if sale < 10000:
        rate = 0.10
    elif sale < 15000:
        rate = 0.12
    elif sale < 18000:
        rate = 0.14
    elif sale < 22000:
        rate = 0.16
    else:
        rate = 0.18
    return rate
# Guard the entry point so importing this module doesn't trigger the prompts.
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/11/18 下午5:48
# @Author : zs
# @Site :
# @File : add_hw.py
# @Software: PyCharm
import xml.etree.ElementTree as ET
import os
def rewrite_xml(cwd, newcwd):
    """Rewrite annotation XMLs found under *cwd* into *newcwd* with scaled boxes.

    Sets <width>/<height> to 640x360 and multiplies box coordinates by
    1920/640 (x) and 1080/360 (y), i.e. by 3.
    NOTE(review): writing size 640x360 while scaling the coordinates *up*
    by 3 looks inconsistent -- confirm which resolution the boxes are meant
    to match before reuse.
    """
    for path, d, filelist in os.walk(cwd):
        for xmlname in filelist:
            if xmlname.endswith('.xml'):
                oldname = os.path.join(path, xmlname)
                tree = ET.parse(oldname)
                root = tree.getroot()
                for width in root.iter('width'):
                    width.text = '640'
                for height in root.iter('height'):
                    height.text = '360'
                for xmin in root.iter('xmin'):
                    xmin.text = str(int(1920/640*int(xmin.text)))
                for xmax in root.iter('xmax'):
                    xmax.text = str(int(1920 / 640 * int(xmax.text)))
                for ymin in root.iter('ymin'):
                    ymin.text = str(int(1080/360*int(ymin.text)))
                for ymax in root.iter('ymax'):
                    ymax.text = str(int(1080/360*int(ymax.text)))
                print('正在转换:', os.path.join(newcwd, xmlname))
                tree.write(os.path.join(newcwd, xmlname), encoding="utf-8", xml_declaration=True)
def rename(path):
    """Shift every numeric '<n>.xml' file in *path* to '<n+1>.xml' (8-digit zero-padded).

    Files are processed in descending name order so that renaming n -> n+1
    never clobbers a not-yet-processed file.  The original iterated in
    ascending order, so each rename overwrote its successor and only the
    last file survived.
    """
    names = os.listdir(path)
    names.sort(reverse=True)
    for i in names:
        oldname = os.path.join(path, i)
        num = str(int(i.split('.')[0]) + 1)
        print(i, num)
        newname = path + '/' + num.zfill(8) + '.xml'
        os.rename(oldname, newname)
        print(oldname, '--->', newname)
def rewritename(cwd, errorname, newname):
    """Replace the label *errorname* with *newname* in every annotation XML under *cwd*.

    NOTE(review): the output path uses the module-global `newcwd` rather
    than a parameter, so this raises NameError unless run from the
    __main__ block below; it also rewrites every XML file, matched or not.
    Confirm intent before reusing.
    """
    for path, d, filelist in os.walk(cwd):
        for xmlname in filelist:
            if xmlname.endswith('.xml'):
                oldname = os.path.join(path, xmlname)
                tree = ET.parse(oldname)
                root = tree.getroot()
                for name in root.iter('name'):
                    if name.text == errorname:
                        print(oldname)
                        name.text = newname
                tree.write(newcwd + xmlname, encoding="utf-8", xml_declaration=True)
if __name__ == '__main__':
    cwd = '/home/zs/Downloads/xmls/'
    newcwd = '/home/zs/Downloads/xml1/'
    # errorname = ''
    # truename = ''
    # rewritename(newcwd, errorname, truename)
    # Walk cwd/<class>/<img-dir>/<image> three levels deep; for each leaf
    # directory, derive matching xml/ and newxml/ paths by splicing the
    # path segments, then rescale the annotations into newxml/.
    for class_path in os.listdir(cwd):
        image_path = os.path.join(cwd, class_path)
        for img in os.listdir(image_path):
            t = image_path + '/' + img
            for image in os.listdir(t):
                img_path = os.path.join(t, image)
                # Segments [0:6]: root prefix; [7:10]: class/img/image tail.
                tt = img_path.split('/')[0:6]
                ttt = img_path.split('/')[7:10]
                s = '/'.join(tt)
                ss = '/'.join(ttt)
                xml_path = os.path.join(s, 'xml', ss)
                newxml_path = os.path.join(s, 'newxml', ss)
                # print(xml_path)
                os.makedirs(newxml_path, exist_ok=True)
                rewrite_xml(xml_path, newxml_path)
                # rename(newxml_path)
from sqlite3 import Connection
from uuid import uuid4
class ChatTokenRepository:
    """SQLite-backed store mapping opaque tokens to Telegram chats."""

    def __init__(self, con: Connection):
        self._con = con

    def create(self, name: str, chat_id: int, telegram_user_id: int):
        """Insert a fresh random token for (chat_id, telegram_user_id); return it."""
        new_token = uuid4().hex
        insert_sql = 'INSERT INTO chat_tokens (token, name, chat_id, telegram_user_id) VALUES (?, ?, ?, ?)'
        # Using the connection as a context manager commits on success.
        with self._con as con:
            con.cursor().execute(insert_sql, (new_token, name, chat_id, telegram_user_id))
        return new_token

    def get_chat_id(self, token: str):
        """Return the chat_id stored for *token*, or None if unknown."""
        cursor = self._con.cursor()
        row = cursor.execute('SELECT chat_id FROM chat_tokens WHERE token = ?', (token,)).fetchone()
        # NOTE(review): row['chat_id'] requires con.row_factory = sqlite3.Row;
        # the default tuple rows would raise TypeError -- confirm wiring.
        return row and row['chat_id']
from collections import OrderedDict
from lxml import etree
# Module-level cache of parsed extension definitions, keyed by XML file path,
# shared by all GBIFDarwinCoreMapping instances to avoid re-parsing.
_extensions = {}


class GBIFDarwinCoreMapping(object):
    def __init__(self, extension_paths, reset=False):
        """Class used to represent the mapping from Darwin Core terms
        to a GBIF compatible list of Darwin Core Archive extensions and
        terms.

        The list is based on one core extension and a list of additional
        extensions. The fields allowed in each extension are defined
        using GBIF provided XML files.

        Extensions are defined by the schema at
        http://rs.gbif.org/schema/extension.xsd . The Occurrence core extension
        can be found at:
        http://rs.gbif.org/core/dwc_occurrence.xml
        While other GBIF supported extensions are available at:
        http://rs.gbif.org/extension/

        @param extension_paths: List of paths to the extension definition XML.
                                The first listed extension will be assumed
                                to be the core extension.
        @param reset: If True, ignore cached version and parse anew.
        """
        # term name -> term definition; first definition of a name wins.
        self._terms = {}
        # extension name -> extension definition; insertion order preserved
        # so the first (core) extension stays first.
        self._extensions = OrderedDict()
        self._core_extension = None
        for extension_path in extension_paths:
            extension = self._parse_extension(extension_path, reset)
            self._extensions[extension['name']] = extension
            if self._core_extension is None:
                self._core_extension = extension
        for extension in self._extensions:
            for term in self._extensions[extension]['terms']:
                # Don't overwrite. We don't accept duplicate terms, and if
                # conflict existed we'd want to prefer the core extension.
                if term['name'] not in self._terms:
                    self._terms[term['name']] = term

    def extensions(self):
        """Return the list of extension names. The core extension is always
        first.
        """
        # NOTE(review): on Python 3 this is a dict view, not a list --
        # confirm callers only iterate over it.
        return self._extensions.keys()

    def row_type(self, extension):
        """Return the row type of a given extension"""
        return self._extensions[extension]['row_type']

    def terms(self, extension):
        """Return the term names of a given extension"""
        terms = []
        for term in self._extensions[extension]['terms']:
            terms.append(term['name'])
        return terms

    def has_extension(self, extension):
        """ Returns True if that extension exists """
        return extension in self._extensions

    def is_core_extension(self, extension):
        """Returns True if the given extension exists and is the core extension"""
        return self._core_extension['name'] == extension

    def term_extension(self, term):
        """Return the extension name of a given term"""
        return self._terms[term]['extension']

    def term_qualified_name(self, term):
        """Return the qualified name of a given term"""
        return self._terms[term]['qualified']

    def term_exists(self, term):
        """Return True if the term exists"""
        return term in self._terms

    def _parse_extension(self, extension_path, reset=False):
        """Parse a GBIF DwC XML extension file and return it

        @param extension_path: Path to the XML file
        @param reset: If True, clear cached version and re-parse
        @returns: Definition of an extension, as a dict defining 'name',
                  'row_type' and 'terms'.
        """
        global _extensions
        # Parse only on a cache miss (or when the caller forces a reset);
        # the result is memoized in the module-level cache above.
        if reset or extension_path not in _extensions:
            xml_tree = etree.parse(extension_path)
            xml_root = xml_tree.getroot()
            extension_name = xml_root.get('name')
            _extensions[extension_path] = {
                'name': extension_name,
                'row_type': xml_root.get('rowType'),
                'terms': []
            }
            # Honour a default XML namespace, if one is declared, when
            # looking up <property> elements.
            namespace = ''
            if None in xml_root.nsmap:
                namespace = '{' + xml_root.nsmap[None] + '}'
            for xml_property in xml_root.findall(namespace + 'property'):
                name = xml_property.get('name')
                _extensions[extension_path]['terms'].append({
                    'name': name,
                    'extension': extension_name,
                    'qualified': xml_property.get('qualName'),
                    'required': xml_property.get('required') == 'true'
                })
        return _extensions[extension_path]
#!/usr/bin/env python
# IDENT nluetzge-time.py
# LANGUAGE Python
# AUTHOR N. Luetzgendorf
# PURPOSE
#
# VERSION
# 1.0 24.09.2018 NL Creation
import datetime
# Making a change here
# Emit the current timestamp as an ISO-8601 "# Date :" comment line.
print('# Date : ' + datetime.datetime.now().isoformat())
# Ammunition magazine for a shooter game.
class BulletBox(object):
    """Stores a bullet count and exposes simple get/set accessors."""

    def __init__(self, count):
        # Double underscore -> name-mangled, private to this class.
        self.__count = count

    def setBulletCount(self, count):
        """Overwrite the stored bullet count."""
        self.__count = count

    def getBulletCount(self):
        """Return the stored bullet count."""
        return self.__count
# Read an arbitrary string and report its type plus character-class checks
# (alphanumeric / numeric / alphabetic).  Prompts and output are Portuguese.
algo = input('Digite algo: ')
print('O tipo dele é {}. Ele é Alfa Númerico? {}. Ele é Númerico? {}. Ele é alfabetico? {}.'.format(type(algo), algo.isalnum(), algo.isnumeric(), algo.isalpha()))
|
def solution(n, m, section):
    """Greedy count of width-m roller passes needed to cover every
    position listed in *section* (positions are given in ascending order).

    n is the wall length; it is not needed by the greedy itself.
    """
    painted_upto = 0
    rolls = 0
    for spot in section:
        # Already covered by the previous roll -- skip.
        if spot <= painted_upto:
            continue
        # Start a new roll at this spot; it covers spot .. spot+m-1.
        painted_upto = spot + m - 1
        rolls += 1
    return rolls
# Direct counting via two sorted boundary arrays + binary search.
class Solution:
    def fullBloomFlowers(self, flowers: List[List[int]], persons: List[int]) -> List[int]:
        """For each arrival time in *persons*, count flowers in full bloom.

        A flower [s, e] blooms over the closed interval [s, e].  The number
        in bloom at time t is (#starts <= t) - (#ends < t), each found with
        a binary search over the independently sorted start/end times.

        Fix: removed the unused local ``n`` and built the boundary lists
        directly with sorted() instead of append-then-sort.
        """
        starts = sorted(s for s, _ in flowers)
        ends = sorted(e for _, e in flowers)
        res = []
        for t in persons:
            opened = bisect_right(starts, t)   # flowers started by time t
            closed = bisect_left(ends, t)      # flowers ended strictly before t
            res.append(opened - closed)
        return res
#!/usr/bin/env python
# tested on Python 2 & Python 3
'''
Kiff, the Kicad Diff!
Graphically compare layout changes between two git versions of a PCB.
If `-c` is not given, compares the local working copy against the latest
commited version from git. This is useful to verify board changes before
committing them.
If a git commit-id is given in `-c`, will compare the local version
against this commit. This is useful to compare changes between 2 commits.
A useful shortcut for the commit-id is `HEAD~1`, which means the previous one.
Elements which have been added in the local version are colored green,
removed ones red.
Note that this may look inverted for features on copper fills.
'''
import argparse
from PIL import Image
from numpy import array, zeros, uint8
from subprocess import call, check_output
from io import BytesIO
from os.path import splitext, join
from os import mkdir
from shutil import rmtree
from plot_layers import plot_layers
def img_diff(i1, i2, doInvert=True):
    '''
    i1, i2: PIL Image objects of same size to compare
    doInvert: set true when input is black on white background
    returns: PIL Image of the diff
    '''
    first = array(i1)
    second = array(i2)
    if doInvert:
        first, second = ~first, ~second
    rgb = zeros((first.shape[0], first.shape[1], 3), dtype=uint8)
    # Black background; pixels present in both versions render grey, while
    # added / removed features land in the red / green channels.
    both = first & second
    only_second = second & ~both
    only_first = first & ~both
    rgb[:, :, 0] = both * 0.2 + only_second * 0.8
    rgb[:, :, 1] = both * 0.2 + only_first * 0.8
    rgb[:, :, 2] = both * 0.2
    return Image.fromarray(rgb)
def load_pdf(fName, x=4.7, y=2.6, W=7.3, H=6.0, r=600):
    '''
    fName: .pdf file to load
    x, y, W, H: crop window [inch]
    r: resolution [dpi]
    returns: PIL Image
    '''
    # pdftoppm takes the crop window in pixels, hence the inch * dpi scaling.
    cmd = ['pdftoppm', '-r', str(int(r))]
    cmd += ['-x', str(int(x * r)), '-y', str(int(y * r))]
    cmd += ['-W', str(int(W * r)), '-H', str(int(H * r))]
    cmd.append(fName)
    ppm_bytes = check_output(cmd)
    # Decode the raw PPM stream into an 8-bit greyscale image.
    return Image.open(BytesIO(ppm_bytes)).convert('L')
def desc():
    ''' the git describe string '''
    raw = check_output(['git', 'describe', '--dirty'])
    return raw.decode('ascii').strip()
def co(cmds):
    ''' run and print cmds, raises exception if command returns != 0 '''
    # print('$', ...) inserts the same single space as the original concat.
    print('$', ' '.join(cmds))
    check_output(cmds)
def main():
    """CLI entry point: plot two git versions of a KiCad board and write
    per-layer diff images into ./diffs.  Returns -1 when there is nothing
    to compare; hard-exits (exit(1)) on plotting/git failures."""
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawTextHelpFormatter
    )
    parser.add_argument(
        'kicad_pcb',
        help='the `.kicad_pcb` file to DIFF'
    )
    parser.add_argument(
        '-c', '--commit',
        default='HEAD',
        help='git commit-id to compare current version against. Default: HEAD'
    )
    parser.add_argument(
        '-l', '--layers',
        default=0,
        type=int,
        help='Number of inner layers (InX.Cu) to plot. Default: 0'
    )
    parser.add_argument(
        '-r', '--resolution',
        default=400,
        type=int,
        help='Plotting resolution in [dpi]. Default: 400'
    )
    args = parser.parse_args()
    layers = ['F.Cu', 'B.Cu', 'F.SilkS', 'B.SilkS']
    layers += ['In{}.Cu'.format(i + 1) for i in range(args.layers)]
    print('layers: ' + ' '.join(layers))
    # Check for local (un-commited) changes
    do_stash = call(['git', 'diff-index', '--quiet', 'HEAD', '--']) > 0
    if not do_stash and args.commit == 'HEAD':
        print('No local changes, nothing to compare. Try -c <commit-id>')
        return -1
    # output directory name is derived from `git describe`
    try:
        git1_name = desc()
    except Exception:
        # this will happen if user isn't in a git repo
        print("No git description, can't continue")
        exit(1)
    # Do a .pdf plot of the current version
    dir1 = 'plot_' + git1_name
    print('> ' + dir1)
    bounds1 = plot_layers(args.kicad_pcb, dir1, layers)
    if bounds1 is None:
        exit(1)
    # Stash local changes if needed
    if do_stash:
        co(['git', 'stash'])
    # checkout specified git version (default: HEAD) ...
    if args.commit != 'HEAD':
        co(['git', 'checkout', args.commit])
    # ... and do a .pdf plot of it
    dir2 = 'plot_' + desc()
    print('> ' + dir2)
    bounds2 = plot_layers(args.kicad_pcb, dir2, layers)
    if bounds2 is None:
        # NOTE(review): exiting here leaves the stash un-popped and the
        # other commit checked out -- confirm whether cleanup is intended.
        exit(1)
    # Switch back to current version
    if args.commit != 'HEAD':
        co(['git', 'checkout', '-'])
    # Restore local changes
    if do_stash:
        co(['git', 'stash', 'pop'])
    # Generate plots into `diffs` directory
    try:
        mkdir('diffs')
    except OSError:
        print('diffs directory already exists')
    # Create a .png diff for each layer
    for ll in layers:
        pdf_name = splitext(args.kicad_pcb)[0]
        pdf_name += '-' + ll.replace('.', '_') + '.pdf'
        out_file = 'diffs/' + ll + '.png'
        print('> ' + out_file)
        # Both versions are cropped with bounds1 so the two renders align
        # pixel-for-pixel (bounds2 is only used as a success flag above).
        i1 = load_pdf(join(dir1, pdf_name), r=args.resolution, **bounds1)
        i2 = load_pdf(join(dir2, pdf_name), r=args.resolution, **bounds1)
        i_out = img_diff(i1, i2)
        i_out.save(out_file)
    print('Removing temporary directories')
    rmtree(dir1)
    rmtree(dir2)
# Script entry point (kept behind the guard so the module stays importable).
if __name__ == '__main__':
    main()
|
'''
Generate's CT file for Circle Plot based on protein FASTA sequence
usage: python generateBaseFile.py apoe.fasta
'''
import sys

# The FASTA file is the last CLI argument; its basename (before the first
# '.') names the protein and the output .ct file.
input_name = sys.argv[-1]
protein_name = input_name.split('.')[0]
output_name = protein_name + ".ct"

# Read the sequence: skip the FASTA header line, then join the wrapped
# sequence lines into a single string.
# Fix: both files are now opened with `with` -- the original never closed
# the output handle, so the last buffer was not guaranteed to be flushed.
sequence = ""
with open(input_name, "r") as infile:
    infile.readline()
    for line in infile:
        sequence += line.strip()

length = len(sequence)
with open(output_name, "w") as outfile:
    outfile.write(str(length) + " " + protein_name + " file #\n")
    # CT row: index, residue, previous index, next index, 0 (unpaired), index.
    for i in range(length):
        outfile.write(str(i + 1) + " " + sequence[i] + " " + str(i) + " " + str(i + 2) + " 0 " + str(i + 1) + "\n")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-04-14 17:58:38
# @Author : Fallen (xdd043@qq.com)
# @Link : https://github.com/fallencrasher/python-learning
# @Version : $Id$
# Decorator use case:
# login authentication
def get_user_pwd():
    """Load registered user/password pairs from register.txt.

    Each line is ``username|password``; only the first '|' separates the
    fields, so passwords may themselves contain '|'.
    """
    credentials = {}
    with open('register.txt', encoding='utf-8', mode='r') as f:
        for line in f:
            parts = line.strip().split('|', 1)
            credentials[parts[0]] = parts[1]
    return credentials
def login():
    """Allow up to four login attempts against the stored credentials.

    Returns True on success, implicitly None when all attempts fail.
    The username/password are currently hard-coded for testing (the
    interactive input() calls are commented out).
    """
    registered = get_user_pwd()
    for _ in range(4):
        username = 'fallen'  # input('username:')
        password = 'wangyishan043000'  # input('password:')
        if username not in registered:
            print('您还没有注册!')
        elif registered[username] != password:
            print('username or password error!')
        else:
            print('login successfully!')
            return True
def register():
    # Loop until a brand-new account is written (returns True) -- an
    # existing username falls through to login() and then re-prompts.
    while True:
        username = input('username:')
        password = input('password:')
        # One handle re-reads the current accounts while a second appends
        # to the same file.
        with open('register.txt',encoding='utf-8') as f, open('register.txt',encoding='utf-8',mode='a') as f1:
            dict1 = {}
            for i in f:
                i = i.strip().split('|',1)
                dict1[i[0]]=i[1]
            if username in list(dict1.keys()):
                print('you already have a accout!Please login.')
                login()
            else:
                # NOTE(review): the leading '\n' means the file starts with a
                # blank line when the very first account is written -- confirm.
                f1.write('\n'+username+'|'+password)
                return True
# Session state shared with the auth decorator: who is logged in and
# whether a successful login has already happened.
status_dict = {
    'username' : None,
    'status': False,
}
def auth(f):
    '''
    Decorator: require a successful login (up to the attempt limit enforced
    by login()) before the wrapped function may run.  A successful login is
    cached in status_dict so later decorated calls skip the prompt.
    '''
    def inner(*args, **kwargs):
        # Already authenticated in this session -> call straight through.
        if status_dict['status']:
            return f(*args, **kwargs)
        # Fixes vs. the original: login() was called twice (doubling the
        # prompts), `break` appeared outside any loop (a SyntaxError),
        # `time` was never imported, and a successful login was never
        # recorded in status_dict.
        if login():
            status_dict['status'] = True
            return f(*args, **kwargs)
        print('操作错误!退出系统。')
        import time
        time.sleep(0.5)
    return inner
# Three demo "pages"; each is gated by the auth decorator and invoked
# immediately at import time.  The printed strings are user-facing Chinese
# ("Welcome to the article / comment / diary page").
@auth
def article():
    print('欢迎访问文章页面!')
article()
@auth
def comment():
    print('欢迎访问评论页面')
comment()
# NOTE(review): "dariy" is likely a typo for "diary"; renaming would change
# the public name, so it is kept as-is.
@auth
def dariy():
    print('欢迎访问日记页面')
dariy()
|
import passgen
import os
# Short alias for the password-generator module, and the (mutable) symbol
# set that settings() may enable for generated passwords.
p = passgen
SYMBOLS = ''
def start():
    """Show the main menu once and dispatch on the chosen option."""
    print('[1] Generate password\n'
          + '[2] Settings\n'
          + '[3] About\n'
          + '[4] Exit')
    option = input('--> ')
    dispatch = {'1': gen, '2': settings, '3': about, '4': exit}
    action = dispatch.get(option)
    if action is None:
        print("-->Unknown option<--")
    else:
        action()
def settings():
    """Interactively enable or disable symbols in generated passwords.

    Keeps asking until a valid answer is given, then stores either the
    symbol set or an empty string in the global SYMBOLS.

    Fixes vs. the original: the raw user answer was assigned straight into
    the global SYMBOLS while prompting, and an unrecognised answer both
    returned early and left that garbage in SYMBOLS.  A local variable is
    now used until the answer is validated, and invalid input re-prompts.
    """
    global SYMBOLS
    while True:
        answer = input('Do you want to have symbols? ($@!€%&-_.,) y/n: ')
        if answer == "y" or answer == "yes":
            SYMBOLS = "$@!€%&-_.,"
            return
        if answer == "n" or answer == "no":
            SYMBOLS = ""
            return
        print("-->Unknown option<--")
def about():
    # Banner art (block characters) followed by author / changelog text;
    # the strings are output verbatim and must not be reformatted.
    print('░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░\n'
          + '░ ░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ ░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░\n'
          + '▒ ▒▒▒▒ ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ ▒▒▒▒ ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ ▒▒▒▒▒▒▒ ▒▒▒▒▒▒▒▒▒▒▒ ▒▒▒▒▒\n'
          + '▒ ▒▒▒▒ ▒▒▒▒ ▒▒▒▒▒▒ ▒▒▒ ▒▒ ▒▒▒▒▒▒▒▒▒▒▒▒▒▒ ▒▒▒▒▒ ▒ ▒▒▒▒▒▒▒▒▒ ▒▒▒▒▒ ▒▒ ▒▒▒▒▒▒▒ ▒▒▒▒▒▒▒▒ ▒▒ ▒▒\n'
          + '▓ ▓▓▓▓ ▓▓ ▓▓ ▓▓▓▓▓ ▓▓▓▓▓ ▓▓▓▓▓▓▓▓▓▓▓ ▓▓▓ ▓▓▓ ▓▓ ▓▓▓▓▓▓▓▓ ▓▓▓ ▓▓▓ ▓▓▓▓▓▓▓ ▓▓▓▓▓▓ ▓▓▓▓▓ ▓\n'
          + '▓ ▓▓▓▓▓▓▓▓ ▓▓▓ ▓▓▓▓ ▓▓▓▓ ▓▓ ▓▓▓ ▓ ▓▓▓ ▓▓ ▓▓▓▓▓▓▓▓▓ ▓ ▓▓▓▓ ▓▓▓▓▓▓▓ ▓▓▓▓▓▓ ▓▓▓▓▓▓ \n'
          + '▓ ▓▓▓▓▓▓▓▓ ▓▓▓ ▓▓▓▓▓▓ ▓▓▓▓▓ ▓▓ ▓▓▓▓ ▓▓▓ ▓▓▓▓▓▓▓▓▓▓ ▓▓ ▓▓▓▓▓▓▓▓▓▓ ▓▓▓▓▓ ▓▓▓▓▓▓▓ ▓▓▓▓▓▓▓ ▓▓▓▓ ▓\n'
          + '█ ██████████ █ █ ██ ████ ███████ ████ ██ ███████████ █████ █ ██ █ ██████ ████\n'
          + '███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████\n\n\n')
    print('Author: l0wGK (https://github.com/l0wGK)\n\n'
          + "Change log (v1.1.0):\nIt's now possible to generate passwords with a length of 256 characters.\n"
          + "New start menu\n"
          + "New settings\n"
          + "Removed some symbols\n\n\n")
    # Pause so the user can read the page before the menu redraws.
    input('Press enter to leave this page\n\n')
def gen():
    """Prompt for a length, generate a password via passgen, and copy it
    to the clipboard.  Re-prompts until a valid length is given.

    Fix: the original wrapped the whole body in a bare ``except:`` which
    mislabeled any passgen failure as "not an integer" and also swallowed
    KeyboardInterrupt/SystemExit.  The try now guards only the int()
    conversion and catches ValueError.
    """
    global SYMBOLS
    while True:
        try:
            num = int(input("Password length: "))
        except ValueError:
            print("-->Input was not an integer<--")
            continue
        if num >= 257 or num <= 7:
            print("-->Password length must be between 8-256 characters<--")
        else:
            password = p.generate(num, SYMBOLS)
            p.copy(password)
            print("-->Password copied to clipboard<--")
            return
# Entry point: re-show the menu forever (option 4 terminates via exit()).
if __name__ == '__main__':
    while True:
        start()
# -*- coding: utf-8 -*-
# Задача на программирование: последняя цифра большого числа Фибоначчи
# Дано число 1≤n≤107, необходимо найти последнюю цифру n-го числа Фибоначчи.
# Как мы помним, числа Фибоначчи растут очень быстро, поэтому при их вычислении нужно быть аккуратным с переполнением. В данной задаче, впрочем, этой проблемы можно избежать, поскольку нас интересует только последняя цифра числа Фибоначчи: если 0≤a,b≤9 — последние цифры чисел Fi и Fi+1 соответственно, то (a+b)mod10 — последняя цифра числа Fi+2.
# Sample Input:
# 2886
# Sample Output:
# 8
# Не сложно понять, что если мы будем постоянно хранить только последнюю цифру то ответ не измениться. Заведем массив от 0 до 1000, в котором a[0]=1 и a[1]=1, дальше в циклу от 2 до n мы будем пользоваться формулой: a[i]:=(a[i-1]+a[i-2]) mod 10, в конце выводим a[n].
# Read n (1 <= n <= 10^7) from stdin.
n=int(input())
def fib(n):
    """Return the last digit of the n-th Fibonacci number (F(0)=0, F(1)=1).

    Only the last digits are carried through the loop, so no big-integer
    arithmetic is ever performed.

    Fix: the original raised UnboundLocalError for n < 2 because the loop
    body never executed and ``fb_last`` was undefined; n < 2 now returns n
    directly (F(0)=0, F(1)=1).
    """
    if n < 2:
        return n
    prev, last = 0, 1
    for _ in range(2, n + 1):
        prev, last = last, (prev + last) % 10
    return last
# Print the last digit of the n-th Fibonacci number.
print(fib(n))
from collections.abc import Iterable
from typing import Any, TypeVar
from _typeshed import Incomplete
from networkx.classes.graph import Graph
_N = TypeVar("_N")
# Narrowed the return annotation from Incomplete to bool: per the NetworkX
# documentation this predicate reports whether G is k-edge-connected.
def is_k_edge_connected(G: Graph[Any], k: int) -> bool: ...
# Narrowed G, k and the return type to match the sibling stub above; per the
# NetworkX documentation this predicate reports whether nodes s and t are
# locally k-edge-connected in G.  s/t stay Incomplete (arbitrary node type).
def is_locally_k_edge_connected(
    G: Graph[Any], s: Incomplete, t: Incomplete, k: int
) -> bool: ...
# Yields edges of an augmenting set that makes G k-edge-connected; with
# partial=True a best-effort partial augmentation is yielded instead.
# NOTE(review): `avail` here is typed as a single edge tuple -- the NetworkX
# API accepts a *collection* of such tuples; confirm against typeshed.
def k_edge_augmentation(
    G: Graph[_N],
    k: int,
    avail: tuple[_N, _N] | tuple[_N, _N, dict[str, int]] | None = ...,
    weight: str | None = ...,
    partial: bool = ...,
) -> Iterable[tuple[_N, _N]]: ...
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.