text stringlengths 8 6.05M |
|---|
from pathlib import Path
from logging import getLogger, Formatter, FileHandler, StreamHandler, INFO, DEBUG
def create_logger(exp_version, base_path="./logs"):
    """Create and configure the logger for an experiment version.

    A file handler (DEBUG and up) writes to ``<base_path>/<exp_version>.log``
    and a stream handler echoes INFO and up to the console.

    Args:
        exp_version: logger name / log file stem for this experiment run.
        base_path: directory for log files; created if missing.

    Returns:
        The configured ``logging.Logger`` (also retrievable later via
        ``get_logger(exp_version)``).
    """
    # FileHandler fails if the target directory does not exist yet.
    log_dir = Path(base_path)
    log_dir.mkdir(parents=True, exist_ok=True)
    log_file = str(log_dir / "{}.log".format(exp_version))
    # logger
    logger_ = getLogger(exp_version)
    logger_.setLevel(DEBUG)
    # Guard against re-configuration: without this, calling create_logger
    # twice stacks duplicate handlers and every record is logged twice.
    if not logger_.handlers:
        # formatter
        fmr = Formatter("[%(levelname)s] %(asctime)s >>\t%(message)s")
        # file handler
        fh = FileHandler(log_file)
        fh.setLevel(DEBUG)
        fh.setFormatter(fmr)
        # stream handler
        ch = StreamHandler()
        ch.setLevel(INFO)
        ch.setFormatter(fmr)
        logger_.addHandler(fh)
        logger_.addHandler(ch)
    return logger_
def get_logger(exp_version):
    """Return the logger previously configured by ``create_logger`` for this version."""
    logger_ = getLogger(exp_version)
    return logger_
## time_keeper.py
import time
from functools import wraps
def stop_watch(VERSION):
    """Decorator factory: log a function's wall-clock run time.

    The elapsed time (whole seconds, formatted HH:MM:SS) is logged at INFO
    level on the logger registered under ``VERSION``.
    """
    def _stop_watch(func):
        @wraps(func)
        def wrapper(*args, **kargs):
            start = time.time()
            result = func(*args, **kargs)
            elapsed_time = int(time.time() - start)
            minits, sec = divmod(elapsed_time, 60)
            hour, minits = divmod(minits, 60)
            get_logger(VERSION).info("[elapsed_time]\t>> {:0>2}:{:0>2}:{:0>2}".format(hour, minits, sec))
            # BUG FIX: the wrapper previously dropped the wrapped function's
            # return value, so every decorated function returned None.
            return result
        return wrapper
    return _stop_watch
# Demo: fetch and print every message from a POP3 mailbox on localhost.
# NOTE(review): credentials are hard-coded — acceptable only for a local
# demo/test fixture, never for production code.
import poplib
mailbox = poplib.POP3('127.0.0.1')
mailbox.user('victim')
mailbox.pass_('hunter2')
# list() returns (response, [listing, ...], octets); we only need the count.
message_count = len(mailbox.list()[1])
# POP3 message numbers are 1-based; retr() returns (response, [lines...], octets).
for msg_no in range(1, message_count + 1):
    for line in mailbox.retr(msg_no)[1]:
        print(line)
# smtpd_senddata.py
|
#!/usr/bin/env python
import argparse
import functools
import matplotlib
matplotlib.rcParams['backend'] = 'Agg'
import boomslang
import numpy
import os
import sys
from expsiftUtils import *
from plotMcperfLatencyCompare import plotMcperfLatencyComparisonDirsWrapper
import plotMcperfLatencyCompare
# Command-line interface: output filename plus which dataset to plot.
parser = argparse.ArgumentParser(description='Plot mcperf latency comparisons')
parser.add_argument('plot_filename', help='Filename for the graph')
parser.add_argument('--dataset', default=0, type=int, help='mcperf_only=0/iso=1')
# Global plot configuration pushed into the plotMcperfLatencyCompare module.
plotMcperfLatencyCompare.FOR_PAPER = True
plotMcperfLatencyCompare.LATENCY_LIMITS = (0, 20)  # y-axis range (msec)
plotMcperfLatencyCompare.LOAD_LIMITS = (1000, 7000)  # x-axis range; overridden per dataset in main()
# Ordering and display labels for the rate-limiter schemes being compared.
plotMcperfLatencyCompare.RL_ORDER = { 'htb' : 1, 'eyeq' : 2, 'qfq' : 3, 'none' : 4 }
plotMcperfLatencyCompare.RL_LABEL = { 'htb' : 'HTB', 'eyeq' : 'PTB', 'qfq' : 'SENIC', 'none' : 'none' }
def main(argv):
    # Build and save side-by-side avg / 99th / 99.9th percentile mcperf
    # latency comparison plots for the selected dataset. (Python 2 script.)
    # Parse flags
    args = parser.parse_args()
    # Pick the dataset directory tree and the matching x-axis range.
    if args.dataset == 0:
        expt_dirs_base = ['../test_scripts/nsdi-paper-data/Sep21--10-34-combine/']
        plotMcperfLatencyCompare.LOAD_LIMITS = (1000, 7000)
    elif args.dataset == 1:
        expt_dirs_base = ['../test_scripts/nsdi-paper-data/Sep20--07-02-iso-combine/']
        plotMcperfLatencyCompare.LOAD_LIMITS = (1000, 5000)
    else:
        print 'Unknown dataset'
        return
    # Recursively collect every directory that holds an 'expsift_tags' file.
    expt_dirs = []
    for directory in expt_dirs_base:
        directory = os.path.abspath(directory)
        for (path, dirs, files) in os.walk(directory, followlinks=True):
            # Check if an experiment directory was found
            if os.path.exists(os.path.join(path, 'expsift_tags')):
                #print 'Found experiment directory:', path
                expt_dirs.append(path)
    print 'Found %d experiment directories to compare' % len(expt_dirs)
    # Read the properties for each directory from the expsift tags files
    dir2props_dict = getDir2PropsDict(expt_dirs)
    # Plot memcached latency comparison graphs (msec)
    avg_plot = plotMcperfLatencyComparisonDirsWrapper(dir2props_dict, 'avg')
    pc99_plot = plotMcperfLatencyComparisonDirsWrapper(dir2props_dict, 'pc99')
    pc999_plot = plotMcperfLatencyComparisonDirsWrapper(dir2props_dict, 'pc999')
    # Set xLabel
    avg_plot.setXLabel("Load (rpstc)")
    pc99_plot.setXLabel("Load (rpstc)")
    pc999_plot.setXLabel("Load (rpstc)")
    # Set yLabel
    avg_plot.setYLabel("Latency (msec)")
    pc99_plot.setYLabel("Latency (msec)")
    pc999_plot.setYLabel("Latency (msec)")
    # Title
    avg_plot.title = "Average"
    pc99_plot.title = "99th percentile"
    pc999_plot.title = "99.9th percentile"
    # Tick labels: one tick per 1000 rps across the configured load range.
    # NOTE(review): the same Line object is added to all three plots —
    # presumably boomslang tolerates sharing; confirm.
    xmin,xmax = plotMcperfLatencyCompare.LOAD_LIMITS
    xTickLabels = range(xmin, xmax+1, 1000)
    ticklabels_line = boomslang.Line()
    ticklabels_line.xTickLabels = xTickLabels
    ticklabels_line.xTickLabelPoints = xTickLabels
    avg_plot.add(ticklabels_line)
    pc99_plot.add(ticklabels_line)
    pc999_plot.add(ticklabels_line)
    # Label sizes
    avg_plot.setXTickLabelSize("small")
    pc99_plot.setXTickLabelSize("small")
    pc999_plot.setXTickLabelSize("small")
    avg_plot.setYTickLabelSize("small")
    pc99_plot.setYTickLabelSize("small")
    pc999_plot.setYTickLabelSize("small")
    avg_plot.setTitleSize("small")
    pc99_plot.setTitleSize("small")
    pc999_plot.setTitleSize("small")
    # Plot layout: the three plots share one row group ('throuput' is the
    # grouping key used consistently below — sic).
    layout = boomslang.PlotLayout()
    layout.addPlot(avg_plot, grouping='throuput')
    layout.addPlot(pc99_plot, grouping='throuput')
    layout.addPlot(pc999_plot, grouping='throuput')
    layout.setFigureDimensions(13.5, 3)
    # Save the plot
    layout.save(args.plot_filename)
    return
# Script entry point.
if __name__ == '__main__':
    main(sys.argv)
|
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#@
#@ Converts the single solid containing all the silicon detectors [input_file_name]
#@ into the true array of silicon detectors [output_file_name]
#@ Usage:
#@ python conver_silicon.py
#@
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#@ Options:
input_file_name = "./stk_ladders.gdml"
#output_file_name = "./stk_si_advanced.gdml"
output_file_name = "./stk_ladders_advanced.gdml"
# Base name for the generated per-ladder subsolids (suffix "-<n>" is added).
SOLID_BASE_NAME = 'DAM_TRKAss1_LadderCFRP-stl0x1052dd0' # (see stk_si.gdml)
VOLUME_BASE_NAME = 'Ladder' # (see stk_si.gdml)
#USE_LATEST_COORDINATE_SYSTEM = True
#@ GDML constants
# NOTE(review): used below as the number of <triangular> tags per output
# subsolid (12 triangles = 6 rectangular faces); the name suggests vertices
# per rectangle — confirm intent.
VERTICES_PER_RECTANGLE = 12
#@ others...
tmp_file_name = "./tmp.txt"
tmp_file_name1 = "./tmp1.txt"
tmp_file_name2 = "./tmp2.txt"
#@
#@ Auxiliary gdml parsing functions ---NOT USED YET!!!!
#@
def parce_vertices_for_gdml_file(filelines):
    """Collect named <position> vertices from GDML text lines.

    Returns a dict mapping vertex name -> [x, y, z], where each coordinate is
    truncated to at most 5 digits after the decimal point before conversion
    to float. Only the first occurrence of a vertex name is kept.
    """
    def _truncated(line, axis):
        # Quoted value after e.g. 'x=' up to the closing quote, with the
        # fractional part cut to 5 digits (truncation, not rounding).
        raw = line.split(axis + '=')[1].split('"')[1]
        whole = raw.split(".")[0]
        frac = raw.split(".")[1][0:5]
        return float(whole + "." + frac)

    vertices = {}
    for line in filelines:
        if "<position" not in line:
            continue
        name = line.split('name=')[1].split('"')[1]
        coords = [_truncated(line, axis) for axis in ("x", "y", "z")]
        if name in vertices:
            continue
        vertices[name] = coords
    return vertices
"""
f=open("stk_si.gdml")
l=f.readlines()
a=parce_vertices_for_gdml_file(l)
f.close()
"""
def find_vertices_for_resselated_solid(solidname, filelines):
    """Return the vertex names referenced by a <tessellated> solid.

    Scans ``filelines`` for the <tessellated ...> opening line whose text
    contains ``solidname`` (substring match), then collects, in order of
    first appearance, the vertex1/vertex2/vertex3 names of every
    <triangular> facet up to the matching </tessellated>.

    Returns None if the solid or its closing tag cannot be found.
    """
    #@ Parse start and stop position
    startline = None
    stopline = None
    # PORTABILITY FIX: range instead of Python-2-only xrange (identical
    # iteration behaviour on Python 2, and now also runs on Python 3).
    for i in range(len(filelines)):
        line = filelines[i]
        if "<tessellated" not in line: continue
        if solidname not in line: continue
        startline = i
        break
    if startline is None:
        return None
    for i in range(startline + 1, len(filelines)):
        line = filelines[i]
        if "</tessellated>" not in line: continue
        stopline = i
        break
    if stopline is None:
        return None
    #@ Look for vertices (deduplicated, first-seen order)
    vertices = []
    for i in range(startline + 1, stopline):
        line = filelines[i]
        if "triangular" not in line: continue
        vertexname1 = line.split('vertex1')[1].split('"')[1]
        vertexname2 = line.split('vertex2')[1].split('"')[1]
        vertexname3 = line.split('vertex3')[1].split('"')[1]
        if vertexname1 not in vertices: vertices.append(vertexname1)
        if vertexname2 not in vertices: vertices.append(vertexname2)
        if vertexname3 not in vertices: vertices.append(vertexname3)
    return vertices
def get_ladder_coordinates(all_vertices_info, layers = "even"):
    """Derive sorted, de-duplicated x/y/z coordinate lists for one layer parity.

    all_vertices_info: dict mapping vertex name -> [x, y, z].
    layers: "even" or "odd" — which readout layers to keep; any other value
    raises Exception.

    Returns (sorted_x, sorted_y, sorted_z) for the selected layers.
    NOTE(review): ``z_layer / 2`` is floor division on Python 2 but true
    (float) division on Python 3, which changes the parity test — confirm
    the intended interpreter (the surrounding script is Python 2).
    """
    # Distinct z values in ascending order; a vertex's layer index is the
    # rank of its z among all z values.
    distinct_z = []
    for info in all_vertices_info.values():
        if info[2] not in distinct_z:
            distinct_z.append(info[2])
    distinct_z = sorted(distinct_z)
    #@ get x,y-coordinates for readout-even/odd layers
    final_x = []
    final_y = []
    final_z = []
    for info in all_vertices_info.values():
        x, y, z = info[0], info[1], info[2]
        #@ z_layer 0 -- 11 (two z planes per physical layer)
        z_layer = distinct_z.index(z) / 2
        if layers == "even":
            if z_layer % 2:
                continue
        elif layers == "odd":
            if z_layer % 2 == 0:
                continue
        else:
            raise Exception("Illegal value for argument 'layers'")
        #@ Append x, y, z coordinates (de-duplicated)
        if x not in final_x:
            final_x.append(x)
        if y not in final_y:
            final_y.append(y)
        if z not in final_z:
            final_z.append(z)
    return sorted(final_x), sorted(final_y), sorted(final_z)
#@ Read input file
input_file = open(input_file_name, "r")
lines = input_file.readlines()
input_file.close()
# Total <triangular> facet lines in the input; used to avoid opening a new
# subsolid right after the very last facet.
# NOTE(review): len(filter(...)) only works on Python 2 where filter returns
# a list — this script is Python 2 throughout (see xrange below).
total_triangular_lines = len(filter(lambda line: "<triangular" in line, lines))
triangular_counter = 0 #@ number of <triangular> tags in input gdml file
tessellated_counter = 1 #@ number of output subsolids (there is at least one subsolid)
#@
#@ Create subsolids
#@
#output_file = open(output_file_name, "w")
tmpfile= open(tmp_file_name, "w")
tmpfile.write(' <tessellated aunit="deg" lunit="mm" name="'+ SOLID_BASE_NAME +'-%d">\n'%tessellated_counter)
for line in lines:
    if not "<triangular" in line:
        continue
    triangular_counter+=1
    # Close the current subsolid and open the next one after every
    # VERTICES_PER_RECTANGLE facets: the float-minus-int-division trick
    # checks the counter is an exact multiple (Python 2 floor division).
    # The last facet never opens a trailing empty subsolid.
    if (triangular_counter * 1.0 / VERTICES_PER_RECTANGLE - triangular_counter / VERTICES_PER_RECTANGLE == 0
            and triangular_counter > 0
            and triangular_counter < total_triangular_lines):
        tessellated_counter+=1
        tmpfile.write(line)
        tmpfile.write(' </tessellated>\n')
        tmpfile.write(' <tessellated aunit="deg" lunit="mm" name="'+ SOLID_BASE_NAME +'-%d">\n'%tessellated_counter)
        continue
    tmpfile.write(line)
tmpfile.write(' </tessellated>\n')
tmpfile.close()
#@
#@ Logical volumes
#@
#@ Analyze tracker coordinates
f = open(input_file_name, 'r')
tmpfile = open(tmp_file_name, 'r')
l=f.readlines()
tmpfile_lines = tmpfile.readlines()
f.close()
tmpfile.close()
all_vertices_info=parce_vertices_for_gdml_file(l)
# Even/odd layer coordinate sets (currently only used for inspection).
all_x_even, all_y_even, all_z_even = get_ladder_coordinates(all_vertices_info,)
all_x_odd, all_y_odd, all_z_odd = get_ladder_coordinates(all_vertices_info, "odd")
#@ Create logical volumes: one <volume> (material FR4) per generated subsolid.
tmpfile1 = open(tmp_file_name1, "w")
volume_names = []
for i in xrange(1,tessellated_counter+1):
    #@
    #@ Assign name to a volume
    #@
    solid_name = SOLID_BASE_NAME + '-%d'%i
    volume_name = VOLUME_BASE_NAME + "-%d"%(i-1)
    volume_names.append(volume_name)
    #@ Create volume
    tmpfile1.write(' <volume name="' + volume_name + '">\n')
    tmpfile1.write(' <materialref ref="FR4"/>\n')
    tmpfile1.write(' <solidref ref="'+ solid_name + '"/>\n')
    tmpfile1.write(' </volume>\n')
    # The string below is disabled debug code kept for reference.
    """
    if volume_name == (VOLUME_BASE_NAME + "X-0_" + PLANE_BASE_NAME + "X-19"):
    print "volume_name:", volume_name
    ver = find_vertices_for_resselated_solid(solid_name, tmpfile_lines)
    """
tmpfile1.close()
#@
#@ Physical volumes
#@
# Disabled earlier variant kept for reference.
"""
tmpfile2 = open(tmp_file_name2, "w")
for i in xrange(1,tessellated_counter+1):
tmpfile2.write(' <physvol>\n')
#tmpfile.write(' <file name="./Geometry/STK/stk_si_advanced.gdml"/>\n')
tmpfile2.write(' <volumeref ref="' + VOLUME_BASE_NAME + '-%d"/>\n'%i)
#tmpfile.write(' <position x="stk_adjust_x_position" y="stk_adjust_y_position" z="stk_adjust_z_position" unit="mm"/>\n')
tmpfile2.write(' </physvol>\n')
tmpfile2.close()
"""
# One <physvol> per generated logical volume.
tmpfile2 = open(tmp_file_name2, "w")
for volume_name in volume_names:
    tmpfile2.write(' <physvol>\n')
    #tmpfile.write(' <file name="./Geometry/STK/stk_si_advanced.gdml"/>\n')
    tmpfile2.write(' <volumeref ref="' + volume_name+'"/>\n')
    #tmpfile.write(' <position x="stk_adjust_x_position" y="stk_adjust_y_position" z="stk_adjust_z_position" unit="mm"/>\n')
    tmpfile2.write(' </physvol>\n')
tmpfile2.close()
#@
#@
#@ Combine things into the output file
#@
#
#output_file.close()
tmpfile = open(tmp_file_name, "r")
tmpfile1 = open(tmp_file_name1, "r")
tmpfile2 = open(tmp_file_name2, "r")
tmplines = tmpfile.readlines()
tmplines1 = tmpfile1.readlines()
tmplines2 = tmpfile2.readlines()
tmpfile.close()
tmpfile1.close()
tmpfile2.close()
# Splice the generated fragments into a copy of the original GDML:
# subsolids after <solids>, volumes after <structure>, physvols after the
# <solidref> line that references the original monolithic solid.
output_file = open(output_file_name, "w")
for line in lines:
    output_file.write(line)
    if "<solids>" in line:
        for tmpline in tmplines:
            output_file.write(tmpline)
        continue
    if "<structure>" in line:
        for tmpline in tmplines1:
            output_file.write(tmpline)
        continue
    if '<solidref' in line and SOLID_BASE_NAME in line:
        for tmpline in tmplines2:
            output_file.write(tmpline)
        continue
output_file.close()
"""
#@ read file
f=open(input_file_name)
l=f.readlines()
f.close()
all_vertices_info=parce_vertices_for_gdml_file(l)
x,y,z = get_ladder_coordinates(all_vertices_info, "odd")
"""
"""
#@ Use it for silicons
f=open("stk_si_advanced_v2.gdml")
l=f.readlines()
N_LAYERS = 12
N_X_TILES = 8
N_Y_TILES = 8
#@ get all z for silicons
all_vertices_info=parce_vertices_for_gdml_file(l)
all_z = []
for i in xrange(1,tessellated_counter+1):
solidname = SOLID_BASE_NAME + '-%d'%i
verticesforsolid = find_vertices_for_resselated_solid(solidname,l)
for vertexname in verticesforsolid:
z= all_vertices_info[vertexname][2]
if z in all_z: continue
all_z.append(z)
#lowestvertices[solidname] = find_lowest_vertex_for_solid(all_vertices_info,verticesforsolid)
all_z = sorted(all_z)
assert(len(all_z)==N_LAYERS * 2)
#@ get x-coordinates for readout-Y silicons
all_x = []
all_y = []
for tess_i in xrange(1,tessellated_counter+1):
solidname = SOLID_BASE_NAME + '-%d'%tess_i
verticesforsolid = find_vertices_for_resselated_solid(solidname,l)
for vertexname in verticesforsolid:
x= all_vertices_info[vertexname][0]
y= all_vertices_info[vertexname][1]
z= all_vertices_info[vertexname][2]
#@ z_layer 0 -- 11
z_layer = filter(lambda i: all_z[i]==z, xrange(len(all_z)))
assert(len(z_layer)==1)
z_layer = z_layer [0]
z_layer = z_layer / 2
#@ Use only even layers
if z_layer%2: continue # readout Y
#if z_layer%2 == 1: continue # readout X
#@ Append x, y coordinates
if x not in all_x: all_x.append(x)
if y not in all_y: all_y.append(y)
all_x = sorted(all_x)
all_y = sorted(all_y)
assert(len(all_x) == 2 * N_X_TILES)
assert(len(all_y) == 2 * N_Y_TILES)
f.close()
"""
"""
#@ Use it for ladders
f=open("stk_ladders.gdml")
l=f.readlines()
N_LAYERS = 12
N_X_TILES = 8
N_Y_TILES = 8
#@ get all z for silicons
all_vertices_info=parce_vertices_for_gdml_file(l)
tessellated_counter = len(all_vertices_info)
all_z = []
for i in xrange(1,tessellated_counter+1):
vertexname = all_vertices_info.keys()[i-1]
z= all_vertices_info[vertexname][2]
if z in all_z: continue
all_z.append(z)
#lowestvertices[solidname] = find_lowest_vertex_for_solid(all_vertices_info,verticesforsolid)
all_z = sorted(all_z)
assert(len(all_z)==N_LAYERS * 2)
#@ get x-coordinates for readout-Y (readpout-X?) silicons
all_x = []
all_y = []
for tess_i in xrange(1,tessellated_counter+1):
vertexname = all_vertices_info.keys()[tess_i-1]
x= all_vertices_info[vertexname][0]
y= all_vertices_info[vertexname][1]
z= all_vertices_info[vertexname][2]
#@ z_layer 0 -- 11
z_layer = filter(lambda i: all_z[i]==z, xrange(len(all_z)))
assert(len(z_layer)==1)
z_layer = z_layer [0]
z_layer = z_layer / 2
#@ Use only even layers
if z_layer%2 == 0: continue
#@ Append x, y coordinates
if x not in all_x: all_x.append(x)
if y not in all_y: all_y.append(y)
all_x = sorted(all_x)
all_y = sorted(all_y)
#assert(len(all_x) == 2 * N_X_TILES)
#assert(len(all_y) == 2 * N_Y_TILES)
f.close()
"""
|
from django.conf.urls import url
from . import views
# Shopping-cart URL routes (pre-Django-2.0 regex style).
urlpatterns = [
    url(r'^cart/(?P<pk>[0-9]+)/add/$', views.add_cart, name='add_cart'),
    url(r'^cart/(?P<pk>[0-9]+)/remove/$', views.remove_cart, name='remove_cart'),
    # NOTE(review): unlike the routes above, this pattern has no trailing
    # slash — confirm whether /cart/checkout/ should also resolve.
    url(r'^cart/checkout$', views.checkout, name='checkout'),
]
|
from __future__ import (absolute_import, division, print_function, unicode_literals)
from flask import Flask, request, jsonify, abort
from dotenv import load_dotenv, find_dotenv
from datetime import datetime
from reviewgramdb import *
from reviewgramlog import *
from repoutils import *
from languagefactory import LanguageFactory
from socket import *
from Crypto import Random
from Crypto.Cipher import AES
from functools import wraps
import struct
import logging
import json
import os
import pymysql
import base64
import traceback
import requests
import time
import math
import re
import jedi
import errno
import signal
import uuid
import subprocess
import sys
import binascii
load_dotenv(find_dotenv())
bot_webhook_token = os.getenv("BOT_WEBHOOK_TOKEN")
bot_api_token = os.getenv("BOT_API_TOKEN")
nowrap = None
app = Flask(__name__)
# Exception raised when a guarded operation exceeds its time budget.
# NOTE(review): shadows the built-in TimeoutError on Python 3.3+.
class TimeoutError(Exception):
    pass
# Context manager that raises TimeoutError if its body runs longer than
# `seconds`. Uses SIGALRM, so it only works on Unix and in the main thread.
class Timeout:
    def __init__(self, seconds=1, error_message='Timeout'):
        self.seconds = seconds  # time budget in whole seconds
        self.error_message = error_message
    def handle_timeout(self, signum, frame):
        # SIGALRM handler: abort the guarded block.
        raise TimeoutError(self.error_message)
    def __enter__(self):
        signal.signal(signal.SIGALRM, self.handle_timeout)
        signal.alarm(self.seconds)
    def __exit__(self, type, value, traceback):
        # Cancel any pending alarm whether or not the body finished in time.
        signal.alarm(0)
# AES-CBC helper for encrypting/decrypting repository passwords.
class AESCipher(object):
    """AES-CBC wrapper around the key in the AES_SECRET_KEY env variable.

    Wire format (base64-encoded): IV (one AES block) || little-endian int32
    plaintext byte length || ciphertext of the zero-padded plaintext.
    """
    def __init__(self):
        self.bs = AES.block_size
        # NOTE(review): key must be 16/24/32 bytes once UTF-8 encoded.
        self.key = os.getenv("AES_SECRET_KEY")
    def _pad(self, s):
        # Pad `s` (bytes) with b'0' up to a whole AES block.
        # BUG FIX: padding is now done at the byte level. The old code padded
        # by *character* count after decoding, so multi-byte UTF-8 input
        # produced a non-block-aligned byte buffer and AES.encrypt raised.
        # Because encrypt() stores the true plaintext length, the padding
        # content is ignored on decrypt and this change stays compatible.
        return s + (self.bs - len(s) % self.bs) * b'0'
    def encrypt(self, raw):
        """Encrypt `raw` (bytes) and return a base64-encoded token."""
        raw_size = len(raw)
        raw_bytes = self._pad(raw)
        raw_size_bytes = struct.pack('<i', raw_size)  # true length for decrypt
        iv = Random.new().read(AES.block_size)
        cipher = AES.new(self.key.encode("utf8"), AES.MODE_CBC, iv)
        return base64.b64encode(iv + raw_size_bytes + cipher.encrypt(raw_bytes))
    def decrypt(self, enc):
        """Invert encrypt(): return the original plaintext as str."""
        enc = base64.b64decode(enc)
        iv = enc[:self.bs]
        raw_size = struct.unpack('<i', enc[self.bs:self.bs + 4])[0]
        cipher = AES.new(self.key.encode("utf8"), AES.MODE_CBC, iv)
        raw_bytes = cipher.decrypt(enc[self.bs + 4:])
        raw = raw_bytes[:raw_size].decode('utf_8')
        return raw
# Builds the exact-match autocompletion SQL for a chain of lexemes.
def build_exact_match_autocomplete_query(amount, limit, langId):
    """Return SQL selecting the lexeme that follows an exact `amount`-long match.

    Joins aliases r1..r<amount> (one per already-typed lexeme, each bound via
    a %s placeholder) against r<amount+1>, the candidate completion.
    Returns "" when amount == 0.
    """
    if amount == 0:
        return ""
    if amount == 1:
        return "SELECT DISTINCT(r2.TEXT) " \
               "FROM" \
               " `repository_autocompletion_lexemes` AS r1, " \
               " `repository_autocompletion_lexemes` AS r2 " \
               "WHERE " \
               " r1.LEXEME_ID = 0 AND r1.`TEXT` = %s " \
               "AND r2.LEXEME_ID = 1 " \
               "AND r1.ROW_ID = r2.ROW_ID " \
               "AND r1.`LANG_ID` = " + str(langId) + " LIMIT " + str(limit)
    last = amount + 1
    parts = ["SELECT DISTINCT(r" + str(last) + ".TEXT) "]
    parts.append(" FROM ")
    # One table alias per already-typed lexeme, plus one for the completion.
    for alias in range(1, amount + 1):
        parts.append(" `repository_autocompletion_lexemes` AS r" + str(alias) + ", ")
    parts.append(" `repository_autocompletion_lexemes` AS r" + str(last) + " ")
    parts.append(" WHERE ")
    parts.append(" r1.LEXEME_ID = 0 AND r1.`TEXT` = %s ")
    for alias in range(2, amount + 1):
        parts.append(" AND r" + str(alias) + ".LEXEME_ID = " + str(alias - 1) + " AND r" + str(alias) + ".`TEXT` = %s ")
    parts.append(" AND r" + str(last) + ".LEXEME_ID = " + str(amount) + " ")
    # All aliases must refer to the same stored phrase (same ROW_ID).
    for alias in range(0, amount):
        parts.append(" AND r1.ROW_ID = r" + str(alias + 2) + ".ROW_ID ")
    parts.append(" AND r1.`LANG_ID` = " + str(langId))
    parts.append(" LIMIT " + str(limit))
    return "".join(parts)
# Builds the fuzzy-match autocompletion SQL for a chain of lexemes.
def build_non_exact_match_autocomplete_query(amount, limit, langId):
    # Same join structure as build_exact_match_autocomplete_query, except the
    # LAST typed lexeme is matched approximately: its Levenshtein distance to
    # the stored text must be at most half the stored text's length.
    # NOTE(review): requires a `levenshtein` SQL function in the database.
    # Returns "" when amount == 0.
    if (amount == 0):
        return ""
    if (amount == 1):
        return "SELECT DISTINCT(r1.`TEXT`) " \
               "FROM " \
               " `repository_autocompletion_lexemes` AS r1 " \
               "WHERE " \
               "r1.`LEXEME_ID` = 0 AND levenshtein(r1.`TEXT`, %s) <= CHAR_LENGTH(r1.`TEXT`) / 2 " \
               "AND r1.`LANG_ID` = " + str(langId) + " LIMIT " + str(limit)
    result = "SELECT DISTINCT(r" + str(amount) +".TEXT) "
    result += " FROM "
    # One alias per typed lexeme; r<amount> doubles as the fuzzy candidate.
    i = 1
    while (i < amount):
        result += " `repository_autocompletion_lexemes` AS r" + str(i) + ", "
        i = i + 1
    result += " `repository_autocompletion_lexemes` AS r" + str(amount) + " "
    result += " WHERE "
    result += " r1.LEXEME_ID = 0 AND r1.`TEXT` = %s "
    i = 2
    while (i < amount):
        result += " AND r" + str(i) + ".LEXEME_ID = " + str(i - 1) + " AND r" + str(i) + ".`TEXT` = %s "
        i = i + 1
    result += " AND r" + str(amount) + ".LEXEME_ID = " + str(amount - 1) + " AND levenshtein(r" + str(amount) + ".`TEXT`, %s) <= CHAR_LENGTH(r" + str(amount) + ".`TEXT`) / 2 "
    # All aliases must belong to the same stored phrase (same ROW_ID).
    i = 2
    while (i <= amount):
        result += "AND r1.ROW_ID = r" + str(i) + ".ROW_ID "
        i = i + 1
    result += " AND r1.`LANG_ID` = " + str(langId)
    result += " LIMIT " + str(limit)
    return result
# Attempts table-based autocompletion, returning at most maxAmount entries.
def table_try_autocomplete_with_max_amount(con, lexemes, maxAmount, langId):
    """Suggest completions for `lexemes` from the autocompletion table.

    Tries exact-match completions first (up to half of maxAmount), then fuzzy
    Levenshtein matches, then recurses with the first lexeme dropped until
    the quota is filled. Each query is capped at 3 seconds via SIGALRM; on
    failure the connection is re-opened and that step is skipped (best
    effort — an empty partial result is acceptable).
    """
    if (len(lexemes) == 0):
        return []
    # BUG FIX: int() — math.ceil returns a float on Python 2, which would
    # render as "LIMIT 2.0" (invalid SQL) in the generated query.
    exactLimit = int(math.ceil(maxAmount / 2))
    exactQuery = build_exact_match_autocomplete_query(len(lexemes), exactLimit, langId)
    exactRows = []
    try:
        with Timeout(seconds = 3):
            exactRows = select_and_fetch_all(con, exactQuery, lexemes)
    except Exception:  # was a bare except: don't swallow KeyboardInterrupt/SystemExit
        try:
            con.close()
        except Exception:
            append_to_log("/reviewgram/table_autocomplete/: Unable to close connection")
        con = connect_to_db()
        append_to_log("/reviewgram/table_autocomplete/: " + traceback.format_exc())
        append_to_log("/reviewgram/table_autocomplete/: Timeout for fetching autocompletion")
    result = []
    for row in exactRows:
        # A completion starting with a non-letter (other than a quote) is
        # glued to the current token instead of being space-separated.
        appendType = 'space'
        firstChar = row[0][0].lower()
        if ((not re.match("^[a-zA-Z]$", firstChar)) and (firstChar != "\"") and (firstChar != "'")):
            appendType = 'no_space'
        result.append({
            'append_type': appendType,
            'complete': row[0],
            'name_with_symbols': row[0]
        })
    nonExactLimit = maxAmount - len(result)
    nonExactQuery = build_non_exact_match_autocomplete_query(len(lexemes), nonExactLimit, langId)
    nonExactRows = []
    try:
        with Timeout(seconds = 3):
            nonExactRows = select_and_fetch_all(con, nonExactQuery, lexemes)
    except Exception:
        try:
            con.close()
        except Exception:
            append_to_log("/reviewgram/table_autocomplete/: Unable to close connection")
        con = connect_to_db()
        append_to_log("/reviewgram/table_autocomplete/: " + traceback.format_exc())
        append_to_log("/reviewgram/table_autocomplete/: Timeout for fetching autocompletion")
    for row in nonExactRows:
        # Keep only fuzzy rows that actually extend the last typed lexeme.
        if (row[0].startswith(lexemes[-1])):
            completePart = row[0][len(lexemes[-1]):]
            if (len(completePart) != 0):
                result.append({
                    'append_type': 'no_space',
                    'complete': completePart,
                    'name_with_symbols': row[0]
                })
    if (len(result) < maxAmount):
        # Back off: retry with a shorter context until the quota is filled.
        result = result + table_try_autocomplete_with_max_amount(con, lexemes[1:], maxAmount - len(result), langId)
    return result
# Table-based autocompletion with the limit taken from the environment.
def table_try_autocomplete(con, lexemes, langId):
    """Delegate to table_try_autocomplete_with_max_amount using the
    AUTOCOMPLETE_MAX_AMOUNT env variable as the suggestion quota."""
    if not lexemes:
        return []
    quota = int(os.getenv("AUTOCOMPLETE_MAX_AMOUNT"))
    return table_try_autocomplete_with_max_amount(con, lexemes, quota, langId)
# Safely walks nested dictionaries by a sequence of keys.
def safe_get_key(dict, keys):
    """Return dict[keys[0]][keys[1]]... or None as soon as a key is missing.

    NOTE(review): the first parameter shadows the built-in ``dict``; the name
    is kept for call compatibility.
    """
    node = dict
    for key in keys:
        if key not in node:
            return None
        node = node[key]
    return node
# Inserts or refreshes the (token -> chat) membership-cache row.
def insert_or_update_token_to_chat(con, chatId, uuid):
    # Returns 0 on update, otherwise the id of the newly inserted row.
    query = "SELECT COUNT(*) AS CNT FROM `token_to_chat_id` WHERE `TOKEN` = %s AND `CHAT_ID` = %s"
    countRows = select_and_fetch_first_column(con, query, [uuid, chatId])
    if (countRows > 0):
        # Row already exists — just refresh its timestamp.
        execute_update(con, "UPDATE `token_to_chat_id` SET TSTAMP = UNIX_TIMESTAMP(NOW()) WHERE TOKEN = %s AND CHAT_ID = %s", [uuid, chatId])
        return 0
    else:
        return execute_insert(con, "INSERT INTO `token_to_chat_id`(TOKEN, CHAT_ID, TSTAMP) VALUES (%s, %s, UNIX_TIMESTAMP(NOW()))", [uuid, chatId])
# Inserts or takes over the advisory repository lock for a chat.
def insert_or_update_repo_lock(con, chatId, uuid):
    # Returns 0 on update, otherwise the id of the newly inserted row.
    query = "SELECT COUNT(*) AS CNT FROM `repo_locks` WHERE `CHAT_ID` = %s"
    countRows = select_and_fetch_first_column(con, query, [chatId])
    if (countRows > 0):
        # A lock row already exists — reassign it to this token and refresh it.
        execute_update(con, "UPDATE `repo_locks` SET TSTAMP = UNIX_TIMESTAMP(NOW()), TOKEN = %s WHERE CHAT_ID = %s", [uuid, chatId])
        return 0
    else:
        return execute_insert(con, "INSERT INTO `repo_locks`(TOKEN, CHAT_ID, TSTAMP) VALUES (%s, %s, UNIX_TIMESTAMP(NOW()))", [uuid, chatId])
# Checks whether the user behind `uuid` is a member of chat `chatId`:
# consults the local DB cache first and falls back to the Telegram Bot API
# on a cache miss. Returns False on any error.
def is_user_in_chat(uuid, chatId):
    try:
        if ((chatId is not None) and (uuid is not None)):
            chatId = int(chatId)
            con = connect_to_db()
            with con:
                timestamp = int(time.time())
                tokenCleanupTime = int(os.getenv("TOKEN_CLEANUP_TIME")) * 60  # minutes -> seconds
                chatCleanupTime = int(os.getenv("CHAT_CACHE_TOKEN_SECONDS"))
                # Resolve the (still fresh) token to a Telegram user id.
                query = "SELECT USER_ID FROM `token_to_user_id` WHERE `TOKEN` = %s AND " + str(timestamp) + " - TSTAMP <= " + str(tokenCleanupTime) + " LIMIT 1"
                userId = select_and_fetch_first_column(con, query, [uuid])
                if (userId is not None):
                    # Fresh (token, chat) pair already cached?
                    query = "SELECT ID FROM `token_to_chat_id` WHERE `TOKEN` = %s AND `CHAT_ID` = %s AND " + str(timestamp) + " - TSTAMP <= " + str(chatCleanupTime) + " LIMIT 1"
                    row = select_and_fetch_first_column(con, query, [uuid, chatId])
                    if (row is None):
                        # Cache miss: ask the Telegram Bot API directly.
                        url = "https://api.telegram.org/bot" + os.getenv("BOT_API_TOKEN") + "/getChatMember"
                        append_to_log("/reviewgram/register_chat_id_for_token/: " + url)
                        params = {'user_id': userId, 'chat_id' : chatId}
                        response = requests.get(url, params=params)
                        json_response = response.json()
                        if (json_response is None):
                            append_to_log("/reviewgram/register_chat_id_for_token/: unable to parse response from request:" + url + ", params" + json.dumps(params))
                            return False
                        else:
                            if (json_response["ok"] is True):
                                insert_or_update_token_to_chat(con, chatId, uuid)
                                return True
                            else:
                                # Supergroup fallback: Telegram exposes such
                                # chats under the "-100<id>" form — retry once.
                                if (("chat" in json_response["description"]) and (chatId < 0)):
                                    kchatId = "-100" + str(-1 * chatId)
                                    params = {'user_id': userId, 'chat_id' : kchatId}
                                    response = requests.get(url, params=params)
                                    json_response = response.json()
                                    if (json_response is None):
                                        append_to_log("/reviewgram/register_chat_id_for_token/: unable to parse response from request:" + url + ", params" + json.dumps(params))
                                        return False
                                    else:
                                        if (json_response["ok"] is True):
                                            insert_or_update_token_to_chat(con, chatId, uuid)
                                            return True
                                append_to_log("/reviewgram/register_chat_id_for_token/: TG API reported that user " + str(userId) + "is not in chat " + str(chatId))
                                return False
                    else:
                        return True
                else:
                    append_to_log("/reviewgram/register_chat_id_for_token/: user token not found")
                    return False
            # NOTE(review): every branch above returns, so this looks
            # unreachable; kept as a defensive default.
            return True
        else:
            append_to_log("/reviewgram/register_chat_id_for_token/: no data")
            return False
    except Exception as e:
        append_to_log("/reviewgram/register_chat_id_for_token/: Exception " + traceback.format_exc())
        return False
# Validates the replacement table on save.
def validate_replace_table(table):
    """Check that `table` is a list of [from, to] string pairs.

    Returns "" when valid, otherwise a human-readable (Russian) error naming
    the first offending row (1-based). A row with both sides blank is
    allowed; a row with exactly one blank side is rejected.
    """
    i = 1
    for row in table:
        common_error = "В таблице замен обнаружена некорректная строка №" + str(i)
        if (not isinstance(row, list)):
            return common_error
        if (len(row) != 2):
            return common_error
        if ((not isinstance(row[0], str)) or (not isinstance(row[1], str))):
            return common_error
        row0 = row[0].strip()
        row1 = row[1].strip()
        if ((len(row0) == 0) and (len(row1) != 0)):
            return "В таблице замене в строке №" + str(i) + " не заполнена исходная строка"
        if ((len(row0) != 0) and (len(row1) == 0)):
            return "В таблице замене в строке №" + str(i) + " не заполнена строка для замены"
        # BUG FIX: was "++i" — in Python that is two unary plus operators (a
        # no-op), so the counter never advanced and every error reported №1.
        i += 1
    return ""
# Rewrites the replacement table of repository `repoId` to match `table`:
# reuses existing row ids in order, inserts extras, deletes leftovers.
def update_replace_table(con, repoId, table):
    source_rows = select_and_fetch_all(con, "SELECT ID FROM `replace_tables` WHERE `REPO_ID` = %s ORDER BY `ID` ASC" ,[repoId])
    source_rows = [row[0] for row in source_rows]
    # Normalize: strip both columns, then drop rows with any empty side.
    table = list(map(lambda x: [x[0].strip(), x[1].strip()], table))
    table = list(filter(lambda x: ((len(x[0]) != 0) and (len(x[1]) != 0)), table))
    for row in table:
        if (len(source_rows) != 0):
            # Overwrite an existing row (smallest remaining id first).
            id = source_rows[0]
            source_rows.pop(0)
            execute_update(con, "UPDATE `replace_tables` SET `FROM_TEXT` = %s, TO_TEXT= %s WHERE `ID` = %s", [row[0], row[1], id])
        else:
            execute_update(con, "INSERT INTO `replace_tables`(`FROM_TEXT`, `TO_TEXT`, `REPO_ID`) VALUES (%s,%s,%s)", [row[0], row[1], repoId])
    if (len(source_rows) != 0):
        # More old rows than new ones — delete the surplus.
        source_rows = [str(row) for row in source_rows]
        execute_update(con, "DELETE FROM `replace_tables` WHERE `ID` IN (" + ",".join(source_rows) + ")", [])
@app.route('/')
def index():
    # Health-check endpoint.
    return 'OK'
@app.route('/reviewgram/')
def reviewgram():
    # Health-check endpoint under the /reviewgram/ prefix.
    return 'OK'
@app.route('/reviewgram/bot_username/')
def bot_username():
    # Expose the bot's username so clients can build t.me deep links.
    return os.getenv("BOT_USERNAME")
# Telegram webhook implementation: binds a login token (sent to the bot as a
# base64-encoded message) to the Telegram user id that sent it.
def imp_bot_api(request):
    # Shared-secret check: Telegram is configured to call us with ?token=...
    if request.args.get('token') != bot_webhook_token:
        abort(404)
    data = request.json
    if data is None:
        abort(404)
    message = ""
    try:
        userId = safe_get_key(data, ["message", "from", "id"])
        message = safe_get_key(data, ["message", "text"])
        if ((userId is not None) and (message is not None)):
            if (message == "/start"):
                return 'OK'
            # The message body is expected to be a base64-encoded token.
            decoded = base64.b64decode(message)
            con = connect_to_db()
            with con:
                countRows = select_and_fetch_first_column(con, "SELECT COUNT(*) AS CNT FROM `token_to_user_id` WHERE `TOKEN` = %s", [decoded])
                if (countRows > 0):
                    execute_update(con, "UPDATE `token_to_user_id` SET USER_ID = %s, TSTAMP = UNIX_TIMESTAMP(NOW()) WHERE TOKEN = %s", [userId, decoded])
                else:
                    execute_update(con, "INSERT INTO `token_to_user_id`(USER_ID, TOKEN, TSTAMP) VALUES (%s, %s, UNIX_TIMESTAMP(NOW()))", [userId, decoded])
        else:
            append_to_log("/reviewgram/bot: no data")
            return 'ERROR'
    except binascii.Error:
        # Message was not valid base64 — not a token, just chat noise.
        append_to_log("/reviewgram/bot: Unable to decode message: " + message)
        return 'ERROR'
    except Exception as e:
        if message is None:
            message = ""
        append_to_log("/reviewgram/bot: Exception " + traceback.format_exc() + "\nMessage was: " + message)
        return 'ERROR'
    return 'OK'
@app.route('/reviewgram/bot/', methods=['POST', 'GET'])
def bot_api():
    return imp_bot_api(request)
# /reviewgram/register_chat_id_for_token/ implementation: verifies (and
# caches) that the token's user belongs to the given chat.
def imp_register_chat_id_for_token(request):
    chatId = request.values.get("chatId")
    uuid = request.values.get("uuid")
    if (is_user_in_chat(uuid, chatId)):
        return 'OK'
    else:
        abort(404)
@app.route('/reviewgram/register_chat_id_for_token/', methods=['POST'])
def register_chat_id_for_token():
    return imp_register_chat_id_for_token(request)
# When nowrap mode is toggled on, if_jsonify returns raw objects instead of
# Flask responses (useful for callers outside a request context).
def toggle_nowrap():
    global nowrap
    nowrap = True
def if_jsonify(o):
    # Wrap `o` with flask.jsonify unless nowrap mode has been enabled.
    global nowrap
    if nowrap is None:
        return jsonify(o)
    return o
# /reviewgram/get_repo_settings/ implementation: returns the repository
# settings (and, optionally, the replacement table) bound to a chat.
def imp_get_repo_settings(request):
    chatId = request.values.get("chatId")
    uuid = request.values.get("uuid")
    if (is_user_in_chat(uuid, chatId)):
        con = connect_to_db()
        with con:
            row = select_and_fetch_one(con, "SELECT REPO_SITE, REPO_USER_NAME, REPO_SAME_NAME, USER, PASSWORD, LANG_ID, ID FROM `repository_settings` WHERE `CHAT_ID` = %s LIMIT 1", [chatId])
            table = []
            if (row is not None):
                id = row[6]
                withTable = request.values.get("withTable")
                if (withTable is not None):
                    # Optionally attach the FROM/TO replacement pairs.
                    rows = select_and_fetch_all(con, "SELECT FROM_TEXT, TO_TEXT FROM `replace_tables` WHERE `REPO_ID` = %s ORDER BY `ID` ASC" ,[id])
                    for localRow in rows:
                        table.append([localRow[0], localRow[1]])
                if (len(row[4]) > 0):
                    # Stored password is AES-encrypted; decrypt before reply.
                    c = AESCipher()
                    password = c.decrypt(row[4])
                # NOTE(review): if the PASSWORD column is empty, `password`
                # is unbound here and this raises NameError — confirm whether
                # an empty PASSWORD can occur for an existing settings row.
                return if_jsonify({"site": row[0], "repo_user_name": row[1], "repo_same_name": row[2], "user": row[3], "password": base64.b64encode(password.encode('UTF-8')).decode('UTF-8'), "langId" : row[5], "id": row[6], "table": table })
            else:
                # No settings stored yet for this chat — blank defaults.
                return if_jsonify({"site": "", "repo_user_name" : "", "repo_same_name": "", "user": "", "password": "", "langId": 1, "id": 0, "table": []})
    else:
        abort(404)
@app.route('/reviewgram/get_repo_settings/')
def get_repo_settings():
    return imp_get_repo_settings(request)
def imp_set_repo_settings(request):
global nowrap
json = request.json
chatId = safe_get_key(json, ["chatId"])
uuid = safe_get_key(json, ["uuid"])
repoUserName = safe_get_key(json, ["repoUserName"])
repoSameName = safe_get_key(json, ["repoSameName"])
user = safe_get_key(json, ["user"])
password = safe_get_key(json, ["password"])
langId = safe_get_key(json, ["langId"])
table = safe_get_key(json, ["table"])
if (is_user_in_chat(uuid, chatId)):
if (repoUserName is None):
return if_jsonify({"error": "Не указано имя собственника репозитория"})
else:
repoUserName = repoUserName.strip()
if (len(repoUserName) == 0):
return if_jsonify({"error": "Не указано имя собственника репозитория"})
if (repoSameName is None):
return if_jsonify({"error": "Не указано имя репозитория"})
else:
repoSameName = repoSameName.strip()
if (len(repoSameName) == 0):
return if_jsonify({"error": "Не указано имя репозитория"})
if (user is None):
return if_jsonify({"error": "Не указано имя пользователя"})
else:
user = user.strip()
if (len(user) == 0):
return if_jsonify({"error": "Не указано имя пользователя"})
if (password is None):
return if_jsonify({"error": "Не указан пароль"})
else:
try:
password = base64.b64decode(password).strip()
if (len(password) == 0):
return if_jsonify({"error": "Не указан пароль"})
except Exception as e:
return if_jsonify({"error": "Не указан пароль"})
if (table is None):
return if_jsonify({"error": "Не указана таблица записей"})
if (not isinstance(table, list)):
return if_jsonify({"error": "Не указана таблица записей"})
error = validate_replace_table(table)
if (len(error) != 0):
return if_jsonify({"error": error})
con = connect_to_db()
with con:
if (langId is None):
return if_jsonify({"error": "Не указан ID языка"})
else:
try:
langId = int(langId)
row = select_and_fetch_one(con, "SELECT * FROM `languages` WHERE `ID` = %s LIMIT 1", [langId])
if (row is None):
return if_jsonify({"error": "Не найден язык"})
except Exception as e:
return if_jsonify({"error": "Не распарсен язык"})
c = AESCipher()
password = c.encrypt(password)
row = select_and_fetch_one(con, "SELECT ID FROM `repository_settings` WHERE `CHAT_ID` = %s LIMIT 1", [chatId])
id = 0
if (row is not None):
id = row[0]
execute_update(con, "UPDATE `repository_settings` SET REPO_SITE = %s, REPO_USER_NAME = %s, REPO_SAME_NAME = %s, USER = %s, PASSWORD = %s, LANG_ID = %s WHERE CHAT_ID = %s", ['github.com', repoUserName, repoSameName, user, password, langId, chatId])
else:
id = execute_insert(con, "INSERT INTO `repository_settings`(CHAT_ID, REPO_SITE, REPO_USER_NAME, REPO_SAME_NAME, USER, PASSWORD, LANG_ID) VALUES (%s, %s, %s, %s, %s, %s, %s)", [chatId, 'github.com', repoUserName, repoSameName, user, password, langId])
update_replace_table(con, id, table)
return if_jsonify({"error": ""})
else:
abort(404)
@app.route('/reviewgram/set_repo_settings/', methods=['POST'])
def set_repo_settings():
    # Thin route wrapper; all logic lives in imp_set_repo_settings().
    return imp_set_repo_settings(request)
def imp_try_lock(request):
    """Try to acquire the per-chat repository lock for the client `uuid`.

    Returns {"locked": True} when another client refreshed the lock within
    the last LOCK_TIME seconds, otherwise (re)acquires the lock and returns
    {"locked": False}. Aborts with 404 when the user is not in the chat.
    """
    chatId = request.values.get("chatId")
    uuid = request.values.get("uuid")
    if (is_user_in_chat(uuid, chatId)):
        con = connect_to_db()
        lockTime = int(os.getenv("LOCK_TIME"))
        timestamp = int(time.time())
        with con:
            # Fix: pass timestamp/lockTime as query parameters instead of
            # splicing str(...) values into the SQL text.
            row = select_and_fetch_one(
                con,
                "SELECT * FROM `repo_locks` WHERE `TOKEN` <> %s AND `CHAT_ID` = %s AND %s - TSTAMP <= %s LIMIT 1",
                [uuid, chatId, timestamp, lockTime])
            if (row is not None):
                # A different token holds a still-fresh lock.
                return if_jsonify({"locked": True})
            else:
                insert_or_update_repo_lock(con, chatId, uuid)
                return if_jsonify({"locked": False})
    else:
        abort(404)
@app.route('/reviewgram/try_lock/')
def try_lock():
    # Thin route wrapper; all logic lives in imp_try_lock().
    return imp_try_lock(request)
def imp_check_syntax(request):
    """Check the syntax of a (base64-encoded) source fragment.

    Expects JSON with "filename", "content" (base64), "start", "end" and
    "langId". Returns {"errors": <base64 text>}; any failure or missing
    field yields {"errors": ""}.
    """
    data = request.json
    if data is None:
        return if_jsonify({"errors": ""})
    try:
        fileName = safe_get_key(data, ["filename"])
        content = safe_get_key(data, ["content"])
        start = safe_get_key(data, ["start"])
        end = safe_get_key(data, ["end"])
        langId = safe_get_key(data, ["langId"])
        if ((fileName is not None) and (content is not None) and (start is not None) and (end is not None) and (langId is not None)):
            fileContent = base64.b64decode(content)
            langId = int(langId)
            # Delegate to the language-specific checker, then re-encode the
            # error text so it survives JSON transport unmodified.
            errors = LanguageFactory().create(langId).checkSyntax(fileName, fileContent.decode('UTF-8'), start, end)
            errors = base64.b64encode(errors.encode('UTF-8')).decode('UTF-8')
            return if_jsonify({"errors": errors})
    except Exception as e:
        append_to_log("/reviewgram/check_syntax: Exception " + traceback.format_exc())
    # Missing fields or an exception: report "no errors" rather than fail.
    return if_jsonify({"errors": ""})
@app.route('/reviewgram/check_syntax/', methods=['POST', 'GET'])
def check_syntax():
    # Thin route wrapper; all logic lives in imp_check_syntax().
    return imp_check_syntax(request)
def imp_get_autocompletions(request):
    """Return up to 5 unique autocompletion entries for a cursor position.

    Expects JSON with "tokens" (list), "content" (base64 file body), "line",
    "position", "chatId", "branchId" and "langId". Tries the
    language-specific provider first, then falls back to the token table.
    Returns [] on any error.
    """
    data = request.json
    if data is None:
        return if_jsonify([])
    try:
        tokens = safe_get_key(data, ["tokens"])
        content = safe_get_key(data, ["content"])
        # int(None) raises TypeError when a key is missing; the outer
        # except turns that into an empty result.
        line = int(safe_get_key(data, ["line"]))
        position = int(safe_get_key(data, ["position"]))
        chatId = int(safe_get_key(data, ["chatId"]))
        branchId = safe_get_key(data, ["branchId"])
        langId = safe_get_key(data, ["langId"])
        if ((tokens is not None) and (content is not None) and (line is not None) and (position is not None) and (chatId is not None) and (branchId is not None) and (langId is not None)):
            if (not isinstance(tokens, list)):
                raise Exception("Error!")
            langId = int(langId)
            fileContent = base64.b64decode(content)
            con1 = connect_to_db()
            con2 = connect_to_db()
            result = []
            # Primary source: the language-aware completion engine.
            try:
                with con1:
                    result = result + LanguageFactory().create(langId).getAutocompletions(con1, tokens, fileContent, line, position, chatId, branchId,)
            except Exception as e:
                append_to_log("/reviewgram/get_autocompletions: Exception " + traceback.format_exc())
                append_to_log("/reviewgram/get_autocompletions: Proceeding to table")
            # Fallback: plain token-table lookup, only when nothing was found.
            try:
                with con2:
                    if (len(result) == 0):
                        result = result + table_try_autocomplete(con2, tokens, langId)
            except Exception as e:
                append_to_log("/reviewgram/get_autocompletions: Exception " + traceback.format_exc())
                append_to_log("/reviewgram/get_autocompletions: Proceeding to result")
            # Deduplicate by completion text, preserving first-seen order.
            resultHash = {}
            filteredResult = []
            for part in result:
                if (not (part["complete"] in resultHash)):
                    resultHash[part["complete"]] = True
                    filteredResult.append(part)
            if (len(filteredResult) > 5):
                filteredResult = filteredResult[0:5]
            return if_jsonify(filteredResult)
    except Exception as e:
        append_to_log("/reviewgram/get_autocompletions: Exception " + traceback.format_exc())
    return if_jsonify([])
@app.route('/reviewgram/get_autocompletions/', methods=['POST', 'GET'])
def get_autocompletions():
    # Thin route wrapper; all logic lives in imp_get_autocompletions().
    return imp_get_autocompletions(request)
def imp_start_recognizing(request):
    """Queue an uploaded voice recording for speech recognition.

    Saves the uploaded OGG to disk, inserts a `recognize_tasks` row and
    notifies the local recognizer over TCP. Returns {"id": <task id>},
    {} when no file was sent, or an error payload for oversized uploads.
    Per-stage timings are appended to perf_log.txt.
    """
    perfLogFileName = os.getenv("APP_FOLDER") + "/perf_log.txt"
    fileObject = open(perfLogFileName, 'at')
    start = time.perf_counter()
    langId = request.form.get("langId")
    content = request.form.get("content")
    record = request.files.get("record")
    repoId = request.form.get("repoId")
    if (repoId is not None):
        try:
            repoId = int(repoId)
        except Exception as e:
            # Non-numeric repo id: log and fall back to 0.
            repoId = 0
            append_to_log("/reviewgram/start_recognizing: broken repo id")
    if (content is None):
        content = ""
    if (request.form.get("langId") is not None):
        append_to_log("/reviewgram/start_recognizing: " + request.form.get("langId"))
    if (record is None):
        fileObject.close()
        return if_jsonify({})
    measure1 = time.perf_counter()
    fileObject.write("Pretesting args for recognition: " + str(measure1 - start) + "\n")
    # Determine the upload size by seeking to the end of the stream.
    record.seek(0, os.SEEK_END)
    fileLength = record.tell()
    if (fileLength >= int(os.getenv("MAX_RECORD_SIZE")) * 1024 * 1024):
        fileObject.close()
        return if_jsonify({"error": "file is too large: " + str(fileLength) })
    # NOTE(review): unlike perf_log above, no "/" is inserted after
    # APP_FOLDER here — presumably APP_FOLDER ends with "/"; verify.
    fileName = os.getenv("APP_FOLDER") + "records/" + str(uuid.uuid4()) + "-" + str(time.time()) + ".ogg"
    append_to_log("/reviewgram/start_recognizing: " + fileName)
    measure2 = time.perf_counter()
    fileObject.write("Measuring file size: " + str(measure2 - measure1) + "\n")
    record.seek(0, os.SEEK_SET)
    record.save(fileName)
    measure3 = time.perf_counter()
    fileObject.write("Saving file: " + str(measure3 - measure2) + "\n")
    con = connect_to_db()
    if (langId is None):
        langId = 0
    rowId = execute_insert(con, "INSERT INTO `recognize_tasks`(FILENAME, LANG_ID, CONTENT, REPO_ID) VALUES (%s, %s, %s, %s)", [fileName, langId, content, repoId])
    # Poke the local recognizer daemon with the literal payload "1".
    host = '127.0.0.1'
    port = 9090
    addr = (host,port)
    tcp_socket = socket(AF_INET, SOCK_STREAM)
    tcp_socket.connect(addr)
    data = "1".encode("utf-8")
    tcp_socket.send(data)
    tcp_socket.close()
    measure4 = time.perf_counter()
    fileObject.write("Saving to DB and launching task: " + str(measure4 - measure3) + "\n")
    fileObject.close()
    return if_jsonify({"id": rowId})
@app.route('/reviewgram/start_recognizing/', methods=['POST'])
def start_recognizing():
    # Thin route wrapper; all logic lives in imp_start_recognizing().
    return imp_start_recognizing(request)
def imp_recognizing_status(request):
    """Poll a recognition task by id.

    Returns {"status": "ok", "result": ...} once DATE_END is set;
    otherwise {"status": "pending"} — also for missing/invalid ids.
    """
    id = request.values.get("id")
    if (id is None):
        return if_jsonify({"status": "pending"})
    else:
        try:
            id = int(id)
            con = connect_to_db()
            # Only rows whose DATE_END is set count as finished.
            row = select_and_fetch_one(con, "SELECT `ID`, `RES` FROM `recognize_tasks` WHERE `ID` = %s AND `DATE_END` IS NOT NULL LIMIT 1", [id])
            if (row is not None):
                return if_jsonify({"status": "ok", "result": row[1] })
            else:
                return if_jsonify({"status": "pending"})
        except Exception as e:
            append_to_log("/reviewgram/recognizing_status: Exception " + traceback.format_exc())
            return if_jsonify({"status": "pending"})
@app.route('/reviewgram/recognizing_status/', methods=['GET'])
def recognizing_status():
    # Thin route wrapper; all logic lives in imp_recognizing_status().
    return imp_recognizing_status(request)
if __name__ == '__main__':
    # Reuse gunicorn's log handlers so direct runs log like production.
    gunicorn_logger = logging.getLogger("gunicorn.error")
    app.logger.handlers = gunicorn_logger.handlers
    app.logger.setLevel(gunicorn_logger.level)
    # NOTE(review): debug=True bound to 0.0.0.0 exposes the Werkzeug
    # debugger to the network — confirm this is dev-only.
    app.run(debug=True, host='0.0.0.0')
|
from sqlalchemy import MetaData, Table, Column, Integer, String, DateTime, FLOAT
from gerenciador.utils.conector.mysql import mysql_engine
# Engine bound to the 'pessoal' MySQL database.
engine = mysql_engine('pessoal')
meta = MetaData()
# Personal-expense table; dates are stored as strings, not DateTime.
gerenciador = Table(
    'gerenciador', meta,
    Column('id', Integer, primary_key=True),
    Column('descricao', String(100)),
    Column('data_compra', String(20)),
    Column('valor', FLOAT),
    Column('vencimento', String(20)),
    Column('data_criacao', String(20)),
    Column('observacoes', String(100))
)
# Creates the table if it does not exist (no-op otherwise).
meta.create_all(engine)
# meta.drop_all(engine) |
# -*- coding: UTF-8 -*-
from zope.interface import implements
from zope.interface import Interface
import persistent
from google.interfaces import IProject
class Project(persistent.Persistent):
    """A simple implementation of a Project .
    Make sure that the ``Project`` implements the ``IProject`` interface:
    >>> from zope.interface.verify import verifyClass
    >>> verifyClass(IProject, Project)
    True
    Here is an example of changing the name of the project:
    >>> project = Project()
    >>> project.name
    u''
    >>> project.name = u'Project Name'
    >>> project.name
    u'Project Name'
    """
    # NOTE(review): implements() is the Python-2 zope.interface class
    # advice; Python 3 requires the @implementer decorator instead.
    implements(IProject)
    # See google.interfaces.IProject
    name = u''
|
from django.contrib import admin
from lhcbpr_api import models
class ApplicationAdmin(admin.ModelAdmin):
    # Default admin options for Application.
    pass
class OptionAdmin(admin.ModelAdmin):
    # Default admin options for Option.
    pass
class ExecutableAdmin(admin.ModelAdmin):
    # Default admin options for Executable.
    pass
# class ApplicationVersionAdmin(admin.ModelAdmin):
# pass
# Expose the models in the Django admin site.
admin.site.register(models.Application, ApplicationAdmin)
admin.site.register(models.Option, OptionAdmin)
admin.site.register(models.Executable, ExecutableAdmin)
#admin.site.register(models.ApplicationVersion, ApplicationVersionAdmin)
|
from settings import MAX_ITENS, ITENS, BAG_SIZE
class Chromosome():
    """A knapsack candidate: one boolean gene flag per available item."""

    def __init__(self, gene):
        self.gene = gene
        # Indices of the items this chromosome actually packs.
        chosen = [idx for idx in range(MAX_ITENS) if gene[idx]]
        self.total_size = sum(ITENS[idx][0] for idx in chosen)
        self.total_benefit = sum(ITENS[idx][1] for idx in chosen)

    @property
    def fitness(self):
        """Zero when the bag overflows, otherwise size * benefit."""
        return 0 if self.total_size > BAG_SIZE else self.total_size * self.total_benefit

    def __repr__(self):
        return "Fitness: {}".format(
            self.fitness
        )
|
# -*- coding: utf-8 -*-
"""
@author: xiaoke
@file: img_sim.py
@time:2020-04-22 15:46
@file_desc:
"""
import logging as log
import sys
log.basicConfig(level=log.DEBUG)
_logger = log.getLogger(__name__)
from skimage.measure import compare_ssim
import cv2
import os
import voc
import numpy as np
|
from typing import List
from common import *
def busca_gulosa(map):
    """Greedy search from map.start_node to map.end_node.

    Repeatedly expands the cheapest open path (find_lower_path with the
    'gulosa' strategy). Returns {'success': bool, 'path_string': str}.
    """
    print('- Realizando Busca Gulosa:')
    success = False
    path_string = ''
    # Open list seeded with the start node (no parent, no previous path).
    path_list = [NodePath(map.start_node, None, None, [], map.relations)]
    while len(path_list) and not success:
        path = find_lower_path(path_list, 'gulosa')
        if path.node == map.end_node:
            success = True
            path_string = create_path_string(path)
        else:
            if not path.parent_node:
                print('* Começando pelo nó', path.node)
            else:
                print('\n* Checando pelo próximo nó menos custoso:', path.node)
            for rel in path.node.relations:
                # Only expand relations that have not been checked yet.
                if rel.status == RelationStatus.NOVA:
                    node = get_another_node_from_relation(path.node, rel)
                    parent = path.node
                    path_list.append(NodePath(node, parent, path, path_list, map.relations))
                    print(' ** Encontrado caminho de', parent, 'até', node, '( Custo:', node.cost, ')')
                    path_list[-1].adjust_best_path(path_list, "gulosa")
            # The expanded path leaves the open list.
            path_list.remove(path)
    # Restore relation statuses so the map can be searched again.
    reset(map)
    return {
        'success': success,
        'path_string': path_string
    }
def printa_busca_gulosa(map_list):
    """Run the greedy search on every ORDENADA map and print the outcome."""
    for map in map_list:
        if map.type == SearchTypes.ORDENADA:
            print('- Realizando Busca Gulosa em:', map.name, '\n')
            return_dict = busca_gulosa(map)
            if return_dict['success']:
                print('\n- O nó', map.end_node, 'foi encontrado!\n'
                      '- Percurso Menos Custoso até o nó:', return_dict['path_string'], '\n'
                      )
            else:
                print('- O nó não pôde ser encontrado! :(')
# Generated by Django 2.0 on 2018-01-30 16:15
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated: drops the Letter/Message/Need/Report/UrgentNeed
    # models after first removing their FK fields (required ordering).

    dependencies = [
        ('madadju', '0002_letter_message_need_report_urgentneed'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='letter',
            name='hamyar',
        ),
        migrations.RemoveField(
            model_name='letter',
            name='madadju',
        ),
        migrations.RemoveField(
            model_name='message',
            name='madadju',
        ),
        migrations.RemoveField(
            model_name='need',
            name='madadju',
        ),
        migrations.RemoveField(
            model_name='report',
            name='madadju',
        ),
        migrations.RemoveField(
            model_name='urgentneed',
            name='madadju',
        ),
        migrations.DeleteModel(
            name='Letter',
        ),
        migrations.DeleteModel(
            name='Message',
        ),
        migrations.DeleteModel(
            name='Need',
        ),
        migrations.DeleteModel(
            name='Report',
        ),
        migrations.DeleteModel(
            name='UrgentNeed',
        ),
    ]
|
from django.shortcuts import render
# Create your views here.
from rest_framework import generics
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import authentication, permissions
from django.contrib.auth.models import User
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.authtoken.models import Token
from rest_framework.response import Response
from .serializers import UserSerializer,RegisterSerializer,LoginSerializer
from rest_framework import response, status
from django.contrib.auth import login
from rest_framework import permissions
from rest_framework.authtoken.serializers import AuthTokenSerializer
class ListUsers(APIView):
    """GET: list all usernames; requires a valid auth token."""
    serializer_class = UserSerializer
    authentication_classes = [authentication.TokenAuthentication]
    permission_classes = [permissions.IsAuthenticated]

    def get(self, request, format=None):
        """
        Return a list of all users.
        """
        usernames = [user.username for user in User.objects.all()]
        return Response(usernames)
class RegisterApi(generics.GenericAPIView):
    """Register a new user; responds with the user payload and auth token."""
    serializer_class = RegisterSerializer

    def post(self, request, *args, **kwargs):
        serializer = self.serializer_class(data=request.data,
                                           context={'request': request})
        if serializer.is_valid(raise_exception=True):
            user = serializer.save()
            # One token per user: reuse an existing token when present.
            token, created = Token.objects.get_or_create(user=user)
            return Response({
                'user': UserSerializer(user, context=self.get_serializer_context()).data,
                'token': token.key,
            })
        # Unreachable while raise_exception=True, kept as a guard.
        # Fixed: `stauus.HTTP_400_BAD_REQUESTS` was a NameError (typo of
        # status.HTTP_400_BAD_REQUEST), and errors — not data — belong in
        # a failure response.
        return response.Response(serializer.errors,
                                 status=status.HTTP_400_BAD_REQUEST)
class LoginAPI(generics.GenericAPIView):
    """Validate credentials and return the user plus their auth token."""
    serializer_class = LoginSerializer

    def post(self, request, *args, **kwargs):
        serializer = self.serializer_class(data=request.data,
                                           context={'request': request})
        serializer.is_valid(raise_exception=True)
        # LoginSerializer's validated_data is the authenticated user object.
        user = serializer.validated_data
        token, created = Token.objects.get_or_create(user=user)
        return Response({
            'user': UserSerializer(user, context=self.get_serializer_context()).data,
            'token': token.key,
        })
class UserAPI(generics.RetrieveAPIView):
    """Return the currently authenticated user's serialized data."""
    serializer_class = UserSerializer
    permission_classes = [permissions.IsAuthenticated]
    #authentication_classes = [authentication.TokenAuthentication]

    def get_object(self):
        # Always resolve to the requesting user, never a URL-addressed one.
        return self.request.user
import cv2
import sys
from PyQt5.QtWidgets import QWidget, QLabel, QRadioButton, QPushButton, QApplication
from PyQt5.QtCore import QThread, Qt, pyqtSignal, pyqtSlot, QRect
from PyQt5.QtGui import QImage, QPixmap
from simplesaad_model import SimpleSaad
from hedia_keras import HediaKeras
from maarten_torch import MaartenTorch
class Thread(QThread):
    """Camera capture thread: grabs webcam frames, applies the selected
    detection/emotion model and emits each processed frame as a QImage."""
    changePixmap = pyqtSignal(QImage)
    f = None
    # Haar cascades bundled with OpenCV for the classic detection modes.
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml')
    smile_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_smile.xml')
    frontalcatface_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalcatface.xml')
    altface_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_alt.xml')
    # 0 = detection off; otherwise the radio-button label string.
    detection_mode = 0
    model = SimpleSaad()
    hedia_model = HediaKeras()
    maarten_model = MaartenTorch()
    #demo_model = DemoModels(model_id=0)
    # https://www.kaggle.com/milan400/human-emotion-detection-by-using-cnn?select=weights_best_4.hdf5
    # user: milan400
    #demo_model.load_model('milan400_keras_model.h5')

    def run(self):
        """Capture loop over the default camera; never returns."""
        cap = cv2.VideoCapture(0)
        while True:
            ret, frame = cap.read()
            if ret:
                # NOTE(review): detection_mode is never assigned the string
                # "default" anywhere visible, so this branch looks dead —
                # confirm before relying on default_processing().
                if self.detection_mode == "default":
                    frame = self.default_processing(frame)
                else:
                    frame = self.processing(frame, self.detection_mode)
                # Convert BGR (OpenCV) to an RGB QImage for the UI label.
                rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                h, w, ch = rgbImage.shape
                bytesPerLine = ch * w
                convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine, QImage.Format_RGB888)
                p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
                self.changePixmap.emit(p)

    def default_processing(self, frame):
        """Draw rectangles around detected faces and eyes; the emotion
        label is a placeholder ("n/a")."""
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = self.face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = frame[y:y + h, x:x + w]
            # apply model to get emotion label
            # label = model(image)
            label = "n/a"
            cv2.putText(frame, f"{label}", (50, 50), cv2.FONT_HERSHEY_COMPLEX_SMALL, .7, (0, 0, 255))
            eyes = self.eye_cascade.detectMultiScale(roi_gray)
            for (ex, ey, ew, eh) in eyes:
                cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
        return frame

    def processing(self, frame, mode):
        """Dispatch on the UI mode label; unknown modes (including 0 = off)
        return the frame unchanged."""
        if mode == "Smile":
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = self.face_cascade.detectMultiScale(gray, 1.3, 5)
            for (x, y, w, h) in faces:
                cv2.rectangle(frame, (x, y), ((x + w), (y + h)), (255, 0, 0), 2)
                roi_gray = gray[y:y + h, x:x + w]
                roi_color = frame[y:y + h, x:x + w]
                smiles = self.smile_cascade.detectMultiScale(roi_gray, 1.8, 20)
                for (sx, sy, sw, sh) in smiles:
                    cv2.rectangle(roi_color, (sx, sy), ((sx + sw), (sy + sh)), (0, 0, 255), 2)
            return frame
        elif mode == "Cat Face":
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = self.frontalcatface_cascade.detectMultiScale(gray, 1.3, 5)
            for (x, y, w, h) in faces:
                cv2.rectangle(frame, (x, y), ((x + w), (y + h)), (255, 0, 0), 2)
            return frame
        elif mode == "Alt Face":
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = self.altface_cascade.detectMultiScale(gray, 1.3, 5)
            for (x, y, w, h) in faces:
                cv2.rectangle(frame, (x, y), ((x + w), (y + h)), (255, 0, 0), 2)
            return frame
        elif mode == "Keras SimpleSaad":
            # apply SimpleSaad pre-trained Keras model
            return self.model.evaluate_face(frame)
        elif mode == "First Trial":
            # apply Hedia's pre-trained Keras model
            return self.hedia_model.evaluate_face(frame)
        elif mode == "Maarten Torch":
            # apply Maarten's pre-trained Torch model
            return self.maarten_model.evaluate_face(frame)
        # elif mode == 6:
        #     # apply Kaggle user milan400's pre-trained Keras model
        #     return self.demo_model.evaluate_face(frame, 0)
        else:
            #print("mode not available")
            return frame

    def activate_detection(self,mode):
        # Remember the detection mode requested by the UI thread.
        self.detection_mode = mode
class App(QWidget):
    """Main window: live video label plus radio buttons that pick the
    detection model and a toggle button that arms/disarms detection."""

    def __init__(self):
        super().__init__()
        # mode: 0 = detection off, 1 = detection on (button toggle state).
        self.mode = 0
        self.selected_radio = 'First Trial'
        self.title = 'PyQt5 Video'
        self.left = 100
        self.top = 100
        self.width = 640
        self.height = 480
        self.initUI()

    @pyqtSlot(QImage)
    def setImage(self, image):
        # Slot fed by Thread.changePixmap with each processed frame.
        self.label.setPixmap(QPixmap.fromImage(image))

    def initUI(self):
        """Build widgets, wire signals and start the capture thread."""
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(1800, 1200)
        self.label = QLabel(self)
        self.label.move(280, 120)
        self.label.resize(640, 480)
        self.pushButton = QPushButton(self)
        self.pushButton.setGeometry(QRect(1000, 240, 180, 25))
        self.pushButton.setObjectName("pushButton")
        self.pushButton.setText("Activate Face Detection")
        self.pushButton.clicked.connect(self.activate_detection)
        # One radio button per detection mode; the label text is the mode key.
        self.rbtn1 = QRadioButton(self)
        self.rbtn1.setText("First Trial")
        self.rbtn1.setGeometry(QRect(1025, 280, 180, 25))
        self.rbtn1.setChecked(True)
        self.rbtn2 = QRadioButton(self)
        self.rbtn2.setText("Cat Face")
        self.rbtn2.setGeometry(QRect(1025, 310, 180, 25))
        self.rbtn3 = QRadioButton(self)
        self.rbtn3.setText("Smile")
        self.rbtn3.setGeometry(QRect(1025, 340, 180, 25))
        self.rbtn4 = QRadioButton(self)
        self.rbtn4.setText("Keras SimpleSaad")
        self.rbtn4.setGeometry(QRect(1025, 370, 180, 25))
        self.rbtn5 = QRadioButton(self)
        self.rbtn5.setText("Maarten Torch")
        self.rbtn5.setGeometry(QRect(1025, 400, 180, 25))
        self.rbtn1.toggled.connect(self.onClicked)
        self.rbtn2.toggled.connect(self.onClicked)
        self.rbtn3.toggled.connect(self.onClicked)
        self.rbtn4.toggled.connect(self.onClicked)
        self.rbtn5.toggled.connect(self.onClicked)
        self.th = Thread(self)
        self.th.changePixmap.connect(self.setImage)
        self.th.start()
        self.show()

    def activate_detection(self):
        """Toggle detection: arm with the selected mode, or disarm (0)."""
        if self.mode == 0:
            self.th.activate_detection(self.selected_radio)
            self.mode = 1
            self.pushButton.setText("Deactivate Face Detection")
        else:
            self.th.activate_detection(0)
            self.pushButton.setText("Activate Face Detection")
            self.mode = 0

    def onClicked(self):
        # Track the checked radio button's label as the current mode.
        radioBtn = self.sender()
        if radioBtn.isChecked():
            self.selected_radio = radioBtn.text()
if __name__ == '__main__':
    # Launch the Qt event loop with the video window.
    app = QApplication(sys.argv)
    ex = App()
    sys.exit(app.exec_())
# Manhattan distance from square number `final` to the centre of an
# Ulam (square) spiral: find the enclosing odd ring, then measure the
# offset from the nearest side midpoint.
final = 347991

# Largest odd integer whose square is <= final.
ring = int(final ** 0.5)
if ring % 2 == 0:
    ring -= 1
half = ring // 2 + 1  # half-width of the next ring (midpoint distance)

# Midpoint of the first side of the next ring.
x = ring ** 2 + half
for _ in range(4):
    if final <= x:
        # `final` lies before this side's midpoint.
        steps = half + (x - final)
        break
    if final <= x + half:
        # `final` lies after the midpoint but before the corner.
        steps = half + (final - x)
        break
    x += 2 * half
print(steps)
import threading
import time
def f():
    """Print "f 0" .. "f 4", pausing one second after each line."""
    tick = 0
    while tick < 5:
        print(f"f {tick}")
        time.sleep(1)
        tick += 1
def g():
    """Print "g 0" .. "g 4", pausing one second after each line."""
    tick = 0
    while tick < 5:
        print(f"g {tick}")
        time.sleep(1)
        tick += 1
print("creating threads")
tf = threading.Thread(target=f)
tg = threading.Thread(target=g)
print("starting threads")
tf.start()
tg.start()
print("joining threads")
tf.join()
tg.join()
print("done")
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-01-13 10:53
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated: replaces the Watched model with UserMovie, which
    # also records a watched/not-watched status.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('movie_tracker', '0005_auto_20180113_0742'),
    ]

    operations = [
        migrations.CreateModel(
            name='UserMovie',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status', models.TextField(choices=[('watched', 'Watched'), ('not_watched', 'Not Watched')], default='not_watched', max_length=2)),
                ('movie', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='movie_tracker.Movie')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.RemoveField(
            model_name='watched',
            name='movie',
        ),
        migrations.RemoveField(
            model_name='watched',
            name='user',
        ),
        migrations.DeleteModel(
            name='Watched',
        ),
    ]
|
if __name__ == "__main__":
import os
import sys
import argparse
try:
p = os.path.dirname(__file__)
if p not in sys.path:
sys.path.append(p)
except NameError:
pass
try:
pylo = sys.modules["pylo"]
except KeyError as e:
raise RuntimeError("Please use 'python -m pylo' to start PyLo") from e
from pylo import execute
from pylo import CLIView
from pylo import StopProgram
from pylo import IniConfiguration
from pylo.config import PROGRAM_NAME
from pylo.pylolib import getDeviceText
from pylo.pylolib import defineConfigurationOptions
parser = argparse.ArgumentParser(PROGRAM_NAME,
description="Record lorentz-TEM images")
group = parser.add_mutually_exclusive_group()
group.add_argument("-s", "--settings", help="Show the settings",
action="store_true")
group.add_argument("-d", "--devices",
help="Show the directories and the device.ini files",
action="store_true")
group.add_argument("-r", "--reset", help="Reset the settings",
action="store_true")
program_args = parser.parse_args()
view = CLIView()
configuration = IniConfiguration()
try:
if program_args.settings:
defineConfigurationOptions(configuration)
view.showSettings(configuration)
elif program_args.devices:
view.showHint(getDeviceText())
elif program_args.reset:
configuration.reset()
print("Configuration is reset.")
else:
# execute pylo if it is run as a program
execute(view, configuration)
except StopProgram:
print("Exiting.") |
# Reads n and a list of n integers; each element scores 1, plus its
# neighbour's current score when it is strictly larger than that
# neighbour (element 0 compares rightwards, the rest leftwards).
a=int(input())
b=list(map(int,input().split()))
r=[1]*a
for pa in range(a):
    if pa==0:
        # NOTE(review): assumes a >= 2 — b[1] raises IndexError for a == 1.
        if b[pa]>b[pa+1]:
            r[pa]=r[pa]+r[pa+1]
    elif pa>0:
        if b[pa]>b[pa-1]:
            r[pa]=r[pa]+r[pa-1]
print(sum(r))
|
class className:
    """Toy class holding one property with two print helpers."""

    def __init__(self, someProp):
        self.someProp = someProp

    def someFtn(self):
        """Print the stored property."""
        print(self.someProp)

    # Fixed: the original `def otherFtn()` was missing both the trailing
    # colon (SyntaxError) and the `self` parameter (TypeError when called
    # as x.otherFtn()).
    def otherFtn(self):
        """Print a greeting."""
        print("Hello World!")
x = className("Voila!")
x.someFtn()
x.otherFtn()
|
primary=input("Enter primary color:").lower()
secondary=input("Enter primary color:").lower()
if(primary=="red"):
if(secondary=="yellow"):
print("When you mix red and yellow, you get orange.")
elif(secondary=="blue"):
print("When you mix red and blue, you get purple.")
else:
print("You didn't input two primary colors.")
elif(primary =="blue"):
if(secondary=="red"):
print("When you mix blue and red, you get purple.")
elif(secondary=="yellow"):
print("When you mix blue and yellow, you get green.")
else:
print("You didn't input two primary colors.")
elif(primary=="yellow"):
if(secondary=="red"):
print("When you mix yellow and red, you get orange.")
elif(secondary=="blue"):
print("When you mix yellow and blue, you get green.")
else:
print("You didn't input two primary colors.")
else:
print("You didn't input two primary colors.") |
class rental_info:
    """Record of a single vehicle rental."""

    def __init__(self,vehicle_id,vehicle_name,customer_id,rental_date,rental_price,is_active):
        self.vehicle_id = vehicle_id
        self.vehicle_name = vehicle_name
        self.customer_id = customer_id
        self.rental_date = rental_date
        self.rental_price = rental_price
        self.is_active = is_active

    def add_rental_info(self):
        """Return this rental as a plain dict keyed by attribute name."""
        field_names = ("vehicle_id", "vehicle_name", "customer_id",
                       "rental_date", "rental_price", "is_active")
        return {name: getattr(self, name) for name in field_names}
|
x = float(raw_input("number a: "))
y = float(raw_input("number b: "))
print "sum is: " + str(x + y)
print "subtracts is: " + str(x - y)
print "multiplies is: " + str(x * y)
print "divides is: " + str(x / y)
print "remainder is: " + str(x % y)
print "integer divides is: " + str(x // y)
print "Performs exponential is: " + str(x ** y)
|
from google.appengine.ext import ndb
class BYOusers(ndb.Model):
    # Basic user profile stored in the App Engine datastore.
    first_name = ndb.StringProperty()
    last_name= ndb.StringProperty()
    email = ndb.StringProperty()
class Story(ndb.Model):
    # A story; its opening StoryPoint is referenced by key (optional).
    title = ndb.StringProperty(required=True)
    first_story_point_key = ndb.KeyProperty(required = False)
    author = ndb.StringProperty(required=False)
class StoryPoint(ndb.Model):
    # One passage of text belonging to a Story.
    story_key = ndb.KeyProperty(required=True)
    text = ndb.StringProperty(required=True)
class ChoicePoint(ndb.Model):
    # A choice linking one StoryPoint to the next.
    text = ndb.StringProperty(required=True)
    begin_story_point_key = ndb.KeyProperty(required=True)
    end_story_point_key = ndb.KeyProperty(required=True)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# main executable script
# stdlib
import argparse
import sys
import json
import time
from copy import deepcopy
# project
from controlflow.ControlFlowGraph import ControlFlowGraph
from test import *
def main():
    """CLI driver: parse arguments, build the control-flow graph, then
    either run coverage tests, dump generated input states, or interpret
    the program interactively."""
    parser = argparse.ArgumentParser(description="IMP language interpreter")
    parser.add_argument("source", help="Path to source code file", type=str)
    group = parser.add_mutually_exclusive_group()
    group.add_argument("-t", "--tests", nargs="+", help="Runs specific coverage tests", type=str, choices=list(TESTS.keys()))
    group.add_argument("--all-tests", help="Runs all available coverage tests", action="store_true")
    group = parser.add_mutually_exclusive_group()
    group.add_argument("-i", "--input", nargs="+", help="Input state set for tests (json files)", type=str)
    group.add_argument("-g", "--generate", help="Generate state set that passes coverage tests", action="store_true")
    parser.add_argument("--timeout", help="States generation timeout (seconds)", type=int, default=10)
    # NOTE(review): "Paramter" is a typo in the user-facing help strings.
    parser.add_argument("--k-paths", help="Paramter of k-TC test (default 4)", type=int, default=4)
    parser.add_argument("--i-loops", help="Paramter of i-TB test (default 2)", type=int, default=2)
    parser.add_argument("-cfg", "--controlflow", help="Output controlflow graph", type=str)
    args = parser.parse_args()
    if (args.tests or args.all_tests) and not (args.generate or args.input):
        return print("Cannot run tests without input states. Use -i or -g flag.", file=sys.stderr)
    # Select which coverage-test classes the user asked for.
    test_classes = {}
    if args.all_tests:
        test_classes = TESTS
    elif args.tests:
        test_classes = {t: TESTS[t] for t in args.tests}
    graph = None
    with open(args.source) as source_file:
        graph = ControlFlowGraph(source_file.read())
    if args.controlflow:
        graph.output_graph(args.controlflow)
    if test_classes:
        # Instantiate the tests, forwarding the k/i parameters where needed.
        tests = {}
        for key, test_class in test_classes.items():
            if test_class == AllKPathsTest:
                obj = AllKPathsTest(args.k_paths)
            elif test_class == AllILoopsTest:
                obj = AllILoopsTest(args.i_loops)
            else:
                obj = test_class()
            tests[key] = obj
        # Collect input states from JSON files or by automatic generation.
        states = []
        if args.input:
            for input_file in args.input:
                with open(input_file) as json_file:
                    test_set = json.load(json_file)
                    if test_set is not None:
                        states.extend(test_set)
            states = merge_states(states)
        elif args.generate:
            for coverage_test in tests.values():
                test_set = coverage_test.generate(graph, timeout=args.timeout)
                if test_set is not None:
                    states.extend(test_set)
            states = merge_states(states)
        if not states:
            # No usable states: dump `null` and stop — TODO confirm intent.
            states = None
            return print(json.dumps(states, indent=4, sort_keys=True))
        # Run every selected test and print an aligned, colored report.
        max_key_len = max(map(lambda x: len(str(x)), tests.keys()))
        max_str_len = max(map(lambda x: len(str(x)), tests.values()))
        for test_name, coverage_test in tests.items():
            coverage = coverage_test.run(graph, deepcopy(states))
            print(f"[{test_name}]", ' '*(max_key_len-len(test_name)),
                  coverage_test, ' '*(max_str_len-len(str(coverage_test))),
                  '{COLOR}{coverage:6.2f}%{ENDC}'.format(
                      COLOR=('\033[92m' if coverage == 1 else '\033[91m'),
                      ENDC='\033[0m',
                      coverage=coverage*100
                  ))
    else:
        # Interactive mode: prompt for each variable, run, print final state.
        state = {}
        print("Enter initial state")
        for var in graph.get_vars():
            state[var] = int(input(f'{var}: '))
        start = time.time()
        state = graph.run(state, return_state=True)
        end = time.time()
        print("Program terminated {:.3f} ms.".format((end-start)*1000))
        for var, value in state.items():
            print(f'{var}: {value}')

if __name__ == '__main__':
    main()
|
#Task
#Read a given string, change the character at a given index and then print the modified string.
#Input Format
#The first line contains a string, S.
#The next line contains an integer i, denoting the index location and a character c separated by a space.
#Output Format
#Using any of the methods explained above, replace the character at index i with character c.
# Convert to a list because Python strings are immutable.
s = list(raw_input())
# split() separates the two space-delimited tokens on the second input
# line: the index and the replacement character.
index, letter = raw_input().split()
s[int(index)] = letter
print("".join(s))
|
# Reads exactly two whitespace-separated tokens and prints their
# concatenation (string +, not numeric addition).
s,v=(input().split())
print(s+v)
|
word="banana"
count=0
for i in word:
if i == "a":
count=count+1
print(count)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/UIWarning.ui'
#
# Created by: PyQt5 UI code generator 5.15.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_UIWarning(object):
    # pyuic5-generated layout for a fixed-size warning dialog: a message
    # label plus a centered OK button. Regenerate from the .ui file rather
    # than editing by hand.

    def setupUi(self, UIWarning):
        UIWarning.setObjectName("UIWarning")
        UIWarning.resize(432, 170)
        # Fixed-size dialog: min and max sizes are identical.
        UIWarning.setMinimumSize(QtCore.QSize(432, 170))
        UIWarning.setMaximumSize(QtCore.QSize(432, 170))
        self.buttonBox = QtWidgets.QDialogButtonBox(UIWarning)
        self.buttonBox.setGeometry(QtCore.QRect(10, 130, 409, 25))
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setCenterButtons(True)
        self.buttonBox.setObjectName("buttonBox")
        # Message label text is set by the caller at runtime.
        self.label = QtWidgets.QLabel(UIWarning)
        self.label.setGeometry(QtCore.QRect(10, 20, 411, 91))
        self.label.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.label.setText("")
        self.label.setObjectName("label")
        self.retranslateUi(UIWarning)
        self.buttonBox.accepted.connect(UIWarning.accept)
        QtCore.QMetaObject.connectSlotsByName(UIWarning)

    def retranslateUi(self, UIWarning):
        _translate = QtCore.QCoreApplication.translate
        UIWarning.setWindowTitle(_translate("UIWarning", "Warning"))
|
from django.shortcuts import render, get_object_or_404
from django.utils import timezone
from django.db.models import F
from .models import Post, Category, Tag, SitePage
def about(request):
    """Render the static "about" page and bump its view counter."""
    page = get_object_or_404(SitePage, url='about')
    # F() keeps the increment in SQL, avoiding a read-modify-write race.
    SitePage.objects.filter(url='about').update(number_views=F("number_views") + 1)
    return render(request, 'blog/page_about.html', {'page': page})
def post_list(request):
    """List all posts, newest first (the is_publish filter is disabled)."""
    #posts = Post.objects.filter(is_publish=True).order_by('-created_date')
    posts = Post.objects.order_by('-created_date')
    return render(request, 'blog/post_list.html', {'posts': posts})
def post_detail(request, slug):
    """Show a single post by slug and bump its view counter."""
    post = get_object_or_404(Post, slug=slug)
    # Update the view counter via F() to keep the increment in SQL.
    Post.objects.filter(slug=slug).update(number_views=F("number_views") + 1)
    return render(request, 'blog/post_detail.html', {'post': post})
def post_list_category(request, slug):
    """List posts belonging to the category identified by slug."""
    category = get_object_or_404(Category, slug=slug)
    return render(request, 'blog/post_list_category.html',
                  {'category': category})
def post_list_tag(request, slug):
    """Render the post listing for one tag."""
    selected = get_object_or_404(Tag, slug=slug)
    context = {'tag': selected}
    return render(request, 'blog/post_list_tag.html', context)
# 404
def page_not_found(request, exception=None):
    """Custom 404 handler.

    Accepts the ``exception`` argument Django passes to ``handler404``
    (optional, so existing direct callers keep working) and returns the
    proper 404 status code — the original returned the template with a
    200 status, which misleads crawlers and HTTP clients.
    """
    return render(request, 'blog/404.html', status=404)
# 500
def server_error(request):
    """Custom 500 handler.

    Returns the proper 500 status code — the original rendered the
    error template with a 200 status.
    """
    return render(request, 'blog/500.html', status=500)
def google_page(request):
    """Serve the Google site-verification page."""
    return render(request, 'blog/google_page.html')
|
#!/usr/bin/python
# Copyright 2014 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts video encoding result data from text files to visualization
data source."""
__author__ = "jzern@google.com (James Zern),"
__author__ += "jimbankoski@google.com (Jim Bankoski)"
__author__ += "hta@gogle.com (Harald Alvestrand)"
import encoder
import gviz_api
import math
import mpeg_settings
import numpy
import optimizer
import re
import string
import pick_codec
def bdsnr(metric_set1, metric_set2):
  """
  BJONTEGAARD    Bjontegaard metric calculation
  Bjontegaard's metric allows to compute the average gain in psnr between two
  rate-distortion curves [1].
  rate1,psnr1 - RD points for curve 1
  rate2,psnr2 - RD points for curve 2
  returns the calculated Bjontegaard metric 'dsnr'
  code adapted from code written by : (c) 2010 Giuseppe Valenzise
  http://www.mathworks.com/matlabcentral/fileexchange/27798-bjontegaard-metric/content/bjontegaard.m
  """
  # pylint: disable=too-many-locals
  # numpy seems to do tricks with its exports.
  # pylint: disable=no-member
  rate1 = [point[0] for point in metric_set1]
  psnr1 = [point[1] for point in metric_set1]
  rate2 = [point[0] for point in metric_set2]
  psnr2 = [point[1] for point in metric_set2]
  # List comprehensions rather than map(): on Python 3 map() returns a
  # one-shot iterator, which breaks numpy.polyfit and the min()/max()
  # calls below that each consume the rates a second time.
  log_rate1 = [math.log(rate) for rate in rate1]
  log_rate2 = [math.log(rate) for rate in rate2]
  # Best cubic poly fit for graph represented by log_ratex, psrn_x.
  poly1 = numpy.polyfit(log_rate1, psnr1, 3)
  poly2 = numpy.polyfit(log_rate2, psnr2, 3)
  # Integration interval: the overlap of the two curves' rate ranges.
  min_int = max([min(log_rate1), min(log_rate2)])
  max_int = min([max(log_rate1), max(log_rate2)])
  # Integrate poly1, and poly2.
  p_int1 = numpy.polyint(poly1)
  p_int2 = numpy.polyint(poly2)
  # Calculate the integrated value over the interval we care about.
  int1 = numpy.polyval(p_int1, max_int) - numpy.polyval(p_int1, min_int)
  int2 = numpy.polyval(p_int2, max_int) - numpy.polyval(p_int2, min_int)
  # Average improvement; guard against a degenerate (single-point) overlap.
  if max_int != min_int:
    avg_diff = (int2 - int1) / (max_int - min_int)
  else:
    avg_diff = 0.0
  return avg_diff
def bdrate(metric_set1, metric_set2):
  """
  BJONTEGAARD    Bjontegaard metric calculation
  Bjontegaard's metric allows to compute the average % saving in bitrate
  between two rate-distortion curves [1].
  rate1,psnr1 - RD points for curve 1
  rate2,psnr2 - RD points for curve 2
  adapted from code from: (c) 2010 Giuseppe Valenzise
  """
  # numpy plays games with its exported functions.
  # pylint: disable=no-member
  # pylint: disable=too-many-locals
  rate1 = [point[0] for point in metric_set1]
  psnr1 = [point[1] for point in metric_set1]
  rate2 = [point[0] for point in metric_set2]
  psnr2 = [point[1] for point in metric_set2]
  # List comprehensions rather than map(): on Python 3 map() returns a
  # one-shot iterator, which numpy.polyfit cannot consume reliably.
  log_rate1 = [math.log(rate) for rate in rate1]
  log_rate2 = [math.log(rate) for rate in rate2]
  # Best cubic poly fit for graph represented by log_ratex, psrn_x.
  poly1 = numpy.polyfit(psnr1, log_rate1, 3)
  poly2 = numpy.polyfit(psnr2, log_rate2, 3)
  # Integration interval: the overlap of the two curves' psnr ranges.
  min_int = max([min(psnr1), min(psnr2)])
  max_int = min([max(psnr1), max(psnr2)])
  # find integral
  p_int1 = numpy.polyint(poly1)
  p_int2 = numpy.polyint(poly2)
  # Calculate the integrated value over the interval we care about.
  int1 = numpy.polyval(p_int1, max_int) - numpy.polyval(p_int1, min_int)
  int2 = numpy.polyval(p_int2, max_int) - numpy.polyval(p_int2, min_int)
  # Calculate the average improvement (in log-rate space).
  avg_exp_diff = (int2 - int1) / (max_int - min_int)
  # In really bad formed data the exponent can grow too large.
  # clamp it.
  if avg_exp_diff > 200:
    avg_exp_diff = 200
  # Convert to a percentage.
  avg_diff = (math.exp(avg_exp_diff) - 1) * 100
  return avg_diff
def FillForm(string_for_substitution, dictionary_of_vars):
  """
  This function substitutes all matches of the command string //%% ... %%//
  with the variable represented by ... .

  Raises KeyError if a referenced name is missing from the dictionary
  (same as the original behavior).
  """
  def _lookup(match):
    # Callable replacement: the value is inserted verbatim, so
    # backslashes/group references in values are never interpreted.
    return dictionary_of_vars[match.group(1)]
  # Non-greedy (.*?) so two markers on one line do not merge into a
  # single match; the original greedy pattern also re-used the captured
  # name, unescaped, as a regex, which broke on special characters.
  return re.sub(r"//%%(.*?)%%//", _lookup, string_for_substitution)
def HasMetrics(line):
  """
  The metrics files produced by vpxenc are started with a B for headers.

  Returns True for non-empty data rows (anything with fields that does
  not start with "B").
  """
  # str.split() replaces the Python-2-only string.split() helper; the
  # [0:1] slice is safe on the empty string where [0] would raise.
  return line[0:1] != "B" and len(line.split()) > 0
def ParseMetricFile(file_name, metric_column):
  """
  Convert a metrics file into a set of numbers.
  This returns a sorted list of tuples with the first number
  being from the first column (bitrate) and the second being from
  metric_column (counting from 0).

  Rows missing the requested column get 0 as the metric value.
  """
  metric_set1 = set()
  # with-statement guarantees the file is closed even if a row fails
  # to parse (the original leaked the handle on error).
  with open(file_name, "r") as metric_file:
    for line in metric_file:
      if not HasMetrics(line):
        continue
      metrics = line.split()
      if metric_column < len(metrics):
        my_tuple = float(metrics[0]), float(metrics[metric_column])
      else:
        my_tuple = float(metrics[0]), 0
      metric_set1.add(my_tuple)
  return sorted(metric_set1)
def GraphBetter(metric_set1_sorted, metric_set2_sorted, use_set2_as_base):
  """
  Search through the sorted metric set for metrics on either side of
  the metric from file 1. Since both lists are sorted we really
  should not have to search through the entire range, but these
  are small lists."""
  # pylint: disable=too-many-locals
  ratio_total = 0.0
  matched = 0
  # TODO(hta): Replace whole thing with a call to numpy.interp()
  for bitrate, metric in metric_set1_sorted:
    for (lo_rate, lo_metric), (hi_rate, hi_metric) in zip(
        metric_set2_sorted, metric_set2_sorted[1:]):
      # Need a pair of set-2 points bracketing this metric value.
      if not lo_metric < metric <= hi_metric:
        continue
      # Linear interpolation between the two bracketing points.
      if hi_metric != lo_metric:
        slope = (hi_rate - lo_rate) / (hi_metric - lo_metric)
      else:
        slope = 0
      estimated_rate = lo_rate + (metric - lo_metric) * slope
      # Percentage difference relative to the chosen base curve.
      if use_set2_as_base:
        ratio_total += (bitrate - estimated_rate) / estimated_rate
      else:
        ratio_total += (bitrate - estimated_rate) / bitrate
      matched += 1
      break
  # Average improvement between graphs (0.0 when nothing overlapped).
  return ratio_total / matched if matched else 0.0
def DataSetBetter(metric_set1, metric_set2, method):
  """
  Compares two data sets and determines which is better and by how
  much.
  The input metric set is sorted on bitrate.
  The first set is the one to compare, the second set is the baseline.
  """
  if method == 'avg':
    # Be fair to both graphs by testing all the points in each.
    forward = GraphBetter(metric_set1, metric_set2, use_set2_as_base=True)
    backward = GraphBetter(metric_set2, metric_set1, use_set2_as_base=False)
    return 50 * (forward - backward)
  if method == 'dsnr':
    return bdsnr(metric_set1, metric_set2)
  # Any other method string means Bjontegaard rate difference.
  return bdrate(metric_set2, metric_set1)
def FileBetter(file_name_1, file_name_2, metric_column, method):
  """
  Compares two data files and determines which is better and by how
  much.
  metric_column is the metric.
  """
  # Parse both files into sorted lists of unique (bitrate, metric)
  # tuples, then delegate the comparison.
  first = ParseMetricFile(file_name_1, metric_column)
  second = ParseMetricFile(file_name_2, metric_column)
  return DataSetBetter(first, second, method)
def HtmlPage(page_template, page_title="", page_subtitle="",
             filestable="", snrs="", formatters=""):
  """
  Creates a HTML page from the template and variables passed to it.

  NOTE(review): filestable is indexed by 'dsnr'/'avg'/'drate', so
  callers are expected to pass a dict despite the "" default — confirm.
  """
  # pylint: disable=too-many-arguments
  # Dictionary of the variables actually used in the template.
  template_vars = {
      'page_title': page_title,
      'page_subtitle': page_subtitle,
      'filestable_dpsnr': filestable['dsnr'],
      'filestable_avg': filestable['avg'],
      'filestable_drate': filestable['drate'],
      'snrs': snrs,
      'formatters': formatters
  }
  return FillForm(page_template, template_vars)
def ListOneTarget(codecs, rate, videofile, do_score, datatable,
                  score_function=None):
  """Extend a datatable with the info about one video file's scores."""
  # pylint: disable=too-many-arguments
  for entry in codecs:
    if isinstance(entry, basestring):
      # Normal path: entry is a codec name, build an optimizer for it.
      codec_name = entry
      codec = pick_codec.PickCodec(codec_name)
      my_optimizer = optimizer.Optimizer(codec, score_function=score_function)
    else:
      # For testing: an optimizer context was injected directly
      # instead of a codec name.
      my_optimizer = entry
      codec_name = my_optimizer.context.codec.name
    best_encoding = my_optimizer.BestEncoding(rate, videofile)
    if do_score and not best_encoding.Result():
      best_encoding.Execute()
      best_encoding.Store()
    AddOneEncoding(codec_name, my_optimizer, best_encoding, videofile,
                   datatable)
def AddOneEncoding(codec_name, my_optimizer, this_encoding, videofile,
                   datatable):
  """Append one encoding's result info to the datatable."""
  assert this_encoding.Result()
  # Ignore results that score less than zero.
  if my_optimizer.Score(this_encoding) < 0.0:
    return
  # Datatable shape: codec name -> {video basename -> [result infos]}.
  # Each result info records the configuration used, the target
  # bitrate, the command line, the score and the result.
  result_info = {'config_id': this_encoding.encoder.Hashname(),
                 'target_bitrate': this_encoding.bitrate,
                 'encode_command': this_encoding.EncodeCommandLine(),
                 'score': my_optimizer.Score(this_encoding),
                 'result': this_encoding.ResultWithoutFrameData()}
  per_codec = datatable.setdefault(codec_name, {})
  per_codec.setdefault(videofile.basename, []).append(result_info)
def ListMpegResults(codecs, do_score, datatable, score_function=None):
  """List all scores for all tests in the MPEG test set for a set of codecs."""
  # Sorting on target bitrate is necessary for graphs to display
  # correctly.
  rates_and_files = sorted(mpeg_settings.MpegFiles().AllFilesAndRates())
  for rate, filename in rates_and_files:
    ListOneTarget(codecs, rate, encoder.Videofile(filename), do_score,
                  datatable, score_function)
def ListMpegSingleConfigResults(codecs, datatable, score_function=None):
  """List results using each codec's single best overall encoder."""
  encoder_list = {}
  optimizer_list = {}
  for codec_name in codecs:
    my_optimizer = optimizer.Optimizer(
        pick_codec.PickCodec(codec_name),
        score_function=score_function,
        file_set=mpeg_settings.MpegFiles())
    optimizer_list[codec_name] = my_optimizer
    encoder_list[codec_name] = my_optimizer.BestOverallEncoder()
  for rate, filename in sorted(mpeg_settings.MpegFiles().AllFilesAndRates()):
    videofile = encoder.Videofile(filename)
    for codec_name in codecs:
      best_encoder = encoder_list[codec_name]
      if not best_encoder:
        # No overall encoder available for this codec; skip it.
        continue
      my_encoding = best_encoder.Encoding(rate, videofile)
      my_encoding.Recover()
      AddOneEncoding(codec_name, optimizer_list[codec_name], my_encoding,
                     videofile, datatable)
def ExtractBitrateAndPsnr(datatable, codec, filename):
  """Return the (bitrate, psnr) pairs for one codec/file in the datatable."""
  results = datatable[codec][filename]
  return [(info['result']['bitrate'], info['result']['psnr'])
          for info in results]
def BuildComparisonTable(datatable, metric, baseline_codec, other_codecs):
  """Builds a table of comparison data for this metric."""
  # Running totals used for the OVERALL row appended at the bottom.
  countoverall = dict((codec, 0) for codec in other_codecs)
  sumoverall = dict((codec, 0) for codec in other_codecs)
  # "data" holds the data for the visualization; the name comes from
  # the gviz_api sample code.
  data = []
  for filename in datatable[baseline_codec].keys():
    row = {'file': filename}
    baseline_dataset = ExtractBitrateAndPsnr(datatable,
                                             baseline_codec,
                                             filename)
    for this_codec in other_codecs:
      # Only compare when this codec has results for the same file.
      if not (this_codec in datatable and filename in datatable[this_codec]
              and filename in datatable[baseline_codec]):
        continue
      this_dataset = ExtractBitrateAndPsnr(datatable,
                                           this_codec,
                                           filename)
      overall = DataSetBetter(baseline_dataset, this_dataset, metric)
      # TODO(hta): figure out when DataSetBetter generates NaN
      if not math.isnan(overall):
        row[this_codec] = overall
        sumoverall[this_codec] += overall
        countoverall[this_codec] += 1
    data.append(row)
  # Add the overall numbers.
  summary_row = {"file": "OVERALL " + metric}
  for this_codec in other_codecs:
    if countoverall[this_codec]:
      summary_row[this_codec] = (
          sumoverall[this_codec] / countoverall[this_codec])
  data.append(summary_row)
  return data
def BuildGvizDataTable(datatable, metric, baseline_codec, other_codecs):
  """Builds a Gviz DataTable giving this metric for the files and codecs."""
  description = {"file": ("string", "File")}
  for this_codec in other_codecs:
    description[this_codec] = ("number", this_codec)
  # Generate the gViz table from the comparison rows.
  gviz_data_table = gviz_api.DataTable(description)
  gviz_data_table.LoadData(
      BuildComparisonTable(datatable, metric, baseline_codec, other_codecs))
  return gviz_data_table
def CrossPerformanceGvizTable(datatable, metric, codecs, criterion):
  """Build a square table of codecs and relative performance."""
  # pylint: disable=too-many-locals
  videofile_name_list = datatable[codecs[0]].keys()
  description = {'codec': ('string', 'Codec')}
  for codec in codecs:
    description[codec] = ('string', codec)
  data = []
  for row_codec in codecs:
    lineitem = {'codec': row_codec}
    for col_codec in codecs:
      if row_codec == col_codec:
        continue
      count = 0
      total = 0.0
      for filename in videofile_name_list:
        if (row_codec in datatable and filename in datatable[row_codec]
            and col_codec in datatable and filename in datatable[col_codec]):
          total += DataSetBetter(
              ExtractBitrateAndPsnr(datatable, col_codec, filename),
              ExtractBitrateAndPsnr(datatable, row_codec, filename), metric)
          count += 1
      if count > 0:
        mean = total / count
        # Cell is (value, html link) so gviz shows a clickable score.
        display = ('<a href=/results/show_result.html?' +
                   'codec1=%s&codec2=%s&criterion=%s>%5.2f</a>') % (
                       col_codec, row_codec, criterion, mean)
        lineitem[col_codec] = (mean, display)
    data.append(lineitem)
  gviz_data_table = gviz_api.DataTable(description)
  gviz_data_table.LoadData(data)
  return gviz_data_table
|
from django.db import models
from django.contrib.auth.models import User
class List(models.Model):
    """A list of records belonging to one user."""
    # Deleting the owning user cascades and removes their lists too.
    owner = models.ForeignKey(User, on_delete=models.CASCADE)

    def __str__(self):
        # e.g. "3 - alice"
        return '{} - {}'.format(self.pk, self.owner)
class Record(models.Model):
    """A single free-text entry stored inside a List."""
    record = models.TextField()
    # NOTE(review): the field name 'list' shadows the builtin; the
    # reverse accessor from List is `.records`.
    list = models.ForeignKey(List, on_delete=models.CASCADE, related_name='records')

    def __str__(self):
        # e.g. "7 - buy milk"
        return '{} - {}'.format(self.pk, self.record)
|
class Solution:
    """Pretty-print a compact JSON-like string into indented lines."""
    # @param A : string
    # @return a list of strings
    def prettyJSON(self, A):
        """Return A split into one line per token, indented with one tab
        per nesting level; spaces in A are discarded.
        """
        indent = ""
        o =[]
        curr = ""   # token accumulated since the last structural character
        for index, c in enumerate(A):
            if(c == "[" or c == "{"):
                # Flush any pending token first: a key that directly
                # precedes an opening brace (e.g. `C:` in `{A:"B",C:{`)
                # must appear on its own line before the brace.
                if(curr):
                    o.append(indent+curr)
                o.append(indent + c)
                curr =""
                indent+='\t'
            elif(c == "]" or c == "}"):
                # Flush the last value inside the block, then dedent and
                # emit the closing bracket on its own line.
                if(curr):
                    o.append(indent + curr)
                indent = indent[:-1]
                o.append(indent+c)
                curr = ""
            elif(c == ","):
                if(A[index-1] == "]" or A[index-1] == "}"):
                    # Comma immediately after a closing bracket: attach
                    # it to the bracket's already-emitted line instead
                    # of starting a new line.
                    o[-1] = o[-1] + c
                else:
                    o.append(indent + curr + c)
                    curr=""
            elif c == " ":
                # Whitespace is not significant; discard the pending token.
                curr = ""
            else :
                curr = curr + c
        return o
from io import open
import os
import matplotlib.pyplot as mpl
def transposition_encryption():
    """Encrypt 'plain text.txt' with a columnar transposition cipher.

    Prompts for a numeric key (4-8 unique digits from 1-8) and a
    padding letter, prints the transposition grid, writes the cipher
    text to 'encrypted text.txt' and shows a character-frequency
    histogram. Exits the program on invalid key/padding input.
    """
    os.system('clear')
    # Read the whole plain-text file.
    txtFile = open('plain text.txt','r', encoding="utf8")
    fileContent = txtFile.readlines()
    txtFile.close()
    # Replace line breaks by spaces and upper-case everything.
    plain_text = ""
    for fc in fileContent:
        plain_text += fc.replace("\n", " ").upper()
    # Characters allowed to survive filtering.
    alphabet = "ABCDEFGHIJKLMNÑOPQRSTUVWXYZ0123456789"
    # Drop every character outside the alphabet (spaces kept for now).
    final_plain_text = ""
    for pt in plain_text:
        if pt.upper() in alphabet:
            final_plain_text += pt.upper()
        else:
            if pt == " ":
                final_plain_text += " "
            else:
                final_plain_text += ""
    print("========== Plain text ==========\n\n", final_plain_text)
    keyin = input("\n~# KEY: ").upper()
    paddingin = input("\n~# PADDING [a-z]: ").lower()
    key = ''
    padding = ''
    # Abort the program on an invalid key or padding character.
    if keyValidate(keyin):
        key = keyin
    else:
        exit()
    if paddingValidate(paddingin):
        padding = paddingin
    else:
        exit()
    # Spaces are encoded as the padding letter.
    msg = final_plain_text.replace(" ", padding)
    # Rank the key's digits; the ranks define the column read order.
    kywrd_num_list = keyword_num_assign(key)
    print("\n\n========== Transposition grid ==========\n")
    # Print the key characters and their column ranks.
    for i in range(len(key)):
        print(key[i], end=" ", flush=True)
    print()
    for i in range(len(key)):
        print(str(kywrd_num_list[i]), end=" ", flush=True)
    print("\n-------------------------")
    # In case characters don't fit the entire grid perfectly, pad the
    # message so it fills the last row exactly.
    extra_letters = len(msg) % len(key)
    dummy_characters = len(key) - extra_letters
    if extra_letters != 0:
        for i in range(dummy_characters):
            msg += padding
    num_of_rows = int(len(msg) / len(key))
    # Write the message row by row into the grid.
    arr = [[0] * len(key) for i in range(num_of_rows)]
    z = 0
    for i in range(num_of_rows):
        for j in range(len(key)):
            arr[i][j] = msg[z]
            z += 1
    for i in range(num_of_rows):
        for j in range(len(key)):
            print(arr[i][j], end=" ", flush=True)
        print()
    # Column indices in the order dictated by the key ranks.
    num_loc = get_number_location(key, kywrd_num_list)
    # Read the grid column by column, in rank order, to build the cipher.
    cipher_text = ""
    k = 0
    for i in range(len(key)):
        if k == len(key):
            break
        else:
            d = int(num_loc[k])
        for j in range(num_of_rows):
            cipher_text += arr[j][d]
        k += 1
    print("\n\n========== Final Text Encrypted ==========\n")
    print(cipher_text+'\n\n')
    # Save the cipher text.
    outputFile = open('encrypted text.txt','w')
    outputFile.write(cipher_text)
    outputFile.close()
    # Histogram over the cipher with padding shown as spaces.
    createHistogram(cipher_text.replace(padding," "))
def transposition_decryption():
    """Decrypt 'encrypted text.txt' produced by transposition_encryption.

    Prompts for the same numeric key, refills the grid column by column
    in key-rank order, then reads it row by row and writes the result
    to 'decrypted text.txt'. Lower-case characters in the input are
    treated as spaces. Exits the program on an invalid key.
    """
    os.system('clear')
    # Read the cipher-text file.
    txtFile = open('encrypted text.txt','r', encoding="utf8")
    fileContent = txtFile.readlines()
    txtFile.close()
    # Replace line breaks by spaces.
    tws = ""
    for fc in fileContent:
        tws += fc.replace("\n", " ")
    # Upper-case alphabet is kept; lower-case letters map to spaces.
    alphabet = "ABCDEFGHIJKLMNÑOPQRSTUVWXYZ0123456789"
    alphabetM = "abcdefghijklmnñopqrstuvwxyz"
    encrypted_text = ""
    for fc in tws:
        if fc in alphabet:
            encrypted_text += fc.upper()
        elif fc in alphabetM:
            encrypted_text += " "
        else:
            encrypted_text += ""
    print("========== Encrypted text ==========\n\n", encrypted_text)
    msg = encrypted_text
    keyin = input("\n~# KEY: ").upper()
    key = ''
    # Abort the program on an invalid key.
    if keyValidate(keyin):
        key = keyin
    else:
        exit()
    # Rank the key's digits; the ranks define the column write order.
    kywrd_num_list = keyword_num_assign(key)
    num_of_rows = int(len(msg) / len(key))
    # Column indices in the order they were read during encryption.
    num_loc = get_number_location(key, kywrd_num_list)
    # Grid to be refilled column by column.
    arr = [[0] * len(key) for i in range(num_of_rows)]
    # decipher
    plain_text = ""
    k = 0
    itr = 0
    for i in range(len(msg)):
        d = 0
        if k == len(key):
            k = 0
        else:
            d: int = int(num_loc[k])
        for j in range(num_of_rows):
            arr[j][d] = msg[itr]
            itr += 1
            if itr == len(msg):
                break
        k += 1
    print()
    # Read the grid row by row to recover the message.
    for i in range(num_of_rows):
        for j in range(len(key)):
            plain_text += str(arr[i][j])
    print("\n========== Decrypted text ==========\n")
    print(plain_text)
    # Save the decrypted text.
    outputFile = open('decrypted text.txt','w')
    outputFile.write(plain_text)
    outputFile.close()
def keyValidate(key):
    """Validate a transposition key: 4-8 unique characters from '1'-'8'.

    Prints an error message and returns False when the key length is
    outside [4, 8], when it contains a character outside '1'..'8', or
    when a character repeats; returns True otherwise.

    Replaces the original O(n^2) flag-counting loops, whose early
    return could accept keys with duplicates past the first character.
    """
    alphabet = "12345678"
    if len(key) > len(alphabet) or len(key) < 4:
        print("ERROR! THE KEY MUST BE EQUAL TO THE ALPHABET [4-8]")
        return False
    # set() drops duplicates, so a shorter set means a repeated digit.
    if any(ch not in alphabet for ch in key) or len(set(key)) != len(key):
        print("\nERROR! A KEY NUMBER IS REPEATED OR IS INVALID")
        return False
    return True
# * ================================== *
# * GENERATE HISTOGRAM *
# * ================================== *
def createHistogram(plainText):
    """Show an absolute-frequency histogram of the cipher-text characters.

    Counts each alphabet character (A-Z, Ñ, 0-9) in plainText and plots
    a labelled bar chart with matplotlib. Replaces the original 37
    hand-maintained counter variables with collections.Counter.
    """
    from collections import Counter  # local import keeps module deps unchanged
    alphabet = "ABCDEFGHIJKLMNÑOPQRSTUVWXYZ0123456789"
    # Only characters inside the alphabet are counted, like the original.
    counts = Counter(ch for ch in plainText if ch in alphabet)
    aLabels = list(alphabet)
    num = [counts[ch] for ch in aLabels]
    histogram = mpl.figure(u'FRECUENCY HISTOGRAM ON CIPHER TEXT')
    axis = histogram.add_subplot(111)
    xx = range(len(num))
    rects1 = axis.bar(xx, num, width=0.5, color='y', align='center')
    axis.set_xticks(xx)
    axis.set_xticklabels(aLabels)
    mpl.xlabel("Cipher Text")
    mpl.ylabel("Absolute Frecuency")

    def autolabel(rects):
        # Write each bar's count just above the bar.
        for rect in rects:
            height = rect.get_height()
            axis.text(rect.get_x() + rect.get_width()/2., 1.05*height,
                      '%d' % int(height),
                      ha='center', va='bottom')

    autolabel(rects1)
    mpl.show()
def paddingValidate(padding):
    """Validate padding: exactly one lower-case character from a-z/ñ.

    Prints an error and returns False otherwise. The original checked
    `len(padding) > 1`, so the empty string slipped through (every "" is
    a substring of the alphabet); `!= 1` rejects it as well.
    """
    alpha = "abcdefghijklmnñopqrstuvwxyz"
    if len(padding) != 1:
        print("ERROR! PADDING MUST BE A ONLY ONE CHARACTER")
        return False
    if padding not in alpha:
        print("ERROR! PADDING MUST BE A LOWER CASE CHARACTER [a-z]")
        return False
    return True
def get_number_location(key, kywrd_num_list):
    """Return the column indices ordered by assigned rank, as one string.

    kywrd_num_list[j] is the rank of column j; the result lists the
    columns in rank order (rank 0 first).
    """
    positions = ""
    for rank in range(len(key)):
        for column, assigned in enumerate(kywrd_num_list):
            if assigned == rank:
                positions += str(column)
    return positions
def keyword_num_assign(key):
    """Assign each key character its rank within the alphabet '12345678'.

    Returns a list where entry j is the rank of key[j]; positions whose
    character is outside the alphabet keep their default index value.
    """
    ranks = list(range(len(key)))
    next_rank = 0
    # Scan digits in ascending order so smaller digits get lower ranks.
    for symbol in "12345678":
        for position, ch in enumerate(key):
            if ch == symbol:
                ranks[position] = next_rank
                next_rank += 1
    return ranks
def main():
    """Entry point: let the user choose encryption or decryption."""
    os.system('clear')
    choice = int(input("========== Choose an option ==========\n1) Encrypt \n2) Decrypt\n\n"))
    if choice == 1:
        transposition_encryption()
    elif choice == 2:
        transposition_decryption()
    else:
        print("Incorrect option")


if __name__ == "__main__":
    main()
#!/usr/bin/python3
'''
Defines the trip class
'''
from models.base_model import BaseModel
class Trip(BaseModel):
    '''
    Defines the trip class which inherits from BaseModel
    '''
    collection = "trips"

    def __init__(self, *args, **kwargs):
        """Restore a Trip from stored kwargs, or create a blank one."""
        if kwargs:
            # Rehydrating an existing document: BaseModel restores fields.
            super().__init__(**kwargs)
            return
        super().__init__()
        # Fresh trip: initialise every field, in the original order.
        defaults = [('country_id', ""), ('city_id', ""), ('country', ""),
                    ('city', ""), ('name', ""), ('description', ""),
                    ('users', []), ('host', ""), ('host_firstname', ""),
                    ('host_lastname', ""), ('host_pic', ""),
                    ('date_range', "")]
        for attribute, value in defaults:
            setattr(self, attribute, value)
|
"""6.009 Lab 8A: carlae Interpreter"""
import sys
class EvaluationError(Exception):
    """Raised when a carlae expression cannot be evaluated."""

    def __str__(self):
        return "EvaluationError"
def mult(args):
    """Return the product of the elements of args (1 for an empty list)."""
    product = 1
    for factor in args:
        product = product * factor
    return product
class Environment:
    """A frame of variable bindings; lookups fall back to the parent.

    The root frame (parent is None) is pre-loaded with the carlae
    arithmetic builtins; child frames start out empty.
    """

    def __init__(self, parent=None):
        self.parent = parent
        if parent is None:
            # Root environment: install the builtins.
            self.assignments = {
                '+': sum,
                '-': lambda args: -args[0] if len(args) == 1 else (args[0] - sum(args[1:])),
                '*': mult,
                '/': lambda args: args[0] if len(args) == 1 else (args[0]/mult(args[1:]))
            }
        else:
            self.assignments = {}

    def __setitem__(self, key, value):
        """Bind key to value in THIS frame only."""
        self.assignments[key] = value

    def __getitem__(self, key):
        """Look key up here, then up the parent chain; raise
        EvaluationError when it is bound nowhere."""
        scope = self
        while scope is not None:
            if key in scope.assignments:
                return scope.assignments[key]
            scope = scope.parent
        raise EvaluationError
class Func:
    """A user-defined carlae function.

    params: list of parameter names
    body:   parsed expression to evaluate on call
    env:    the environment the lambda was defined in (lexical scope)
    """

    def __init__(self, params, body, env):
        self.params = params
        self.body = body
        self.env = env

    def __str__(self):
        return str(self.body)
def tokenize(source):
    """
    Splits an input string into meaningful tokens (left parens, right
    parens, other whitespace-separated values). Returns a list of strings.

    A ";" starts a comment running to the end of its line.

    Arguments:
        source (str): a string containing the source code of a carlae
                      expression

    Fix: the original discarded the WHOLE whitespace-delimited token
    containing ";", so code in front of the semicolon (e.g. the "2)" in
    "(foo 2);c") was silently dropped.
    """
    result = []
    for line in source.split("\n"):
        # Strip the comment portion of the line, if any.
        code = line.split(";", 1)[0]
        for word in code.split():
            current = ""
            for char in word:
                if char == "(" or char == ")":
                    # Parentheses are always tokens of their own.
                    if current:
                        result.append(current)
                        current = ""
                    result.append(char)
                else:
                    current += char
            if current:
                result.append(current)
    return result
def parse(tokens):
    """
    Parses a list of tokens, constructing a representation where:
        * symbols are represented as Python strings
        * numbers are represented as Python ints or floats
        * S-expressions are represented as Python lists

    Raises SyntaxError on an unterminated S-expression or trailing
    tokens.

    Arguments:
        tokens (list): a list of strings representing tokens

    Fixes: the original turned every number into a float ("7" -> 7.0),
    contradicting the docstring, and parsed the token list twice.
    """
    def parse_expression(index):
        """Parse one expression starting at index; return (value, next_index)."""
        token = tokens[index]
        if token == "(":
            subexpressions = []
            index += 1
            while index < len(tokens) and tokens[index] != ")":
                value, index = parse_expression(index)
                subexpressions.append(value)
            if index == len(tokens):
                # Ran out of tokens before the matching ")".
                raise SyntaxError
            return (subexpressions, index + 1)
        # Atom: try int first so integers stay ints, then float,
        # otherwise treat the token as a symbol.
        try:
            return (int(token), index + 1)
        except ValueError:
            pass
        try:
            return (float(token), index + 1)
        except ValueError:
            return (token, index + 1)

    parsed, next_index = parse_expression(0)
    if next_index != len(tokens):
        raise SyntaxError
    return parsed
def result_and_env(tree, env=None):
    """
    Evaluate a parsed carlae expression; return (result, environment).

    tree: a number, a symbol string, or a nested list (S-expression).
    env:  the Environment to evaluate in; when None a fresh frame whose
          parent holds the builtins is created.

    Raises EvaluationError for unbound symbols, malformed special
    forms, arity mismatches and attempts to call a non-function.

    Fixes over the original: leftover debug print()s removed — one of
    them accessed op.params and crashed every builtin call with
    AttributeError; try/finally was abused for control flow (looking up
    a list head raised an unhashable TypeError); arguments are now
    evaluated in the CALLING frame instead of the callee frame, which
    leaked parameter bindings; the 'temp_lambda' binding hack is gone.
    """
    if env is None:
        # Fresh evaluation: empty frame chained to a builtins frame.
        env = Environment(Environment())
    # Leaf: a literal number evaluates to itself; anything else is a
    # symbol to look up (Environment raises EvaluationError if unbound).
    if not isinstance(tree, list):
        if isinstance(tree, (int, float)):
            return (tree, env)
        return (env[tree], env)
    if not tree:
        raise EvaluationError
    # Special form: (define name value) / (define (name params...) body).
    if tree[0] == "define":
        if len(tree) < 3:
            raise EvaluationError
        if isinstance(tree[1], list):
            # Function shorthand: rewrite into an equivalent lambda.
            desugared = ["define", tree[1][0], ["lambda", tree[1][1:], tree[2]]]
            return result_and_env(desugared, env)
        value = result_and_env(tree[2], env)[0]
        env[tree[1]] = value
        return (value, env)
    # Special form: (lambda (params...) body) -> a Func closure.
    if tree[0] == "lambda":
        if len(tree) < 3:
            raise EvaluationError
        return (Func(tree[1], tree[2], env), env)
    # Function call. A bare number can never be an operator.
    if isinstance(tree[0], (int, float)):
        raise EvaluationError
    operator = result_and_env(tree[0], env)[0]
    # Evaluate arguments in the calling environment.
    arguments = [result_and_env(subtree, env)[0] for subtree in tree[1:]]
    if isinstance(operator, Func):
        if len(arguments) != len(operator.params):
            raise EvaluationError
        # New frame whose parent is the DEFINING env (lexical scoping).
        call_env = Environment(operator.env)
        for name, value in zip(operator.params, arguments):
            call_env[name] = value
        return (result_and_env(operator.body, call_env)[0], call_env)
    if callable(operator):
        return (operator(arguments), env)
    raise EvaluationError
def evaluate(tree, env=None):
    """
    Evaluate the given syntax tree according to the rules of the carlae
    language.

    Arguments:
        tree (type varies): a fully parsed expression, as the output
            from the parse function
        env: the environment to evaluate/define in (optional)
    """
    result, _ = result_and_env(tree, env)
    return result
# e = Environment()
# env = Environment(e)
# print(evaluate(parse(tokenize("(define x 7)")),env))
# print(evaluate(parse(tokenize("(define foo (lambda (x) (lambda (y) (+ x y))))")),env))
# print(evaluate(parse(tokenize("(define bar (foo 3))")),env))
# print(evaluate(parse(tokenize("(bar 2)")),env))
# print(evaluate(parse(tokenize("((lambda (x) (* x x)) 3)")),env))
#(define addN(lambda (n) (lambda (i) (+ i n))))
#(define add7(addN 7))
#(add7 2)
#(add7 ((addN 3)((addN 19) 8)))
if __name__ == '__main__':
    # Interactive REPL: evaluate expressions until the user types QUIT.
    # The environment is threaded through every iteration so that
    # (define ...) bindings persist between inputs.
    expr = input(">>> ")
    env = Environment(Environment())
    while expr != "QUIT":
        try:
            res = result_and_env(parse(tokenize(expr)),env)
            print(res[0])
            env = res[1]
        except:
            # NOTE(review): the bare except also hides interpreter bugs;
            # consider catching (EvaluationError, SyntaxError) instead.
            print("error, try again")
        expr = input(">>> ")
# expr = input(">>> ")
# while expr != "QUIT":
# try:
# res = evaluate(parse(tokenize(expr)))
# print(res)
# except:
# print("error, try again")
# expr = input(">>> ")
|
from flask import Flask
from flask import url_for
from flask import render_template
# Flask app in which every route simply renders the template with the
# same name as the URL; no view passes any context to its template.
app = Flask(__name__)

@app.route("/")
@app.route("/index.html")
def index():
    return render_template("index.html")

# Dashboard pages.
@app.route("/dashboard2.html")
def dashboard2():
    return render_template("dashboard2.html")
@app.route("/dashboard3.html")
def dashboard3():
    return render_template("dashboard3.html")
@app.route("/dashboard4.html")
def dashboard4():
    return render_template("dashboard4.html")
@app.route("/facts.html")
def facts():
    return render_template("facts.html")

# cpc_* pages.
@app.route("/cpc_a.html")
def cpc_a():
    return render_template("cpc_a.html")
@app.route("/cpc_d.html")
def cpc_d():
    return render_template("cpc_d.html")
@app.route("/cpc_f.html")
def cpc_f():
    return render_template("cpc_f.html")
@app.route("/cpc_n.html")
def cpc_n():
    return render_template("cpc_n.html")
@app.route("/cpc_s.html")
def cpc_s():
    return render_template("cpc_s.html")
@app.route("/cpc_total.html")
def cpc_total():
    return render_template("cpc_total.html")

# ppg_* pages.
@app.route("/ppg_b.html")
def ppg_b():
    return render_template("ppg_b.html")
@app.route("/ppg_c.html")
def ppg_c():
    return render_template("ppg_c.html")
@app.route("/ppg_d.html")
def ppg_d():
    return render_template("ppg_d.html")
@app.route("/ppg_h.html")
def ppg_h():
    return render_template("ppg_h.html")
@app.route("/ppg_i.html")
def ppg_i():
    return render_template("ppg_i.html")
@app.route("/ppg_total.html")
def ppg_total():
    return render_template("ppg_total.html")

# tp_* pages.
@app.route("/tp_b.html")
def tp_b():
    return render_template("tp_b.html")
@app.route("/tp_c.html")
def tp_c():
    return render_template("tp_c.html")
@app.route("/tp_e.html")
def tp_e():
    return render_template("tp_e.html")
@app.route("/tp_v.html")
def tp_v():
    return render_template("tp_v.html")
@app.route("/tp_i.html")
def tp_i():
    return render_template("tp_i.html")
@app.route("/tp_total.html")
def tp_total():
    return render_template("tp_total.html")

# tc_* pages.
@app.route("/tc_b.html")
def tc_b():
    return render_template("tc_b.html")
@app.route("/tc_f.html")
def tc_f():
    return render_template("tc_f.html")
@app.route("/tc_g.html")
def tc_g():
    return render_template("tc_g.html")
@app.route("/tc_j.html")
def tc_j():
    return render_template("tc_j.html")
@app.route("/tc_u.html")
def tc_u():
    return render_template("tc_u.html")
@app.route("/tc_total.html")
def tc_total():
    return render_template("tc_total.html")

if __name__=="__main__":
    # NOTE(review): debug=True must not be used in production — it
    # enables the interactive debugger and code execution from the browser.
    app.run(debug=True)
|
import os
import numpy as np
import cv2 as cv
#resized_hsv_fisheye_1414593023678_1403623027411_00013_20140624_171851_jp.png
# /home/juraj/Desktop/juro/programovanie/dipl/dipl/init_work2
def open_img(fname, base_dir='/home/juraj/Desktop/juro/programovanie/dipl/dipl/init_work2/fisheyes/2014/06/24/'):
    """Load an image and dump its dimensions and every pixel to stdout.

    Parameters:
        fname: image file name inside ``base_dir``.
        base_dir: directory holding the fisheye frames (parameterized so the
            function is no longer tied to one machine; default keeps the
            original behavior).

    Raises:
        FileNotFoundError: if the image cannot be read.
    """
    path = base_dir + fname
    img_read = cv.imread(path)
    # cv.imread returns None (no exception) when the path is wrong —
    # fail loudly instead of crashing later on len(None).
    if img_read is None:
        raise FileNotFoundError("could not read image: " + path)
    print(len(img_read))      # number of rows
    print(len(img_read[0]))   # number of columns
    print(img_read[0][0])     # first pixel value
    for row in img_read:
        for px in row:
            # Python 3 form of the original Py2 `print px,` statement,
            # which was a syntax error in this otherwise-Py3 script.
            print(px, end=' ')
        print('\n')
open_img('resized_hsv_fisheye_1414593023678_1403623027411_00013_20140624_171851_jp.png')
# JTSK-350112
# robustness.py
# Taiyr Begeyev
# t.begeyev@jacobs-university.de
def example1():
    """Read three pairs of ints and print their quotient.

    Raises ZeroDivisionError when the second number is 0 (this is the
    point of the demo), and ValueError on non-numeric input.
    """
    for _attempt in range(3):
        numerator = int(input("enter a number: "))
        denominator = int(input("enter another number: "))
        print(numerator, '/', denominator, '=', numerator / denominator)
def example2(L):
    """Print the sums of all adjacent pairs of L.

    Zipping L with its own tail naturally stops before the last element,
    which is what the manual ``L[i + 1]`` version had to guard against
    (an IndexError at the boundary).
    """
    print("\n\nExample 2")
    sumOfPairs = [left + right for left, right in zip(L, L[1:])]
    # Mixing ints and strings in L raises TypeError inside the addition.
    print("sumOfPairs = ", str(sumOfPairs))
def printUpperFile(fileName):
    """Print every line of ``fileName`` in upper case.

    Raises FileNotFoundError when the file does not exist.
    """
    # `with` guarantees the handle is closed even if printing raises;
    # the original open()/close() pair leaked the handle on error.
    with open(fileName, "r") as file:
        for line in file:
            print(line.upper())
def main():
    """Run the robustness demos, reporting the first anticipated exception.

    Note: execution stops at the first exception raised inside the try
    block, so at most one except branch prints per run.
    """
    try:
        example1()
        L = [10, 3, 5, 6, 9, 3]
        example2(L)
        # leads to TypeError (int + str inside the pair sums)
        # example2([10, 3, 5, 6, "NA", 3])
        example2([10, 3, 5, 6, 3])
        printUpperFile("doesNotExistYest.txt")
        printUpperFile("./Dessssktop/misspelled.txt")
    except ZeroDivisionError as exc:
        # consider the case when denominator is 0
        print("ZeroDivisionError: ", exc)
    except ValueError as exc:
        # Raised when an operation or function receives
        # an argument that has the right type but an inappropriate value
        print("ValueError: ", exc)
    except IndexError as exc:
        # consider the case when a sequence subscript is out of range
        print("IndexError: ", exc)
    except TypeError as exc:
        # Passing arguments of the wrong type
        print("TypeError: ", exc)
    except FileNotFoundError as exc:
        # consider the case when the file is not found
        print("FileNotFoundError: ", exc)
main() |
def tempAposentadoria(idade, tempoTrabalho):
    """Return True when the retirement conditions hold.

    Eligibility: age (idade) >= 65 AND years worked (tempoTrabalho) >= 30.
    """
    # The comparison already yields a bool — no need for if/else True/False.
    return idade >= 65 and tempoTrabalho >= 30
# Read age and career length from stdin, then report eligibility.
idade = int(input())
tempoTrabalho = int(input())
print(tempAposentadoria(idade, tempoTrabalho))
from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
    """post_save hook: issue a DRF auth token for each newly created user."""
    if not created:
        return
    Token.objects.create(user=instance)
class AllMedHistory(models.Model):
    '''
    This model lists out every searchable medical history item including current medical issues(type 1), medications (type 2), family history (type 3), allergies (type 4). Smoking history, pregnancy and possibly sexual activity are current issues.
    '''
    # Users who have this item in their history (optional).
    user = models.ManyToManyField(User, related_name= 'allMedHistories', blank=True)
    # Category code — see class docstring: 1=current issue, 2=medication,
    # 3=family history, 4=allergy.
    historyType = models.IntegerField(default=0) #types
    description = models.CharField(max_length = 230)
    def __str__(self):
        return '%d: %s' % (self.historyType, self.description)
    class Meta:
        # Group items by category when listed.
        ordering = ['historyType']
class AllEvent(models.Model):
    '''
    This table includes all possibe events that could be pushed to the user. It includes an event type (appt, labs, imaging, survey). The foreign keys indicate which, if any, medical histories are associated with the item.
    '''
    # M2M through UserEvent, which carries completion/sharing state.
    user = models.ManyToManyField(User, related_name = 'allUserEvents', through='UserEvent', blank=True)
    name = models.TextField(max_length = 100)
    eventType = models.IntegerField() #appt=1, labs=2, imaging=3, survey/counseling=4
    # Applicability window and gender filter for the event.
    minAge = models.DecimalField(max_digits = 5, decimal_places = 2)
    maxAge = models.DecimalField(max_digits = 5, decimal_places = 2)
    gender = models.IntegerField(default=0) #0=both, 1=men, 2=women
    # Text variants for each surface the event can appear on.
    timelineDescription = models.TextField()
    publicFeedDescription = models.TextField(null = True, blank=True)
    personalFeedDescription = models.TextField(null = True, blank=True)
    counselingText = models.TextField(null = True, blank=True)
    # Up to six optional medical-history associations.
    allMedHistory1 = models.ForeignKey(AllMedHistory, on_delete=models.CASCADE, null=True, blank=True, related_name = 'AllEvents_allMedHistory1')
    allMedHistory2 = models.ForeignKey(AllMedHistory, on_delete=models.CASCADE, null=True, blank=True, related_name = 'AllEvents_allMedHistory2')
    allMedHistory3 = models.ForeignKey(AllMedHistory, on_delete=models.CASCADE, null=True, blank=True, related_name = 'AllEvents_allMedHistory3')
    allMedHistory4 = models.ForeignKey(AllMedHistory, on_delete=models.CASCADE, null=True, blank=True, related_name = 'AllEvents_allMedHistory4')
    allMedHistory5 = models.ForeignKey(AllMedHistory, on_delete=models.CASCADE, null=True, blank=True, related_name = 'AllEvents_allMedHistory5')
    allMedHistory6 = models.ForeignKey(AllMedHistory, on_delete=models.CASCADE, null=True, blank=True, related_name = 'AllEvents_allMedHistory6')
    def __str__(self):
        return self.name
class UserProfile(models.Model):
    '''
    Links UserProfile to a User model instance. User includes username, password, email, first_name, last_name
    '''
    # on_delete=CASCADE made explicit: it was already the implicit default
    # (and is required from Django 2.0 on); this matches the explicit
    # on_delete used on every other relation in this module.
    user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key = True, related_name='userProfile')
    birthDate = models.DateField(blank=True, null=True)
    gender = models.IntegerField(default=0, blank=True, null=True) # 1 is male, 2 is female
    # Profile-completion progress, 0-100.
    completionPercentage = models.IntegerField(default=0, null=True, blank=True)
    profilePicture = models.ImageField(upload_to=None, height_field=None, width_field=None, max_length=100, blank=True, null=True)
    friends = models.ManyToManyField(User, related_name='friends', blank=True)
    def __str__(self):
        return '%s %s' % (self.user.first_name, self.user.last_name)
class UserCreatedEvent(models.Model):
    '''
    Events that are created by the user and added to the timeline
    '''
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name = 'userCreatedEvents', null=True, blank=True)
    # Fixed accidental double assignment (`eventType = eventType = ...`).
    eventType = models.IntegerField()
    # Free-text companion name for the event, if any.
    withWhom = models.CharField(max_length = 100, null=True, blank=True)
    eventDate = models.DateField()
    description = models.TextField(max_length = 300, null=True, blank=True)
class UserEvent(models.Model):
    '''
    These are specific AllEvents that apply to specific users
    '''
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name ='eventsOfUser')
    allEvent = models.ForeignKey(AllEvent, on_delete=models.CASCADE, null=True, blank=True, related_name='eventInfo')
    # on_delete=CASCADE made explicit for consistency with the other
    # relations in this module (CASCADE was already the implicit default).
    createdEvent = models.OneToOneField(UserCreatedEvent, on_delete=models.CASCADE, related_name='userEventForCreatedEvent', null=True, blank=True)
    completed = models.IntegerField(default=0, null=True, blank=True)
    dateCompleted = models.DateField(null=True, blank=True)
    dateShared = models.DateField(null=True, blank=True)
    ifShared = models.IntegerField(default=0, null=True, blank=True) #0=not shared, 1=friends, 2=public
    def __str__(self):
        return '%s : %d' % (self.user, self.completed) #need to add DateTimeField
    class Meta:
        ordering = ['dateCompleted']
class Like(models.Model):
    '''
    Every like by every user for any UserEvent
    '''
    # The user who pressed like.
    user = models.ForeignKey(User, on_delete=models.CASCADE, blank=True, null=True)
    userEvent = models.ForeignKey(UserEvent, on_delete=models.CASCADE, null=True, blank=True, related_name = 'eventLikes')
    def __str__(self):
        # Shown as the liker's full name.
        return '%s %s' % (self.user.first_name, self.user.last_name)
class Comment(models.Model):
    '''
    Every comment by every user for any UserEvent
    '''
    text = models.TextField(max_length = 400, null=True, blank=True)
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name = 'commenter')
    userEvents = models.ForeignKey(UserEvent, on_delete=models.CASCADE, null=True, blank=True, related_name = 'eventComments')
    commentTime = models.DateTimeField(null=True, blank=True)
    class Meta:
        # Oldest comments first.
        ordering = ['commentTime']
    def __str__(self):
        return '%s: %s' % (self.text, self.commentTime)
|
from django.conf import settings
from Pbas import views
from django.conf.urls import patterns, include, url
from django.contrib import admin
# URL routes for the Pbas app.
# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8
# and removed in 1.10 — migrate to a plain list of url()/path() entries
# when upgrading the Django version.
urlpatterns = patterns('',
    url(r'^index$', views.login, name='pbas_index'),
    url(r'^Home$', views.home_page, name='home_page'),
    url(r'^signup_action/$', views.signup_action, name='signup_action'),
)
from django import forms
from main_clothesmarket.models import Category, Kind, Product
# creation form of category
class CategoryForm(forms.ModelForm):
    """Form for creating/editing a Category (placeholders are Russian UI text)."""
    title = forms.CharField(max_length=16,
                            widget=forms.TextInput(attrs={'placeholder': 'Название', 'required': 'required'}))
    # Position of the category in the site menu.
    category_order = forms.IntegerField(
        widget=forms.TextInput(attrs={'placeholder': 'Порядок категории в меню',
                                      'required': 'required'}))
    photo = forms.ImageField(widget=forms.FileInput())
    is_visible = forms.BooleanField(initial=True, required=False)
    class Meta:
        model = Category
        fields = ('title', 'category_order', 'photo', 'is_visible')
# creation form of kinds
class KindForm(forms.ModelForm):
    """Form for creating/editing a Kind (sub-category)."""
    title = forms.CharField(max_length=30,
                            widget=forms.TextInput(attrs={'placeholder': 'Название', 'required': 'required'}))
    kind_order = forms.IntegerField(
        widget=forms.TextInput(attrs={'placeholder': 'Порядок категории в базе',
                                      'required': 'required'}))
    photo = forms.ImageField(widget=forms.FileInput())
    is_visible = forms.BooleanField(initial=True, required=False)
    # Description text.
    des = forms.CharField(max_length=150,
                          widget=forms.TextInput(attrs={'placeholder': 'Описание',
                                                        'required': 'required'}))
    # NOTE(review): this assigns the model class itself, not a form Field,
    # so it is not a declared form field; the 'category' entry in
    # Meta.fields is what produces the actual field. Presumably a
    # ModelChoiceField was intended — confirm before changing.
    category = Category
    class Meta:
        model = Kind
        fields = ('title', 'kind_order', 'photo', 'is_visible', 'des', 'category')
# creation form of category
class ProductForm(forms.ModelForm):
    """Form for creating/editing a Product, including gallery photos and price."""
    title = forms.CharField(max_length=30,
                            widget=forms.TextInput(attrs={'placeholder': 'Название', 'required': 'required'}))
    # Brand name.
    manufactura = forms.CharField(max_length=30,
                                  widget=forms.TextInput(attrs={'placeholder': 'Бренд', 'required': 'required'}))
    product_order = forms.IntegerField(
        widget=forms.TextInput(attrs={'placeholder': 'Порядок категории в базе',
                                      'required': 'required'}))
    # Main photo plus three gallery photos.
    photo = forms.ImageField(widget=forms.FileInput())
    subphoto1 = forms.ImageField(widget=forms.FileInput())
    subphoto2 = forms.ImageField(widget=forms.FileInput())
    subphoto3 = forms.ImageField(widget=forms.FileInput())
    is_visible = forms.BooleanField(initial=True, required=False)
    price = forms.DecimalField(max_digits=6, decimal_places=2,
                               widget=forms.TextInput(attrs={'placeholder': 'Цена', 'required': 'required'}))
    des = forms.CharField(max_length=150,
                          widget=forms.TextInput(attrs={'placeholder': 'Описание',
                                                        'required': 'required'}))
    # NOTE(review): assigns the Kind model class, not a form Field — not a
    # declared form field; the real 'category' field comes from Meta.fields.
    # Presumably a ModelChoiceField was intended — confirm before changing.
    category = Kind
    size = forms.CharField(max_length=30,
                           widget=forms.TextInput(attrs={'placeholder': 'Размер',
                                                         'required': 'required'}))
    available = forms.BooleanField(initial=True, required=False)
    slug = forms.SlugField(max_length=200)
    class Meta:
        model = Product
        fields = ('title', 'manufactura', 'product_order', 'photo', 'subphoto1', 'subphoto2', 'subphoto3',
                  'is_visible', 'price', 'des', 'category', 'size', 'available', 'slug')
import numpy as np
import cv2
from keras.optimizers import Adam
from keras.models import Model, Sequential
from keras.callbacks import ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing.image import load_img, img_to_array
from keras.layers import Dense, Input, Dropout, GlobalAveragePooling2D, Flatten, Conv2D, BatchNormalization, Activation, MaxPooling2D
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
class HediaKeras():
    """Facial-emotion recognizer: OpenCV Haar-cascade face detection feeding
    a Keras CNN that classifies 48x48 grayscale face crops into one of
    seven emotion labels."""
    def __init__(self):
        # Model is built and pre-trained weights loaded at construction time.
        self.model = None
        self.face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
        # softmax output index -> human-readable label
        self.emotions = {0: 'Angry', 1: 'Disgust', 2: 'Fear', 3: 'Happy',
                         4: 'Sad', 5: 'Surprise', 6: 'Neutral'}
        self.create_model()
        self.load_weights()
    def create_model(self):
        """Assemble the CNN: four Conv/BN/ReLU/MaxPool stages, then two
        Dense/BN/ReLU/Dropout blocks and a 7-way softmax head."""
        nb_classes = 7
        model = Sequential()
        # Stage 1: 64 filters over the 48x48x1 grayscale input.
        model.add(Conv2D(64, (3, 3), padding='same', input_shape=(48, 48, 1)))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        # Stage 2: 128 filters, larger 5x5 kernel.
        model.add(Conv2D(128, (5, 5), padding='same'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        # Stages 3-4: 512 filters each.
        model.add(Conv2D(512, (3, 3), padding='same'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Conv2D(512, (3, 3), padding='same'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        # Classifier head.
        model.add(Dense(256))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(Dropout(0.25))
        model.add(Dense(512))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(Dropout(0.25))
        model.add(Dense(nb_classes, activation='softmax'))
        self.model = model
    def load_weights(self, file='Hedia_kerasmodel.h5'):
        """Load pre-trained weights from `file` into the assembled model."""
        self.model.load_weights(file)
    def evaluate_face(self, frame):
        """Detect faces in a BGR `frame`, classify each one's emotion, and
        return the frame annotated with boxes and labels (frame is
        mutated in place)."""
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = self.face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y - 50), (x + w, y + h + 10), (255, 0, 0), 2)
            roi_gray = gray[y:y + h, x:x + w]
            # Resize the crop to the network's 48x48 input, add channel and
            # batch dimensions -> shape (1, 48, 48, 1).
            cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
            prediction = self.model.predict(cropped_img)
            print(prediction)
            maxindex = int(np.argmax(prediction))
            print(maxindex)
            cv2.putText(frame, self.emotions[maxindex], (x + 20, y - 60), cv2.FONT_HERSHEY_SIMPLEX, 1,
                        (255, 255, 255),
                        2, cv2.LINE_AA)
        return frame
import subprocess
import os
import sys
import re
sys.path.insert(0, os.path.join("tools", "families"))
import fam
import run_all_species
from run_all_species import SpeciesRunFilter
import plot_speciesrax
import simulations_common
import plot_simulations
# --- Experiment configuration -------------------------------------------
# Toggle between launching runs and plotting already-computed results.
do_run = True
do_plot = not do_run
datasets = []
cores = 40
subst_model = "GTR+G"
gene_trees = ["raxml-ng"]
launch_mode = "normald"
replicates = range(3000, 3100)
# (parameter_name, list_of_values) pairs to sweep over.
# NOTE(review): all append lines are commented out, so varying_params is
# empty and both the run and plot loops below do nothing — confirm which
# sweep should be active.
varying_params = []
#varying_params.append((None, ["none"]))
#varying_params.append(("mu", ["mu0.35", "mu0.5", "mu0.7"]))
#varying_params.append(("population", ["pop10000000", "pop100000000"]))
#varying_params.append(("species", ["s25", "s75"]))
tag = "extreme"
# Base simulation name; the varying parameter is substituted into it.
fixed_point = "ssim_" + tag + "_s50_f1000_sites100_GTR_bl1.0_d0.0_l0.0_t0.0_gc0.0_p0.0_pop1000000000_mu0.2_theta5.0_seed20"
# metric to plot
metric_names = ["species_unrooted_rf"]
# methods to plot: (internal method key, display label)
methods_tuples = []
methods_tuples.append(("minibmepruned-mininj_raxml-ng", "MiniBMEPruned"))
#methods_tuples.append(("njrax-mininj_raxml-ng", "MiniNJ"))
methods_tuples.append(("astral_raxml-ng", "Astral"))
methods_tuples.append(("astrid-fastme_raxml-ng", "Astrid-FastMe"))
methods_tuples.append(("astrid-bionj_raxml-ng", "Astrid-BioNJ"))
# run run_filter on all datasets in dataset
# run run_filter on all datasets in datasets
def run_species_methods(datasets, subst_model, cores, run_filter, launch_mode):
    """Apply the run_filter's reference methods to every dataset's data dir."""
    for dataset_name in datasets:
        data_dir = fam.get_datadir(dataset_name)
        run_filter.run_reference_methods(data_dir, subst_model, cores, launch_mode)
def run_varying_experiment():
    """Configure a SpeciesRunFilter and launch runs for every varying parameter."""
    run_filter = SpeciesRunFilter()
    run_filter.disable_all()
    # Re-enable exactly the steps this experiment needs.
    for step in ("generate", "pargenes", "njrax", "astral", "astrid", "njst",
                 "minibme", "minibmepruned", "cleanup", "analyse"):
        setattr(run_filter, step, True)
    run_filter.pargenes_starting_trees = 1
    run_filter.pargenes_bootstrap_trees = 0
    run_filter.starting_gene_trees = gene_trees
    for _param_name, param_values in varying_params:
        datasets = simulations_common.get_dataset_list(fixed_point, param_values, replicates)
        run_species_methods(datasets, subst_model, cores, run_filter, launch_mode)
def plot_varying_experiment():
    """Render one comparison plot per (varying parameter, metric) pair."""
    for param, param_values in varying_params:
        datasets = simulations_common.get_dataset_list(fixed_point, param_values, replicates, True)
        print("Plotting parameter " + param)
        for metric in metric_names:
            output = simulations_common.get_plot_name("varyilsmissing", param, subst_model, metric)
            plot_simulations.plot_varying_params(datasets, param, metric, methods_tuples, subst_model, output)
# Entry point: either launch the runs or plot existing results.
if do_run:
    run_varying_experiment()
if do_plot:
    plot_varying_experiment()
|
class Solution:
    # Greedy: sort both lists, then satisfy the least-greedy remaining child
    # with the smallest cookie that fits.
    def findContentChildren(self, g: list, s: list) -> int:
        """Return the max number of children satisfiable with the cookies.

        g: greed factor of each child; s: size of each cookie. A child i is
        content with cookie j iff s[j] >= g[i]. Both lists are sorted in
        place. Uses `list` instead of the bare `List` annotation, which is
        undefined outside the LeetCode harness (NameError at class-body
        execution).
        """
        g.sort()
        s.sort()
        child = 0
        cookie = 0
        while child < len(g) and cookie < len(s):
            if g[child] <= s[cookie]:
                child += 1       # child satisfied; both pointers advance
            cookie += 1          # cookie consumed (or too small) either way
        return child
|
from radiopie import *
import logging
def main():
    """Wire the LCD display into the menu controller and hand over control."""
    display = LCDController()
    menu = MenuController(display)
    menu.start()
def setupLogger():
    """Install a basic handler and enable DEBUG output for "radiopie"."""
    logging.basicConfig()
    logging.getLogger("radiopie").setLevel(logging.DEBUG)
if __name__ == "__main__":
setupLogger()
main()
|
num = int(input("enter a number"))
nums = [i for i in range(2, num//2+1) if num % i != 0]
print(nums) |
import sys
import clean_taffy
import make_pool
import preprocess_data
import train_recommend
import train_recommend_selected
import glob
directory = "/var/www/html/users/"+str(sys.argv[1])
province = sys.argv[2]
root_dir = "/var/www/html/"
train_recommend_selected.train_and_recommend_selected(root_dir, directory, province) |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 9 11:27:21 2020
@author: adonay
"""
import numpy as np
import matplotlib.pyplot as plt
import utils_io as uio
## Plotting functions
def plot_per_axis(ts, time, color, fig_axes, **kwargs):
    """Plot ts[:, i, :] against `time` on the i-th subplot of `fig_axes`.

    `color` is an optional matplotlib format string; when falsy the plot
    call omits it. Extra kwargs are forwarded to Axes.plot.
    """
    for axis_ix, subplot in enumerate(fig_axes):
        series = ts[:, axis_ix, :].T
        if color:
            subplot.plot(time, series, color, **kwargs)
        else:
            subplot.plot(time, series, **kwargs)
def plot_per_axis_outlier(outliers, time, color, fig_axes):
    """Overlay outlier samples on each subplot.

    Each outliers entry is indexed as entry[axis][0] -> (frame_ixs, values);
    empty entries are skipped per axis.
    """
    for axis_ix, subplot in enumerate(fig_axes):
        index_chunks = [entry[axis_ix][0][0] for entry in outliers
                        if len(entry) and entry[axis_ix][0][0].shape[0]]
        if index_chunks == []:
            continue
        frame_ixs = np.hstack(index_chunks).astype(int)
        values = np.hstack([entry[axis_ix][0][1] for entry in outliers
                            if len(entry) and entry[axis_ix][0][1].shape[0]])
        subplot.plot(time[frame_ixs], values, color)
def plot_check_quality(fingers, int1, int2, outliers, timestmp, axes, fig,
                       subj, subj_diag, path_s,ts_old=None, old_frames=None):
    """Interactive QC screen for one subject's finger-tracking time series.

    Draws the cleaned series (int2), the raw predictions (fingers, faint)
    and the flagged outliers, then loops on key presses until the user
    rates the prediction quality (0 = bad, 1 = good) or opens the detailed
    inspection ('i'). Returns (res, relab): the 'good'/'bad' verdict and
    any relabelled points collected during inspection.
    """
    def plotter():
        # Redraws the full QC view (called again after an inspection).
        axes[0].clear()
        # plot_per_axis(int1, timestmp, 'b', axes)
        plot_per_axis(int2, timestmp, [], axes)
        plot_per_axis_outlier(outliers, timestmp, 'k*', axes)
        # Freeze the current y-limits so the faint overlays below don't rescale.
        axes[0].set_ylim(axes[0].get_ylim())
        axes[1].set_ylim(axes[1].get_ylim())
        plot_per_axis(fingers, timestmp, [], axes, **{'alpha':.1})
        if ts_old is not None:
            plot_per_axis(ts_old, timestmp, 'g-.', axes, **{'alpha':.2})
        if old_frames is not None:
            plot_per_axis(ts_old[:,:, old_frames], timestmp[old_frames], 'go', axes, **{'alpha':.2})
            plot_per_axis(int2[:,:, old_frames], timestmp[old_frames], 'ko', axes, **{'alpha':.2})
        axes[0].set_title(f'Diag:{subj_diag}| {subj}')
        axes[0].set_xlabel("Doing Quality Inspection")
        axes[1].set_xlabel("Instructions. Prediction quality good for analysis?"
                       "\n Press: 0) if no 1) if yes, i) to inspect TS")
    plotter()
    fig.tight_layout()
    vals, relab = [0, 1, 'i'], []
    while True:
        res = get_key_resp(fig, vals)
        if res == 'i':
            # Detailed time-series inspection; may relabel points in int2.
            relab, _ = plot_ts_inspection(
                [], timestmp, int1, int2, path_s, subj, subj_diag, axes, fig, [], ts_old)
            plotter()
        elif res in [0, 1]:
            # Map the 0/1 key to 'bad'/'good' and tint the figure background.
            res = resp_corr_fig_bkg(fig, res)
            break
    return res, relab
def resp_corr_fig_bkg(fig, res):
    """Normalize a 0/1 verdict to 'bad'/'good' and tint the figure to match.

    0 or 'bad'  -> light-red background, returns 'bad'.
    1 or 'good' -> mint-green background, returns 'good'.
    Anything else is returned untouched.
    """
    if res in (0, 'bad'):
        fig.patch.set_facecolor('xkcd:light red')
        return 'bad'
    if res in (1, 'good'):
        fig.patch.set_facecolor('xkcd:mint green')
        return 'good'
    return res
def plot_get_times(fingers, int2,timestmp, axes, fig, subj, subj_diag):
    """Let the user click the Right/Left begin/end time points on the traces.

    Returns (times, times_leng): the four clicked sample indices
    (R beg, R end, L beg, L end) and the two elapsed durations
    (right side, left side) in time units of `timestmp`.
    """
    # picker=3 makes the lines clickable with a 3-point tolerance.
    lines0 = axes[0].plot(timestmp, int2[:,0,:].T, '-', lw=2, picker=3)
    lines1 = axes[1].plot(timestmp, int2[:,1,:].T, '-', lw=2, picker=3)
    _ = axes[0].plot(timestmp, int2[:,0,:].T, 'k.', markersize=1)
    _ = axes[1].plot(timestmp, int2[:,1,:].T, 'k.', markersize=1)
    # First half of the lines is the right side (red), second half left (green).
    ln_clr= ['r', 'g']
    n_side = len(lines0)/2
    n_r = np.arange(n_side, dtype='int')
    n_l = np.arange(n_side, n_side*2, dtype='int')
    for s, c in zip([n_r, n_l], ln_clr): # side and color
        for l in [ np.array(lines0)[s], np.array(lines1)[s]]: # lines
            for j in l:
                j.set_color(c)
    plot_per_axis(fingers, timestmp, 'r', axes, **{'alpha':.1})
    axes[0].set_title(f'Diag:{subj_diag}| {subj}: doing R-L Beg-End times')
    axes[1].set_xlabel("Instructions. Select R/L beg/end."
                       " Click: Right (RED) beginning - end, Left (Green) beg."
                       f" - end time points. \n Press: "
                       "Escape) to reselect point. Enter) if good time point")
    fig.tight_layout()
    res = get_key_times(fig, axes)
    times = np.hstack([t[0] for t in res['inxs']])
    # Durations between (beg, end) pairs: indices 0-1 (right) and 2-3 (left).
    times_leng = np.hstack([timestmp[times[i + 1]] - timestmp[times[i]]
                            for i in [0,2]])
    return times, times_leng
def plot_frame_outlier(frame, coords_ori, coords_out, axis):
    """Show a video frame with original (black x) and corrected (red +) points."""
    axis.imshow(frame)
    for marker, version in zip(['kx', 'r+'], [coords_ori, coords_out]):
        for point in version:
            axis.plot(point[0], point[1], marker)
def plot_contig_frames(res, frame_num, fingers, int2, path_s, subj, relab):
    """Step through the frames neighbouring an outlier frame.

    `res` ('left'/'right') picks the starting direction. Arrow keys move,
    'l' opens the manual relabelling UI (updating int2 in place), escape
    closes the window. Returns the accumulated `relab` list.
    """
    fig, ax = plt.subplots(1, 1, figsize=(20, 10))
    fram_n = frame_num - 1 if res == "left" else frame_num +1
    while True:
        fram = uio.get_prediction_frame(path_s, subj, fram_n)
        plot_frame_outlier(fram, fingers[:, :, fram_n],
                           int2[:, :, fram_n], ax)
        ax.set_title(f"Frame n: {fram_n}, outlier frame {frame_num}")
        ax.set_xlabel("Instructions. <- arrow) previous -> arrow) next"
                      " frame, l) Label preds, escape) finish viewing")
        res2 = get_key_resp(fig, vals=["escape", "right", "left", "l"])
        if res2 == 'escape':
            plt.close(fig)
            print("out loop")
            break
        elif res2 in ["right", "left"]:
            # Move one frame in the chosen direction and redraw.
            fram_n = fram_n - 1 if res2 == "left" else fram_n +1
            ax.clear()
        elif res2 in ["l"]:
            # Manual relabelling of this frame's predictions.
            fig2, ax2 = plt.subplots(1, 1, figsize=(20, 10))
            pred = plot_make_new_pred(fram, int2[:, :, fram_n], fig2, ax2, fram_n)
            relab.extend(pred)
            plt.close(fig2)
            # Write the new coordinates back into int2 (rows: frame, point, x, y).
            for p in pred:
                int2[int(p[1]), :, int(p[0])] = p[2:]
    return relab
def plot_oulier_qc(outliers, fingers, int2, path_s, subj, axis, fig, relab):
    """Review each detected outlier on its video frame and collect verdicts.

    For every outlier the user rates whether the corrected prediction (red +)
    improved on the original (black x); 'l' opens manual relabelling and
    arrows show neighbouring frames. A frame already judged on one axis
    (x or y) re-uses that verdict. Returns (out_new, relab) where each
    out_new row ends up as [entry_ix, axis_ix, frame, value, verdict].
    """
    # reshape outliers, current very bad
    out_new = []
    for nf, f in enumerate(outliers):
        for ns, xy in enumerate(f):
            for xy_p, xy_v in zip(xy[0][0], xy[0][1]):
                out_new.append([nf, ns, int(xy_p), round(xy_v)])
    # Ensure same prediction diff axis (x, y) are not inspected twice
    frames_nums = [ o[2] for o in out_new]
    u_val, u_x, u_inv, u_c = np.unique(frames_nums, return_index=True,
                                       return_inverse=True, return_counts=True)
    # o_done[k] is True once any outlier on unique frame k has been judged.
    o_done = np.zeros(u_c.size, dtype=bool)
    for i, out in enumerate(out_new):
        frame_num = out[2]
        # Check if inspected previous axis (x, y)
        if o_done[u_inv[i]]:
            prev = frames_nums.index(frame_num)
            # Add resp from previous axis
            out_new[i].append(out_new[prev][-1])
            continue
        frame = uio.get_prediction_frame(path_s, subj, frame_num)
        coords_ori = fingers[:, :, frame_num]
        coords_out = int2[:, :, frame_num]
        while True:
            plot_frame_outlier(frame, coords_ori, coords_out, axis)
            axis.set_title(f"Outlier {i}/{u_val.size}, frame num: {frame_num}")
            axis.set_xlabel(f"Instructions. Outlier (red +) improved prediction?"
                            " Press: 0) if no 1) if yes, l) label, left-right arrow) "
                            "plot contiguous frames \n Then enter) next"
                            " escape) respond again")
            fig.tight_layout()
            # Select if good outlier
            res = get_key_resp(fig, vals=[0, 1, "left", "right", "l"])
            # Plot contiguous frames
            if res in ["left", "right"]:
                relab = plot_contig_frames(res, frame_num, fingers, int2,
                                           path_s, subj, relab)
                # After browsing, ask again for the 0/1 verdict.
                res = get_key_resp(fig, vals=[0, 1])
            elif res == "l":
                # Manual relabel implies the outlier was 'bad'.
                res = resp_corr_fig_bkg(fig, 0)
                fig2, ax2 = plt.subplots(1, 1, figsize=(20, 10))
                pred = plot_make_new_pred(frame, coords_out, fig2, ax2,
                                          frame_num)
                relab.extend(pred)
                plt.close(fig2)
                # Write corrected coordinates into the frame slice and int2.
                for p in pred:
                    coords_out[int(p[1]), :] = p[2:]
                    int2[int(p[1]), :, int(p[0])] = p[2:]
                plot_frame_outlier(frame, coords_ori, coords_out, axis)
                axis.set_xlabel(f"Instructions. enter) to confirm"
                                " escape) respond again")
                break
            res = resp_corr_fig_bkg(fig, res)
            # Highlight the current outlier's corrected value.
            out_val = int2[out[0], :, frame_num]
            axis.plot(out_val[0], out_val[1], "ro", markerfacecolor="none")
            # Accept answer
            out_insp = get_key_resp(fig, vals=['enter', 'escape'])
            if out_insp == 'enter':
                break
            elif out_insp == 'escape':
                axis.clear()
        axis.clear()
        fig.patch.set_facecolor('w')
        # Record the verdict and mark this frame as judged.
        out_new[i].append(res)
        o_done[u_inv[i]] = True
    return out_new, relab
def plot_ts_inspection(out_checked, timestmp, int1, int2, path_s, subj,
                       subj_diag, axes, fig, ttl=None, ts_old=None, old_frames=None):
    """Interactive time-series inspection with per-frame review.

    The user clicks a time point to open its frame (plot_pred_relab) and
    can relabel predictions; 'o' finishes. Relabelled coordinates are
    written into int2 in place and the traces redrawn. Returns
    (new_pred, good_pred): relabelled rows and frame indices confirmed good.
    """
    axes[0].clear()
    axes[1].clear()
    if ttl:
        axes[0].set_xlabel(ttl, fontweight='bold')
    def plot_ts(int2):
        # Draw the pickable x/y traces; returns the line artists so they
        # can be removed and redrawn after relabelling.
        if ts_old is not None:
            plot_per_axis(ts_old, timestmp, 'g-', axes, **{'alpha':.4})
        lines0 = axes[0].plot(timestmp, int2[:,0,:].T, '-', lw=1, picker=True)
        lines1 = axes[1].plot(timestmp, int2[:,1,:].T, '-', lw=1, picker=True)
        for ix in range(2):
            axes[ix].yaxis.set_pickradius(25)
            axes[ix].xaxis.set_pickradius(1)
        # First half of the lines = right side (red), second half = left (green).
        ln_clr= ['r', 'g']
        n_side = len(lines0)/2
        n_r = np.arange(n_side, dtype='int')
        n_l = np.arange(n_side, n_side*2, dtype='int')
        for s, c in zip([n_r, n_l], ln_clr): # side and color
            for l in [ np.array(lines0)[s], np.array(lines1)[s]]: # lines
                for j in l:
                    j.set_color(c)
        return lines0, lines1
    lines0, lines1 = plot_ts(int2)
    # Plot outliers
    for out in out_checked:
        out[:4] = [int(i) for i in out[:4]]
        color = "go" if out[-1] == 'good' else "ro"
        axes[out[1]].plot(timestmp[out[2]], out[3], color,
                          markerfacecolor=None)
    plot_per_axis(int1, timestmp, 'b', axes, **{'alpha':.2})
    if old_frames is not None:
        plot_per_axis(ts_old[:,:, old_frames], timestmp[old_frames], 'go', axes, **{'alpha':.2})
        plot_per_axis(int2[:,:, old_frames], timestmp[old_frames], 'ko', axes, **{'alpha':.2})
    axes[0].set_title(f'Diag:{subj_diag}| {subj}: doing TS inspection')
    axes[1].set_xlabel("Instructions. Press o) to finish or click on a "
                       "time point for inspection. Then, press Enter) to plot "
                       "frame or Escape) to select another point")
    fig.tight_layout()
    new_pred, good_pred = [], []
    # Remap matplotlib's default key shortcuts so plain letters ('o', 'l',
    # 'k', 's', 'L') reach our own key handler instead of toolbar actions.
    plt.rcParams['keymap.zoom'] = ['ctrl+o']
    plt.rcParams['keymap.yscale'] = ['ctrl+l']
    plt.rcParams['keymap.xscale'] = ['ctrl+k', 'L']
    plt.rcParams['keymap.save'] = ['ctrl+s']
    while True:
        res, avline = get_clicked_times(fig, axes, 'k')
        if res['inxs']:
            # Normalize the picked index to a scalar frame number.
            if isinstance(res['inxs'], list):
                res['inxs'] = res['inxs'][0]
            if isinstance(res['inxs'], np.ndarray):
                res['inxs'] = res['inxs'][0]
            frame_num = res['inxs']
            pred, relab = plot_pred_relab(path_s, subj, frame_num, int1, int2,
                                          avline, [])
            if len(pred):
                # Apply the relabelled coordinates and redraw the traces.
                new_pred.extend(pred)
                _ = [(l.remove(), l1.remove()) for l, l1 in zip(lines0,lines1)]
                for p in pred:
                    int2[int(p[1]),:,int(p[0])] = p[2:]
                lines0, lines1 = plot_ts(int2)
            else:
                if isinstance(frame_num, np.ndarray):
                    good_pred.append(frame_num[0])
                else:
                    good_pred.append(frame_num)
            if len(relab):
                # Relabels collected from contiguous-frame browsing.
                _ = [(l.remove(), l1.remove()) for l, l1 in zip(lines0,lines1)]
                new_pred.extend(relab)
                for p in relab:
                    int2[int(p[1]),:,int(p[0])] = p[2:]
                    for ax in axes:
                        ax.axvline(timestmp[int(p[0])], c='r', lw=1)
                lines0, lines1 = plot_ts(int2)
        elif res['key'] =='o':
            break
    return new_pred, good_pred
def plot_pred_relab(path_s, subj, frame_num, int1, int2, avline, relab, nan_insp=False, ttl=None):
    """Show one frame with original (int1) vs corrected (int2) predictions
    and ask the user to rate or relabel it.

    Returns (new_pred, relab): relabelled rows (or the string "skip" when
    's' is pressed in nan inspection mode) and relabels collected while
    browsing contiguous frames. The `avline` artists are recolored green
    (good) or red (bad/relabelled) to mark the verdict on the time series.
    """
    # Remap matplotlib's default shortcuts so plain letter keys reach us.
    plt.rcParams['keymap.zoom'] = ['ctrl+o']
    plt.rcParams['keymap.yscale'] = ['ctrl+l']
    plt.rcParams['keymap.xscale'] = ['ctrl+k', 'L']
    plt.rcParams['keymap.save'] = ['ctrl+s']
    new_pred = []
    fig2, ax2 = plt.subplots(1, 1, figsize=(20, 10))
    if ttl:
        ax2.set_title(ttl)
    frame = uio.get_prediction_frame(path_s, subj, frame_num)
    coords_ori = int1[:, :, frame_num]
    coords_out = int2[:, :, frame_num]
    while True:
        plot_frame_outlier(frame, coords_ori, coords_out, ax2)
        ax2.set_xlabel(f"Good prediction?"
                       f" Press: 0) if no, l) no and label, 1) if yes, right or left arrows) "
                       " plot contiguous frames \n Then enter) next or"
                       " escape) respond again")
        fig2.tight_layout()
        # Select if good outlier
        if nan_insp:
            # NaN-inspection mode adds the 's' (skip) option.
            ax2.set_xlabel(f"Good prediction?"
                           f" Press: s) to Skip 0) if no, l) no and label, 1) if yes, right or left arrows) "
                           " plot contiguous frames \n Then enter) next or"
                           " escape) respond again")
            res1 = get_key_resp(fig2, vals=[0, 1, "left", "right", "l", 's'])
        else:
            res1 = get_key_resp(fig2, vals=[0, 1, "left", "right", "l"])
        # Plot contiguous frames
        if res1 in ["left", "right"]:
            relab = plot_contig_frames(res1, frame_num, int1, int2, path_s,
                                       subj, relab)
            continue
            # res1 = get_key_resp(fig2, vals=[0, 1])
        elif res1 == "l":
            # Manual relabelling; mark the time-series lines red.
            pred = plot_make_new_pred(frame, coords_out, fig2, ax2, frame_num)
            new_pred.extend(pred)
            plt.close(fig2)
            for l in avline:
                l.set_color("r")
            break
            # NOTE(review): the four lines below are unreachable (dead code
            # after `break`) — confirm whether the redraw was meant to run
            # before breaking.
            for p in pred:
                coords_out[p[1], :] = p[2:]
            plot_frame_outlier(frame, coords_ori, coords_out, ax2)
            res1 = 0
        elif res1 == "s":
            new_pred ="skip"
            plt.close(fig2)
            break
        # Normalize 0/1 to 'bad'/'good' and tint the figure background.
        res1 = resp_corr_fig_bkg(fig2, res1)
        ax2.set_xlabel(f"{res1} prediction? Press: Enter) to proceed"
                       " or Esc) to respond again")
        # Confirm answer
        res_conf = get_key_resp(fig2, vals=['enter', 'escape'])
        if res_conf == 'enter':
            if res1 == 'good':
                plt.close(fig2)
                for l in avline:
                    l.set_color("g")
                break
            elif res1 == 'bad':
                # Bad prediction: go straight to manual relabelling.
                pred = plot_make_new_pred(frame, coords_out, fig2, ax2, frame_num)
                new_pred.extend(pred)
                plt.close(fig2)
                for l in avline:
                    l.set_color("r")
                break
        elif res_conf == 'escape':
            # Reset and ask again.
            ax2.clear()
            fig2.patch.set_facecolor('w')
    return new_pred, relab
def nan_inspec(nans_pred, path_s, subj, int1, int2, relab):
    """Interactively review every NaN-prediction frame.

    Returns (good_pred, bad_relab, relab): frames confirmed good, rows that
    were relabelled, and the pass-through relab list. Frames skipped with
    's' end up in neither list.
    """
    good_pred = []
    bad_relab = []
    n_nans = len(nans_pred)
    for ix, frame_num in enumerate(nans_pred):
        ttl = f"nan inspection {ix}/{n_nans}, frame # {frame_num}"
        new_pred, relab = plot_pred_relab(path_s, subj, frame_num, int1, int2,
                                          [], relab, nan_insp=True, ttl=ttl)
        if new_pred == "skip":
            continue
        if new_pred:
            bad_relab.extend(new_pred)
        else:
            good_pred.append(frame_num)
    return good_pred, bad_relab, relab
def ret_new_pred(frame_num, coords, coords_out):
    """Return [frame_num, point_ix, x, y] rows for every point whose new
    coordinates differ from the old ones (NaN-aware comparison)."""
    coords = np.squeeze(coords)
    coords_out = np.squeeze(coords_out)
    changed = []
    for point_ix, (new_xy, old_xy) in enumerate(zip(coords, coords_out)):
        if not array_equal_nan(new_xy, old_xy):
            changed.append(np.hstack([frame_num, point_ix, new_xy]))
    stacked = np.vstack(changed) if changed else np.array([])
    return list(stacked)
def array_equal_nan(a1, a2):
    """Return True when a1 and a2 have identical shape and elements,
    treating NaNs as equal when they occupy the same positions.

    Back-port of the NumPy 1.19 behavior of
    ``np.array_equal(a1, a2, equal_nan=True)``.

    Parameters
    ----------
    a1, a2 : array_like
        Input arrays.

    Returns
    -------
    bool
    """
    try:
        arr1, arr2 = np.asarray(a1), np.asarray(a2)
    except Exception:
        # Inputs that cannot be coerced to arrays are never equal.
        return False
    if arr1.shape != arr2.shape:
        return False
    nan1, nan2 = np.isnan(arr1), np.isnan(arr2)
    if not (nan1 == nan2).all():
        # NaNs occur at different positions.
        return False
    # Masks agree, so equality of the non-NaN entries decides the result.
    return bool(np.asarray(arr1[~nan1] == arr2[~nan1]).all())
def plot_make_new_pred(frame, coord, fig, ax, frame_num):
    """Manual relabelling UI for one frame's landmark coordinates.

    The user presses a landmark number (0-5), then clicks its new position
    on the image; pressing the same number again sets the landmark to NaN,
    Enter finishes. Returns the changed rows via ret_new_pred (the input
    `coord` is not mutated; edits go to a copy).
    """
    def onclick1(event):
        # Record the image coordinates of a pick on the displayed frame.
        resp['data'] = [event.mouseevent.xdata, event.mouseevent.ydata]
    def onkey(event):
        print(event.key)
        K = event.key
        resp['key'] = K
        # Digit keys arrive as strings — convert so they match range(6).
        try:
            resp['key']= int(resp['key'])
        except Exception:
            pass
    def legend_coor(coord):
        # Legend entries showing each landmark's current (int) coordinates.
        legend = [s + f" - {c}" if np.isnan(c[0]) else s + f" - {c.astype(int)}"
                  for s, c in zip(leg_names, coord)]
        return legend
    ax.clear()
    ax.imshow(frame, picker=True)
    # Landmark order: right index/thumb/wrist, then left index/thumb/wrist.
    leg_names = ["0 - r_inx ", "1 - r_thm", "2 - r_wrt ",
                 "3 - l_inx", "4 - l_thm", "5 - l_wrt "]
    legnd = legend_coor(coord)
    points = [ax.plot(c[0], c[1], 'x') for c in coord]
    ax.legend([c for p in points for c in p], legnd)
    resp = {'data': [], 'inxs':[], 'key':None}
    new_coord = coord.copy()
    while True:
        cid2 = fig.canvas.mpl_connect('key_press_event', onkey)
        ax.set_xlabel('Press a number to change position, Enter) to finish')
        if resp['key'] == 'enter':
            break
        elif resp['key'] in range(6):
            # A landmark was selected; highlight it and wait for a click.
            k = resp['key']
            resp['key'] = []
            crs, = ax.plot(new_coord[k][0], new_coord[k][1], 'ko',
                           markerfacecolor=None)
            ax.set_xlabel(f'Click on new position for "{leg_names[k]}", press '
                          "num again) for none, esc) to leave")
            while True:
                cid1 = fig.canvas.mpl_connect('pick_event', onclick1)
                if resp['key'] in ['enter', 'escape']:
                    break
                if len(resp['data']) or resp['key'] == k:
                    # Same number again -> erase the landmark (NaN); otherwise
                    # take the clicked position.
                    if resp['key'] == k:
                        x, y = np.nan, np.nan
                    else:
                        x, y = resp['data'][0], resp['data'][1]
                    # Preserve the stored coordinate's dimensionality.
                    if len(new_coord[k].shape)>1:
                        new_coord[k] = np.array([x, y])[:, np.newaxis]
                    elif len(new_coord[k].shape)==1:
                        new_coord[k] = np.array([x, y])
                    # Redraw the marker in its original color and refresh legend.
                    old_col = points[k][0].get_color()
                    new_p = ax.plot(x, y, color=old_col, marker="x",
                                    markerfacecolor=None)
                    points[k][0].remove()
                    points.pop(k)
                    points.insert(k, new_p)
                    new_leg = legend_coor(new_coord)
                    ax.legend( [c for p in points for c in p], new_leg)
                    fig.canvas.mpl_disconnect(cid1)
                    crs.remove()
                    resp = {'data': [], 'inxs':[], 'key':None}
                    break
                plt.pause(.1)
        plt.pause(.5)
    fig.canvas.mpl_disconnect(cid2)
    return ret_new_pred(frame_num, new_coord, coord)
# Events functions
def get_key_times(fig, axes):
    """Collect four time points (R beg/end, L beg/end) via user clicks.

    For each marker the user picks a point on *fig* (drawn as a coloured
    vertical line by ``get_clicked_times``) and confirms with Enter.
    Returns a dict with parallel lists ``'inxs'`` (picked indices) and
    ``'data'`` ((x, y) tuples), one entry per marker.
    """
    clrs = ['r', 'b', 'g', 'k']  # one line colour per time point
    resp = {'data': [], 'inxs':[]}
    ins = ["R beg", "R end", "L beg", "L end"]
    for i, clr in enumerate(clrs):
        axes[1].set_xlabel(f"Instructions. Select {ins[i]}. \n Press: Escape) "
                           "to reselect point. Enter) if good time point")
        while True:
            # Retry until the user actually picked something before Enter.
            r, _ = get_clicked_times(fig, axes, clr)
            if not r['inxs'] == []:
                break
        resp['inxs'].append(r['inxs'])
        resp['data'].append(r['data'])
    return resp
def get_clicked_times(fig, axes, clr):
    """Let the user pick one time point on *fig*; confirm with Enter.

    A pick on any artist records the picked index and (x, y); a vertical
    line in colour *clr* is drawn on every axis at the picked x.  Escape
    clears the selection; Enter (or 'o') accepts it.  Returns
    ``(resp, ax_vls)`` where *resp* holds 'data'/'inxs'/'key' and
    *ax_vls* is the list of axvline artists left on the axes.
    """
    def onclick(event):
        # The pick may return several indices; keep the middle one.
        ind = event.ind
        if len(ind) > 1:
            ind = ind[round(len(ind)/2)]
        x, y = event.artist.get_xdata()[ind], event.artist.get_ydata()[ind]
        resp['inxs']= [ind]
        resp['data'] = (x, y)
    def onkey(event):
        print(event.key)
        K = event.key
        resp['key'] = K
    resp = {'data': [], 'inxs':[], 'key':None}
    ax_vls = []
    previous_resp = []
    while True:
        # Handlers are reconnected each pass; only the last ids are
        # disconnected after the loop.
        cid1 = fig.canvas.mpl_connect('pick_event', onclick)
        cid2 = fig.canvas.mpl_connect('key_press_event', onkey)
        if resp['key'] == 'escape':
            # Discard the selection and remove any marker lines.
            resp = { 'data': [], 'inxs':[], 'key':None}
            for l in ax_vls:
                l.remove()
            ax_vls = []
        elif resp['key'] in ['enter', 'o']:
            break
        if len(resp['inxs']) and clr and not resp['inxs'] == previous_resp:
            # A new pick: replace marker lines at the new x position.
            if ax_vls:
                for i, l in enumerate(ax_vls):
                    l.remove()
                ax_vls = []
            for ax in axes:
                ax_vls.append(ax.axvline(resp['data'][0], c=clr, lw=1))
            previous_resp = resp['inxs']
        plt.pause(.5)
    fig.canvas.mpl_disconnect(cid1)
    fig.canvas.mpl_disconnect(cid2)
    return resp, ax_vls
def get_key_resp(fig, vals):
    """Block until a key listed in *vals* is pressed on *fig*; return it.

    Digit keys are converted to int before the membership test, so
    *vals* may contain ints and/or key-name strings.
    """
    def onkey(event):
        print(event.key)
        K = event.key
        resp['key'] = K
    resp = {"responded":False, 'key':-1}
    while not resp["responded"]:
        # Reconnected every pass; only the last id is disconnected below.
        cid = fig.canvas.mpl_connect('key_press_event', onkey)
        plt.pause(.2)
        try:
            resp['key']= int(resp['key'])
        except Exception:
            pass
        if resp['key'] in vals:
            resp["responded"] = True
    fig.canvas.mpl_disconnect(cid)
    return resp['key']
|
# Copyright 2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Provide a TestPlan class to coordinate multiple feature test stages.
"""
from __future__ import annotations
from itertools import chain
from .config import Config
from .logger import LOG
from .system import System
from .test_stages import STAGES
from .ui import banner, rule, summary, yellow
class TestPlan:
    """Encapsulate an entire test run with multiple feature test stages.

    Parameters
    ----------
    config: Config
        Test runner configuration

    system: System
        Process execution wrapper

    """

    def __init__(self, config: Config, system: System) -> None:
        self._config = config
        self._system = system
        self._stages = []
        for feature in config.features:
            self._stages.append(STAGES[feature](config, system))

    def execute(self) -> int:
        """Execute the entire test run with all configured feature stages."""
        LOG.clear()
        LOG(self.intro)
        for stage in self._stages:
            LOG(stage.intro)
            stage(self._config, self._system)
            LOG(stage.outro)
        all_procs = tuple(
            chain.from_iterable(stage.result.procs for stage in self._stages)
        )
        total = len(all_procs)
        passed = len([proc for proc in all_procs if proc.returncode == 0])
        LOG(self.outro(total, passed))
        # Non-zero exit status when at least one process failed.
        return 1 if passed < total else 0

    @property
    def intro(self) -> str:
        """An informative banner to display at test run start."""
        feature_list = ', '.join(yellow(x) for x in self._config.features)
        file_count = yellow(str(len(self._config.test_files)))
        details = (
            f"* Feature stages : {feature_list}",  # noqa E501
            f"* Test files per stage : {file_count}",  # noqa E501
        )
        return banner("Test Suite Configuration", details=details)

    def outro(self, total: int, passed: int) -> str:
        """An informative banner to display at test run end."""
        result = summary("All tests", total, passed)
        return "\n" + rule() + "\n" + result + "\n"
|
import json
from constants import *
import matplotlib.pyplot as plt
import numpy as np
DEFAULT_LINKS_FILE = "paper_links.txt"
DEFAULT_OUTPUT_FILE = "json_data.json"


def plot_paper_ranks(papers, ticks=False):
    """Plot paper ranks against their index and show the figure.

    papers: a sequence of ranks (ticks=False), or a sequence of
    (name, rank) pairs whose names become x tick labels (ticks=True).
    """
    x = np.array(range(len(papers)))
    if ticks:
        y = np.array([paper[1] for paper in papers])
        my_xticks = [paper[0] for paper in papers]
        plt.xticks(x, my_xticks, rotation='vertical')
    else:
        y = papers
        plt.xlabel("Paper Indices")
    plt.plot(x, y)
    plt.title("Top " + str(len(papers)) + " papers, plotted against their rank")
    plt.ylabel("Rank")
    plt.show()


def write_list(lst, filename=DEFAULT_LINKS_FILE):
    """Write one item per line (str()-formatted) to *filename*."""
    with open(filename, 'w') as fp:
        for item in lst:
            fp.write("%s\n" % item)


def read_list(filename=DEFAULT_LINKS_FILE):
    """Return the items written by write_list, one per line.

    Fix: strip the trailing newline per line instead of using
    split('\\n')[:-1], which silently dropped the final item whenever
    the file did not end with a newline.
    """
    with open(filename, 'r') as fp:
        return [line.rstrip('\n') for line in fp]
def write_dict(output, filename="json_data.json"):
    """Serialize *output* as pretty-printed JSON to *filename*.

    Fix: the destination was hard-coded even though DEFAULT_OUTPUT_FILE
    exists; it is now a parameter defaulting to the original path, so
    existing callers are unaffected.
    """
    outputText = json.dumps(output, indent=4)
    with open(filename, 'w') as f:
        f.write(outputText)
def get_url_from_index(index):
    """Return (citation_count, s2Url) for the paper at global *index*.

    Fix: delegates to get_paper_from_index instead of duplicating the
    shard file-scanning logic.  Returns None when the index lies past
    the end of its shard (matching the original implicit behaviour).
    """
    data = get_paper_from_index(index)
    if data is None:
        return None
    return len(data["inCitations"]), data["s2Url"]
def get_paper_from_index(index):
    """Return the parsed JSON record for the paper at global *index*.

    Records are sharded into files of 1,000,000 lines; the shard path is
    RAW_DATA_PATH + str(index // 1000000) and the record sits at line
    index % 1000000.  Returns None when the shard has fewer lines.

    Fix: only the requested line is JSON-parsed — the original parsed
    every line up to the target for no benefit.
    """
    pathname = RAW_DATA_PATH + str(index // 1000000)
    target = index % 1000000
    with open(pathname, 'rb') as f:
        for line_num, line in enumerate(f):
            if line_num == target:
                return json.loads(line)
    return None
def dist(vec1, vec2):
    """Return the L1 (Manhattan) distance over the first RECORDS entries."""
    return sum(abs(vec1[i] - vec2[i]) for i in range(RECORDS))
|
from django.test import TestCase
from .models import *
from django.contrib.auth import get_user_model
User = get_user_model()
from rest_framework.test import APIClient
# Create your tests here.
class TweetTestCase(TestCase):
    """Exercise tweet creation, detail retrieval and deletion endpoints."""

    def setUp(self):
        # Two seed tweets owned by a single test user.
        self.user = User.objects.create_user(username="abc", password="pass!@#")
        Tweets.objects.create(content="tweet 1", user=self.user)
        Tweets.objects.create(content="tweet 2", user=self.user)
        self.currentCount = Tweets.objects.all().count()

    def test_tweet_created(self):
        # A tweet created directly through the ORM keeps its owner.
        tweet_data = Tweets.objects.create(content="tweet 2", user=self.user)
        self.assertEqual(tweet_data.user, self.user)

    def get_client(self):
        # Fresh authenticated DRF client for the seeded user.
        test_user = APIClient()
        test_user.login(username=self.user.username, password='pass!@#')
        return test_user

    def test_tweet_detail(self):
        test_user = self.get_client()
        # NOTE(review): path lacks a leading '/'; Django's test client
        # normally expects absolute paths — confirm against the URLconf.
        response = test_user.get("tweets/detail/1/")
        self.assertEqual(response.status_code, 200)
        data = response.json()
        tweet_id = data.get("id")
        self.assertEqual(tweet_id, 1)

    def test_tweet_delete(self):
        test_user = self.get_client()
        response = test_user.get("deleteTweet/1/delete/")
        self.assertEqual(response.status_code, 200)
        # Deleting the same tweet again should now 404.
        test_user = self.get_client()
        response = test_user.get("deleteTweet/1/delete/")
        self.assertEqual(response.status_code, 404)
        # NOTE(review): response_is_incorrect is never asserted — the
        # intended status-code check appears to be missing.
        response_is_incorrect = test_user.get("deleteTweet/2/delete/")
|
def CountVowel(word):
    """Print the input string and return a dict of per-vowel counts."""
    print ("Given String {0}".format(word))
    lowered = word.lower()
    counts = {}
    for vowel in "aeiou":
        counts[vowel] = lowered.count(vowel)
    return counts
if __name__ == '__main__':
    # Demo run; the returned count dict is discarded (only the print shows).
    CountVowel("I Love Python Programming")
|
# -*- coding: utf-8 -*-
"""
linalg.py
Functions implementing maths in linear algebra.
Function list:
dot_mod
dot_mod_as_list
mat_pow_mod
mat_pow_mod_as_list
mat_pow_sum_mod
gauss_jordan_elimination
gauss_jordan_modular_elimination
gauss_jordan_modular_elimination_as_list
gauss_jordan_elimination_with_unknown_RHS
get_integer_matrix_inverse_as_list
get_integer_matrix_inverse_as_numpy_array
@author: Jasper Wu
"""
from copy import deepcopy
import numpy as np
from sympy import Symbol, Rational
from . formula import gcd
from . modulo import inv_mod
def dot_mod(A, B, m=0):
    """matrix multiplication, avoid overflow in numpy

    When m is given, every partial sum is reduced mod m so int64
    intermediates stay small.
    """
    rows, inner, cols = len(A), len(B), len(B[0])
    C = np.zeros((rows, cols), dtype=np.int64)
    for r in range(rows):
        for c in range(cols):
            acc = 0
            for k in range(inner):
                if m:
                    acc = (acc + A[r, k] * B[k, c]) % m
                else:
                    acc += A[r, k] * B[k, c]
            C[r, c] = acc
    return C
def dot_mod_as_list(A, B, m=0):
    """matrix multiplication defined as list, avoid overflow in numpy

    Same contract as dot_mod but operates on lists of lists; partial
    sums are reduced mod m when m is given.
    """
    rows, inner, cols = len(A), len(B), len(B[0])
    C = [[0] * cols for _ in range(rows)]
    for r in range(rows):
        for c in range(cols):
            acc = 0
            for k in range(inner):
                if m:
                    acc = (acc + A[r][k] * B[k][c]) % m
                else:
                    acc += A[r][k] * B[k][c]
            C[r][c] = acc
    return C
def mat_pow_mod(mat, n, m=0):
    """Return mat**n (elementwise % m when m is given) via binary exponentiation.

    Fix: the guard rejects only negative n, so the error message now
    says "non-negative" instead of the misleading "positive" (n == 0 is
    valid and yields the identity matrix).
    """
    if n < 0:
        raise ValueError("power must be non-negative!")
    d = len(mat)
    res = np.eye(d, dtype=np.int64)
    while n:
        if n & 1:
            # Multiply the result in whenever the current bit is set.
            if m:
                res = np.mod(np.dot(res, mat), m)
            else:
                res = np.dot(res, mat)
        if m:
            mat = np.mod(np.dot(mat, mat), m)
        else:
            mat = np.dot(mat, mat)
        n >>= 1
    return res
def mat_pow_mod_as_list(mat, n, m=0):
    """Return (mat**n) % m for a matrix stored as a list of lists.

    Uses Python's arbitrary-precision ints (via dot_mod_as_list) so the
    computation cannot overflow as numpy int64 would.
    Fix: error message corrected to "non-negative" — n == 0 is accepted
    and returns the identity matrix.
    """
    if n < 0:
        raise ValueError("power must be non-negative!")
    d = len(mat)
    # Start from the identity matrix as the running product.
    res = [[0] * d for _ in range(d)]
    for i in range(d):
        res[i][i] = 1
    while n:
        if n & 1:
            res = dot_mod_as_list(res, mat, m)
        mat = dot_mod_as_list(mat, mat, m)
        n >>= 1
    return res
def mat_sum_pow_mod(A0, Q, n, m=0):
    """return (A0 + Q A0 + Q^2 A0 + ... + Q^n A0) % m"""
    # Uses the block-matrix identity
    #   [[Q, I], [O, I]]**n == [[Q**n, I + Q + ... + Q**(n-1)], [O, I]]
    # so the whole geometric series costs a single matrix power.
    # NOTE(review): the module docstring lists this as "mat_pow_sum_mod".
    if n < 0:
        raise ValueError("power must be positive!")
    if n == 0:
        return A0
    # A0 must be left-multipliable by Q.
    assert len(A0) == len(Q[0])
    if m:
        A0 = A0 % m
        Q = Q % m
    d = len(Q)
    O = np.zeros((d, d), dtype=np.int64)
    I = np.eye(d, dtype=np.int64)
    # Extended 2d x 2d matrix [[Q, I], [O, I]].
    Q_ext = np.concatenate([np.concatenate([Q, I], axis=1), np.concatenate([O, I], axis=1)])
    Q_ext_pow = mat_pow_mod(Q_ext, n, m)
    I2 = np.concatenate([I, I], axis=0)
    # Top block of Q_ext_pow @ I2 equals Q**n + (I + Q + ... + Q**(n-1)).
    res = np.dot(Q_ext_pow, I2)
    if m:
        res %= m
    res = np.dot(res, A0)
    if m:
        res %= m
    # Only the top d rows carry the series; the bottom block is auxiliary.
    return res[:d]
def gauss_jordan_elimination(coeffs):
    """
    Gauss-Jordan elimination algorithm, can only be used when there are more variables than equations
    coeffs: 2D list, all elements float
    """
    w, d = len(coeffs[0]), len(coeffs)
    coefmat = np.matrix(coeffs)
    # Forward elimination: bring the matrix to row-echelon form.
    for i in range(d):
        flag = 1
        j = i
        # Find a pivot column at or right of i; |value| <= 0.001 is
        # treated as numerically zero.
        while flag and j < d:
            if abs(coefmat[i, j]) < 0.001:
                for k in range(i+1, d):
                    if abs(coefmat[k, j]) > 0.001:
                        flag = 0
                        # Swap in the row that has a usable pivot.
                        coefmat[k], coefmat[i] = deepcopy(coefmat[i]), deepcopy(coefmat[k])
                        break
                if flag:
                    j += 1
            else:
                flag = 0
        if j == d:
            # No pivot found: remaining rows are (numerically) zero.
            break
        if coefmat[i, j] != 1:
            # Normalize the pivot to 1.
            coefmat[i] /= coefmat[i, j]
        for k in range(i+1, d):
            if coefmat[k, j]:
                coefmat[k] = coefmat[k] - coefmat[k, j] * coefmat[i]
    # Back substitution: clear the entries above each leading 1.
    for i in range(1, d):
        for j in range(w):
            if abs(coefmat[i, j]) > 0.001:
                break
        for k in range(i):
            if coefmat[k, j]:
                coefmat[k] = coefmat[k] - coefmat[k, j] * coefmat[i]
    return coefmat
def gauss_jordan_modular_elimination(coeffs, mod):
    """
    modular Gauss-Jordan elimination algorithm, can only be used when there are more variables than equations
    coeffs: 2D list, all elements integer, mod is prime
    """
    w, d = len(coeffs[0]), len(coeffs)
    coefmat = np.matrix(coeffs) % mod
    # Forward elimination over Z/mod (mod prime, so pivots are invertible).
    for i in range(d):
        flag = 1
        j = i
        while flag and j < d:
            if coefmat[i, j] == 0:
                for k in range(i+1, d):
                    if coefmat[k, j]:
                        flag = 0
                        coefmat[k], coefmat[i] = deepcopy(coefmat[i]), deepcopy(coefmat[k])
                        break
                if flag:
                    j += 1
            else:
                flag = 0
        if j == d:
            break
        if coefmat[i, j] != 1:
            # Scale the row by the modular inverse of the pivot.
            coefmat[i] *= inv_mod(coefmat[i, j], mod)
            coefmat[i] %= mod
        for k in range(i+1, d):
            if coefmat[k, j]:
                coefmat[k] = (coefmat[k] - coefmat[k, j] * coefmat[i]) % mod
    # Back substitution: clear the entries above each pivot.
    for i in range(1, d):
        for j in range(w):
            if coefmat[i, j]:
                break
        for k in range(i):
            if coefmat[k, j]:
                coefmat[k] = (coefmat[k] - coefmat[k, j] * coefmat[i]) % mod
    return coefmat % mod
def gauss_jordan_modular_elimination_as_list(coeffs, mod):
    """
    modular Gauss-Jordan elimination algorithm, can only be used when there are more variables than equations
    coeffs: 2D list, all elements integer, mod is prime
    """
    # Pure-list variant of gauss_jordan_modular_elimination (no numpy,
    # so large intermediate ints cannot overflow).
    w, d = len(coeffs[0]), len(coeffs)
    coefmat = [[x % mod for x in row] for row in coeffs]
    for i in range(d):
        flag = 1
        j = i
        while flag and j < d:
            if coefmat[i][j] == 0:
                for k in range(i+1, d):
                    if coefmat[k][j]:
                        flag = 0
                        # Swap via shallow row copies.
                        coefmat[k], coefmat[i] = coefmat[i][:], coefmat[k][:]
                        break
                if flag:
                    j += 1
            else:
                flag = 0
        if j == d:
            break
        if coefmat[i][j] != 1:
            # Scale the pivot row by the modular inverse of the pivot.
            xinv = inv_mod(coefmat[i][j], mod)
            coefmat[i] = [x * xinv % mod for x in coefmat[i]]
        for k in range(i+1, d):
            if coefmat[k][j]:
                x0 = coefmat[k][j]
                coefmat[k] = [(x - x0 * coefmat[i][l]) % mod for l, x in enumerate(coefmat[k])]
    # Back substitution: clear the entries above each pivot.
    for i in range(1, d):
        for j in range(w):
            if coefmat[i][j]:
                break
        for k in range(i):
            if coefmat[k][j]:
                x0 = coefmat[k][j]
                coefmat[k] = [(x - x0 * coefmat[i][l]) % mod for l, x in enumerate(coefmat[k])]
    return [[x % mod for x in row] for row in coefmat]
def gauss_jordan_elimination_with_unknown_RHS(coeffs):
    """
    Gauss-Jordan elimination algorithm, can only be used when there are more variables than equations
    Allow RHS to be sympy.Symbol
    coeffs: 2D list, all elements sympy.Rational or sympy.Symbol

    Fix: while searching for a replacement pivot row, the isinstance
    check previously inspected coefmat[i][j] (the already-known-zero
    pivot) instead of the candidate entry coefmat[k][j], so symbolic
    candidate entries could be compared against 0 and usable rows missed.
    """
    w, d = len(coeffs[0]), len(coeffs)
    coefmat = deepcopy(coeffs)
    for i in range(d):
        flag = 1
        j = i
        while flag and j < d:
            if isinstance(coefmat[i][j], Rational) and coefmat[i][j] == 0:
                for k in range(i+1, d):
                    # BUGFIX: examine the candidate row's entry, not the pivot's.
                    if isinstance(coefmat[k][j], Rational) and coefmat[k][j] != 0:
                        flag = 0
                        coefmat[k], coefmat[i] = deepcopy(coefmat[i]), deepcopy(coefmat[k])
                        break
                if flag:
                    j += 1
            else:
                flag = 0
        if j == d:
            break
        if coefmat[i][j] != 1:
            # Normalize the pivot to 1 (exact rational/symbolic division).
            coefmat[i] = [n / coefmat[i][j] for n in coefmat[i]]
        for k in range(i+1, d):
            if coefmat[k][j] != 0:
                coefmat[k] = [coefmat[k][x] - coefmat[k][j] * coefmat[i][x] for x in range(w)]
    # Back substitution; the last column (the symbolic RHS) is excluded
    # from the pivot search.
    for i in range(1, d):
        for j in range(w-1):
            if coefmat[i][j] != 0:
                break
        for k in range(i):
            if coefmat[k][j]:
                coefmat[k] = [coefmat[k][x] - coefmat[k][j] * coefmat[i][x] for x in range(w)]
    return coefmat
def get_integer_matrix_inverse_as_list(matrix):
    """
    get inverse of matrix by elementry row transformation
    the matrix is defined by list of list, to avoid overflow in numpy
    """
    # Fraction-free Gauss-Jordan on the augmented matrix [A | I].
    # Returns (det, M) such that inverse(A) == M / det, where det is a
    # positive common denominator (not necessarily the true determinant).
    L = len(matrix)
    # Augment each row with the corresponding identity row.
    matrix = [row + [0]*L for row in matrix]
    for r in range(L):
        matrix[r][r+L] = 1
    # handle every row
    for r in range(L):
        # Find a row at or below r with a nonzero pivot in column r.
        r2 = r
        while r2 < L and matrix[r2][r] == 0:
            r2 += 1
        if r2 == L:
            raise ValueError("Singular matrix!")
        if r2 != r:
            matrix[r], matrix[r2] = matrix[r2][:], matrix[r][:]
        # Keep the pivot positive and divide out the row gcd to slow
        # coefficient growth.
        if matrix[r][r] < 0:
            matrix[r] = [-x for x in matrix[r]]
        g = matrix[r][r]
        for c in range(r+1, 2*L):
            x = matrix[r][c]
            if x:
                g = gcd(g, abs(x))
        matrix[r] = [x // g for x in matrix[r]]
        pivot = matrix[r][r]
        # Cross-multiply to eliminate column r from every other row
        # while staying in integers.
        for r2 in range(L):
            if r2 != r and matrix[r2][r]:
                x = matrix[r2][r]
                if x:
                    g = gcd(abs(x), pivot)
                    matrix[r2] = [pivot // g * matrix[r2][c] - x // g * matrix[r][c] for c in range(2*L)]
    # reduce every row
    det = 1
    for r in range(L):
        if matrix[r][r] < 0:
            matrix[r] = [-x for x in matrix[r]]
        g = matrix[r][r]
        for c in range(L, 2*L):
            x = matrix[r][c]
            if x:
                g = gcd(g, abs(x))
        matrix[r] = [x // g for x in matrix[r]]
        g = matrix[r][r]
        # det accumulates the lcm of the diagonal entries.
        det = g // gcd(g, det) * det
    # handle diagonal
    for r in range(L):
        # Scale each row so the whole diagonal equals det.
        g = det // matrix[r][r]
        matrix[r] = [g * x for x in matrix[r]]
    # get inverse
    matrix = [row[L:] for row in matrix]
    return det, matrix
def get_integer_matrix_inverse_as_numpy_array(matrix):
    """
    get inverse of matrix by elementry row transformation
    the matrix is defined by numpy array with int64, but may still overflow when values are large
    """
    # Fraction-free Gauss-Jordan on [A | I]; returns (det, M) with
    # inverse(A) == M / det (det is a positive common denominator).
    L = len(matrix)
    I = np.eye(L, dtype=np.int64)
    matrix = np.concatenate((matrix, I), axis=1)
    # handle every row
    for r in range(L):
        # Find a row at or below r with a nonzero pivot in column r.
        r2 = r
        while r2 < L and matrix[r2, r] == 0:
            r2 += 1
        if r2 == L:
            raise ValueError("Singular matrix!")
        if r2 != r:
            matrix[r], matrix[r2] = matrix[r2].copy(), matrix[r].copy()
        # Keep the pivot positive, then divide out the row gcd to slow
        # coefficient growth.
        if matrix[r, r] < 0:
            matrix[r] *= -1
        g = matrix[r, r]
        for c in range(r+1, 2*L):
            x = matrix[r, c]
            if x:
                g = gcd(g, abs(x))
        matrix[r] //= g
        pivot = matrix[r, r]
        # Cross-multiplying (integer) elimination of column r elsewhere.
        for r2 in range(L):
            if r2 != r and matrix[r2, r]:
                x = matrix[r2, r]
                if x:
                    g = gcd(abs(x), pivot)
                    matrix[r2] = pivot // g * matrix[r2] - x // g * matrix[r]
    # reduce every row
    det = 1
    for r in range(L):
        if matrix[r, r] < 0:
            matrix[r] *= -1
        g = matrix[r, r]
        for c in range(L, 2*L):
            x = matrix[r, c]
            if x:
                g = gcd(g, abs(x))
        matrix[r] //= g
        g = int(matrix[r, r])
        # det accumulates the lcm of the diagonal entries.
        det = g // gcd(g, det) * det
    # handle diagonal
    for r in range(L):
        matrix[r] *= det // matrix[r, r]
    return det, matrix[:, L:]
|
from functions import *
from whatssApp import *
if __name__ == "__main__":
    # Voice-assistant main loop: greet the user, then repeatedly listen
    # for a spoken command and dispatch on substrings of the recognised
    # text.  Helper names (wish, takecommand, speak, OpenApps, ...) come
    # from functions.py / whatssApp.py via the star imports above.
    wish()
    #speak(takecommand())
    while 1 :
        query = takecommand().lower()
        #logic building

        ############# Open Notepad
        if "open notepad" in query:
            npath = "C:\\WINDOWS\\system32\\notepad.exe"
            os.startfile(npath)
        elif "close notepad" in query:
            speak("okay sir, closing notepad")
            os.system("taskkill /f /im notepad.exe")
        ############ Open command prompt
        elif "open command prompt" in query:
            os.system("start cmd")
        ############### Open camera
        elif "open camera" in query:
            cap = cv2.VideoCapture(0)
            while True:
                ret,img = cap.read()
                cv2.imshow('webcam',img)
                k = cv2.waitKey(50)
                if k == 27:  # Esc closes the preview
                    break
            cap.release()
            # FIX: cv2.destroyWindow() requires a window name argument;
            # destroyAllWindows() is the parameterless call intended here.
            cv2.destroyAllWindows()
        ############# screenshot
        elif "screenshot" in query:
            screenShot()
        ############play Music
        elif "play music" in query:
            music_dir = "D:\\Music\\songs"
            songs = os.listdir(music_dir)
            #rd = random.choice(songs)
            for song in songs:
                if song.endswith('.mp3'):
                    os.startfile(os.path.join(music_dir,song))
        ############## Ip address
        elif "ip address" in query:
            ip = get('https://api.ipify.org').text
            speak(f"your IP address is {ip}")
        ############## wikipedia
        elif "wikipedia" in query:
            wikipe()
        ################### Youtube saerch
        elif "open youtube" in query:
            speak("what should i search on youtube, search like, search carry minati")
            yt = takecommand()
            yt = yt.replace("search","")
            web = 'https://www.youtube.com/results?search_query='+yt
            webbrowser.open(web)
            speak("done sir")
        ################# Facebook saerch
        elif "open facebook" in query:
            webbrowser.open("www.facebook.com")
        ############## Google search
        elif "google search" in query:
            speak("sir, what should i search on google, search like,search bill gates")
            cm = takecommand()
            cm = cm.replace("search","")
            kit.search(cm)
            #webbrowser.open(f"{cm}")
            speak("done sir")
        ################ open website
        elif "open website" in query:
            speak("sir, what website should i open, search like,open stack overflow")
            cm = takecommand().lower()
            cm = cm.replace("open","")
            speak("ok sir opening website"+cm)
            site = 'https://www.'+cm+'.com'
            webbrowser.open(site)
        ############# WhattsApp Automation
        elif "send message" in query:
            speak("Tell me the name of person")
            name = takecommand().lower()
            if 'mother' in name:
                speak("Tell me the message")
                msg = takecommand()
                speak("time in hour")
                hour = int(takecommand())
                speak("time in minutes")
                min = int(takecommand())
                kit.sendwhatmsg("+917875499882",msg,hour,min,20)
        ############### Youtube play
        elif "song on youtube" in query:
            speak("what song should i play, sir ")
            y_song = takecommand().lower()
            kit.playonyt(y_song)
        ################ open telegram
        elif "close telegram" in query:
            speak("okay sir, closing telegram")
            os.system("taskkill /f /im Telegram.exe")
        ################## Repeat my word
        elif "repeat" in query:
            speak("what should i repeat,sir ?")
            tt = takecommand()
            speak(f"You said {tt}")
        ############## my location ##############
        elif "my location" in query:
            speak("wait sir, opening your location")
            webbrowser.open("https://www.google.com/maps/place/Pimpalgaon+Baswant,+Maharashtra+422209/@20.1713327,73.9669121,14z/data=!3m1!4b1!4m13!1m7!3m6!1s0x3bdddb30e9f3651d:0x59ca2dd6f1d49aa8!2sMaharashtra+422209!3b1!8m2!3d20.1833163!4d74.006509!3m4!1s0x3bdddad08f0efbc5:0xce8131b0c54d3c09!8m2!3d20.1653828!4d73.9879417")
        ################# Open apps
        elif "open app" in query:
            OpenApps()
        ############## Close apps
        elif "close app" in query:
            CloseApp()
        ############## Set Alarm
        elif "set alarm" in query:
            speak("sir please, tell me time like, set alarm to 2:30 am")
            a_t = takecommand()
            a_t = a_t.replace("set alarm to ","")
            a_t = a_t.replace(".","")
            a_t = a_t.upper()
            import MyAlarm
            MyAlarm.alarm(a_t)
        ############## say jokes
        elif "tell me joke" in query:
            joke = pyjokes.get_joke(language="en", category="neutral")
            speak(joke)
        elif "fuck" in query:
            speak("don't abuse me sir, I am your jarvis ,I am hurt.")
            speak("i am sleeping.")
            sys.exit()
        ################# Google news ###################
        elif "google news" in query:
            google_news()
        ############### news api ##################
        elif "news api" in query:
            news_api()
        ############## Dictionary ###############
        elif "dictionary" in query:
            Diction()
        ############# for shut down system
        elif "shut down the system" in query:
            os.system("shutdown /s /t 5")
        ############## restart system
        elif "restart the system" in query:
            # FIX: /r restarts the machine; the original used /s, which
            # shuts it down just like the "shut down" branch.
            os.system("shutdown /r /t 1")
        ########### sleep the system
        elif "sleep the system" in query:
            os.system("rundll32.exe powrprof.dll,SetSuspendStat"
                      "e 0,1,0")
        ############## Youtube Automation #################################
        # Separate if-chain: these run in addition to the branches above.
        if 'pause' in query:
            # NOTE(review): the `keyboard` library's key name is usually
            # 'space' — confirm 'space bar' is accepted.
            keyboard.press('space bar')
            ok_sir()
        elif 'mute' in query:
            keyboard.press('m')
            ok_sir()
        elif 'restart' in query:
            keyboard.press('0')
            ok_sir()
        elif 'skip' in query:
            keyboard.press('l')
            ok_sir()
        elif 'back' in query:
            keyboard.press('j')
            ok_sir()
        elif 'full screen' in query:
            keyboard.press('f')
            ok_sir()
        elif 'film mode' in query:
            keyboard.press('t')
            ok_sir()
        ########################################################################
        ################### Chrome automation #################################
        elif "close this tab" in query:
            keyboard.press_and_release('ctrl + w')
        elif "open new tab" in query:
            keyboard.press_and_release('ctrl + t')
        elif "open new window" in query:
            keyboard.press_and_release('ctrl + n')
        elif "history" in query:
            keyboard.press_and_release('ctrl + h')
        ########################################################################
        elif "email to chaitu" in query:
            try:
                speak("what should i say?")
                content = takecommand().lower()
                to = "Cpatil27112001@gmail.com"
                sendEmail(to, content)
                speak("Email has been send to chaitu")
            except Exception as e:
                print(e)
                speak("sorry sir, i am not able to send mail")
        elif "no thanks" in query:
            speak("thanks for using me sir.")
            sys.exit()
        ############################### Automate whattsapp ##############
        elif "open whatsapp" in query:
            speak("ok sir, opening whattsapp")
            whatAuto()
        speak("sir, do you have any other work")
|
import csv
import numpy as np
import tensorflow as tf
import pandas as pd
def test0():
    # Demonstrates a TF1.x pitfall: Variable.assign returns a Tensor, so
    # rebinding x to the result loses the Variable interface.
    x = tf.Variable(initial_value=1, dtype=tf.int32)
    print(x) # x is a Variable
    x = x.assign(1)
    print(x) # x becomes a Tensor with less capacity
def test1():
    # Reproduces a TF1.x error: rebinding x to the result of assign()
    # removes the trainable Variable from the graph, so the optimizer
    # finds no gradients to apply.
    a = tf.placeholder(dtype=tf.float32, shape=(3,))
    x = tf.Variable(initial_value=1, dtype=tf.float32)
    x = x.assign(10) # use this statement, occuring the error 'ValueError: No gradients provided for any variable,'
    # delete this statement, it works!
    cost = tf.reduce_sum(x * a * a)
    train_op = tf.compat.v1.train.AdamOptimizer(1e-2).minimize(cost)
    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        for i in range(100):
            session.run(train_op, feed_dict={a: np.array([1, 2, 3])})
            print(session.run(cost, feed_dict={a: np.array([1, 2, 3])}))
        print(session.run(x))
def test2():
    """Show that iterating a 2-D numpy array yields its rows."""
    rows = np.zeros(shape=(3, 1))
    for _, row in enumerate(rows):
        print(row)
def test3():
    """Demonstrate np.argmax over stacked 1-D arrays (column-wise winner)."""
    x = np.array([1, 12, 3])
    y = np.array([4, 5, 6])
    z = [x, y]
    # Fix: renamed from `max`, which shadowed the builtin of that name.
    max_idx = np.argmax(z, axis=0)
    print(max_idx)
def test4():
    """Round-trip a small integer matrix through CSV: csv.writer out, pandas in."""
    a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    # write
    with open('../matrix_test.csv', 'w') as csvfile:
        writer = csv.writer(csvfile)
        for row in a:
            writer.writerow(row)
    # read it
    data = pd.read_csv('../matrix_test.csv', header=None)
    data = data.to_numpy()
    print(data.shape)
    print(data)
def test5():
    """Round-trip a dict through CSV: one (key, value) row per entry."""
    pairs = dict()
    pairs['anh'] = 0
    pairs['toi'] = 1
    # write
    with open('../matrix_test.csv', 'w') as csvfile:
        writer = csv.writer(csvfile)
        for key, value in pairs.items():
            writer.writerow([key, value])
    # read
    data = pd.read_csv('../matrix_test.csv', header=None)
    for row in range(len(data)):
        a = data.at[row, 0]
        b = data.at[row, 1]
        print(f"{a} {b}")
def test6():
    """log and exp are inverses: e ** log(x) recovers x."""
    value = 4
    log_value = np.log(value)
    print(log_value)
    restored = np.e ** log_value
    print(restored)
def test7():
    """Elementwise equality rate and flattening of two 2x2 arrays."""
    first = np.array([[1, 2], [3, 4]])
    second = np.array([[1, 20], [30, 4]])
    match_rate = np.mean([first == second], axis=None)
    flat = np.concatenate(first)
    print(flat)
    print(match_rate)


test7()
|
from flask import Flask,request, url_for, redirect, render_template, jsonify
import pandas as pd
import pickle
import numpy as np
app = Flask(__name__)

# Load the trained model once at import time.
# Fix: use a context manager so the pickle file handle is closed —
# the original `pickle.load(open(...))` leaked the handle.
with open('Module 2/flaskapp/model.pkl', 'rb') as _model_file:
    model = pickle.load(_model_file)

# One-hot day-of-week encoding, in the column order used at training
# time (Fri, Mon, Sat, Sun, Thu, Tue, Wed).
day_dict = {'Fri':[1,0,0,0,0,0,0], 'Mon':[0,1,0,0,0,0,0],
            'Sat': [0,0,1,0,0,0,0], 'Sun':[0,0,0,1,0,0,0],
            'Thu':[0,0,0,0,1,0,0], 'Tue':[0,0,0,0,0,1,0],
            'Wed': [0,0,0,0,0,0,1]}
# cols = ['hour', 'is_holiday', 'day_of_week']
@app.route('/')
def home():
    """Serve the landing page containing the prediction form."""
    return render_template("index.html")
@app.route('/predict',methods=['POST','GET'])
def predict():
    """Predict the total bike ride count from the submitted form.

    Expects form fields in order: hour (int), is_holiday ('Yes'/'No'),
    day_of_week (a three-letter key of day_dict).  The feature vector
    must match the dummified training layout: hour, two holiday
    dummies, then the seven day-of-week dummies.

    Fix: removed the large commented-out Postman variant that duplicated
    this logic via request.args.
    """
    item = [x for x in request.form.values()]
    data = []
    # hour
    data.append(int(item[0]))
    # is holiday -> (not_holiday, holiday) dummy pair
    if item[1] == 'Yes':
        data.extend([0,1])
    else:
        data.extend([1,0])
    # fri, mon, sat , sun, thu, tue, wed
    data.extend(day_dict[item[2]])
    prediction = int(model.predict([data])[0])
    return render_template('index.html',pred='Total Bike ride counts on {} at {}:00 Hrs will be {}'.format(item[2], item[0],prediction))
#if __name__ == '__main__':
# app.run(host="0.0.0.0", port=config.PORT, debug=config.DEBUG_MODE)
# Entry point: run the Flask development server (debug mode; not for
# production use).
if __name__ == "__main__":
    app.run(debug=True)
from rest_framework import generics, permissions
from .serializers import CompanySerializer, ContactSerializer, ProjectSerializer, TaskSerializer
from .models import Company, Contact, Project, Task
class CompanyDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single Company (open to anyone)."""
    model = Company
    queryset = Company.objects.all()
    serializer_class = CompanySerializer
    permission_classes = [
        permissions.AllowAny
    ]
class CompanyList(generics.ListCreateAPIView):
    """List all Companies or create a new one (open to anyone)."""
    model = Company
    queryset = Company.objects.all()
    serializer_class = CompanySerializer
    permission_classes = [
        permissions.AllowAny
    ]
class ContactDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single Contact (open to anyone)."""
    model = Contact
    queryset = Contact.objects.all()
    serializer_class = ContactSerializer
    permission_classes = [
        permissions.AllowAny
    ]
class ContactList(generics.ListCreateAPIView):
    """List all Contacts or create a new one (open to anyone)."""
    model = Contact
    queryset = Contact.objects.all()
    serializer_class = ContactSerializer
    permission_classes = [
        permissions.AllowAny
    ]
class CompanyContactList(generics.ListCreateAPIView):
    """List/create Contacts belonging to the Company given by URL kwarg 'pk'."""
    model = Contact
    queryset = Contact.objects.all()
    serializer_class = ContactSerializer
    permission_classes = [
        permissions.AllowAny
    ]

    def get_queryset(self):
        # Python 3 zero-argument super(); restrict to the company in the URL.
        queryset = super().get_queryset()
        return queryset.filter(company__pk=self.kwargs.get('pk'))
class ProjectDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single Project (open to anyone)."""
    model = Project
    queryset = Project.objects.all()
    serializer_class = ProjectSerializer
    permission_classes = [
        permissions.AllowAny
    ]
class ProjectList(generics.ListCreateAPIView):
    """List all Projects or create a new one (open to anyone)."""
    model = Project
    queryset = Project.objects.all()
    serializer_class = ProjectSerializer
    permission_classes = [
        permissions.AllowAny
    ]
class CompanyProjectList(generics.ListCreateAPIView):
    """List/create Projects belonging to the Company given by URL kwarg 'pk'."""
    model = Project
    queryset = Project.objects.all()
    serializer_class = ProjectSerializer
    permission_classes = [
        permissions.AllowAny
    ]

    def get_queryset(self):
        # Python 3 zero-argument super(); restrict to the company in the URL.
        queryset = super().get_queryset()
        return queryset.filter(company__pk=self.kwargs.get('pk'))
class TaskDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single Task (open to anyone)."""
    model = Task
    queryset = Task.objects.all()
    serializer_class = TaskSerializer
    permission_classes = [
        permissions.AllowAny
    ]
class TaskList(generics.ListCreateAPIView):
    """List all Tasks or create a new one (open to anyone)."""
    model = Task
    queryset = Task.objects.all()
    serializer_class = TaskSerializer
    permission_classes = [
        permissions.AllowAny
    ]
class CompanyTaskList(generics.ListCreateAPIView):
    """List/create Tasks belonging to the Company given by URL kwarg 'pk'."""
    model = Task
    queryset = Task.objects.all()
    serializer_class = TaskSerializer
    permission_classes = [
        permissions.AllowAny
    ]

    def get_queryset(self):
        # Python 3 zero-argument super(); restrict to the company in the URL.
        queryset = super().get_queryset()
        return queryset.filter(company__pk=self.kwargs.get('pk'))
class ProjectTaskList(generics.ListCreateAPIView):
    """List/create Tasks belonging to the Project given by URL kwarg 'pk'."""
    model = Task
    queryset = Task.objects.all()
    serializer_class = TaskSerializer
    permission_classes = [
        permissions.AllowAny
    ]

    def get_queryset(self):
        # Python 3 zero-argument super(); restrict to the project in the URL.
        queryset = super().get_queryset()
        return queryset.filter(project__pk=self.kwargs.get('pk'))
|
# Licensed to Tomaz Muraus under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# Tomaz muraus licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import urllib2
import chardet
from gevent import monkey
from gevent.pool import Pool
monkey.patch_socket()
# Page and image URL templates of the DURS tax-debtor list site.
BASE_URL = 'http://seznami.gov.si/DURS/Neplacniki_files/Html%s.html'
IMAGE_LINK = 'http://seznami.gov.si/DURS/Neplacniki_files/%s'
# Pages are numbered START_NUM..END_NUM-1, zero-padded to NUM_LEN digits.
START_NUM = 0
END_NUM = 326
NUM_LEN = 4
CHUNK_SIZE = 1024 * 64  # bytes per read when streaming image downloads
# Up to 30 pages are scraped concurrently via gevent greenlets.
pool = Pool(30)
def read_page(url):
    """Fetch *url* and return its body decoded with the detected charset."""
    raw = urllib2.urlopen(url).read()
    guessed = chardet.detect(raw)
    return raw.decode(guessed['encoding'])
def parse_image_links(content):
    """Return absolute image URLs for every .jpg <img> tag in *content*.

    Fix: removed the no-op statement ``content = content``.
    """
    result = re.findall(r'<img\s+src=\'(.*\.jpg)\'\s+width="\d+"\s+/>',
                        content, re.IGNORECASE)
    if not result:
        return []
    links = [IMAGE_LINK % (name) for name in result]
    return links
def download_file(url, file_name):
    """Stream *url* to *file_name* in CHUNK_SIZE pieces.

    Fix: open the destination in binary mode ('wb') — the downloads are
    JPEGs, and text mode corrupts binary data on platforms that perform
    newline translation.
    """
    response = urllib2.urlopen(url)
    with open(file_name, 'wb') as fp:
        data = response.read(CHUNK_SIZE)
        while data:
            fp.write(data)
            data = response.read(CHUNK_SIZE)
def process_page(i):
    # Build the zero-padded page number (e.g. 7 -> '0007').
    num = str(i)
    padding = NUM_LEN - len(num)
    page_num = padding * '0' + num
    url = BASE_URL % (page_num)
    print 'Downloading images on page %s...' % (i + 1)
    content = read_page(url=url)
    image_links = parse_image_links(content=content)
    # Save each image as scraped/page_<page>-<ordinal>.jpg.
    for index, url in enumerate(image_links):
        file_name = os.path.join('scraped/', 'page_%s-%s.jpg' % (page_num,
                                                                 index + 1))
        download_file(url, file_name=file_name)
    print 'Images on page %s downloaded' % (i + 1)
def main():
    """Spawn a greenlet per page, then wait for every download to finish."""
    for page_index in range(START_NUM, END_NUM):
        pool.spawn(process_page, page_index)
    pool.join()
# Kick off the scrape as soon as the module runs.
main()
|
__author__ = 'lish'
from numpy import *
from scipy.cluster.vq import vq, kmeans, whiten
dataSet = []
# Each line of test2.txt holds a single integer; build 1-D samples.
fileIn = open('test2.txt')
for line in fileIn.readlines():
    # print line.replace('\n','')
    # lineArr = line.strip().split(',')
    # dataSet.append([int(lineArr[0]), int(lineArr[1])])
    dataSet.append([int(line.replace('\n',''))])
# dataSet=mat(dataSet)
# features = array([[ 1.9,2.3], [ 1.5,2.5],[ 0.8,0.6],[ 0.4,1.8],[ 0.1,0.1],[ 0.2,1.8], [ 2.0,0.5],[ 0.3,1.5], [ 1.0,1.0]])
# Normalize per-feature variance before clustering (expected by vq.kmeans).
whitened = whiten(dataSet)
# print whitened
# Seed the codebook with the first two whitened observations.
book = array((whitened[0],whitened[1]))
print kmeans(whitened,book,2)
# (array([[ 2.3110306 , 2.86287398], # random
# [ 0.93218041, 1.24398691]]), 0.85684700941625547)
# Cluster the raw (unwhitened) data into 2 groups for comparison.
dataSet=mat(dataSet)
codes = 2
print kmeans(dataSet,codes)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from flask import jsonify, request
from flask_restful import Resource
from flask_api import status
import requests,json
import re
"""
Class Validation for checking input fields sent through UI
SMTP class - Endpoint for retreiving and setting SMTP configuration parameter
"""
class Validation:
    """Validate SMTP-settings fields posted from the UI.

    validate() records one human-readable error per bad field in
    self.validation and removes that field from the caller's dict, so
    only clean values are forwarded.  Note the instance accumulates
    errors across calls — use a fresh Validation per request.

    Fixes: ``is None`` instead of ``== None``; raw strings for the
    regex literals; removed an unused local and a stray semicolon.
    """

    def __init__(self):
        self.validation = {}

    def validate(self, data):
        """Check *data* in place; return the dict of field errors."""
        # Iterate over a copy so entries can be deleted from `data`.
        validate_data = dict(data)
        for key in validate_data:
            if key == 'sender':
                sender = re.match(r'^\w+@[a-zA-Z_]+?\.[a-zA-Z]{2,3}$', validate_data[key])
                if sender is None:
                    self.validation['sender'] = 'Sender ID is not a valid Email Address'
                    del data[key]
            elif key == 'interval':
                try:
                    # Only parseability is checked; the message's
                    # "positive" is not actually enforced (negative
                    # values still pass) — preserved behaviour.
                    int(validate_data[key])
                except ValueError:
                    self.validation['interval'] = 'Notification Interval is not a positive number'
                    del data[key]
            elif key == 'server':
                server = re.match(r'^(https?:\/\/)?([\da-z\.-]+)\.([a-z\.]{2,6})([\/\w \.-]*)*\/?$', validate_data[key])
                if server is None:
                    self.validation['server'] = 'Server URL is not valid'
                    del data[key]
            elif key == 'hardware':
                # Value is a ';'-separated list of admin email addresses;
                # the first invalid one aborts the check for this field.
                email_add = validate_data[key].split(';')
                for e in email_add:
                    email = re.match(r'^\w+@[a-zA-Z_]+?\.[a-zA-Z]{2,3}$', e)
                    if email is None:
                        self.validation[key] = 'Entered email ' + e + ' for the ' + validate_data[key] + ' admin is not valid'
                        del data[key]
                        break
        return self.validation
class SMTP(Resource):
    """REST endpoint for reading and pushing SMTP configuration on appliances.

    GET /<media>  -- read each SMTP key from the appliance's DB.
    POST          -- validate and push submitted SMTP settings to appliances.
    DELETE        -- remove hardware-admin email addresses.

    NOTE(review): `add_req`/`show_req`/`response` are *class* attributes
    mutated by the handlers, so they are shared across instances/requests.
    NOTE(review): every backend call uses verify=False (TLS checks disabled).
    """
    ## Request to Handle SMTP Request
    add_req = {'params':{'overwrite':'false','args':'','method':''},'type':'alertmanager'}
    show_req = {'params':{'subgroupId':'setting','groupId':'User','operation':'DB_GET_KEY_VALUE','key':[]},'type':'db'}
    response = {}
    headers = {'Content-Type':'application/json'}
    smtp_keys = ['SMTP','SENDER_ID','HARDWARE_ADMINS','SOFTWARE_ADMINS','ACCT','PASS','NOTIFICATION_INTERVAL']
    def get(self, media):
        """Fetch every SMTP-related key from the appliance at *media*."""
        self.response = {}
        for key in self.smtp_keys:
            self.show_req['params']['key'] = [key]
            data = json.dumps(self.show_req)
            res = requests.post('https://'+media+':8446/ascws/jobmanager/job', data=data, headers=self.headers, verify=False)
            res_json = res.json()
            self.response[key] = {'status':res_json['status'],'value':res_json['keyValuePair'][key]}
        return self.response, status.HTTP_200_OK
    def post(self):
        """Validate submitted SMTP settings and push them to each appliance."""
        self.response = {'messages':[],'error':[]}
        smtp = request.get_json(force=True)['smtp']
        media = request.get_json(force=True)['media']
        # Validation deletes invalid keys from `smtp` and reports the errors.
        error = Validation().validate(smtp)
        self.response['error'] = error
        for appl in media:
            # BUG FIX: `skip` must reset per appliance; previously it stayed
            # True after the first one, so later appliances never received
            # the server/username/password settings.
            skip = False
            for key in smtp:
                if key == 'sender':
                    self.add_req['params']['method'] = 'asemail'
                    self.add_req['params']['args'] = smtp[key]
                elif key == 'interval':
                    self.add_req['params']['method'] = 'interval'
                    self.add_req['params']['args'] = smtp[key]
                elif key == 'server' or key == 'username' or key == 'password':
                    # The three credentials are pushed together exactly once.
                    if skip:
                        continue
                    self.add_req['params']['method'] = 'asmtp'
                    self.add_req['params']['args'] = smtp['server'] + ' ' + smtp['username'] + ' ' + smtp['password']
                    skip = True
                elif key == 'hardware':
                    self.add_req['params']['method'] = 'aemail'
                    self.add_req['params']['args'] = smtp[key]
                data = json.dumps(self.add_req)
                res = requests.post('https://'+appl+':8446/ascws/jobmanager/job', data=data, headers=self.headers, verify=False)
                res_json = res.json()
                if 'statusMessage' in res_json:
                    self.response['messages'].append(res_json['statusMessage'])
                elif 'responses' in res_json:
                    self.response['messages'].append(res_json['responses'][0]['statusMessage'])
                else:
                    self.response['messages'].append('Successful added Software Admins')
        return self.response, status.HTTP_200_OK
    def delete(self):
        """Delete the hardware-admin email addresses from each appliance."""
        self.response = {'messages':[]}
        smtp = request.get_json(force=True)['smtp']
        media = request.get_json(force=True)['media']
        for appl in media:
            for key in smtp:
                if key == 'hardware':
                    self.add_req['params']['method'] = 'demail'
                    self.add_req['params']['args'] = smtp['hardware']
                    data = json.dumps(self.add_req)
                    res = requests.post('https://'+appl+':8446/ascws/jobmanager/job', data=data, headers=self.headers, verify=False)
                    res_json = res.json()
                    # BUG FIX: loop variable was named `status`, shadowing the
                    # imported flask_api `status` module and breaking the
                    # `status.HTTP_200_OK` lookup in the return below.
                    for resp in res_json['responses']:
                        self.response['messages'].append(resp['statusMessage'])
                elif key == 'software':
                    continue
        return self.response, status.HTTP_200_OK
|
"""fn
Description:
`zet` is a mess
use `fn` to generate a file name.
use `fn -r [dir]` to get the most recent file (in dir).
more options listed below.
Usage:
zet hen list <adr>
Options:
-h --help show this screen.
--version show version.
"""
from sys import exit as pexit
from sys import stderr
from traceback import print_exc
from pprint import pprint as pp
from docopt import docopt
from zet.zet import Zet
def main():
    """Parse CLI arguments with docopt and pretty-print each pinned entry."""
    args = docopt(__doc__, version='zet 0.0.1')
    zet = Zet(args['<adr>'])
    try:
        for o in zet.pin_all():
            pp(o)
    except ValueError as e:
        # Expected/validation failures: short message, exit code 1.
        print('err: ' + str(e), file=stderr)
        pexit(1)
    except Exception:
        # Unexpected failures: full traceback, exit code 2.
        # (Fix: the exception object was bound to `e` but never used.)
        print_exc(file=stderr)
        pexit(2)
if __name__ == '__main__':
    main()
|
from flask import Flask
from flask.ext.restplus import Api
from flask.ext.restplus import fields
#from International_Cuisine_Clustering.ipynb import similar_cuisines
app = Flask(__name__)
# Flask-RESTPlus API wrapper: provides Swagger UI, versioning and namespaces.
api = Api(
    app,
    version='1.0',
    title='Cuisine Predictor',
    description='Recommend similar cuisines')
ns = api.namespace('cuisine predictor',
                   description='Predict similar cuisines')
# Request parser: a single required free-text form field.
parser = api.parser()
parser.add_argument(
    'What is your favorite cuisine?',
    type=str,
    required=True,
    help='Input your favorite cuisine to get a recommendation of a similar cuisine',
    location='form')
################################################
# Response schema used to marshal handler results.
resource_fields = api.model('Resource', {
    'result': fields.String,
})
from flask.ext.restplus import Resource
@ns.route('/')
class CreditApi(Resource):
    """POST handler that scores the submitted fields with a pickled model.

    NOTE(review): this class looks copy-pasted from a credit-scoring
    service -- it reads credit-risk fields (DebtRatio, MonthlyIncome, ...)
    that the cuisine parser above never declares, and `joblib` is used
    below without being imported, so a real POST will raise at runtime
    (KeyError/NameError).  Confirm the intended behavior.
    """
    @api.doc(parser=parser)
    @api.marshal_with(resource_fields)
    def post(self):
        # Parse the declared form arguments and score them.
        args = parser.parse_args()
        result = self.get_result(args)
        return result, 201
    def get_result(self, args):
        # Pull the model features out of the parsed arguments.
        debtRatio = args["DebtRatio"]
        monthlyIncome = args["MonthlyIncome"]
        dependents = args["NumberOfDependents"]
        openCreditLinesAndLoans = args["NumberOfOpenCreditLinesAndLoans"]
        pastDue30Days = args["NumberOfTime30-59DaysPastDueNotWorse"]
        pastDue60Days = args["NumberOfTime60-89DaysPastDueNotWorse"]
        pastDue90Days = args["NumberOfTimes90DaysLate"]
        realEstateLoansOrLines = args["NumberRealEstateLoansOrLines"]
        unsecuredLines = args["RevolvingUtilizationOfUnsecuredLines"]
        age = args["age"]
        from pandas import DataFrame
        # Single-row feature frame, in the column order the model expects.
        df = DataFrame([[
            debtRatio,
            monthlyIncome,
            dependents,
            openCreditLinesAndLoans,
            pastDue30Days,
            pastDue60Days,
            pastDue90Days,
            realEstateLoansOrLines,
            unsecuredLines,
            age
        ]])
        # NOTE(review): `joblib` is not imported anywhere in this module.
        clf = joblib.load('model/nb.pkl');
        result = clf.predict(df)
        # class 1.0 => deny, anything else => approve
        if(result[0] == 1.0):
            result = "deny"
        else:
            result = "approve"
        return {
            "result": result
        }
if __name__ == '__main__':
    # Run the Flask development server (debug mode; not for production).
    app.run(debug=True)
# -*- coding:utf-8 -*-
import logging
import os
import zipfile
from urllib.parse import urlparse
import numpy as np
import tensorflow as tf
from pai_tf_predict_proto import tf_predict_pb2
from com.aliyun.api.gateway.sdk import client
from com.aliyun.api.gateway.sdk.common import constant
from com.aliyun.api.gateway.sdk.http import request
def zip_file(src_dir):
    """Create `<src_dir>.zip` containing every file under *src_dir*.

    Entry names are relative to *src_dir* (the root's own files sit at the
    top level of the archive).
    """
    archive_path = src_dir + '.zip'
    with zipfile.ZipFile(archive_path, 'w', zipfile.ZIP_DEFLATED) as archive:
        for dirpath, _dirnames, filenames in os.walk(src_dir):
            # Archive-relative directory prefix ('' for the root itself).
            rel = dirpath.replace(src_dir, '')
            rel = rel + os.sep if rel else ''
            for filename in filenames:
                archive.write(os.path.join(dirpath, filename), rel + filename)
def get_last_meta_path(save_path):
    """Return the .meta checkpoint file with the highest global step.

    *save_path* is "<dir>/<model_name>"; candidates look like
    "<model_name>-<step>.meta" inside <dir>.  Returns None when no
    matching .meta file exists.
    """
    path = "/".join(save_path.split("/")[:-1])  # checkpoint directory
    model_name = save_path.split("/")[-1]  # checkpoint file prefix
    meta_file_info = {}  # step number -> meta file path
    for file_name in os.listdir(path):
        if file_name.find(model_name) == 0 and len(file_name) > 5 and file_name[-5:] == ".meta":
            step_str = file_name[:-5].split("-")[-1]
            try:
                meta_file_info[int(step_str)] = os.path.join(path, file_name)
            except ValueError as e:
                logging.error(e, exc_info=1)
                # Unparsable step suffix: still count the file, as step 0.
                meta_file_info[0] = os.path.join(path, file_name)
    if not meta_file_info:
        return None
    meta_keys = list(meta_file_info.keys())
    meta_keys.sort()
    return meta_file_info[meta_keys[-1]]
def get_saver_and_last_step(meta_path, sess):
    """Restore the graph and weights at *meta_path* into *sess*.

    Returns (saver, last_global_step).  (None, -1) when *meta_path* is
    None; (saver, -1) when the step cannot be parsed from the file name.
    """
    if meta_path is None:
        return None, -1
    else:
        # Import the graph structure, then restore the variable values.
        saver = tf.train.import_meta_graph(meta_path)
        saver.restore(sess, meta_path[:-5])
        try:
            # Checkpoints are named "<prefix>-<step>.meta".
            return saver, int(meta_path[:-5].split("-")[-1])
        except ValueError as e:
            logging.error(e, exc_info=1)
            return saver, -1
class LearningRate(object):
    """Trivial learning-rate schedule: a fixed base rate decayed once by 5%."""
    def __init__(self):
        # Number of times the rate was requested (currently unused).
        self._count = 0
        # Base learning rate.
        self._init = 0.01
    def get_learning_rate(self):
        """Return the constant decayed rate (0.01 * 0.95)."""
        decay = 0.95
        return decay * self._init
class LinearFit(object):
    """Toy 1-D linear regression (y = w*x + b) trained with TF1 graph-mode SGD.

    Checkpoints live under models_meta/LinearFit next to this file so
    training can resume from the latest saved global step.
    """
    def __init__(self):
        self.sess = None
        self.learning_rate_manager = LearningRate()
        self.save_path = os.path.join(os.path.dirname(__file__), "models_meta", self.__class__.__name__)
        if not os.path.exists(self.save_path):
            os.makedirs(self.save_path, exist_ok=True)
        # Dataset: y = 20x + 3 plus small Gaussian noise.
        train_data_size = 10000
        self.train_data_x = np.random.rand(train_data_size) * 10  # values in [0, 10)
        self.train_data_y = 20 * self.train_data_x + 3 + np.random.normal(loc=0, scale=0.1, size=(train_data_size,))
        self.test_data_x = np.arange(0, 10)
        self.test_data_y = 20 * self.test_data_x + 3
    @staticmethod
    def batch_data(x, y, size=128, last_cursor=None):
        # Return the next (x, y) mini-batch; (None, None) signals exhaustion.
        if last_cursor is None:
            return x[:size], y[:size]
        else:
            if last_cursor + size >= x.shape[0]:
                return None, None
            return x[last_cursor: last_cursor + size], y[last_cursor:last_cursor + size]
    @staticmethod
    def build():
        # Graph definition / parameters.
        tf_x = tf.placeholder(tf.float32, name="x")
        tf_y = tf.placeholder(tf.float32, name="y")
        tf_w = tf.Variable(0.0, name="w", )
        tf_b = tf.Variable(0.0, name="b", )
        tf_learning_rate = tf.Variable(0.01, name="learning_rate")
        tf_y_predict = tf.multiply(tf_x, tf_w) + tf_b
        # 0.5 * squared-error loss (named cross_entropy in the collections).
        cross_entropy = tf.reduce_mean(tf.multiply(tf.square(tf_y - tf_y_predict), 0.5))
        train_step = tf.train.GradientDescentOptimizer(tf_learning_rate).minimize(cross_entropy)
        # Register tensors in collections so a restored graph can find them.
        tf.add_to_collection("inputs", tf_x)
        tf.add_to_collection("inputs", tf_y)
        tf.add_to_collection("outputs", tf_y_predict)
        tf.add_to_collection("outputs", cross_entropy)
        tf.add_to_collection("outputs", train_step)
    def train(self):
        """Train for 10 epochs, checkpointing every 10 global steps."""
        if self.sess is None:
            self.sess = tf.InteractiveSession()
        saver, last_step = get_saver_and_last_step(get_last_meta_path(self.save_path), self.sess)
        if saver is None:
            # No checkpoint found: build and initialise a fresh model.
            print(" init models ...")
            self.build()
            self.sess.run(tf.global_variables_initializer())
            self.sess.run(tf.local_variables_initializer())
            saver = tf.train.Saver()
        else:
            print(" restoring models ...")
        tf_x, tf_y = tf.get_collection('inputs')
        tf_y_predict, cross_entropy, train_step = tf.get_collection("outputs")
        graph = tf.get_default_graph()
        tf_w = graph.get_tensor_by_name("w:0")
        tf_b = graph.get_tensor_by_name("b:0")
        tf_learning_rate = graph.get_tensor_by_name("learning_rate:0")
        print("w is {}, b is {}".format(self.sess.run(tf_w), self.sess.run(tf_b)))
        batch_size = 1000
        global_step = last_step
        for i in range(10):
            train_data_cursor = 0
            while True:
                batch_x, batch_y = self.batch_data(self.train_data_x, self.train_data_y, batch_size, train_data_cursor)
                train_data_cursor = train_data_cursor + batch_size
                if batch_x is None and batch_y is None:
                    break
                self.sess.run(train_step, feed_dict={tf_x: batch_x,
                                                     tf_y: batch_y,
                                                     tf_learning_rate: self.learning_rate_manager.get_learning_rate()})
                global_step += 1
                if global_step % 10 == 0:
                    saver.save(self.sess, self.save_path, global_step=global_step)
        print("w is {}, b is {}".format(self.sess.run(tf_w), self.sess.run(tf_b)))
        print("cross is {}".format(self.sess.run(tf.reduce_mean(
            self.sess.run(cross_entropy, feed_dict={tf_x: self.test_data_x, tf_y: self.test_data_y})
        ))))
        self.sess.close()
    def build_simple_model(self, export_dir: str):
        """Restore the latest checkpoint and export it as a SavedModel (x -> y)."""
        sess = tf.InteractiveSession()
        saver, last_step = get_saver_and_last_step(get_last_meta_path(self.save_path), sess)
        tf_x, tf_y = tf.get_collection('inputs')
        tf_y_predict, cross_entropy, train_step = tf.get_collection("outputs")
        graph = tf.get_default_graph()
        tf_w = graph.get_tensor_by_name("w:0")
        tf_b = graph.get_tensor_by_name("b:0")
        tf.saved_model.simple_save(
            session=sess,
            export_dir=export_dir,
            inputs={"x": tf_x},
            outputs={"y": tf_y_predict},
        )
        sess.close()
    def build_complex_model(self, export_dir: str):
        """Restore the latest checkpoint, add an extra input, and export it."""
        sess = tf.InteractiveSession()
        saver, last_step = get_saver_and_last_step(get_last_meta_path(self.save_path), sess)
        tf_x, tf_y = tf.get_collection('inputs')
        tf_y_predict, cross_entropy, train_step = tf.get_collection("outputs")
        graph = tf.get_default_graph()
        tf_w = graph.get_tensor_by_name("w:0")
        tf_b = graph.get_tensor_by_name("b:0")
        # Adjust the model: add an extra additive input d on top of the prediction.
        tf_d = tf.placeholder(tf.float32, name="d")
        new_y = tf_y_predict + tf_d
        tf.saved_model.simple_save(
            session=sess,
            export_dir=export_dir,
            inputs={"x": tf_x, "d": tf_d},
            outputs={"y": new_y},
        )
        sess.close()
    def serving(self, saved_model_dir: str):
        """Run the serving process (not implemented)."""
        pass
class PAIClientDemo(object):
    """Demo client calling a PAI-EAS deployed model through Aliyun API Gateway."""
    app_key = 'xxx'
    app_secret = 'xxx'
    @staticmethod
    def predict(url, app_key, app_secret, request_data):
        """POST the serialized protobuf to *url*; return (status, headers, body)."""
        cli = client.DefaultClient(app_key=app_key, app_secret=app_secret)
        body = request_data
        url_ele = urlparse(url)
        host = 'https://' + url_ele.hostname
        path = url_ele.path
        req_post = request.Request(host=host, protocol=constant.HTTP, url=path, method="POST", time_out=6000)
        req_post.set_body(body)
        req_post.set_content_type(constant.CONTENT_TYPE_STREAM)
        stat, header, content = cli.execute(req_post)
        return stat, dict(header) if header is not None else {}, content
    def simple(self, x: float):
        # Endpoint URL of the deployed simple model (copied from its console page).
        url = 'https://xxxx-cn-shenzhen.alicloudapi.com/EAPI_1372988890346240_demo_simple'
        # Build the request protobuf.
        _request = tf_predict_pb2.PredictRequest()
        _request.signature_name = 'serving_default'
        _request.inputs['x'].dtype = tf_predict_pb2.DT_FLOAT  # input tensor dtype
        _request.inputs['x'].float_val.extend([x])
        # Serialize the protobuf to a byte string for transport.
        request_data = _request.SerializeToString()
        stat, header, content = self.predict(url, self.app_key, self.app_secret, request_data)
        if stat != 200:
            print('Http status code: ', stat)
            print('Error msg in header: ', header['x-ca-error-message'] if 'x-ca-error-message' in header else '')
            print('Error msg in body: ', content)
        else:
            response = tf_predict_pb2.PredictResponse()
            response.ParseFromString(content)
            print(response)
    def complex(self, x: float, d: float):
        # Endpoint URL of the deployed complex model (copied from its console page).
        url = "https://xxxx-cn-shenzhen.alicloudapi.com/EAPI_1372988890346240_demo_complex"
        # Build the request protobuf with both inputs.
        _request = tf_predict_pb2.PredictRequest()
        _request.signature_name = 'serving_default'
        _request.inputs['x'].dtype = tf_predict_pb2.DT_FLOAT  # input tensor dtype
        _request.inputs['x'].float_val.extend([x])
        _request.inputs['d'].dtype = tf_predict_pb2.DT_FLOAT  # input tensor dtype
        _request.inputs['d'].float_val.extend([d])
        # Serialize the protobuf to a byte string for transport.
        request_data = _request.SerializeToString()
        stat, header, content = self.predict(url, self.app_key, self.app_secret, request_data)
        if stat != 200:
            print('Http status code: ', stat)
            print('Error msg in header: ', header['x-ca-error-message'] if 'x-ca-error-message' in header else '')
            print('Error msg in body: ', content)
        else:
            response = tf_predict_pb2.PredictResponse()
            response.ParseFromString(content)
            print(response)
def build_model(_export_dir: str):
    """Export the trained simple model and bundle the directory into a .zip."""
    if not os.path.exists(_export_dir):
        os.makedirs(_export_dir, exist_ok=True)
    model = LinearFit()
    model.build_simple_model(export_dir=_export_dir)
    zip_file(_export_dir)
def call_simple_server(x: float):
    # Thin helper: send x to the deployed simple-model endpoint.
    PAIClientDemo().simple(x=x)
def build_complex_model(_export_dir: str):
    """Export the trained complex model and bundle the directory into a .zip."""
    if not os.path.exists(_export_dir):
        os.makedirs(_export_dir, exist_ok=True)
    model = LinearFit()
    model.build_complex_model(export_dir=_export_dir)
    zip_file(_export_dir)
def call_complex_server(x: float, d: float):
    # Thin helper: send (x, d) to the deployed complex-model endpoint.
    PAIClientDemo().complex(x=x, d=d)
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Load the MNIST dataset (one-hot labels)
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# learning rate
learning_rate = 0.001
# total number of training steps
train_step = 10000
# samples fed per training step
batch_size = 100
# print/log interval (in steps)
displayer_step = 100
# elements per frame vector (pixels per image row)
frame_size = 28
# frames per sample (rows per image)
sequence_num = 28
# hidden units in the RNN cell
hidden_size = 100
# number of classes: digits 0-9
n_class = 10
# Reset the default computation graph
tf.reset_default_graph()
# Network input x_data
# Placeholder: model input (flattened 28x28 image)
x_data = tf.placeholder(tf.float32, [None, frame_size * sequence_num], name='input')
"""
目标输出值
占位符:模型目标输出
"""
# Placeholder: target one-hot labels
y_data = tf.placeholder(tf.float32, [None, n_class])
"""
输出层神经元权值
权值 tf.truncated_normal(shape=[hidden_size,n_class]) 符合正态分布
"""
# Output-layer weights, initialised from a truncated normal distribution
weight = tf.Variable(tf.truncated_normal(shape=[hidden_size, n_class]))
"""
偏置项 设置为全0
输出层神经元阈值
"""
# Output-layer bias, initialised to zeros
bias = tf.Variable(tf.zeros(shape=[n_class]))
"""
RNN 网络搭建
"""
# Reshape samples into (batch, sequence, frame)
x = tf.reshape(x_data, shape=[-1, sequence_num, frame_size])
# Hidden recurrent layer with 100 units
rnn_cell = tf.nn.rnn_cell.BasicRNNCell(100)
# Unroll the RNN over the input sequence
output, state = tf.nn.dynamic_rnn(rnn_cell, x, dtype=tf.float32)
"""
只需要最后一个输出
y 整个网络的输出值
"""
# Classify from the last time step's output only
y = tf.nn.softmax(tf.matmul(output[:, -1, :], weight) + bias, name='output')
"""
交叉熵 cross_entropy
reduce_mean 均值
"""
# NOTE(review): softmax_cross_entropy_with_logits expects raw logits, but
# `y` is already softmaxed above (double softmax) -- confirm intended.
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_data, logits=y))
"""
优化 AdamOptimizer 适量梯度优化器
训练节点
"""
# Training op: Adam optimizer minimising the loss
train = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)
# Accuracy: fraction of samples whose argmax prediction matches the label
acc = tf.reduce_mean(tf.to_float(tf.equal(tf.argmax(y, 1), tf.arg_max(y_data, 1))))
# Saver for persisting the trained model
saver = tf.train.Saver()
# Start a session
sess = tf.Session()
# Initialise all variables
sess.run(tf.global_variables_initializer())
"""
训练 10000步
"""
step = 1
while step < train_step:
    # fetch a training batch
    x_s, y_s = mnist.train.next_batch(batch_size)
    # one optimisation step
    loss, _ = sess.run([cross_entropy, train], feed_dict={x_data: x_s, y_data: y_s})
    if step % displayer_step == 0:
        # training accuracy on the current batch
        acc_tr, loss = sess.run([acc, cross_entropy], feed_dict={x_data: x_s, y_data: y_s})
        print('第', step, '次训练', '训练精度', acc_tr, '交叉熵损失项', loss, )
    step += 1
# Evaluate accuracy on the held-out test set
acc_te = sess.run(acc, feed_dict={x_data: mnist.test.images, y_data: mnist.test.labels}) # model test accuracy
print('模型在测试集上的预测精度:', acc_te)
# Save the trained model
saver.save(sess, 'model/softmax_model')
sess.close()
|
frase = input("Ingrese una frase: ")
letrita = input("\nIngrese letra a buscar: ")
# Show the phrase with blank spaces removed (display only).
print(frase.replace(' ', ''))
# Count how many times the requested letter occurs in the original phrase.
contador = sum(1 for letra in frase if letra == letrita)
if contador == 0:
    print("No aparece en la frase la letra ", letrita)
elif contador == 1:
    print(contador, "vez se repite dentro de la frase")
else:
    print(contador, "veces se repite dentro de la frase")
|
# C delimits dependent statements with {}.
# Python uses indentation instead => four spaces per level.
# time = float(input("현재 시간 : "))
# if time >= 18.5:
# print("집에가자~")
# else:
# print("공부합시다.^^")
# print() normally ends with "\n"; the end= parameter overrides it.
age = int(input("당신의 나이를 입력 : "))
if age > 19:
    print("당신은 성인",end="☆")
else:
    print("당신은 미성년자",end="☆")
print("입니다.")
money = True # bool type: the computer's yes/no answers (True, False)
print("케이크 집에 가서 ",end="")
if money:
    print("먹는다")
    print("냠냠")
else:
    print("손가락만 빨고 있어ㅜ")
num = int(input("정수 입력 : "))
#print("%d"%포맷코드, end="")
if num < 0:
    print("%d는 음의 정수"%num, end="")
else:
    print("%d는 양의 정수"%num, end="")
print("입니다.")
|
# flake8: noqa
import os
import django_heroku
import requests
from .base import *
# Production settings: secrets and hosts come from the environment.
SECRET_KEY = os.environ.get("SECRET_KEY")
# NOTE(review): bool("...") is True for ANY non-empty string, so setting
# DEBUG=false in the environment still enables debug mode -- confirm.
DEBUG = bool(os.environ.get("DEBUG", False))
ALLOWED_HOSTS = [
    # Change me!
    "ocloud-backend.herokuapp.com",
    "o-cloud.com",
]
if "ALLOWED_HOST" in os.environ:
    ALLOWED_HOSTS.append(os.environ.get("ALLOWED_HOST"))
try:
    # On EC2, whitelist the instance's private IP via the metadata service.
    EC2_IP = requests.get("http://169.254.169.254/latest/meta-data/local-ipv4").text
    ALLOWED_HOSTS.append(EC2_IP)
except requests.exceptions.RequestException:
    # Not running on EC2 (or metadata unreachable): skip silently.
    pass
INSTALLED_APPS = INSTALLED_APPS + ["storages"]
# Secure connection: health checks stay on plain HTTP.
SECURE_REDIRECT_EXEMPT = [r"/?health"]
# Uploaded files storage (private S3 bucket)
DEFAULT_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
AWS_STORAGE_BUCKET_NAME = os.environ.get("MEDIA_BUCKET")
AWS_DEFAULT_ACL = "private"
AWS_S3_REGION_NAME = ""
AWS_S3_SIGNATURE_VERSION = "s3v4"
DEFAULT_FROM_EMAIL = "contact@o-cloud.com"
# Logging
LOGGING["formatters"] = {
    "aws": {
        "format": "[%(levelname)s] [%(name)s] %(message)s",
        "datefmt": "%Y-%m-%d %H:%M:%S",
    }
}
# Caching (Redis)
CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": os.environ.get("REDIS_URL"),
        "OPTIONS": {"CLIENT_CLASS": "django_redis.client.DefaultClient",},
    }
}
# Static files
STATICFILES_STORAGE = "core.storage.ManifestStorage"
# Sentry error reporting (only when a DSN is configured)
if "SENTRY_DSN" in os.environ:
    import sentry_sdk
    from sentry_sdk.integrations.django import DjangoIntegration
    sentry_sdk.init(
        dsn=os.environ.get("SENTRY_DSN"),
        integrations=[DjangoIntegration()],
        environment=os.environ.get("ENVIRONMENT"),
        release=os.environ.get("VERSION"),
    )
# Celery config (broker and result backend share the Redis instance)
BROKER_URL = os.environ["REDIS_URL"]
CELERY_RESULT_BACKEND = os.environ["REDIS_URL"]
django_heroku.settings(locals())
CORS_ALLOW_ALL_ORIGINS = True
|
# Print the multiplication table of n from 1x to 10x.
n = int(input("Please enter a positive integer : "))
for m in range(1, 11):
    print(n, "x", m, "=", n * m)
# -*- coding: utf-8 -*-
import codecs
import os
topics = {}
# Post-process predictions: wherever a predicted topic is 'NULL', borrow the
# gold-annotated topic whose emotion label matches, and tally how often each
# borrowed topic is used.
with codecs.open('test_predict_new.csv', 'w', 'utf8') as writer:
    with codecs.open('test_predict.csv', 'r', 'utf8') as predict_reader:
        with codecs.open('test_pair', 'r', 'utf8') as pair_reader:
            # skip the CSV header line
            predict_reader.readline()
            for rline in predict_reader:
                rline = rline.strip().split(',')
                rtopics = rline[2].split(';')
                remotions = rline[3].split(';')
                # matching gold-annotation line (tab separated)
                aline = pair_reader.readline()
                aline = aline.strip().split('\t')
                if len(aline) == 3:
                    atopics = aline[1].split(';')
                    aemotions = aline[2].split(';')
                    i = 0
                    for rtopic in rtopics:
                        if rtopic == 'NULL':
                            j = 0
                            for aemotion in aemotions:
                                if remotions[i] == aemotion:
                                    # borrow only non-trivial annotated topics
                                    if len(atopics[j]) > 1:
                                        rtopics[i] = atopics[j]
                                        if atopics[j] not in topics:
                                            topics[atopics[j]] = 0
                                        topics[atopics[j]] += 1
                                j += 1
                        i += 1
                rline[2] = ';'.join(rtopics)
                writer.write(','.join(rline) + '\n')
lists = []
for key, val in topics.items():
    lists.append((val, key))
# most frequent borrowed topics first
lists = sorted(lists, reverse=True)
with codecs.open('topic_freq', 'w', 'utf8') as writer:
    for val, key in lists:
        writer.write('%5d %s\n' % (val, key))
|
# -*- coding: utf-8 -*-
class Solution:
    """Goal Parser interpreter: 'G' -> 'G', '()' -> 'o', '(al)' -> 'al'."""
    def interpret(self, command: str) -> str:
        """Scan *command* left to right, expanding each recognised token."""
        pieces = []
        i = 0
        length = len(command)
        while i < length:
            if command.startswith("()", i):
                pieces.append("o")
                i += 2
            elif command.startswith("(al)", i):
                pieces.append("al")
                i += 4
            else:
                pieces.append(command[i])
                i += 1
        return "".join(pieces)
if __name__ == "__main__":
solution = Solution()
assert "Goal" == solution.interpret("G()(al)")
assert "Gooooal" == solution.interpret("G()()()()(al)")
assert "alGalooG" == solution.interpret("(al)G(al)()()G")
|
import os
from .ImageCaptionsDataset import ImageCaptionsDataset
def Sydney(data_dir: str, transform=None):
    """Build the Sydney captions dataset from its modified JSON annotation file."""
    annotations = os.path.join(data_dir, 'dataset_sydney_modified.json')
    return ImageCaptionsDataset(file_path=annotations, transform=transform)
|
a=input().split(" ")
for i in range(len(a)):
a[i]=int(a[i])
small=min(a)
big=max(a)
a.remove(small)
a.remove(big)
small=min(a)
big=max(a)
print(big,small)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
from revolver import command, package
from revolver import contextmanager as ctx
from revolver import directory as dir
from revolver.core import sudo, run
_VERSION = '2.4'
_OPTIONS = ''
def install(version=_VERSION, options=_OPTIONS):
    """Build redis *version* from source in a temp dir and `make install` it.

    NOTE(review): the clone uses --depth 1, so `git checkout <version>`
    can only reach the cloned tip -- confirm this works for older tags.
    """
    package.ensure(['git-core', 'build-essential'])
    tmpdir = dir.temp()
    try:
        with ctx.cd(tmpdir):
            run('git clone git://github.com/antirez/redis.git ./ --depth 1')
            run('git checkout %s' % version)
            run('make %s > /dev/null' % options)
            sudo('make install')
    finally:
        # Always clean up the build directory, even when the build fails.
        dir.remove(tmpdir, recursive=True)
def ensure(version=_VERSION, options=_OPTIONS):
    """Install redis unless a redis-server binary already exists."""
    # TODO Check whether the installed version fulfils *version*
    if not command.exists('redis-server'):
        install(version, options)
|
#1.jointplot, 2.pairsplots, 3.heatmaps
#jointplot
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Built-in example dataset: restaurant bills and tips.
tips = sns.load_dataset('tips')
sns.jointplot(x='total_bill', y='tip', data=tips)
plt.show()
# Same joint plot with a kernel-density estimate instead of scatter points.
sns.jointplot(x='total_bill', y='tip', data=tips, kind='kde')
plt.show()
#pairplot
sns.pairplot(tips)
plt.show()
#using pairplot with hue='sex'
sns.pairplot(tips, hue='sex')
plt.show()
#heatmap
#sns.heatmap('total_bill', 'tip', 'size')
#plt.show()
# NOTE(review): despite the name, this is just the raw dataframe, not a
# covariance matrix (and "covarience" is a typo kept for the variable name).
covarience=tips
print(covarience)
import numpy as np
# Heatmap over random 4x4 data as a stand-in for a covariance matrix.
data_covarience=np.random.rand(4,4)
sns.heatmap(data_covarience)
plt.title('covarience')
plt.show()
from django.urls import path
from . import views
# URL routes for the theatre app: each path maps to a view in views.py.
urlpatterns = [
    path('', views.index, name = 'index'),
    path('performance/', views.performance, name='performance'),
    path('contact/', views.contact, name='contact'),
    path('buySeasonTickets/', views.buySeasonTicket, name='buySeasonTickets'),
    path('payment/', views.payment, name='payment'),
    path('echo/', views.echo, name='echo'),
    path('seatSelection/', views.seatSelection, name='seatSelection'),
    # seat_numbers is passed to the view as a string path parameter
    path('confirmationPage/<str:seat_numbers>/', views.confirmationPage, name='confirmationPage')
]
|
a = 0
# 0 is falsy, so the "gagné" branch is taken.
if a:
    print("perdu")
else:
    print("gagné")
|
# coding=utf-8
import requests
from lxml import html
from .config import LOGIN_URL,data,headers
def login():
    """Interactively log in to pixiv and return the authenticated session.

    Fetches the login page to scrape the hidden `post_key` token, prompts
    for credentials on stdin, then posts the login form.  Exits the
    process when the login fails.
    """
    s = requests.session()
    r = s.get(LOGIN_URL)
    tree = html.fromstring(r.text)
    el1 = tree.xpath('//input[@name="post_key"]')[0]
    # hidden token required by the login form
    post_key = el1.value
    # NOTE(review): 'pivix' typo in the prompt (runtime string, left as-is).
    id = input('Please input your pivix id:')
    passwd = input('Please input your password:')
    data['pixiv_id'] = id
    data['password'] = passwd
    data['post_key'] = post_key
    r = s.post(LOGIN_URL, headers=headers, data=data)
    # A successful login redirects to the site root.
    if r.url == 'https://www.pixiv.net/':
        print('Login success!')
    else:
        print('Login failed.')
        exit(0)
    return s
|
#!/usr/bin/python
import os
import sys
import subprocess, re, shutil, glob
import gettext
_ = gettext.lgettext
COLOR_BLACK = "\033[00m"
COLOR_RED = "\033[1;31m"
PRESCRIPTS = """
patch -s < ks.p
patch -s < conf.p
sudo mv /etc/mic/mic.conf /etc/mic/orig.conf
sudo mv test.conf /etc/mic/mic.conf
"""
POSTSCRIPTS = """
sudo mv -f /etc/mic/orig.conf /etc/mic/mic.conf
"""
def PrepEnv(cases_dir, case, work_env):
    """Prepare the working env: copy shared 'base' files, then case files.

    Case-specific files are copied second so they overwrite base files of
    the same name.
    """
    patterns = [
        os.path.join(cases_dir, 'base', '*'),
        os.path.join(cases_dir, 'test-' + case, '*'),
    ]
    for pattern in patterns:
        for src in glob.glob(pattern):
            shutil.copy(src, work_env)
def ImgCheck(work_env):
    """Return True when an image file was generated under *work_env*.

    A file counts when its name matches a known image extension
    (.img/.raw/.iso/.usbimg/.tar) or starts with "system-release".
    """
    image_pattern = re.compile(r'.*\.(img|raw|iso|usbimg|tar)')
    for _root, _dirs, files in os.walk(work_env):
        for name in files:
            if image_pattern.match(name) or re.match(r'system-release', name):
                return True
    return False
def RunandCheck(object, work_env):
    """run mic-image-creator command and check the result.

    Runs the mic command read from the case's `options` file inside
    *work_env*, then asserts success on *object* (a TestCase): when an
    `expect` file exists its text must appear in stderr or the mic log;
    otherwise an image file must have been produced (see ImgCheck).
    NOTE(review): the first parameter shadows the builtin `object`.
    """
    ret = False
    cwd = os.getcwd()
    os.chdir(work_env)
    os.system(PRESCRIPTS)
    #set value of "expect"
    expect = None
    if "expect" in os.listdir(work_env):
        exp_f = open('expect', 'r')
        exp = exp_f.read()
        if len(exp) > 0:
            expect = exp.strip()
        exp_f.close()
    #set cmdline: help-style invocations run as-is, others get the test ks file
    opt_f = open('options','r')
    mic_cmd = opt_f.read().strip()
    if mic_cmd.find('-h')!=-1 or mic_cmd.find('help')!=-1 or mic_cmd.find('?')!=-1:
        args = mic_cmd
    else:
        args = mic_cmd+' test.ks'
    print args
    log = open('miclog','w')
    proc = subprocess.Popen(args,stdout = log ,stderr=subprocess.PIPE,shell=True)
    errorinfo = proc.communicate()[1]
    log.close()
    mic_cmd_msg = None
    miclog_f = open('miclog','r')
    miclog_tuple = miclog_f.read()
    if len(miclog_tuple) > 0:
        mic_cmd_msg = miclog_tuple.strip()
    #check: expected text in stderr or log, otherwise an image must exist
    if expect:
        if errorinfo.find(expect) != -1 or mic_cmd_msg.find(expect) != -1 :#FIXME
            ret =True
    else:
        proc.wait()
        ret = ImgCheck(work_env)
    os.system(POSTSCRIPTS)
    os.chdir(cwd)
    try:
        object.assertTrue(ret)
    except object.failureException:
        if expect:
            ''' Used to update help expect info automaticlly.
            path = object._testMethodName
            path = path.replace('_','-',1)
            os.unlink('%s/mic_cases/%s/expect' % (cwd,path))
            fp = open('%s/mic_cases/%s/expect' % (cwd,path),'w')
            fp.write(mic_cmd_msg)
            fp.close()
            '''
            raise object.failureException(_("Expect and mic out msg are not constant\n%sExpect:%s\n\nMic out msg:%s%s") %(COLOR_RED,expect,mic_cmd_msg,COLOR_BLACK))
        else:
            raise object.failureException(_("%s%s%s") %(COLOR_RED,errorinfo,COLOR_BLACK))
|
import os
import sys
sys.path.insert(0, 'scripts')
import experiments as exp
import run_ALE
def launch(datadir, cluster, cores):
    """Re-submit this script (with --exprun appended) to the cluster."""
    dataset = os.path.basename(os.path.normpath(datadir))
    # Re-run the exact same invocation, marked so the child actually runs.
    command = ["python"] + sys.argv + ["--exprun"]
    results_dir = exp.create_result_dir(os.path.join("RestartAle", dataset), [])
    submit_path = os.path.join(results_dir, "submit.sh")
    exp.submit(submit_path, " ".join(command), cores, cluster)
if (__name__ == "__main__"):
is_run = ("--exprun" in sys.argv)
if (len(sys.argv) < 4):
print("Syntax: datadir cores cluster")
exit(1)
datadir = sys.argv[1]
cores = int(sys.argv[2])
if (is_run):
run_ALE.restart_exabayes_and_ALE(datadir, cores)
else:
cluster = sys.argv[3]
launch(datadir, cluster, cores)
|
# -*- encoding: utf-8 -*-
class Retorno:
    ''' Automatic-debit return file (Retorno Débito Automático) -- parsing stub '''
    def __init__(self, arquivo):
        # arquivo: the return file to parse; parsing is not implemented yet
        pass
|
import turtle as t
def write_xy(x,y):
    """Left-click handler: move to the click point, stamp, and label it."""
    point = (x, y)
    t.goto(point)
    t.stamp()
    t.write("x:%d, y:%d" % point)
def screen_clear(x,y):
    """Right-click handler: move to the click point and erase all drawings."""
    destination = (x, y)
    t.goto(destination)
    t.clear()
t.setup(600,600)
s=t.Screen()
t.penup()
# Left click (button 1): stamp and label the click position.
s.onscreenclick(write_xy,1)
# Right click (button 3): move there and wipe all drawings.
s.onscreenclick(screen_clear,3)
s.listen()
|
#list
# NOTE(review): `list` and `tuple` below shadow the Python builtins.
list = ['one','two','three','four','five','six']
list[2] = '2' #index is start from zero
print list
#append & remove
list.append('qqzezr')
if 'one' in list:
    list.remove('one')
list.sort()
print list
#tuple
tuple = ('a','b','c') #can't change the value in tuple
print tuple
#dict
dic = {'1':'one','2':'two','3':'three'}
key = '2'
if key in dic :
    print dic['1']
    dic.pop(key)
if not (key in dic):
    print 'pop the key called ' , key
else:
    print 'there is not exist a key called ' , key
#set
setA = set(['a','b','c','d']) #create by a list
setB = set(['c','e','g'])
setA.add('c')
print setA
# union and intersection of the two sets
print setA | setB
print setA & setB
input()
input() |
# Generated by Django 2.2.2 on 2019-08-28 09:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make `client.token` and `client.username` unique."""
    dependencies = [
        ('user', '0002_usertoken'),
    ]
    operations = [
        # token: unique CharField defaulting to the empty string
        migrations.AlterField(
            model_name='client',
            name='token',
            field=models.CharField(default='', max_length=255, unique=True),
        ),
        # username: unique CharField, max 50 characters
        migrations.AlterField(
            model_name='client',
            name='username',
            field=models.CharField(max_length=50, unique=True),
        ),
    ]
|
# insertion_sort.py
class Solution():
    """Namespace for a simple insertion-sort implementation."""

    @staticmethod
    def insertion_sort(input):
        """Return a new list with the elements of *input* in ascending order.

        BUG FIX: the methods lacked `self`/@staticmethod, so instance calls
        (`Solution().insertion_sort(xs)`) passed the instance as the data
        argument.  @staticmethod keeps `Solution.insertion_sort(xs)` working
        while also allowing instance calls.
        NOTE: the parameter name shadows the builtin `input`; kept for
        keyword-argument compatibility.
        """
        result = []
        for i in range(0, len(input)):
            # result holds the first i elements, sorted; scan backwards
            # from its end to find where input[i] belongs (stable insert).
            j = i
            while j > 0 and input[i] < result[j-1]:
                j -= 1
            result.insert(j, input[i])
        return result

    @staticmethod
    def main():
        # Placeholder entry point invoked by the __main__ guard below.
        pass
if __name__ == "__main__":
Solution.main()
|
import gevent
from uc.itm import ITM, UCWrapper
from collections import defaultdict
from numpy.polynomial.polynomial import Polynomial
import logging
log = logging.getLogger(__name__)
'''
This wrapper exists in the UC execution in both the real and ideal worlds
as a standalone ITM. In a way this can be seen as a functionality where
it acts as a trusted third party that does what it is expected to do.
The logic behind the wrapper is to enable synchronous communication in UC.
Recall from the original formulation of the UC framework, all communication
is asynchronous. This means that the adversary can arbitrarily delay message
delivery between any two parties. Synchronous communication requires that
message delay has a known upper bound. This means that the synchronous world
proceeds in rounds.
The wrapper generalizes the idea of message delivery by instead allowing
synchronous execution of code blocks. The protocol parties and functionalities
can schedule codeblocks to be executed synchronously in the same way as a message
normally would be. The upper bound delay is added to the codeblock when
scheduled but the adversary can control which round it executes the codeblock
with the ``exec'' call. The adversary can not have complete control over
the execution of codeblocks and the progression of rounds so the environment
can try to advance the wrapper through ``poll''. Eventually the delay parameter
will reach 0 with enough ``poll'' calls causing the next codeblock to be popped
off the queue and executed.
Party/Functionality Interface
-- ``schedule'' : this message by a party of a functionality comes with a
codeblock to execute (in the form of a function and input
parameters), and the environment specified upper-bound on delay
delta. Scheduling a codeblock saves it to the ``todo'' list
                    with the maximum delay assigned by default. This means that new
codeblocks are automatically inserted into todo[curr_round + delta].
Additionally this increments a ``delay'' parameter.
-- ``clock-round'': the functionality just writes the current clock round back to the
calling party.
-- ``call me'' : (party only) Part of a synchronous model is input completeness: every honest party
is able to give input in each round that it wants to. A party passes in a round
number, r, with the ``call-me'' message. The wrapper schedules the
                    calling party to be activated in round ``todo[curr_round + r]''
-- ``leak''   : there are two ways of leaking information to the adversary:
                    1). directly write the leak onto the tape of the adversary (activating it)
2). buffer the leaks in the functionality that the adversary can ask for
In this wrapper we opt for #2 simply because it simplifies protocols
and functionalities.
Adversary Interface
-- ``delay'' : There is a
--
'''
class Syn_FWrapper(UCWrapper):
    """Synchronous-execution wrapper.

    Keeps a per-round queue of scheduled codeblocks (``todo``), a
    ``delay`` budget that grows with every schedule (and with explicit
    adversary ``delay`` messages), and a ``leaks`` buffer the adversary
    drains with ``get-leaks``.  The round only advances when the
    environment polls and the delay budget has reached zero.
    """
    def __init__(self, k, bits, crupt, channels, pump, poly, importargs):
        # Rounds are 1-based; todo maps round number -> [(f, args), ...].
        self.curr_round = 1
        self.delay = 0
        self.todo = { self.curr_round: [] }
        # Buffered (sender, message, import) triples for the adversary.
        self.leaks = []
        # TODO keep the round here until something happens
        # alternate theory: the round won't change unless something exists todo
        # in future rounds
        #self.adv_callme(self.curr_round)
        # Running total of everything ever queued (debugging aid only).
        self.total_queue_ever = 0
        UCWrapper.__init__(self, k, bits, crupt, 'wrap', 'me', channels, poly, pump, importargs)
    def party_clock_round(self, sender):
        """Report the current round back to a party over w2p."""
        self.write( 'w2p', (sender, self.curr_round))
    def func_clock_round(self):
        """Report the current round back to the functionality over w2f."""
        self.write( 'w2f', self.curr_round )
    def print_todo(self):
        """Debug dump of the todo queue, with function names in place of objects."""
        p_dict = {}
        for k in self.todo:
            o = []
            for f,args in self.todo[k]:
                o.append((f.__name__, args))
            p_dict[k] = o
        print('\n\033[1m', str(p_dict), '\033[0m\n')
    def fschedule(self, sender, f, args, delta, imp):
        """Queue codeblock (f, args) from the functionality for round curr_round+delta."""
        log.debug('\033[1mFschedule\033[0m delta: {}, import: {}, sender: {}'.format(imp, delta, sender))
        # add to the runqueue
        if self.curr_round+delta not in self.todo:
            self.todo[self.curr_round + delta] = []
        self.todo[self.curr_round + delta].append( (f,args) )
        self.total_queue_ever += 1
        log.debug('total_queue_ever: {}'.format(self.total_queue_ever))
        # leak the schedule: (round, position within that round, function name)
        idx = len(self.todo[self.curr_round + delta])-1
        r = self.curr_round + delta
        self.leaks.append( (sender, ('schedule', r, idx, f.__name__), 0) )
        self.print_todo()
        # every scheduled block buys one more 'poll' of delay, then hand
        # control back to the scheduling functionality
        self.delay += 1
        self.write('w2f', (sender, ('OK',)) )
    def pschedule(self, sender, f, args, delta):
        """Queue codeblock (f, args) from a party for round curr_round+delta."""
        log.debug('\033[1mPschedule\033[0m {} {}'.format(sender, delta))
        # add to runqueue
        if self.curr_round+delta not in self.todo:
            self.todo[self.curr_round + delta] = []
        self.todo[self.curr_round + delta].append( (f,args) )
        self.total_queue_ever += 1
        log.debug('total_queue_ever: {}'.format(self.total_queue_ever))
        # leak the schedule
        idx = len(self.todo[self.curr_round + delta])-1
        r = self.curr_round + delta
        self.leaks.append( (sender, ('schedule', r, idx, f.__name__), 0) )
        # add to delay and return control to sender
        self.delay += 1
        self.write('w2p', (sender, ('OK',)) )
    def adv_delay(self, t, imp):
        """Let the adversary buy t extra polls of delay (costs import t)."""
        self.assertimp(imp, t)
        self.delay += t
        self.write('w2a', "OK" )
    def adv_execute(self, r, i):
        """Pop and run the i-th codeblock queued for round r."""
        f,args = self.todo[r].pop(i)
        self.print_todo()
        f(*args)
    def next_round(self):
        """Smallest round >= curr_round with pending work, else curr_round."""
        rounds = self.todo.keys()
        for r in sorted(rounds):
            if r >= self.curr_round and len(self.todo[r])>0:
                return r
        return self.curr_round
    def leak(self, sender, msg, imp):
        """Buffer a leak for later retrieval via adversary 'get-leaks'."""
        log.debug("Leaking information, sender={}, msg={}".format(sender, msg))
        self.leaks.append( (sender, msg, imp) )
    def poll(self, imp):
        """Environment tick: burn one delay, or advance and run the next block."""
        self.assertimp(imp, 1)
        if self.delay > 0:
            self.delay -= 1
            self.write('w2a', ('poll',) )
        else:
            self.curr_round = self.next_round()
            r = self.curr_round
            # run the oldest codeblock of the (possibly advanced) round,
            # or give up the activation if there is nothing to do
            if len(self.todo[r]): self.adv_execute(r, 0)
            else: self.pump.write("dump")
    def clock_round(self, sender, channel):
        """Write (sender, ('round', curr_round)) on the given channel."""
        self.write( channel, (sender, ('round', self.curr_round)) )
    def env_msg(self, d):
        """Dispatch environment messages; only 'poll' is meaningful."""
        msg = d.msg
        imp = d.imp
        if msg[0] == 'poll':
            self.poll(imp)
        else:
            self.pump.write("dump")
    def func_msg(self, d):
        """Dispatch messages arriving from the functionality."""
        msg = d.msg
        imp = d.imp
        sender,msg = msg
        if msg[0] == 'schedule':
            self.fschedule(sender, msg[1], msg[2], msg[3], imp)
        elif msg[0] == 'leak':
            self.leak(sender, msg[1], imp)
        elif msg[0] == 'clock-round':
            # NOTE(review): this writes on 'f2p' while the sibling helpers
            # answer on wrapper channels ('w2f'/'w2p') -- confirm the channel.
            self.write( 'f2p', ('round', self.curr_round) )
        else:
            self.pump.write("dump")
    # TODO revisit this to see if adversary can delay callme actions
    def party_callme(self, r):
        """Schedule an adversary 'shotout' notification in round r."""
        if r not in self.todo: self.todo[r] = []
        # NOTE(review): 'shotout' here vs 'shoutout' in adv_callme -- one is
        # likely a typo, but receivers may match on the exact tag, so both
        # strings are left unchanged.
        self.todo[r].append( (lambda: self.write('w2a', ('shotout',)), ()) )
        self.write('w2p', ('OK',) )
    def party_msg(self, d):
        """Dispatch messages arriving from protocol parties."""
        msg = d.msg
        imp = d.imp
        sender,msg = msg
        if msg[0] == 'schedule':
            # BUG FIX: the original called pschedule(msg[1], msg[2], msg[3]),
            # dropping `sender` and raising TypeError (pschedule takes
            # sender, f, args, delta) -- mirror the fschedule call in func_msg.
            self.pschedule(sender, msg[1], msg[2], msg[3])
        elif msg[0] == 'clock-round':
            self.clock_round(sender, 'w2p')
        elif msg[0] == 'callme':
            # NOTE(review): the module docstring says parties pass a round
            # number r, but `sender` is forwarded here -- confirm intent.
            self.party_callme(sender)
        elif msg[0] == 'leak':
            # NOTE(review): func_msg leaks msg[1]; here the whole tagged
            # tuple is leaked -- possibly inconsistent, left as-is.
            self.leak(sender, msg, imp)
        else:
            self.pump.write("dump")
    def adv_callme(self, r):
        """Schedule an adversary 'shoutout' notification in round r."""
        if r not in self.todo: self.todo[r] = []
        self.todo[r].append( (lambda: self.write('w2a', ('shoutout',)), ()) )
        self.write('w2a', ('OK',) )
    def adv_get_leaks(self):
        """Deliver (and clear) all buffered leaks to the adversary."""
        total_import = 0
        output = []
        for leak in self.leaks:
            sender,msg,imp = leak
            total_import += imp
            output.append( (sender, msg, imp) )
        self.write( 'w2a', output, total_import )
        self.leaks = []
    def adv_msg(self, d):
        """Dispatch adversary messages: delay / exec / callme / get-leaks."""
        msg = d.msg
        imp = d.imp
        if msg[0] == 'delay':
            self.adv_delay(msg[1], imp)
        elif msg[0] == 'exec':
            self.adv_execute(msg[1], msg[2])
        elif msg[0] == 'callme':
            self.adv_callme(msg[1])
        elif msg[0] == 'get-leaks':
            self.adv_get_leaks()
        else:
            self.pump.write("dump")
|
import numpy as np
from scipy.io import loadmat
import pickle
import random
# ---- Build windowed SVM features/labels from one CHB-MIT patient ----
window_len = 30*256  # 30 s of EEG at 256 Hz, in samples
pid = 1
file_x = r'\Users\Owner\Desktop\chb-mit\data\patient{0}\features_se.mat'.format(pid)
file_y = r'\Users\Owner\Desktop\chb-mit\data\patient{0}\y_data{0}.npy'.format(pid)
interval_file = r'\Users\Owner\Desktop\chb-mit\data\patient{0}\intervals.data'.format(pid)
x = loadmat(file_x)
# NOTE(review): loadmat returns a dict of MATLAB variables; the slice
# x[start:end, :] below will fail unless the feature matrix is extracted
# first, e.g. x = loadmat(file_x)['<var>'] -- confirm the stored name.
y = np.load(file_y)
x_data = list()
y_data = list()
with open(interval_file, "rb") as f:
    intervals = pickle.load(f)
for i in range(len(intervals)-1):
    start = intervals[i]
    end = intervals[i+1]
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    num_windows = int(np.floor((end-start)/window_len))
    # hour data
    x_subset = x[start:end,:]
    y_subset = y[start:end]
    for j in range(num_windows):
        # BUG FIX: the original referenced self.window_len in this
        # module-level script (NameError); use the module-level constant.
        window_x = x_subset[j*window_len:(j+1)*window_len]
        window_y = y_subset[j*window_len:(j+1)*window_len]
        x_svm_avg = np.average(window_x, axis=0)
        x_svm_std = np.std(window_x, axis=0)
        x_svm = np.concatenate((x_svm_avg,x_svm_std), axis=1)
        # A window is labeled positive if any sample in it is positive.
        y_svm = int(max(window_y))
        x_data.append(x_svm)
        y_data.append(y_svm)
# Shuffle windows jointly so features stay aligned with their labels.
c = list(zip(x_data, y_data))
random.shuffle(c)
x_data, y_data = zip(*c)
x_svm = np.concatenate(x_data,axis=0)
y_svm = np.asarray(y_data)
|
# Read the input file: the first line holds the number of pairs, each
# following line one whitespace-separated pair of integers.
# FIX: use a context manager so the file handle is always closed
# (the original leaked the handle).
with open("C:\\Users\\HoratiuC\\Documents\\numbers.txt", "r") as f:
    nums = f.read().split('\n')
#read the first element of the list, showing the number of pairs
print ("This program sums {} pairs of numbers".format(int(nums[0])))
# Drop the count line; only pair lines remain.  Skipping blank lines also
# tolerates a trailing newline (the original crashed on one).
pairs = [line.split() for line in nums[1:] if line.strip()]
# Sum each pair.  FIX: the original shadowed the builtin `sum`.
answer = [int(a) + int(b) for a, b in pairs]
print (answer)
|
"""
Given a binary array, find the maximum length of a contiguous subarray with
equal number of 0 and 1.
Example 1:
Input: [0,1]
Output: 2
Explanation: [0, 1] is the longest contiguous subarray with equal number of 0
and 1.
Example 2:
Input: [0,1,0]
Output: 2
Explanation: [0, 1] (or [1, 0]) is a longest contiguous subarray with equal
number of 0 and 1.
"""
class Solution(object):
    def findMaxLength(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        Prefix-balance technique: track (#ones - #zeros) seen so far and
        remember the first index at which each balance value occurred.
        Whenever the same balance reappears at index i, the slice between
        the two occurrences contains equally many 0s and 1s.
        """
        # Balance 0 "occurs" before the array starts.
        first_seen = {0: -1}
        balance = 0
        best = 0
        for idx, bit in enumerate(nums):
            balance += 1 if bit else -1
            if balance in first_seen:
                # Same balance seen before: the span in between is valid.
                best = max(best, idx - first_seen[balance])
            else:
                # Record only the earliest index for each balance value.
                first_seen[balance] = idx
        return best
# Demo: run the solver on a sample array and show the answer.
print(Solution().findMaxLength([1, 0, 1, 1, 0, 0]))
|
from neo.io.basefromrawio import BaseFromRaw
from neo.rawio.nixrawio import NIXRawIO
# This class subjects to limitations when there are multiple asymmetric blocks
# This class subjects to limitations when there are multiple asymmetric blocks
class NixIO(NIXRawIO, BaseFromRaw):
    """Neo IO for NIX files, layered on NIXRawIO through BaseFromRaw."""
    name = 'NIX IO'
    _prefered_signal_group_mode = 'group-by-same-units'
    _prefered_units_group_mode = 'split-all'

    def __init__(self, filename):
        NIXRawIO.__init__(self, filename)
        BaseFromRaw.__init__(self, filename)

    def read_block(self, block_index=0, lazy=False, signal_group_mode=None,
                   units_group_mode=None, load_waveforms=False):
        """Read one block, unwrapping list-valued 'nix_name' annotations
        on each channel index to their first element."""
        block = super().read_block(block_index, lazy,
                                   signal_group_mode,
                                   units_group_mode,
                                   load_waveforms)
        for channel_index in block.channel_indexes:
            annotations = channel_index.annotations
            if "nix_name" in annotations:
                annotations["nix_name"] = annotations["nix_name"][0]
        return block

    def __enter__(self):
        return self

    def __exit__(self, *args):
        # Drop the parsed header and release the underlying NIX file.
        self.header = None
        self.file.close()
|
import os
import sys
from unittest.mock import Mock
from sc2 import Race, race_worker
from sc2.units import Units
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from wombots.zerg_bot import *
# noinspection PyUnresolvedReferences
from wombots.composed_bot import *
async def async_mock(val):
    """Awaitable that resolves immediately to ``val`` (async stand-in for tests)."""
    return val
def unit_mock_of(type_id: UnitTypeId):
    """Build a mocked, player-owned Unit reporting the given type id."""
    mocked: Unit = Mock()
    mocked.is_mine = Mock(return_value=True)
    mocked.type_id = type_id
    return mocked
def add_unit_to_bot(unit_type: UnitTypeId, bot: ZergBot, ) -> Unit:
    """Append a mocked unit of ``unit_type`` to the bot and refresh its workers."""
    new_unit: Unit = unit_mock_of(unit_type)
    bot.units.append(new_unit)
    # Re-derive the worker collection so it reflects the new unit.
    bot.workers = bot.units(race_worker[bot.race])
    return new_unit
def initial_bot_state(build_order) -> ZergBot:
    """Return a minimal ZergBot primed with ``build_order``, no units, supply 10."""
    zerg_bot = ZergBot(Deque(build_order))
    zerg_bot.race = Race.Zerg
    zerg_bot.units = Units([], Mock())
    zerg_bot.supply_left = 10
    return zerg_bot
|
import sys
from utils import Utils
from utils import APIs
def lcodeViewer(utils):
    """Print the region-code table, five entries per printed line.

    :param utils: Utils instance; utils.codeLoad() populates utils.lcode,
        a mapping of region name -> region code.
    """
    utils.codeLoad()
    codes = utils.lcode
    msg = ''
    idx = 0
    for code in codes:
        idx +=1
        if msg == "":
            # NOTE(review): the first entry on a line uses "[%s](%s)" while
            # later entries use "[%s] %s" -- presumably intentional, left as-is.
            msg = "[%s](%s)"%(code, codes[code])
        else:
            msg += "\t\t" + "[%s] %s"%(code, codes[code])
        if (idx%5) == 0:
            print (msg)
            msg = ""
    # BUG FIX: a trailing group of fewer than five entries was silently
    # dropped; flush the remainder.
    if msg:
        print (msg)
def callData(lname, lcode, period):
    """Dump rent and trade records for one region to ./output/<name>-<code>.csv."""
    apis = APIs()
    filename = './output/%s-%s.csv'%(lname.strip().replace(' ',''),lcode)
    # Header row (tab-separated Korean column names).
    title = '시/군/구 동\t지번\t아파트명\t층\t전용면적\t평(전용면적)\t가격(보증금)\t월세\t평단가(전용)\t평단가(80%)\t년\t월\t일\t년월\t건축년도\t거래형태'
    with open(filename, 'w') as f:
        print (title)
        f.write ('%s\n'%title)
        # One rent + one trade query per requested year-month.
        for month in period:
            apis.rent(f, lname, lcode, month)
            apis.trade(f, lname, lcode, month)
if __name__ == '__main__':
    # Interactive entry point: show the region-code table, then prompt for
    # a region code and a look-back period (in months).
    utils = Utils()
    lcodeViewer(utils)
    lcode = input('\n* 지역명 옆의 지역코드를 입력하세요. ')
    period = input('* 추출기간. 오늘부터 몇개월 전까지를 조회할까요?(ex. 3, 12) : ')
    # utils.period(n) expands the month count into concrete date values.
    dates = utils.period(int(period))
    callData(utils.lcode[lcode], lcode, dates)
|
#!/usr/bin/python
# -*- coding:utf8 -*-
"""
@author:xiaotian zhao
@time:12/24/18
"""
from __future__ import print_function
import scrapy
import re
import time
import random
import BeautifulSoup
from urllib import quote
class ProxyCrawler(scrapy.Spider):
    """Scrapy spider harvesting free proxies from kuaidaili.com.

    Emits "ip:port" pairs either appended to ``output_file_path`` (one
    quoted entry per line) or printed to stdout.
    """
    name = "proxy_crawler"
    # NOTE(review): allowed_domains lists zhidao.baidu.com but the spider
    # crawls kuaidaili.com; start_urls bypass the offsite filter, yet this
    # looks copy-pasted -- confirm.
    allowed_domains = ['zhidao.baidu.com']
    start_urls = []

    def __init__(self, output_file_path=None, max_page_num=5):
        super(ProxyCrawler, self).__init__()
        self.max_page_num = int(max_page_num)
        self.output_file_path = output_file_path
        # BUG FIX: range(n) generated pages 0..n-1, but kuaidaili's free
        # listing pages are numbered from 1 (page 0 does not exist).
        self.start_urls = [
            'https://www.kuaidaili.com/free/inha/{}/'.format(i)
            for i in range(1, self.max_page_num + 1)
        ]
        if output_file_path:
            self.output_file = open(output_file_path, 'a')

    def parse(self, response):
        """Extract the ip/port table columns from one listing page."""
        ips = response.xpath("//tr/td[1]/text()").extract()
        ports = response.xpath('//tr/td[2]/text()').extract()
        for ip, port in zip(ips, ports):
            if self.output_file_path:
                self.output_file.write('\"{}:{}\",\n'.format(ip, port))
            else:
                print("{}:{}".format(ip, port))
        # Crude politeness delay between pages.
        time.sleep(random.randint(0, 2))

    def closed(self, reason):
        """Scrapy shutdown hook -- FIX: the output file was never closed."""
        if self.output_file_path:
            self.output_file.close()
|
from __future__ import absolute_import
import numpy as np
import mimpy.mesh.mesh as mesh
import mimpy.mesh.hexmesh_cython as hexmesh_cython
from six.moves import range
class HexMesh(mesh.Mesh):
    """ Class for constructing structured hexahedral meshes.
    """
    def _nonplanar_face_normal(self, face_index):
        """ Calculates an approximate normal for a
        face that might not be planar.

        :param int face_index: index of face.
        :return: The face normal (unit length).
        :rtype: ndarray
        """
        face = self.get_face(face_index)
        # Cross two edges that share vertex 0; good enough for
        # near-planar quads.
        v1 = self.get_point(face[1]) - self.get_point(face[0])
        v2 = self.get_point(face[0]) - self.get_point(face[-1])
        normal = np.cross(v2, v1)
        return normal/np.linalg.norm(normal)
    def _nonplanar_face_centroid(self, face_index):
        """ Calculates an approximate centroid for a
        face that might not be planar.

        :param int face_index: index of face.
        :return: Face centroid (mean of the four corners).
        :rtype: ndarray
        """
        face = self.get_face(face_index)
        p1 = self.get_point(face[0])
        p2 = self.get_point(face[1])
        p3 = self.get_point(face[2])
        p4 = self.get_point(face[3])
        center_point = .25 * (p1 + p2 + p3 + p4)
        return center_point
    def _nonplanar_cell_centroid(self, cell_index):
        """ Calculates an approximate centroid for a
        cell that may have nonplanar faces: the mean of all face
        corner points (shared points counted once per face).
        """
        centroid = np.zeros(3)
        count = 0.
        for face in self.get_cell(cell_index):
            for point in self.get_face(face):
                count += 1.
                centroid += self.get_point(point)
        centroid = centroid/count
        return centroid
    def _populate_face_areas(self):
        """ Finds all the face areas and
        stores them in the area array (delegated to cython for speed).
        """
        hexmesh_cython.all_face_areas(self.faces.pointers,
                                      len(self.faces),
                                      self.faces.data,
                                      self.points,
                                      self.face_areas)
    def _populate_face_centroids(self):
        """ Finds all the face centroids and
        stores them via set_face_real_centroid.
        """
        for face_index in range(self.get_number_of_faces()):
            current_centroid = self._nonplanar_face_centroid(face_index)
            self.set_face_real_centroid(face_index, current_centroid)
    def _populate_face_normals(self):
        """ Finds all the face normals and
        stores them in the normals array (delegated to cython for speed).
        """
        hexmesh_cython.all_face_normals(self.faces.pointers,
                                        len(self.faces),
                                        self.faces.data,
                                        self.points,
                                        self.face_normals)
    def _nonplanar_face_area(self, face_index):
        """ Calculates an approximate area for a
        face that might not be planar: the quad is fanned into four
        triangles around its centroid and each triangle's area is
        computed with Heron's formula.

        :param int face_index: index of face.
        :return: Face area.
        :rtype: float
        """
        face = self.get_face(face_index)
        area = 0.
        p1 = self.points[face[0]]
        p2 = self.points[face[1]]
        p3 = self.points[face[2]]
        p4 = self.points[face[3]]
        center_point = .25 * (p1 + p2 + p3 + p4)
        # Triangle (p1, p2, center)
        a = np.linalg.norm(p1-p2)
        b = np.linalg.norm(p2-center_point)
        c = np.linalg.norm(center_point - p1)
        s = (a + b + c)/2.
        area += np.sqrt(s*(s-a)*(s-b)*(s-c))
        # Triangle (p2, p3, center)
        a = np.linalg.norm(p2-p3)
        b = np.linalg.norm(p3-center_point)
        c = np.linalg.norm(center_point-p2)
        s = (a + b + c)/2.
        area += np.sqrt(s*(s-a)*(s-b)*(s-c))
        # Triangle (p3, p4, center)
        a = np.linalg.norm(p3-p4)
        b = np.linalg.norm(p4-center_point)
        c = np.linalg.norm(center_point - p3)
        s = (a + b + c)/2.
        area += np.sqrt(s*(s-a)*(s-b)*(s-c))
        # Triangle (p4, p1, center)
        a = np.linalg.norm(p4-p1)
        b = np.linalg.norm(p1-center_point)
        c = np.linalg.norm(center_point - p4)
        s = (a + b + c)/2.
        area += np.sqrt(s*(s-a)*(s-b)*(s-c))
        return area
    def get_dim_x(self):
        """ Return the dimension of the domain
        in the X direction.

        :return: Domain x dimension.
        :rtype: float
        """
        return self.dim_x
    def get_dim_y(self):
        """ Return the dimension of the domain
        in the Y direction.

        :return: Domain y dimension.
        :rtype: float
        """
        return self.dim_y
    def get_dim_z(self):
        """ Return the dimension of the domain
        in the Z direction.

        :return: Domain z dimension.
        :rtype: float
        """
        return self.dim_z
    def _build_faces(self, ni, nj, nk):
        """ Function to build the mesh faces.

        :param int ni: Number of points in the x-direction.
        :param int nj: Number of points in the y-direction.
        :param int nk: Number of points in the z-direction.

        :return: Dictionary mapping (i, j, k, orientation) to face index.
        :rtype: dict
        """
        count = 0
        polygon_ijka_to_index = {}
        for k in range(nk):
            for j in range(nj):
                for i in range(ni):
                    # Orientation 0: face normal along z.
                    if i < ni-1 and j < nj-1:
                        new_face = [self.ijk_to_index(i, j, k),
                                    self.ijk_to_index(i+1, j, k),
                                    self.ijk_to_index(i+1, j+1, k),
                                    self.ijk_to_index(i, j+1, k)]
                        face_index = self.add_face(new_face)
                        polygon_ijka_to_index[(i, j, k, 0)] = face_index
                        if k == 0:
                            self.add_boundary_face(4, face_index, -1)
                        if k == nk-1:
                            self.add_boundary_face(5, face_index, 1)
                        count += 1
                    # Orientation 1: face normal along y.
                    if k < nk-1 and i < ni-1:
                        new_face = [self.ijk_to_index(i, j, k),
                                    self.ijk_to_index(i, j, k+1),
                                    self.ijk_to_index(i+1, j, k+1),
                                    self.ijk_to_index(i+1, j, k)]
                        face_index = self.add_face(new_face)
                        polygon_ijka_to_index[(i, j, k, 1)] = face_index
                        if j == 0:
                            self.add_boundary_face(2, face_index, -1)
                        if j == nj - 1:
                            self.add_boundary_face(3, face_index, 1)
                        count += 1
                    # Orientation 2: face normal along x.
                    if j < nj-1 and k < nk-1:
                        new_face = [self.ijk_to_index(i, j, k),
                                    self.ijk_to_index(i, j+1, k),
                                    self.ijk_to_index(i, j+1, k+1),
                                    self.ijk_to_index(i, j, k+1)]
                        face_index = self.add_face(new_face)
                        # CONSISTENCY FIX: the other two orientations store
                        # the index returned by add_face; this branch stored
                        # the local running `count`, which only coincides
                        # with the face index when the mesh starts empty.
                        polygon_ijka_to_index[(i, j, k, 2)] = face_index
                        if i == 0:
                            self.add_boundary_face(0, face_index, -1)
                        if i == ni - 1:
                            self.add_boundary_face(1, face_index, 1)
                        count += 1
        self._populate_face_areas()
        self._populate_face_centroids()
        self._populate_face_normals()
        return polygon_ijka_to_index
    def ijk_to_index(self, i, j, k):
        """ Returns point index number for an i, j, k numbering.

        :param int i: index in x-direction.
        :param int j: index in y-direction.
        :param int k: index in z-direction.
        """
        return i+self.ni*j+self.ni*self.nj*k
    def ijk_to_cell_index(self, i, j, k):
        """ Returns cell index number for an i, j, k numbering.

        :param int i: index in x-direction.
        :param int j: index in y-direction.
        :param int k: index in z-direction.
        """
        return i+(self.ni-1)*j+(self.ni-1)*(self.nj-1)*k
    def __init__(self):
        """ Initialize hexmesh.
        """
        mesh.Mesh.__init__(self)
        self.dim_x = 0.0
        self.dim_y = 0.0
        self.dim_z = 0.0
        # Number of points in each direction (set by build_mesh).
        self.ni = 0
        self.nj = 0
        self.nk = 0
        # Maps cell index -> (i, j, k) grid coordinates.
        self.cell_to_ijk = {}
    def build_mesh(self, ni, nj, nk,
                   dim_x, dim_y, dim_z, K,
                   modification_function = None):
        """ Constructs a structured hexahedral mesh.

        :param int ni: Number of cells in the x-direction.
        :param int nj: Number of cells in the y-direction.
        :param int nk: Number of cells in the z-direction.
        :param float dim_x: Size of domain in the x-direction.
        :param float dim_y: Size of domain in the y-direction.
        :param float dim_z: Size of domain in the z-direction.
        :param function K: Permeability map function.
            K(point, i, j, k) -> 3x3 Matrix.
        :param function modification_function: Alteration function for shifting
            points of the cells. modification_function(p, i, j, k) -> 3 array.

        :return: None
        """
        # Needs to be moved to an __init__ function.
        self.dim_x = dim_x
        self.dim_y = dim_y
        self.dim_z = dim_z
        # ni/nj/nk arrive as cell counts; convert to point counts.
        ni += 1
        nj += 1
        nk += 1
        self.ni = ni
        self.nj = nj
        self.nk = nk
        dx = self.dim_x/float(ni-1.)
        dy = self.dim_y/float(nj-1.)
        dz = self.dim_z/float(nk-1.)
        # BUG FIX: the 'BottomZ' marker name contained a stray comma
        # inside the string ("BottomZ,").
        self.set_boundary_markers([0, 1, 2, 3, 4, 5],
                                  ['BottomX', 'TopX',
                                   'BottomY', 'TopY',
                                   'BottomZ', 'TopZ'])
        ## adding points:
        if modification_function is None:
            for k in range(nk):
                for j in range(nj):
                    for i in range(ni):
                        self.add_point(np.array([float(i)*dx,
                                                 float(j)*dy,
                                                 float(k)*dz]))
        else:
            for k in range(nk):
                for j in range(nj):
                    for i in range(ni):
                        self.add_point(modification_function(
                            np.array([float(i)*dx,
                                      float(j)*dy,
                                      float(k)*dz]), i, j, k))
        polygon_ijka_to_index = self._build_faces(ni, nj, nk)
        ### adding cells:
        for k in range(nk-1):
            for j in range(nj-1):
                for i in range(ni-1):
                    # Six faces of cell (i, j, k): two of each orientation.
                    new_cell = [polygon_ijka_to_index[(i, j, k, 0)],
                                polygon_ijka_to_index[(i, j, k, 1)],
                                polygon_ijka_to_index[(i, j, k, 2)],
                                polygon_ijka_to_index[(i+1, j, k, 2)],
                                polygon_ijka_to_index[(i, j+1, k, 1)],
                                polygon_ijka_to_index[(i, j, k+1, 0)]]
                    cell_index = self.add_cell(new_cell,
                                               [-1, -1, -1, 1, 1, 1])
                    self.cell_to_ijk[cell_index] = (i, j, k)
        self.find_volume_centroid_all()
        # Sample the permeability map at each cell centroid.
        for cell_index in range(self.get_number_of_cells()):
            (i, j, k) = self.cell_to_ijk[cell_index]
            [cx, cy, cz] = self.get_cell_real_centroid(cell_index)
            k_e = K(np.array([cx, cy, cz]), i, j, k)
            self.set_cell_k(cell_index, k_e)
|
from rest_framework import serializers
from .models import Hunter, JobArea, Company, Internship, Stack, Roadmap, PlanItem, Test, Vacancy
from myauth.serializers import UserSerializer
class JobAreaSerializer(serializers.ModelSerializer):
    """Serializes JobArea rows, including rank/popularity metadata."""
    class Meta:
        model = JobArea
        fields = ('id', 'title', 'related_words', 'description','rank','popularity','created_on')
class HunterSerializer(serializers.ModelSerializer):
    """Serializes Hunter profiles; nested job_area and user are read-only."""
    job_area = JobAreaSerializer(read_only=True)
    user = UserSerializer(read_only=True)
    class Meta:
        model = Hunter
        fields = ('id', "user",'full_name', 'email', 'phone', 'birthday', 'isFemale', 'address', 'city','position','thumbnailPath',"skills", "job_area",'experience', 'interests','github_link', 'linkedin_link', 'instagram_link', 'account_created_on' )
class CompanySerializer(serializers.ModelSerializer):
    """Serializes Company rows with a nested read-only job_area."""
    job_area = JobAreaSerializer(read_only=True)
    class Meta:
        model = Company
        fields = ('id', 'name', 'address', 'city', 'description', 'rank', 'thumbnailPath', 'linkedin_link', 'instagram_link', 'job_area')
class VacancySerializer(serializers.ModelSerializer):
    """Serializes Vacancy rows; nested company and job_area are read-only."""
    company = CompanySerializer(read_only=True)
    job_area = JobAreaSerializer(read_only=True)
    class Meta:
        model = Vacancy
        fields = ('id', 'title', 'company', 'job_area', 'requirements', 'min_exp_time', 'description', 'estimated_salary', 'perks', 'status', 'created_on')
class IntershipSerializer(serializers.ModelSerializer):
    """Serializes Internship rows; nested company and job_area are read-only.

    NOTE: the class name keeps the original "Intership" spelling -- renaming
    it would break existing importers.
    """
    job_area = JobAreaSerializer(read_only=True)
    company = CompanySerializer(read_only=True)
    class Meta:
        model = Internship
        fields = ('id', 'title', 'start_date', 'company','job_area', 'description', 'estimated_salary', 'duration', 'status', 'created_on')
class StackSerializer(serializers.ModelSerializer):
    """Serializes Stack rows with a nested read-only job_area."""
    job_area = JobAreaSerializer(read_only=True)
    class Meta:
        model = Stack
        fields = ('id', 'title', 'job_area', 'description', 'popularity','features', 'created_on' )
class RoadmapSerializer(serializers.ModelSerializer):
    """Serializes Roadmap rows (title, plan, timestamps)."""
    class Meta:
        model = Roadmap
        fields = ('id', 'title', 'plan','created_on' , 'updated_on')
class PlanItemSerializer(serializers.ModelSerializer):
    """Serializes PlanItem rows with their parent roadmap nested read-only."""
    roadmap = RoadmapSerializer(read_only=True)
    class Meta:
        model = PlanItem
        fields = ('id', 'title', 'created_on' , 'updated_on', 'roadmap', 'technologies', 'useful_links', 'tutorials')
class TestSerializer(serializers.ModelSerializer):
    """Serializes Test rows with their stack nested read-only."""
    stack = StackSerializer(read_only=True)
    class Meta:
        model = Test
        fields = ('id', 'stack', 'questions', 'solutions')
from django.db import models
from reddituser.models import RedditUser
from subreddit.models import Subreddit
from django.utils import timezone
class PostComment(models.Model):
    """A comment on a Post, with up/down votes and nested replies."""
    def getPopularity(self):
        # Net score: upvoters minus downvoters.
        return self.up_vote.count() - self.down_vote.count()
    # Author of the comment.
    user = models.ForeignKey(RedditUser, on_delete=models.CASCADE, related_name="comment_user")
    # NOTE(review): blank=False with null=True is unusual (form requires a
    # value, DB allows NULL) -- confirm this combination is intended.
    message = models.CharField(
        max_length=500,
        blank=False,
        null=True
    )
    # NOTE(review): default=timezone.now with auto_now_add=False means the
    # timestamp is editable rather than set-once -- confirm intent.
    created_at = models.DateTimeField(
        default=timezone.now,
        auto_now_add=False
    )
    updated_at = models.DateTimeField(
        default=timezone.now,
        auto_now_add=False
    )
    up_vote = models.ManyToManyField(RedditUser, related_name="comment_up_vote")
    down_vote = models.ManyToManyField(RedditUser, related_name="comment_down_vote")
    # NOTE(review): a self-referential M2M is symmetrical by default, and
    # newer Django flags related_name on symmetrical relations -- likely
    # symmetrical=False was intended for parent/child comments; confirm.
    comments = models.ManyToManyField('self', related_name="comment_comment")
class Post(models.Model):
    """A subreddit submission: text, link, or image, with votes and comments."""
    def getPopularity(self):
        # Net score: upvoters minus downvoters.
        return self.up_vote.count() - self.down_vote.count()
    # Author of the post.
    user = models.ForeignKey(
        RedditUser, on_delete=models.CASCADE, related_name="user")
    # Body text of the post.
    # NOTE(review): blank=False with null=True appears on several fields
    # below (form requires a value, DB allows NULL) -- confirm intent.
    post = models.CharField(
        max_length=500,
        blank=False,
        null=True
    )
    link = models.URLField(
        max_length=200,
        blank=False,
        null=True
    )
    image = models.ImageField(upload_to='images/')
    title = models.CharField(
        max_length=100,
        blank=False,
        null=True
    )
    # NOTE(review): default=timezone.now with auto_now_add=False leaves the
    # timestamps editable rather than set-once/auto-updated.
    created_at = models.DateTimeField(
        default=timezone.now,
        auto_now_add=False
    )
    updated_at = models.DateTimeField(
        default=timezone.now,
        auto_now_add=False
    )
    up_vote = models.ManyToManyField(RedditUser, related_name="up_vote")
    down_vote = models.ManyToManyField(RedditUser, related_name="down_vote")
    # Deleting the subreddit cascades to its posts.
    subreddit = models.ForeignKey(
        Subreddit, on_delete=models.CASCADE
    )
    comments = models.ManyToManyField(PostComment, related_name="post_comment")
|
import tensorflow as tf
import cv2
import facenet.src.align.detect_face as detect_face
def test():
    """Open the default camera and dump MTCNN face detections.

    Every frame: writes the raw frame (o.jpg), each cropped face
    (f.<n>.jpg) and the annotated frame (s.jpg) under c:\\temp\\, and
    shows a preview window.  Press 'q' in the window to quit.
    """
    video = cv2.VideoCapture(0)
    print('Creating networks and loading parameters')
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = detect_face.create_mtcnn(sess, None)
            # MTCNN detection parameters.  FIX: a dead duplicate set of
            # assignments (threshold [0.6, 0.7, 0.7], factor 0.709) that
            # was immediately overwritten has been removed; these were
            # the values actually in effect.
            minsize = 20
            threshold = [0.7, 0.7, 0.7]
            factor = 0.8
            while True:
                ret, frame = video.read()
                bounding_boxes, _ = detect_face.detect_face(frame, minsize, pnet, rnet, onet, threshold, factor)
                nrof_faces = bounding_boxes.shape[0]
                # Message reads "number of faces found: {}".
                print('找到人脸数目为:{}'.format(nrof_faces))
                # Crop and save each detected face, 1-based numbering.
                for i, face_position in enumerate(bounding_boxes, start=1):
                    face_position = face_position.astype(int)
                    cv2.imwrite("c:\\temp\\o.jpg", frame)
                    # Box layout: [x1, y1, x2, y2, score].
                    frame2 = frame[face_position[1]:face_position[3], face_position[0]:face_position[2]]
                    print(face_position[0])
                    print(face_position[1])
                    print(face_position[2])
                    print(face_position[3])
                    cv2.imwrite("c:\\temp\\f.%d.jpg" % (i), frame2)
                    cv2.rectangle(frame, (face_position[0], face_position[1]),(face_position[2], face_position[3]), (0, 255, 0), 2)
                cv2.imwrite("c:\\temp\\s.jpg", frame)
                cv2.imshow('show', frame)
                if cv2.waitKey(5) & 0xFF == ord('q'):
                    break
            video.release()
            cv2.destroyAllWindows()

test()
|
from django.conf.urls import url, include
from django.contrib import admin
from accounts.views import (login_view, logout_view, register_view)
# URL routing: admin, auth views (register/login/logout), then everything
# else is delegated to the `sleep` app.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^register/', register_view, name='register'),
    url(r'^login/', login_view, name='login'),
    url(r'^logout/', logout_view, name='logout'),
    # Catch-all: must stay last so it does not shadow the routes above.
    url(r'^', include('sleep.urls')),
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.