text stringlengths 8 6.05M |
|---|
import tkinter as tk
import tkinter.font as tkFont
import easygui
from graphSolveGain import SolveFinalGain
from graphRender import RenderSignalFlowGraph
import Maison
import Structs
maxNoOfNodes = 20
appName = 'Signal Flow Graph'
class App (tk.Frame):
    """Main application frame: an adjacency-matrix editor for a signal flow
    graph, with controls to draw the graph and solve its final gain.

    Relies on module-level globals created in the __main__ section:
    defaultFont, optionMenuItemFont, monoFont, defaultWindowBgColour,
    screenWidth, screenHeight, plus the helpers CenterifyWindow and
    ParseWindowGeometry, and the imported solvers SolveFinalGain /
    RenderSignalFlowGraph.
    """

    def __init__(self, root=None, splashWindow=None):
        """Build the matrix editor and controls; once ready, destroy
        *splashWindow* (if given) and reveal the root window."""
        super().__init__(root)
        self.root = root
        self.grid(sticky='nsew')
        self.grid_rowconfigure(1, weight=1)
        self.grid_columnconfigure(0, weight=1)
        frameMatrix = tk.Frame(self)
        frameControls = tk.Frame(self)
        frameMatrix.grid(row=0, column=0, rowspan=2, sticky='nsew', padx=10, pady=10)
        # +1 because row/column 0 holds the node-number header labels
        for i in range(0, maxNoOfNodes+1):
            frameMatrix.grid_rowconfigure(i, weight=1)
            frameMatrix.grid_columnconfigure(i, weight=1)
        frameControls.grid(row=0, column=1, sticky='ns', padx=10, pady=10)
        self.noOfNodesTkStr = tk.StringVar()
        tempOptions = [str(i)+' Nodes' for i in range(1, maxNoOfNodes+1)]
        self.noOfNodesSelector = tk.OptionMenu(frameControls, self.noOfNodesTkStr, *tempOptions)
        self.noOfNodesSelector.config(font=defaultFont)
        self.noOfNodesSelector['menu'].config(font=optionMenuItemFont)
        self.noOfNodesSelector.grid(sticky='ew')
        self.noOfNodesTkStr.set('Click here to select the Number of Nodes')
        # redraw the visible part of the matrix whenever the selection changes
        # (the trace is added after set(), so the placeholder text above does
        # not trigger a redraw)
        noOfNodesSelectorCallback = lambda internalName, index, triggerMode: self.RedrawMatrix()
        self.noOfNodesTkStr.trace('w', noOfNodesSelectorCallback)
        tk.Label(frameControls).grid(pady=10)
        # NOTE(review): font=tkFont.Font passes the *class object*, not a Font
        # instance — presumably defaultFont was intended; confirm.
        self.buttonSolve = tk.Button(frameControls, text='Solve', command=self.SolveGraph,font=tkFont.Font,bg='black',fg='blue')
        self.buttonDraw = tk.Button(frameControls, text='Draw The graph', command=self.DrawGraph,font=tkFont.Font,bg='black',fg='blue')
        self.buttonSolve.grid(sticky='E')
        self.buttonDraw.grid(sticky='E')
        tk.Label(frameControls).grid(pady=10)
        # Pre-create all header labels and entry cells for the maximum node
        # count, hidden via grid_remove(); RedrawMatrix() shows only the ones
        # needed for the selected size.
        self.rowLabels = []
        self.columnLabels = []
        for i in range(maxNoOfNodes):
            self.rowLabels.append(tk.Label(frameMatrix, text=str(i), relief=tk.SUNKEN, width=3, font=defaultFont))
            self.rowLabels[i].grid(row=i+1, column=0, sticky='nsew', padx=4, pady=4)
            self.rowLabels[i].grid_remove()
            self.columnLabels.append(tk.Label(frameMatrix, text=str(i), relief=tk.SUNKEN, width=3, font=defaultFont))
            self.columnLabels[i].grid(row=0, column=i+1, sticky='nsew', padx=4, pady=4)
            self.columnLabels[i].grid_remove()
        self.textBoxes = []
        for i in range(maxNoOfNodes):
            self.textBoxes.append([])
            for j in range(maxNoOfNodes):
                self.textBoxes[i].append(tk.Entry(frameMatrix, width=3, font=defaultFont))
                self.textBoxes[i][j].grid(row=i+1, column=j+1, sticky='nsew', padx=4, pady=4)
                self.textBoxes[i][j].grid_remove()
                # highlight the row/column headers of the focused cell
                self.textBoxes[i][j].bind('<FocusIn>', self.HighlightNodes)
                self.textBoxes[i][j].bind('<FocusOut>', self.UnhighlightNodes)
        self.update_idletasks()
        CenterifyWindow(self.root)
        if splashWindow:
            splashWindow.destroy()
            self.root.deiconify()
            self.update_idletasks()

    def RedrawMatrix (self):
        """Show exactly noOfNodes x noOfNodes entry cells (plus headers),
        hiding the rest, then re-center the window."""
        self.noOfNodes = int(self.noOfNodesTkStr.get().split()[0])
        self.grid_remove()
        for i in range(maxNoOfNodes):
            if i < self.noOfNodes:
                self.rowLabels[i].grid()
                self.columnLabels[i].grid()
            else:
                self.rowLabels[i].grid_remove()
                self.columnLabels[i].grid_remove()
        for i in range(maxNoOfNodes):
            for j in range(maxNoOfNodes):
                if i < self.noOfNodes and j < self.noOfNodes:
                    self.textBoxes[i][j].grid()
                else:
                    self.textBoxes[i][j].grid_remove()
        self.grid()
        self.update_idletasks()
        CenterifyWindow(self.root)

    def HighlightNodes (self, event):
        """<FocusIn> handler: grey the headers of the focused cell."""
        for i in range(self.noOfNodes):
            for j in range(self.noOfNodes):
                if self.textBoxes[i][j] == event.widget:
                    self.rowLabels[i].config(bg='grey')
                    self.columnLabels[j].config(bg='grey')

    def UnhighlightNodes (self, event):
        """<FocusOut> handler: restore the headers' default background."""
        for i in range(self.noOfNodes):
            for j in range(self.noOfNodes):
                if self.textBoxes[i][j] == event.widget:
                    self.rowLabels[i].config(bg=defaultWindowBgColour)
                    self.columnLabels[j].config(bg=defaultWindowBgColour)

    def ExtractMatrix (self):
        """Return the entry grid as a list of lists of strings, or None
        (after a message box) when no size has been selected yet."""
        matrix = []
        if(not hasattr(self,"noOfNodes")):
            easygui.msgbox("please enter an adjancency matrix representing the Graph!", title="simple gui")
            return None;
        for i in range(self.noOfNodes):
            matrix.append([])
            for j in range(self.noOfNodes):
                matrix[i].append(self.textBoxes[i][j].get())
        return matrix

    def PreprocessMatrix (self, matrix):
        """Replace empty cells with '0' in place and return the matrix.

        NOTE(review): callers pass ExtractMatrix()'s result straight in, so a
        None return (no size selected) raises TypeError here — confirm
        whether a guard is wanted.
        """
        n = len(matrix)
        for i in range(n):
            for j in range(n):
                if matrix[i][j] == '':
                    matrix[i][j] = '0'
        return matrix

    def DrawGraph (self):
        """'Draw The graph' button: render the current adjacency matrix."""
        matrix = self.ExtractMatrix()
        matrix = self.PreprocessMatrix(matrix)
        RenderSignalFlowGraph(matrix)

    def SolveGraph (self):
        """'Solve' button: compute the final gain and display it."""
        matrix = self.ExtractMatrix()
        matrix = self.PreprocessMatrix(matrix)
        resultRaw = SolveFinalGain(matrix)
        if(resultRaw=="No Path Found"):
            easygui.msgbox("There is No Path", title="No Path Found");
        self.ShowSolved(resultRaw)

    def ShowSolved (self, resultRaw):
        """Show *resultRaw* in a modal popup (read-only entry so the text
        can be selected/copied), centered on screen."""
        popup = tk.Toplevel(self)
        popup.title('Final Gain')
        popup.grid_columnconfigure(0, weight=1)
        popup.grid_rowconfigure(0, weight=1)
        popup.grid_rowconfigure(1, weight=1)
        frameRaw = tk.LabelFrame(popup, text='Raw Form')
        #framePretty = tk.LabelFrame(popup, text='Pretty Form')
        frameRaw.grid(sticky='nsew', padx=10, pady=10)
        frameRaw.grid_columnconfigure(0, weight=1)
        frameRaw.grid_rowconfigure(0, weight=1)
        # framePretty.grid(sticky='nsew', padx=10, pady=10)
        # framePretty.grid_columnconfigure(0, weight=1)
        #framePretty.grid_rowconfigure(0, weight=1)
        labelRaw = tk.Entry(frameRaw, font=monoFont, relief='flat', justify='center')
        labelRaw.insert(0, resultRaw)
        labelRaw.config(state='readonly', readonlybackground='white')
        # labelPretty = tk.Label(framePretty, text=resultPretty, font=monoFont, bg='white')
        labelRaw.grid(sticky='nsew', padx=10, pady=10)
        # labelPretty.grid(sticky='nsew', padx=10, pady=10)
        popup.update_idletasks()
        # enforce a minimum popup size, then center it on the screen
        width, height, posX, posY = ParseWindowGeometry(popup.geometry())
        if width < 200:
            width = 200
        if height < 150:
            height = 150
        posX = (screenWidth-width)//2
        posY = (screenHeight-height)//2
        popup.geometry('{}x{}+{}+{}'.format(width, height, posX, posY))
        popup.focus()
        popup.grab_set()
        self.wait_window(popup)

    def ChangeFontSize (self):
        """Apply a new font size to the shared Font objects.

        NOTE(review): self.fontSizeTkStr is never created in __init__, so
        this method would raise AttributeError if invoked — confirm whether
        the size selector was removed or is defined elsewhere.
        """
        fontSize = int(self.fontSizeTkStr.get().split()[0])
        defaultFont['size'] = fontSize
        monoFont['size'] = fontSize
        # menu items are clamped to the 8..12 range
        if fontSize < 8:
            optionMenuItemFont['size'] = 8
        elif fontSize < 12:
            optionMenuItemFont['size'] = fontSize
        else:
            optionMenuItemFont['size'] = 12
def ParseWindowGeometry (string):
    """Parse a Tk geometry string into (width, height, posX, posY) ints.

    Tk geometry strings look like '200x150+10+20', but offsets anchored to
    the right/bottom screen edge use '-', e.g. '640x480-5+7'.  The previous
    implementation split on '+' and failed on the '-' forms; a regex handles
    both.  Raises ValueError for a malformed string.
    """
    import re
    match = re.fullmatch(r'(\d+)x(\d+)([+-]\d+)([+-]\d+)', string)
    if match is None:
        raise ValueError('invalid geometry string: %r' % (string,))
    width, height, posX, posY = match.groups()
    return int(width), int(height), int(posX), int(posY)  # W,H,X,Y
def CenterifyWindow (toplevelWindow):
    """Reposition *toplevelWindow* so it is centered on the screen,
    keeping its current size (uses the module globals screenWidth and
    screenHeight)."""
    width, height, _, _ = ParseWindowGeometry(toplevelWindow.geometry())
    centeredX = (screenWidth - width) // 2
    centeredY = (screenHeight - height) // 2
    toplevelWindow.geometry('+{}+{}'.format(centeredX, centeredY))
if __name__ == '__main__':
    # Bootstrap: create the (hidden) root window, show a splash screen while
    # the main App constructs itself, then reveal the finished window.
    root = tk.Tk()
    root.withdraw()
    screenWidth = root.winfo_screenwidth()
    screenHeight = root.winfo_screenheight()
    splashWindow = tk.Toplevel(root)
    splashWindow.withdraw()
    splashWindow.grid_columnconfigure(0, weight=1)
    tk.Label(splashWindow, text=appName+'\n\nLOADING...').grid(sticky='ew', padx=10, pady=15)
    splashWindow.update()
    splashWindow.deiconify()
    CenterifyWindow(splashWindow)
    splashWindow.title('')
    splashWindow.focus()
    splashWindow.grab_set()
    splashWindow.update()
    # sample a throwaway label to discover the theme's default background
    # colour (used later to un-highlight the header labels)
    tempLabel = tk.Label(root, text='Specimen')
    defaultWindowBgColour = tempLabel['background']
    defaultFont = tkFont.Font(font='TkDefaultFont')
    monoFont = tkFont.Font(font='TkFixedFont')
    optionMenuItemFont = tkFont.Font(font='TkDefaultFont')
    tempLabel.destroy()
    root.title(appName)
    root.grid()
    root.grid_columnconfigure(0, weight=1)
    root.grid_rowconfigure(0, weight=1)
    # App destroys the splash window and deiconifies root when ready
    app = App(root, splashWindow)
    root.mainloop()
|
import re
def read_common_words(filename):
    '''
    Read the common-words file into a set of lower-cased, stripped lines.

    Returns an empty set when *filename* is None; prints a warning and
    returns an empty set when the file cannot be opened.
    '''
    words = set()
    if filename is not None:  # '!= None' replaced with identity test
        try:
            with open(filename) as fp:
                for line in fp:
                    words.add(line.strip().lower())
        except FileNotFoundError:
            print("Warning: Couldn't read common words file")
    return words
def read_corpus(filename, common_words, test_words):
    '''
    Read the corpus from *filename* and build co-occurrence statistics.

    Returns two dictionaries:
    - word_counts: word -> number of sentences the word appears in
    - associations: word -> {other word -> number of sentences in which
      both words appear together}

    Words in *common_words* are ignored.  *test_words* is accepted for
    interface compatibility but is not used here.  On a missing file an
    error is printed and two empty dicts are returned (previously the
    function fell off the end and returned None, which crashed callers
    that unpack the result).
    '''
    associations = {}
    word_counts = {}
    # regular expression to strip punctuation (each char replaced by a space)
    punctuations = "|".join(re.escape(x) for x in ('{', '}', '(', ')', '[', ']', ',', ' ', '\t', ':', ';', "'", '"'))
    repl1 = re.compile(punctuations)
    # regular expression to remove --
    repl2 = re.compile("--")
    # split the text into sentences on '.', '?' or '!'.
    # (bug fix: the old pattern "\.|\?\!" only split on '.' or on the literal
    # sequence '?!', so sentences ending in a lone '?' or '!' were merged)
    sent_splitter = re.compile(r"[.?!]")
    # split a sentence into words on runs of whitespace
    word_splitter = re.compile(r"\s+")
    try:
        with open(filename) as fp:
            data = fp.read()
    except FileNotFoundError:
        print("Error could not read the corpus")
        return {}, {}
    sentences = sent_splitter.split(data.lower())
    # now iterate through the sentences
    for sentence in sentences:
        sentence = repl2.sub(" ", repl1.sub(" ", sentence))
        # unique, non-common, non-empty words of this sentence
        # (bug fix: splitting yields '' at the sentence edges, which the old
        # code counted as a word)
        words = {word for word in word_splitter.split(sentence)
                 if word and word not in common_words}
        # having split up the sentence in words, go through them and
        # record the pairwise associations
        for word in words:
            word_counts[word] = word_counts.get(word, 0) + 1
            for other_word in words:
                if word != other_word:
                    counts = associations.setdefault(word, {})
                    counts[other_word] = counts.get(other_word, 0) + 1
    return word_counts, associations
def print_status(word_counts, associations):
    '''
    Pretty-print the collected statistics: each word with its sentence
    count (most frequent first), followed by its associated words in
    alphabetical order.
    '''
    print(len(word_counts), "words in words list")
    print("word_count_dict\nword_word_dict")
    # descending by count; sorted() is stable, so ties keep insertion order
    for word in sorted(word_counts, key=word_counts.get, reverse=True):
        print(word, word_counts[word])
        related = associations[word]
        for related_word in sorted(related):
            print(" ", related_word, related[related_word])
def read_test_data(filename):
    '''
    Load the test-data file into a set of lower-cased, stripped lines.
    Prints an error and returns an empty set when the file is missing.
    '''
    data = set()
    try:
        fp = open(filename)
    except FileNotFoundError:
        print("Error the test data could not be read")
        return data
    with fp:
        data.update(line.strip().lower() for line in fp)
    return data
def main(corpus_file_name, test_sets_file, commonwords_file_name = None):
    '''
    Program entry point: load the stop words and test sets, process the
    corpus, and print the resulting statistics.
    '''
    # argument evaluation order preserves the original read order:
    # common words, then test data, then the corpus itself
    counts, assoc = read_corpus(
        corpus_file_name,
        read_common_words(commonwords_file_name),
        read_test_data(test_sets_file),
    )
    print_status(counts, assoc)
if __name__ == '__main__':
    # demo run against the bundled sample corpus and word-list files
    main('punctuation.txt', 'sample0_set.txt', 'common.txt')
|
import numpy
from tigger.helpers import *
TEMPLATE = template_for(__file__)
def find_local_size(device_params, max_workgroup_size, dims):
    """
    Simple algorithm to find local_size with given limitations

    Returns a *dims*-length sequence of per-dimension work-group sizes whose
    product does not exceed max_workgroup_size.  Python 2 code (xrange);
    relies on min_blocks/product from tigger.helpers.
    """
    # shortcut for CPU devices
    if device_params.warp_size == 1:
        return [max_workgroup_size] + [1] * (dims - 1)
    # trying to find local size with dimensions which are multiples of warp_size
    unit = device_params.warp_size
    max_dims = device_params.max_work_item_sizes
    # candidate per-dimension sizes: 1 plus every multiple of warp_size that
    # fits into the work group
    sizes = [1]
    for i in xrange(1, min_blocks(max_workgroup_size, unit)):
        if i * unit <= max_workgroup_size:
            sizes.append(i * unit)
    total_size = lambda indices: product([sizes[i] for i in indices])
    # greedily grow each dimension in turn while the total still fits
    result_indices = [0] * dims
    pos = 0
    while True:
        result_indices[pos] += 1
        if result_indices[pos] < len(sizes) and total_size(result_indices) <= max_workgroup_size:
            if sizes[result_indices[pos]] > max_dims[pos]:
                # this dimension hit its per-dimension limit: back off and
                # move on to the next dimension
                result_indices[pos] -= 1
                # NOTE(review): pos == len(result_indices) can never be true
                # here (pos only grows below); presumably the guard was meant
                # to be `pos + 1 == len(result_indices)` to avoid walking past
                # the last dimension — confirm against upstream tigger/reikna.
                if pos == len(result_indices):
                    break
                pos += 1
        else:
            # cannot grow any further: undo the last increment and stop
            result_indices[pos] -= 1
            break
    return tuple([sizes[i] for i in result_indices])
def render_stub_vsize_funcs():
    # render the kernel-side stub functions used when no virtual sizing
    # machinery is needed (TEMPLATE comes from template_for(__file__))
    return TEMPLATE.get_def('stub_funcs').render()
class VirtualSizes:
    """Map an arbitrary 1-3D virtual global/local size onto the device's
    real grid limits (max_num_groups / max_work_item_sizes).

    The virtual bounding grid is factored and rearranged ("reshaped") over
    the device's grid dimensions; kernel-side index reconstruction is done
    by the rendered template functions.  Python 2 code; relies on
    wrap_in_tuple/min_blocks/product/factors/log2 from tigger.helpers.
    """

    def __init__(self, device_params, max_workgroup_size, global_size, local_size):
        """Compute the real kernel launch sizes for the requested virtual
        sizes.  Raises ValueError when the sizes cannot fit the device."""
        self.params = device_params
        self.global_size = wrap_in_tuple(global_size)
        if local_size is None:
            local_size = find_local_size(device_params, max_workgroup_size, len(global_size))
        self.local_size = wrap_in_tuple(local_size)
        if len(self.global_size) != len(self.local_size):
            raise ValueError("Global/local work sizes have differing dimensions")
        if len(self.global_size) > 3:
            raise ValueError("Virtual sizes are supported for 1D to 3D grids only")
        # number of work groups per virtual dimension, before rearranging
        self.naive_bounding_grid = [min_blocks(gs, ls)
            for gs, ls in zip(self.global_size, self.local_size)]
        if product(self.local_size) > self.params.max_work_group_size:
            raise ValueError("Number of work items is too high")
        if product(self.naive_bounding_grid) > product(self.params.max_num_groups):
            raise ValueError("Number of work groups is too high")
        self.grid_parts = self.get_rearranged_grid(self.naive_bounding_grid)
        gdims = len(self.params.max_num_groups)
        # real grid: per device dimension, the product of the parts each
        # virtual dimension contributes there
        self.grid = [product([row[i] for row in self.grid_parts])
            for i in range(gdims)]
        self.k_local_size = list(self.local_size) + [1] * (gdims - len(self.local_size))
        self.k_global_size = [l * g for l, g in zip(self.k_local_size, self.grid)]

    def get_rearranged_grid(self, grid):
        """Distribute the virtual grid over the device grid; returns one
        row of per-device-dimension factors per virtual dimension."""
        # This algorithm can be made much better, but at the moment we have a simple implementation
        # The guidelines are:
        # 1) the order of array elements should be preserved (so it is like a reshape() operation)
        # 2) the overhead of empty threads is considered negligible
        # (usually it will be true because it will be hidden by global memory latency)
        # 3) assuming len(grid) <= 3
        max_grid = self.params.max_num_groups
        if len(grid) == 1:
            return self.get_rearranged_grid_1d(grid, max_grid)
        elif len(grid) == 2:
            return self.get_rearranged_grid_2d(grid, max_grid)
        elif len(grid) == 3:
            return self.get_rearranged_grid_3d(grid, max_grid)
        else:
            raise NotImplementedError()

    def get_rearranged_grid_2d(self, grid, max_grid):
        """Two-dimensional case, built on top of the 1D rearrangement."""
        # A dumb algorithm which relies on 1d version
        grid1 = self.get_rearranged_grid_1d([grid[0]], max_grid)
        # trying to fit in remaining dimensions, to decrease the number of operations
        # in get_group_id()
        new_max_grid = [mg // g1d for mg, g1d in zip(max_grid, grid1[0])]
        if product(new_max_grid[1:]) >= grid[1]:
            grid2 = self.get_rearranged_grid_1d([grid[1]], new_max_grid[1:])
            grid2 = [[1] + grid2[0]]
        else:
            grid2 = self.get_rearranged_grid_1d([grid[1]], new_max_grid)
        return grid1 + grid2

    def get_rearranged_grid_3d(self, grid, max_grid):
        """Three-dimensional case, built on top of the 2D rearrangement."""
        # same dumb algorithm, but relying on 2d version
        grid1 = self.get_rearranged_grid_2d(grid[:2], max_grid)
        # trying to fit in remaining dimensions, to decrease the number of operations
        # in get_group_id()
        new_max_grid = [mg // g1 // g2 for mg, g1, g2 in zip(max_grid, grid1[0], grid1[1])]
        if len(new_max_grid) > 2 and product(new_max_grid[2:]) >= grid[2]:
            grid2 = self.get_rearranged_grid_1d([grid[2]], new_max_grid[2:])
            grid2 = [[1, 1] + grid2[0]]
        elif len(new_max_grid) > 1 and product(new_max_grid[1:]) >= grid[2]:
            grid2 = self.get_rearranged_grid_1d([grid[2]], new_max_grid[1:])
            grid2 = [[1] + grid2[0]]
        else:
            grid2 = self.get_rearranged_grid_1d([grid[2]], new_max_grid)
        return grid1 + grid2

    def get_rearranged_grid_1d(self, grid, max_grid):
        """Spread the single size grid[0] over the device dimensions in
        max_grid, preferring exact factorizations, then powers of two."""
        g = grid[0]
        if g <= max_grid[0]:
            return [[g] + [1] * (len(max_grid) - 1)]
        # for cases when max_grid was passed from higher dimension methods,
        # and there is no space left
        if max_grid[0] == 0:
            return [[1] + self.get_rearranged_grid_1d([g], max_grid[1:])[0]]
        # first check if we can split the number
        fs = factors(g)
        for f, div in reversed(fs):
            if f <= max_grid[0]:
                break
        if f != 1 and div <= product(max_grid[1:]):
            res = self.get_rearranged_grid_1d([div], max_grid[1:])
            return [[f] + res[0]]
        # fallback: will have some empty threads
        # picking factor equal to the power of 2 to make id calculations faster
        # Starting from low powers in order to minimize the number of resulting empty threads
        for p in range(1, log2(max_grid[-1]) + 1):
            f = 2 ** p
            remainder = min_blocks(g, f)
            if remainder <= product(max_grid[:-1]):
                res = self.get_rearranged_grid_1d([remainder], max_grid[:-1])
                return [res[0] + [f]]
        # fallback 2: couldn't find suitable 2**n factor, so using the maximum size
        f = max_grid[0]
        remainder = min_blocks(g, f)
        res = self.get_rearranged_grid_1d([remainder], max_grid[1:])
        return [[f] + res[0]]

    def render_vsize_funcs(self):
        # render the kernel-side virtual-size helper functions
        return TEMPLATE.get_def('normal_funcs').render(vs=self, product=product)

    def get_call_sizes(self):
        # the real (global, local) sizes to launch the kernel with
        return tuple(self.k_global_size), tuple(self.k_local_size)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class ImplResourceAttributes(object):
    """String keys naming the metadata attributes of a resource
    implementation.

    NOTE(review): FIELD_DESCRIPTION maps to the plural key
    'fields_description' — looks inconsistent with the constant name, but
    the string value may be load-bearing for existing data; confirm before
    changing.
    """
    DESCRIPTION = 'description'
    DEPRECATED = 'deprecated'
    RESULT_TYPE = 'result_type'
    FIELD_DESCRIPTION = 'fields_description'
|
import simplegui
import random
import math
num_range = 100
# helper function to start and restart the game
def new_game():
    """Start (or restart) a round: reset the guess budget for the current
    range (7 guesses for [0,100), 10 for [0,1000)) and pick a new secret
    number.  Python 2 / CodeSkulptor simplegui code."""
    # initialize global variables used in your code here
    global allowed_guesses
    if num_range == 100:
        allowed_guesses = 7
    elif num_range == 1000:
        allowed_guesses = 10
    global secret_number
    secret_number = random.randrange(0, num_range)
    print "New game. Range is from 0 to", num_range
    print "Number of remaining guesses is", allowed_guesses
    print ""
# define event handlers for control panel
def range100():
    # button that changes the range to [0,100) and starts a new game
    # (new_game() recomputes allowed_guesses from num_range, so the
    # assignment here is effectively redundant)
    global allowed_guesses
    allowed_guesses = 7
    global num_range
    num_range = 100
    new_game()
def range1000():
    # button that changes the range to [0,1000) and starts a new game
    # (both allowed_guesses and secret_number are immediately overwritten
    # by new_game(), so the assignments here are redundant)
    global allowed_guesses
    allowed_guesses = 10
    global num_range
    num_range = 1000
    global secret_number
    secret_number = random.randrange(0, 1000)
    new_game()
def input_guess(guess):
    """Input-field handler: compare the player's *guess* (a string) with
    the secret number, spend one guess, and report higher/lower/correct.
    Starts a new round on a win or when the guesses run out."""
    # main game logic goes here
    global secret_number
    global allowed_guesses
    allowed_guesses = allowed_guesses - 1
    print "Guess was", int(guess)
    print "Number of remaining guesses is", allowed_guesses
    if int(guess) == secret_number:
        print "Correct!"
        print ""
        new_game()
    elif allowed_guesses < 1:
        print "You ran out of guesses. The number was", secret_number
        print ""
        new_game()
    elif int(guess) > secret_number:
        print "Lower!" '\n'
    elif int(guess) < secret_number:
        print "Higher!" '\n'
# create frame
frame = simplegui.create_frame("Guess the Number", 200, 200)
# register event handlers for control elements and start frame
frame.add_button("Range is [0, 100)", range100, 200)
frame.add_button("Range is [0, 1000)", range1000, 200)
frame.add_input("Enter a guess", input_guess, 200)
# call new_game to initialize the first round before the frame starts
new_game()
frame.start() |
#Python Module:
'''
* Modules are used to categorize python code into smaller parts.
* A module is simply a python file, where classes, functions and variables are defined.
* Grouping similar code into a single file makes it easy to access.
Advantages of Python modules:
 1. Reusability: a module can be imported and used by other Python code,
 so the same definitions do not have to be rewritten.
-----------------------
import MyModule
MyModule.add()
MyModule.square(5)
print(MyModule.sqrt(9))
MyModule.isodd(9)
----------------------
from MyModule import add
add()
output:
300
---------------------
from MyModule import add,square,sqrt,isodd
add()
square(5)
print(sqrt(81))
isodd(9)
output:
300
25
9.0
True
------------------
from MyModule import *
add()
square(5)
print(sqrt(81))
isodd(9)
output:
300
25
9.0
True
'''
|
# -*- coding: utf-8 -*-
import re, os.path
import urllib, urllib2, urlparse, cookielib
import string, json
def removeDuplicates(seq):
    """Return a list of seq's items with duplicates dropped, keeping the
    first occurrence of each item in its original order."""
    seen = set()
    result = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
def stripHtml(text):
    """Remove HTML comments/tags and '»' markers from *text* and trim the
    surrounding whitespace."""
    cleaned = re.sub(r'(<!--.*?-->|<[^>]*>)', '', text)
    return cleaned.replace('»', '').strip()
def repPolChars(txt):
    """Replace the two-character UTF-8 byte sequences of Polish letters
    with their ASCII look-alikes and return the result."""
    # (utf-8 pair, ascii replacement) — lowercase/uppercase side by side
    replacements = (
        ('\xc4\x85', 'a'), ('\xc4\x84', 'A'),
        ('\xc4\x87', 'c'), ('\xc4\x86', 'C'),
        ('\xc4\x99', 'e'), ('\xc4\x98', 'E'),
        ('\xc5\x82', 'l'), ('\xc5\x81', 'L'),
        ('\xc5\x84', 'n'), ('\xc5\x83', 'N'),
        ('\xc3\xb3', 'o'), ('\xc3\x93', 'O'),
        ('\xc5\x9b', 's'), ('\xc5\x9a', 'S'),
        ('\xc5\xba', 'z'), ('\xc5\xb9', 'Z'),
        ('\xc5\xbc', 'z'), ('\xc5\xbb', 'Z'),
    )
    for src, dst in replacements:
        txt = txt.replace(src, dst)
    return txt
def httpRequest(params = {}, post_data = None):
    """Fetch a URL with urllib2 (Python 2), with optional cookie handling,
    POST data and gzip decoding.

    params keys used: 'url' (required), 'header', 'use_cookie',
    'cookiefile', 'load_cookie', 'save_cookie', 'raw_post_data',
    'return_data'.  When 'return_data' is falsy the opened response object
    is returned; otherwise the (possibly gzip-decoded) body is returned.

    NOTE(review): the mutable default `params={}` is mutated below
    ('use_cookie' may be set), so state leaks between calls that rely on
    the default — confirm callers always pass their own dict.
    NOTE(review): HTTP_ERRORS, xbmcgui, StringIO and gzip are not imported
    in this file; the error/gzip paths depend on the surrounding Kodi
    plugin environment.
    """
    #print('params=',params)
    #print('post_data=',post_data)
    #self.proxyURL = '84.10.15.134:8080'
    #self.proxyURL = '178.217.113.62:8080'
    #self.useProxy = True
    def urlOpen(req, customOpeners):
        # open the request through the custom opener chain, if any
        if len(customOpeners) > 0:
            opener = urllib2.build_opener( *customOpeners )
            response = opener.open(req)
        else:
            response = urllib2.urlopen(req)
        return response
    cj = cookielib.LWPCookieJar()
    response = None
    req = None
    out_data = None
    opener = None
    if 'header' in params:
        headers = params['header']
    else:
        headers = { 'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0' }
    customOpeners = []
    #cookie support: a cookiefile plus load/save implies use_cookie
    if 'use_cookie' not in params and 'cookiefile' in params and ('load_cookie' in params or 'save_cookie' in params):
        params['use_cookie'] = True
    if params.get('use_cookie', False):
        customOpeners.append( urllib2.HTTPCookieProcessor(cj) )
        if params.get('load_cookie', False) and os.path.isfile(params['cookiefile']) :
            cj.load(params['cookiefile'], ignore_discard = True)
    #proxy support
    #if self.useProxy == True:
    # if dbg == True: print('getURLRequestData USE PROXY')
    # customOpeners.append( urllib2.ProxyHandler({"http":self.proxyURL}) )
    if None != post_data:
        #if dbg == True: print('pCommon - getURLRequestData() -> post data: ' + str(post_data))
        if params.get('raw_post_data', False):
            dataPost = post_data
        else:
            dataPost = urllib.urlencode(post_data)
        #print('dataPost,headers=',dataPost,headers)
        req = urllib2.Request(params['url'], dataPost, headers)
    else:
        req = urllib2.Request(params['url'], None, headers)
    if not params.get('return_data', False):
        # caller wants the raw response object, not the body
        out_data = urlOpen(req, customOpeners)
    else:
        gzip_encoding = False
        try:
            response = urlOpen(req, customOpeners)
            if response.info().get('Content-Encoding') == 'gzip':
                gzip_encoding = True
            data = response.read()
            response.close()
        except urllib2.URLError, e:
            if hasattr(e, 'code'):
                """
                if HTTP_ERRORS[e.code]:
                    kom = HTTP_ERRORS[e.code]
                else:
                    kom=''
                """
                try:
                    kom = HTTP_ERRORS[e.code]
                except:
                    kom=''
                print 'HTTP Error '+str(e.code) + ' ' + kom
                xbmcgui.Dialog().ok('HTTP Error', 'kod: '+str(e.code),kom)
                out_data = '' #'Error HTTP:' + str(e.code)
                data = '' # self.net.http_GET(self.url, headers=self.headers).content.decode('unicode_escape')
                #print ('Błąd HTTP: '+str(e.code) +' url='+params['url'])
            elif hasattr(e, 'reason'):
                xbmcgui.Dialog().ok('Błąd URL', str(e.reason))
                data = 'Error URL:' + str(e.reason)
        if gzip_encoding:
            print('pCommon - getURLRequestData() -> Content-Encoding == gzip')
            buf = StringIO(data)
            f = gzip.GzipFile(fileobj=buf)
            out_data = f.read()
        else:
            out_data = data
    if params.get('use_cookie', False) and params.get('save_cookie', False):
        #self.checkDir(ptv.getAddonInfo('path') + os.path.sep + "cookies")
        cj.save(params['cookiefile'], ignore_discard = True)
    return out_data
|
import collections
# Build a word -> index vocabulary from a whitespace-tokenized text file,
# assigning indices in first-seen order.
with open('news.txt', 'r',encoding='utf-8') as f:
    line = f.read() #type str
    token = line.split()
    # NOTE(review): the -1 default is never used — membership is tested with
    # `in` (which does not insert) before every assignment, so a plain dict
    # would behave identically.
    token2idx = collections.defaultdict(lambda: -1)
    for word in token:
        if word not in token2idx:
            token2idx[word] = len(token2idx)
    print(token2idx)
|
"""
Draw a simple, perfectly self-similar tree, ideally using recursion.
Each branch splits off into 2 smaller branches, of half the length and
2/3 of the thickness, separated by 30 degrees. Go 6 layers deep.
"""
import turtle
def draw_simple_tree(start_heading, width, length, depth_remaining):
    """Recursively draw a self-similar binary tree with the global turtle.

    Draws one branch of pen size *width* and length *length* along
    *start_heading*; while depth remains, spawns two children rotated
    -20 and +20 degrees at 0.75x width and 0.5x length, then retraces to
    the branch origin with the pen up.  (NOTE(review): the module docstring
    mentions 30 degrees and 2/3 thickness, which does not match these
    constants — confirm which is intended.)
    """
    turtle.setheading(start_heading)
    turtle.pensize(width)
    turtle.forward(length)
    if depth_remaining > 1:
        # left child first, then right child
        for turn in (-20, 20):
            draw_simple_tree(start_heading + turn,
                             width * 0.75,
                             length * 0.5,
                             depth_remaining - 1)
    # walk back to the start of this branch without drawing
    turtle.setheading(start_heading)
    turtle.penup()
    turtle.backward(length)
    turtle.pendown()
import subprocess
# Demo (Python 2): capture a child process's stdout through a pipe.
proc = subprocess.Popen(['echo', 'to stdout'],
                        stdout=subprocess.PIPE)
stdout_val, _ = proc.communicate()
print 'stdout:', repr(stdout_val)
# stdout: 'to stdout\n'
|
import os
import sys
import time
import braindecode.hyperopt.hyperopt as hyperopt
__authors__ = ["Katharina Eggensperger"]
__contact__ = "automl.org"
import logging
def parse_cli():
    """
    Provide a generic command line interface for benchmarks. It will just parse
    the command line according to simple rules and return two dictionaries, one
    containing all arguments for the benchmark algorithm like dataset,
    crossvalidation metadata etc. and the containing all learning algorithm
    hyperparameters.
    Parsing rules:
    - Arguments with two minus signs are treated as benchmark arguments, Xalues
    are not allowed to start with a minus. The last argument must --params,
    starting the hyperparameter arguments.
    - All arguments after --params are treated as hyperparameters to the
    learning algorithm. Every parameter name must start with one minus and must
    have exactly one value which has to be given in single quotes.
    - Arguments with no value before --params are treated as boolean arguments
    Example:
    python neural_network.py --folds 10 --fold 1 --dataset convex --params
    -depth '3' -n_hid_0 '1024' -n_hid_1 '1024' -n_hid_2 '1024' -lr '0.01'
    """
    args = {}
    arg_name = None      # benchmark argument currently being collected
    arg_values = None    # values gathered for arg_name so far
    parameters = {}
    cli_args = sys.argv
    found_params = False
    # `skip` implements a look-ahead: when True, the current token was
    # already consumed as the value of the previous one (or is argv[0])
    skip = True
    iterator = enumerate(cli_args)
    for idx, arg in iterator:
        if skip:
            skip = False
            continue
        else:
            skip = True
        if arg == "--params":
            # flush the benchmark argument in progress, then switch to
            # hyperparameter mode
            if arg_name:
                args[arg_name] = " ".join(arg_values)
            found_params = True
            skip = False
        elif arg[0:2] == "--" and not found_params:
            # new benchmark argument: flush the previous one first
            if arg_name:
                args[arg_name] = " ".join(arg_values)
            arg_name = arg[2:]
            arg_values = []
            skip = False
        elif arg[0:2] == "--" and found_params:
            raise ValueError("You are trying to specify an argument after the "
                             "--params argument. Please change the order.")
        elif arg[0] == "-" and arg[0:2] != "--" and found_params:
            # hyperparameter: consume the next token as its value
            # (skip stays True, so the value token is not re-processed)
            parameters[cli_args[idx][1:]] = cli_args[idx+1]
        elif arg[0] == "-" and arg[0:2] != "--" and not found_params:
            raise ValueError("You either try to use arguments with only one lea"
                             "ding minus or try to specify a hyperparameter bef"
                             "ore the --params argument. %s" %
                             " ".join(cli_args))
        elif arg[0:2] != "--" and not found_params:
            # plain token before --params: another value for arg_name
            arg_values.append(arg)
            skip = False
        elif not found_params:
            raise ValueError("Illegal command line string, expected an argument"
                             " starting with -- but found %s" % (arg,))
        else:
            raise ValueError("Illegal command line string, expected a hyperpara"
                             "meter starting with - but found %s" % (arg,))
    return args, parameters
def main(params, **kwargs):
    """Run one hyperopt training with *params*; benchmark arguments arrive
    as **kwargs.  Returns the training result.  Python 2 code."""
    #print 'Params: ', params,
    print "kwargs", kwargs
    # kwargs values are strings (from parse_cli), hence the '1' comparison
    if "debug" in kwargs and kwargs['debug'] == '1':
        print "#" * 80
        print "# DEBUGGING "
        print "#" * 80
        logging.basicConfig(level=logging.DEBUG)
    y = hyperopt.train_hyperopt(params)
    print 'Result: ', y
    return y
if __name__ == "__main__":
    starttime = time.time()
    args, params = parse_cli()
    print params
    dataset_dir = args['dataset_dir']
    fold = int(float(args['fold']))
    dataset_list = ['A01TE.mat',
                    'A02TE.mat',
                    'A03TE.mat',
                    'A04TE.mat',
                    'A05TE.mat',
                    'A06TE.mat',
                    'A07TE.mat',
                    'A08TE.mat',
                    'A09TE.mat']
    # We assume we split our datasets into 10 folds
    # the given fold variable is referring to all folds of all datasets
    # concatenated, so e.g. fold 37 is
    # dataset 3, fold 7 in 0-based indexing
    # (Python 2: '/' below is integer division for ints)
    assert len(dataset_list) == (int(float(args['folds'])) / 10)
    dataset_nr = fold / 10
    inner_fold_nr = fold % 10
    # debug: (should be 0.649123 for rawnet)
    #dataset_nr = 0
    #inner_fold_nr = 9
    params['dataset_filename'] = os.path.join(dataset_dir, dataset_list[dataset_nr])
    params['i_test_fold'] = inner_fold_nr
    assert os.path.isfile(params['dataset_filename'])
    print "Using dataset %s" % params['dataset_filename']
    result = main(params, **args)
    duration = time.time() - starttime
print "Result for ParamILS: %s, %f, 1, %f, %d, %s" % \
("SAT", abs(duration), result, -1, str(__file__)) |
#! /usr/bin/python
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#c1 S3(cache)
#c2 S3(cache) + S4(cache) + S5(cache)
#c3 S2(cache) + S3(cache) + S4(cache) + S5(cache)
#c4 S2(cache) + S3(cache) + S4-5(cache)
#c5 S2(cache) + S3(cache) + S4(cache) + S5(2way-cache)
# Speedup per graph (baseline time / configuration time) for the four cache
# configurations c1..c4 described in the key comments above.  Python 2 script.
raw_data = {'graph': ['Youtube', 'LiveJournal', 'Pokec', 'RMAT-19-32', 'RMAT-21-32'],
        'c1': [1.4022/0.622, 9.7264/4.9670, 3.8728/2.2921, 2.1245/0.7186, 8.6928/3.1482],
        'c2': [1.4022/0.462, 9.7264/4.3352, 3.8728/1.5297, 2.1245/0.6692, 8.6928/3.6212],
        'c3': [1.4022/0.464, 9.7264/4.3420, 3.8728/1.4992, 2.1245/0.6694, 8.6928/3.6276],
        'c4': [1.4022/0.444, 9.7264/6.2370, 3.8728/1.7054, 2.1245/0.6595, 8.6928/6.0361]
        }
df = pd.DataFrame(raw_data, columns = ['graph', 'c1', 'c2', 'c3', 'c4'])
label=('c1', 'c2', 'c3', 'c4')
# Setting the positions and width for the bars
pos = list(range(len(df['c1'])))
width = 0.15
ecolor='k'
lw=0.5
print pos
#cmap = plt.get_cmap('jet')
#colors = cmap(np.linspace(0, 1.0, len(fuck_label)))
# Plotting the bars: four grouped bar series, distinguished by hatch
# pattern (white fill) rather than colour
fig, ax = plt.subplots(figsize=(10,5))
# Create a bar with pre_score data,
# in position pos,
plt.bar(pos,
        #using df['pre_score'] data,
        df['c1'],
        # of width
        width,
        linewidth=lw,
        edgecolor='k',
        hatch=4*'/',
        # with alpha 0.5
        alpha=0.5,
        # with color
        #color=colors[0],
        color='w',
        # with label the first value in first_name
        label=label[0])
# Create a bar with mid_score data,
# in position pos + some width buffer,
plt.bar([p + width for p in pos],
        #using df['mid_score'] data,
        df['c2'],
        # of width
        width,
        # with alpha 0.5
        alpha=0.5,
        # with color
        linewidth=lw,
        edgecolor='k',
        hatch=4*'.',
        color='w',
        #color=colors[1],
        # with label the second value in first_name
        label=label[1])
# Create a bar with post_score data,
# in position pos + some width buffer,
plt.bar([p + width*2 for p in pos],
        #using df['post_score'] data,
        df['c3'],
        # of width
        width,
        linewidth=lw,
        edgecolor='k',
        # with alpha 0.5
        alpha=0.5,
        # with color
        hatch=4*'-',
        color='w',
        #color=colors[2],
        # with label the third value in first_name
        label=label[2])
# Create a bar with post_score data,
# in position pos + some width buffer,
plt.bar([p + width*3 for p in pos],
        #using df['post_score'] data,
        df['c4'],
        # of width
        width,
        linewidth=lw,
        edgecolor='k',
        # with alpha 0.5
        alpha=0.5,
        # with color
        #color=colors[3],
        hatch=4*'\\',
        color='w',
        # with label the third value in first_name
        label=label[3])
# Set the y axis label
ax.set_ylabel('Performance Speedup')
# Set the chart's title
#ax.set_title('Test Subject Scores')
# Set the position of the x ticks (centered under each group of 4 bars)
ax.set_xticks([p + 1.5 * width for p in pos])
# Set the labels for the x ticks
ax.set_xticklabels(df['graph'])
# Setting the x-axis and y-axis limits
#plt.xlim(min(pos)-width, max(pos)+width*4)
#plt.ylim([0, max(df['c1'] + df['c2'] + df['c3'] + df['c4'] + df['c5'] + df['c6'] + df['c7'] + df['c8'] + df['c9'])] )
# Adding the legend and showing the plot
plt.legend(['cache(S4)', 'cache(S4, S5, S6)', 'cache(S3, S4, S5, S6)', 'cache(S3, S4, S5S6)'], ncol=2, loc='lower center')
ax.grid(linewidth='0.5')
ax.xaxis.grid(False)
#plt.show()
plt.savefig("../cache-performance.pdf", bbox_inches='tight')
|
#!/usr/bin/env python
import os
import re
def main():
    """Normalize every bundled puzzle file, one difficulty level at a time."""
    for level in ('easy', 'simple', 'intermediate', 'expert'):
        cleanup_puzzle(level)
def cleanup_puzzle(level):
    """Rewrite design/puzzles_<level>.txt into res/puzzles_<level>.txt.

    Keeps only digits and '.' characters from the input and emits one
    81-character puzzle per output line.  Characters accumulate across
    input lines until a full 9x9 grid (81 cells) is collected.

    Fixes: the input was opened in 'rb' mode, which makes the str-pattern
    re.findall() raise TypeError on Python 3, and the handle was never
    closed — both files now use text mode inside a `with` block.
    """
    buffer = ""
    with open('design/puzzles_%s.txt' % level) as src, \
         open('res/puzzles_%s.txt' % level, 'w') as dst:
        for line in src:
            buffer += "".join(re.findall(r"[.0-9]", line))
            if len(buffer) == 81:
                dst.write(buffer)
                dst.write("\n")
                buffer = ""
if __name__ == "__main__":
    # normalize all four difficulty levels when run as a script
    main()
|
from django.contrib.syndication.views import Feed
from .models import Post
class LatestPosts(Feed):
    """Syndication feed of the five most recent published blog posts.

    NOTE(review): item order depends entirely on the queryset returned by
    Post.objects.published() — confirm it sorts newest-first.
    """
    title = "Q Blog"
    link = "/feed/"
    description = "Latest Posts of Q"

    def items(self):
        # only published posts, capped at five entries
        return Post.objects.published()[:5]

    def item_title(self, item):
        return item.title

    def item_description(self, item):
        return item.description
|
# Generated by Django 3.1 on 2020-10-16 09:39
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Profile.pinterest_url to linkin_url.

    NOTE(review): 'linkin_url' looks like a typo for 'linkedin_url', but a
    generated migration must match the field name in models.py — fix it
    with a follow-up migration rather than by editing this one.
    """

    dependencies = [
        ('theblog', '0011_profile_website_url'),
    ]
    operations = [
        migrations.RenameField(
            model_name='profile',
            old_name='pinterest_url',
            new_name='linkin_url',
        ),
    ]
|
#!/usr/bin/python
import os
import select
class Counters(object):
    """Decode and pretty-print the debug counters returned by the device's
    'd' command.

    The reply is a line like 'd0102...'; after the leading 'd', each counter
    occupies a two-digit hex field.  `counters` maps each counter name to
    its field number, with field 4 being the first field after the header.
    """

    # counter name -> field index in the reply string
    counters = {
        'storetokenandreturn' : 4,
        'handledata' : 5,
        'overflow' : 6,
        'ignorepacket' : 7,
        'sendnakandreti' : 8,
        'handlein' : 9,
        'handlein1' : 10,
        'handlein3' : 11,
        'se0' : 12,
        'soferror': 13
    }

    def __init__(self, instring):
        """Parse *instring* (including the leading 'd') into self.values."""
        self.instring = instring
        self.values = {}
        for (k, v) in self.counters.items():
            # field v is the 2-char hex slice starting right after 'd'
            n = int(instring[1+(v-4)*2:3+(v-4)*2], 16)
            self.values[k] = n

    def __repr__(self):
        restr = ""
        for k in self.counters:
            restr += "%s = 0x%02x (%u)\n" % (k, self.values[k], self.values[k])
        # consistency check: se0 = handledata + ignorepacket + handlein +
        # storetokenandreturn (renamed from `sum`, which shadowed the builtin)
        total = self.handledata + self.ignorepacket + self.handlein + self.storetokenandreturn
        restr += "0x%02x ?= 0x%02x diff = 0x%02x \n" % (self.values['se0'], total, self.se0 - total)
        restr += "lag = 0x%02x\n" % (self.se0 - self.handlein1)
        return restr

    def __getattr__(self, attname):
        """Expose each counter as an attribute (c.se0, c.handledata, ...)."""
        try:
            return self.values[attname]
        except KeyError:
            # AttributeError keeps hasattr()/getattr() semantics intact
            # (the original leaked a KeyError for unknown attributes)
            raise AttributeError(attname)
def nreadline(f):
    """Read one line from *f* under a 1-second SIGALRM timeout.

    Returns "" when the read is interrupted by the alarm.

    Fixes the original, which called ``self.readline()`` on an undefined
    ``self`` (the parameter is ``f``) and left the alarm pending so it
    could kill the process up to a second after a successful read.
    """
    try:
        signal.alarm(1)
        line = f.readline()
    except IOError:
        line = ""
    finally:
        signal.alarm(0)  # cancel the pending alarm
    return line
def nread(f):
    """Read everything available from *f* under a 1-second SIGALRM timeout.

    Returns "" when the read is interrupted.  The parameter was originally
    named ``self`` although this is a module-level function called
    positionally (``nread(usb)``), so renaming it is caller-compatible.
    The alarm is now cancelled after the read completes.
    """
    try:
        signal.alarm(1)
        data = f.read()
    except IOError:
        data = ""
    finally:
        signal.alarm(0)  # cancel the pending alarm
    return data
# Claim the UUCP-style serial-port lock file by writing our PID into it.
# A context manager guarantees the handle is closed even if the write fails.
with open("/var/lock/LCK..ttyAMA0", "w") as f:
    f.write(str(os.getpid()))
# Interactive smoke test (Python 2 syntax: print statements).  Talks to real
# hardware: the Pi UART and a USB CDC device.
if __name__ == '__main__':
    serial=open("/dev/ttyAMA0","w+b")
    usb=open("/dev/ttyACM0","w+b")
    # Drain any pending output from both ports first.
    print "usb:",nread(usb)
    print "serial:",nread(serial)
    # 'd' asks the firmware to dump its debug counters; read twice to see
    # how the counters move between requests.
    serial.write("d")
    line = serial.readline()
    print line
    print Counters(line)
    serial.write("d")
    line = serial.readline()
    print line
    print Counters(line)
|
#!/usr/bin/env python3
import pandas as pd
import argparse
def crime_corr(target='net'):
    """correlation of crime rate with number of moves in or out of a ward.

    :param target: flow direction selector used in the CSV filename
                   (e.g. 'net'); returns the correlation DataFrame.
    """
    # Burglary counts per local-authority district (LAD), 2016.
    crime = pd.read_csv("data/Burglary_LAD_2016.csv")
    # Migration flows per LAD, keyed by LAD code.
    migration = pd.read_csv("data/sales_"+target+"_LAD_2016.csv").set_index('lad16cd')
    # Each flow column expressed as a percentage of the district total.
    migration_perc = migration.drop(columns=['Total']).div(migration['Total'], axis=0) * 100
    # Join crime to absolute flows on the LAD code.
    merged = crime.merge(migration, left_on='lad16cd', right_on=migration.index).set_index('lad16cd')
    # NOTE(review): merged_perc is built but never used — only `merged`
    # feeds corr_df; presumably the percentage correlation was also meant
    # to be reported.
    merged_perc = crime.merge(migration_perc, left_on='lad16cd', right_on=migration_perc.index).set_index('lad16cd')
    corr_df = merged.corr()
    print("\nCorrelation of crime with %s flow: " % target)
    # First row: crime column against every flow column.
    print(corr_df.iloc[0].round(3))
    return corr_df
def main():
    """Print correlations of housing moves (sales or rentals) with distance,
    then crime correlations.

    Relies on the module-level ``args`` namespace created in the __main__
    block — calling main() without it raises NameError.
    """
    if args.r:
        r = 'Rentals'
        df = pd.read_csv("data/rentals_and_distance.csv", index_col=0)
        var_list=['NumberOfRentals', 'RentUnder250', 'RentOver250',
                  'Terraced', 'Flat', 'SemiDetached', 'Detached', 'Bungalow',
                  'PropertyTypeUnknown', 'Beds1to3', 'Beds4Plus', 'distance']
    else:
        r = 'Sales'
        df = pd.read_csv("data/sales_and_distance.csv", index_col=0)
        var_list = ['NumberOfMoves', 'MovesUnder250k', 'MovesOver250k',
                    'Terraced', 'Flat', 'SemiDetached', 'Detached',
                    'Beds1to3', 'Beds4Plus', 'distance']
    print("\nCorrelation of %s with distance: " % r)
    df_cor = df[var_list].corr()
    # Drop the final row (distance vs itself) from the printout.
    print(df_cor['distance'].round(3)[:-1])
    # NOTE(review): the returned crime correlation frame is unused here.
    crime_corr_df = crime_corr('net')
    return df_cor
if __name__ == "__main__":
    # `args` is intentionally module-level: main() reads it directly.
    parser = argparse.ArgumentParser()
    parser.add_argument("-r", action='store_true',
                        help="use rental data.")
    args = parser.parse_args()
    main()
|
import math

# Read two points, one "x y" pair per line.
# split() (no argument) also tolerates repeated/odd whitespace.
x1, y1 = map(float, input().split())
x2, y2 = map(float, input().split())
# math.hypot is the idiomatic — and numerically safer — Euclidean distance.
distancia = math.hypot(x2 - x1, y2 - y1)
print("{:.4f}".format(distancia))
def part(arr):
    """Return Alan Partridge's catchphrase with one '!' per related term
    found in *arr* (counting duplicates), or his lament when none appear."""
    related = ("Partridge", "PearTree", "Chat", "Dan", "Toblerone",
               "Lynn", "AlphaPapa", "Nomad")
    hits = sum(arr.count(term) for term in related)
    if hits:
        return "Mine's a Pint{}".format('!' * hits)
    return "Lynn, I've pierced my foot on a spike!!"
'''
To celebrate today's launch of my Hero's new book: Alan Partridge: Nomad,
We have a new series of kata arranged around the great man himself.
Given an array of terms, if any of those terms relate to Alan Partridge, return Mine's a Pint!
The number of ! after the t should be determined by the number of Alan related terms you
find in the provided array (x). The related terms are:
Partridge
PearTree
Chat
Dan
Toblerone
Lynn
AlphaPapa
Nomad
If you don't find any related terms, return 'Lynn, I've pierced my foot on a spike!!'
All Hail King Partridge
Other katas in this series:
'''
|
#!/usr/bin/env python
# # Sample Usage:
# # ./m2o-analysis.py enw.words.gz enw.pos.gz
# # > perp>1.75
# # dance 1.75 12
# # Crowd 1.75 8
# # meltdown 1.75 4
# # Personnel 1.75 4
# # ...
# # >
# #
from collections import defaultdict as dd
import gzip
from itertools import izip
import math
import re
import sys
def read_file(name):
    """Return all lines of *name*.

    '-' selects stdin; a '.gz' suffix selects gzip decompression.

    Unlike the original, stdin is NOT closed after reading (closing it
    broke the interactive loop below), and regular/gzip handles are closed
    even when readlines() raises.
    """
    if name == "-":
        return sys.stdin.readlines()
    if name.endswith('.gz'):
        f = gzip.open(name)
    else:
        f = open(name)
    try:
        return f.readlines()
    finally:
        f.close()
def m2o_mapping(cluster, pos):
    """Many-to-one mapping: each cluster label -> its most frequent POS tag.

    Fixes the original, which iterated ``pos_counts.itervalues()`` while
    unpacking (key, value) pairs — it needed the items.  ``zip``/``items``
    keep the behavior identical on Python 2 while also running on Python 3.
    """
    pos_counts = dd(lambda: dd(int))
    for c, p in zip(cluster, pos):
        pos_counts[c][p] += 1
    mapping = {}
    for c, pc in pos_counts.items():
        # Most common tag for this cluster label.
        mapping[c] = max(pc, key=lambda t: pc[t])
    return mapping
def m2o_score(cluster, pos, mapping):
    """Fraction of tokens whose mapped cluster tag equals the gold POS tag.

    Returns 0.0 for empty input instead of raising ZeroDivisionError, and
    uses ``zip`` so the code runs on both Python 2 and 3.
    """
    if not cluster:
        return 0.0
    match = 0
    for c, p in zip(cluster, pos):
        if mapping[c] == p:
            match += 1
    return float(match) / len(cluster)
def entropy(vec):
    """Shannon entropy, in bits, of a vector of non-negative counts."""
    total = float(sum(vec))
    return sum(-(v / total) * math.log(v / total, 2) for v in vec)
def perplexity(word, pos):
    """Per-word POS perplexity and occurrence counts.

    Returns (perp, counts): perp[w] = 2 ** H(tag distribution of w),
    counts[w] = number of occurrences of w.
    """
    tag_counts = dd(lambda: dd(int))
    for w, p in zip(word, pos):
        tag_counts[w][p] += 1
    perp = {}
    counts = {}
    for w, per_tag in tag_counts.items():
        counts[w] = sum(per_tag.values())
        perp[w] = 2 ** entropy(per_tag.values())
    return perp, counts
# ---- Script body (Python 2: print statements, raw_input, iteritems) ----
word = read_file(sys.argv[1])
pos = read_file(sys.argv[2])
assert len(word) == len(pos)
# Optional extra arguments: cluster files aligned line-by-line with words.
clusters = {}
for f in sys.argv[3:]:
    clusters[f] = read_file(f)
    assert len(clusters[f]) == len(word)
perp, counts = perplexity(word, pos)
mappings = {}
for c, f in clusters.iteritems():
    mappings[c] = m2o_mapping(f, pos)
    # NOTE(review): m2o_score takes (cluster, pos, mapping) but is called
    # with only the mapping here (and again below) — this raises TypeError
    # as soon as a cluster file is supplied.
    print "%s\t%.2f" % (c, 100 * m2o_score(mappings[c]))
# Interactive loop: 'perp > X' lists words with perplexity above X; any
# other input is looked up as a word; an empty line exits.
user = raw_input('> ') + "\n"
while user != "\n":
    if re.search("perp\s*>\s*(.*)", user):
        try:
            num = float(re.search("perp\s*>\s*(.*)", user).group(1))
            for w, p in sorted(filter(lambda x: x[1] > num, perp.iteritems()), key=lambda x: x[1]):
                print "%s\t%.2f\t%d" % (w.strip(), p, counts[w])
        except:
            print "Bad number."
    else:
        matches = dd(list)
        for i in xrange(len(word)):
            if word[i] == user:
                for c, f in clusters.iteritems():
                    matches[c].append(f[i])
        if len(matches) == 0:
            print "No such word."
        else:
            for c, f in clusters.iteritems():
                print "%s\t%.2f" % (c, 100 * m2o_score(mappings[c]))
    user = raw_input('> ') + "\n"
print "Happy Happy Joy Joy."
|
from settings import settings
from tests import random_seed
from tests.graph_case import GraphTestCase
from office365.directory.user import User
from office365.directory.userProfile import UserProfile
class TestGraphUser(GraphTestCase):
    """Tests for Azure Active Directory (Azure AD) users"""
    # Shared across the numbered tests: created in test2, updated in test3,
    # deleted in test4 (tests execute in name order, hence the prefixes).
    test_user = None  # type: User
    def test1_get_user_list(self):
        # Request a single user and verify it carries an id.
        users = self.client.users.top(1).get().execute_query()
        self.assertEqual(len(users), 1)
        for user in users:
            self.assertIsNotNone(user.id)
    def test2_create_user(self):
        # random_seed keeps the principal name and password unique per run.
        password = "P@ssw0rd{0}".format(random_seed)
        profile = UserProfile("testuser{0}@{1}".format(random_seed, settings['tenant']), password)
        new_user = self.client.users.add(profile).execute_query()
        self.assertIsNotNone(new_user.id)
        self.__class__.test_user = new_user
    def test3_update_user(self):
        user_to_update = self.__class__.test_user
        prop_name = 'city'
        prop_val = 'Earth{0}'.format(random_seed)
        user_to_update.set_property(prop_name, prop_val).update().execute_query()
        # Round-trip: the updated property must be findable via a filter.
        result = self.client.users.filter("{0} eq '{1}'".format(prop_name, prop_val)).get().execute_query()
        self.assertEqual(1, len(result))
    def test4_delete_user(self):
        user_to_delete = self.__class__.test_user
        # True => permanent delete (skip the recycle bin).
        user_to_delete.delete_object(True).execute_query()
|
""":mod:`bikeseoul.web.user` --- User pages
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from flask import Blueprint, render_template, request
from ..station import Station
from .db import session
bp = Blueprint('user', __name__)
@bp.route('/')
def home():
    """Render the landing page listing every station."""
    all_stations = session.query(Station).all()
    return render_template('home.html', stations=all_stations)
@bp.route('/search/', methods=['POST'])
def search():
    """Render stations whose name or address contains the posted query."""
    pattern = '%{}%'.format(request.form['query'])
    matches = (
        session.query(Station)
        .filter(Station.name.like(pattern) | Station.address.like(pattern))
        .all()
    )
    return render_template('search.html', stations=matches)
|
import cv2
from PIL import Image
from io import BytesIO
import numpy as np
from multiprocessing import Process, Array, Value
from time import sleep as bsleep
from time import time
from asyncio import sleep, get_event_loop
from loguru import logger
# Streaming target: throttle delivery/encoding to ~15 frames per second.
TARGET_FPS = 15
TARGET_FRAMETIME = 1 / TARGET_FPS  # seconds per frame at the target rate
# Upper bound on the capture resolution requested from the camera.
MAX_W, MAX_H = MAX_RES = 1920, 1080
class VideoCamera(object):
    """Capture frames from camera 0 and JPEG-encode them in two worker
    processes, sharing pixel/JPEG buffers with the parent via
    multiprocessing Arrays.

    Resolution adapts at runtime: get_frame() nudges __quality_adjust up or
    down depending on how the observed frame interval compares with
    TARGET_FRAMETIME, and __encode() rescales once the counter passes +/-5.
    """

    def __init__(self):
        global MAX_W, MAX_H
        self.video = cv2.VideoCapture(0)
        logger.info("Opened Camera 0!")
        # Ask the driver for the maximum resolution...
        self.video.set(cv2.CAP_PROP_FRAME_WIDTH, MAX_W)
        self.video.set(cv2.CAP_PROP_FRAME_HEIGHT, MAX_H)
        # ...then read one frame to learn what we actually got, and shrink
        # the module-level maximums to match.
        _, image = self.video.read()
        H, W, C = image.shape
        MAX_H, MAX_W = H, W
        logger.info("Catching {W}x{H}x{C} frames...".format(W=W, H=H, C=C))
        # Shared raw-frame buffer (one frame) and JPEG buffer; the JPEG
        # buffer is 2x the raw size, generously oversized for encoder output.
        self.__imbuf = Array("c", H * W * C)
        self.__jpbuf = Array("c", H * W * C * 2)
        self.__jpbufsz = Value("i", 0)        # bytes currently used in __jpbuf
        self.__frameidx = Value("i", 0)       # capture counter
        self.__jpegidx = Value("i", 0)        # encode counter
        self.__quality_adjust = Value("i", 0)  # +/- pressure from get_frame()
        self.__halt_flag = Value("i", 0)      # 1 => workers should exit
        self.__H = Value("i", H)              # current encode height
        self.__W = Value("i", W)              # current encode width
        self.__framerate = Value("d", 1)      # EWMA of delivered FPS
        # numpy views over the shared memory so the workers and the parent
        # see the same bytes without copying.
        self.last_frame = np.frombuffer(self.__imbuf.get_obj(), dtype=np.uint8).reshape(
            H, W, C
        )
        self.last_jpeg = np.frombuffer(self.__jpbuf.get_obj(), dtype=np.uint8).reshape(
            H * W * C * 2, 1
        )
        self.img_proc = Process(target=self.__capture)
        self.img_proc.start()
        self.encode_proc = Process(target=self.__encode)
        self.encode_proc.start()
        self.last_idx = 0
        self.last_frame_timestamp = time()

    def __del__(self):
        # Best-effort shutdown: release the device, then signal both worker
        # processes and wait for them to exit.
        self.video.release()
        self.__halt_flag.value = 1
        self.img_proc.join()
        self.encode_proc.join()
        logger.info("Closed Camera 0.")

    def reopen_camera(self):
        # NOTE(review): intentionally disabled — everything below the bare
        # return is dead code kept until it can be tested on working hardware.
        return # TODO: Test this with a camera that isn't bad so that this can actually run
        if self.video is not None:
            self.video.release()
            del self.video
        logger.info(
            "Changing source resolution to {W}x{H}".format(
                W=self.__W.value, H=self.__H.value
            )
        )
        self.video = cv2.VideoCapture(0)
        self.video.set(cv2.CAP_PROP_FRAME_WIDTH, self.__W.value)
        self.video.set(cv2.CAP_PROP_FRAME_HEIGHT, self.__H.value)
        self.video.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc("M", "J", "P", "G"))
        _, frame = self.video.read()
        H, W, C = frame.shape
        self.__H.value = H
        self.__W.value = W

    def __capture(self):
        # Worker process: continuously read frames into the shared buffer.
        while self.__halt_flag.value == 0:
            _, frame = self.video.read()
            H, W, C = frame.shape
            # Source resolution drifted far from the requested one: try to
            # re-open the device (currently a no-op, see reopen_camera).
            if H / self.__H.value < 0.66666 or H / self.__H.value > 1.5:
                self.reopen_camera()
            # assumes frame.shape matches the shared buffer — TODO confirm
            self.last_frame[:, :, :] = frame
            self.__frameidx.value += 1

    def __encode(self):
        # Worker process: resize + JPEG-encode each new captured frame.
        last_idx = 0
        H, W = MAX_H, MAX_W
        def update_resolution(nH, nW):
            # Reset the pressure counter and clamp the new size.
            # NOTE(review): the lower bounds (width>=120, height>=160) look
            # swapped relative to the usual 160x120 minimum — confirm.
            self.__quality_adjust.value = 0
            self.__W.value, self.__H.value = (
                min(max(nW, 120), MAX_W),
                min(max(nH, 160), MAX_H),
            )
        while self.__halt_flag.value == 0:
            # Busy-wait until the capture process publishes a new frame.
            while self.__frameidx.value == last_idx:
                continue
            # Sustained pressure from get_frame() grows/shrinks the output
            # resolution by a factor of 8/7 or 7/8.
            if self.__quality_adjust.value > 5:
                update_resolution(self.__H.value * 8 // 7, self.__W.value * 8 // 7)
            elif self.__quality_adjust.value < -5:
                update_resolution(self.__H.value * 7 // 8, self.__W.value * 7 // 8)
            frame = cv2.resize(self.last_frame, (self.__W.value, self.__H.value), interpolation=cv2.INTER_NEAREST)
            ret, jpeg = cv2.imencode(".jpg", frame)
            bufsz, _ = jpeg.shape
            self.__jpbufsz.value = bufsz
            self.last_jpeg[:bufsz] = jpeg
            last_idx = self.__frameidx.value
            self.__jpegidx.value += 1

    async def get_frame(self):
        """Await the next encoded JPEG and return it as a numpy byte slice.

        Also maintains the EWMA frame rate and feeds the quality-adjust
        counter based on the measured inter-frame interval.
        """
        while self.last_idx == self.__jpegidx.value:
            await sleep(0)
        self.last_idx = self.__jpegidx.value
        time_now = time()
        elapsed = time_now - self.last_frame_timestamp
        self.last_frame_timestamp = time_now
        # Exponentially weighted moving average of the delivered FPS.
        self.__framerate.value = (0.8 * self.__framerate.value) + (
            0.2 / elapsed
        )
        # Too slow => ask for lower quality; comfortably fast => higher.
        if elapsed > 1.2 * TARGET_FRAMETIME:
            self.__quality_adjust.value -= 1
        elif elapsed < 0.8 * TARGET_FRAMETIME:
            self.__quality_adjust.value += 1
        return self.last_jpeg[: self.__jpbufsz.value]

    def get_info(self):
        # Current encode resolution and smoothed frame rate.
        return {
            "resolution": [self.__W.value, self.__H.value],
            "framerate": self.__framerate.value,
        }
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
class DBMetadata:
    """Bundle a database connection with the executor used to run queries."""

    def __init__(self, connection, executor):
        self.connection = connection
        self.executor = executor
class Datos:
    """Simple contact record: name, last name, phone number and e-mail."""

    def __init__(self, name, lastname, phone, email):
        self.name = name
        self.lastname = lastname
        self.phone = phone
        self.email = email
|
#!/usr/bin/python
import csv
import datetime
import ipaddress
import os.path
import re
import socket
import subprocess
from pprint import pprint

import ipwhois
from ipwhois import IPWhois
from IPy import IP
# Pre-condition: Ran ZMAP to scan internet:
# zmap -p 80 -o results.csv -b blacklist.txt -t 7200
# --output-filter="success = 1 && repeat = 0"
# Read zmap scan results file
networks = csv.reader(
    open(
        'results.csv',
        newline=''),
    delimiter=' ',
    quotechar='|')
# Output directory for the per-network host lists.
# NOTE(review): "today" is a literal string, not the current date —
# datetime is imported above, so a real date stamp was likely intended.
subdirectory = "today" + "-network"
try:
    os.mkdir(subdirectory)
except Exception:
    # Directory probably exists already — best effort.
    pass
# Validate IP Address before accessing attributes
# Validate IP Address before accessing attributes
def valid_ip(address):
    """Return *address* unchanged if it is a valid IPv4 dotted-quad string,
    otherwise False.

    Fixes the original, which used ``socket`` without importing it — every
    call raised NameError, silently swallowed by ``except BaseException``.
    The except clause is narrowed to what inet_aton actually raises.
    """
    try:
        socket.inet_aton(address)
        return address
    except (OSError, TypeError):
        return False
# For every scanned host: WHOIS its network, then write every host address
# of that network, one file per scanned IP.
for row in networks:
    net_row = row[0] # Get IP address value
    obj = IPWhois(net_row) # Create IPWhois object for each row (IP address)
    result = obj.lookup_rdap(depth=1) # Look up whois on IP
    # NOTE(review): `result` is the RDAP *dict*, not an IP string, so
    # valid_ip(result) always fails and the body below never executes;
    # the check was presumably meant to validate `net_row` before lookup.
    if valid_ip(result):
        this_net = result['network']['cidr'] # Get CIDR range of network
        # Create network object and coerce host bits to zero
        net = ipaddress.ip_network(this_net, strict=False)
        # print(this_net) # Identify network
        net_ips = net.num_addresses
        # print(net_ips) # Identify number of IP addresses per network
        net_array = []
        hosts = net.hosts()
        # NOTE(review): the loop variable shadows the outer `row`.
        for row in hosts:
            # Coerce object to string that the other module will accept
            net_array.append(str(row))
        # print(net_array)
        # Write host as new line in each network file
        filename = net_row + '.txt'
        with open(os.path.join(subdirectory, filename), 'w') as txtfile:
            for x in net_array:
                txtfile.write(x + '\n')
            # NOTE(review): redundant — the with-statement already closes.
            txtfile.close()
|
# Split n into the maximum number of summands that are all 2, except the
# last, which absorbs the remainder (so it is 2 or 3).  First prints the
# number of summands, then the summands on the next line.
# NOTE(review): assumes n >= 4 — smaller n yields a negative repeat count.
n = int(input())
print(n // 2, '\n', '2 ' * (n // 2 - 1), 2 + n % 2, sep='')
|
# you'll notice that after we generate a list of media files from the conversion of Wiki pages to static pages, there are tons of duplicates.
# we'd like to regenerate the list so that no duplicates are included
# script should only take about 0.1 seconds to complete
infilename = 'lists/listInfoBoxImgs.txt'
outfilename = 'lists/listInfoBoxImgs-nodup.txt'

lines_seen = set()  # holds lines already seen
# Context managers close both handles even on error (the original leaked
# the input file and only closed the output explicitly).
with open(infilename, "r") as infile, open(outfilename, "w") as outfile:
    for line in infile:
        if line not in lines_seen:  # not a duplicate
            outfile.write(line)
            lines_seen.add(line)
import os
from filelock import FileLock
def check_if_exists(path):
    """Ensure *path* exists as a directory (creating parents) and return it.

    Uses ``os.makedirs(..., exist_ok=True)``, which removes the
    check-then-create race of the original ``os.path.exists`` guard and
    also creates missing intermediate directories.
    """
    os.makedirs(path, exist_ok=True)
    return path
# Anchor all paths to this file's directory so the layout works regardless
# of the current working directory.  data/locks dirs are created on import.
here = os.path.dirname(__file__)
data_dir = check_if_exists(os.path.join(here, '../../data'))
locks_dir = check_if_exists(os.path.join(here, '../../locks'))
# Dataset sub-directories (joined only, not created here).
trajectories_dir = os.path.join(data_dir, 'bicicletas-publicas/')
stations_dir = os.path.join(data_dir, 'estaciones-bicicletas-publicas')
scripts_dir = os.path.join(here, '../../scripts')
def get_lock(obj):
    """Return a FileLock keyed on *obj*'s class name, stored in locks_dir."""
    lock_file = os.path.join(locks_dir, type(obj).__name__)
    return FileLock(lock_file)
|
"""
Train a supervised classifier based on an IQR session state dump.
Descriptors used in IQR, and thus referenced via their UUIDs in the IQR session
state dump, must exist external to the IQR web-app (uses a non-memory backend).
This is needed so that this script might access them for classifier training.
Getting an IQR Session's State Information
==========================================
Click the "Save IQR State" button to download the IqrState file encapsulating
the descriptors of positively and negatively marked items. These descriptors
will be used to train the configured SupervisedClassifier.
"""
import logging
import os
from smqtk.algorithms import SupervisedClassifier
from smqtk.algorithms import get_classifier_impls
from smqtk.iqr import IqrSession
from smqtk.representation import DescriptorElementFactory
from smqtk.representation.descriptor_element.local_elements \
import DescriptorMemoryElement
from smqtk.utils.bin_utils import (
basic_cli_parser,
utility_main_helper,
)
from smqtk.utils.plugin import make_config
from smqtk.utils.plugin import from_plugin_config
def get_cli_parser():
    """Build the command-line argument parser for this tool."""
    parser = basic_cli_parser(__doc__)
    parser.add_argument(
        '-i', '--iqr-state',
        help="Path to the ZIP file saved from an IQR session.")
    return parser
def get_default_config():
    """Return the default configuration skeleton for this tool."""
    return {"classifier": make_config(get_classifier_impls())}
def train_classifier_iqr(config, iqr_state_fp):
    """Train the configured SupervisedClassifier from a saved IQR state.

    :param config: configuration dict (see get_default_config()).
    :param iqr_state_fp: path to the IQR state ZIP saved from the web app.
    """
    #: :type: smqtk.algorithms.SupervisedClassifier
    classifier = from_plugin_config(
        config['classifier'],
        get_classifier_impls(sub_interface=SupervisedClassifier)
    )
    # Load state into an empty IqrSession instance.
    with open(iqr_state_fp, 'rb') as f:
        state_bytes = f.read().strip()
    descr_factory = DescriptorElementFactory(DescriptorMemoryElement, {})
    iqrs = IqrSession()
    iqrs.set_state_bytes(state_bytes, descr_factory)
    # Positive descriptor examples for training are composed of those from
    # external and internal sets. Same for negative descriptor examples.
    pos = iqrs.positive_descriptors | iqrs.external_positive_descriptors
    neg = iqrs.negative_descriptors | iqrs.external_negative_descriptors
    classifier.train(class_examples={'positive': pos, 'negative': neg})
def main():
    """CLI entry point: parse arguments, validate the state path, train.

    Exits with status 102 when the --iqr-state path is missing or not a
    file.  The original passed ``args.iqr_state`` straight to
    ``os.path.isfile``, which raises TypeError when -i is omitted
    (argparse default is None).
    """
    args = get_cli_parser().parse_args()
    config = utility_main_helper(get_default_config, args)
    log = logging.getLogger(__name__)
    log.debug("Showing debug messages.")
    iqr_state_fp = args.iqr_state
    if not iqr_state_fp or not os.path.isfile(iqr_state_fp):
        log.error("IQR Session info JSON filepath was invalid")
        exit(102)
    train_classifier_iqr(config, iqr_state_fp)
# Standard script entry point.
if __name__ == "__main__":
    main()
|
#Quiz Two Part Two
#I pledge my honor that I have abided by the Stevens honor system -Maya O
def main():
    """Interactive console menu: basic arithmetic or string utilities.

    All prompts and outputs are unchanged from the original; the four
    identical "read two numbers" sequences are factored into one helper.
    """
    print("Hello!")
    print("Please enter 1 if you would like to access the mathematical funtions")
    print("Please enter 2 if you would like to access the string operations")
    m = int(input("Choice: "))

    def error():
        # Shown whenever a menu choice falls outside the listed options.
        print()
        print("Error: You entered a number that was not one of the menu options")
        print("Please trying running the code from the beginning")

    def thanks():
        print()
        print("Thanks for using this program!")

    def two_numbers():
        # Shared prompt sequence used by all four arithmetic operations.
        print()
        print("Please enter 2 numbers")
        num1 = int(input("Number 1: "))
        num2 = int(input("Number 2: "))
        return num1, num2

    if m == 1:
        print()
        print("Great! Now, ")
        print("For addition, please enter 1")
        print("For subtraction, please enter 2")
        print("For multiplication, please enter 3")
        print("For division, please enter 4")
        math = int(input("Choice: "))
        if math == 1:
            num1, num2 = two_numbers()
            print("The sum of these two numbers is", num1 + num2)
            thanks()
        elif math == 2:
            num1, num2 = two_numbers()
            print("The difference between these two numbers is", num1 - num2)
            thanks()
        elif math == 3:
            num1, num2 = two_numbers()
            print("The product of these two numbers is", num1 * num2)
            thanks()
        elif math == 4:
            num1, num2 = two_numbers()
            # Quotient rounded to 3 decimal places, as in the original.
            print("The quotient of these two numbers is", round(num1 / num2, 3))
            thanks()
        else:
            # Any other integer is outside the menu.
            error()
    elif m == 2:
        print()
        print("Great! Now,")
        print("To determine the number of vowels in a string, please enter 1")
        print("To encrypt a string, please enter 2")
        stringop = int(input("Choice: "))
        if stringop == 1:
            print()
            string = str(input("Please enter a string: "))
            vowels = set("aeiouAEIOU")
            count = sum(1 for ch in string if ch in vowels)
            # Singular/plural phrasing matches the original exactly.
            if count == 1:
                print("There is", count, "vowel in the given string")
            else:
                print("There are", count, "vowels in the given string")
            thanks()
        elif stringop == 2:
            print()
            string = input("Please enter a string: ")
            print("Here is the encrypted message:")
            # Each character becomes 2*ord(c)+3; print's default separator
            # between "" and the number yields the original spacing.
            for ch in string:
                print("", 2 * ord(ch) + 3, end="")
            thanks()
        else:
            error()
    else:
        error()


main()
|
'''
Created on Jan 24, 2016
@author: Andrei Padnevici
@note: This is an exercise: 12.2
'''
import re
import urllib.parse
import urllib.request
import validators
addressStr = str(input("Enter address: "))
# Default URL when the user enters nothing.  The original compared with
# `is ""` / `is None`, which test identity, not equality (and input()
# never returns None) — a SyntaxWarning on modern Python.
if not addressStr:
    addressStr = "http://www.py4inf.com/code/romeo.txt"
addressStr = addressStr.strip()
# prepare url for urlparse: ensure a scheme-relative "//host" form so
# urlparse treats the host as netloc rather than path.
if re.search("^http[s]*://", addressStr) is None:
    if not addressStr.startswith("//"):
        if addressStr.startswith("/"):
            addressStr = "/" + addressStr
        else:
            addressStr = "//" + addressStr
address = urllib.parse.urlparse(addressStr, scheme="http")
addressStr = address.geturl()
# exit program if invalid url
if validators.url(addressStr) is not True:
    print("Invalid URL - [%s], please try again." % addressStr)
    exit(-1)
# navigate to the address and read the body in 512-byte chunks
responseDataBytes = bytes()
print("Navigating to:", addressStr)
try:
    # `with` closes the connection even on error; `except Exception`
    # (narrowed from a bare except) lets Ctrl-C/SystemExit propagate.
    with urllib.request.urlopen(addressStr) as responseUrl:
        while True:
            data = responseUrl.read(512)
            if len(data) < 1:
                break
            responseDataBytes += data
except Exception:
    print("Unknown error is occurred on navigating")
    exit(-1)
# getting total count of <p> tag
pTags = re.findall("<p>", responseDataBytes.decode("ISO-8859-1"))
print("There are %d of <p> tags in the requested page" % len(pTags))
|
import random
from model.vehicle_handling.vehicle import Enemy
from model.vehicle_handling.collision_and_boundaries import check_all_collision
import global_variables as gv
# Tuning knobs for enemy spawning.
spawn_rate = 40 # higher is a lower spawn_rate
spawn_max = 20  # hard cap on simultaneously active enemy vehicles
def spawn_chance(vehicles, movement_pattern="random", x=None, y=None, w=gv.ENEMY_WIDTH, l=gv.ENEMY_LENGTH):
    """Maybe spawn one enemy vehicle into *vehicles*.

    Does nothing when the vehicle cap is reached or the 1-in-spawn_rate
    roll fails.  When x is None, a random x position on the road is chosen.
    A new vehicle overlapping an existing one is discarded.

    The two nearly identical branches of the original (explicit x vs
    random x) are merged; the RNG call order is preserved.
    """
    index = len(vehicles)
    if index >= spawn_max:
        return
    if random.randint(1, spawn_rate) != 1:
        return  # spawn roll failed this frame
    if x is None:
        # Random x on the road, keeping the whole car on the asphalt.
        x = int(round(random.randint(gv.WINDOW_W * (1 - gv.ROAD_W_RATIO) / 2 + w,
                                     gv.WINDOW_W * (1 - gv.ROAD_W_RATIO) / 2 + gv.ROAD_W) - w))
    vehicle = spawn_random_enemy(index, movement_pattern, x, y, w, l)
    if check_for_other_vehicle(vehicle, vehicles):  # spawn area occupied
        return
    vehicles.append(vehicle)
def spawn_random_enemy(index, movement_pattern, x, y, w, l):
    # NOTE(review): `movement_pattern` is accepted but ignored — a random
    # pattern (skipping the first two entries of MOVEMENT_PATTERNS) is
    # always chosen.  Confirm whether the argument should override this.
    pattern = gv.MOVEMENT_PATTERNS[random.randint(2, len(gv.MOVEMENT_PATTERNS)-1)]
    return Enemy(index, pattern, x, y, w, l)
def check_for_other_vehicle(vehicle, vehicles):
    """Return True when *vehicle* collides with any vehicle in *vehicles*."""
    return check_all_collision(vehicle, vehicles) is not None
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 24 08:08:28 2019
@author: imad
"""
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Perceptron
from sklearn.metrics import accuracy_score
from plot_decission_regions import PDR
import numpy as np
import matplotlib.pyplot as plt
iris = datasets.load_iris()
#we will use two features from iris dataset -> petal length and petal width
X = iris.data[:, [2,3]]
y = iris.target
#perform train test split
#the later is called a 70-30 split, 70% of X is used as training data, 30% is used as testing data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
#Feature scaling is somewhat necessary for optimal performance
#we only need to scale the feature vectors, as targets are just class labels, and scaling them won't achieve anthing
sc = StandardScaler()
#we need to fit the scaler to the training data to estimate mean and variance of the data
#mean and variance is required to standardize the data using normal distribution
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)
#we now use the perceptron, this is simmilar to the one we implemented
# NOTE(review): Perceptron(n_iter=...) is the pre-0.19 scikit-learn
# spelling; newer releases renamed it to max_iter — confirm the pinned
# scikit-learn version before running.
ppn = Perceptron(verbose=0, eta0=0.1, n_iter=40, random_state=0)
ppn.fit(X_train_std, y_train)
#now we have trained the perceptron model
#we will now test the model
y_pred = ppn.predict(X_test_std)
print("Misclassified samples: %d" % (y_pred != y_test).sum())
#we will calculate accuracy using the built in fn
acc = accuracy_score(y_test, y_pred)
print("Accuracy: %f" % acc)
# Plot decision regions over the combined (train + test) standardized data.
X_combined_std = np.vstack((X_train_std, X_test_std))
y_combined = np.hstack((y_train, y_test))
print(X_combined_std.shape)
print(y_combined.shape)
a = PDR(X_combined_std, y_combined, classifier=ppn)
plt.xlabel("Petal Length [Standardized]")
plt.ylabel("Petal Width [Standardized]")
plt.legend(loc = "upper left")
plt.show()
|
"""
Canned response for fastly
"""
from __future__ import absolute_import, division, unicode_literals
import random
import string
import uuid
class FastlyResponse(object):
"""
Canned response for fastly.
See docs here https://docs.fastly.com/api/config
"""
fastly_cache = {}
    def get_current_customer(self):
        """
        Returns the current customer with response code 200.
        :return: a JSON-serializable dictionary matching the format of the JSON
                 response for fastly_client.get_current_customer()
                 ("/current_customer") request.
        """
        def _random_string():
            # 20 random uppercase characters.
            # NOTE(review): ascii_uppercase is concatenated with itself —
            # one operand was probably meant to be lowercase or digits.
            # Harmless here: the result is still random uppercase.
            random_string = u''.join(random.choice(
                string.ascii_uppercase + string.ascii_uppercase)
                for _ in range(20))
            return random_string
        # `id` shadows the builtin within this method; kept as-is.
        id = _random_string()
        owner_id = _random_string()
        # Canned payload mirroring a real /current_customer response.
        current_customer = {
            u'can_edit_matches': u'0',
            u'can_read_public_ip_list': u'0',
            u'can_upload_vcl': u'1',
            u'updated_at': u'2014-11-03T23:37:44+00:00',
            u'has_config_panel': u'1',
            u'has_improved_ssl_config': False,
            u'id': id,
            u'has_historical_stats': u'1',
            u'has_openstack_logging': u'0',
            u'can_configure_wordpress': u'0',
            u'has_improved_logging': u'1',
            u'readonly': '',
            u'ip_whitelist': u'0.0.0.0/0',
            u'owner_id': owner_id,
            u'phone_number': u'770-123-1749',
            u'postal_address': None,
            u'billing_ref': None,
            u'can_reset_passwords': True,
            u'has_improved_security': u'1',
            u'stripe_account': None,
            u'name': u'Poppy - Test',
            u'created_at': u'2014-11-03T23:37:43+00:00',
            u'can_stream_syslog': u'1',
            u'pricing_plan': u'developer',
            u'billing_contact_id': None,
            u'has_streaming': u'1'}
        return current_customer
    def create_service(self, url_data):
        """
        Returns POST service with response json.
        :param url_data: iterable of (key, [value]) pairs, as produced by
                         urllib's parse_qs().items(); must contain 'name'
                         and 'customer_id'.
        :return: a JSON-serializable dictionary matching the format of the JSON
                 response for fastly_client.create_service()
                 ("/service") request.
        """
        # Flatten the single-element value lists.
        data = {key: value[0] for key, value in url_data}
        publish_key = uuid.uuid4().hex
        service_id = uuid.uuid4().hex
        service_name = data['name']
        # Register the service in the shared cache under BOTH its name and
        # its id so later calls can look it up either way (same dict object).
        self.fastly_cache[service_name] = {
            'service_details': {
                u'comment': '',
                u'locked': False,
                u'updated_at': u'2014-11-13T14:29:10+00:00',
                u'created_at': u'2014-11-13T14:29:10+00:00',
                u'testing': None,
                u'number': 1,
                u'staging': None,
                u'active': None,
                u'service_id': service_id,
                u'deleted_at': None,
                u'inherit_service_id': None,
                u'deployed': None},
            'service_name': service_name
        }
        self.fastly_cache[service_id] = self.fastly_cache[service_name]
        # Canned response payload for the POST /service call.
        create_service = {
            u'comment': '',
            u'publish_key': publish_key,
            u'name': service_name,
            u'versions': [{u'comment': '', u'locked': u'0',
                           u'service': service_id,
                           u'updated_at': u'2014-11-12T18:43:21',
                           u'created_at': u'2014-11-12T18:43:21',
                           u'testing': None, u'number': u'1',
                           u'staging': None,
                           u'active': None,
                           u'service_id': service_id,
                           u'deleted_at': None,
                           u'inherit_service_id': None,
                           u'deployed': None,
                           u'backend': 0}],
            u'created_at': u'2014-11-12T18:43:21+00:00',
            u'updated_at': u'2014-11-12T18:43:21+00:00',
            u'customer_id': data['customer_id'],
            u'id': service_id}
        return create_service
def get_service_by_name(self, service_name):
"""Returns the details of the CDN service.
:return: a JSON-serializable dictionary matching the format of the JSON
response for fastly_client.get_service_by_name()
("/service/version") request.
"""
return self.fastly_cache[service_name]
def create_version(self, service_id):
"""
Returns POST service with response json.
:return: a JSON-serializable dictionary matching the format of the JSON
response for fastly_client.create_version()
("/service/version") request.
"""
create_version = {
'service_id': service_id,
'number': 1}
return create_version
def create_domain(self, url_data, service_id, service_version):
"""
Returns POST create_domain with response json.
:return: a JSON-serializable dictionary matching the format of the JSON
response for fastly_client.create_domain()
("/service/<service_id>/version/<service_version>/domain")
request.
"""
request_dict = {k: v[0] for k, v in url_data}
domain_name = request_dict['name']
create_domain = {
'comment': '',
'service_id': service_id,
'version': service_version,
'name': domain_name}
if 'domain_list' not in self.fastly_cache[service_id]:
self.fastly_cache[service_id]['domain_list'] = []
self.fastly_cache[service_id]['domain_list'].append(
[create_domain, 'None', 'False'])
return create_domain
def check_domains(self, service_id, service_version):
"""
Returns GET check_domains with response json.
:return: a JSON-serializable dictionary matching the format of the JSON
response for fastly_client.check_domain()
("/service/%s/version/%d/domain/check_all")
request.
"""
domain_list = self.fastly_cache[service_id]['domain_list']
return domain_list
    def create_backend(self, url_data, service_id, service_version):
        """
        Returns create_backend response json.
        :param url_data: iterable of (key, [value]) pairs; must contain
                         'name', 'address', 'port' and 'use_ssl'.
        :return: a JSON-serializable dictionary matching the format of the JSON
                 response for fastly_client.create_backend()
                 ("/service/<service_id>/version/<service_version>/backend")
                 request.
        """
        request_dict = {k: v[0] for k, v in url_data}
        # Canned backend record; only the request-derived fields vary.
        create_backend = {
            u'comment': '',
            u'shield': None,
            u'weight': 100,
            u'ssl_client_key': None,
            u'first_byte_timeout': 15000,
            u'auto_loadbalance': False,
            u'use_ssl': request_dict['use_ssl'],
            u'port': request_dict['port'],
            u'ssl_hostname': None,
            u'hostname': request_dict['name'],
            u'error_threshold': 0,
            u'max_conn': 20,
            u'version': service_version,
            u'ipv4': None,
            u'ipv6': None,
            u'client_cert': None,
            u'ssl_ca_cert': None,
            u'request_condition': '',
            u'healthcheck': None,
            u'address': request_dict['address'],
            u'ssl_client_cert': None,
            u'name': request_dict['name'],
            u'connect_timeout': 1000,
            u'between_bytes_timeout': 10000,
            u'service_id': service_id}
        # Remember the backend on the cached service entry.
        if 'origin_list' not in self.fastly_cache[service_id]:
            self.fastly_cache[service_id]['origin_list'] = []
        self.fastly_cache[service_id]['origin_list'].append(create_backend)
        return create_backend
def create_condition(self, url_data, service_id, service_version):
"""
Returns create_condition response json.
:return: a JSON-serializable dictionary matching the format of the JSON
response for fastly_client.create_condition()
("/service/<service_id>/version/<service_version>/condition")
request.
"""
request_dict = {k: v[0] for k, v in url_data}
create_condition = {
u"type": "REQUEST",
u"comment": "",
u"name": "condition",
u"version": service_version,
u"service_id": service_id,
u"statement": request_dict['statement'],
u"priority": request_dict['priority']
}
if 'condition_list' not in self.fastly_cache[service_id]:
self.fastly_cache[service_id]['condition_list'] = []
self.fastly_cache[service_id][
'condition_list'].append(create_condition)
return create_condition
def create_cache_settings(self, url_data, service_id, service_version):
"""
Returns create_cache_settings response json.
:return: a JSON-serializable dictionary matching the format of the JSON
response for fastly_client.create_cache_settings()
("/service/<service_id>/version/<service_version>/cache_settings")
request.
"""
request_dict = {k: v[0] for k, v in url_data}
create_cache_settings = {
"stale_ttl": request_dict.get("stale_ttl", 0),
"ttl": request_dict.get("ttl", 0),
"action": request_dict.get("action", ""),
"cache_condition": "",
"name": "cache_setting",
"version": service_version,
"service_id": service_id
}
if 'cache_settings_list' not in self.fastly_cache[service_id]:
self.fastly_cache[service_id]['cache_settings_list'] = []
self.fastly_cache[service_id][
'cache_settings_list'].append(create_cache_settings)
return create_cache_settings
def create_response_object(self, url_data, service_id, service_version):
    """Build and cache a create_response_object response payload.

    :return: a JSON-serializable dictionary matching the format of the JSON
        response for fastly_client.create_response_object()
        ("/service/<service_id>/version/<service_version>/response_object") request.
    """
    # url_data is an iterable of (key, [value, ...]) pairs; keep the first value.
    params = dict((key, values[0]) for key, values in url_data)
    response_object = {
        "status": params["status"],
        "response": params["response"],
        "cache_condition": params.get("cache_condition", ""),
        "request_condition": params.get("request_condition", ""),
        "name": params["name"],
        "version": service_version,
        "content": params["content"],
        "content_type": "text/plain",
        "service_id": service_id,
    }
    # Record the response object on the service, creating the list on first use.
    self.fastly_cache[service_id].setdefault('response_object_list', []).append(response_object)
    return response_object
def create_settings(self, url_data, service_id, service_version):
    """Build and cache a settings response payload.

    :return: a JSON-serializable dictionary matching the format of the JSON
        response for fastly_client.create_settings()
        ("/service/<service_id>/version/<service_version>/settings") request.
    """
    # url_data is an iterable of (key, [value, ...]) pairs; keep the first value.
    params = dict((key, values[0]) for key, values in url_data)
    settings = {
        "service_id": service_id,
        "version": service_version,
        "general.default_ttl": params.get("general.default_ttl", 0),
        "general.default_host": params.get("general.default_host", ""),
    }
    # Record the settings on the service, creating the list on first use.
    self.fastly_cache[service_id].setdefault('settings_list', []).append(settings)
    return settings
def list_versions(self, service_id):
    """Return the GET list_versions response json.

    :return: a JSON-serializable list matching the format of the JSON
        response for fastly_client.list_versions()
        ("/service/%s/version") request.
    """
    # The mock tracks a single version per service.
    cached = self.fastly_cache[service_id]
    return [cached['service_details']]
def activate_version(self, service_id, version_number):
    """Return the activate_version response json.

    `version_number` is accepted for API parity but unused — the mock keeps a
    single version per service.

    :return: a JSON-serializable dictionary matching the format of the JSON
        response for fastly_client.activate_version()
        ("/service/%s/version/%d/activate") request.
    """
    details = self.fastly_cache[service_id]['service_details']
    details['active'] = True
    return details
def deactivate_version(self, service_id, version_number):
    """Return the deactivate_version response json.

    `version_number` is accepted for API parity but unused — the mock keeps a
    single version per service.

    :return: a JSON-serializable dictionary matching the format of the JSON
        response for fastly_client.deactivate_version()
        ("/service/%s/version/%d/deactivate") request.
    """
    details = self.fastly_cache[service_id]['service_details']
    details['active'] = False
    return details
def get_service_details(self, service_id):
    """Return the get_service_details response json.

    :return: a JSON-serializable dictionary matching the format of the JSON
        response for fastly_client.get_service_details()
        ("/service/%s/details") request.
    """
    cached = self.fastly_cache[service_id]
    # Fixed customer/publish values mimic a canned Fastly API response.
    return {
        u'id': service_id,
        u'name': cached['service_name'],
        u'customer_id': "hTE5dRlSBICGPJxJwCH4M",
        u'comment': "",
        u"updated_at": "2012-06-14T21:20:19+00:00",
        u"created_at": "2012-06-14T21:20:19+00:00",
        u"publish_key": "xgdbdd93h5066f8d330c276fDe00f9d293abfex7",
        u'versions': [cached['service_details']],
    }
def delete_service(self, service_id):
    """Return the DELETE service response json.

    Removes both cache entries for the service (it is keyed by id and by
    name).

    :return: a JSON-serializable dictionary matching the format of the JSON
        response for fastly_client.delete_service()
        ("/service/%s") request.
    """
    service_name = self.fastly_cache[service_id]['service_name']
    self.fastly_cache.pop(service_id)
    self.fastly_cache.pop(service_name)
    return {'status': 'ok'}
def get_health(self):
    """Health-check endpoint payload; always reports OK with a 200."""
    return dict(status='ok')
|
from django.urls import path

from . import views

# URL routes for the blog app: the index page and per-category post listings.
urlpatterns = [
    path('', views.index, name='home'),
    path('posts/<str:category_name>/', views.post_list, name='post_list'),
]
|
"""Created by nasim zolaktaf, 2019
This file plots the MSE vs iteration, (such as fig 4 and fig 5 of the paper).
To run this file, first run map.py to do parameter estimation once with FPEI, once with SSAI and to generate necessary files.
Then in this file set dc1 and dc2 to the correct files.
Run 'plot_ssavsFPEI.py'
"""
from __future__ import division, absolute_import
import numpy as np
import matplotlib
# Select a non-interactive backend; must happen before pyplot/pylab is imported.
matplotlib.use('Agg')
import matplotlib.pylab as plt
import seaborn as sns
import csv
# NOTE(review): leftover __all__ — this module defines neither 'minimize' nor
# 'minimize_scalar'; it looks copied from another file. Confirm before removing.
__all__ = ['minimize', 'minimize_scalar']
import os
import sys
# Make the learndnakinetics package (myenums) importable from a sibling checkout.
sys.path.insert(0,os.path.realpath('../learndnakinetics'))
import myenums
sns.set()
sns.set_context(rc={"figure.figsize": (8, 4)})
"""Setting options for different plots based on the plots in the literature. """
class PlotOptions(object):
    """Bundle of settings for one plot: output file, axis labels, series
    names, data files, optional y-axis limits, title and per-series timings."""

    def __init__(self, figuresavename, xlabel, ylabel, names, documents,
                 yax_min, yax_max, title, time):
        self.figuresavename = figuresavename
        self.xlabel = xlabel
        self.ylabel = ylabel
        self.names = names
        self.documents = documents
        self.yax_min = yax_min
        self.yax_max = yax_max
        self.title = title
        self.time = time
"""open a csv file"""
def open_document(document) :
my_CSV = list(csv.reader(open(document, 'rb')))
return my_CSV
"""Drawing the MSE of FPEI VS SSA"""
def draw_plot(po):
title_fontsize = 36
label_fontsize = 28
tick_fontsize=26
propsize = 26
colors=['red','blue', 'green']
linewidth = 1
loc =1
smarkersize= [20,20,20,20]
FPEIstarcolor= "g"
FPEIstarstyle= "*"
linestyle = [ '-' , '--' , '--']
fig, ax = plt.subplots()
ymin = np.inf
ymax= -np.inf
lengths= []
for document, name in zip( po.documents , po.names) :
if name =="":
lengths.append(0)
else:
row = open_document(document)
row= row[0]
lengths.append(len(row))
for document, name , i in zip( po.documents , po.names , range(len(po.names))) :
if name =="" :
continue
row = open_document(document)
row = row[0]
x = range(len(row))
y = []
for cc in x :
y.append(row[cc])
if y[-1] =="" :
x = x[:-1]
y = y [:-1]
y = map(float, y)
ymint= min(y)
if ymint < ymin :
ymin = ymint
ymaxt= max(y)
if ymaxt > ymax and ymaxt!= np.inf:
ymax = ymaxt
if myenums.NamesinPlot.SSAI.value in name:
ind = 0
elif myenums.NamesinPlot.FPEI.value in name:
ind= 1
plt.plot(x, y, color= colors[i ], linewidth = linewidth, linestyle= linestyle[i], label = name +" (" +po.time[ind] + " s / iter")
#########reading only values when new paths are generated
if usexnewandynew == True and myenums.NamesinPlot.FPEI.value in name:
dl = document.split("/")
documenty = ""
print dl
for i in range (len(dl)-1):
print dl[i]
documenty += dl[i]
documenty += "/"
documenty =documenty + myenums.ImportantFiles.MSEYNEW.value
row = open_document(documenty )
row = row[0]
ynew = []
lengths = range( len(row))
for cc in lengths :
ynew.append(row[cc])
dl = document.split("/")
documentx = ""
for i in range (len(dl)-1):
documentx += dl[i]
documentx += "/"
documentx =documentx + myenums.ImportantFiles.MSEXNEW.value
row = open_document(documentx )
row = row[0]
xnew = []
lengths = range( len(row))
for cc in lengths :
xnew.append(row[cc])
if ynew[-1] =="" :
xnew = xnew[:-1]
ynew = ynew [:-1]
ynew = map(float, ynew)
xnew = map(int, xnew)
xnew= xnew[:8]
ynew= ynew[:8]
plt.plot(xnew, ynew, color= FPEIstarcolor, marker= FPEIstarstyle, markersize=smarkersize[i], linewidth = linewidth, linestyle= linestyle[1])
plt.ylim(bottom = 0 )
if "Modified"in po.figuresavename:
plt.ylim(top = 7 )
else:
plt.ylim(top = 1.6 )
ttl = ax.title
ttl.set_position([.5, 1.02])
plt.title(po.title, fontsize=title_fontsize)
if po.yax_min!= None :
plt.ylim(bottom = po.yax_min)
if po.yax_max!= None :
plt.ylim(top = po.yax_max)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(tick_fontsize)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(tick_fontsize)
plt.legend(loc=loc, borderaxespad=0., prop={'size': propsize})
plt.xlabel(po.xlabel, fontsize=label_fontsize)
plt.ylabel(po.ylabel, fontsize=label_fontsize)
plt.savefig(po.figuresavename, bbox_inches='tight')
plt.show()
plt.close(fig)
def main():
    # Entry point: for each (SSAI folder, FPEI folder) pair, produce an
    # MSE-vs-iteration plot and an overall-time-vs-iteration plot in plotmse/.
    global usexnewandynew
    # When True, draw_plot additionally stars the MSEXNEW/MSEYNEW points.
    usexnewandynew = True
    dc1= ["../learndnakinetics/testcisse-ssa"] #dc1 should contain path to the SSAI folder results, same as parameter_folder in config_file.txt
    dc2=["../learndnakinetics/testcisse-fpei"] # This should contain path to the FPEI folder results, same as parameter_folder in config_file.txt
    # Per-pair (SSAI, FPEI) seconds-per-iteration strings shown in the legend.
    times = [ [r"$-$",r"$-$"]]
    for a, b , time in zip(dc1, dc2 ,times ) :
        # Title reflects which initialization (theta_0' vs theta_0'') was used.
        if "Modified" in a:
            title = "Initialization: "+r"$\theta_0''$"
        else:
            title = "Initialization: "+r"$\theta_0'$"
        documents1 = [ a, b]
        names = [ myenums.NamesinPlot.SSAI.value , myenums.NamesinPlot.FPEI.value] # do not change these names
        path = "plotmse/"
        if not os.path.exists(path):
            os.makedirs(path)
        documents2= documents1
        # These files are created from running map.py The files are created and written to in learndnakinetics.py
        documents1 = [doc+"/" + myenums.ImportantFiles.MSE.value for doc in documents1]
        documents2 = [doc+"/"+ myenums.ImportantFiles.OVERALLTIME.value for doc in documents2]
        xlabel = "Iterations"
        ylabel1 = "MSE"
        ylabel2 = "Overall time (s)"
        figuresavename1 = path+ "msevsiteration.pdf"
        figuresavename2 = path+ "totaltimevsiteration.pdf"
        plotoptions = PlotOptions( figuresavename= figuresavename1, xlabel = xlabel, ylabel = ylabel1 , names= names, documents = documents1, yax_min = None , yax_max= None, title= title, time= time )
        draw_plot(plotoptions)
        plotoptions = PlotOptions( figuresavename= figuresavename2, xlabel = xlabel, ylabel = ylabel2, names= names, documents = documents2,yax_min = None , yax_max= None , title = title , time= time )
        draw_plot(plotoptions)
if __name__ == "__main__":
    main()
|
from django.shortcuts import render, get_object_or_404, redirect
from .models import Person
from . import models
from django.views.generic import View,TemplateView,ListView,DetailView,CreateView,UpdateView,DeleteView
# from .forms import PersonForm
from . import forms
def homepage(request):
    """Render the static home page."""
    template = 'contactapp/homepage.html'
    return render(request, template)
def contact_list(request):
    """List all contacts, ordered alphabetically by first name."""
    ordered_persons = Person.objects.all().order_by('first_name')
    context = {'persons': ordered_persons}
    return render(request, 'contactapp/contact_list.html', context)
def contact_new(request):
    """Create a contact: GET shows an empty form, POST validates and saves.

    On validation failure the bound form (with errors) is re-rendered.
    """
    if request.method != 'POST':
        # Plain GET: show an unbound form.
        return render(request, 'contactapp/contact_edit.html',
                      {'form': forms.PersonForm()})
    form = forms.PersonForm(request.POST, request.FILES)
    if form.is_valid():
        form.save()
        return redirect('/')
    return render(request, 'contactapp/contact_edit.html', {'form': form})
class contact_detail(DetailView):
    # Class-based detail view for a single Person; the template receives the
    # object under the name 'contactdetails'. (Lowercase class name kept to
    # avoid breaking existing URLconf references.)
    context_object_name = 'contactdetails'
    model = models.Person
    template_name = 'contactapp/contact_detail.html'
def contact_edit(request, pk):
    """Edit an existing contact: GET shows the bound form, POST validates and saves.

    :param pk: primary key of the Person to edit; responds 404 if absent.
    """
    person = get_object_or_404(models.Person, pk=pk)
    if request.method == 'POST':
        # Include request.FILES so uploaded files survive an edit —
        # contact_new already binds FILES; omitting it here silently dropped
        # any file upload on the edit path.
        form = forms.PersonForm(request.POST, request.FILES, instance=person)
        if form.is_valid():
            form.save()
            # return redirect('/person/' + str(person.pk))
            # TODO: redirect to the contact list / detail page instead of home.
            return redirect('/')
    else:
        form = forms.PersonForm(instance=person)
    return render(request, 'contactapp/contact_edit.html', {'form': form})
def contact_delete(request, pk):
    # Delete the Person with the given pk (404 if absent) and return home.
    # NOTE(review): this deletes on ANY HTTP method, including GET — confirm it
    # is only reachable via POST forms with CSRF protection, otherwise link
    # prefetchers/crawlers can destroy data.
    person = get_object_or_404(models.Person, pk=pk)
    person.delete()
    return redirect('/')
#
# Copyright © 2021 Uncharted Software Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from typing import Optional, List, Any
import pandas as pd
from d3m import container, utils
from d3m.metadata import base as metadata_base, hyperparams, params
from d3m.primitive_interfaces import base
from distil.preprocessing.transformers import SVMTextEncoder, TfidifEncoder
from distil.primitives import utils as distil_utils
from distil.modeling.metrics import (
classification_metrics,
clustering_metrics,
regression_metrics,
)
from distil.utils import CYTHON_DEP
import version
__all__ = ("TextEncoderPrimitive",)
logger = logging.getLogger(__name__)
Inputs = container.DataFrame
Outputs = container.DataFrame
class Hyperparams(hyperparams.Hyperparams):
    # Scoring metric used when fitting the SVM-backed encoder.
    metric = hyperparams.Enumeration[str](
        values=classification_metrics + regression_metrics + clustering_metrics,
        default="f1Macro",
        semantic_types=[
            "https://metadata.datadrivendiscovery.org/types/ControlParameter"
        ],
        description="The D3M scoring metric to use during the fit phase. This can be any of the regression, classification or "
        + "clustering metrics.",
    )
    # Explicit column indices to operate on; an empty set means auto-detect
    # text columns from semantic types.
    use_columns = hyperparams.Set(
        elements=hyperparams.Hyperparameter[int](-1),
        default=(),
        semantic_types=[
            "https://metadata.datadrivendiscovery.org/types/ControlParameter"
        ],
        description="A set of column indices to force primitive to operate on. If any specified column cannot be parsed, it is skipped.",
    )
    # Which text-encoding backend to instantiate per column (see fit()).
    encoder_type = hyperparams.Enumeration(
        default="svm",
        values=["svm", "tfidf"],
        semantic_types=[
            "https://metadata.datadrivendiscovery.org/types/ControlParameter"
        ],
        description="Vectorization Strategy.",
    )
class Params(params.Params):
    # Column indices that were selected/encoded during fit.
    _cols: Optional[List[int]]
    # Fitted per-column encoders; typed Any because the concrete class depends
    # on the 'encoder_type' hyperparam.
    _encoders: Any
    # _encoders: Optional[List[Union[SVMTextEncoder, TfidifEncoder]]]
class TextEncoderPrimitive(base.PrimitiveBase[Inputs, Outputs, Params, Hyperparams]):
    """
    Encodes string fields using TFIDF scoring combined with a linear SVC classifier. The original string field is removed
    and replaced with encoding columns.
    """

    _attribute_semantic = "https://metadata.datadrivendiscovery.org/types/Attribute"

    metadata = metadata_base.PrimitiveMetadata(
        {
            "id": "09f252eb-215d-4e0b-9a60-fcd967f5e708",
            "version": version.__version__,
            "name": "Text encoder",
            "python_path": "d3m.primitives.data_transformation.encoder.DistilTextEncoder",
            "source": {
                "name": "Distil",
                "contact": "mailto:cbethune@uncharted.software",
                "uris": [
                    "https://github.com/uncharted-distil/distil-primitives/blob/main/distil/primitives/text_encoder.py",
                    "https://github.com/uncharted-distil/distil-primitives",
                ],
            },
            "installation": [
                CYTHON_DEP,
                {
                    "type": metadata_base.PrimitiveInstallationType.PIP,
                    "package_uri": "git+https://github.com/uncharted-distil/distil-primitives.git@{git_commit}#egg=distil-primitives".format(
                        git_commit=utils.current_git_commit(os.path.dirname(__file__)),
                    ),
                },
            ],
            "algorithm_types": [
                metadata_base.PrimitiveAlgorithmType.ENCODE_BINARY,
            ],
            "primitive_family": metadata_base.PrimitiveFamily.DATA_TRANSFORMATION,
        },
    )

    def __init__(self, *, hyperparams: Hyperparams, random_seed: int = 0) -> None:
        super().__init__(hyperparams=hyperparams, random_seed=random_seed)
        self._encoders: List[SVMTextEncoder] = []  # one fitted encoder per text column
        self._cols: List[int] = []  # indices of the columns being encoded

    def __getstate__(self) -> dict:
        # Persist the fitted encoders and column selection alongside base state.
        state = base.PrimitiveBase.__getstate__(self)
        state["models"] = self._encoders
        state["columns"] = self._cols
        return state

    def __setstate__(self, state: dict) -> None:
        base.PrimitiveBase.__setstate__(self, state)
        self._encoders = state["models"]
        self._cols = state["columns"]

    def set_training_data(self, *, inputs: Inputs, outputs: Outputs) -> None:
        """Stash training inputs and flatten targets to a 1-d pandas Series."""
        self._inputs = inputs
        # https://github.com/scikit-learn/scikit-learn/issues/14429#issuecomment-513887163
        if type(outputs) == container.pandas.DataFrame and outputs.shape[1] == 1:
            outputs = outputs.values.reshape(
                outputs.shape[0],
            )
        else:
            outputs = outputs.iloc[:, 0].values
        self._outputs = pd.Series(outputs)

    def fit(
        self, *, timeout: float = None, iterations: int = None
    ) -> base.CallResult[None]:
        """Fit one encoder per selected/detected text column against the targets.

        :raises Exception: if the 'encoder_type' hyperparam is unrecognised.
        """
        logger.debug(f"Fitting {__name__}")

        # determine columns to operate on
        cols = distil_utils.get_operating_columns(
            self._inputs, self.hyperparams["use_columns"], ("http://schema.org/Text",)
        )
        logger.debug(f"Found {len(cols)} columns to encode")

        self._cols = list(cols)
        self._encoders: List[SVMTextEncoder] = []
        # Fixed: was `len(cols) is 0` — identity comparison against an int
        # literal, which only works via CPython's small-int cache and raises
        # a SyntaxWarning on modern interpreters.
        if len(cols) == 0:
            return base.CallResult(None)

        for i, c in enumerate(self._cols):
            if self.hyperparams["encoder_type"] == "svm":
                self._encoders.append(
                    SVMTextEncoder(self.hyperparams["metric"], self.random_seed)
                )
            elif self.hyperparams["encoder_type"] == "tfidf":
                self._encoders.append(TfidifEncoder())
            else:
                raise Exception(
                    f"{self.hyperparams['encoder_type']} is not a valid encoder type"
                )
            text_inputs = self._inputs.iloc[:, c]
            try:
                self._encoders[i].fit_transform(
                    text_inputs, self._outputs
                )  # requires fit transform to fit SVM on vectorizer results
            except Exception:
                # Fixed: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt.
                # NOTE(review): this fallback overwrites the column with a
                # placeholder string to dodge an encoder crash — confirm the
                # underlying bug still exists before keeping it.
                text_inputs[:] = "avoiding a bug"
                self._encoders[i].fit_transform(text_inputs, self._outputs)

        return base.CallResult(None)

    def produce(
        self, *, inputs: Inputs, timeout: float = None, iterations: int = None
    ) -> base.CallResult[Outputs]:
        """Replace each fitted text column with its numeric encoding columns."""
        logger.debug(f"Producing {__name__}")
        if len(self._cols) == 0:
            return base.CallResult(inputs)

        outputs = inputs.copy()
        encoded_cols = container.DataFrame()
        encoded_cols_source = []
        # encode columns into a new dataframe
        for i, c in enumerate(self._cols):
            text_inputs = outputs.iloc[:, c]
            result = self._encoders[i].transform(text_inputs)
            for j in range(result.shape[1]):
                # Encoded columns get synthetic, globally unique names.
                encoded_idx = i * result.shape[1] + j
                encoded_cols[(f"__text_{encoded_idx}")] = result[:, j]
                encoded_cols_source.append(c)
        # generate metadata for encoded columns
        encoded_cols.metadata = encoded_cols.metadata.generate(encoded_cols)
        for c in range(encoded_cols.shape[1]):
            encoded_cols.metadata = encoded_cols.metadata.add_semantic_type(
                (metadata_base.ALL_ELEMENTS, c), "http://schema.org/Float"
            )
            encoded_cols.metadata = encoded_cols.metadata.add_semantic_type(
                (metadata_base.ALL_ELEMENTS, c), self._attribute_semantic
            )
            # Record which original column each encoded column came from.
            col_dict = dict(
                encoded_cols.metadata.query((metadata_base.ALL_ELEMENTS, c))
            )
            col_dict["source_column"] = outputs.metadata.query(
                (metadata_base.ALL_ELEMENTS, encoded_cols_source[c])
            )["name"]
            encoded_cols.metadata = encoded_cols.metadata.update(
                (metadata_base.ALL_ELEMENTS, c), col_dict
            )
        # append the encoded columns and remove the source columns
        outputs = outputs.append_columns(encoded_cols)
        outputs = outputs.remove_columns(self._cols)
        logger.debug(f"\n{outputs}")
        return base.CallResult(outputs)

    def get_params(self) -> Params:
        return Params(_encoders=self._encoders, _cols=self._cols)

    def set_params(self, *, params: Params) -> None:
        self._encoders = params["_encoders"]
        self._cols = params["_cols"]
|
from __future__ import absolute_import
import math
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn import init
from torch.autograd import Variable
import torchvision
# from torch_deform_conv.layers import ConvOffset2D
from reid.utils.serialization import load_checkpoint, save_checkpoint
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
class ResNet(nn.Module):
    """Torchvision ResNet backbone with an added embedding head.

    forward() returns a pair (x1, x2):
      x1 - globally average-pooled backbone features,
      x2 - x1 passed through a linear embedding + BatchNorm1d + ReLU.
    """

    __factory = {
        18: torchvision.models.resnet18,
        34: torchvision.models.resnet34,
        50: torchvision.models.resnet50,
        101: torchvision.models.resnet101,
        152: torchvision.models.resnet152,
    }

    def __init__(self, depth, checkpoint=None, pretrained=True, num_features=2048,
                 dropout=0.1, num_classes=0):
        """
        :param depth: one of 18/34/50/101/152 (KeyError otherwise)
        :param checkpoint: optional path to resume weights from (loaded non-strict)
        :param pretrained: load ImageNet weights for the backbone
        :param num_features: embedding dimension of the x2 head
        :param dropout: dropout probability (>0 creates self.drop; not applied
            in forward() here — presumably used by subclasses/callers)
        :param num_classes: output size of the auxiliary x2 classifier
        """
        super(ResNet, self).__init__()
        self.depth = depth
        self.checkpoint = checkpoint
        self.pretrained = pretrained
        self.num_features = num_features
        self.dropout = dropout
        self.num_classes = num_classes
        if self.dropout > 0:
            self.drop = nn.Dropout(self.dropout)
        # Construct base (pretrained) resnet
        if depth not in ResNet.__factory:
            raise KeyError("Unsupported depth:", depth)
        self.base = ResNet.__factory[depth](pretrained=pretrained)
        out_planes = self.base.fc.in_features
        # resume from pre-iteration training
        if self.checkpoint:
            state_dict = load_checkpoint(checkpoint)
            self.load_state_dict(state_dict['state_dict'], strict=False)
        self.feat = nn.Linear(out_planes, self.num_features, bias=False)
        self.feat_bn = nn.BatchNorm1d(self.num_features)
        self.relu = nn.ReLU(inplace=True)
        # Fixed: init.normal/init.constant were deprecated aliases removed from
        # modern PyTorch; the in-place variants behave identically.
        init.normal_(self.feat.weight, std=0.001)
        init.constant_(self.feat_bn.weight, 1)
        init.constant_(self.feat_bn.bias, 0)
        # x2 classifier
        self.classifier_x2 = nn.Linear(self.num_features, self.num_classes)
        init.normal_(self.classifier_x2.weight, std=0.001)
        init.constant_(self.classifier_x2.bias, 0)
        if not self.pretrained:
            self.reset_params()

    def forward(self, x):
        # Run backbone modules up to (but excluding) the final avgpool/fc.
        for name, module in self.base._modules.items():
            if name == 'avgpool':
                break
            x = module(x)
        # Global average pooling over the remaining spatial dims.
        x1 = F.avg_pool2d(x, x.size()[2:])
        x1 = x1.view(x1.size(0), -1)
        x2 = F.avg_pool2d(x, x.size()[2:])
        x2 = x2.view(x2.size(0), -1)
        x2 = self.feat(x2)
        x2 = self.feat_bn(x2)
        x2 = self.relu(x2)
        return x1, x2

    def reset_params(self):
        """Re-initialize all conv/bn/linear weights (used when not pretrained)."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.normal_(m.weight, std=0.001)
                if m.bias is not None:
                    init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal_(m.weight, std=0.001)
                if m.bias is not None:
                    init.constant_(m.bias, 0)
def resnet18(**kwargs):
    """Construct a depth-18 ResNet; kwargs are forwarded to ResNet.__init__."""
    return ResNet(18, **kwargs)
def resnet34(**kwargs):
    """Construct a depth-34 ResNet; kwargs are forwarded to ResNet.__init__."""
    return ResNet(34, **kwargs)
def resnet50(**kwargs):
    """Construct a depth-50 ResNet; kwargs are forwarded to ResNet.__init__."""
    return ResNet(50, **kwargs)
def resnet101(**kwargs):
    """Construct a depth-101 ResNet; kwargs are forwarded to ResNet.__init__."""
    return ResNet(101, **kwargs)
def resnet152(**kwargs):
    """Construct a depth-152 ResNet; kwargs are forwarded to ResNet.__init__."""
    return ResNet(152, **kwargs)
|
from sklearn.metrics import roc_curve, auc
import json
import numpy as np
import pandas as pd
from sklearn.preprocessing import MultiLabelBinarizer
import os
import pdb
import matplotlib.pyplot as plt
# Predicted per-label scores for the validation set, one row per example.
outputs = pd.read_csv('validation_outputs.csv')
# Quick sanity statistics on the raw model outputs.
mean_out = np.mean(outputs.values)
print(mean_out)
print(max(outputs.values.flatten()))
print(min(outputs.values.flatten()))
print(np.median(outputs.values.flatten()))
# NOTE(review): hard-coded absolute paths — this only runs on the author's
# machine; consider making the data directory configurable.
data_train = json.load(open('/Users/jiayi/1008 deep learning/data/train.json'))
df_train = pd.DataFrame.from_records(data_train["annotations"])
data_val = json.load(open('/Users/jiayi/1008 deep learning/data/validation.json'))
df_val = pd.DataFrame.from_records(data_val["annotations"])
# Only the first val_size validation rows have model outputs.
val_size = 1984
val_ = df_val[:val_size]
# Binarizer is fit on the training labels so train/val share the label space.
mlb = MultiLabelBinarizer()
mlb = mlb.fit(df_train['labelId'])
labels = mlb.transform(val_['labelId'])
def compute_f1(endpoint, num_pts):
    """Sweep `num_pts` thresholds over [0, endpoint] and compute F1 at each.

    Uses the module-level `outputs` (predicted scores, a DataFrame) and
    `labels` (binarized ground truth).

    :return: (thresholds, f1_all) — numpy array of thresholds and a list of
        F1 scores of the same length.
    """
    thresholds = np.linspace(0, endpoint, num=num_pts)
    f1_all = []
    for t in thresholds:
        pred = outputs.gt(t).astype(int)
        # Confusion-matrix counts: sums/differences identify each cell.
        tp = (pred + labels).eq(2).values.sum()
        fp = (pred - labels).eq(1).values.sum()
        fn = (pred - labels).eq(-1).values.sum()
        # Fixed: these counts are numpy integers, and numpy division by zero
        # yields nan/inf with a RuntimeWarning instead of raising
        # ZeroDivisionError — so the original try/except guards never fired.
        # Test the denominators explicitly to get the intended 0.0 fallback.
        prec = tp / (tp + fp) if (tp + fp) > 0 else 0.0
        rec = tp / (tp + fn) if (tp + fn) > 0 else 0.0
        f1 = 2 * (rec * prec) / (rec + prec) if (rec + prec) > 0 else 0.0
        f1_all.append(f1)
    return thresholds, f1_all
def plot_f1(thresholds, f1s):
    """Plot the F1 score as a function of the decision threshold."""
    figure = plt.figure(1, figsize=(11, 7))
    axes = plt.subplot(111)
    axes.grid()
    axes.set_title("F1 score under different thresholds")
    axes.set_xlabel("Threshold")
    axes.set_ylabel("F1 score")
    plt.plot(thresholds, f1s)
    plt.show()
|
from django.contrib.auth.models import User, Group
from rest_framework import viewsets
from .models import Record, List
from .serializers import UserSerializer, GroupSerializer, RecordSerializer, ListSerializer
class UserViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows users to be viewed or edited.
    """
    # Newest accounts first.
    queryset = User.objects.all().order_by('-date_joined')
    serializer_class = UserSerializer
class GroupViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows groups to be viewed or edited.
    """
    queryset = Group.objects.all()
    serializer_class = GroupSerializer
class RecordViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows records to be viewed or edited.
    """
    queryset = Record.objects.all() # .order_by('list.owner')
    serializer_class = RecordSerializer
class ListViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows lists to be viewed or edited.
    """
    queryset = List.objects.all()
    serializer_class = ListSerializer
|
"""
Http API for the mypaas daemon.
"""
import asyncio
import collections
import datetime
import io
import logging
import os
import queue
import shutil
import time
import zipfile

from mypaas.server import get_deploy_generator, get_public_key
logger = logging.getLogger("mypaasd")
# Fix encoding
os.environ.setdefault("LC_ALL", "C.UTF-8")
# Keep track of tokens that have been used. These expire after x seconds.
invalid_tokens = queue.deque() # contains (timestamp, token) tuples
# Keep track of whether a deploy is in progress.
deploy_in_progress = False
# %% Utilities
def authenticate(request):
    """Check if the request comes from someone that has a private key
    that we have authorized.

    This is done by validating (using the public key) that the token is
    signed correctly. We also make sure that tokens can only be used once.
    Returns the key fingerprint on success, None otherwise.
    """
    # Get authentication details
    key_id = request.querydict.get("id", "")  # aka fingerprint
    token = request.querydict.get("token", "")
    signature = request.querydict.get("sig1", "")
    if not token or not signature:
        return None
    # Check the timestamp (first part of the token). Tokens are only valid
    # within a 5-second window; client clocks ahead of the server are
    # rejected outright.
    client_time = int(token.split("-")[0])
    server_time = int(time.time())
    if not (server_time - 5 <= client_time <= server_time):
        return None  # too late (or early)
    # Validate the signature against the registered public key.
    public_key = get_public_key(key_id)
    if public_key is None:
        return None
    if not public_key.verify_data(signature, token.encode()):
        return None
    # Ok, but what if someone somehow read the key during its transfer
    # and tries to re-use it? Reject tokens we have already seen.
    for _, invalid_token in invalid_tokens:
        if token == invalid_token:
            return None
    # Clear invalid tokens that have expired (10 s, i.e. longer than the
    # validity window above) and mark this one as used.
    old = server_time - 10
    while invalid_tokens and invalid_tokens[0][0] < old:
        invalid_tokens.popleft()
    invalid_tokens.append((server_time, token))
    # todo: return string based on "comment" in public key.
    return public_key.get_id()  # fingerprint
def validate_payload(request, payload):
    """Verify that the given payload matches the signature.

    Returns the key fingerprint on success, None otherwise.
    """
    query = request.querydict
    signature = query.get("sig2", "")
    if not signature:
        return None
    # "id" is the key fingerprint used to look up the registered public key.
    public_key = get_public_key(query.get("id", ""))
    if public_key is not None and public_key.verify_data(signature, payload):
        return public_key.get_id()
    return None
def get_uptime_from_start_time(start_time):
    """Turn an ISO start time (e.g. "2021-01-01T12:00:00.123456") into a
    human-readable uptime string, keeping the two most significant units,
    e.g. "3 days 2 hours" or "5 min 7 secs".
    """
    start_time = start_time.rpartition(".")[0] + "+0000"  # get rid of subsecs
    started = datetime.datetime.strptime(start_time, "%Y-%m-%dT%H:%M:%S%z")
    now = datetime.datetime.now(datetime.timezone.utc)
    # Fixed: use total_seconds() — timedelta.seconds excludes whole days, so
    # the original never reported days and understated uptimes over 24 h.
    nsecs = ori_nsecs = int((now - started).total_seconds())
    result = []
    if ori_nsecs >= 86400:
        result.append(f"{int(nsecs / 86400)} days")
        nsecs = nsecs % 86400
    if ori_nsecs >= 3600:
        result.append(f"{int(nsecs / 3600)} hours")
        nsecs = nsecs % 3600
    if ori_nsecs >= 60:
        result.append(f"{int(nsecs / 60)} min")
        nsecs = nsecs % 60
    result.append(f"{int(nsecs)} secs")
    return " ".join(result[:2])
# %% Handlers
MAIN_HTML = """
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>MyPaas Daemon</title>
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" href="/style.css">
</head>
<body>
<h1>MyPaas Daemon</h1>
<p style='max-width: 700px;'>
Hi! This is the MyPaas daemon that handles the deploys. It also measures
the system's CPU, memory and disk usage, as well as the CPU and memory
usage of the other MyPaas services, and sends these measurements to the
stats server.
</p>
</body>
</html>
""".lstrip()
async def main_handler(request):
    """Main entry point: route /daemon/* requests to their handlers."""
    full_path = request.path
    if not full_path.startswith("/daemon/"):
        return 404, {}, "404 not found"
    # Strip the "/daemon" prefix (7 chars), keeping the leading slash.
    path = full_path[7:]
    if path == "/":
        return 200, {}, MAIN_HTML
    if path == "/time":
        # Server time, used by clients to build signed, time-limited tokens.
        return 200, {}, str(int(time.time()))
    if path == "/push":
        return await push(request)
    return 404, {}, "404 not found"
async def push(request):
    """Push handler: authenticate, validate the payload, then stream the
    deploy progress back as plain text."""
    if request.method != "POST":
        return 405, {}, "Invalid request"
    fingerprint = authenticate(request)
    if not fingerprint:
        return 403, {}, "Access denied"
    # Read the uploaded archive and verify its signature too.
    payload = await request.get_body(100 * 2**20)  # 100 MiB limit
    if not validate_payload(request, payload):
        return 403, {}, "Payload could not be verified."
    # Return generator -> do a deploy while streaming feedback on status
    feedback = push_generator(fingerprint, payload)
    return 200, {"content-type": "text/plain"}, feedback
async def push_generator(fingerprint, payload):
    """Generator that extracts given zipfile and does the deploy.

    Yields progress lines that the HTTP layer streams to the client.
    """
    global deploy_in_progress
    # Make sure that only one push happens at a given time.
    # NOTE(review): deploy_in_progress is a bool, so the message below
    # interpolates "True" rather than who is deploying — confirm intent.
    if deploy_in_progress:
        yield f"Another deploy is in progress by {deploy_in_progress}. Please wait.\n"
        while deploy_in_progress:
            await asyncio.sleep(1)
            yield "."
    deploy_in_progress = True  # Really, really make sure we set this back to False!
    try:
        # NOTE(review): logger.warn is a deprecated alias for logger.warning.
        logger.warn(f"Deploy invoked by {fingerprint}")  # log
        yield f"Hi! This is the MyPaas server. Let's deploy this!\n"
        yield f"Signature validated with public key (fingerprint {fingerprint}).\n"
        # Extract the uploaded zipfile into a clean deploy cache directory.
        yield "Extracting ...\n"
        deploy_dir = os.path.expanduser("~/_mypaas/deploy_cache")
        shutil.rmtree(deploy_dir, ignore_errors=True)
        os.makedirs(deploy_dir, exist_ok=True)
        with zipfile.ZipFile(io.BytesIO(payload), "r") as zf:
            zf.extractall(deploy_dir)
        # Deploy, streaming each step; the sleeps let the event loop flush output.
        await asyncio.sleep(0.1)
        for step in get_deploy_generator(deploy_dir):
            yield step + "\n"
            await asyncio.sleep(0.1)
    except Exception as err:
        yield "FAIL: " + str(err)
    finally:
        # Always release the lock, even if the deploy failed.
        deploy_in_progress = False
|
class BoundingBox:
    """Bounding-box placeholder — no implementation yet."""

    def __init__(self):
        # TODO: store the box extents (e.g. min/max corners) once the
        # representation is decided.
        pass

    def createFromPoints(self):
        # TODO: compute the box from a collection of points.
        # NOTE(review): takes no points argument yet — the interface is
        # unfinished; confirm the intended signature before implementing.
        pass
|
from django.db import models
# Create your models here.
class Person(models.Model):
    # Core contact identity; Adress/Phone/Email/Group reference this model.
    name = models.CharField(max_length=64)
    surname =models.CharField(max_length=64)
    description = models.TextField()
# NOTE(review): class name misspells "Address"; renaming would require a
# migration plus updating every FK/import, so it is documented instead.
class Adress(models.Model):
    # Postal address attached to a Person (one person, many addresses).
    city = models.CharField(max_length=64)
    street = models.CharField(max_length=64)
    house_number = models.SmallIntegerField()
    apartment_number = models.SmallIntegerField()
    person_adress = models.ForeignKey(Person, on_delete=models.CASCADE)
class Phone(models.Model):
    # Choice values: 1=home ("domowy"), 2=work ("służbowy"), 3=other ("inny").
    PHONE_TYPE = (
        (1, "domowy"),
        (2, "służbowy"),
        (3, "inny")
    )
    # NOTE(review): IntegerField drops leading zeros and cannot hold '+'
    # prefixes — a CharField is usually safer for phone numbers; confirm.
    number = models.IntegerField()
    type_number = models.IntegerField(choices=PHONE_TYPE)
    person_phone = models.ForeignKey(Person, on_delete=models.CASCADE)
class Email(models.Model):
    # Choice values: 1=home ("domowy"), 2=work ("służbowy"), 3=other ("inny").
    EMAIL_TYPE =(
        (1, "domowy"),
        (2, "służbowy"),
        (3, "inny")
    )
    email = models.CharField(max_length=64)
    # Defaults to "home" when no type is given.
    email_type = models.IntegerField(choices=EMAIL_TYPE, default=1)
    person_email = models.ForeignKey(Person, on_delete=models.CASCADE)
class Group(models.Model):
    # Named contact group; a person can belong to many groups and vice versa.
    name = models.CharField(max_length=64)
    person = models.ManyToManyField(Person)
|
# -*- coding: utf-8 -*-
class Solution:
    def defangIPaddr(self, address):
        """Return the IP address with every '.' replaced by '[.]'."""
        return "[.]".join(address.split("."))
if __name__ == "__main__":
solution = Solution()
assert "1[.]1[.]1[.]1" == solution.defangIPaddr("1.1.1.1")
assert "255[.]100[.]50[.]0" == solution.defangIPaddr("255.100.50.0")
|
import json
from sqlalchemy import create_engine, pool
from sqlalchemy.ext.declarative import DeclarativeMeta
from sqlalchemy.orm import sessionmaker
from chalicelib.config.settings import (ENV, PROD, DATABASE)
class ConferenceDatabaseConnection:
    """Static helpers for the MySQL engine/session lifecycle and for turning
    SQLAlchemy entities into JSON-safe dicts."""

    # Lazily-created engine shared process-wide (see get_engine()).
    ENGINE = None

    @staticmethod
    def engine(default_database=None):
        """Build a new MySQL engine; falls back to the configured default DB."""
        if default_database is None:
            default_database = DATABASE['db']
        string_conn = "mysql+pymysql://{}:{}@{}:{}/{}".format(
            DATABASE['user'],
            DATABASE['password'],
            DATABASE['host'],
            DATABASE['port'],
            default_database
        )
        engine = create_engine(
            string_conn,
            echo=False,
            poolclass=pool.StaticPool
        )
        # Echo SQL statements outside production for easier debugging.
        if ENV != PROD:
            engine.echo = True
        return engine

    @staticmethod
    def get_engine():
        """Return the shared engine, creating it on first use."""
        if ConferenceDatabaseConnection.ENGINE is None:
            ConferenceDatabaseConnection.ENGINE = ConferenceDatabaseConnection.engine()
        return ConferenceDatabaseConnection.ENGINE

    @staticmethod
    def get_session(session=None):
        """Return `session` if given, otherwise open a new session on the
        shared engine."""
        try:
            if session is not None:
                return session
            engine = ConferenceDatabaseConnection.get_engine()
            db_session = sessionmaker(bind=engine)
            db_session = db_session()
            return db_session
        except Exception as e:
            print('ERROR TO GET SESSION:: {}'.format(e))
            raise e

    @staticmethod
    def close_session(session, close_session=True):
        """Close `session` unless the caller opts out (shared session case)."""
        try:
            if close_session:
                print('CLOSING SESSION::')
                session.close()
        except Exception as e:
            print('ERROR TO CLOSE SESSION:: {}'.format(e))
            raise e

    @staticmethod
    def alchemy_entity_to_dict(alchemy_entity):
        """Convert a mapped entity's public attributes to a dict; values that
        are not JSON-serializable become None.

        NOTE(review): returns None (implicitly) when the argument is not a
        declaratively-mapped entity — confirm callers handle that.
        """
        if isinstance(alchemy_entity.__class__, DeclarativeMeta):
            fields = {}
            for field in [x for x in dir(alchemy_entity) if not x.startswith('_') and x != 'metadata']:
                data = alchemy_entity.__getattribute__(field)
                try:
                    # Probe JSON-serializability; keep the value if it passes.
                    json.dumps(data)
                    fields[field] = data
                except TypeError:
                    fields[field] = None
            return fields
|
import functools
from flask import (
Blueprint, redirect, render_template, request, session, url_for, flash, g
)
from werkzeug.security import check_password_hash, generate_password_hash
from ISRS.model import db, User
from ISRS.color import colors
bp = Blueprint('auth', __name__, url_prefix='/auth')
@bp.route('/register/', methods=('GET', 'POST'))
def register():
    """Render the signup page; on POST, validate the form and create a user."""
    if request.method == 'POST':
        # TODO: 'username' -> form and variable name
        print(request.form)
        username = request.form['username']
        password = request.form['password']
        error_msg = None
        if not username:
            error_msg = 'Username is required'
        elif not password:
            error_msg = 'Password is required'
        elif User.query.filter_by(username=username).first() is not None:
            error_msg = 'User {} is already registered'.format(username)
        if error_msg is None:
            # Debug: length of the sha256 password hash string.
            print(len(generate_password_hash(password, 'sha256')))
            new_user = User(username=username, password=generate_password_hash(password, method='sha256'))
            db.session.add(new_user)
            db.session.commit()
            print(colors.GREEN + 'Register success' + colors.END)
            return redirect(url_for('auth.login'))
        flash(error_msg)
        print(colors.RED + 'Register fail' + colors.END)
    return render_template('signup.html', register='menu-active')
@bp.route('/login/', methods=('GET', 'POST'))
def login():
    """Render the login page; on POST, verify credentials and start a session."""
    if request.method == 'POST':
        # TODO: 'username' -> form and variable name
        print(request.form)
        username = request.form['username']
        password = request.form['password']
        error_msg = None
        user = User.query.filter_by(username=username).first()
        if user is None:
            error_msg = 'Wrong username'
        elif not check_password_hash(user.password, password):
            error_msg = 'Wrong password'
        if error_msg is None:
            # 'next' is set by force_login so we can return to the guarded
            # page; start a fresh session for the authenticated user.
            endpoint = session.get('next', 'index')
            session.clear()
            session['user_id'] = user.id
            print(colors.GREEN + 'Login success' + colors.END)
            return redirect(url_for(endpoint, sheet_id=request.args.get('sheet_id')))
        flash(error_msg)
        print(colors.RED + 'Login fail' + colors.END)
    return render_template('login.html', login='menu-active')
@bp.before_app_request
def load_logging_in_user_data():
    """Attach the logged-in User (or None) to flask.g before each request."""
    current_id = session.get('user_id')
    if current_id is None:
        g.user = None
    else:
        g.user = User.query.filter_by(id=current_id).first()
@bp.route('/logout/')
def logout():
    """Drop the whole session (user_id included) and return to the index."""
    session.clear()
    return redirect(url_for('index'))
def force_login(endpoint):
    """Decorator factory: require a logged-in user for a view.

    `endpoint` is remembered in the session so login() can redirect back
    to the guarded page after a successful login.
    """
    def deco(view):
        @functools.wraps(view)
        def wrapped_view(**kwargs):
            """if not logged in, redirect to login page"""
            if g.user is None:
                session['next'] = endpoint
                print("NO")
                return redirect(url_for('auth.login', sheet_id=kwargs.get('sheet_id')))
            return view(**kwargs)
        return wrapped_view
    return deco
|
import random

# Number-guessing game: the player guesses until they hit the magic
# number or type 'exit'.
magic_number = random.randrange(1, 10, 1)  # secret number in [1, 9]
cont = 0  # number of guesses made so far
play = input("Wanna guess the number? (yes/no) ")
if (play == 'no'):
    print("The times you try where: " + str(cont))
else:
    while (play != 'exit'):
        guess = int(input("Give me a number:"))
        if (guess > magic_number):
            cont += 1
            print("Your number is too high.")
            print("Wanna keep guessing the number? ")
            play = input("Yes to continue or exit to leave:\n")
            if (play == 'exit'):
                print("The times you try where: " + str(cont))
        elif (guess < magic_number):
            cont += 1
            print("Your number is too low.")
            print("Wanna keep guessing the number? ")
            play = input("Yes to continue or exit to leave:\n")
            if (play == 'exit'):
                print("The times you try where: " + str(cont))
        elif (guess == magic_number):
            cont += 1
            print("You guess the number.\n")
            print("The times you try where: " + str(cont))
            break
from __future__ import division, print_function
import numpy as np
import tensorflow as tf
from vgg19.vgg import Vgg19
from PIL import Image
import time
from closed_form_matting import getLaplacian
import math
from functools import partial
import copy
import os
# try:
# xrange # Python 2
# except NameError:
# xrange = range # Python 3
# VGG_MEAN = [103.939, 116.779, 123.68]
# def rgb2bgr(rgb, vgg_mean=True):
# if vgg_mean :
# return rgb[:,:,::-1] - VGG_MEAN
# else:
# return rgb[:,:,::-1]
# def bgr2rgb(bgr,vgg_mean=False):
# if vgg_mean :
# return bgr[:,:,::-1] + VGG_MEAN
# else:
# return rgb[:,:,::-1]
# def load_seg(content_seg_path, style_seg_path, content_shape, style_shape):
# color_codes = ['BLUE', 'GREEN', 'BLACK', 'WHITE', 'RED', 'YELLOW', 'GREY', 'LIGHT_BLUE', 'PURPLE']
# def _extract_mask(seg, color_str)
# h,w,c = np.shape(seg)
# if color_str == 'BLUE'
# mask_r = (seg[:,:,0]<0.1).astype(np.unit8) # 2
# content_seg = np.array(Image.open(content_seg_path).convert("RGB").resize(content_shape, resample=Image.BILINEAR), dtype=np.float32)/255.0
# color_content_masks = []
# color_style_masks = []
# for i in xrange(len(color_codes)):
# color_content_masks.append(tf.expand_dims(tf.expand_dims))
# import tensorflow as tf
import numpy as np
# a=np.array([[[ 1, 2, 3],
# [ 4, 5, 6]],
# [[ 7, 8, 9],
# [10, 11, 12]]])
# print(a.shape)
# b = tf.transpose(a,[0,1,2])
# print(b.shape)
# c = tf.transpose(a,[0,2,1])
# print(c.shape)
# c = tf.transpose(a,[0,2,3])
# _, content_seg_height, content_seg_wdth,_ = content_segs[]
import cv2
from PIL import Image, ImageShow
# import matplotlib.pyplot as plt
# a = cv2.imread('C:/Study/deep-photo-styletransfer-tf-master/wonbin.jpg')
# cv2.imshow('a',a)
# cv2.waitKey()
# cv2.destroyAllWindows()
# im = Image.open('C:/Study/deep-photo-styletransfer-tf-master/out_iter_4000.png')
# plt.imshow(a[:,:,::-1])
# plt.show()
# plt.imshow(a) #bgr
# plt.show()
# VGG_MEAN = [103.939, 116.779, 123.68]
# mean_pixel = tf.constant(VGG_MEAN)
# print(mean_pixel)
# a= np.array([[1,1],[2,1]])
# b= np.array([[1,1],[2,1]])
# print(a)
# gram = tf.transpose(a, [1, 0])
# print(gram)
# gram = tf.matmul(a, b, transpose_b=True)
# print(gram.shape)
# NOTE(review): `vgg_var` and `style_loss` are not defined anywhere in the
# visible portion of this scratch file -- these two lines raise NameError
# if executed. Presumably pasted from the style-transfer training script;
# confirm before use.
layer_structure_all = [layer.name for layer in vgg_var.get_all_layers()]
style_loss(layer_structure_all)
import pandas as pd
import numpy as np
def build_q_table(n_states, actions):
    """Create an (n_states x len(actions)) Q-table initialised to zero.

    Rows index states; columns are named after the actions.
    """
    zeros = np.zeros((n_states, len(actions)))
    return pd.DataFrame(zeros, columns=actions)
# Demo: Q-table lookups, then np.stack / np.vstack axis behaviour.
acs = ['l', 'r']
q = build_q_table(10, acs)
q.iloc[3, 0] = 0.1
q.iloc[3, 1] = 0.9
d1 = q.iloc[3, :]  # row for state 3
print(d1)
print(d1.max())
print('-' * 10)
a = [1, 2, 3, 4]
b = [5, 6, 7, 8]
c = [9, 10, 11, 12]
print("op_fuwu_order.txt=", a)
print("b=", b)
print("c=", c)
# Stack along a new leading axis (shape 3x4).
print("增加一维,新维度的下标为0")
d = np.stack((a, b, c), axis=0)
# print(np.stack((op_fuwu_order.txt, b, c)))
print(d)
# Stack along a new second axis (shape 4x3).
print("增加一维,新维度的下标为1")
d = np.stack((a, b, c), axis=1)
print(d)
print('vstack')
print(np.vstack((a, b, c)))
print(np.vstack([1, 2, 3, 4]))
|
# Zero out every odd-indexed element of a five-element list of ones.
array = [1] * 5
array[1::2] = [0] * (len(array) // 2)
print(array)
# encoding: utf-8
from src.config import AnnoyConfig
from src.utils import singleton
import logging.config
import logging
from annoy import AnnoyIndex
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
logging.config.fileConfig(fname='log.config', disable_existing_loggers=False)
# 使用annoy进行相似(最近邻)搜索
# Approximate nearest-neighbour search backed by the annoy library.
@ singleton
class AnnoySearch:
    def __init__(self, vec_dim=100, metric='angular'):
        self.vec_dim = vec_dim  # dimensionality of indexed vectors
        # metric may be "angular", "euclidean", "manhattan", "hamming" or "dot"
        self.metric = metric
        self.annoy_instance = AnnoyIndex(self.vec_dim, self.metric)
        self.logger = logging.getLogger('AnnoySearch')

    def save_annoy(self, annoy_file, prefault=False):
        """Persist the built index to disk."""
        self.annoy_instance.save(annoy_file, prefault=prefault)
        self.logger.info('save annoy SUCCESS !')

    def unload_annoy(self):
        """Release the (memory-mapped) index."""
        self.annoy_instance.unload()

    def load_annoy(self, annoy_file, prefault=False):
        """Load an index file; a missing file is logged, not raised."""
        try:
            self.annoy_instance.unload()
            self.annoy_instance.load(annoy_file, prefault=prefault)
            self.logger.info('load annoy SUCCESS !')
        except FileNotFoundError:
            self.logger.error(
                'annoy file DOES NOT EXIST , load annoy FAILURE !',
                exc_info=True)

    # Build the annoy index with n_trees trees (more trees = better recall).
    def build_annoy(self, n_trees):
        self.annoy_instance.build(n_trees)

    # Query nearest neighbours of an already-indexed item.
    def get_nns_by_item(
            self,
            index,
            nn_num,
            search_k=-1,
            include_distances=False):
        return self.annoy_instance.get_nns_by_item(
            index, nn_num, search_k, include_distances)

    # Query nearest neighbours of an arbitrary query vector.
    def get_nns_by_vector(
            self,
            vec,
            nn_num,
            search_k=-1,
            include_distances=False):
        return self.annoy_instance.get_nns_by_vector(
            vec, nn_num, search_k, include_distances)

    def get_n_items(self):
        return self.annoy_instance.get_n_items()

    def get_n_trees(self):
        return self.annoy_instance.get_n_trees()

    def get_vec_dim(self):
        return self.vec_dim

    # Add one vector under the given integer index.
    def add_item(self, index, vec):
        self.annoy_instance.add_item(index, vec)

    def get_item_vector(self, index):
        return self.annoy_instance.get_item_vector(index)
def init_annoy_search(ann_config: AnnoyConfig):
    """Create the AnnoySearch singleton and load the configured index file."""
    logger = logging.getLogger('init_annoy_search')
    searcher = AnnoySearch(vec_dim=ann_config.vec_dim)
    try:
        searcher.load_annoy(ann_config.annoy_file)
        logger.info('init annoy search SUCCESS !')
    except IndexError:
        logger.error(
            'ERROR : vector length DOES NOT match dim of vector of loaded annoy !',
            exc_info=True)
def search_by_vector(
        vector,
        vec_dim=100,
        top_n=15,
        search_k=-1,
        include_distance=True):
    """Return the top_n approximate neighbours of `vector`, or None on failure
    (e.g. when the query dimension does not match the loaded index)."""
    logger = logging.getLogger('search_by_vector')
    searcher = AnnoySearch(vec_dim=vec_dim)
    result = None
    try:
        result = searcher.get_nns_by_vector(
            vec=vector,
            nn_num=top_n,
            search_k=search_k,
            include_distances=include_distance)
        logger.debug('annoy search by vector: ' + str(result))
    except IndexError:
        logger.error(
            'ERROR : vector length DOES NOT match dim of vector of loaded annoy ! vector dim is: ' +
            str(len(vector)) +
            ', annoy dim is: ' +
            str(vec_dim),
            exc_info=True)
    return result
if __name__ == '__main__':
    # Manual smoke test: vectorise a query with TF-IDF, search the annoy
    # index, then fetch matching documents from Elasticsearch.
    from src.tfidf_transformer import TfidfTransformer
    from src.elastic_search import search_by_ids
    from src.config import init_faq_config, FaqConfig
    faq_config = init_faq_config('faq.config')
    tt = TfidfTransformer()
    tt.load_model('tfidftransformer.pkl')
    v = tt.predict('who are you?')
    print(v)
    print(tt.get_feature_dims())
    init_annoy_search(faq_config.annoy_search)
    res = search_by_vector(
        v,
        vec_dim=FaqConfig('faq.config').annoy_search.vec_dim,
        include_distance=True)
    print(res)
    # include_distance=True => res is (ids, distances); search by the ids.
    rsp = search_by_ids(res[0])
    print(rsp)
|
from random import shuffle

# Read four student names and print them in a random presentation order.
aluno1 = str(input('Digite o nome do Aluno 1:'))
aluno2 = str(input('Digite o nome do Aluno 2:'))
aluno3 = str(input('Digite o nome do Aluno 3:'))
aluno4 = str(input('Digite o nome do Aluno 4:'))
presentation_order = [aluno1, aluno2, aluno3, aluno4]
shuffle(presentation_order)
print('A ordem de Apresentação será:')
print(presentation_order)
|
# -*- coding: utf-8 -*-
from .utils import SlotDefinedClass, merge_dicts
from .defaults import BASE_INDENT_SIZE
class Type(SlotDefinedClass):
    """Base class for C-like type descriptors, identified by a name."""
    __slots__ = ("name", )
    __types__ = {"name": str}

    def __str__(self):
        return self.name

    def __eq__(self, other):
        # Equal to any other Type with the same name.
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable in Python 3 -- confirm they are never used as dict keys.
        return isinstance(other, Type) and self.name == other.name

    def __ne__(self, other):
        return not (self == other)
class CharType(Type):
    """The C `char` type."""
    def __init__(self):
        super().__init__(name="char")
class IntType(Type):
    """The C `int` type."""
    def __init__(self):
        super().__init__(name="int")
class FloatType(Type):
    """The C `float` type."""
    def __init__(self):
        super().__init__(name="float")
class VoidType(Type):
    """The C `void` type."""
    def __init__(self):
        super().__init__(name="void")
class Pointer(Type):
    """A pointer to another Type; renders as '<type>*'."""
    __slots__ = ("type", )
    __types__ = {"type": Type}

    def __str__(self):
        return str(self.type) + "*"

    def __eq__(self, other):
        # Pointers compare by pointee type, not by name.
        return isinstance(other, Pointer) and self.type == other.type
class StringType(Pointer):
    """`char*` -- a pointer to char."""
    def __init__(self):
        super().__init__(CharType())
|
import numpy as np

# Read n and print an n x n matrix whose entry (j, i) is i + j.
print("Input n:")
n = int(input())
a = np.add.outer(np.arange(n), np.arange(n))
print(a)
|
import copy
import math
# This class is used to store a move.
class Move:
def __init__(self, the_place_board: int, the_position: int, the_twist_board: int, the_direction: str):
self.place_board = the_place_board
self.position = the_position
self.twist_board = the_twist_board
self.direction = the_direction
# This class is used to build the AI's game tree.
class Node:
def __init__(self, the_boards: list, depth: int, token, move: Move, is_max, alpha, beta):
self.children = []
self.boards = copy.deepcopy(the_boards)
self.depth = depth
self.token = token
self.move = move
self.is_max = is_max
self.best_node = None
self.utility_value = math.inf
if self.is_max:
self.utility_value = -math.inf
self.alpha = alpha
self.beta = beta
# This class simulates a twist. It is used to generate the possible moves.
def simulate_twist(the_current_boards, the_board: int, the_direction: str):
board = the_current_boards[the_board]
new_board = ['.'] * 9
new_board[4] = board[4]
if the_direction.lower() == 'l':
new_board[0] = board[1]
new_board[1] = board[2]
new_board[2] = board[5]
new_board[5] = board[8]
new_board[8] = board[7]
new_board[7] = board[6]
new_board[6] = board[3]
new_board[3] = board[0]
else:
new_board[0] = board[3]
new_board[1] = board[0]
new_board[2] = board[1]
new_board[5] = board[2]
new_board[8] = board[5]
new_board[7] = board[8]
new_board[6] = board[7]
new_board[3] = board[6]
the_current_boards[the_board] = new_board
# This class can generate the game tree and assign utility values to the game states using the Minimax algorithm.
# Generates the game tree and assigns utility values to the game states
# using the Minimax algorithm (with alpha-beta pruning).
class AI:
    def __init__(self, the_token: str):
        self.name = 'AI'
        self.token = the_token   # token this AI places on the boards
        self.victory = False
        self.is_max = False      # whether this AI is the maximising player
        self.root = None         # root Node of the most recent search
        self.max_depth = 3       # search depth cut-off
        self.nodes_total = 0

    # Generates the game tree. It is called recursively until max depth is reached.
    def generate_tree(self, current_node: Node):
        if current_node.depth < self.max_depth:
            # Children belong to the opposite token ('b' <-> 'w').
            new_token = 'b'
            if current_node.token == new_token:
                new_token = 'w'
            seen_moves = []       # board states already expanded (deduplication)
            possible_moves = []   # every empty cell as [board, position]
            for i in range(4):
                for j in range(9):
                    if current_node.boards[i][j] == '.':
                        possible_moves.append([i, j])
            exit_signal = False   # set when alpha-beta prunes this subtree
            for move in possible_moves:
                boards = copy.deepcopy(current_node.boards)
                boards[move[0]][move[1]] = current_node.token
                for board in range(4):
                    for direction in ['l', 'r']:
                        # NOTE(review): twists are applied cumulatively to
                        # `boards` across these loop iterations ('l' then
                        # 'r' cancels out) -- confirm this is the intended
                        # move generation.
                        simulate_twist(boards, board, direction)
                        if boards not in seen_moves:
                            seen_moves.append(copy.deepcopy(boards))
                            node = Node(boards, current_node.depth + 1, new_token,
                                        Move(move[0] + 1, move[1] + 1, board + 1, direction),
                                        not current_node.is_max, current_node.alpha, current_node.beta)
                            current_node.children.append(node)
                            self.generate_tree(node)
                            if current_node.is_max:
                                # MAX node: raise alpha and keep the highest child.
                                if current_node.alpha < node.utility_value:
                                    current_node.alpha = node.utility_value
                                if current_node.utility_value < node.utility_value:
                                    current_node.utility_value = node.utility_value
                                    current_node.best_node = node
                            else:
                                # MIN node: lower beta and keep the lowest child.
                                if current_node.beta > node.utility_value:
                                    current_node.beta = node.utility_value
                                if current_node.utility_value > node.utility_value:
                                    current_node.utility_value = node.utility_value
                                    current_node.best_node = node
                            # Prune once the alpha-beta window has closed.
                            if current_node.alpha > current_node.beta or current_node.alpha == current_node.beta:
                                exit_signal = True
                                break
                    if exit_signal:
                        break
                if exit_signal:
                    break
        else:
            # Depth cut-off: score the leaf position heuristically.
            current_node.utility_value = self.utility(current_node.boards)

    def play(self, the_current_board: list):
        """Search from the current position and return the chosen Move."""
        self.root = Node(the_current_board, 0, self.token, None, self.is_max, -math.inf, math.inf)
        self.generate_tree(self.root)
        return self.root.best_node.move

    def utility(self, the_boards: list):
        """Heuristic position score: rewards lines holding 2+ of our tokens,
        penalises the opponent's; `modifier` keeps the score oriented
        towards the maximising player."""
        value = 0
        modifier = -1
        if self.is_max:
            modifier = 1
        # Calculate columns
        for i in range(2):
            for j in range(3):
                ai_tally = 0
                human_tally = 0
                if the_boards[i][j] == self.token:
                    ai_tally += 1
                elif the_boards[i][j] != '.':
                    human_tally += 1
                if the_boards[i][j + 3] == self.token:
                    ai_tally += 1
                elif the_boards[i][j + 3] != '.':
                    human_tally += 1
                if the_boards[i][j + 6] == self.token:
                    ai_tally += 1
                elif the_boards[i][j + 6] != '.':
                    human_tally += 1
                if the_boards[i + 2][j] == self.token:
                    ai_tally += 1
                elif the_boards[i + 2][j] != '.':
                    human_tally += 1
                if the_boards[i + 2][j + 3] == self.token:
                    ai_tally += 1
                elif the_boards[i + 2][j + 3] != '.':
                    human_tally += 1
                if the_boards[i + 2][j + 6] == self.token:
                    ai_tally += 1
                elif the_boards[i + 2][j + 6] != '.':
                    human_tally += 1
                if ai_tally > 1:
                    value += modifier * ai_tally
                if human_tally > 1:
                    value -= modifier * human_tally
        # Calculate rows
        for i in range(0, 3, 2):
            for j in range(0, 9, 3):
                ai_tally = 0
                human_tally = 0
                if the_boards[i][j] == self.token:
                    ai_tally += 1
                elif the_boards[i][j] != '.':
                    human_tally += 1
                if the_boards[i][j + 1] == self.token:
                    ai_tally += 1
                elif the_boards[i][j + 1] != '.':
                    human_tally += 1
                if the_boards[i][j + 2] == self.token:
                    ai_tally += 1
                elif the_boards[i][j + 2] != '.':
                    human_tally += 1
                if the_boards[i + 1][j] == self.token:
                    ai_tally += 1
                elif the_boards[i + 1][j] != '.':
                    human_tally += 1
                if the_boards[i + 1][j + 1] == self.token:
                    ai_tally += 1
                elif the_boards[i + 1][j + 1] != '.':
                    human_tally += 1
                if the_boards[i + 1][j + 2] == self.token:
                    ai_tally += 1
                elif the_boards[i + 1][j + 2] != '.':
                    human_tally += 1
                if ai_tally > 1:
                    value += modifier * ai_tally
                if human_tally > 1:
                    value -= modifier * human_tally
        return value
|
import logging
import random
from fastapi import APIRouter
import pandas as pd
from pydantic import BaseModel, Field, validator
log = logging.getLogger(__name__)
router = APIRouter()
@router.post('/healthCheck')
async def healthCheck():
    """
    Returns 200 for a healthcheck for AWS
    """
    # NOTE(review): {'ok'} is a set literal, not a dict -- FastAPI will
    # serialise it as a JSON array (["ok"]); confirm that is intended.
    return {'ok'}
#!/usr/bin/python3
def best_score(a_dictionary):
    """Return the key with the biggest value in ``a_dictionary``.

    Fix: the original seeded its running maximum with 0, so a dictionary
    whose values were all <= 0 returned the empty string instead of a
    real key.  Ties keep the first key encountered, as before.

    Args:
        a_dictionary: mapping of keys to comparable scores, or None.

    Returns:
        The key whose value is largest, or None if the dict is None/empty.
    """
    if not a_dictionary:
        return None
    return max(a_dictionary, key=a_dictionary.get)
|
#!/usr/bin/python3
""" module containts unittests for our console """
import unittest
import json
from .models.base_model import BaseModel
from .models.engine.file_storage import FileStorage
class testconsole(unittest.TestCase):
    """ unittests for console

    NOTE(review): every test body below is an empty stub (docstring only),
    so they all pass vacuously until implemented.
    """
    def test_created_console(self):
        """ Datetime at creation of an object from console """
    def test_updated_console(self):
        """ Datetime at update of an object from console """
    def test_create(self):
        """ Test creation of new instances in the console """
    def test_show(self):
        """ Test retrieval of data from show command """
    def test_destroy(self):
        """ Test to destroy instance from file storage """
    def test_all(self):
        """ Tests printing of all instances in storage with attributes """
    def test_update(self):
        """ Test update of instances from the console """
    def test_EOF(self):
        """ EOF to quit console """
    def test_help(self):
        """ Test help commnand to display details of commands in console """
    def test_quit(self):
        """ Test that quit exits console """
if __name__ == "__main__":
    # Fix: the original called `testConsole()` -- a NameError (the class is
    # `testconsole`) -- and merely instantiating a TestCase runs nothing.
    # unittest.main() discovers and runs the tests in this module.
    unittest.main()
|
import os
import pickle
from copy import deepcopy
from util import *
class Client(object):
    """Per-torrent download bookkeeping: tracks which blocks have been
    requested/received and writes piece payloads into the target files."""

    def __init__(self, torrent, args):
        # Save directory named after the torrent file, under args.save_path.
        self.save_path = os.path.join(args.save_path, os.path.basename(os.path.splitext(args.torrent_path)[0])).encode()
        if not os.path.exists(self.save_path):
            os.makedirs(self.save_path)
        self.files = []
        # Method 2 buffers piece data in a single scratch file first.
        if args.method==2:
            try:
                self.stream = open("pieces.sav", 'r+b')
            except:
                self.stream = open("pieces.sav", 'w+b')

        def build_array():
            # pieces x blocks matrix of False flags.
            number_of_pieces = get_number_of_pieces(torrent)
            arr = [[False for j in range(blocks_per_piece(torrent, i))] for i in range(number_of_pieces)]
            return arr
        self.__get_file_info(torrent)
        # Resume from "received.sav" when present, otherwise start fresh.
        try:
            self.load_received()
        except:
            self.__received = build_array()
            self.__requested = build_array()

    def __get_file_info(self, torrent):
        """Open (or create) every target file; record its length and the
        cumulative byte offset of its start within the torrent payload."""
        if b'files' in torrent[b'info']:
            # Multi-file torrent.
            prev = 0
            for file_ in torrent[b'info'][b'files']:
                temp = {}
                temp['length'] = file_[b'length']
                try:
                    temp['descriptor'] = open(os.path.join(self.save_path, *file_[b'path']), 'r+b')
                except:
                    temp['descriptor'] = open(os.path.join(self.save_path, *file_[b'path']), 'w+b')
                temp['offset'] = prev
                prev += temp['length']
                self.files.append(temp)
        else:
            # Single-file torrent.
            temp = {}
            temp['length'] = torrent[b'info'][b'length']
            try:
                temp['descriptor'] = open(os.path.join(self.save_path, torrent[b'info'][b'name']), 'r+b')
            except:
                temp['descriptor'] = open(os.path.join(self.save_path, torrent[b'info'][b'name']), 'w+b')
            temp['offset'] = 0
            self.files.append(temp)

    def add_requested(self, piece_block):
        """Mark one block as requested."""
        block_index = int(piece_block['begin'] // BLOCK_LENGTH)
        self.__requested[piece_block['index']][block_index] = True

    def add_received(self, piece_block):
        """Mark one block as received."""
        block_index = int(piece_block['begin'] // BLOCK_LENGTH)
        self.__received[piece_block['index']][block_index] = True

    def needed(self, piece_block):
        """True when the block has not been requested yet.

        Once everything has been requested, the requested map is reset to
        the received map so still-missing blocks can be re-requested.
        """
        if all(all(piece) for piece in self.__requested):
            self.__requested = deepcopy(self.__received)
        block_index = int(piece_block['begin'] // BLOCK_LENGTH)
        return not (self.__requested[piece_block['index']][block_index])

    def is_done(self):
        """True when every block of every piece has been received."""
        return all(all(piece) for piece in self.__received)

    def print_progress(self):
        """Print the download percentage in place (carriage-return line)."""
        downloaded = 0
        total = 0
        for piece in self.__received:
            for block in piece:
                if block:
                    downloaded += 1
                total += 1
        # NOTE(review): math.ceil of an integer is a no-op; this was
        # probably meant to be math.ceil(downloaded * 100 / total).
        progress = math.ceil(downloaded*100)//total
        print("Progress:", progress, end='\r')

    def dump_received(self):
        """Persist the received map so a later run can resume."""
        with open("received.sav", 'wb') as f:
            pickle.dump(self.__received, f)

    def load_received(self):
        """Load the received map and mirror it into the requested map."""
        with open("received.sav", 'rb') as f:
            self.__received = pickle.load(f)
        self.__requested = deepcopy(self.__received)

    def piece_to_file(self, payload, torrent):
        """Write one received block into whichever file(s) cover its offsets."""
        # Absolute start/end offsets of the block in the torrent payload.
        offset = payload['index']*torrent[b'info'][b'piece length'] + payload['begin']
        offset_end = offset + len(payload['block'])
        # Debug trace of block/file ranges appended to temp.txt.
        temp = open("temp.txt", 'a')
        for file_ in self.files:
            start = file_['offset']
            end = file_['offset'] + file_['length']
            temp.writelines(f'{offset} {offset_end} {start} {end}\n')
            if offset>=start and offset<end:
                file_['descriptor'].seek(offset-start)
                file_['descriptor'].write(payload['block'][len(payload['block'])-(offset_end-offset):len(payload['block'])-(offset_end-min(end, offset_end))])
                offset = min(end, offset_end)
                if offset>=offset_end:
                    break

    def write_to_file(self):
        """Copy the buffered stream contents (method 2) out to the real files."""
        for file_ in self.files:
            self.stream.seek(file_['offset'])
            file_['descriptor'].write(self.stream.read(file_['length']))
|
from django.shortcuts import get_object_or_404, render
from django.urls import reverse_lazy
from django.views.generic import ListView, CreateView, UpdateView, DeleteView
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.db.models import Q
from .form import GiftForm
from .filters import GiftsFilter
from .models import Gift
import redis
from django.conf import settings
# connect to redis
# Single module-level connection shared by all views; used by detail()
# for the per-gift view counter.
r = redis.StrictRedis(host=settings.REDIS_HOST,
                      port=settings.REDIS_PORT,
                      db=settings.REDIS_DB)
def index(request):
    """List gifts, optionally filtered by ?q= text search and GiftsFilter,
    paginated 10 per page."""
    query = request.GET.get('q')
    if query:
        advert_list = Gift.objects.filter(Q(title__icontains=query) | Q(description__icontains=query))
    else:
        advert_list = Gift.objects.order_by('-modified')
    filters = GiftsFilter(request.GET, queryset=advert_list)
    page = request.GET.get('page', 1)
    paginator = Paginator(filters.qs, 10)
    try:
        adverts = paginator.page(page)
    except PageNotAnInteger:
        # Non-numeric page parameter -> first page.
        adverts = paginator.page(1)
    except EmptyPage:
        # Page number past the end -> last page.
        adverts = paginator.page(paginator.num_pages)
    context = {'adverts': adverts,
               'filters': filters
               }
    return render(request, 'gifts/index.html', context)
def detail(request, advert_id):
    """Show one gift and increment its Redis view counter."""
    advert = get_object_or_404(Gift, pk=advert_id)
    # incr creates the key on first view and returns the new total.
    total_views = r.incr('gift:{}:views'.format(advert.id))
    return render(request, 'gifts/detail.html', {'advert': advert,
                                                 'total_views': total_views})
class GiftList(ListView):
    """List only gifts that have a geographic point set."""
    queryset = Gift.objects.filter(point__isnull=False)
class GiftCreate(CreateView):
    """Create a gift owned by the requesting user."""
    model = Gift
    form_class = GiftForm
    # NOTE(review): `login_required = True` is a bare class attribute --
    # it does not enforce authentication; LoginRequiredMixin would.
    login_required = True
    success_url = reverse_lazy('gifts:index')

    def form_valid(self, form):
        # Stamp the logged-in user as owner before saving.
        form.instance.owner = self.request.user
        return super().form_valid(form)
class GiftUpdate(UpdateView):
    """Edit any field of an existing gift."""
    model = Gift
    fields = '__all__'
    success_url = reverse_lazy('gifts:index')
class GiftDelete(DeleteView):
    """Delete a gift, then return to the index."""
    model = Gift
    success_url = reverse_lazy('gifts:index')
|
import time

# Read two points and print the midpoint of the segment between them.
# Fix: the prompts misspelled "coordinate" as "xordinate" (including
# "y-xordinate" for the y values).
x1 = int(input("Please enter the x-coordinate of point 1 :"))
y1 = int(input("Please enter the y-coordinate of point 1 :"))
x2 = int(input("Please enter the x-coordinate of point 2 :"))
y2 = int(input("Please enter the y-coordinate of point 2 :"))
print("Calculating mid-points ...")
# Midpoint formula: ((x1 + x2) / 2, (y1 + y2) / 2).
point_x = (x2 + x1) / 2
point_y = (y2 + y1) / 2
time.sleep(1)
print("Mid-point is :", point_x, point_y)
|
import matplotlib as mpl
import pandas as pd

# Preprocessing of the Jena climate dataset, following
# https://www.tensorflow.org/tutorials/structured_data/time_series
mpl.rcParams['figure.figsize'] = (8, 6)
mpl.rcParams['axes.grid'] = False
df = pd.read_csv("jena_climate_2009_2016.csv")
print(df)
# Down-sample the 10-minute series to hourly (keep every 6th row).
df = df[5::6]
# Replace the -9999 wind-speed sentinel values with 0.
wv = df['wv (m/s)']
bad_wv = wv == -9999.0
# NOTE(review): this writes through a Series taken from a sliced frame
# (chained assignment); pandas may warn and the write-back to df is not
# guaranteed -- confirm, or use df.loc[bad_wv, 'wv (m/s)'] = 0.0.
wv[bad_wv] = 0.0
max_wv = df['max. wv (m/s)']
bad_max_wv = max_wv == -9999.0
max_wv[bad_max_wv] = 0.0
df.to_csv("jena_climate_2009_2016_00.csv", index=False)
|
"""
Train and generate models for the SMQTK IQR Application.
"""
import argparse
import glob
import json
import logging
import os.path as osp
import six
from smqtk import algorithms
from smqtk import representation
from smqtk.utils import bin_utils, jsmin, plugin
__author__ = 'paul.tunison@kitware.com'
def cli_parser():
    """Build the command-line parser for the IQR model-generation script."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("-c", "--config", required=True,
                        help="IQR application configuration file.")
    parser.add_argument("-t", "--tab", type=int, default=0,
                        help="The configuration tab to generate the model for.")
    parser.add_argument('-v', '--verbose', action='store_true', default=False,
                        help='Show debug logging.')
    parser.add_argument("input_files", metavar='GLOB', nargs="*",
                        help="Shell glob to files to add to the configured "
                             "data set.")
    return parser
def main():
    """Generate/train the models configured for one IQR application tab."""
    parser = cli_parser()
    args = parser.parse_args()
    #
    # Setup logging
    #
    if not logging.getLogger().handlers:
        if args.verbose:
            bin_utils.initialize_logging(logging.getLogger(), logging.DEBUG)
        else:
            bin_utils.initialize_logging(logging.getLogger(), logging.INFO)
    log = logging.getLogger("smqtk.scripts.iqr_app_model_generation")
    # Config is JSON-with-comments; jsmin strips comments before parsing.
    search_app_config = json.loads(jsmin.jsmin(open(args.config).read()))
    #
    # Input parameters
    #
    # The following dictionaries are JSON configurations that are used to
    # configure the various data structures and algorithms needed for the IQR
    # demo application. Values here can be changed to suit your specific data
    # and algorithm needs.
    #
    # See algorithm implementation doc-strings for more information on
    # configuration parameters (see implementation class ``__init__`` method).
    #
    # base actions on a specific IQR tab configuration (choose index here)
    if args.tab < 0 or args.tab > (len(search_app_config["iqr_tabs"]) - 1):
        log.error("Invalid tab number provided.")
        exit(1)
    search_app_iqr_config = search_app_config["iqr_tabs"][args.tab]
    # Configure DataSet implementation and parameters
    data_set_config = search_app_iqr_config['data_set']
    # Configure DescriptorGenerator algorithm implementation, parameters and
    # persistent model component locations (if implementation has any).
    descriptor_generator_config = search_app_iqr_config['descr_generator']
    # Configure NearestNeighborIndex algorithm implementation, parameters and
    # persistent model component locations (if implementation has any).
    nn_index_config = search_app_iqr_config['nn_index']
    # Configure RelevancyIndex algorithm implementation, parameters and
    # persistent model component locations (if implementation has any).
    #
    # The LibSvmHikRelevancyIndex implementation doesn't actually build a
    # persistent model (or doesn't have to that is), but we're leaving this
    # block here in anticipation of other potential implementations in the
    # future.
    #
    rel_index_config = search_app_iqr_config['rel_index_config']
    # Configure DescriptorElementFactory instance, which defines what
    # implementation of DescriptorElement to use for storing generated
    # descriptor vectors below.
    descriptor_elem_factory_config = search_app_iqr_config['descriptor_factory']
    #
    # Initialize data/algorithms
    #
    # Constructing appropriate data structures and algorithms, needed for the
    # IQR demo application, in preparation for model training.
    #
    descriptor_elem_factory = \
        representation.DescriptorElementFactory \
        .from_config(descriptor_elem_factory_config)
    #: :type: representation.DataSet
    data_set = \
        plugin.from_plugin_config(data_set_config,
                                  representation.get_data_set_impls())
    #: :type: algorithms.DescriptorGenerator
    descriptor_generator = \
        plugin.from_plugin_config(descriptor_generator_config,
                                  algorithms.get_descriptor_generator_impls())
    #: :type: algorithms.NearestNeighborsIndex
    nn_index = \
        plugin.from_plugin_config(nn_index_config,
                                  algorithms.get_nn_index_impls())
    #: :type: algorithms.RelevancyIndex
    rel_index = \
        plugin.from_plugin_config(rel_index_config,
                                  algorithms.get_relevancy_index_impls())
    #
    # Build models
    #
    # Perform the actual building of the models.
    #
    # Add data files to DataSet
    DataFileElement = representation.get_data_element_impls()["DataFileElement"]
    for fp in args.input_files:
        fp = osp.expanduser(fp)
        if osp.isfile(fp):
            data_set.add_data(DataFileElement(fp))
        else:
            log.debug("Expanding glob: %s" % fp)
            for g in glob.iglob(fp):
                data_set.add_data(DataFileElement(g))
    # Generate a model if the generator defines a known generation method.
    if hasattr(descriptor_generator, "generate_model"):
        descriptor_generator.generate_model(data_set)
    # Add other if-else cases for other known implementation-specific generation
    # methods stubs
    # Generate descriptors of data for building NN index.
    data2descriptor = descriptor_generator.compute_descriptor_async(
        data_set, descriptor_elem_factory
    )
    try:
        nn_index.build_index(six.itervalues(data2descriptor))
    except RuntimeError:
        # Already built model, so skipping this step
        pass
    rel_index.build_index(six.itervalues(data2descriptor))
# Script entry point.
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python3
from time import sleep, monotonic, process_time, time
from ctypes import CDLL
from sys import stdout, stderr, argv, exit
def mlockall():
    """Lock the process's current and future memory into RAM via libc.

    MCL_ONFAULT defers locking of future pages until they first fault in.
    The return value of libc.mlockall is ignored, so failure is silent --
    NOTE(review): check the result/errno if locking must be guaranteed.
    """
    MCL_CURRENT = 1
    MCL_FUTURE = 2
    MCL_ONFAULT = 4
    libc = CDLL('libc.so.6', use_errno=True)
    libc.mlockall(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)
def write(path, string):
    """Overwrite the file at `path` with `string` (text mode)."""
    with open(path, 'w') as handle:
        handle.write(string)
# Lock memory so the timing loop is not perturbed by paging.
mlockall()
kmsg_path = '/dev/kmsg'
p0 = process_time()
m0 = monotonic()
# Marker line used to calibrate the kernel log clock against monotonic().
test_string = 'foo: clock calibration: {}\n'.format(m0)
try:
    write(kmsg_path, test_string)
except Exception as e:
    print(e)
    exit(1)
m1 = monotonic()
KMSG_OK = False
# Tail /dev/kmsg: skip lines until our marker appears, compute the clock
# delta, then echo every subsequent kernel message until Ctrl-C.
try:
    with open(kmsg_path) as f:
        while True:
            s = f.readline()
            if KMSG_OK:
                print(s[:-1])
            else:
                if test_string in s:
                    print(s[:-1])
                    # Third comma-separated kmsg field is a microsecond
                    # timestamp; convert to seconds.
                    s0 = float(s.split(',')[2]) / 1000000
                    delta = s0 - m0
                    print('delta:', delta)
                    print('kmsg monitoring has started!')
                    KMSG_OK = True
except KeyboardInterrupt:
    print()
    # Report CPU time consumed as a percentage of wall (monotonic) time.
    p10 = process_time()
    m10 = monotonic()
    m = m10 - m0
    p = p10 - p0
    x = p / m * 100
    print('p, s:', p)
    print('%', x)
    print('Exit.')
    exit()
|
import unittest
import numpy as np
from core.game import Game
from core.player import Player
from core.turn import Turn
class GameTest(unittest.TestCase):
    """Unit tests for Game: initial state, turn counting, available turns."""

    def test_game_state_is_empty_for_new_game(self):
        """A new game starts with an all-zero 3x3 state matrix."""
        game = self.create_game()
        empty_matrix = np.matrix('0 0 0; 0 0 0; 0 0 0')
        assert (game.state == empty_matrix).all()

    def test_game_turn_number_is_zero_for_new_game(self):
        game = self.create_game()
        assert game.turn_number == 0

    def test_perform_game_turn_increments_turn_number(self):
        game = self.create_game()
        turn = self.create_turn()
        game.perform_turn(turn)
        assert game.turn_number == 1

    def test_get_available_turns_returns_list_of_available_turn_objects(self):
        game = self.create_game()
        # Only the two zero cells -- (0, 0) and (0, 2) -- are free.
        game.state = np.matrix('0 1 0; 1 1 1; 1 1 1')
        available_turns = game.get_available_turns()
        assert len(available_turns) == 2
        assert available_turns[0].coordinates == (0, 0)
        assert available_turns[1].coordinates == (0, 2)

    # Factory helpers keep the tests independent of constructor details.
    def create_game(self):
        return Game()

    def create_turn(self):
        return Turn((0, 0))
|
"""
author songjie
"""
from flask import render_template
from app.api import api
from app.libs.email import send_test
from app.libs.reply import Reply
from tool.lib.function import curl_data, debug
@api.route('/test')
def test():
    """Render the test page template."""
    data = {"title": "测试页"}
    return render_template("test/test.html", data=data)
@api.route('/test/testCurl')
def test_curl():
    """Fetch a remote page via curl_data and return its body."""
    body, response = curl_data("https://blog.tan90.club", return_response=True)
    # The raw response object is ignored; only the body is returned.
    return body
@api.route("/test/sendEmail")
def send_mail():
# send_test("这是一封测试邮件")
return Reply.success("send successful")
|
#!/usr/bin/env python3
import inspect
global debugging
debugging = False
#debugging = True
def err(msg):
    """Print an error message tagged with the calling function's name."""
    caller_name = inspect.stack()[1][3]
    print("Error: {0}(): {1}".format(caller_name, msg))
def show(*s):
    """Print *s*, but only while the module-level debugging flag is on."""
    if not debugging:
        return
    print(*s)
def test_func(function, outputs, *inputs):
"""Test function with inputs and compare actual outputs with expected"""
result = True
for o, i in zip(outputs, *inputs):
actual = function(*i)
if(actual != o):
result = False
# Create visual seperation between failures
debug('=' * _PRINT_WIDTH)
debug(function.__name__ + "(" + str(i).strip('[]()') + ")")
debug('-' * _PRINT_WIDTH)
debug("Actual:")
debug(actual)
debug('.' * _PRINT_WIDTH)
debug("Expected:")
debug(o)
# Create visual seperation between tested functions, if there is need
if(result == False):
debug(('#' * _PRINT_WIDTH) + '\n')
return result
def test_class(obj, outputs, *inputs):
    """Test object's class by applying inputs and comparing actual outputs with expected
    Takes an object, expected output, and a input set of the form (method, method_input)

    NOTE(review): this function is broken as written and looks unfinished:
      * `function`, `debug` and `_PRINT_WIDTH` are never defined in this
        module, so the mismatch path raises NameError;
      * `hasattr(obj, *i[0])` unpacks the method-name string as extra
        hasattr arguments — presumably `hasattr(obj, i[0])` was intended;
      * `actual = function(*i)` overwrites the method-call result.
    Left byte-identical pending clarification of the intended contract.
    """
    result = True
    # test_cass(stack, outputs, actions)
    # obj is of the class to be tested
    # apply set of actions on obj
    # check
    for output, input_list in zip(outputs, *inputs):
        for i in input_list:
            if hasattr(obj, *i[0]):
                method = getattr(obj, *i)
                actual = method(*i[1])
            else:
                print(type(obj).__name__ + " does not have the method " + method)
                #print(type(obj).__name__ + " does not have the method " + method.__name__)
        # Output of last input method is
        actual = function(*i)
        if(actual != output):
            result = False
            # Create visual seperation between failures
            debug('=' * _PRINT_WIDTH)
            debug(function.__name__ + "(" + str(i).strip('[]()') + ")")
            debug('-' * _PRINT_WIDTH)
            debug("Actual:")
            debug(actual)
            debug('.' * _PRINT_WIDTH)
            debug("Expected:")
            debug(output)
    # Create visual seperation between tested functions, if there is need
    if(result == False):
        debug(('#' * _PRINT_WIDTH) + '\n')
    return result
|
# KNN classification of wine quality.
# NOTE(review): this is a Python 2 IPython-notebook export — the bare
# `print` statements and the `%matplotlib` magic below are not valid
# plain Python 3.
import numpy as np
import pandas as pd
import pylab as pl
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
%matplotlib inline
df = pd.read_csv("https://s3.amazonaws.com/demo-datasets/wine.csv")
df.head()
# Random ~80/20 train/test split on row membership.
test_idx = np.random.uniform(0, 1, len(df)) <= 0.8
train = df[test_idx==True]
test = df[test_idx==False]
features = ['density', 'sulphates', 'residual_sugar']
target = ['high_quality']
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(train[features], train[target])
predicts = knn.predict(test[features])
# Fraction of correct predictions on the held-out rows.
accuracy = np.where(predicts == test['high_quality'], 1, 0).sum()/float(len(test))
print accuracy
knn.score(test[features], test[target])
# Accuracy and score are the same values.
def ideal_k():
    """Plot test accuracy of KNN for k = 1..50 (reads module-level
    train/test/features/target; Python 2 print statements).

    NOTE(review): the local `accuracy` list shadows the module-level
    `accuracy` scalar computed above.
    """
    k =[]
    accuracy =[]
    for i in range(1,51):
        knn = KNeighborsClassifier(n_neighbors=i)
        knn.fit(train[features], train[target])
        k.append(i)
        accuracy.append(knn.score(test[features], test[target]))
        print 'Using %r neighbors gives you an R^2 score of %.3f.' % (i, knn.score(test[features], test[target]))
    plt.plot(k, accuracy)
ideal_k()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Imports
import torch
import random
from IPython.display import clear_output
from glob import glob
import pandas as pd
cuda = torch.cuda.is_available()
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import sys
import os
import pydicom, numpy as np
from skimage.transform import resize
from imgaug import augmenters as iaa
from sklearn.decomposition import PCA
from sklearn.metrics import confusion_matrix
sys.path.append("../semi-supervised-pytorch-master/semi-supervised") # path to models
det_class_path = '../Kaggle/all/stage_2_detailed_class_info.csv' # class info
bbox_path = '../Kaggle/all/stage_2_train_labels.csv' # labels
dicom_dir = '../Kaggle/all/stage_2_train_images/' # train images
det_class_df = pd.read_csv(det_class_path)
print(det_class_df.shape[0], 'class infos loaded')
print(det_class_df['patientId'].value_counts().shape[0], 'patient cases')
# Some useful functions
def indices_to_one_hot(data, nb_classes):
    """One-hot encode an iterable of class indices.

    Args:
        data: iterable (or array) of integer class indices.
        nb_classes: total number of classes (one-hot vector length).

    Returns:
        ndarray of shape (len(data), nb_classes) with 1.0 at each index.
    """
    flat_indices = np.array(data).reshape(-1)
    identity = np.eye(nb_classes)
    return identity[flat_indices]
def batch(iterable, n=1):
    """Yield successive slices of length at most *n* from a sized sequence.

    The final slice may be shorter when len(iterable) is not a multiple of n.
    """
    total = len(iterable)
    start = 0
    while start < total:
        yield iterable[start:min(start + n, total)]
        start += n
# create training dataset with labelled and unlabelled images
image_df = pd.DataFrame({'path': glob(os.path.join(dicom_dir, '*.dcm'))})
# patientId is the DICOM filename without extension.
image_df['patientId'] = image_df['path'].map(lambda x: os.path.splitext(os.path.basename(x))[0])
from models import AuxiliaryDeepGenerativeModel
image_resize = 225
y_dim = 2       # number of classes (Normal / NotNormal)
z_dim = 128     # latent dimensionality
a_dim = 128     # auxiliary-variable dimensionality
h_dim = [2048, 1024, 512, 256]  # hidden layer sizes
model = AuxiliaryDeepGenerativeModel([image_resize*image_resize, y_dim, z_dim, a_dim, h_dim])
model
if cuda: model = model.cuda()
# training/validation split
validation = 0.1
# image resize (the bigger the better, except for computational power ...)
image_resize = 225
# number of labelled images in the training dataset
labelled_images = 1000
# number of unlabelled ones (the rest of the training dataset)
unlabelled_images = int(image_df.shape[0]*(1-validation)-labelled_images)
# number of validation images
validation_images = int(validation*image_df.shape[0])
# Some list for the dataset probably I should use something faster ...
labelled = []
label = []
unlabelled = []
# We don't need the same subject repeated when multiple bounding boxes occur.
# NOTE(review): drop_duplicates() is not in-place and its result is discarded
# here, so duplicate rows are in fact kept — confirm the intended behavior.
det_class_df.drop_duplicates()
# One-hot of the three class strings; columns follow pandas' sorted order.
allLabel = pd.get_dummies(pd.Series(list(det_class_df['class']))).values
label0Count = 0
label1Count = 0
labelIndex = []
finishLabelling = False
# Prepare training dataset
i = 0
done = 0
# Collect a class-balanced labelled set (half [1,0], half [0,1]), recording
# the used row indices so the remaining rows can become the unlabelled set.
# NOTE(review): the loop index `i` is never bounds-checked against
# len(allLabel); it relies on enough rows existing of each class.
while(not finishLabelling):
    if (allLabel[i][0] == 1 or allLabel[i][1] == 1) and label0Count < labelled_images/2:
        done += 1
        label0Count += 1
        labelIndex.append(i)
        # Map the class-info row to its DICOM file via patientId.
        k = np.where(image_df['patientId'] == det_class_df['patientId'][i])
        labelled.append(resize(pydicom.read_file(image_df['path'].values[k[0]][0]).pixel_array/255,
                        (image_resize,image_resize), anti_aliasing=True, mode='constant'))
        label.append([1, 0])
    elif allLabel[i][2] == 1 and label1Count < labelled_images/2:
        done += 1
        label1Count += 1
        labelIndex.append(i)
        k = np.where(image_df['patientId'] == det_class_df['patientId'][i])
        labelled.append(resize(pydicom.read_file(image_df['path'].values[k[0]][0]).pixel_array/255,
                        (image_resize,image_resize), anti_aliasing=True, mode='constant'))
        label.append([0, 1])
    if label0Count == labelled_images/2 and label1Count == labelled_images/2:
        finishLabelling = True
    i += 1
    if done % 1000 == 0:
        print(str(done) + ' labelled images out of ' + str(labelled_images) + ' done')
print(str(labelled_images) + ' training images labelled loaded')
# Everything not used for labelling (up to the training budget) is unlabelled.
done = 0
for i in range(labelled_images + unlabelled_images):
    if i not in labelIndex:
        done += 1
        labelIndex.append(i)
        k = np.where(image_df['patientId'] == det_class_df['patientId'][i])
        unlabelled.append(resize(pydicom.read_file(image_df['path'].values[k[0]][0]).pixel_array/255,
                          (image_resize,image_resize), anti_aliasing=True, mode='constant'))
        if done % 1000 == 0 and done != 0:
            print(str(done) + ' unlabelled images out of ' + str(unlabelled_images) + ' done')
print(str(unlabelled_images) + ' training images unlabelled loaded')
# Prepare validation dataset: every image whose index was not consumed by
# the training (labelled + unlabelled) sets, stored flattened (.ravel()).
labelled_val = []
label_val = []
done = 0
for i in range(image_df.shape[0]):
    if i not in labelIndex:
        done += 1
        if allLabel[i][0] == 1 or allLabel[i][1] == 1:
            label_val.append([1, 0])
        elif allLabel[i][2] == 1:
            label_val.append([0, 1])
        k = np.where(image_df['patientId'] == det_class_df['patientId'][i])
        labelled_val.append(resize(pydicom.read_file(image_df['path'].values[k[0]][0]).pixel_array/255,
                            (image_resize,image_resize), anti_aliasing=True, mode='constant').ravel())
        if done % 1000 == 0:
            print(str(done) + ' images out of ' + str(validation_images) + ' done')
print('Validation images loaded')
# Per-class counts for the summary below.
trainNbr = np.sum(label, axis=0)
valNbr = np.sum(label_val, axis=0)
print('Summary:')
print('Training images: ' + str(labelled_images + unlabelled_images))
print('Labelled: ' + str(labelled_images) + ', Unlabelled: ' + str(unlabelled_images))
print('Labels: NotNormal ' + str(trainNbr[0]) + ', Normal ' + str(trainNbr[1]))
print('Validation images: ' + str(validation_images))
print('Labels: NotNormal ' + str(valNbr[0]) + ', Normal ' + str(valNbr[1]))
from itertools import cycle
from inference import SVI, DeterministicWarmup, log_gaussian
# We will need to use warm-up in order to achieve good performance.
# Over 200 calls to SVI we change the autoencoder from
# deterministic to stochastic.
def log_gauss(x, mu, log_var):
    """Negative Gaussian log-likelihood, used as the SVI reconstruction loss.

    Wraps inference.log_gaussian and flips its sign so lower is better.
    """
    return -log_gaussian(x, mu, log_var)
# Optimizer, ELBO objective with deterministic warm-up, and image augmenter.
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, betas=(0.9, 0.999), weight_decay=1e-5)
# beta ramps over 100 SVI calls (deterministic -> stochastic autoencoder).
beta = DeterministicWarmup(n=100)
beta_constant = 1
# Weight of the auxiliary classification loss relative to dataset sizes.
alpha = beta_constant * (len(unlabelled) + len(labelled)) / len(labelled)
elbo = SVI(model, likelihood=log_gauss, beta=beta)
from torch.autograd import Variable
n_epochs = 200
batchSize= 15
# Per-epoch histories for plotting losses and accuracies.
accuracyTrain = []
accuracyVal = []
LTrain = []
LVal = []
UTrain = []
UVal = []
classTrain = []
classVal = []
JAlphaTrain = []
JAlphaVal = []
# Random subset of horizontal flips and mild affine jitter, random order.
image_augmenter = iaa.SomeOf((0, None),[iaa.Fliplr(0.5),
                             iaa.Affine(scale=(0.8, 1.2),
                             translate_percent={"x": (-0.05, 0.05), "y": (-0.05, 0.05)},
                             rotate=(-15, 15))
                             ],random_order=True,)
# Semi-supervised training loop: each epoch walks the unlabelled set once,
# cycling the (smaller) labelled set alongside it, then evaluates on the
# validation set.
for epoch in range(n_epochs):
    model.train()
    total_L_train, total_U_train, total_classification_loss_train, total_loss_train, accuracy_train = (0, 0, 0, 0, 0)
    total_L_val, total_U_val, total_classification_loss_val, total_loss_val, accuracy_val = (0, 0, 0, 0, 0)
    m_train, m_val = (0, 0)
    latent = []
    y_pred = []
    y_true = []
    l = 0
    # Shuffle the data every epoch (labelled and label should keep the same index ordering!)
    z = list(zip(labelled, label))
    random.shuffle(z)
    random.shuffle(unlabelled)
    labelled, label = zip(*z)
    for x, y, u in zip(cycle(batch(labelled, batchSize)), cycle(batch(label, batchSize)), (batch(unlabelled, batchSize))):
        m_train+=1
        # Augment both labelled and unlabelled batches.
        x = image_augmenter.augment_images(x)
        u = image_augmenter.augment_images(u)
        # Wrap in variables (flatten each image to a vector).
        x, y, u = torch.from_numpy(np.asarray(x).reshape(-1, image_resize*image_resize)), torch.Tensor(y), torch.from_numpy(np.asarray(u).reshape(-1, image_resize*image_resize))
        x, y, u = x.type(torch.FloatTensor), y.type(torch.FloatTensor), u.type(torch.FloatTensor)
        if cuda:
            # They need to be on the same device and be synchronized.
            x, y = x.cuda(device=0), y.cuda(device=0)
            u = u.cuda(device=0)
        L, z = elbo(x, y)   # labelled ELBO (also yields latent z)
        U, _ = elbo(u)      # unlabelled ELBO
        if l < 2000:
            # Keep a bounded sample of latents/labels for later inspection.
            latent.append(z.cpu().detach().numpy())
            l = l + 1
            y_true.append(y.cpu().detach().numpy())
        # Add auxiliary classification loss q(y|x)
        logits = model.classify(x)
        # Regular cross entropy
        classication_loss = - torch.sum(y * torch.log(logits + 1e-8), dim=1).mean()
        J_alpha_train = - L + alpha * classication_loss - U
        J_alpha_train.backward()
        optimizer.step()
        optimizer.zero_grad()
        total_L_train -= L.item()
        total_U_train -= U.item()
        total_classification_loss_train += classication_loss.item()
        total_loss_train += J_alpha_train.item()
        accuracy_train += torch.mean((torch.max(logits, 1)[1].data == torch.max(y, 1)[1].data).float())
    model.eval()
    # Validation pass: no augmentation, no optimizer step.
    for x, y in zip(batch(labelled_val, batchSize), batch(label_val, batchSize)):
        m_val+=1
        x, y = torch.from_numpy(np.asarray(x).reshape(-1, image_resize*image_resize)), torch.Tensor(y)
        x, y = x.type(torch.FloatTensor), y.type(torch.FloatTensor)
        if cuda:
            x, y = x.cuda(device=0), y.cuda(device=0)
        L, _ = elbo(x, y)
        U, _ = elbo(x)
        logits = model.classify(x)
        y_pred.append(torch.max(logits, 1)[1].cpu().detach().numpy())
        classication_loss = - torch.sum(y * torch.log(logits + 1e-8), dim=1).mean()
        J_alpha_val = - L + alpha * classication_loss - U
        total_L_val -= L.item()
        total_U_val -= U.item()
        total_classification_loss_val += classication_loss.item()
        total_loss_val += J_alpha_val.item()
        accuracy_val += torch.mean((torch.max(logits, 1)[1].data == torch.max(y, 1)[1].data).float())
    print("Epoch: {}".format(epoch+1))
    print("[Train]\t\t L: {:.2f}, U: {:.2f}, class: {:.2f}, J_a: {:.2f}, accuracy: {:.2f}".format(total_L_train / m_train, total_U_train / m_train, total_classification_loss_train / m_train, total_loss_train / m_train, accuracy_train / m_train))
    print("[Validation]\t L: {:.2f}, U: {:.2f}, class: {:.2f}, J_a: {:.2f}, accuracy: {:.2f}".format(total_L_val / m_val, total_U_val / m_val, total_classification_loss_val / m_val, total_loss_val / m_val, accuracy_val / m_val))
    # Record per-epoch averages for plotting.
    accuracyTrain.append(accuracy_train / m_train)
    accuracyVal.append(accuracy_val / m_val)
    LTrain.append(total_L_train / m_train)
    LVal.append(total_L_val / m_val)
    UTrain.append(total_U_train / m_train)
    UVal.append(total_U_val / m_val)
    classTrain.append(total_classification_loss_train / m_train)
    classVal.append(total_classification_loss_val / m_val)
    JAlphaTrain.append(total_loss_train / m_train)
    JAlphaVal.append(total_loss_val / m_val)
# Confusion matrix from the last epoch's validation predictions, then
# persist the model weights and all training curves.
classes = np.argmax(label_val, axis=1)
y_pred = np.concatenate( y_pred, axis=0 )
conf_matrix = confusion_matrix(classes, np.vstack(y_pred[0:classes.shape[0]]))
PATHMODEL = '../Models/beta2'
PATHFIGURE = '../Figure/training2.npz'
torch.save(model.state_dict(), PATHMODEL)
np.savez(PATHFIGURE,accuracyTrain=accuracyTrain,
         accuracyVal=accuracyVal,
         classTrain=classTrain,
         classVal=classVal,
         LTrain=LTrain,
         LVal=LVal,
         UTrain=UTrain,
         UVal=UVal,
         JAlphaTrain=JAlphaTrain,
         JAlphaVal=JAlphaVal,
         conf_matrix=conf_matrix,
         latent=latent,
         classes=classes,
         y_true=y_true)
|
# Count (possibly overlapping) occurrences of a pattern in a text,
# both read from stdin.
text = input()
pattern = input()
occurrences = sum(1 for start in range(len(text)) if text.startswith(pattern, start))
print(occurrences)
from sqlite3 import connect
# In-memory SQLite database: contents vanish when the connection closes.
db_name = ":memory:"
from datetime import datetime
from datetime import timedelta
from django.shortcuts import render, redirect
from django.core.paginator import Paginator
from django.db.models import Count
from django.core.mail import send_mail
from django.contrib import messages
from django.urls import reverse
from django.http import HttpResponseRedirect
from forum.models import Category, CategoryDetail, Topic, Post
from forum.forms import PostForm, TopicForm, EmailPostForm
def category_list(request):
    """Forum front page: every category plus the six most-discussed topics."""
    categories = Category.objects.all()
    hot_topics = (Topic.objects
                  .annotate(num_posts=Count('posts'))
                  .filter(num_posts__gte=1)
                  .order_by('-num_posts'))[:6]
    context = {'categories': categories, 'main_topics': hot_topics}
    return render(request, 'forum/category_list.html', context)
def topic_list(request, detail_pk):
    """List a category detail's topics (paginated) and handle new-topic POSTs."""
    parent_topic = CategoryDetail.objects.get(pk=detail_pk)
    topics = Topic.objects.filter(category_detail=detail_pk)
    counter_topics = topics.count()
    page = Paginator(topics, 10).get_page(request.GET.get('page'))
    if request.method != 'POST':
        topic_form = TopicForm()
    else:
        topic_form = TopicForm(request.POST)
        if topic_form.is_valid():
            topic = topic_form.save(commit=False)
            topic.author = request.user
            topic.category_detail = CategoryDetail.objects.filter(pk=detail_pk).first()
            topic.save()
            return redirect('forum:topic_list', detail_pk=parent_topic.pk)
    # Invalid POSTs fall through and re-render with the bound form.
    context = {
        'parent_topic': parent_topic,
        'topics': topics,
        'topic_form': topic_form,
        'counter_topics': counter_topics,
        'page': page,
    }
    return render(request, 'forum/topic_list.html', context)
def topic_detail(request, topic_pk):
    """Show a topic's posts (paginated) and handle reply POSTs."""
    topic = Topic.objects.get(pk=topic_pk)
    posts = Post.objects.filter(topic=topic_pk)
    counter = posts.count()
    paginator = Paginator(posts, 10)
    page = paginator.get_page(request.GET.get('page', 1))
    is_paginated = page.has_other_pages()
    if request.method != 'POST':
        post_form = PostForm()
    else:
        post_form = PostForm(request.POST)
        if post_form.is_valid():
            post = post_form.save(commit=False)
            post.author = request.user
            # NOTE(review): naive datetime; if USE_TZ is enabled this should
            # be django.utils.timezone.now() — confirm project settings.
            post.published = datetime.now()
            post.topic = Topic.objects.filter(pk=topic_pk).first()
            post.save()
            return redirect('forum:topic_detail', topic_pk=topic.pk)
    context = {
        'topic': topic,
        'posts': posts,
        'counter': counter,
        'post_form': post_form,
        'page': page,
        'is_paginated': is_paginated,
    }
    return render(request, 'forum/topic_detail.html', context)
def topic_filter_new(request):
    """Topics that received posts within the last 24 hours."""
    cutoff = datetime.now() - timedelta(days=1)
    fresh_topics = Topic.objects.filter(posts__created__gte=cutoff)
    return render(request, 'forum/topic_filter_new.html',
                  {'new_topics': fresh_topics})
def topic_filter_author(request):
    """Topics started by the currently authenticated user."""
    own_topics = Topic.objects.filter(author=request.user)
    return render(request, 'forum/topic_filter_author.html',
                  {'author_topics': own_topics})
def topic_filter_takepart(request):
    """Topics the current user has posted in."""
    participated = Topic.objects.filter(posts__author=request.user)
    return render(request, 'forum/topic_filter_takepart.html',
                  {'take_part_topics': participated})
def complain(request, post_pk):
    """Let a user e-mail a complaint about a forum post.

    BUG FIX: `subject` used to be the tuple (name, email), which is not a
    valid subject for django.core.mail.send_mail (it must be a string);
    format the two values into one string instead.
    """
    post = Post.objects.filter(pk=post_pk).first()
    if request.method == 'POST':
        form = EmailPostForm(request.POST)
        if form.is_valid():
            subject = '{} {}'.format(form.cleaned_data['name'],
                                     form.cleaned_data['email'])
            post_text = post.text
            complain_text = (form.cleaned_data['comments'])
            message = f'Сообщение форума {post_text} вызвало жалобу {complain_text}'
            mail = send_mail(subject, message, 'usermail.2021@mail.ru',
                             ['olga_muhina81@mail.ru'], fail_silently=True)
            if mail:
                messages.success(request, 'Письмо отправлено')
                return redirect('forum:complain', post_pk=post.pk)
            else:
                messages.error(request, 'Ошибка отправки')
    else:
        form = EmailPostForm()
    context = {
        'post': post,
        'form': form
    }
    return render(request, 'forum/complain.html', context)
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the ForsetiSystemDao."""
import json
from tests.unittest_utils import ForsetiTestCase
import mock
import unittest
from MySQLdb import DataError
from google.cloud.security.common.data_access import _db_connector
from google.cloud.security.common.data_access import errors
from google.cloud.security.common.data_access import forseti_system_dao
from google.cloud.security.common.data_access.sql_queries import cleanup_tables_sql
class ForsetiSystemDaoTest(ForsetiTestCase):
    """Tests for the ForsetiSystemDao."""

    @mock.patch.object(_db_connector.DbConnector, '__init__', autospec=True)
    def setUp(self, mock_db_connector):
        """Build a dao with a stubbed DB connector and mocked SQL helpers."""
        mock_db_connector.return_value = None
        self.system_dao = forseti_system_dao.ForsetiSystemDao(
            global_configs={'db_name': 'forseti_security'})
        self.fetch_mock = mock.MagicMock()
        self.commit_mock = mock.MagicMock()
        self.system_dao.execute_sql_with_fetch = self.fetch_mock
        self.system_dao.execute_sql_with_commit = self.commit_mock

    def test_cleanup_inventory_tables(self):
        """Test cleanup_inventory_tables(int): stale tables are dropped."""
        self.fetch_mock.return_value = [{'table': 'foo'}, {'table': 'bar'}]
        self.system_dao.cleanup_inventory_tables(7)
        self.fetch_mock.assert_called_once_with(
            cleanup_tables_sql.RESOURCE_NAME,
            cleanup_tables_sql.SELECT_SNAPSHOT_TABLES_OLDER_THAN,
            [7, 'forseti_security'])
        expected_drops = [
            mock.call(
                cleanup_tables_sql.RESOURCE_NAME,
                cleanup_tables_sql.DROP_TABLE.format(table_name),
                None)
            for table_name in ('foo', 'bar')
        ]
        self.commit_mock.assert_has_calls(expected_drops)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
# -*- coding: utf-8 -*-
from flask import Blueprint, render_template, request
star = Blueprint('star', __name__)
@star.route('/<name>/<reponame>/star', methods = ['POST'])
def index(name, reponame):
    """Star a repository identified by owner *name* and *reponame*.

    BUG FIX: the URL rule captures <name> and <reponame>, but the view took
    no parameters, so Flask raised TypeError on every request; accept them.

    NOTE(review): still renders an empty template name — looks like a stub.
    """
    return render_template('')
|
import tarfile
import os
import sys
import pickle
#import tensorflow as tf
from datetime import datetime
from multiprocessing import Pool
import getopt
from itertools import repeat
nr_of_cpus = 16
def get_all_tar_filenames(tar_file_dir):
    """Return the names of all *.tar.bz2 files directly inside a directory.

    Args:
        tar_file_dir: directory to scan (non-recursive).

    Returns:
        List of matching file names (os.listdir order); empty list when the
        directory does not exist.
    """
    if not os.path.isdir(tar_file_dir):
        return list()
    return [name for name in os.listdir(tar_file_dir)
            if name.endswith(".tar.bz2")]
def untar_one_pickle_file(full_path_tar_file, work_dir):
    """Extract a .tar.bz2 archive into *work_dir*.

    BUG FIX: use a context manager so the archive handle is closed even when
    extraction raises (the old code leaked it on error).

    NOTE(review): extractall() on an untrusted archive can write outside
    work_dir (path traversal) — only feed this trusted pickle archives.
    """
    with tarfile.open(full_path_tar_file, "r:bz2") as tar:
        tar.extractall(work_dir)
def get_pickle_file_content(full_path_pickle_file):
    """Unpickle and return the object stored in the given file.

    BUG FIX: open the file with a context manager so the handle is closed
    even when pickle.load raises (the old code leaked it on error).

    NOTE(review): pickle.load on untrusted files can execute arbitrary code;
    only use on trusted dataset pickles.
    """
    with open(full_path_pickle_file, 'rb') as pickle_file:
        return pickle.load(pickle_file, encoding='latin1')
def print_one_pickle_list_item(pickle_file_content):
    """Pretty-print the first record of a pickle file's content.

    Each record is an 8-tuple: (signature, gdb-ptype, name, file name,
    AT&T disassembly, Intel disassembly, package, binary).

    BUG FIX: next(iter(...)) raised StopIteration on empty input; with a
    default of None, empty input now takes the error branch instead.
    """
    item = next(iter(pickle_file_content), None)
    if item:
        print(f'function-signature: {item[0]}')
        print(f'gdb-ptype: {item[1]}')
        print(f'function-name: {item[2]}')
        print(f'function-file-name: {item[3]}')
        print(f'disassembly-att: {item[4]}')
        print(f'disassembly-intel: {item[5]}')
        print(f'package-name: {item[6]}')
        print(f'binary-name: {item[7]}')
    else:
        print('Error item[0]')
def parseArgs():
    """Parse command-line options into a config dict.

    Options: -p/--pickle-dir, -w/--work-dir, -s/--save-dir, -h (help).

    Returns:
        dict with keys 'pickle_dir', 'work_dir', 'save_dir' (defaults kept
        for any option not supplied).

    BUG FIXES: option values were sliced with [1:], silently dropping the
    first character of every supplied path; `('-h')` was a plain string
    (substring test), now a real tuple; removed a leftover debug print.
    """
    short_opts = 'hp:s:w:'
    long_opts = ['pickle-dir=', 'work-dir=', 'save-dir=']
    config = dict()
    config['pickle_dir'] = '../../../ubuntu-20-04-pickles'
    config['work_dir'] = '/tmp/work_dir/'
    config['save_dir'] = '/tmp/save_dir'
    try:
        args, rest = getopt.getopt(sys.argv[1:], short_opts, long_opts)
    except getopt.GetoptError as msg:
        print(msg)
        print(f'Call with argument -h to see help')
        exit()
    for option_key, option_value in args:
        if option_key in ('-p', '--pickle-dir'):
            config['pickle_dir'] = option_value
        elif option_key in ('-w', '--work-dir'):
            config['work_dir'] = option_value
        elif option_key in ('-s', '--save-dir'):
            config['save_dir'] = option_value
        elif option_key in ('-h',):
            print(f'<optional> -p or --pickle-dir The directory with disassemblies,etc. Default: ubuntu-20-04-pickles')
            print(f'<optional> -w or --work-dir The directory where we e.g. untar,etc. Default: /tmp/work_dir/')
            print(f'<optional> -s or --save-dir The directory where we save dataset. Default: /tmp/save_dir')
    return config
def print_5_pickle_files(pickle_files, config):
    """Print up to five pickle file names as a sanity check.

    Exits the process when the pickle directory yielded no files.

    BUG FIX: the old break condition (c > 5 after the increment) printed
    six entries, not five; now capped at exactly five.
    """
    if len(pickle_files) == 0:
        print(f'Pickle dir is empty')
        exit()
    print(f'Five files from dir >{config["pickle_dir"]}<')
    shown = 0
    for file in pickle_files:
        print(f'file >{file}<')
        shown += 1
        if shown == 5:
            break
def get_string_before_function_name(function_signature):
    """Extract the return-type text that precedes the function name.

    Scans left from the '(' ending the function name until the nearest
    '*', '&' or space; everything up to and including that character
    (stripped) is the return type. Returns '' when no separator exists.

    Raises:
        ValueError: if the signature contains no '('.
    """
    paren_idx = function_signature.index('(')
    # Position of the separator closest to the '(' (or -1 if none).
    sep_idx = max(function_signature.rfind(ch, 0, paren_idx + 1)
                  for ch in (' ', '*', '&'))
    if sep_idx < 0:
        return ''
    return function_signature[:sep_idx + 1].strip()
def get_function_return_type(string_before_func_name, gdb_ptype):
    """Resolve a function's coarse return type from its gdb ptype output.

    Delegates to get_raw_return_type_from_gdb_ptype(); when that yields
    'unknown', the signature prefix is printed for manual inspection.

    NOTE(review): string_before_func_name is only used for that debug
    print — the returned value comes solely from the gdb ptype.
    """
    ### get raw return type, e.g. "void" or "struct" instead of "struct timeval" from gdb-ptype
    raw_gdb_return_type = get_raw_return_type_from_gdb_ptype(gdb_ptype)
    if raw_gdb_return_type == 'unknown':
        print(f'string_before_func_name: {string_before_func_name}')
    return raw_gdb_return_type
def get_raw_return_type_from_gdb_ptype(gdb_ptype):
    """Map raw `gdb ptype` output to a coarse return-type label.

    Returns one of:
      * a canonical scalar/pointer type from return_type_list,
      * 'class' / 'struct' / 'enum' / 'union' (optionally with stars),
      * 'delete' for types the dataset should drop,
      * 'unknown' when nothing matched.

    NOTE(review): the matching is deliberately order-dependent (special
    cases before the list scan, '{'-prefix analysis after); left
    byte-identical apart from comments.
    """
    return_type_list = ['bool', 'bool *', 'const bool',
        'void', 'void *', 'void **', 'void (*)(void *)', 'void * const',
        'char', 'char *', 'unsigned char *', 'char **', 'const char *', 'signed char',
        'const char **', 'unsigned char', 'const char', 'const unsigned char *',
        'unsigned char **', 'const char * const *', 'char32_t',
        'signed char *', 'wchar_t *', 'const char16_t *', 'char ***',
        'wchar_t', 'const char * const', 'const wchar_t *', 'char16_t *',
        'const unsigned char **', 'char * const *', 'const signed char *',
        'const char ***', 'volatile char *', 'signed char * const *',
        'unsigned short', 'short', 'unsigned short *', 'short *',
        'const unsigned short *', 'unsigned short **', 'short **',
        'const unsigned short', 'const short',
        'int', 'int *', 'unsigned int', 'const int *', 'const unsigned int *',
        'int **', 'unsigned int **', 'volatile int *',
        'unsigned int *', 'const unsigned int', 'const int', 'int ***',
        '__int128', 'long int', '__int128 unsigned',
        'long','unsigned long', 'unsigned long long', 'unsigned long *', 'long long',
        'const unsigned long', 'unsigned long **', 'const long', 'const long *',
        'long *', 'const unsigned long long *', 'const unsigned long *',
        'long long *', 'unsigned long ***', 'unsigned long long *',
        'double', 'const double *', 'double *', 'const double', 'long double',
        'double **', 'double ***', 'const long double',
        'float', 'const float *', 'float *', 'const float',
        'float **', 'float ***', 'float ****',
        'complex *', 'complex double', 'complex float']
    if "type =" in gdb_ptype:
        ### pattern based
        new_gdb_ptype = gdb_ptype.replace('type =', '')
        raw_gdb_ptype = new_gdb_ptype.strip()
        ### delete some strange return-types
        if raw_gdb_ptype == 'unsigned char (*)[16]':
            return 'delete'
        elif raw_gdb_ptype == 'unsigned char (*)[12]':
            return 'delete'
        elif raw_gdb_ptype == 'int (*)(int (*)(void *, int, int), void *, int)':
            return 'delete'
        elif raw_gdb_ptype == 'PTR TO -> ( character )':
            return 'delete'
        elif raw_gdb_ptype == 'logical*4':
            return 'delete'
        elif raw_gdb_ptype == 'PTR TO -> ( Type _object )':
            return 'delete'
        elif raw_gdb_ptype == 'integer(kind=8)':
            return 'delete'
        elif 'GLcontext' in raw_gdb_ptype:
            return 'delete'
        elif raw_gdb_ptype == 'long long __attribute__ ((vector_size(2)))':
            return 'delete'
        elif 'Yosys' in raw_gdb_ptype:
            return 'delete'
        ### check if we directly find a valid return type
        for return_type in return_type_list:
            if raw_gdb_ptype == return_type:
                return return_type
            # Normalize gdb/Fortran aliases to the canonical C spellings.
            elif raw_gdb_ptype == '_Bool':
                return 'bool'
            elif raw_gdb_ptype == '_Bool *':
                return 'bool *'
            elif raw_gdb_ptype == 'ulong':
                return 'unsigned long'
            elif raw_gdb_ptype == 'uint':
                return 'unsigned int'
            elif raw_gdb_ptype == 'ubyte':
                return 'unsigned char'
            elif raw_gdb_ptype == 'ubyte *':
                return 'unsigned char *'
            elif raw_gdb_ptype == 'integer':
                return 'delete' ### dont know if its signed,or unsigned or ????
            elif raw_gdb_ptype == 'ushort':
                return "unsigned short"
        ### check if { is there (aggregate type bodies start with '{')
        idx = 0
        if '{' in raw_gdb_ptype:
            idx = raw_gdb_ptype.index('{')
        if idx > 0:
            # Classify by the keyword in front of the '{' body.
            front_str = raw_gdb_ptype[:idx]
            front_str = front_str.strip()
            if 'class' in front_str:
                ### check if ptype got {} signs for class
                if '}' in front_str:
                    ### check if * or ** is after } available
                    idx = front_str.rfind('}')
                    last_front_str = front_str[idx:]
                    star_count = last_front_str.count('*')
                    if star_count == 0:
                        return 'class'
                    elif star_count == 1:
                        return 'class *'
                    elif star_count == 2:
                        return 'class **'
                    elif 'std::' in front_str:
                        return 'delete'
                    else:
                        print(f'Error star_count class >{star_count}< front_str >{front_str}<')
                        return 'unknown'
            elif 'struct' in front_str:
                star_count = front_str.count('*')
                if star_count == 0:
                    return 'struct'
                elif 'std::' in front_str:
                    return 'delete'
                elif 'QPair' in front_str:
                    return 'delete'
                elif 'ts::Rv' in front_str: ##strange stuff from a package,dont know,delete
                    return 'delete'
                elif 'fMPI' in front_str: #strange
                    return 'delete'
                else:
                    print(f'Error star_count struct >{star_count}< front_str >{front_str}<')
                    return 'unknown'
            elif 'enum' in front_str:
                star_count = front_str.count('*')
                if star_count == 0:
                    return 'enum'
                else:
                    print(f'Error star_count enum >{star_count}< front_str >{front_str}<')
                    return 'unknown'
            elif 'union' in front_str:
                star_count = front_str.count('*')
                if star_count == 0:
                    return 'union'
                else:
                    print(f'Error star_count union >{star_count}< front_str >{front_str}<')
                    return 'unknown'
            else:
                print(f'---Nothing found')
                print(f'front_str: {front_str}')
                return 'unknown'
        elif (raw_gdb_ptype.count('(') == 2) and (raw_gdb_ptype.count(')') == 2):
            # Function-pointer return type: drop it.
            return 'delete'
        elif 'substitution' in raw_gdb_ptype:
            # gdb demangler substitution string: drop it.
            return 'delete'
        else:
            return 'unknown'
    else:
        print(f'No gdb ptype found')
        return 'unknown'
def dis_split(dis):
    """Tokenize one disassembly string for text vectorization.

    Tab-separated segments are processed independently: punctuation gets
    surrounded by spaces, hex literals are exploded into single characters,
    digits inside multi-char tokens are split out, and every '0' becomes
    'null' (the vectorizer's masked value). Segments are rejoined with
    single spaces.
    """
    # One C-level pass instead of eleven chained .replace() calls.
    spaced = str.maketrans({ch: f' {ch} ' for ch in '()%,$*<>+@:'})
    segments = []
    for raw_segment in dis.split('\t'):
        line = raw_segment.translate(spaced)
        rebuilt = ''
        for token in line.split():
            if len(token) >= 2 and token.startswith('0x'):
                # Hex literal: one output char per input char, '0' -> 'null'.
                expanded = ''.join(
                    ('null' if ch == '0' else ch) + ' ' for ch in token)
            elif len(token) > 1:
                # Mixed token: split digits out, keep letters glued together.
                expanded = ''
                for ch in token:
                    if str.isnumeric(ch):
                        expanded += ('null' if ch == '0' else ch) + ' '
                    else:
                        expanded += ch
            else:
                # Single-character tokens pass through unchanged.
                expanded = token
            # Guarantee a separating space before the next token.
            if not expanded.endswith(' '):
                expanded += ' '
            rebuilt += expanded
        segments.append(rebuilt)
    return ' '.join(segments)
def proc_build(pickle_file, work_dir, save_dir):
    """Build (caller+callee disassembly, return type) samples from one archive.

    Untars one pickle archive into work_dir, then for every caller/callee
    pair found inside the same binary, emits a training sample pairing the
    concatenated tokenized disassemblies with the callee's return type.
    Samples are pickled into save_dir under the archive's base name.

    Args:
        pickle_file: path to a *.tar.bz2 pickle archive.
        work_dir: extraction directory — assumed to end with '/' (the
            basename is concatenated directly onto it; TODO confirm).
        save_dir: output directory for the resulting dataset pickle.

    Returns:
        Number of samples written.
    """
    untar_one_pickle_file(pickle_file, work_dir)
    pickle_file_content = get_pickle_file_content(work_dir + os.path.basename(pickle_file).replace('.tar.bz2', ''))
    binaries = set()
    functions = set()
    for elem in pickle_file_content:
        binaries.add(elem[7])   # elem[7]: binary name
        functions.add(elem[2])  # elem[2]: function name
    print(f'binaries >{binaries}<')
    counter = 0
    dataset_list = list()
    ## 1. get one binary
    ## 2. get one function of this binary
    ## 3. get disassembly of this function
    ## 4. check if this disassembly calls another function
    ## 4.1 filter @plt
    ## 5. if yes: get disassembly of caller function
    ## 6. save caller, callee, func_signature
    ## 7. check again, if it calls another function
    ## 8. if yes: get disassembly of caller function
    ## 9. save caller, calle, func_signature
    ##10. get disassembly of next function of this binary
    ##11. check if ....
    for bin in binaries:
        for func in functions:
            for elem in pickle_file_content:
                if elem[7] == bin and elem[2] == func:
                    att_dis = elem[4]  # AT&T-syntax disassembly lines
                    for item in att_dis:
                        # Direct calls only; skip PLT stubs and std:: symbols.
                        if 'call' in item and not '@plt' in item and not 'std::' in item:
                            ## get callee name (last token, minus angle brackets)
                            callee_name = ''
                            item_split = item.split()
                            callee_name = item_split[len(item_split)-1]
                            callee_name = callee_name.replace('<', '')
                            callee_name = callee_name.replace('>', '')
                            # Find the callee's record in the same binary.
                            for elem2 in pickle_file_content:
                                if elem2[7] == bin and elem2[2] == callee_name:
                                    string_before_func_name = get_string_before_function_name(elem2[0])
                                    return_type = get_function_return_type(string_before_func_name, elem2[1])
                                    if return_type == 'unknown':
                                        # Unresolvable type: skip the sample.
                                        pass
                                    elif return_type == 'delete':
                                        ### no return type found, so delete this item
                                        pass
                                    else:
                                        ## save caller + callee tokenized disassembly with the label
                                        dis1_str = ' '.join(att_dis)
                                        dis2_str = ' '.join(elem2[4])
                                        dis1_str = dis_split(dis1_str)
                                        dis2_str = dis_split(dis2_str)
                                        dis_str = dis1_str + dis2_str
                                        dataset_list.append((dis_str, return_type))
                                        counter += 1
                                    break
    if dataset_list:
        # Persist under the archive's base name (without .tar.bz2).
        ret_file = open(save_dir + '/' + os.path.basename(pickle_file).replace('.tar.bz2', ''), 'wb+')
        pickle_list = pickle.dump(dataset_list, ret_file)
        ret_file.close()
    return counter
def check_if_dir_exists(dir):
    """Abort the program when *dir* does not exist.

    Bug fix: the original called bare exit(), which terminates with status 0,
    so shell scripts and callers could not detect the failure. Raise
    SystemExit(1) instead so a non-zero exit status is reported.

    :param dir: path expected to be an existing directory
    :raises SystemExit: with status 1 when the directory is missing
    """
    if not os.path.isdir(dir):
        print(f'Directory >{dir}< does not exist. Create it.')
        raise SystemExit(1)
def main():
    """Driver: validate directories, fan the pickle archives out over a process
    pool running proc_build, and report how many disassembly pairs were saved."""
    config = parseArgs()
    print(f'config >{config}<')
    check_if_dir_exists(config['pickle_dir'])
    check_if_dir_exists(config['work_dir'])
    check_if_dir_exists(config['save_dir'])
    ### get all pickle files
    pickle_files = get_all_tar_filenames(config['pickle_dir'])
    ### print 5 files, check and debug
    print_5_pickle_files(pickle_files, config)
    ### build: one proc_build call per archive, in parallel.
    p = Pool(nr_of_cpus)
    pickle_files = [config["pickle_dir"] + "/" + f for f in pickle_files]
    # Each worker gets (pickle_file, work_dir, save_dir).
    star_list = zip(pickle_files, repeat(config['work_dir']), repeat(config['save_dir']))
    all_ret_types = p.starmap(proc_build, star_list)
    p.close()
    p.join()
    # NOTE(review): only the FIRST non-zero counter is printed, then the loop
    # breaks -- confirm whether a sum over all workers was intended instead.
    for counter in all_ret_types:
        if counter > 0:
            print(f'disassemblies saved >{counter}<')
            break
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
import webapp2
import cgi
import logging
import datetime
from webapp2_extras import json
from google.appengine.api import users
from config import jinja_enviroment
from models import *
class MainHandler(webapp2.RequestHandler):
    """Base request handler (Python 2 / webapp2): HTML and JSON rendering plus
    Accept-header content negotiation shared by all concrete handlers."""

    def __init__(self, request, response):
        self.initialize(request, response)

    def render_html(self, template, template_values):
        """Render *template* with Jinja, injecting the current user's nickname
        and a logout URL into *template_values* (mutated in place)."""
        self.response.headers['Content-Type'] = 'text/html; charset=UTF-8'
        html_template = jinja_enviroment.get_template(template)
        user = users.get_current_user()
        template_values['nombre'] = user.nickname()
        template_values['url_logout'] = users.create_logout_url("/")
        self.response.out.write(html_template.render(template_values))

    def render_json(self, objeto):
        """Serialize *objeto* as JSON and write it with the JSON content type."""
        rjson = json.encode(objeto)
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(rjson)

    def request_format(self):
        """Map the first entry of the Accept header to a {format: renderer} dict.

        Bug fix: the original fallback was the bare string 'html', which made
        respond_to() crash on formato.keys() for any unrecognized Accept type;
        fall back to the HTML renderer dict instead.
        """
        accept_types = self.request.headers['Accept'].split(',')
        return {
            'text/html': {'html': self.render_html},
            'application/json': {'json': self.render_json},
            'application/xml': {'xml': self.render_html}
        }.get(accept_types[0], {'html': self.render_html})

    def respond_with(self, action, parametros):
        """Invoke *action*, unpacking *parametros* when it is a tuple."""
        if (isinstance(parametros, tuple)):
            action(*parametros)
        else:
            action(parametros)

    def respond_to(self, *args):
        """Dispatch to the renderer matching the negotiated format.

        Each arg is a dict like {'html': (...)} or {'json': ...}; the first one
        keyed by the negotiated format name is rendered.
        (dict.keys()[0] relies on Python 2 returning a list.)
        """
        formato = self.request_format()
        for action in args :
            if formato.keys()[0] in action :
                self.respond_with(formato[formato.keys()[0]], action[formato.keys()[0]])

    def get(self):
        """Landing page: product listings by category, or a login redirect."""
        user = users.get_current_user()
        if user:
            template_values = {
                'bebidas_calientes' : Producto.gql("WHERE categoria = :1 ORDER BY nombre", 'Bebidas Calientes'),
                'bebidas_frias' : Producto.gql("WHERE categoria = :1 ORDER BY nombre", 'Bebidas Frias'),
                'alimentos' : Producto.gql("WHERE categoria = :1 ORDER BY nombre", 'Alimentos'),
            }
            self.render_html('index.html', template_values)
        else:
            self.redirect(users.create_login_url(self.request.uri))
class ProductosHandler(MainHandler):
    """CRUD handler for Producto entities (Python 2 / GAE datastore)."""

    # index: list all products grouped by category
    def get(self):
        bebidas_calientes = Producto.gql("WHERE categoria = :1", 'Bebidas Calientes')
        bebidas_frias = Producto.gql("WHERE categoria = :1", 'Bebidas Frias')
        alimentos = Producto.gql("WHERE categoria = :1", 'Alimentos')
        template_values = {
            'bebidas_calientes' : bebidas_calientes,
            'bebidas_frias' : bebidas_frias,
            'alimentos' : alimentos,
        }
        self.render_html('productos/index.html', template_values)

    # show: display one product, looked up by datastore key
    def show(self, producto_key):
        producto = Producto.get(producto_key)
        template_values = { 'producto': producto }
        self.render_html('productos/show.html', template_values)

    # new: blank form for a product
    def new(self):
        producto = Producto()
        template_values = {
            'producto' : producto,
        }
        self.render_html('productos/new.html', template_values)

    # create: persist a new product from form fields, then back to the index
    def post(self):
        producto = Producto()
        producto.nombre = cgi.escape(self.request.get('nombre'))
        producto.precio = float(cgi.escape(self.request.get('precio')))
        producto.categoria = self.request.get('categoria')
        producto.put()
        self.redirect('/productos')

    # edit: form pre-filled with an existing product (silently no-op if missing)
    def edit(self, producto_id):
        producto = Producto.get_by_id(long(producto_id))
        if producto :
            template_values = { 'producto' : producto }
            self.render_html('productos/edit.html', template_values)

    # update: overwrite an existing product's fields from the form
    def put(self, producto_id):
        producto = Producto.get_by_id(long(producto_id))
        producto.nombre = cgi.escape(self.request.get('nombre'))
        producto.precio = float(cgi.escape(self.request.get('precio')))
        producto.categoria = self.request.get('categoria')
        producto.put()
        self.redirect('/productos')

    # delete: NOTE(review): stub -- writes a placeholder instead of deleting.
    def delete(self):
        self.response.headers['Content-Type'] = 'text/html; charset=UTF-8'
        self.response.write('new Nota... forma')

    # modificadores: JSON of the product's modifiers grouped by modifier-type name
    def modificadores(self, producto_id):
        producto = Producto.get_by_id(long(producto_id))
        obj = {}
        for modificador in producto.modificadores :
            if modificador.tipo.nombre in obj.keys():
                obj[unicode(modificador.tipo.nombre)].append({
                    'key' : unicode(str(modificador.key())),
                    'nombre': modificador.nombre,
                    'costo': modificador.costo})
            else:
                obj[unicode(modificador.tipo.nombre)] = list([{
                    'key' : unicode(str(modificador.key())),
                    'nombre': modificador.nombre,
                    'costo': modificador.costo}])
        # An empty group dict is rendered as JSON null.
        if obj == {}:
            obj = None
        self.render_json(obj)
class ModificadoresProductosHandler(MainHandler):
    """JSON API for the ModificadorProducto join between products and modifiers."""

    # list a product's modifier associations as JSON
    def get(self, producto_id):
        producto = Producto.get_by_id(long(producto_id))
        self.respond_to(
            {'json' : (map(lambda m: m.to_dict(include={'modificador': {'tipo':None}}), producto.modificadores))}
        )

    # attach a modifier to a product with a per-product cost
    def post(self, producto_id):
        producto = Producto.get_by_id(long(producto_id))
        modificador = Modificador.get_by_id(long(self.request.get('modificador_id')))
        mod_prod = ModificadorProducto()
        mod_prod.producto = producto
        mod_prod.modificador = modificador
        mod_prod.costo = float(self.request.get('costo'))
        mod_prod.put()
        self.respond_to(
            {'json' : mod_prod.to_dict(include={'producto':None})}
        )

    # remove one association (producto_id is unused; the join row id suffices)
    def delete(self, producto_id, modificador_id):
        mod_prod = ModificadorProducto.get_by_id(long(modificador_id))
        mod_prod.delete()
        self.respond_to(
            {'json' : 'ok'}
        )
class ModificadoresHandler(MainHandler):
    """CRUD handler for Modificador entities."""

    # index: all modifiers, as HTML or JSON depending on the Accept header
    def get(self):
        modificadores = Modificador.all()
        template_values = {
            'modificadores' : modificadores
        }
        self.respond_to(
            {'html' : ('modificadores/index.html',template_values),
             'json' : map(lambda m: m.to_dict(), modificadores)}
        )

    # show: NOTE(review): renders the *edit* template -- confirm whether a
    # dedicated show.html was intended.
    def show(self, modificador_id):
        modificador = Modificador.get_by_id(long(modificador_id))
        template_values = {
            'modificador' : modificador
        }
        self.render_html('modificadores/edit.html', template_values)

    # new: blank form plus the available modifier types
    def new(self):
        modificador = Modificador()
        tipos_modificador = TipoModificador.all()
        template_values = {
            'modificador' : modificador,
            'tipos_modificador' : tipos_modificador
        }
        self.render_html('modificadores/new.html',template_values)

    # create
    def post(self):
        modificador = Modificador()
        modificador.tipo = TipoModificador.get_by_id(long(self.request.get('tipo_modificador')))
        modificador.nombre = self.request.get('nombre')
        modificador.put()
        self.redirect('/modificadores')

    # edit: pre-filled form
    def edit(self, modificador_id):
        modificador = Modificador.get_by_id(long(modificador_id))
        tipos_modificador = TipoModificador.all()
        template_values = {
            'tipos_modificador' : tipos_modificador,
            'modificador' : modificador
        }
        self.render_html('modificadores/edit.html', template_values)

    # update
    def put(self, modificador_id):
        modificador = Modificador.get_by_id(long(modificador_id))
        modificador.nombre = self.request.get('nombre')
        modificador.tipo = TipoModificador.get_by_id(long(self.request.get('tipo_modificador')))
        modificador.put()
        self.redirect('/modificadores')

    # delete
    def delete(self, modificador_id):
        modificador = Modificador.get_by_id(long(modificador_id))
        modificador.delete()
        self.redirect('/modificadores')
class TiposModificadorHandler(MainHandler):
    """CRUD handler for TipoModificador (modifier categories)."""

    # index: HTML page or JSON list depending on the Accept header
    def get(self):
        tipos_modificador = TipoModificador.all()
        template_values = {
            'tipos_modificador' : tipos_modificador
        }
        self.respond_to(
            {'html' : ('tipos_modificador/index.html', template_values) },
            {'json' : (map(lambda x: x.to_dict(), tipos_modificador)) }
        )

    # new: blank form
    def new(self):
        self.response.headers['Content-Type'] = 'text/html; charset=UTF-8'
        tipo_modificador = TipoModificador()
        template_values = {
            'tipo_modificador' : tipo_modificador
        }
        self.render_html('tipos_modificador/new.html',template_values)

    # create: 'excluyente' is a checkbox, submitted as 'on' when checked
    def post(self):
        tipo_modificador = TipoModificador()
        tipo_modificador.nombre = self.request.get('nombre')
        tipo_modificador.excluyente = (self.request.get('excluyente') == 'on')
        tipo_modificador.put()
        self.redirect('/tipos_modificador')

    # edit: pre-filled form (note: int() here vs long() elsewhere)
    def edit(self, tipo_modificador_id):
        tipo_modificador = TipoModificador.get_by_id(int(tipo_modificador_id))
        template_values = {
            'tipo_modificador' : tipo_modificador
        }
        self.render_html('tipos_modificador/edit.html', template_values)

    # update
    def put(self, tipo_modificador_id):
        tipo_modificador = TipoModificador.get_by_id(int(tipo_modificador_id))
        tipo_modificador.nombre = self.request.get('nombre')
        tipo_modificador.excluyente = (self.request.get('excluyente') == 'on')
        tipo_modificador.put()
        self.redirect('/tipos_modificador')

    # modificadores: JSON list of this type's modifiers
    def modificadores(self, tipo_modificador_id):
        tipo_modificador = TipoModificador.get_by_id(long(tipo_modificador_id))
        modificadores = tipo_modificador.modificadores
        template_values = { 'modificadores' : modificadores }
        self.respond_to(
            {'json': (map (lambda modificador: modificador.to_dict(), modificadores )) }
        )
class NotasHandler(MainHandler):
    """Handler for Nota (order ticket) entities and their nested Orden rows.

    Times are stored in UTC; a -6h delta converts to local time for display.
    NOTE(review): the timezone offset is hard-coded in several methods -- it
    presumably belongs in configuration (see the TODO below).
    """

    # index: open notes, a time window, or today's closed notes since 08:00
    def get(self):
        logging.info(datetime.datetime.today())
        delta = datetime.timedelta(hours=-6)
        ahora = datetime.datetime.today() + delta
        if self.request.get('abierta'):
            # Only the currently open notes.
            notas = Nota.gql("WHERE abierta = True")
        elif self.request.get('hora_inicio') and self.request.get('minuto_inicio'):
            # Notes between the requested local start time and now (stored UTC,
            # hence the -delta on both bounds).
            hora_inicial = ahora.replace(hour=int(self.request.get('hora_inicio')),minute=int(self.request.get('minuto_inicio')),second=0,microsecond=0)
            nota_gql = Nota.gql("WHERE fecha > :1 AND fecha < :2 ORDER BY fecha", hora_inicial-delta, ahora-delta)
            notas = nota_gql.fetch(limit=50)
        else:
            # Default: today's closed notes since 08:00 local time.
            ahora = ahora.replace(hour=8,minute=0,second=0,microsecond=0)
            notas = Nota.gql("WHERE fecha > :1 AND abierta = False", ahora)
        template_values = {'notas' : notas, 'total_venta' : sum(map(lambda n: n.total, notas)), 'delta_time' : delta }
        self.respond_to(
            {'html': ('notas/index.html', template_values) },
            {'json': (map(lambda nota: nota.to_dict(include={'ordenes': {'producto': None, 'modificadores_producto':
                {'modificador_producto': {'modificador': None} }}}), notas )) }
        )

    # new: create a note; name it after its own datastore id (hence two puts)
    def new(self):
        nota = Nota()
        nota.put()
        nota.nombre = "Nota_"+str(nota.key().id())
        nota.put()
        self.render_json(nota.to_dict())

    # show
    def show(self, nota_id):
        nota = Nota.get_by_id(long(nota_id))
        template_values = { 'nota' : nota, 'delta' : datetime.timedelta(hours=-6) }
        self.respond_to({'html': ('notas/show.html', template_values) })

    # create/update: upsert a note plus its nested orders and order-modifiers
    # from a JSON payload in the 'nota' form field
    def post(self):
        nota_json = json.decode(self.request.get('nota'))
        nota = Nota()
        if 'id' in nota_json:
            # Existing note: load it instead of creating a new one.
            nota = Nota.get_by_id(long(nota_json['id']))
        nota.total = float(nota_json['total'])
        nota.nombre = nota_json['nombre']
        if 'fecha_impresion' in nota_json:
            # Timestamp arrives in JavaScript milliseconds.
            nota.fecha_impresion = datetime.datetime.fromtimestamp(float(nota_json['fecha_impresion'])/1000)
        nota.put()
        for orden_json in nota_json['ordenes']:
            orden = Orden()
            if 'id' in orden_json:
                orden = Orden.get_by_id(long(orden_json['id']))
            orden.cantidad = int(orden_json['cantidad'])
            orden.producto = Producto.get_by_id(long(orden_json['producto']['id']))
            orden.nota = nota
            orden.put()
            if 'modificadores_producto' in orden_json:
                for modificador_producto_json in orden_json['modificadores_producto']:
                    if 'id' in modificador_producto_json:
                        orden_modificador_producto = OrdenModificadorProducto.get_by_id(long(modificador_producto_json['id']))
                    else:
                        orden_modificador_producto = OrdenModificadorProducto()
                    orden_modificador_producto.orden = orden
                    orden_modificador_producto.modificador_producto = ModificadorProducto.get_by_id(long(modificador_producto_json['modificador_producto']['id']))
                    orden_modificador_producto.put()
        self.render_json(nota.to_dict())

    # put: partial update of 'abierta' and/or 'total'
    def put(self, nota_id):
        nota = Nota.get_by_id(long(nota_id))
        if self.request.get('abierta'):
            nota.abierta = self.request.get('abierta') in ("True","true","t")
        if self.request.get('total'):
            nota.total = float(self.request.get('total'))
        nota.put()
        self.render_json(nota.key().id())

    # delete: cascade-delete the note's orders and their modifier rows, then
    # re-render today's closed notes
    def delete(self, nota_id):
        nota = Nota.get_by_id(long(nota_id))
        for orden in nota.ordenes :
            for orden_modificador_producto in orden.modificadores_producto :
                orden_modificador_producto.delete()
            orden.delete()
        nota.delete()
        #TODO Move this date into configuration; it does not belong here.
        delta = datetime.timedelta(hours=-6)
        hoy = datetime.datetime.today().replace(hour=8,minute=0,second=0,microsecond=0)
        notas = Nota.gql("WHERE fecha > :1 AND abierta = False", hoy+delta)
        template_values = {'notas' : notas, 'total_venta' : sum(map(lambda n: n.total, notas)), 'delta_time' : delta }
        self.respond_to(
            {'html': ('notas/index.html', template_values) },
            {'json': (map(lambda nota: nota.to_dict(include={'ordenes': {'producto': None, 'modificadores_producto':
                {'modificador_producto': {'modificador': None} }}}), notas )) })

    # delete a single order (and its modifier rows) from a note
    def borrar_orden(self, nota_id, orden_id):
        orden = Orden.get_by_id(long(orden_id))
        if orden.modificadores_producto:
            for orden_modificador_producto in orden.modificadores_producto :
                orden_modificador_producto.delete()
        orden.delete()
        self.render_json(orden.key().id())

    # change an order's quantity
    def update_orden(self, nota_id, orden_id):
        orden = Orden.get_by_id(long(orden_id))
        orden.cantidad = int(self.request.get('cantidad'))
        orden.put()

    # create an order inside a note from a JSON payload
    def crea_orden(self, nota_id):
        orden_json = json.decode(self.request.get('orden'))
        logging.info(orden_json)
        #TODO Create the order through the model layer.
        orden = Orden();
        orden.cantidad = orden_json['cantidad']
        orden.llevar = orden_json['llevar']
        orden.producto = Producto.get_by_id(long(orden_json['producto_id']))
        orden.nota = Nota.get_by_id(long(nota_id))
        orden.put()
        if 'modificadores_producto' in orden_json:
            for modificador_producto_json in orden_json['modificadores_producto']:
                orden_modificador_producto = OrdenModificadorProducto()
                orden_modificador_producto.orden = orden
                orden_modificador_producto.modificador_producto = ModificadorProducto.get_by_id(long(modificador_producto_json['id']))
                orden_modificador_producto.put()
        self.render_json(orden.key().id())
|
# Simple CNN model for CIFAR-10
import numpy as np
from keras.datasets import cifar10
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.constraints import maxnorm
from keras.wrappers.scikit_learn import KerasClassifier
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from sklearn.model_selection import GridSearchCV
from keras.utils import np_utils
from keras import backend as K
# Use Theano-style channel ordering: inputs are (channels, rows, cols).
K.set_image_dim_ordering('th')
# fix random seed for reproducibility
seed = 11
np.random.seed(seed)
# load data
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
# normalize inputs from 0-255 to 0.0-1.0
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train = X_train / 255.0
X_test = X_test / 255.0
# one hot encode outputs
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
# Create the model: stacked 3x3 conv blocks with dropout, then a dense head.
# NOTE(review): `init=` is the legacy Keras 1 spelling of kernel_initializer --
# confirm it is accepted by the installed Keras version.
model = Sequential()
model.add(Conv2D(64, (3, 3), init='glorot_uniform', input_shape=(3, 32, 32), padding='same', activation='relu', kernel_constraint=maxnorm(3)))
model.add(Conv2D(64, (3, 3), init='glorot_uniform', input_shape=(3, 32, 32), padding='same', activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), init='glorot_uniform', activation='relu', padding='same', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.1))
model.add(Conv2D(128, (3, 3), init='glorot_uniform', activation='relu', padding='same', kernel_constraint=maxnorm(4)))
model.add(Dropout(0.2))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, (3, 3), init='glorot_uniform', activation='relu', padding='same', kernel_constraint=maxnorm(4)))
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(1024, init='glorot_uniform', activation='relu', kernel_constraint=maxnorm(4)))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
# Compile model
epochs = 50
lrate = 0.01
decay = lrate/epochs
# NOTE(review): lrate and decay are computed but never used -- passing the
# string 'adam' uses Keras' default optimizer settings. Pass an optimizer
# instance if a custom learning-rate schedule was intended.
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
# Fit the model
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs, batch_size=32)
# Final evaluation of the model
scores = model.evaluate(X_test, y_test, verbose=0)
print("Accuracy: %.2f%%" % (scores[1]*100))
# serialize model to JSON
model_json = model.to_json()
with open("model.json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("model.h5")
print("Saved model to disk")
|
# This one works fine.
print('while loop')
fruit = 'banana'
index = 0
while index < len(fruit):
    print(index, fruit[index])
    index += 1
# ...but a for loop always reads better.
print('for loop')
for letter in fruit:
    print(letter)
# Slicing runs up to -- but does not include -- the end position.
complete = 'Richard'
slice1 = complete[:2]  # positions 0 and 1 only; 2 is excluded
print(slice1)
slice2 = complete[2:]  # from position 2 to the end
print(slice2)
|
from itertools import permutations
from time import time
from GenerateDict import GenerateDict
# Function: CombineLetters
# Dependency: itertools.permutations
# Input: a list such as ['a', 'b', 'c']
# Output: a set such as {'ab', 'bac', 'b', 'c', 'acb', 'ca', 'bc', 'cb', 'cba', 'ba', 'bca', 'ac', 'cab', 'abc', 'a'}
# Description:
def CombineLetters(letters, word_dictionary):
    """Return every dictionary word that can be spelled with the given letters.

    Builds permutations of *letters* of every length from 1 up to
    len(letters) and keeps only those that appear in *word_dictionary*.

    :param letters: list of single-character strings, e.g. ['a', 'b', 'c']
    :param word_dictionary: set of valid words to filter against
    :return: set of matching words
    """
    found_words = set()
    for size in range(1, len(letters) + 1):
        for combo in permutations(letters, size):
            candidate = ''.join(combo)
            if candidate in word_dictionary:
                found_words.add(candidate)
    return found_words
# Test Codes: time one run of CombineLetters against the English word list.
if __name__ == "__main__":
    Letters = ['e', 'a', 'e', 'o', 'r', 't', 's', 'm', 'n', 'z']
    # Letters = ['a', 'b', 'c']
    word_dictionary = GenerateDict("wordsEn.txt")  # word set loaded from file
    before_time = time()
    CombineLetters(Letters, word_dictionary)
    print(time()- before_time)  # elapsed seconds
    print('down')  # NOTE(review): probably meant 'done' -- confirm before changing
import io
import random
import aiounittest
import itchat
from collections import namedtuple
from forklift.config import STICKERS_FOR_SPAM, ANIMATED_QUERY_TYPE
from forklift.util import get_file, match_query_from_text, is_spam_msg
class TestUtil(aiounittest.AsyncTestCase):
    """Unit tests for forklift.util helpers (async-aware via aiounittest)."""

    async def test_get_file(self):
        # Downloading any configured spam sticker should yield an in-memory file.
        file = await get_file(random.choice(STICKERS_FOR_SPAM))
        self.assertIsInstance(file, io.BytesIO)

    def test_match_query_from_text(self):
        # "求...表情" / "有没有...表情" / "谁有...表情" extract the query as a
        # plain (non-animated) sticker request.
        query, query_type = match_query_from_text('求水果表情')
        self.assertEqual(query, '水果')
        self.assertNotEqual(query_type, ANIMATED_QUERY_TYPE)
        query, query_type = match_query_from_text('有没有水果表情')
        self.assertEqual(query, '水果')
        self.assertNotEqual(query_type, ANIMATED_QUERY_TYPE)
        query, query_type = match_query_from_text('谁有水果表情')
        self.assertEqual(query, '水果')
        self.assertNotEqual(query_type, ANIMATED_QUERY_TYPE)
        # "...动图" marks an animated-image request.
        query, query_type = match_query_from_text('有没有水果动图')
        self.assertEqual(query, '水果')
        self.assertEqual(query_type, ANIMATED_QUERY_TYPE)

    def test_is_spam_msg(self):
        Msg = namedtuple('Msg', ['type', 'text'])
        # Any SHARING message counts as spam.
        msg = Msg(type=itchat.content.SHARING, text='')
        self.assertTrue(is_spam_msg(msg))
        # A typical Taobao share-code text is detected as spam.
        text = '''
【我正在PK人气赢能量,快来为我点赞】,復·制这段描述¥VNEkbhtJMv0¥后咑閞👉手机淘宝👈或者用浏览器咑閞https://m.tb.cn/h.3846eWj 查看
'''.strip()
        msg = Msg(type=itchat.content.TEXT, text=text)
        self.assertTrue(is_spam_msg(msg))
        # Ordinary text is not spam.
        msg = Msg(type=itchat.content.TEXT, text='asdfasdf')
        self.assertFalse(is_spam_msg(msg))
|
# For each of t test cases: among n tablets, find the largest screen area
# (l*h) whose price p fits within budget b; print "no tablet" if none fits.
t = int(input())
area = []
for i in range(t):
    n, b = input().split()
    n, b = int(n), int(b)
    area.append(0)
    for j in range(n):
        l, h, p = input().split()
        l, h, p = int(l), int(h), int(p)
        # Bug fix: the original compared against area[0] (the FIRST test
        # case's best) instead of area[i], corrupting every case after it.
        if (p <= b) and (l * h >= area[i]):
            area[i] = l * h
for i in range(t):
    if area[i] == 0:
        print("no tablet")
    else:
        print(area[i])
import sys
import re
# Validate that the input is exactly 2 digits, 3 letters, 4 digits.
n = input()
# Bug fix: re.match only anchors at the start of the string, so input with
# trailing extra characters (e.g. '12abc1234XYZ') was reported as valid.
# re.fullmatch requires the whole string to match the pattern.
if re.fullmatch(r'[0-9]{2}[A-Za-z]{3}[0-9]{4}', n):
    print("valid")
else:
    print("invalid")
|
import pandas as pd
import copy
import random
def combine_interval_points(fun_result, data, just_first_one=False):
    """Merge consecutive points of the same type into a single representative.

    Runs of adjacent ding (peak) or di (trough) points are collapsed to one
    point: the highest 'high' for a ding run, the lowest 'low' for a di run
    (or simply the first point of the run when just_first_one is True).

    :param fun_result: ordered list of {"index": row, "type_is": "ding"|"di"} dicts
    :param data: DataFrame holding at least 'high' and 'low' columns, indexed by row
    :param just_first_one: pick the run's first point instead of its extreme price
    :return: new list with each run replaced by one point
    NOTE(review): a single-element fun_result returns [] (the loop over
    fun_result[:-1] never runs) -- confirm whether the lone point should be kept.
    """
    if len(fun_result) == 0:
        return fun_result
    # Phase 1 -- group: walk adjacent pairs, accumulating same-type runs into
    # `inter` and flushing them into need_combine; isolated points are appended
    # directly (not wrapped in a list).
    need_combine = []
    inter = []
    len_fun_result = len(fun_result)
    for index, i in enumerate(fun_result[:-1]):
        if fun_result[index + 1]["type_is"] == i["type_is"]:
            if i not in inter:
                inter.append(i)
            inter.append(fun_result[index + 1])
            # check the last one: a run reaching the end must still be flushed
            if index == len_fun_result - 2:
                need_combine.append(inter)
        else:
            if len(inter) != 0:
                # Close the current run; `i` is already its last member.
                need_combine.append(inter)
                inter = []
            else:
                # `i` is isolated (differs from both neighbours).
                need_combine.append(i)
            # check the last one: the final point is isolated here
            if index == len_fun_result - 2:
                if fun_result[index + 1] not in need_combine[:-1]:
                    need_combine.append(fun_result[index + 1])
    # Phase 2 -- replace: pick one representative point per run.
    new_list = []
    for comb in need_combine:
        if not isinstance(comb, list):
            # Isolated point: keep as-is.
            new_list.append(comb)
            continue
        type_is = comb[0]["type_is"]
        if type_is == "di":
            # Troughs are represented by their lowest low.
            price_type = "low"
            price_fun = min
        else:
            # Peaks are represented by their highest high.
            price_type = "high"
            price_fun = max
        price = []
        for i in comb:
            price.append(data.loc[i["index"]][price_type])
        if not just_first_one:
            target_item = price_fun(price)
            target_index = price.index(target_item)
        else:
            target_index = 0
        target = comb[target_index]
        new_list.append(target)
    return new_list
#######################################################################################################################
# gain training data #################################################################################################
#######################################################################################################################
def gain_training_set(fun_result, data, set_len=5):
    """
    :param fun_result: the ding di result
    :param data: whole dataFrame
    :param set_len: how many data point you want to use to predict
    :return: a list of data for which ding di

    For each ding/di point, package the point itself, its own feature row,
    the set_len rows leading up to it, and the row immediately after it.
    """
    # Restrict to the model's feature columns.
    in_function_data = data[["open", "high", "close", "low", "volume", "p_change"]]
    training_set = []
    for i in fun_result:
        index = i["index"]
        # Points too close to the start lack a full look-back window.
        if index < set_len:
            continue
        # pandas .loc slicing is END-INCLUSIVE: set_len rows up to and
        # including `index`.
        inter_data = in_function_data.loc[index - set_len + 1:index]
        # NOTE(review): .loc[index + 1] raises KeyError when the point is the
        # last row of `data` -- confirm callers never pass such a point.
        training_set.append({"point": i,
                             "point_information": copy.deepcopy(in_function_data.loc[index]),
                             "pre_training_data": copy.deepcopy(inter_data),
                             "pro_training_data": copy.deepcopy(in_function_data.loc[index + 1]),
                             })
    return training_set
# training with one step after
def gain_random_no_meaning_point(fun_result, data, len_fun_result=None):
    """
    gain some data point which is not ding and di
    :param fun_result: list of {"index": ..., "type_is": ...} ding/di points
    :param data: whole dataFrame (must contain the OHLCV + p_change columns)
    :param len_fun_result: how many points to sample; defaults to 2x len(fun_result)
    :return: DataFrame of sampled rows, labeled type_is == "in_trend"
    """
    if len_fun_result is None:
        len_fun_result = len(fun_result) * 2
    ding_di_result = [i["index"] for i in fun_result]
    all_index = set(range(len(data)))
    # Candidate rows: every index that is neither a ding nor a di point.
    no_meaning_point = list(all_index.difference(set(ding_di_result)))
    # Robustness fix: random.sample raises ValueError when asked for more
    # items than the population holds; clamp the request instead.
    sample_size = min(len_fun_result, len(no_meaning_point))
    no_meaning_point = random.sample(no_meaning_point, sample_size)
    no_meaning_point.sort()
    no_meaning_point = copy.deepcopy(data.loc[no_meaning_point])
    no_meaning_point["type_is"] = "in_trend"
    no_meaning_point = no_meaning_point[["open", "high", "close", "low", "volume", "p_change", "type_is"]]
    return no_meaning_point
def a_point_after_ding_di(training_set, one_type=True):
    """Build a labeled DataFrame from the row one step AFTER each ding/di point.

    :param training_set: output of gain_training_set (each item carries a
        'pro_training_data' Series and a 'point' dict with 'type_is')
    :param one_type: collapse all labels to the single class "ding_di"
    :return: DataFrame with the feature columns plus a 'type_is' label column
    """
    # Fix: DataFrame.append was deprecated and removed in pandas 2.0; collect
    # the labeled rows and build the frame in one shot instead.
    rows = []
    for i in training_set:
        inter = copy.deepcopy(i["pro_training_data"])
        inter["type_is"] = i["point"]["type_is"]
        rows.append(inter)
    result = pd.DataFrame(rows)
    if one_type:
        result["type_is"] = "ding_di"
    result = result[["open", "high", "close", "low", "volume", "p_change", "type_is"]]
    return result
def the_point_at_ding_di(training_set, one_type=True):
    """Build a labeled DataFrame from the row AT each ding/di point itself.

    :param training_set: output of gain_training_set (each item carries a
        'point_information' Series and a 'point' dict with 'type_is')
    :param one_type: collapse all labels to the single class "ding_di"
    :return: DataFrame with the feature columns plus a 'type_is' label column
    """
    # Fix: DataFrame.append was deprecated and removed in pandas 2.0; collect
    # the labeled rows and build the frame in one shot instead.
    rows = []
    for i in training_set:
        inter = copy.deepcopy(i["point_information"])
        inter["type_is"] = i["point"]["type_is"]
        rows.append(inter)
    result = pd.DataFrame(rows)
    if one_type:
        result["type_is"] = "ding_di"
    result = result[["open", "high", "close", "low", "volume", "p_change", "type_is"]]
    return result
if __name__ == '__main__':
    # Smoke test: run the ding/di detectors on one stock's spreadsheet and
    # assemble a mixed training set of labeled and "no-meaning" points.
    from ding_di import run_functions, get_ding, get_di
    this_code = "600030"  # 600036 # 600298 # 000858
    this_functions = {"ding": get_ding, "di": get_di}
    this_data = pd.read_excel("code" + this_code + ".xlsx")
    # Re-index to a plain 0..n-1 range so row indices match positions.
    this_data.index = range(len(this_data))
    this_fun_result = run_functions(this_functions, this_data)
    # print(this_fun_result)
    this_fun_result = combine_interval_points(this_fun_result, this_data)
    this_training_set = gain_training_set(this_fun_result, this_data)
    this_no_meaning_point = gain_random_no_meaning_point(this_fun_result, this_data)
    this_points_after_ding_di = a_point_after_ding_di(this_training_set)
    # print(a)
    # NOTE(review): DataFrame.append was removed in pandas 2.0 -- this line
    # needs pd.concat on modern pandas.
    this_training_set = this_no_meaning_point.append(this_points_after_ding_di)
    print(this_training_set)
|
from pyxnat import Interface
def get_XNAT(username, password, xnat_cache, xnatUrl='https://xnat.hdni.org/xnat'):
    """Open and return a pyxnat Interface session.

    :param username: XNAT account name
    :param password: XNAT account password
    :param xnat_cache: local directory used by pyxnat for response caching
    :param xnatUrl: base URL of the XNAT server
    :return: connected pyxnat.Interface instance
    """
    return Interface(
        server=xnatUrl,
        user=username,
        password=password,
        cachedir=xnat_cache,
    )
|
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.textinput import TextInput
class TestApp(App):
    """Minimal Kivy app whose entire UI is a single button."""
    def build(self):
        # build() must return the root widget of the application window.
        return Button(text='Hello World')
def on_enter(instance, value):
    # Property-change callback: receives the widget and the new property value.
    # NOTE(review): the message says "pressed enter", but a bind on `text`
    # fires on every keystroke -- enter-only needs on_text_validate instead.
    print("User pressed enter in", instance)
textinput = TextInput()
# Bug fix: the original bound to the undefined name `on_text`, raising
# NameError at import time; bind the handler actually defined above.
# NOTE(review): binding to `text` fires on every change, not only on Enter --
# use textinput.bind(on_text_validate=...) if Enter-only behavior is wanted.
textinput.bind(text=on_enter)
TestApp().run()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 28 17:02:27 2021
@author: kaydee
"""
import flask
from flask import Flask, redirect, url_for, request, render_template, send_file
from werkzeug.utils import secure_filename
from werkzeug.datastructures import FileStorage
import os
from Alignment import ChangePerspective
from vcopy import Model as vModel
# Upload destination (relative to the app root) and accepted file extensions.
UPLOAD_FOLDER = 'static/images/'
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
# Path of the most recently processed image; shared between the /output and
# /results handlers via `global PATH`.
PATH = ""
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
@app.route("/")
def home():
    # Root URL simply forwards to the /home landing page.
    return redirect(url_for("index"))
@app.route("/home")
def index():
    # Landing page containing the image upload form.
    return render_template("index.html")
@app.route("/results")
def res():
    """Run the recognition model on the last processed image (PATH was set by
    the /output handler) and return its predictions as plain text."""
    global PATH
    vM1= vModel(PATH)
    sudoku = vM1.predictions
    return str(sudoku)
@app.route("/output",methods=["GET","POST"])
def out():
    """Handle the upload form: save the posted image, run perspective
    correction on it, and render the before/after comparison page."""
    global PATH
    cpers = ChangePerspective()
    if request.method == "POST":
        img = request.files["img"]
        sfname = secure_filename(img.filename)
        img.save(app.config['UPLOAD_FOLDER']+sfname)
        fpath = os.path.join(app.config['UPLOAD_FOLDER'],sfname)
        print(sfname,app.config['UPLOAD_FOLDER'],fpath)
        # readim writes the corrected image as "edited_<name>" in the same dir.
        cpers.readim(fpath,sfname)
        edited_sfname ="edited_"+sfname
        # Templates live one level below static/, hence the relative prefix.
        rel_image_path = "../static/images/"
        # Remember the processed image for the /results handler.
        PATH =os.path.join(app.config['UPLOAD_FOLDER'],edited_sfname)
        return render_template("result.html",inp = rel_image_path+sfname, out = rel_image_path+edited_sfname)
    # GET without an upload: nothing to show.
    return "no"
if __name__ == "__main__":
    # Development server only; disable debug mode in production.
    app.run(debug = True)
from pupil_parse.preprocess_utils import config as cf
from pupil_parse.preprocess_utils import extract_session_metadata as md
from pupil_parse.preprocess_utils import edf2pd as ep
from pupil_parse.preprocess_utils import visualize as vz
from pupil_parse.analysis_utils import summarize_amplitude as amp
import time
import numpy as np
import matplotlib.pyplot as plt
import os
def main():
    """Summarize pupil amplitude (peaks and means) for every subject/session
    of the reward task, reading the z-scored HDF5 files produced upstream."""
    (raw_data_path, intermediate_data_path,
     processed_data_path, figure_path, _) = cf.path_config()
    (unique_subjects, unique_sessions, unique_reward_codes) = md.extract_subjects_sessions(raw_data_path,
        reward_task=1)
    start_time = time.time()
    for subj_id in unique_subjects:
        for session_n in unique_sessions:
            _, _, reward_code = ep.find_data_files(subj_id=subj_id,
                session_n=session_n, reward_task=1, lum_task=0,
                raw_data_path=raw_data_path)
            reward_samples = ep.read_hdf5('samples', subj_id, session_n,
                processed_data_path, reward_code=reward_code, id_str='zscored')
            # NOTE(review): messages/events are loaded but never used below --
            # confirm whether they are needed here.
            reward_messages = ep.read_hdf5('messages', subj_id, session_n,
                processed_data_path, reward_code=reward_code, id_str='zscored')
            reward_events = ep.read_hdf5('events', subj_id, session_n,
                processed_data_path, reward_code=reward_code, id_str='zscored')
            # Skip sessions whose pupil trace contains any NaN samples
            # (the `!= 1` inverts the "no NaNs" check).
            if (np.isnan(reward_samples.z_pupil_diameter).sum() == 0) != 1:
                print('This session has no data.')
                continue
            peak_df = amp.locate_peaks(reward_samples, subj_id,
                session_n, reward_code, save=True)
            mean_df = amp.find_mean(reward_samples, subj_id,
                session_n, reward_code, save=True)
    end_time = time.time()
    time_elapsed = end_time - start_time
    print('time elapsed: ', time_elapsed)

if __name__ == '__main__':
    main()
|
#### Takes bilstm model and evaluates it on an eval file
##
## Usage: ./evaluate_taggingmodel.py PATH_TO_MODEL PATH_TO_EVAL_FILE
from simplebilty import SimpleBiltyTagger
from simplebilty import load_tagger, save_tagger
from lib.mio import read_conll_file
from vocab import Vocab
import os
from collections import namedtuple
import sys
import json
import dynet as dynet
# CLI arguments: config JSON, saved model path, CoNLL eval file.
config=sys.argv[1]
model=sys.argv[2]
testfile=sys.argv[3]
# The vocabulary is stored next to the model file.
vocabfile=os.path.dirname(model)+"/vocab.txt"
d = json.load(open(config))
# Turn the JSON dict into an attribute-style options object.
config = namedtuple("options", d.keys())(*d.values())
vocab = Vocab(vocabfile)
# NOTE(review): `in` on a namedtuple tests membership of *values*, not field
# names -- hasattr(config, "embeds") is probably what was meant here.
if "embeds" in config:
    tagger = SimpleBiltyTagger(config.in_dim, config.h_dim, config.c_in_dim, config.h_layers,embeds_file=config.embeds,word2id=vocab.word2id,)
else:
    tagger = SimpleBiltyTagger(config.in_dim, config.h_dim, config.c_in_dim, config.h_layers,embeds_file=None,word2id=vocab.word2id)
# NOTE(review): the freshly constructed tagger is immediately replaced by the
# loaded one -- confirm the constructor call above is still required.
tagger = load_tagger(model)
test_X, test_Y = tagger.get_data_as_indices(testfile)
correct, total = tagger.evaluate(test_X, test_Y)
print("accuracy", correct/total)
# Collect gold label sequences to write alongside the predictions.
dev_test_labels=[]
for _, tags in read_conll_file(testfile):
    dev_test_labels.append(tags)
tagger.get_predictions_output(test_X, dev_test_labels, "dev.xxx.out")
|
from storm.locals import *
class Node(Storm):
    """A graph vertex persisted in the `node` table."""
    __storm_table__ = "node"
    id = Int(primary=True)
    identifier = Unicode()
    # One-to-many relations resolved lazily by Storm.
    properties = ReferenceSet(id, "NodeProperty.node_id")
    inbound = ReferenceSet(id, "Edge.target_id")
    outbound = ReferenceSet(id, "Edge.source_id")
    def __repr__(self):
        return "<node: %s>" % self.identifier
    def serialize(self):
        # Plain-dict form suitable for JSON encoding.
        obj = {
            "id": self.identifier,
            "properties": _key_value(self.properties)
        }
        return obj
class NodeProperty(Storm):
    """A key/value attribute attached to a Node (`nodeprop` table)."""
    __storm_table__ = "nodeprop"
    id = Int(primary=True)
    node_id = Int()
    key = Unicode()
    value = Unicode()
    # Back-reference to the owning node.
    node = Reference(node_id, "Node.id")
    def __repr__(self):
        return "<NodeProperty %s = %s>" % (self.key, repr(self.value))
class Edge(Storm):
    """A directed, weighted connection between two Nodes (`edge` table)."""
    __storm_table__ = "edge"
    id = Int(primary=True)
    identifier = Unicode()
    # Provenance of the edge and a confidence score for it.
    origin = Unicode()
    confidence = Float()
    source_id = Int()
    target_id = Int()
    source = Reference(source_id, "Node.id")
    target = Reference(target_id, "Node.id")
    properties = ReferenceSet(id, "EdgeProperty.edge_id")
    def __repr__(self):
        return "<edge: %s (%s) %s>" % (self.source.identifier, self.origin, self.target.identifier)
    def serialize(self):
        # Plain-dict form suitable for JSON encoding.
        obj = {
            "id": self.identifier,
            "confidence": self.confidence,
            "origin": self.origin,
            "source": self.source.identifier,
            "target": self.target.identifier,
            "properties": _key_value(self.properties)
        }
        return obj
class EdgeProperty(Storm):
    """A key/value attribute attached to an Edge (`edgeprop` table)."""
    __storm_table__ = "edgeprop"
    id = Int(primary=True)
    edge_id = Int()
    key = Unicode()
    value = Unicode()
    # Back-reference to the owning edge.
    edge = Reference(edge_id, "Edge.id")
def _key_value(propobj):
kv = {}
for obj in propobj:
kv[obj.key] = obj.value
return kv |
#!/usr/bin/python
import smbus
import time
import math
bus = smbus.SMBus(1)     # I2C bus 1 -- presumably a Raspberry Pi; confirm board
address = 0x1e           # magnetometer I2C address (the "gy87" comment below suggests an HMC5883L on a GY-87 breakout -- confirm)
mpu6050address = 0x68    # MPU-6050 accel/gyro that gates I2C access on the GY-87
def read_byte(adr):
    # Read one register of the magnetometer at the module-level `address`.
    return bus.read_byte_data(address, adr)
def read_word(adr):
    """Read a big-endian 16-bit value: register ``adr`` holds the high byte,
    ``adr + 1`` the low byte."""
    high_byte = bus.read_byte_data(address, adr)
    low_byte = bus.read_byte_data(address, adr + 1)
    return (high_byte << 8) + low_byte
def read_word_2c(adr):
    """Read a 16-bit register pair and interpret it as a signed
    (two's-complement) integer."""
    raw = read_word(adr)
    # val - 0x10000 is algebraically identical to -((65535 - val) + 1).
    return raw - 0x10000 if raw >= 0x8000 else raw
def write_byte(adr, value):
    # Write one configuration register of the magnetometer at `address`.
    bus.write_byte_data(address, adr, value)
# ______________for gy87 only________________
# The GY-87 breakout routes the magnetometer behind the MPU-6050, so the
# MPU-6050 must be configured to pass I2C through and woken from sleep first.
bus.write_byte_data(mpu6050address, 0x37, 2)  # INT_PIN_CFG: enable i2c bypass -- confirm against MPU-6050 register map
bus.write_byte_data(mpu6050address, 0x6a, 0)  # USER_CTRL: clear master mode -- confirm
bus.write_byte_data(mpu6050address, 0x6b, 0)  # PWR_MGMT_1: wake from sleep -- confirm
# ___________________________________________
write_byte(0, 0b01110000)  # Set to 8 samples @ 15Hz
write_byte(1, 0b00100000)  # 1.3 gain LSb / Gauss 1090 (default)
write_byte(2, 0b00000000)  # Continuous sampling
scale = 0.92               # raw LSB -> field-strength conversion factor -- TODO confirm units
# NOTE: Python 2 print statements -- this script targets Python 2.
while True:
    # Axis registers are interleaved X, Z, Y (offsets 3, 5, 7) -- presumably
    # the HMC5883L layout; verify against the datasheet.
    x_out = read_word_2c(3) * scale
    y_out = read_word_2c(7) * scale
    z_out = read_word_2c(5) * scale
    bearing = math.atan2(y_out, x_out)   # heading in the horizontal plane
    bearing1 = math.atan2(x_out, math.sqrt((y_out*y_out)+(z_out*z_out)))
    if (bearing < 0):
        bearing += 2 * math.pi           # normalise to [0, 2*pi)
    if (bearing1 < 0):
        bearing1 += 2 * math.pi
    print "Bearing: ", math.degrees(bearing)
    print "Bearing1: ", math.degrees(bearing1)
    print "________________________________________________"
    time.sleep(2)
# BABY QUAKEBOT!
#
# https://source.opennews.org/en-US/articles/how-break-news-while-you-sleep/
#
#
# This is a python file, so you're going to run them from the
# command line by going to the folder it's in and running this terminal
# command...
#
# python eq_homework.py
#
# Then a little magic will happen! Unfortunately, to make a lot of magic
# happen you'll need to fill in the functions below.
#
# The info that comes from USGS looks something like this:
# Sample USGS observation record used throughout the exercises below.
# Note every value (including magnitude and depth) arrives as a string.
earthquake = {
    'rms': '1.85',
    'updated': '2014-06-11T05:22:21.596Z',
    'type': 'earthquake',
    'magType': 'mwp',
    'longitude': '-136.6561',
    'gap': '48',
    'depth': '10',
    'dmin': '0.811',
    'mag': '5.7',
    'time': '2014-06-04T11:58:58.200Z',
    'latitude': '59.0001',
    'place': '73km WSW of Haines, Alaska',
    'net': 'us',
    'nst': '',
    'id': 'usc000rauc'}
# We're going to break it into pieces and see if we can make a nice little version of Quakebot.
# If there was an earthquake, it might say this
#
# There was a big earthquake Monday morning a ways away from Haines, Alaska
#
# But what if it was huge, or tiny, or just medium-sized? Or not near Haines at all?
# Let's see what we can do using functions to make this magic happen.
# PROBLEM 1:
# Let's make the print statement reflect the size of the earthquake
#
# Write a function that describes each earthquake using a scale similar to the
# one at the link below.
#
# Hint: You'll use if statements
#
# http://www.sdgs.usd.edu/publications/maps/earthquakes/images/RichterScale.gif
# Here, I have created a scale similar to the USGS Richter scale, with an adjustment to
# the not felt and minor categories. I have also added to the scale category ranges a measure of real world resulting damage.
# Defining an earthquake size function that inputs a richter measurement, it takes sample inputs and outputs the scale category.
def earthquake_size(richter_measurement):
    """Translate a Richter magnitude (number or numeric string) into a
    descriptive label; non-positive magnitudes fall through (returning None),
    exactly as the original ladder did."""
    magnitude = float(richter_measurement)
    if 0 < magnitude <= 2:
        return "imperceivable"
    elif 2 < magnitude <= 4:
        return "minor"
    elif 4 < magnitude <= 5:
        return "moderate"
    elif 5 < magnitude <= 6:
        return "strong"
    elif 6 < magnitude <= 7:
        return "very strong"
    elif 7 < magnitude <= 8:
        return "major"
    elif magnitude > 8:
        return "devastating"
# Smoke tests for earthquake_size (Python 2 print statements -- this
# snippet targets Python 2).
print "Strength: " + earthquake_size(1.5)
print "Strength: " + earthquake_size('3.9')
print "Strength: " + earthquake_size(4.5)
print "Strength: " + earthquake_size(5.8)
print "Strength: " + earthquake_size(6.8)
print "Strength: " + earthquake_size(7.1)
print "Strength: " + earthquake_size(8.0)
# PROBLEM 2:
# Let's make the print statement reflect the depth of the earthquake
#
# Make a function that describes each earthquake using a depth according to
# the information at the link below
#
# http://earthquake.usgs.gov/learn/topics/seismology/determining_depth.php
#
# Hint: You'll use if statements, and be careful about types!
# We will use the following scale categories (km below the earth's surface):
# Shallow earthquakes = 0-70 km deep, Intermediate earthquakes= 70-300 km, Deep earthquakes= 300-700km deep
# Here we define another earthquake function with shallow, intermediate, and deep categories. Anything else is impossible or unmeasurable.
def earthquake_depth(depth):
    """Classify a focal depth in km (number or numeric string) as shallow,
    intermediate, or deep; anything outside (0, 700] is reported as
    impossible or unmeasurable."""
    depth_km = float(depth)
    if 0 < depth_km <= 70:
        return "shallow"
    if 70 < depth_km <= 300:
        return "intermediate"
    if 300 < depth_km <= 700:
        return "deep"
    return "impossible or unmeasurable"
# Spot checks for earthquake_depth, including out-of-range inputs
# (Python 2 print statements).
print earthquake_depth('70')
print earthquake_depth(71)
print earthquake_depth(150)
print earthquake_depth(700)
print earthquake_depth(7000)
print earthquake_depth('-1')
# PROBLEM 3:
# Let's make the print statement reflect the location the earthquake
# happened by
#
# Use regular expressions to extract the location from the argument location_string
# *or* research the 'split' function and see if it can be of use if you pass
# it a certain special separator
# Here, we used a split method on the location variable based on finding the string " of ".
# It takes whatever is after that, which is the location and returns that.
def earthquake_location(location_str):
    """Return the place name that follows the distance/direction prefix.

    E.g. ``"73km WSW of Haines, Alaska"`` -> ``"Haines, Alaska"``.

    Robustness fix: the original ``split(" of ", 1)[1]`` raised IndexError
    when the string had no ``" of "`` separator; now the input is returned
    unchanged in that case.
    """
    _, separator, place = location_str.partition(" of ")
    return place if separator else location_str
print earthquake_location("73km WSW of Haines, Alaska")
# PROBLEM 4:
# Let's make the print statement reflect the distance between the earthquake
# and the city
#
# You'll want to use several different categories, ie 'nearby',
# 'far away from', and 'nowhere near'
#
# Hint: You'll use regular expressions to extract the kilometers from location_string,
# then use if statements on the result
#
# After importing the re library, we define a distance function that inputs a location string.
# The function searches for all digits within the string, takes the first digit value and assigns to a variable.
# If the variable is >100km, it is not near, if it is >50km, it is somewhat close, and if it is <50km, it is dangerously close.
import re
def earthquake_distance(location_str):
    """Describe how close an earthquake is, based on the first run of digits
    in the location string (the leading kilometre figure)."""
    # First digit group in the string; IndexError if there is none, exactly
    # like the original findall()[0] access.
    km = float(re.findall(r"\d+", location_str)[0])
    if km > 100:
        return "not near"
    if km > 50:
        return "somewhat close"
    return "dangerously close"
print earthquake_distance("101km N of Tokyo, Japan 243")
print earthquake_distance("73km WSW of Haines, Alaska")
print earthquake_distance("19km SE of Istanbul, Turkey")
# PROBLEM 5 & 6:
#
# These were written by the instructors to show what the full bot will do.
# Message: Don't worry about these two functions yet.
#
#
def earthquake_day(time_string):
    """Instructor-provided stub: always reports "Monday" regardless of input."""
    return "Monday"
def earthquake_time(time_string):
    """Instructor-provided stub: always reports "morning" regardless of input."""
    return "morning"
# Assemble full news-bot sentences from all the helper functions
# (Python 2 print statements).
print "There was a " + earthquake_size(1.1) + \
    ", " + earthquake_depth('0.1') + \
    " earthquake " + earthquake_day('2014-06-04T11:58:58.200Z') + \
    " " + earthquake_time('2014-06-04T11:58:58.200Z') + \
    " " + earthquake_distance("73km WSW of Haines, Alaska") + \
    " " + earthquake_location("73km WSW of Haines, Alaska")
print "There was a " + earthquake_size(8.7) + \
    ", " + earthquake_depth('98.22') + \
    " earthquake " + earthquake_day('2014-06-04T11:58:58.200Z') + \
    " " + earthquake_time('2014-06-04T11:58:58.200Z') + \
    " " + earthquake_distance("238km N of Tobelo, Indonesia") + \
    " " + earthquake_location("238km N of Tobelo, Indonesia")
print "There was a " + earthquake_size(3.3) + \
    ", " + earthquake_depth('344.32') + \
    " earthquake " + earthquake_day('2014-06-04T11:58:58.200Z') + \
    " " + earthquake_time('2014-06-04T11:58:58.200Z') + \
    " " + earthquake_distance("10km NE of Medford, Oklahoma") + \
    " " + earthquake_location("10km NE of Medford, Oklahoma")
print "There was a " + earthquake_size(6.1) + \
    ", " + earthquake_depth(5.289) + \
    " earthquake " + earthquake_day('2014-06-04T11:58:58.200Z') + \
    " " + earthquake_time('2014-06-04T11:58:58.200Z') + \
    " " + earthquake_distance("91km NE of Miches, Dominican Republic") + \
    " " + earthquake_location("91km NE of Miches, Dominican Republic")
|
# Numeric constants; values preserved from the original snippet.
num = 1
num2 = 233
num3 = 433
# BUG FIX: the original had a bare name `lulala` here, which raised
# NameError the moment the module was imported; removed as editing debris.
num4 = 2344455
num5 = 234567
|
def soma(L):
    """Return the sum of the numbers in iterable ``L`` (0 for empty input).

    Idiom fix: the builtin ``sum`` replaces the hand-rolled accumulate loop.
    """
    return sum(L)
# Quick demonstration calls for soma().
L=[1,7,2,9,15]
print(soma(L))
print(soma([7,9,12,3,100,20,4]))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# weather_for_conky.py
# same as weather.py, but small fix for static location and print to use with conky
#
# Copyright 2013 Raymond Aarseth <raymond@lappy>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# NOTE(review): removed a stray bare literal "3" that sat here; it evaluated
# to nothing useful and was almost certainly editing debris.
import requests
import time
import json
def main():
    """same as weather.py, but small fix for static location and print to use with conky"""
    # NOTE(review): placeholder Aeris API client id -- a real id must be
    # supplied for the request to succeed.
    clientid="ClientId"
    loc= "bergen,no"   # the "static location" fix mentioned in the header
    r = requests.get("http://api.aerisapi.com/observations/"+ loc +"?client_id="+clientid)
    response = r.json()
    # Python 2 print statements -- this script targets Python 2.
    print response
    ob = response['response']['ob']
    # Conky line such as "Cloudy | 7c" (capitalize() lowercases the rest).
    out = "%s | %dc" % (ob['weather'].lower(), ob['tempC'])
    out=out.capitalize()
    print out
    r.close()
# Script entry point.
if __name__ == '__main__':
    main()
|
"""
Brazil Data Cube Configuration
You can define these configurations and call using environment variable
`ENVIRONMENT`. For example: `export ENVIRONMENT=ProductionConfig`
"""
# pylint: disable=too-few-public-methods
import os
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
def get_settings(env):
    """Retrieve Config class from environment.

    Returns the pre-built configuration *instance* registered under ``env``
    in the module-level ``CONFIG`` dict, or ``None`` for an unknown name.
    """
    return CONFIG.get(env)
class Config:
    """Base configuration with default flags"""
    DEBUG = False
    TESTING = False
    CSRF_ENABLED = False
    WTF_CSRF_ENABLED = False
    # The literal fallbacks below are development defaults; override them via
    # environment variables in any real deployment.
    SECRET_KEY = os.environ.get('SECRET_KEY', 'APi-Users-123456')
    STAC_URL = os.environ.get(
        'STAC_URL', 'http://brazildatacube.dpi.inpe.br/bdc-stac/0.8.0/'
    )
    REDIS_URL = os.environ.get('REDIS_URL', 'redis://:passRedis@localhost:6381')
    REDIS_CACHE_TIME = int(
        os.environ.get('REDIS_CACHE_TIME', 300))  # time in ms -- NOTE(review): default 300 looks like seconds; confirm the unit
class ProductionConfig(Config):
    """Production Mode"""
    DEBUG = False
class DevelopmentConfig(Config):
    """Development Mode"""
    DEVELOPMENT = True
    DEBUG = True
    ENVIRONMENT = 'development'
class TestingConfig(Config):
    """Testing Mode (Continous Integration)"""
    TESTING = True
    DEBUG = True
    # Exposes the inherited secret under a short alias for test fixtures.
    key = Config.SECRET_KEY
# Registry of ready-made configuration instances, keyed by class name.
# Looked up by get_settings() via the ENVIRONMENT variable described in the
# module docstring.
CONFIG = {
    "DevelopmentConfig": DevelopmentConfig(),
    "ProductionConfig": ProductionConfig(),
    "TestingConfig": TestingConfig()
}
|
import daft
import jax
import jax.numpy as np
import mcx
import mcx.distributions as dist
from matplotlib import rc
from IPython.display import display, Math
from infer.model import Model
class NaiveBayes(Model):
    """Naive Bayes classifier.

    We note :math:`x_{jc}` the value of the j-th element of the data vector :math:`x`
    conditioned on x belonging to the class :math:`c`. The Gaussian Naive Bayes
    algorithm models :math:`x_{jc}` as:

    .. math::

        x_{jc} \\sim Normal(\\mu_{jc}, \\sigma_{jc})

    While the probability that :math:`x` belongs to the class :math:`c` is given by the
    categorical distribution:

    .. math::

        P(y=c|x_i) = Cat(\\pi_1, \\dots, \\pi_C)

    where :math:`\\pi_i` is the probability that a vector belongs to category :math:`i`.
    We assume that the :math:`\\pi_i` follow a Dirichlet distribution:

    .. math::

        \\pi \\sim Dirichlet(\\alpha)

    with hyperparameter :math:`\\alpha = [1, .., 1]`. The :math:`\\mu_{jc}`
    are sampled from a Normal distribution centred on :math:`0` with
    variance :math:`100`, and the :math:`\\sigma_{jc}` are sampled from a
    HalfNormal distribuion of variance :math:`100`:

    .. math::

        \\mu_{jc} \\sim Normal(0, 100)
        \\sigma_{jc} \\sim HalfNormal(100)

    Note that the Gaussian Naive Bayes model is equivalent to a Gaussian
    mixture with a diagonal covariance [1].

    Note
    ----
    MCX is in an integration testing phase and this example will likely not
    work as compilation may output garbage. Even if it did work we would need
    to be able to deal with the fact that `z` is a discrete random variable
    thus not sample-able by an evaluator in the HMC family.

    References
    ----------
    .. [1] Murphy, K. P. (2012). Machine learning: a probabilistic perspective.
    """

    def __init__(self, num_categories):
        # BUG FIX: the original called ``super.__init__(self)`` -- ``super``
        # itself is a type, so that raises TypeError at construction time.
        # Assumes Model.__init__ takes no extra arguments -- TODO confirm.
        super().__init__()
        self.num_categories = num_categories

    @property
    def model(self):
        """Build and return the (uncompiled) MCX model function."""
        # flake8: noqa  -- the ``<~`` lines below are MCX sampling syntax,
        # rewritten by the @mcx.model source transform, not plain Python.
        @mcx.model
        def naive_bayes(X, num_categories):
            num_predictors = np.shape(X)[1]
            num_training_samples = np.shape(X)[0]

            # Priors
            alpha = np.ones(num_categories)
            pi <~ dist.Dirichlet(alpha, shape=num_categories)
            mu <~ dist.Normal(mu=0, sigma=100, shape=(num_categories, num_predictors))
            sigma <~ dist.Exponential(100, shape=(num_categories, num_predictors))

            # Assign classes to data points
            z <~ dist.Categorical(pi, shape=num_training_samples)

            # The components are independent and normally distributed
            xi <~ dist.Normal(mu=mu[z], sd=sigma[z])
            return z

        return naive_bayes

    def fit(self, kernel=None, num_samples=1000, accelerate=False, **observations):
        """Fit the Naive Bayes model.

        The kernel is currently set to a HMC sampler for all variables; however 'z' is
        a discrete variable and this will need to be changed in the future.
        This paradigm allows to specify a default (explicit) evaluator while letting
        the user experiment with others.
        """
        # BUG FIX: ``num_categories`` was referenced as a bare name (NameError);
        # it is the instance attribute set in __init__.
        kwargs = dict({'num_categories': self.num_categories}, **observations)
        if not kernel:
            # NOTE(review): ``HMC`` is not imported in this module -- confirm
            # where the default kernel class should come from.
            kernel = HMC(30)
        trace = self._fit(kernel, num_samples, accelerate, **kwargs)
        return trace

    @property
    def math_repr(self):
        """LateX representation of the model for Jupyter notebooks."""
        representation = r"""
        \begin{align}
        \mu_{jc} & \sim \text{Normal}(0, 100) \\
        \sigma_{jc} &\sim \text{HalfNormal}(100)\\
        x_{jc} & \sim \text{Normal}(\mu_{jc}, \sigma_{jc}) \\
        \pi & \sim \text{Dirichlet}(\alpha)\\
        P(y=c|x_i) &= \text{Cat}(\pi_1, \dots, \pi_C)
        \end{align}
        """
        return display(Math(representation))

    @property
    def graph(self):
        """This is just an example, not the actual model.
        """
        rc("font", family="serif", size=12)
        rc("text", usetex=True)

        # Colors.
        p_color = {"ec": "#46a546"}
        s_color = {"ec": "#f89406"}

        pgm = daft.PGM()
        n = daft.Node("phi", r"$\phi$", 1, 3, plot_params=s_color)
        n.va = "baseline"
        pgm.add_node(n)
        pgm.add_node("speckle_coeff", r"$z_i$", 2, 3, plot_params=s_color)
        pgm.add_node("speckle_img", r"$x_i$", 2, 2, plot_params=s_color)
        pgm.add_node("spec", r"$s$", 4, 3, plot_params=p_color)
        pgm.add_node("shape", r"$g$", 4, 2, plot_params=p_color)
        pgm.add_node("planet_pos", r"$\mu_i$", 3, 3, plot_params=p_color)
        pgm.add_node("planet_img", r"$p_i$", 3, 2, plot_params=p_color)
        pgm.add_node("pixels", r"$y_i ^j$", 2.5, 1, observed=True)

        # Edges.
        pgm.add_edge("phi", "speckle_coeff")
        pgm.add_edge("speckle_coeff", "speckle_img")
        pgm.add_edge("speckle_img", "pixels")
        pgm.add_edge("spec", "planet_img")
        pgm.add_edge("shape", "planet_img")
        pgm.add_edge("planet_pos", "planet_img")
        pgm.add_edge("planet_img", "pixels")

        # And a plate.
        pgm.add_plate([1.5, 0.2, 2, 3.2], label=r"exposure $i$", shift=-0.1)
        pgm.add_plate([2, 0.5, 1, 1], label=r"pixel $j$", shift=-0.1)

        # Render and save.
        pgm.render(dpi=120)
|
import unittest
from katas.kyu_7.discover_the_original_price import discover_original_price
class DiscoverOriginalPriceTestCase(unittest.TestCase):
    """Checks discover_original_price against known sale-price/discount pairs."""

    def _assert_price(self, sale_price, discount, expected):
        # Single home for the call-and-compare pattern shared by every case.
        self.assertEqual(discover_original_price(sale_price, discount), expected)

    def test_equal_1(self):
        self._assert_price(75, 25, 100)

    def test_equal_2(self):
        self._assert_price(25, 75, 100)

    def test_equal_3(self):
        self._assert_price(75.75, 25, 101)

    def test_equal_4(self):
        self._assert_price(373.85, 11.2, 421)

    def test_equal_5(self):
        self._assert_price(458.2, 17.13, 552.91)
|
from collections import namedtuple
# Read N student records from stdin and print the mean of the MARKS column,
# zero-padded to width 5 with two decimals.
# Input: first line is the record count, second line the column names
# (must include MARKS), then one whitespace-separated record per line.
n = int(input())
student = namedtuple('student', input())
print('{:05.2f}'.format(sum(map(lambda x: int(x.MARKS), [
    student(*input().split()) for i in range(n)])) / n))
|
import xml.etree.ElementTree as ET
from django.core import serializers
from django.shortcuts import render,redirect
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from .models import Aparcamiento, AparcaSeleccionado, Comentario, Css
from django.contrib.auth import logout, login
from django.template.loader import get_template
from django.template import Context
from django.template import RequestContext
import urllib
#from __future__ import unicode_literals
#import aparcamientos.feedparser as feedparser
def userLog(request):
    """Return an HTML snippet reflecting the session state: the user name plus
    a logout link when authenticated, otherwise a login link.

    Cleanup: the original initialised a ``user = False`` local that was never
    read; it has been removed.
    """
    if request.user.is_authenticated():
        respuesta = "Logged in as " + request.user.username + ". "
        respuesta += '<a href="/logout">Logout</a>'
    else:
        respuesta = "Not logged in. " + '<a href="/login">Login</a>'
    return respuesta
def usuariosLat(request):
    """Sidebar helper: distinct users that have saved at least one parking,
    in first-seen order (links to personal pages in the side column)."""
    distinct_users = []
    for seleccion in AparcaSeleccionado.objects.all():
        owner = seleccion.usuario
        if owner not in distinct_users:
            distinct_users.append(owner)
    return distinct_users
def parsear(request):
    """Download the Madrid open-data XML feed of residents' car parks and
    store one ``Aparcamiento`` row per record; return the full queryset.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    dump; the nesting of the attribute-matching loops below is the most
    plausible reading -- confirm against the original file.  Rows get
    sequential ids from 0, so re-running overwrites earlier rows.
    """
    #d = feedparser.parse('http://datos.munimadrid.es/portal/site/egob/menuitem.ac61933d6ee3c31cae77ae7784f1a5a0/?vgnextoid=00149033f2201410VgnVCM100000171f5a0aRCRD&format=xml&file=0&filename=202584-0-aparcamientos-residentes&mgmtid=e84276ac109d3410VgnVCM2000000c205a0aRCRD&preview=full')
    munimadrid = 'http://datos.munimadrid.es/portal/site/egob/menuitem.ac61933d6ee3c31cae77ae7784f1a5a0/?vgnextoid=00149033f2201410VgnVCM100000171f5a0aRCRD&format=xml&file=0&filename=202584-0-aparcamientos-residentes&mgmtid=e84276ac109d3410VgnVCM2000000c205a0aRCRD&preview=full'
    xml = urllib.request.urlopen(munimadrid)
    tree = ET.parse(xml)
    root = tree.getroot()
    #for child in root:
    #for grandson in child:
    #respuesta += grandson.tag + str(grandson.attrib)
    #grandson = root.findall('contenido')
    #respuesta += str(grandson)
    cont = 0
    lista = []   # NOTE(review): never used below
    #for neighbor in root.findall('atributos'):
    #    for texto in neighbor.itertext():
    #        lista = lista.append(texto)
    for neighbor in root.iter('atributos'):
        # Reset the optional fields that may be absent for a record.
        email,phone,num = "", "", ""
        for filaBD in neighbor.iterfind('atributo'):
            #dic = filaBD.attrib
            for elem in filaBD.attrib:
                if elem.find('atributo'):
                    # Nested <atributo> elements carry the address block.
                    for x in filaBD.iterfind('atributo'):
                        for varsDir in x.attrib:
                            if x.attrib[varsDir]=="CLASE-VIAL": # or "NUM":
                                via = ' '.join(x.itertext())
                            elif x.attrib[varsDir]== "NOMBRE-VIA":
                                street = ' '.join(x.itertext())
                            elif x.attrib[varsDir]== "NUM":
                                num = ' '.join(x.itertext())
                            elif x.attrib[varsDir]== "CODIGO-POSTAL":
                                postal = ' '.join(x.itertext())
                            elif x.attrib[varsDir]== "LOCALIDAD":
                                city = ' '.join(x.itertext())
                            elif x.attrib[varsDir]=="LATITUD":
                                lat=' '.join(x.itertext())
                            elif x.attrib[varsDir]=="LONGITUD":
                                l = ' '.join(x.itertext())
                            elif x.attrib[varsDir]=="BARRIO":
                                bar = ' '.join(x.itertext())
                            elif x.attrib[varsDir]=="DISTRITO":
                                district = ' '.join(x.itertext())
                            elif x.attrib[varsDir]=="TELEFONO":
                                phone = ' '.join(x.itertext())
                            elif x.attrib[varsDir]=="EMAIL":
                                email = ' '.join(x.itertext())
                # Top-level attributes of the record itself.
                if filaBD.attrib[elem]=="NOMBRE":
                    name = ' '.join(filaBD.itertext())
                elif filaBD.attrib[elem]=="CONTENT-URL":
                    url = ' '.join(filaBD.itertext())
                elif filaBD.attrib[elem]=="ACCESIBILIDAD":
                    access = int(' '.join(filaBD.itertext()))
                elif filaBD.attrib[elem]=="DESCRIPCION":
                    descrip = ' '.join(filaBD.itertext())
                elif filaBD.attrib[elem]=="LOCALIZACION":
                    address2 = ' '.join(filaBD.itertext())
        address = " ".join([via, street, num, postal, city]) # + city
        # NOTE(review): fields missing from a record keep the value parsed for
        # the previous record (only email/phone/num are reset above).
        contenido = Aparcamiento(id=cont,nombre=name,url=url,dir2=address,latitud=lat,longitud=l,descripcion=descrip,accesible=access,barrio=bar,distrito=district,email=email,telf=phone)
        contenido.save()
        cont = cont +1
    lAparcaParseada = Aparcamiento.objects.all()
    return lAparcaParseada
@csrf_exempt
def mostrar_todo(request):
    """Main page: render the five most-commented car parks.

    POST can force a re-download of the open-data feed (``_submit``) or
    restrict the listing to accessible parkings.  NOTE(review): indentation
    reconstructed from a whitespace-mangled dump; branch nesting is the most
    plausible reading.
    """
    maxListar = 5            # NOTE(review): unused -- the page size is hard-coded in the slices below
    validar,acceso = 0, 0
    cruz = 'unchecked'       # accessibility checkbox state echoed to the template
    luser = []
    user = request.user.username
    regUsuarios = userLog(request)
    lAparcamientos = Aparcamiento.objects.all().order_by("-ncomment")[:5]
    titulo = " Los aparcamientos almacenados son:"
    if request.method == 'POST':
        acceso=request.POST.get('Accesible')
        if request.user.is_authenticated():
            user = request.user.username
        if '_submit' in request.POST:
            validar = 1
    # (Re)load the table when explicitly asked, or when it is still empty.
    if validar or not len(lAparcamientos):
        lAparcamientos = parsear(request)
        lAparcamientos = Aparcamiento.objects.all().order_by("-ncomment")[:5]
    #ko = Aparcamiento.objects.get(ncomment=0) #get
    #ko.delete()
    # Filter by number of comments:
    #lAparcaOrden = Aparcamiento.objects.all().order_by("-ncomment")[0:10]
    num = int(lAparcamientos[0].ncomment)   # debug: highest comment count
    print(num)
    #for aparca in lAparcaOrden:
    #num = str(1):
    #lAparcamientos = Aparcamiento.objects.filter(ncomment=num)
    #num = num - 1
    if acceso:
        # Keep only accessible parkings when the checkbox was ticked.
        lAparcamientos = Aparcamiento.objects.filter(accesible=acceso)
        cruz = 'checked'
    # Links to personal pages shown in the side column.
    luserObj = AparcaSeleccionado.objects.all()
    for a in luserObj:
        add = a.usuario
        if add not in luser:
            luser.append(add)
    plantilla = get_template("businessxhtml/index.html")
    c = Context({'name': titulo, 'listaAparca': lAparcamientos, 'users': luser, 'login': regUsuarios, 'acceso':cruz, 'usuario': user})
    return HttpResponse(plantilla.render(c))
@csrf_exempt
def aparcamientos(request):
    """List every car park; POST can filter by district or, for authenticated
    users pressing ``_submit``, save one parking into the personal selection.

    NOTE(review): indentation reconstructed from a whitespace-mangled dump.
    """
    filtro = "None"
    lDistritos = []
    lusers = usuariosLat(request)
    regUsuarios = userLog(request)
    respuesta = "Todos los aparcamientos son:"
    lAparcamientos = Aparcamiento.objects.all()
    #lDistr = Aparcamiento.objects.annotate(distrito)
    #lDistritos = Aparcamiento.objects.update("distrito")
    # Distinct district names (first-seen order) for the filter dropdown.
    for a in lAparcamientos:
        add = a.distrito
        if add not in lDistritos:
            lDistritos.append(add)
    if request.method == 'POST':
        filtro = request.POST.get('filtro')
        if filtro != "None":
            dis = request.POST['distrito']
            if dis in lDistritos:
                lAparcamientos = Aparcamiento.objects.filter(distrito=dis)
            else:
                lAparcamientos = Aparcamiento.objects.all()
        if request.user.is_authenticated():
            user = request.user.username
            if '_submit' in request.POST:
                # The selected parking id arrives as the last '='-separated
                # token of the raw request body.
                cuerpo = request.body.decode('utf-8')
                ap_id = int(cuerpo.split('=')[-1])
                lApSel = AparcaSeleccionado.objects.all() #.order_by("-ncomment")
                ko = AparcaSeleccionado.objects.filter(aparcamiento=ap_id)
                # Only save the selection once per parking.
                if not len(ko):
                    #respuesta += "Aparcamiento no almacenado"
                    info = Aparcamiento.objects.get(id=ap_id)
                    addAp = AparcaSeleccionado(usuario=user, aparcamiento=info)
                    addAp.save()
                else:
                    print(ko)
    plantilla = get_template("businessxhtml/aparcamientos.html")
    c = Context({'listaAparca': lAparcamientos, 'content':respuesta, 'login': regUsuarios, 'users': lusers, 'listadistrito':lDistritos})
    return HttpResponse(plantilla.render(c))
@csrf_exempt
def aparca_id(request, ap_id):
    """Detail page for one car park.

    GET renders the park and its comments; POST (authenticated users only)
    adds a comment and bumps the park's comment counter.
    """
    regUsuarios = userLog(request)
    lusers = usuariosLat(request)
    # The numeric id arrives as the trailing path component.
    ap_id = int(ap_id.split('/')[-1])
    respuesta = "Página de aparcamiento seleccionado"
    if request.method == 'GET':
        try:
            info = Aparcamiento.objects.get(id=ap_id)
            comentario = Comentario.objects.filter(aparcamiento=ap_id)
        except Aparcamiento.DoesNotExist:
            # NOTE(review): ``info`` stays unbound on this path, so the
            # template context below raises NameError -- confirm intended flow.
            respuesta += "Aparcamiento no almacenado"
        except Comentario.DoesNotExist:
            respuesta += "No tiene comentarios."
    elif request.method == 'POST':
        if request.user.is_authenticated():
            com = request.POST['comment']
            info = Aparcamiento.objects.get(id=ap_id)
            addcom = Comentario(contenido=com, aparcamiento=info)
            addcom.save()
            info.ncomment += 1
            # BUG FIX: the increment was never persisted -- without save() the
            # counter driving the "most commented" ordering stayed stale.
            info.save()
    enlace = request.get_host()
    comentario = Comentario.objects.filter(aparcamiento=ap_id)
    plantilla = get_template("businessxhtml/aparcamiento.html")
    c = Context({'aparca': info, 'content':respuesta,'login':regUsuarios, 'dir': enlace, 'users': lusers, 'lcomment':comentario})
    return HttpResponse(plantilla.render(c))
# TODO notes: username; user list; login should redirect to the main page; district filter; title?; accessibility; use get_path in templates; respuesta
@csrf_exempt
def usuario(request, recurso):
    """Personal page for the user named in the URL (``recurso``).

    GET lists up to five of the user's selected parkings.  POST
    (authenticated) stores CSS preferences (background, font size, title)
    and, when ``_submit`` is present, deletes one selected parking.
    NOTE(review): indentation reconstructed from a whitespace-mangled dump;
    the nesting of the ``_submit`` branch is the most plausible reading.
    """
    regUsuarios = userLog(request)
    lusers = usuariosLat(request)
    cuerpo = request.body
    user = recurso.split('/')[0]
    #user = request.user.username
    respuesta = "Página de usuario"
    if request.method == 'GET':
        try:
            lAparcaUser = AparcaSeleccionado.objects.filter(usuario=recurso)[:5]
        except AparcaSeleccionado.DoesNotExist:
            respuesta += recurso + " no tiene aparcamientos seleccionados"
    elif request.method == 'POST':
        lAparcaUser = AparcaSeleccionado.objects.filter(usuario=recurso)[:5]
        if request.user.is_authenticated():
            user = request.user.username
            background = request.POST['fondo']
            size = request.POST['letraTam']
            title = request.POST['titulo']
            try:
                # Update the stored preferences when the user already has a row.
                cssObj = Css.objects.get(usuario=user)
                if user == cssObj.usuario:
                    cssObj = Css.objects.get(id=cssObj.id)
                    cssObj.tamLetra=size
                    cssObj.colorFondo=background
                    cssObj.titulo=title
                    #contenido = cssObj(tamLetra=size, colorFondo=background, titulo=title)
                    cssObj.save()
            except:
                # NOTE(review): bare except is meant to catch Css.DoesNotExist
                # and create the first row, but it also hides real DB errors.
                t= "Pagina de " + user
                #cssObj = Css.objects.get(usuario=user)
                contenido = Css(usuario=user, tamLetra=size, colorFondo=background ,titulo=t)
                contenido.save()
            if '_submit' in request.POST:
                # Parking id to remove arrives as the last '='-separated token.
                cuerpo = request.body.decode('utf-8')
                ap_id = int(cuerpo.split('=')[-1])
                #lApSel = AparcaSeleccionado.objects.all() #.order_by("-ncomment")
                ko = AparcaSeleccionado.objects.get(aparcamiento=ap_id) #get
                ko.delete()
    # NOTE(review): lAparcaUser is sliced to five items above, so this branch
    # looks unreachable -- confirm the intended pagination.
    if len(lAparcaUser) > 5:
        lAparcaUser = AparcaSeleccionado.objects.filter(usuario=recurso)[5:10]
    plantilla = get_template("businessxhtml/usuario.html")
    c = Context({'listaAparca': lAparcaUser, 'content':respuesta, 'login': regUsuarios, 'users': lusers, 'usuario':user})
    return HttpResponse(plantilla.render(c))
def user_xml(request, recurso):
    """Serialize one user's selected car parks to XML.

    Fixes: the original opened two file handles without ``with`` (leaked on
    error) and re-read the just-written file from disk.  The on-disk
    ``usuario.xml`` copy is still produced for parity with the original
    behavior, but the serialized string is returned directly.
    """
    user = recurso.split('/')[0]
    lAparcaUser = AparcaSeleccionado.objects.filter(usuario=user)
    hijos = serializers.serialize("xml" ,lAparcaUser)
    # ``with`` guarantees the handle is closed even if the write fails.
    with open('usuario.xml', "w") as f:
        f.write(hijos)
    return HttpResponse(hijos, content_type='text/xml')
@csrf_exempt
def css(request):
    """Serve the per-user stylesheet rendered from the Css table."""
    if request.user.is_authenticated():
        user = request.user.username
    # NOTE(review): ``user`` is only bound for authenticated sessions; an
    # anonymous request raises NameError on the next line -- confirm this
    # view is never reachable without login.
    css = Css.objects.get(usuario=user)
    plantilla = get_template("businessxhtml/style.css")
    c = Context({'varsCss': css.colorFondo })
    # NOTE(review): redirect() builds a response object that is discarded
    # here, so this call has no effect.
    redirect('/style.css')
    return HttpResponse(plantilla.render(c), content_type="text/css")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.