#!/usr/bin/python3
# python3 Othello.py
#
# COMP3270 Mini-Project
# Yan Kai
# UID: 3035141231
#
"""
This is an Othello game implemented with Python3.
The AI is based on the minimax search algorithm with alpha-beta pruning.
A history table is introduced to accelerate searching.
The GUI is implemented using Tkinter
"""
import matplotlib.pyplot as plt
import numpy as np
from tkinter import *
import itertools
import copy
import random
import math
from counter import *
import history
Winrecord = []
WINREWARD = 10
LOSEREWARD = -10
WINLIST = []
SIZE = 40 # UI: size of each square
NUMBER = 8 # 8*8 board
COLOR = {1:'black', -1:'white'} # colors of players
BG_COLOR = 'lightgray' # UI: background color
P_SIZE = SIZE - 6 # UI: size of each piece
C_SIZE = SIZE - 2 # UI: size of the canvas
P_POS = C_SIZE - P_SIZE # UI: position of the piece's top-left corner
move_table = {} # recording moves that have already been explored
WEIGHT = 3
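# Positional weights for the 8x8 board: corners (15) are most valuable because
# they can never be flipped; squares adjacent to corners (the -7/-5/-3 entries)
# are risky since occupying them often hands the corner to the opponent.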
VALUEMATRIX = [15,-3, 8, 5, 5, 8,-3,15,
-3,-7,-5,-1,-1,-5,-7,-3,
8,-5, 3, 3, 3, 3,-5, 8,
5,-1, 3, 0, 0, 3,-1, 5,
5,-1, 3, 0, 0, 3,-1, 5,
8,-5, 3, 3, 3, 3,-5, 8,
-3,-7,-5,-1,-1,-5,-7,-3,
15,-3, 8, 5, 5, 8,-3,15]
# a sugar providing a generator of (i,j) pairs
def multirange(arg):
if type(arg) == int:
return itertools.product(range(arg), range(arg))
return itertools.product(arg, arg)
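# e.g. multirange(2) yields (0,0),(0,1),(1,0),(1,1), while
# multirange([0,NUMBER-1]) yields only the four corner coordinates.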
def _get_count(state,action):
newstate = State(state)
color = getcolor(state)
nextstate = newstate.update(action, color)
count = nextstate.get_count()
return (count[color]+1)/(count[-color]+1)
def _get_mobility(state,action):
newstate = State(state)
color = getcolor(state)
nextstate = newstate.update(action,color)
return (len(nextstate.get_moves(-color,False)) + 1) / (len(nextstate.get_moves(color,False)) + 1)
def _get_stability(state,action):
newstate = State(state)
nextstate = newstate.update(action, -1)
stability = 0
for pos in multirange([0,NUMBER-1]):
stability += nextstate.get(pos)
if nextstate.is_valid(pos, 1):
stability += 1
elif nextstate.is_valid(pos, -1):
stability -= 1
return stability
def _get_position(state,action):
newstate = State(state)
color = getcolor(state)
nextstate = newstate.update(action, color)
count = nextstate.get_count()
l = sum(nextstate._value, [])
s = sum([l[i]*VALUEMATRIX[i] for i in range(64)])
return color*s/(count[color]+count[-color])
Func = [_get_count,_get_mobility,_get_position,_get_stability]
class Random(object):
def __init__(self, colorIndex, depth=4):
self._colorIndex = colorIndex # indicating black or white
self._depth = depth # depth of the game tree search
# get the best move for the current state
def get_best_move(self, state):
actions = state.get_moves(self._colorIndex)
return random.choice(actions)
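# Infer whose turn it is from the parity of the piece count: the game starts
# with 4 pieces and black (1) moves first, so an even count means it is
# black's turn (assuming no passes have occurred).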
def getcolor(state):
l = sum(state._value, [])
count = 64
for each in l:
if each == 0:
count -= 1
if count % 2 == 0:
return 1
else:
return -1
# the artificial agent
class AI(object):
def __init__(self, colorIndex, depth=4):
self._colorIndex = colorIndex # indicating black or white
self._depth = depth # depth of the game tree search
# get the best move for the current state
def get_best_move(self, state):
r = random.random()
if r < 0.3:
return random.choice(state.get_moves(self._colorIndex))
move = self.alpha_beta(state, self._depth, -math.inf, math.inf, self._colorIndex)
return move
# minimax algorithm with alpha-beta pruning
def alpha_beta(self, state, depth, alpha, beta, colorIndex):
        if depth == 0 or not state.get_moves(colorIndex):
            return self.evaluation(state)
# if the ai self plays at this node
if colorIndex == self._colorIndex:
move = None # next move with the maximum benefit
for pos in state.get_moves(colorIndex):
child = State(state).update(pos, colorIndex)
                value = self.alpha_beta(child, depth-1, alpha, beta, -colorIndex)
if alpha < value:
alpha = value
move = pos
if beta <= alpha:
break
return alpha if depth != self._depth else move
# if the adversary plays at this node
for pos in state.get_moves(colorIndex):
child = State(state).update(pos, colorIndex)
beta = min(beta, self.alpha_beta(child, depth-1, alpha, beta, -colorIndex))
if beta <= alpha:
break
return beta
# evaluation function of the given state
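    # It mixes three heuristics: a disc-count ratio (penalized early in the
    # game, rewarded once fewer than 20 squares are empty), a mobility ratio
    # of legal moves, and corner stability, which carries the largest weight.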
def evaluation(self, state):
count = state.get_count()
imparity = (count[1]+1)/(count[-1]+1) * (1 if count[0] < 20 else -1)
mobility = (len(state.get_moves(1,count[0]%2==0)) + 0.01) \
/ (len(state.get_moves(-1,count[0]%2==1)) + 0.01)
stability = 0
for pos in multirange([0,NUMBER-1]):
stability += state.get(pos)
if state.is_valid(pos, 1):
stability += 1
elif state.is_valid(pos, -1):
stability -= 1
return (mobility + 10*stability + imparity) * self._colorIndex - (64 if count[self._colorIndex] == 0 else 0)
class Qlearning(object):
def __init__(self, colorIndex, discount = 0.8, alpha = 0.02, eps = 0.3):
self._colorIndex = colorIndex # indicating black or white
self.qvalue = Counter()
self.discount = discount
self.alpha = alpha
self.epsilon = eps
def getQvalue(self,state,action):
l = sum(state._value, [])
value = self.qvalue[(tuple(l), action)]
if value == 0:
newstate = State(state)
nextstate = newstate.update(action, self._colorIndex)
return self.evaluation(nextstate)
else:
return value
def computeValueFromQValues(self, state):
actions = state.get_moves(self._colorIndex)
if len(actions) == 0:
return 0
value = -math.inf
for action in actions:
v = self.getQvalue(state,action)
if v > value:
value = v
return value
# get the best move for the current state
def get_best_move(self, state):
actions = state.get_moves(self._colorIndex)
r = random.random()
if r < self.epsilon:
return random.choice(actions)
value = -math.inf
bestAction = None
for action in actions:
v = self.getQvalue(state,action)
if v > value:
value = v
bestAction = action
newstate = State(state)
nextstate = newstate.update(bestAction, self._colorIndex)
self.update(state,bestAction,nextstate,self.evaluation(nextstate))
return bestAction
def update(self,state,action,nextState,reward):
sample = reward + self.discount * self.computeValueFromQValues(nextState)
self.qvalue[(tuple(sum(state._value, [])), action)] = (1-self.alpha)*self.getQvalue(state,action)+ self.alpha * sample
# evaluation function of the given state
def evaluation(self, state):
count = state.get_count()
imparity = (count[1]+1)/(count[-1]+1) * (1 if count[0] < 20 else -1)
mobility = (len(state.get_moves(1,count[0]%2==0)) + 0.01) \
/ (len(state.get_moves(-1,count[0]%2==1)) + 0.01)
stability = 0
for pos in multirange([0,NUMBER-1]):
stability += state.get(pos)
if state.is_valid(pos, 1):
stability += 1
elif state.is_valid(pos, -1):
stability -= 1
return (mobility + 10*stability + imparity) * self._colorIndex - (64 if count[self._colorIndex] == 0 else 0)
class anotherQlearning(object):
def __init__(self, colorIndex, discount = 0.8, alpha = 0.2, eps = 0):
self._colorIndex = colorIndex # indicating black or white
self.qvalue = history.loadQvalue()
#self.qvalue = Counter()
self.discount = discount
self.alpha = alpha
self.epsilon = eps
self.record = []
def getcolor(self,state):
l = sum(state._value, [])
count = 64
for each in l:
if each == 0:
count -= 1
if count % 2 == 0:
return 1
else:
return -1
def getQvalue(self,state,action):
l = sum(state._value, [])
value = self.qvalue[(tuple(l), action)]
return value
def computeValueFromQValues(self, state):
color = self.getcolor(state)
actions = state.get_moves(color)
if len(actions) == 0:
return 0
value = -math.inf
for action in actions:
v = self.getQvalue(state,action)
if v > value:
value = v
return value
# get the best move for the current state
def get_best_move(self, state):
        actions = state.get_moves(self._colorIndex)
        if len(actions) == 0:
            print("no action can be made")
            return None
        r = random.random()
        if r < self.epsilon:
            return random.choice(actions)
        value = -math.inf
        bestAction = None
for action in actions:
v = self.getQvalue(state,action)
if v > value:
value = v
bestAction = action
self.recordMove(state,bestAction)
#self.update(state,bestAction,nextstate)
return bestAction
def update(self,reward):
for each in self.record:
state = each[0]
action = each[1]
nextState = each[2]
if each == self.record[-1]:
self.qvalue[(tuple(sum(state._value, [])), action)] = reward
continue
#sample = reward + self.discount * self.computeValueFromQValues(nextState)
sample = self.discount * self.computeValueFromQValues(nextState) * self._colorIndex * self.getcolor(nextState)
self.qvalue[(tuple(sum(state._value, [])), action)] = (1-self.alpha)*self.getQvalue(state,action)+ self.alpha * sample
self.record = []
# evaluation function of the given state
def evaluation(self, state):
count = state.get_count()
imparity = (count[1]+1)/(count[-1]+1) * (1 if count[0] < 20 else -1)
mobility = (len(state.get_moves(1,count[0]%2==0)) + 0.01) \
/ (len(state.get_moves(-1,count[0]%2==1)) + 0.01)
stability = 0
for pos in multirange([0,NUMBER-1]):
stability += state.get(pos)
if state.is_valid(pos, 1):
stability += 1
elif state.is_valid(pos, -1):
stability -= 1
return (mobility + 10*stability + imparity) * self._colorIndex - (64 if count[self._colorIndex] == 0 else 0)
def recordMove(self,state,action):
newstate = State(state)
nextState = newstate.update(action, self._colorIndex)
self.record.append([state,action,nextState])
class approxQlearning(object):
def __init__(self, colorIndex, discount = 0.7, alpha = 0.01, eps = 0.5):
self._colorIndex = colorIndex # indicating black or white
'''self.qvalue = Counter()'''
self.discount = discount
self.alpha = alpha
self.epsilon = eps
self.weight = Counter()
#self.weight = history.loadQvalue()
self.record = []
self.iter = 0
def getQvalue(self,state,action):
'''l = sum(state._value, [])'''
'''value = 0
for i in range(WEIGHT):
value += self.weight[i]*(Func[i](state,action))
return value'''
newstate = State(state)
nextstate = newstate.update(action, self._colorIndex)
value = 0
for i in range(WEIGHT):
value += self.weight[i]*(Func[i](state,action))
return (0.5)*self.evaluation(nextstate) + (0.5)*value
'''value = self.qvalue[(tuple(l), action)]
if value == 0:
newstate = State(state)
nextstate = newstate.update(action, self._colorIndex)
return self.evaluation(nextstate)
else:
return value'''
def computeValueFromQValues(self, state):
color = getcolor(state)
actions = state.get_moves(color)
if len(actions) == 0:
return 0
value = -math.inf
for action in actions:
v = self.getQvalue(state,action)
if v > value:
value = v
return value
# get the best move for the current state
def get_best_move(self, state):
actions = state.get_moves(self._colorIndex)
r = random.random()
if r < self.epsilon:
return random.choice(actions)
value = -math.inf
bestAction = None
for action in actions:
v = self.getQvalue(state,action)
if v > value:
value = v
bestAction = action
#newstate = State(state)
#nextstate = newstate.update(bestAction, self._colorIndex)
#self.recordMove(state,bestAction,nextstate)
self.recordMove(state,bestAction)
return bestAction
def update(self,reward):
'''difference = reward + self.discount*self.computeValueFromQValues(nextState)-self.getQvalue(state,action)
self.qvalue[(tuple(sum(state._value, [])), action)] = self.getQvalue(state,action)+ self.alpha * difference
'''
num = len(self.record)
for each in self.record[::-1]:
state = each[0]
action = each[1]
nextState = each[2]
if each == self.record[-1]:
difference = reward-self.getQvalue(state,action)
else:
difference = self.discount*self.computeValueFromQValues(nextState)* self._colorIndex * getcolor(nextState)-self.getQvalue(state,action)
for i in range(WEIGHT):
self.weight[i] = self.weight[i]+self.alpha*difference*(Func[i](state,action))
s = 0
for i in range(WEIGHT):
s += self.weight[i]
for i in range(WEIGHT):
self.weight[i] /=s
self.record = []
'''sample = reward + self.discount * self.computeValueFromQValues(nextState)
self.qvalue[(tuple(sum(state._value, [])), action)] = (1-self.alpha)*self.getQvalue(state,action)+ self.alpha * sample
'''
def recordMove(self,state,action):
newstate = State(state)
nextState = newstate.update(action, self._colorIndex)
self.record.append([state,action,nextState])
'''newstate = State(state)
nextState = newstate.update(action, self._colorIndex)
reward = self.evaluation(nextState)
difference = reward + self.discount*self.computeValueFromQValues(nextState)-self.getQvalue(state,action)
for i in range(WEIGHT):
self.weight[i] = self.weight[i]+self.alpha*difference*(Func[i](state,action))'''
# evaluation function of the given state
def evaluation(self, state):
count = state.get_count()
imparity = (count[1]+1)/(count[-1]+1) * (1 if count[0] < 20 else -1)
mobility = (len(state.get_moves(1,count[0]%2==0)) + 0.01) \
/ (len(state.get_moves(-1,count[0]%2==1)) + 0.01)
stability = 0
for pos in multirange([0,NUMBER-1]):
stability += state.get(pos)
if state.is_valid(pos, 1):
stability += 1
elif state.is_valid(pos, -1):
stability -= 1
return (mobility + 10*stability + imparity) * self._colorIndex - (64 if count[self._colorIndex] == 0 else 0)
# state of the board
class State(object):
def __init__(self, state=None):
if state:
self._value = copy.deepcopy(state._value)
else:
# the initial state of an Othello game
self._value = [[0 for i in range(NUMBER)] for j in range(NUMBER)]
for i,j in multirange([NUMBER//2-1, NUMBER//2]):
self._value[i][j] = -1 if i == j else 1
def set(self, pos, colorIndex):
self._value[pos[0]][pos[1]] = colorIndex
def get(self, pos):
return self._value[pos[0]][pos[1]]
# get all valid moves for the player in this state
def get_moves(self, colorIndex, insert=True):
key = str(self._value)+str(colorIndex)
if key in move_table:
return move_table[key]
moves = []
for i,j in multirange(NUMBER):
if self.is_valid((i,j), colorIndex):
moves.append((i,j))
if insert:
move_table[key] = moves
return moves
# check whether a square is valid for the player
def is_valid(self, pos, colorIndex):
if self._value[pos[0]][pos[1]] != 0:
return False
for i,j in (-1,-1),(-1,0),(-1,1),(0,-1),(0,1),(1,-1),(1,0),(1,1):
x = i+pos[0]
y = j+pos[1]
if x<0 or x>=NUMBER or y<0 or y>=NUMBER or self._value[x][y] != -colorIndex:
continue
x += i
y += j
while 0<=x<NUMBER and 0<=y<NUMBER and self._value[x][y] != 0:
if self._value[x][y] == colorIndex:
return True
x += i
y += j
return False
# reverse all the pieces that shall be reversed
def reverse(self, pos, colorIndex):
for i,j in (-1,-1),(-1,0),(-1,1),(0,-1),(0,1),(1,-1),(1,0),(1,1):
x = i+pos[0]
y = j+pos[1]
if x<0 or x>=NUMBER or y<0 or y>=NUMBER or self._value[x][y] != -colorIndex:
continue
x += i
y += j
while 0<=x<NUMBER and 0<=y<NUMBER and self._value[x][y] != 0:
if self._value[x][y] == -colorIndex:
x += i
y += j
continue
x -= i
y -= j
while self._value[x][y] != colorIndex:
self._value[x][y] *= -1
x -= i
y -= j
break
# get the number of black, white, and empty
def get_count(self):
count = {1:0, 0:0, -1:0}
for i,j in multirange(NUMBER):
count[self._value[i][j]] += 1
return count
# update the state after a piece is placed on a square
def update(self, pos, colorIndex):
self.set(pos, colorIndex)
self.reverse(pos, colorIndex)
return self
# the controller managing the progress of the game and the players' behaviour
class Controller(object):
def __init__(self):
self._player = 1 # indicating the current player
self._stopped = False # whether the game is stopped
self._ai = {1:None, -1:None} # AI agents of black and white
self._state = State() # the current state
self.WINBLACK = 0
self.WINWHITE = 0
self.winrate = []
self.wincount = [0,0]
# stop the current game and back to the initial panel
def restart(self):
self._stopped = True
initPanel.pack()
board.pack_forget()
bottomFrame.pack_forget()
# initialize a new game
def init_game(self, mode):
total = 0
'''if mode == 3:
self._ai[-1] = AI(-1,3)
self._ai[1] = Random(1)
for i in range(1,501):
if i % 50 == 0:
#print(self.WINWHITE, self.WINBLACK)
total += self.WINWHITE + self.WINBLACK
print((self.WINWHITE + self.WINBLACK)/100)
self.winrate.append((self.WINWHITE + self.WINBLACK)/100)
self.WINWHITE = 0
self.WINBLACK = 0
#print(total)
#self._ai[-1].epsilon = 0.5-(0.5/2500*i)
#self._ai[-1].alpha = 0.2/(math.sqrt(i))
self.training(1)
self.training(-1)
self.plot()'''
if mode == 3:
self._ai[1] = AI(1,2)
self._ai[-1] = anotherQlearning(-1)
for i in range(1,1000):
if i % 50 == 0:
#print(self.WINWHITE, self.WINBLACK)
total += self.WINWHITE + self.WINBLACK
print((self.WINWHITE + self.WINBLACK)/100,',')
self.winrate.append((self.WINWHITE + self.WINBLACK)/100)
self.wincount[0]+=self.WINWHITE
self.wincount[1]+=self.WINBLACK
self.WINWHITE = 0
self.WINBLACK = 0
#print(total)
#self._ai[-1].epsilon = 0
self._ai[-1].epsilon = math.exp(-0.017*i/25)
#self._ai[-1].epsilon = 0.5/math.sqrt(i-1000)
#self._ai[-1].alpha = 0.1/math.sqrt(i-1000)
self.training(1)
self.training(-1)
#print(self._ai[-1].weight)
#self.plot()
history.saveNewQvalue(self._ai[-1].qvalue)
print("end",self.wincount)
elif mode == 4:
self._ai[1] = AI(1,2)
self._ai[-1] = approxQlearning(-1)
            for i in range(1,101):
                if i % 10 == 0:
                    print(self.wincount)
                #self._ai[-1].epsilon = 0.7/(math.sqrt(i))
                self._ai[-1].alpha = 0.2/(math.sqrt(i))
                self.training(1)
                self.training(-1)
            print(self.wincount, self._ai[-1].weight)
            history.saveNewQvalue(self._ai[-1].weight)
else:
self._player = -1
self._stopped = False
self._ai[1] = AI(1,1) if mode == -1 or mode == 2 else None
self._ai[-1] = anotherQlearning(-1) if mode > 0 else None
'''self._ai[1] = AI(1,2) if mode == -1 or mode == 2 else None
self._ai[-1] = anotherQlearning(-1) if mode > 0 else None'''
self._state = State()
initPanel.pack_forget()
board.pack()
bottomFrame.pack(fill=X, expand=True)
for i in -1,1:
label[i].icon.itemconfig(label[i].count, text='2')
for i,j in multirange(NUMBER):
if squares[i][j].winfo_children():
squares[i][j].winfo_children().pop().destroy()
if self._state.get((i,j)) != 0:
self.place_piece((i,j), self._state.get((i,j)))
self.switch_player()
if self._ai[1]: # if black is an AI, let the AI move
pos = self._ai[self._player].get_best_move(self._state)
board.after(1000, self.move, pos)
else:
self.enable_move() # enable human player to move
def training(self,color):
'''self._ai[1] = AI(1,2)
self._ai[-1] =
self._ai[1]._colorIndex = -color
self._ai[-1]._colorIndex = color'''
self._ai[1], self._ai[-1] = self._ai[-1], self._ai[1]
self._ai[1]._colorIndex = 1
self._ai[-1]._colorIndex = -1
self._player = -1
self._stopped = False
self._state = State()
initPanel.pack_forget()
board.pack()
bottomFrame.pack(fill=X, expand=True)
for i in -1,1:
label[i].icon.itemconfig(label[i].count, text='2')
for i,j in multirange(NUMBER):
if squares[i][j].winfo_children():
squares[i][j].winfo_children().pop().destroy()
if self._state.get((i,j)) != 0:
self.place_piece((i,j), self._state.get((i,j)))
self.switch_player()
pos = self._ai[self._player].get_best_move(self._state)
self.move(pos, 1)
count = self.count()
print(color,count[1], count[-1])
if count[color] >= count[-color]:
if count[color]>count[-color]:
Winrecord.append(self._ai[color].record)
print(count[color])
if color == 1:
self.WINBLACK += 1
else:
self.WINWHITE += 1
self._ai[color].update(count[color])
else:
print(-count[-color])
self._ai[color].update(-count[-color])
def plot(self):
X = np.linspace(0, 10000, 100, endpoint=False)
#plt.plot(X, self.rew)
#plt.plot(X, self.reg[1:])
plt.plot(X, self.winrate,label="greedy")
plt.axis([0,10000,0,1])
plt.legend()
plt.show()
# perform a move
def move(self, pos, mode = 0):
if mode == 0:
self.disable_move() # disable the human player from clicking the board
self.place_piece(pos, self._player) # place the piece on the square
self.reverse_pieces() # reverse relevant pieces
self.count() # count and print the numbers of pieces
self.switch_player() # switch to the next player
board.after(500, self.check) # check whether the player is movable after 0.5s
else:
self._state.update(pos, self._player)
self.switch_player()
self.check(1)
# place the piece on the board
def place_piece(self, pos, colorIndex):
piece = Canvas(squares[pos[0]][pos[1]], width=C_SIZE, height=C_SIZE, bd=0, highlightthickness=0, relief='ridge', bg=BG_COLOR)
piece.item = piece.create_oval(P_POS, P_POS, P_SIZE , P_SIZE, fill=COLOR[colorIndex])
piece.place(x=1, y=1)
self._state.update(pos, colorIndex)
# reverse the pieces on the board
def reverse_pieces(self):
for i,j in multirange(NUMBER):
colorIndex = self._state.get((i,j))
if colorIndex != 0:
piece = squares[i][j].winfo_children().pop()
piece.itemconfig(piece.item, fill=COLOR[colorIndex])
# count and print the numbers of pieces
def count(self):
count = self._state.get_count()
for i in -1,1:
label[i].icon.itemconfig(label[i].count, text=str(count[i]))
return count
# switch the current player
def switch_player(self):
self._player *= -1
label[self._player].var.set('Move')
label[-self._player].var.set('')
# check the game status, including asking the player to move
def check(self,mode = 0):
if self._stopped:
return
for i in range(2):
if not self.exist_move():
label[self._player].var.set('Pass')
self.switch_player()
elif self._ai[self._player]: # ask AI to move
pos = self._ai[self._player].get_best_move(self._state)
self.move(pos,mode)
return
else:
self.enable_move() # enable human player to move
return
# game over, check who is the winner
count = self.count()
if count[1] == count[-1]:
for i in -1,1:
label[i].var.set('Draw')
else:
winner = 1 if count[1] > count[-1] else -1
label[winner].var.set('Win!')
label[-winner].var.set('Lose!')
# whether there exist moves on the board
def exist_move(self):
moves = self._state.get_moves(self._player)
for i,j in multirange(NUMBER):
squares[i][j].valid = False
for i,j in moves:
squares[i][j].valid = True
return moves != []
# disable the human player from clicking the board
def disable_move(self):
for i,j in multirange(NUMBER):
squares[i][j].unbind('<Button-1>')
squares[i][j].config(cursor='')
# enable the human player to click the valid squares
def enable_move(self):
for i,j in multirange(NUMBER):
if self._state.is_valid((i,j), self._player):
squares[i][j].bind('<Button-1>', lambda event, pos=(i,j): self.move(pos))
                squares[i][j].config(cursor='hand2')
controller = Controller()
#
# Setup the basic UI
#
win = Tk()
win.title('Othello Game')
win.geometry('+300+100')
board = Frame(win, width=NUMBER*SIZE+1, height=NUMBER*SIZE+1)
squares = [[Label(board, relief=SOLID, borderwidth=1, highlightbackground='black', bg=BG_COLOR) \
for j in range(NUMBER)] for i in range(NUMBER)]
for i,j in multirange(NUMBER):
squares[i][j].place(x=SIZE*j, y=SIZE*i, width=SIZE+1, height=SIZE+1)
bottomFrame = Frame(win, relief=RAISED, height=SIZE, borderwidth=1)
button = Button(bottomFrame, width=6, relief=RAISED, text='Restart', command=controller.restart)
button.pack(side=TOP, padx=2, pady=0.55*SIZE-15)
label = {}
for i in -1,1:
label[i] = Label(bottomFrame)
label[i].place(relx=(1-i)*3/8, y=0, width=2*SIZE, height=SIZE)
label[i].icon = Canvas(label[i], width=C_SIZE, height=C_SIZE, bd=0, highlightthickness=0, relief='ridge')
label[i].icon.create_oval(P_POS, P_POS, P_SIZE , P_SIZE, fill=COLOR[i])
label[i].icon.place(relx=(1-i)/4, y=0)
label[i].count = label[i].icon.create_text(SIZE//2-1, SIZE//2, fill=COLOR[-i], text='')
label[i].var = StringVar()
label[i].text = Label(label[i], textvariable=label[i].var)
label[i].text.place(relx=(i+1)/4, y=0, width=SIZE, height=SIZE)
initPanel = Frame(win, width=NUMBER*SIZE+1, height=(NUMBER+1)*SIZE+4)
option = {}
optionText = {1:'Black vs Computer', -1:'White vs Computer',
0:'Player vs Player', 2:'Computer vs Computer',
3:'Training'}
optionRely = {1:0.13, -1:0.33, 0:0.53, 2:0.73, 3:0.93}
for i in -1,0,1,2,3:
option[i] = Label(initPanel)
option[i].icon = Canvas(option[i], width=C_SIZE, height=C_SIZE, bd=0, highlightthickness=0, relief='ridge')
option[i].icon.place(x=0, y=0)
option[i].button = Button(option[i], width=16, relief=RAISED, text=optionText[i], command=lambda i=i: controller.init_game(i))
option[i].button.pack(side=RIGHT)
option[i].place(relx=5/32, rely=optionRely[i], width=5.5*SIZE, height=SIZE+1)
if abs(i) == 1:
option[i].icon.create_oval(P_POS, P_POS, P_SIZE , P_SIZE, fill=COLOR[i])
elif i >= 3:
option[i].icon.create_oval(P_POS-2, P_POS-2, P_SIZE-7 , P_SIZE-7, fill=COLOR[1])
option[i].icon.create_oval(P_POS+6, P_POS+6, P_SIZE , P_SIZE, fill=COLOR[-1])
else:
option[i].icon.create_oval(P_POS-2, P_POS-2, P_SIZE-7 , P_SIZE-7, fill=COLOR[i-1])
option[i].icon.create_oval(P_POS+6, P_POS+6, P_SIZE , P_SIZE, fill=COLOR[1-i])
if __name__ == "__main__":
controller.restart()
win.mainloop()
|
# -*- coding: utf-8 -*-
import scrapy
class BaiduSpider(scrapy.Spider):
name = 'baidu'
allowed_domains = ['www.baidu.com']
start_urls = ['http://www.baidu.com/']
custom_settings = {
'DEFAULT_REQUEST_HEADERS': {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) ' +
'AppleWebKit/537.36 (KHTML, like Gecko) ' +
'Chrome/75.0.3770.142 Safari/537.36'
}
}
def __init__(self, category=None, *args, **kwargs):
super(BaiduSpider, self).__init__(*args, **kwargs)
self.category = category
self.logger.info(self.category)
def parse(self, response):
self.logger.info(self.category)
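# The spider argument can be supplied on the command line, e.g. (with a
# hypothetical category value):
#   scrapy crawl baidu -a category=news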
|
from ivis import Ivis
from sklearn import datasets
from sklearn.preprocessing import MinMaxScaler
from ivis_animate import IvisAnimate
digits = datasets.load_digits()
X = digits.data
X = MinMaxScaler().fit_transform(X)
model = Ivis(embedding_dims=2, k=32,
epochs=1)
animation = IvisAnimate(model, frames=500).animate(X, y=digits.target,
c=digits.target)
animation.save('digits.mp4')
|
#! /usr/bin/python
from logging import getLogger, ERROR
getLogger("scapy.runtime").setLevel(ERROR)
from scapy.all import *
import sys
from datetime import datetime
from time import strftime
try:
target = raw_input("[*] Enter Target IP Address: ")
min_port = raw_input("[*] Enter Minimum Port Number: ")
max_port = raw_input("[*] Enter Maximum Port Number: ")
try:
if int(min_port) >= 0 and int(max_port) >= 0 and int(max_port) >= int(min_port):
pass
else:
print "\n[!] Invalid Range of Ports"
print "[!] Exiting..."
sys.exit(1)
except Exception:
print "\n[!] Invalid Range of Ports"
print "[!] Exiting..."
sys.exit(1)
except KeyboardInterrupt:
print "\n[*] User Requested Shutdown..."
print "[*] Exiting..."
sys.exit(1)
ports = range(int(min_port), int(max_port) + 1)
start_clock = datetime.now()
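# TCP flag bitmasks: SYN|ACK == 0x12, RST|ACK == 0x14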
SYNACK = 0x12
RSTACK = 0x14
def checkhost(ip):
conf.verb = 0
try:
ping = sr1(IP(dst = ip)/ICMP())
print "\n[*] Target is Up, Beginning Scan..."
except Exception:
print "\n[!] Couldn't Resolve Target"
print "[!] Exiting..."
sys.exit(1)
def scanport(port):
srcport = RandShort()
conf.verb = 0
SYNACKpkt = sr1(IP(dst = target)/TCP(sport = srcport, dport = port, flags = "S"))
pktflags = SYNACKpkt.getlayer(TCP).flags
    # reset the half-open connection so it is not left dangling on the target
    RSTpkt = IP(dst = target)/TCP(sport = srcport, dport = port, flags = "R")
    send(RSTpkt)
    return pktflags == SYNACK
checkhost(target)
print "[*] Scanning Started at " + strftime("%H:%M:%S") + "!\n"
for port in ports:
status = scanport(port)
if status == True:
print "Port " + str(port) + ": Open"
stop_clock = datetime.now()
total_time = stop_clock - start_clock
print "\n[*] Scanning Finished!"
print "[*] Total Scan Duration: " + str(total_time)
|
# Reading
with open('pessoas.csv', 'r') as f:
# print(f.readlines())
headers = f.readline().split(';')
print(headers)
selecionados = []
for line in f:
data = line.split(';')
op = input(f'Deseja selecionar a pessoa {data[0]}? (s\\n) ')
if op.lower() == 's':
selecionados.append('{};{}'.format(data[0], data[2]))
# for i, data in enumerate(line.split(';')):
# print(f'{headers[i]}: {data}')
# Writing
with open('pessoas_out.csv', 'w') as fout:
# Header
fout.write('Nome;E-mail\n')
for selecionado in selecionados:
fout.write(selecionado)
fout.write('\n')
|
intro_and_difficulty_prompt = """\nHello! This is a fill-in-the-blanks style American history trivia game.\n
Please select a difficulty by typing in easy, medium, or hard. \n\n"""
easy_test = """Originally, America had __1__ colonies. The 16th president
of United States was __2__. As of 2016, the United States consists
of __3__ states. America was founded in the year __4__."""
medium_test = """O say can you see, by the dawn's early light,
What so proudly we hail'd at the twilight's last __1__,
Whose broad stripes and bright stars through the perilous fight
O'er the __2__ we watch'd were so gallantly __3__?
And the rocket's red glare, the bombs bursting in air,
Gave proof through the night that our flag was still there,
O say does that star-spangled banner yet wave
O'er the land of the free and the home of the __4__?"""
hard_test = """The first president of the United States was __1__,
his vice president was __2__, he dropped out of school at the age of __3__,
and he was the only president to go into __4__ as president."""
easy_answers = ['13', 'Abraham Lincoln', '50', '1776']
medium_answers = ['gleaming', 'ramparts', 'streaming', 'brave']
hard_answers = ['George Washington', 'John Adams', '15', 'battle']
answer_lists = [easy_answers, medium_answers, hard_answers]
list_of_tests = [easy_test, medium_test, hard_test]
user_input_start_test = raw_input(intro_and_difficulty_prompt)
list_of_blanks = ['__1__', '__2__', '__3__', '__4__']
def select_difficulty(user_input):
#This function takes the user_input from play_game and outputs the corresponding test's string and answer list.
if user_input == 'easy':
print "\nYou selected easy difficutly!"
return list_of_tests[0], answer_lists[0]
if user_input == 'medium':
print "\nYou selected medium difficutly!"
return list_of_tests[1], answer_lists[1]
if user_input == 'hard':
print "\nYou selected hard difficutly!"
return list_of_tests[2], answer_lists[2]
test, answers = select_difficulty(user_input_start_test)
def get_answer(blank, answers):
#This function takes the string that represents the blank space
#that the user is prompted to fill in and outputs the correct answer for that blank.
if '1' in blank:
return answers[0]
if '2' in blank:
return answers[1]
if '3' in blank:
return answers[2]
if '4' in blank:
return answers[3]
def check_if_blank(element_in_test, list_of_blanks):
#This function takes an element of a test string and the list containing the strings of the blank spaces
#and then outputs True or False for whether the element is a blank space.
    for blank in list_of_blanks:
        if blank in element_in_test:
            return True
    return False
def play_game(test, answers):
    #This function takes the test string and list of answers as inputs, prompts the user to fill in each blank,
    #tells them whether their answer is correct or incorrect, and tells them when they have completed the test.
filled_blank_index = 0
progress_of_user = test
#This while loop stops if the number of blanks that are filled in by the user matches the total number of blanks in the test.
while filled_blank_index < len(list_of_blanks):
split_test = test.split()
answered = False
for element_in_test in split_test:
"""first we need to check if there is a blank in element_in_test"""
blank = check_if_blank(element_in_test, list_of_blanks)
if blank == True:
"""then we need to retrieve the blank that is in element_in_test"""
blank = list_of_blanks[filled_blank_index]
"""then we get the correct answer for the current blank"""
correct_answer = get_answer(blank, answers)
"""then we attempt to get the answer from the user"""
while answered == False:
"""here we tell the user what they have filled so far"""
"""here we ask the user for what should be in the blank"""
user_input = raw_input("\n\nThis is what's filled in so far: \n\n" + progress_of_user + "\n\nWhat should replace " + blank + "?\n\n")
"""here we see if they gave us the right answer"""
if user_input == correct_answer:
answer = user_input
print '\n' + answer + " is correct!\n"
answered = True
elif user_input != correct_answer:
print "\nYour answer was incorrect, please try again."
progress_of_user = progress_of_user.replace(blank, answer)
"""replace the blank with the answer"""
"""here we count how many blanks the user has filled"""
filled_blank_index += 1
print "Congratulations, you have completely filled in all of the blanks!"
play_game(test, answers)
|
from google.appengine.dist import use_library
use_library('django', '1.2')
#from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
from app.api import api
def main():
application = webapp.WSGIApplication([
('/api', api.Mainx),
('/api/ticket/add', api.APITicketHandler),
('/api/(.*)', api.APIHandler)
],
debug=True)
run_wsgi_app(application)
if __name__ == "__main__":
main()
|
# Relevance feedback: update download counts when a user downloads an itinerary
import settings
import json
# Input: the id of a downloaded place.
# Updates the place's download count (and the global max) in settings.
def update_downloads(pid):
place = settings.places[pid]
if place.get("feedback"):
cmax = place["feedback"]["download"]
cmax += 1
settings.places[pid]["feedback"]["download"] = cmax
if cmax > settings.max_download:
settings.max_download = cmax
else:
settings.places[pid]["feedback"] = {"download": 1}
def give_feedback(par):
for plc in par:
if settings.name_ids.get(plc["location"]):
aloc = settings.name_ids[plc["location"]]
for loc in aloc:
ceklocation = loc["location"]["address"]+" "+loc["location"]["city"]+" "+loc["location"]["province"]+" "+loc["location"]["island"]
if ceklocation == plc["address"]:
update_downloads(loc["index"])
|
class A:
def __init__(self):
print("I am default const of A")
class B(A):
def __init__(self):
        super().__init__() # call the superclass constructor
print("I am default Const of B")
#---------------------
b1 = B()
|
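# digadd computes the "digital root" of n: repeatedly summing the decimal
# digits of a positive integer always ends at 1 + (n - 1) % 9, which the
# lookup table below encodes as num_ref[n % 9] (with 9 stored at index 0).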
def digadd(n):
num_ref = [9,1,2,3,4,5,6,7,8]
if n > 0:
results = num_ref[n % 9]
else:
results = 0
return results
print(digadd(38))  # example: 3+8=11 -> 1+1=2, so the digital root is 2
|
#Title: Neural Network on prediction of cleavage sites
#Author: Changpeng Lu
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
from keras import regularizers
#Helper Libraries
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
from sklearn import preprocessing
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, roc_auc_score, auc
import random
data_train = pd.read_csv("new_X_train_30000.csv", index_col = 0)
y_train = pd.read_csv("new_y_train_30000.csv",index_col=0)
data_test = pd.read_csv("X_test.csv", index_col = 0)
remain = np.genfromtxt("test_index_30000",dtype='str')
data_test = data_test.loc[remain,:]
#result = y_train.copy().values
class_names = ["CLEAVED","MIDDLE","UNCLEAVED"]
#class_names = ["CLEAVED","UNCLEAVED"]
#cn = sum(data_train['result'] == 'CLEAVED')
#un = sum(data_train['result'] == 'UNCLEAVED')
#mn = sum(data_train['result'] == 'MIDDLE')
#rcn = np.random.randint(0,cn,size = un-cn)
#rmn = np.random.randint(0,mn,size = un-mn)
m = data_train.shape[0]
#data_train.index = range(0,data_train.shape[0])
#new_data_train = data_train.copy()
#data_train_c = data_train[data_train['result'] == 'CLEAVED'].copy()
#for num in rcn:
# new_data_train.loc[len(result)+1] = data_train_c.iloc[num,:]
# m=m+1
#data_train_m = data_train[data_train['result'] == 'MIDDLE'].copy()
#for nn in rmn:
# new_data_train.loc[m+1] = data_train_m.iloc[nn,:]
# m=m+1
#print(new_data_train.shape[0] == data_train[data_train['result']=='UNCLEAVED'].shape[0]*3)
result_n = y_train['result'].copy().values
newre = np.zeros(len(result_n))
for i in range(0,len(result_n)):
if result_n[i] == 'CLEAVED':
newre[i] = 0
elif result_n[i] == 'MIDDLE':
newre[i] = 1.0
else:
newre[i] = 2.0
newre = newre.astype(int)
model = keras.Sequential([keras.layers.Dense(1024,activation=tf.nn.relu),keras.layers.Dense(3, activation=tf.nn.softmax)])
model.compile(optimizer=tf.train.AdamOptimizer(),loss='sparse_categorical_crossentropy',metrics=['accuracy'])
model.fit(data_train.values, newre, epochs=5)
result_test = data_test.copy().values
newre_test = np.zeros(len(result_test))
for i in range(0,len(result_test)):
if result_test[i] == 'CLEAVED':
newre_test[i] = 0
elif result_test[i] == 'MIDDLE':
newre_test[i] = 1
else:
newre_test[i] = 2
newre_test = newre_test.astype(int)
test_loss, test_acc = model.evaluate(data_test.values, newre_test)
print('Test accuracy:', test_acc)
predictions = model.predict(data_test.values)
label_pre = []
for i in range(0,len(predictions)):
label_pre.append(np.argmax(predictions[i]))
#label_df = pd.DataFrame({'result' : label_pre},index=data_test.index.values)
#df1 = pd.DataFrame({'score': predictions[:,0]},index=data_test.index.values)
#df2 = pd.DataFrame({'score' : predictions[:,1]},index=data_test.index.values)
#df3 = pd.DataFrame({'score' : predictions[:,2]},index=data_test.index.values)
#result_cleaved = pd.concat([data_test,df1,label_df],axis=1)
#result_middle =pd.concat([data_test,df2,label_df],axis=1)
#result_uncleaved = pd.concat([data_test,df3,label_df],axis=1)
#result_cleaved.to_csv("result_cleaved_ann")
#result_middle.to_csv("result_middle_ann")
#result_uncleaved.to_csv("result_uncleaved_ann")
np.savetxt("label_result_ann_resample_30000",label_pre,delimiter=",")
np.savetxt("result_ann_resample_30000",predictions,delimiter=",")
print(data_train.shape[0],data_test.shape[0])
#def plotROC(preds, truth, classification, name):
# fpr, tpr, thresholds = roc_curve(truth, preds , pos_label = classification)
# roc_auc = auc(fpr, tpr)
# chooses a random color for plotting
# c = (np.random.rand(), np.random.rand(), np.random.rand())
#create the plot
# plt.plot(fpr, tpr, color = c, label = name + ' (AUC = %0.3f)' % roc_auc)
# plt.plot([0, 1], [0, 1], 'k--')
# plt.xlim([0.0, 1.0])
# plt.ylim([0.0, 1.0])
# plt.xlabel('FPR')
# plt.ylabel('TPR')
# plt.title('ROC')
# plt.legend(loc="lower right")
# return roc_auc
#mu_data_test = pd.read_csv("mu_data_test.csv",index_col=0)
#mc_data_test = pd.read_csv("mc_data_test.csv",index_col=0)
#mm_data_test = pd.read_csv("mm_data_test.csv",index_col=0)
# Plot ROC for both models
#fig = plt.figure(figsize = (16, 12))
#plt.subplot(221)
#plotROC(predictions[:,0], mu_data_test['result'] ,'CLEAVED','CLEAVED versus REST')
#plt.subplot(222)
#plotROC(predictions[:,2], mc_data_test['result'],'UNCLEAVED','UNCLEAVED versus REST')
#plt.subplot(223)
#plotROC(predictions[:,1], mm_data_test['result'], 'MIDDLE', 'MIDDLE versus REST')
#plt.show()
|
import logging
from seat.models.exam import Exam
logger = logging.getLogger(__name__)
class ExamApplication(object):
"""description of class"""
def get_exam_by_id(self, exam_id):
try:
exam = Exam.objects.get(id=exam_id)
return exam
        except Exception as error:
            logger.info("get_exam_by_id error: " + str(error))
            raise error
def delete_exam(self, exam_id):
try:
Exam.objects.get(id=exam_id).delete()
        except Exception as error:
            logger.warning("failed to delete exam!: " + str(error))
            raise error
|
from django.shortcuts import render
# Create your views here.
from film.models import categories, actors, Director, origin, categories_film
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.decorators import api_view
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework import status
from django.core.exceptions import ObjectDoesNotExist
from django.http import JsonResponse
from django.db import connection
from film.models import *
from film.serializers import *
import csv
import time
import faiss
import numpy as np
import pandas as pd
import pickle
# from bert_score import score
from googletrans import Translator
from sentence_transformers import SentenceTransformer
import json
def getmoviedef():
movies = pd.read_csv('movies.csv')
# df2 = movies[["movieId","Release Year","Title" ,"Origin/Ethnicity" ,"Director","Cast","Genre","Plot"]]
# df = movies[['Title','Plot']]
return movies
def fetch_movie_info(dataframe_idx):
df = getmoviedef()
info = df.iloc[dataframe_idx]
meta_dict = {}
meta_dict['movieId'] = info['movieId']
# meta_dict['Plot'] = info['Plot'][:500]
return meta_dict
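# faiss Index.search returns a (distances, indices) pair; movie lookups must
# use the indices array, i.e. the second element of the tuple.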
def search(query, top_k, index, model):
t = time.time()
query_vector = model.encode([query])
top_k = index.search(query_vector, top_k)
top_k_ids = top_k[1].tolist()[0]
top_k_ids = list(np.unique(top_k_ids))
results = [fetch_movie_info(idx) for idx in top_k_ids]
return results
def translatorsearch1(query):
    # Translate Arabic-script queries to English before searching; the ranges
    # below cover the Arabic Unicode blocks. The check is applied per character,
    # since comparing the whole string would only be a lexicographic comparison.
    arabic_blocks = [('\u0600', '\u06FF'), ('\u0750', '\u077F'),
                     ('\u08A0', '\u08FF'), ('\uFB50', '\uFDFF'),
                     ('\uFE70', '\uFEFF'), ('\U00010E60', '\U00010E7F'),
                     ('\U0001EE00', '\U0001EEFF')]
    translator = Translator()
    if any(lo <= ch <= hi for ch in query for lo, hi in arabic_blocks):
        result = translator.translate(query).text
    else:
        result = query
    return result
def finallsearch(query):
# loaded_model = pickle.load(open('finalized_model.sav', 'rb'))
# print((loaded_model))
resfinal = translatorsearch1(query)
model = SentenceTransformer('msmarco-distilbert-base-dot-prod-v3')
index = faiss.deserialize_index(np.load("test.npy"))
print(index)
    t = time.time()
    query_vector = model.encode([resfinal])
    top_k = index.search(query_vector, 50)
    top_k_ids = top_k[1].tolist()[0]
    top_k_ids = list(np.unique(top_k_ids))
results = [fetch_movie_info(idx) for idx in top_k_ids]
# results = search(resfinal, top_k=5, index=index, model=model)
# ranked_results_bert = []
# ref = [resfinal]
# for cand in results:
# P, R, F1 = score([cand['Plot']], ref, lang='en')
# ranked_results_bert.append({'Title': cand['Title'], 'Score': F1.numpy()[0]})
return results
class searchApi(APIView):
def post(self, request):
query = request.data['search']
resfinal = translatorsearch1(query)
model = SentenceTransformer('msmarco-distilbert-base-dot-prod-v3')
index = faiss.deserialize_index(np.load("test.npy"))
query_vector = model.encode([resfinal])
top_k = index.search(query_vector, 50)
top_k_ids = top_k[1].tolist()[0]
top_k_ids = list(np.unique(top_k_ids))
results = [fetch_movie_info(idx) for idx in top_k_ids]
listresult = []
finalres =[]
for x in results:
listresult.append(x.get('movieId'))
for x in listresult:
movies = film.objects.filter(id=x)
serializers = FilmSerializer(movies, many=True)
finalres.append(serializers.data[0])
newMax = json.dumps(finalres)
return Response(json.loads(newMax))
|
from collections import namedtuple, deque
from enum import Enum
import logging
import os.path
import datetime as dt
import numpy as np
import imageio
import cv2
Frame = namedtuple('Frame', ['filename', 'frame_number', 'img_data', 'timestamp'])
ExtractedChip = namedtuple('ExtractedChip', ['filename', 'frame_number', 'x', 'y', 'w', 'h', 'img_data', 'timestamp'])
logger = logging.getLogger('Chipper')
class Methods(Enum):
OPENCV=1
BACKGROUND_SUB=2
class FrameProducer(object):
def __init__(self,
file_list,
open_func=open,
decoder='ffmpeg',
desired_framerate=2):
self.current_file = 0
self.file_list = file_list
self.open_func = open_func
self.decoder = decoder
self.desired_framerate = desired_framerate
def __iter__(self):
for filename in self.file_list:
            logger.info('Starting file: {}'.format(filename))
self.vid = imageio.get_reader(self.open_func(filename), self.decoder)
self.vid_metadata = self.vid.get_meta_data()
self.step_size = int(self.vid_metadata['fps']/self.desired_framerate)
for frame_number in range(0, self.vid.get_length(), self.step_size):
try:
timestamp = self.__get_frame_time(filename, frame_number)
except:
timestamp = 0
yield Frame(filename, frame_number, self.vid.get_data(frame_number), timestamp)
        return  # PEP 479: raising StopIteration inside a generator is a RuntimeError in Python 3.7+
def __get_frame_time(self, filename, frame_number):
# Get the number of seconds relative to the start of the video
fps = self.vid_metadata['fps']
seconds_from_start = frame_number / fps
frame_delta = dt.timedelta(seconds=seconds_from_start)
# Get the time from the file name, and the offset from that time as well
# File names look like:
#
# /foo/bar/baz_20151001T223412-00600-01200.mp4
#
# Where '20151001T223412' is the date and time, and '00600' is the
# offset from that time in seconds. The first video starts at the
# correct time and has an offset of '00000'. Other videos after that
# have offsets (normally in multiples of 10 minutes).
base = os.path.basename(filename)
tmp_str = base.split('_')[-1]
time_str, start_second, _ = tmp_str.split('-')
time = dt.datetime.strptime(time_str, "%Y%m%dT%H%M%S")
time_delta = dt.timedelta(seconds=int(start_second))
return time + time_delta + frame_delta
class Chipper(object):
def __init__(self,
frame_producer,
mask_modifier=None,
box_expander=None,
suppress_shadows=True,
kernel_size=(7, 7),
threshold=30,
chipping_method=Methods.BACKGROUND_SUB,
min_size=125):
self.frame_producer = frame_producer
self.suppress_shadows = suppress_shadows
if self.suppress_shadows:
self.fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=True)
else:
self.fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=False)
self.mask_modifier = mask_modifier
self.box_expander = box_expander
self.kernel_size = kernel_size
self.threshold = threshold
self.chipping_method = chipping_method
self.min_size = min_size
def __iter__(self):
if self.chipping_method == Methods.BACKGROUND_SUB:
last_N_frames = deque()
N = 10
for frame in self.frame_producer:
extracted_chips = []
img_data = frame.img_data
original_img_data = np.copy(frame.img_data)
if self.chipping_method == Methods.OPENCV:
fg_mask = self.fgbg.apply(img_data)
if self.mask_modifier:
fg_mask = self.mask_modifier(fg_mask)
if self.suppress_shadows:
fg_mask[fg_mask <250] = 0
img_data = cv2.bitwise_and(img_data, img_data, mask=fg_mask)
difference_image = cv2.cvtColor(img_data, cv2.COLOR_BGR2GRAY)
else:
gray = cv2.cvtColor(img_data, cv2.COLOR_BGR2GRAY)
if self.mask_modifier:
gray = self.mask_modifier(gray)
if len(last_N_frames) < N:
last_N_frames.append(gray)
continue
else:
background_image = np.median(last_N_frames, axis=0)
background_image = np.array(background_image, dtype=np.uint8)
difference_image = cv2.absdiff(background_image, gray)
_ = last_N_frames.popleft()
last_N_frames.append(gray)
blurred_diff_image = cv2.GaussianBlur(difference_image, self.kernel_size, 0)
_, th1 = cv2.threshold(blurred_diff_image, self.threshold, 255, cv2.THRESH_BINARY)
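            # OpenCV 3.x findContours returns (image, contours, hierarchy);
            # OpenCV 4.x returns only (contours, hierarchy), so this
            # unpacking assumes OpenCV 3.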
_, contours, hierarchy = cv2.findContours(th1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
if cv2.contourArea(cnt) < self.min_size:
continue
x, y, w, h = cv2.boundingRect(cnt)
if self.box_expander:
x, y, w, h = self.box_expander(x, y, w, h)
ec = ExtractedChip(filename=frame.filename,
frame_number=frame.frame_number,
x=x,
y=y,
w=w,
h=h,
img_data=np.copy(original_img_data[y:y+h, x:x+w]),
timestamp=frame.timestamp,
)
extracted_chips.append(ec)
yield extracted_chips
        return  # PEP 479: see FrameProducer.__iter__
def main():
import argparse
import os
parser = argparse.ArgumentParser(prog='chipper.py', description='Extract trips for a video')
parser.add_argument("dataset_path", default="dataset_path", action="store", type=str,
help="Path to the dataset in hdfs.")
parser.add_argument('string_to_match', type=str, help='string to match <str> in filename')
args = parser.parse_args()
from hdfs3 import HDFileSystem
hdfs = HDFileSystem(host='namenode', port=8020)
filenames = hdfs.glob(args.dataset_path)
def get_info(filename):
bname = os.path.basename(filename)
return bname.split('-')[0], int(bname.split('-')[1])
filenames = sorted(filenames, key=get_info)
filenames_filtered = [filename for filename in filenames if args.string_to_match in filename]
fp = FrameProducer(filenames_filtered, hdfs.open)
chipper = Chipper(fp)
count = 0
for _ in chipper:
count += 1
    logger.warning('Total Chips: {}'.format(count))
if __name__ == '__main__':
main()
|
import cv2 as cv
import numpy as np
# # An image is one (or more) matrices; its attributes include channel count, height/width, and dtype
# ## Read and print basic image attributes
def get_image_info(image):
print(type(image))
    print('image shape', image.shape)
    print('image size', image.size)
print(image.dtype)
pixel_data = np.array(image)
print(pixel_data)
# Read video from the camera
def video_demo():
    capture = cv.VideoCapture(0)  # open the camera; 0 is the camera index (a video file path also works)
    while True:
        ret, frame = capture.read()  # read one frame from the camera
        frame = cv.flip(frame, 1)  # mirror the frame horizontally
        cv.imshow("video", frame)
        c = cv.waitKey(50)  # wait 50 ms between frames
        if c == 27:  # ESC key quits
            break
# ##### Read and display an image #####
# src = cv.imread('C:/Users/zx/Desktop/test.jpg')  # read the image into a numpy array
# cv.namedWindow('input image', cv.WINDOW_AUTOSIZE)
# cv.imshow("input image", src)  # display the image in an OpenCV window
# get_image_info(src)
video_demo()
# cv.imwrite("C:/Users/zx/Desktop/result.jpg", src)  # src only exists when the block above is uncommented
cv.waitKey(0)  # wait for a key press before exiting
cv.destroyAllWindows()  # close all windows
|
from setuptools import setup, find_packages
from apkdl import __version__
import os
setup(
name ='apkdl',
version = __version__,
description = 'Search and download APKs from the command line',
url = 'https://github.com/sereneblue/apkdl',
author = 'sereneblue',
license = "MIT",
packages = find_packages(),
classifiers = [
'Intended Audience :: Developers',
'Environment :: Console',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords = ['android', 'apk', 'cli'],
install_requires = ['requests', 'beautifulsoup4'],
entry_points = {
'console_scripts':[
'apkdl=apkdl.__main__:main'
],
}
)
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
# Import BaseEventHandler
from tonga.models.handlers.event.event_handler import BaseEventHandler
# Import StoreBuilderBase
from tonga.stores.manager.kafka_store_manager import KafkaStoreManager
# Import BaseProducer
from tonga.services.producer.base import BaseProducer
from tonga.services.coordinator.partitioner.key_partitioner import KeyPartitioner
# Import Coffee Model
from examples.coffee_bar.cash_register import transactional_manager
from examples.coffee_bar.cash_register.models.events.coffee_ordered import CoffeeOrdered
from examples.coffee_bar.cash_register.models.events.bill_created import BillCreated
from examples.coffee_bar.cash_register.models.bill import Bill
class CoffeeOrderedHandler(BaseEventHandler):
_store_builder: KafkaStoreManager
_transactional_producer: BaseProducer
def __init__(self, store_builder: KafkaStoreManager, transactional_producer: BaseProducer):
self._store_builder = store_builder
self._transactional_producer = transactional_producer
@transactional_manager
async def handle(self, event: CoffeeOrdered) -> None:
key_partitioner = KeyPartitioner()
while True:
bill = Bill(coffee_uuid=event.uuid, amount=event.amount)
if key_partitioner(bill.uuid, [0, 1], [0, 1]) == self._store_builder.get_current_instance():
break
# Sets on cash register local store
await self._store_builder.set_from_local_store(bill.uuid, bill.__to_bytes_dict__())
# Creates BillCreated event
bill_created = BillCreated(partition_key=bill.uuid, uuid=bill.uuid, coffee_uuid=bill.coffee_uuid,
amount=bill.amount, context=bill.context)
# Sends BillCreated event
await self._transactional_producer.send_and_wait(bill_created, 'cash-register-events')
@classmethod
def handler_name(cls) -> str:
return 'tonga.waiter.event.CoffeeOrdered'
|
import time
import math
from flask import session
from sqlalchemy import Column, Integer, String, Text, ForeignKey, Table, DateTime, func
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from apps.db import engine, dbsession
from common.serializer import objToDict, listToTree
Base = declarative_base()
class User(Base):
__tablename__ = "tb_user"
id = Column(Integer,primary_key=True)
username = Column(String(64),nullable=False,index=True)
password = Column(String(64),nullable=False)
email = Column(String(64),nullable=False,index=True)
credit = Column(Integer,default=0)
articles = relationship("Article",backref="author")
credits = relationship("Credit",backref="user")
def getUserByName(self,name):
row = dbsession.query(User).filter(User.username==name).first()
return row
def getUserByAll(self, name,password):
row = dbsession.query(User).filter(User.username == name,User.password == password).first()
return row
def registerUser(self,name,password):
user = User(username=name,password=password,email=name)
dbsession.add(user)
dbsession.commit()
return user
def updateCredit(self,credit):
user = dbsession.query(User).filter(User.id == session.get('userid')).first()
user.credit += credit
dbsession.commit()
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.username)
article_tag = Table(
"article_tag",Base.metadata,
Column("article_id",Integer,ForeignKey("tb_article.id")),
Column("tag_id",Integer, ForeignKey("tb_tag.id"))
)
class Article(Base):
__tablename__ = "tb_article"
id = Column(Integer,primary_key = True)
titile = Column(String(255),nullable=False,index=True)
pubdate = Column(DateTime,nullable=False)
type = Column(Integer,nullable=False)
reading_volume = Column(Integer)
cost_integral = Column(Integer)
content = Column(Text)
user_id = Column(Integer,ForeignKey("tb_user.id"))
tags = relationship('Tag', backref='articles', secondary=article_tag)
def getOneArticle(self,id):
row = dbsession.query(Article.type,Article.titile
,Article.pubdate,Article.reading_volume,Article.cost_integral,User.username).outerjoin(User,User.id == Article.user_id).filter(Article.id==id).first()
return row
def getPageNumber(self):
articleTotal = dbsession.query(Article).count()
number = math.ceil(articleTotal / 10)
return number
def getPageArticle(self,start,end):
row = dbsession.query(Article)[start:end]
return row
def getSideArticle(self,type,page):
row = dbsession.query(Article).filter(Article.type == type)[page*10-10:page*10]
return row
def getTypeNumber(self,type):
number = math.ceil(dbsession.query(Article).filter(Article.type == type).count()/10)
return number
def getSearchArticle(self,keyword,page):
row = dbsession.query(Article).filter(Article.titile.like('%'+keyword+'%'))[page*10-10:page*10]
return row
def getSearchNumber(self, keyword):
number = math.ceil(dbsession.query(Article).filter(Article.titile.like('%'+keyword+'%')).count()/10)
return number
def getReadList(self):
row = dbsession.query(Article.id,Article.titile).order_by(Article.reading_volume.desc()).limit(10).all()
return row
def getNewList(self):
row = dbsession.query(Article.id, Article.titile).order_by(Article.pubdate.desc()).limit(10).all()
return row
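    # Note: func.rand() is MySQL-specific; PostgreSQL and SQLite expose random() instead.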
def getPopList(self):
row = dbsession.query(Article.id, Article.titile).order_by(func.rand()).limit(9).all()
return row
def isBuy(self,articleId):
data = dbsession.query(Credit.id).filter(Credit.target==articleId,Credit.category=='购买文章').first()
if data:
return True
else:
return False
def getHalfContent(self,articleId):
row = dbsession.query(Article.content).filter(Article.id == articleId).first()
data = row[0][:len(row[0])//2]
return data+"..."
def getAllContent(self, articleId):
row = dbsession.query(Article.content).filter(Article.id == articleId).first()
return row[0]
def buyArticle(self,username,articleid):
user = dbsession.query(User).filter(User.username==username).first()
article = dbsession.query(Article).filter(Article.id==articleid).first()
if user.credit < article.cost_integral:
return {"code":10004,"msg":"not enough credit!"}
else:
now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
user.credit -= article.cost_integral
credits = Credit(category="购买文章",target=articleid,credit=-article.cost_integral,
createtime=now,updatetime=now,user_id= user.id)
dbsession.add(credits)
dbsession.commit()
return {"code":10001,"msg":"ok"}
# def __repr__(self):
# return '%s(%r)' % (self.__class__.__name__, self.titile)
class Tag(Base):
__tablename__ = "tb_tag"
id = Column(Integer,primary_key=True)
name = Column(String(64))
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.name)
class Credit(Base):
__tablename__ = "tb_credit"
id = Column(Integer,primary_key=True)
category = Column(String(20),nullable=False)
target = Column(Integer,default=0)
credit = Column(Integer)
createtime = Column(DateTime)
updatetime = Column(DateTime)
user_id = Column(Integer, ForeignKey("tb_user.id"))
def creaet(self,credit,category):
now = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime())
credit = Credit(category=category,credit=credit,createtime=now,updatetime=now,user_id=session.get('userid'))
dbsession.add(credit)
dbsession.commit()
return credit
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.category)
class Comment(Base):
__tablename__ = "tb_comment"
id = Column(Integer,primary_key=True)
user_id = Column(Integer,ForeignKey("tb_user.id"))
article_id = Column(Integer,ForeignKey("tb_article.id"))
user = relationship("User",backref="comments")
article = relationship("Article", backref="comments")
reply_id = Column(Integer,default=0)
content = Column(Text)
is_hidden = Column(Integer)
createtime = Column(DateTime)
updatetime = Column(DateTime)
    def addComment(self,userid,articleid,replyid,content):
        now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        # reply_id falls back to the column default of 0 for top-level comments
        com = Comment(user_id=userid, article_id=articleid, reply_id=replyid or 0,
                      content=content, is_hidden=0, createtime=now, updatetime=now)
        dbsession.add(com)
        dbsession.commit()
    def getCommentsList(self,articleid,page_index,page_size):
        row = dbsession.query(Comment).filter(Comment.article_id==articleid,Comment.reply_id==0).order_by(Comment.createtime.desc()).limit(page_size).offset((page_index-1)*page_size)
        # top-level comments for the article; their replies are attached below
        data = objToDict(row)
        for item in data:
            row = dbsession.query(Comment).filter(Comment.reply_id==item['id']).order_by(Comment.createtime.desc()).all()
            son_data = objToDict(row)
            item['reply_list'] = son_data
        return data
class Favorite(Base):
__tablename__ = "tb_favorite"
id = Column(Integer,primary_key=True)
user_id = Column(Integer,ForeignKey("tb_user.id"))
article_id = Column(Integer,ForeignKey("tb_article.id"))
is_collect = Column(Integer)
create = Column(DateTime)
    # get one favorite record
def getCollect(self,articleid,userid):
fa = dbsession.query(Favorite).filter(Favorite.article_id==articleid,Favorite.user_id==userid).first()
return fa
    # # create a new favorite
# def addCollect(self,userid,articleid):
# now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
# fa = Favorite(user_id=userid,article_id=articleid,is_collect=1,create=now)
# dbsession.add(fa)
# dbsession.commit()
# return fa
    # toggle the favorite status
def resetCollect(self,userid,articleid):
fa = dbsession.query(Favorite).filter(Favorite.user_id==userid,Favorite.article_id==articleid).first()
if not fa:
now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
fa = Favorite(user_id=userid, article_id=articleid, is_collect=1, create=now)
dbsession.add(fa)
dbsession.commit()
return fa.is_collect
if fa.is_collect == 0:
fa.is_collect =1
dbsession.commit()
return fa.is_collect
if fa.is_collect == 1:
fa.is_collect =0
dbsession.commit()
return fa.is_collect
def getStatu(self,userid,articleid):
fa = dbsession.query(Favorite).filter(Favorite.user_id==userid,Favorite.article_id==articleid).first()
if fa:
return fa.is_collect
else:
return 0
# Base.metadata.create_all(engine)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import configparser
from multiprocess import Pool
from datetime import datetime
from device.bras import ME60, M6k
from py2neo import authenticate, Graph, Node
from funcy import lmap, partial, compose, map
basFile = 'bras.txt'
logFile = 'result/bas_log.txt'
infoFile = 'result/bas_info.txt'
conf = configparser.ConfigParser()
conf.read(os.path.expanduser('~/.weihu/config.ini'))
# conf.read('config.ini')
neo4j_username = conf.get('neo4j', 'username')
neo4j_password = conf.get('neo4j', 'password')
authenticate('localhost:7474', neo4j_username, neo4j_password)
graph = Graph("http://localhost:7474/db/data")
def clear():
for f in [logFile, infoFile]:
if os.path.exists(f):
os.remove(f)
os.mknod(f)
def import_bras(file):
bras = (x.strip().split(',') for x in open(file))
brasNode = lambda x: graph.create(
Node('Bras', **dict(zip(('name', 'ip', 'model', 'area'), x))))
lmap(brasNode, bras)
def _model(funcs, device):
no_model = lambda x: ('fail', None, x)
ip, model = device
return funcs.get(model, no_model)(ip)
def bingfa_check():  # 'bingfa' (concurrency): collect per-slot concurrent-user records
funcs = {
'ME60': ME60.get_bingfa,
'ME60-X16': ME60.get_bingfa,
'M6000': M6k.get_bingfa,
'M6000-S': M6k.get_bingfa,
}
_get_bf = partial(_model, funcs)
clear()
nodes = graph.find('Bras')
bras = [(x['ip'], x['model']) for x in nodes]
rslt = map(_get_bf, bras)
with open(logFile, 'w') as flog, open(infoFile, 'w') as frslt:
for mark, record, ip in rslt:
flog.write('{ip}:{mark}\n'.format(ip=ip, mark=mark))
for slot, user, date in record:
frslt.write('{ip},{slot},{user},{date}\n'.format(
ip=ip, slot=slot, user=user, date=date))
def _add_bingfa(rslt):
cmd = """
match (b:Bras {ip:{ip}})
merge (b)-[:HAS]->(c:Card {slot:{slot}})
set c.peakUsers={peakUsers},c.peakTime={peakTime},c.updated=timestamp()
"""
mark, record, ip = rslt
with open(logFile, 'a') as flog:
flog.write('{ip}:{mark}\n'.format(ip=ip, mark=mark))
if mark == 'success':
tx = graph.cypher.begin()
        lmap(lambda x: tx.append(cmd, ip=ip, slot=x[0], peakUsers=x[1], peakTime=x[2]), record)
tx.process()
tx.commit()
def add_bingfa():
funcs = {
'ME60': ME60.get_bingfa,
'ME60-X16': ME60.get_bingfa,
'M6000-S': M6k.get_bingfa,
'M6000': M6k.get_bingfa
}
_get_bf = partial(_model, funcs)
clear()
nodes = graph.find('Bras')
bras = [(x['ip'], x['model']) for x in nodes]
lmap(compose(_add_bingfa, _get_bf), bras)
def add_itv_online():
devices = {'ME60': ME60, 'ME60-X16': ME60, 'M6000-S': M6k, 'M6000': M6k}
action = 'get_itv_online'
nodes = graph.find('Bras')
bras = [(x['ip'], x['model']) for x in nodes]
funcs = map(lambda x: partial(getattr(devices.get(x[1]), action), x[0]),
bras)
with Pool(16) as p:
rslt = p.map(lambda f: f(), funcs)
rslt = filter(lambda x: x[0] == 'success', rslt)
update_time = datetime.now().strftime('%Y-%m-%d %H:%M')
cmd = """
match (b:Bras {ip:{ip}})
merge (b)-[:HAS]->(i:ITV)
set i.count={count},i.updateTime={time}
"""
tx = graph.cypher.begin()
lmap(lambda y: tx.append(cmd, ip=y[2], count=y[1], time=update_time), rslt)
tx.process()
tx.commit()
def main():
# pass
add_bingfa()
if __name__ == '__main__':
main()
|
#!/usr/bin/python
"""
Demonstration of Graph and BokehGraph functionality.
"""
import random
from sys import argv
from graph import Graph
from draw import BokehGraph
def getGraph(**kwargs):
    # kwargs (style, num_verts, num_edges) are accepted but unused: this demo
    # always draws a fixed seven-vertex path graph
    graph = Graph()  # Instantiate your graph
graph.add_vertex(0)
graph.add_vertex(1)
graph.add_vertex(2)
graph.add_vertex(3)
graph.add_vertex(4)
graph.add_vertex(5)
graph.add_vertex(6)
graph.add_edge(0, 1)
graph.add_edge(1, 2)
graph.add_edge(2, 3)
graph.add_edge(3, 4)
graph.add_edge(4, 5)
graph.add_edge(5, 6)
bokeh_graph = BokehGraph(graph)
bokeh_graph.draw()
def getRandomGraph(numVerts, numEdges):
graph = Graph()
for vert_id in range(0, numVerts):
graph.add_vertex(vert_id)
allEdges = []
for i in range(0, numVerts):
for j in range(0, numVerts):
if i < j:
allEdges.append( (i, j) )
random.shuffle(allEdges)
randomEdges = allEdges[:numEdges]
for edge in randomEdges:
graph.add_edge(edge[0], edge[1])
bokeh_graph = BokehGraph(graph)
bokeh_graph.draw()
if __name__ == '__main__':
style = 'default'
num_verts = 5
num_edges = 5
for arg in argv[1:]:
arg_split = arg.split("=")
if len(arg_split) == 2:
if arg_split[0] == "style":
style = arg_split[1].lower()
elif arg_split[0] == "verts":
num_verts = int(arg_split[1])
elif arg_split[0] == "edges":
num_edges = int(arg_split[1])
print(style)
getGraph(style=style, num_verts=num_verts, num_edges=num_edges)
    getRandomGraph(num_verts, num_edges)
|
class Solution(object):
def strStr(self, haystack, needle):
"""
:type haystack: str
:type needle: str
:rtype: int
"""
nl = len(needle)
if nl == 0:
return 0
        for i in range(0, len(haystack)-nl+1):
if haystack[i:i+nl] == needle:
return i
return -1
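if __name__ == '__main__':
    # quick self-checks (illustrative, not part of the original snippet)
    assert Solution().strStr("hello", "ll") == 2
    assert Solution().strStr("aaaaa", "bba") == -1
    assert Solution().strStr("a", "a") == 0  # a match at the very end needs the inclusive range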
|
def char_mixing(a, b):
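    # swap the first two characters of the two strings, keeping each string's third character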
a1 = b[:2] + a[2]
b1 = a[0:2] + b[2]
return a1 + ' ' + b1
print(char_mixing("abc", "xyz"))
|
n = int(input("Enter a number: "))
print('successor: {}, predecessor: {}'.format(n + 1, n - 1))
|
# Copyright (c) 2021 Mahdi Biparva, mahdi.biparva@gmail.com
# miTorch: Medical Imaging with PyTorch
# Deep Learning Package for 3D medical imaging in PyTorch
# Implemented by Mahdi Biparva, April 2021
# Brain Imaging Lab, Sunnybrook Research Institute (SRI)
import yaml
import numpy as np
################################################################################
### ROBUSTNESS TRANSFORMS
################################################################################
# for most transforms, convert to tensor first if torchio else convert after
rob_trfs = {
'Anisotropy': {
'severity_controllers': [
{
'downsampling_range': [float(i) for i in np.linspace(2., 10., 5)]
}
]
},
'BiasField': {
'severity_controllers': [
{
'coefficients_range': [float(i) for i in np.linspace(0., 1.5, 6)[1:]]
}
]
},
'Blur': {
'severity_controllers': [
{
'std': [float(i) for i in np.linspace(0., 4., 6)[1:]]
}
]
},
'ContrastCompression': {
'severity_controllers': [
{
            'adjuster.gamma': np.linspace(1., 0.3, 6)[1:].tolist()
}
]
},
'ContrastExpansion': {
'severity_controllers': [
{
'adjuster.gamma': np.linspace(1., 3., 6)[1:].tolist()
}
]
},
'Downsample': {
'severity_controllers': [
{
'spacing_transform.pixdim': [float(i) for i in np.linspace(1., 4., 6)[1:]]
}
]
},
'DownsampleKeepSize': {
'severity_controllers': [
{
'spacing_transform.pixdim': [float(i) for i in np.linspace(1., 4., 6)[1:]]
}
]
},
'ElasticDeformation': {
'severity_controllers': [
{
'max_displacement': [float(x) for x in np.linspace(0., 30., 6)[1:]]
}
]
},
'Ghosting': {
'severity_controllers': [
{
'num_ghosts_range': [float(x) for x in [3, 5, 7, 9, 11]],
'intensity_range': [float(x) for x in np.linspace(0.0, 2.5, 6)[1:]]
}
]
},
'Motion': {
'severity_controllers': [
{
'degrees_range': [float(i) for i in np.linspace(0.0, 5.0, 6)[1:]],
'translation_range': [float(i) for i in np.linspace(0.0, 10.0, 6)[1:]],
'num_transforms': [2, 4, 6, 8, 10]
}
]
},
'RicianNoise': {
'severity_controllers': [
{
'std': np.linspace(0., 0.1, 6)[1:].tolist()
}
]
},
'Rotate': {
'severity_controllers': [
{
f'range_{axis}': [float(x) for x in np.linspace(0.0, np.pi/2, 6)[1:]]
for axis in 'xyz'
}
]
},
'Upsample': {
'severity_controllers': [
{
'spacing_transform.pixdim': [float(i) for i in np.linspace(1., 0.5, 6)[1:]]
}
]
}
}
################################################################################
### LOOP OVER DATASET
################################################################################
transforms_dict = dict()
for rob_transform, settings in rob_trfs.items():
print("-" * 10, rob_transform)
transforms_dict[rob_transform] = dict()
sv_controller = settings['severity_controllers'][0]
for param, values in sv_controller.items():
if isinstance(values, np.ndarray):
values = values.tolist()
if isinstance(values, list) and isinstance(values[0], tuple):
values = [list(value) for value in values]
if isinstance(values, list) and isinstance(values[0], np.ndarray):
values = [value.tolist() for value in values]
transforms_dict[rob_transform][param] = values
print(param, values)
with open('generate_benchmark.yaml', 'w') as file:
    # note: passing encoding= to yaml.dump makes it emit bytes, which fails on a
    # text-mode file handle, so the file's own text encoding is used instead
    yaml.dump(transforms_dict, file, default_flow_style=False)
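# Illustrative round-trip check (not part of the original script): the dumped
# YAML should load back and cover every transform defined above.
with open('generate_benchmark.yaml') as file:
    assert set(yaml.safe_load(file)) == set(rob_trfs)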
|
# -*- coding: utf-8 -*-
# Version: 1.0.0
__author__ = 'John Lampe'
__email__ = 'dmitry.chan@gmail.com'
import requests
class tmobile:
def __init__(self, ip, port, verify=False):
self.ip = ip
self.port = port
self.base_url = "http://{}:{}".format(ip, port)
self.session = requests.Session()
self.session.verify = verify
self.session.headers = {"Content-Type": "application/x-www-form-urlencoded",
"Accept": "application/json",
"Sec-GPS": "1",
"Referer": "{}/web_whw".format(self.base_url),
"Accept-Encoding": "gzip, deflate"}
self.load_cgis()
    def get_page(self, url):
        """
        generic GET
        :param url: string url
        :return: status_code, (requests-response|error string)
        """
        try:
            ret = self.session.get("{}/{}".format(self.base_url, url))
            return ret.status_code, ret
        except Exception as e:
            print("Error retrieving page. Error: {}".format(e))
            # 'ret' is unbound when the request itself raised, so report a
            # sentinel status code instead (mirrors post_page below)
            return "999", e
def post_page(self, url, post_data):
"""
generic POST
:param url: string url (the portion after the POST command)
:param post_data: the POST data
:return: status_code, (requests-response|error string)
"""
try:
full_path = "{}/{}".format(self.base_url, url)
ret = self.session.post(url=full_path, data=post_data)
return ret.status_code, ret
except Exception as e:
print("Error retrieving page. Error: {}".format(e))
try:
return ret.status_code, e
except:
return "999", e
def load_cgis(self):
self.cgi_files = [
"login_web_app.cgi",
"check_expire_web_app.cgi",
"main_web_app.cgi",
"system_web_app.cgi",
"device_status_web_app.cgi",
"dashboard_device_info_status_web_app.cgi",
"fastmile_statistics_status_web_app.cgi",
"fastmile_radio_status_web_app.cgi",
"lan_ipv4_status_web_app.cgi",
"statistics_status_web_app.cgi",
"pon_status_web_app.cgi",
"voice_info_status_web_app.cgi",
"lan_ipv6_status_web_app.cgi",
"wan_config_glb_status_web_app.cgi",
"wifi_schedule_status_web_app.cgi",
"dashboard_status_web_app.cgi",
"wan_internet_status_web_app.cgi",
"wlan_config_status_web_app.cgi",
"wan_dhcp_status_web_app.cgi",
"tr69_status_web_app.cgi",
"qos_status_web_app.cgi",
"route_status_web_app.cgi",
"access_control_status_web_app.cgi",
"dns_status_web_app.cgi",
"reboot_web_app.cgi",
"ddns_status_web_app.cgi",
"ipfilter_status_web_app.cgi",
"device_name_status_web_app.cgi",
"sntp_status_web_app.cgi",
"macfilter_status_web_app.cgi",
"urlfilter_status_web_app.cgi",
"password_status_web_app.cgi",
"xlink_status_web_app.cgi",
"xlink_web_app.cgi",
"upgrade_web_app.cgi",
"diag_status_web_app.cgi",
"parental_control_status_web_app.cgi",
"mesh_status_web_app.cgi",
"troubleshooting_status_web_app.cgi",
"slidconfig_status_web_app.cgi",
"loidconfig_status_web_app.cgi",
"storage_status_web_app.cgi",
"whw_beacon_mode_app_web_app.cgi",
"dongle_status_web_app.cgi",
"uplink_management_status_web_app.cgi",
"dashboard_device_status_web_app.cgi",
"dashboard_ntwtopo_status_web_app.cgi",
"fastmile_statistics_status_web_app.cgi",
"apn_config_status_web_app.cgi",
"radio_config_status_web_app.cgi",
"multi_apn_status_web_app.cgi",
"multi_apn_delete_web_app.cgi",
"multi_apn_config_web_app.cgi",
"jsencrypt.min.js",
"sjcl.js",
"crypto_page.js",
"runtime-es2015.1eba213af0b233498d9d.js",
"jsencrypt.min.js",
"sjcl.js",
"crypto_page.js",
"runtime-es2015.1eba213af0b233498d9d.js",
"jsencrypt.min.js",
"sjcl.js",
"crypto_page.js",
"runtime-es2015.1eba213af0b233498d9d.js",
"jsencrypt.min.js",
"sjcl.js",
"crypto_page.js",
"runtime-es2015.1eba213af0b233498d9d.js"
]
self.cgi_dirs = ["/web_whw/", "/cgi-bin/", "/app-root/", "/assets/", "/assets/js/", "/"]
def find_cgi_locations(self):
for dirs in self.cgi_dirs:
for files in self.cgi_files:
try:
ret = self.session.get("{}{}{}".format(self.base_url,dirs,files))
if ret.status_code != 404:
print("{} - Found path of {}{}{}".format(ret.status_code, self.base_url,dirs,files))
except Exception as e:
print("Error requesting {}{}{}. Error: {}".format(self.base_url,dirs,files,e))
def show_all(self):
self.lan_status()
self.wan_status()
self.main_status()
self.data_fields()
self.web_stats()
def lan_status(self):
print("LAN Stats")
try:
ret = self.session.get("{}/lan_status_web_app.cgi?lan".format(self.base_url))
print(ret.json())
except Exception as e:
print("Error retrieving LAN Stats. Error: {}".format(e))
def wan_status(self):
print("WAN Stats")
try:
ret = self.session.get("{}/fastmile_statistics_status_web_app.cgi".format(self.base_url))
print(ret.json())
except Exception as e:
print("Error retrieving WAN Stats. Error: {}".format(e))
def main_status(self):
#main_web_app.cgi
print("Device Config and attached devices")
try:
ret = self.session.get("{}/main_web_app.cgi".format(self.base_url))
print(ret.json())
except Exception as e:
print("Error retrieving device config and attached devices. Error: {}".format(e))
def data_fields(self):
#GET /web_whw/assets/i18n/en.json
print("Information about the configured data fields format")
try:
ret = self.session.get("{}/web_whw/assets/i18n/en.json".format(self.base_url))
print(ret.json())
except Exception as e:
print("Error retrieving configured data field formats. Error: {}".format(e))
def web_stats(self):
#/statistics_status_web_app.cgi
print("Web App Statistics")
try:
ret = self.session.get("{}/statistics_status_web_app.cgi".format(self.base_url))
print(ret.json())
except Exception as e:
print("Error retrieving Web App statistics. Error: {}".format(e))
|
"""
This provide some common utils methods for YouTube resource.
"""
import isodate
from isodate.isoerror import ISO8601Error
from pyyoutube.error import ErrorMessage, PyYouTubeException
def get_video_duration(duration: str) -> int:
"""
Parse video ISO 8601 duration to seconds.
Refer: https://developers.google.com/youtube/v3/docs/videos#contentDetails.duration
Args:
duration(str)
Videos ISO 8601 duration. Like: PT14H23M42S
Returns:
integer for seconds.
"""
try:
seconds = isodate.parse_duration(duration).total_seconds()
return int(seconds)
except ISO8601Error as e:
raise PyYouTubeException(
ErrorMessage(
status_code=10001,
message=f"Exception in convert video duration: {duration}. errors: {e}",
)
)
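# Worked example (follows directly from the docstring's sample value):
# PT14H23M42S = 14*3600 + 23*60 + 42 = 51822 seconds.
if __name__ == "__main__":
    assert get_video_duration("PT14H23M42S") == 51822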
|
from glob import glob
import numpy as np
import cv2 as cv
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_val_score
from sklearn.metrics import confusion_matrix
from sklearn.neighbors import KNeighborsClassifier
import seaborn as sns
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
"""
NAMA Kelompok
Reizha fajiran 1810511078
M.Fadillah 1810511064
"""
"""Tugas akhir smester PCD Implementasi knn untuk
menentukan kematangan pisang metodologi yang di gunakan
yaitu ektrasi menggunakan rata rata nilai RGB dari citra"""
klasifikasis=[]#digunkan untuk menyimpan kalsifikasi data training
trainings=[]#menyimpan hasil extrasi data training
testing_data=[]#digunkan untuk menyimpan kalsifikasi data testing
testings=[]#menyimpan hasil extrasi data testing
def KNN():  # fits the model, checks it against the known test labels and reports the results
    knn=KNeighborsClassifier(n_neighbors=1)  # define K=1
    knn.fit(data_average_training,klasifikasis)
    res = knn.predict(data_average_testing)
    results = confusion_matrix(testing_data, res)
    response = np.concatenate([klasifikasis, res]).astype(np.float32)
    Dataset = np.concatenate([data_average_training, data_average_testing]).astype(np.float32)
    knn=KNeighborsClassifier(n_neighbors=1)
    score= cross_val_score(knn, Dataset, response, cv=2, scoring='accuracy')
    ax= plt.subplot()
    sns.heatmap(results, annot=True, ax = ax)
    ax.set_xlabel('Predicted labels')
    ax.set_ylabel('True labels')
    ax.set_title('Confusion Matrix')
    ax.xaxis.set_ticklabels(['1 ','2'])
    ax.yaxis.set_ticklabels(['1', '2'])
    print(res)
    print("Cross Validation : ", score.mean())
    print ('Confusion Matrix :\n', results)
    print ('Accuracy Score :',accuracy_score(testing_data, res) )
    print ('Report : ')
    print (classification_report(testing_data, res))
    return res
def training(namafile):  # extracts features from the training data
    training=[]
    x=0  # counter: only the first 50 files are used
    for filename in namafile:
        if x<50:
            print(filename)
            myimg = cv.imread(filename)
            avg_color_per_row = np.average(myimg, axis=0)
            avg_color = np.average(avg_color_per_row, axis=0)
            data_warna=np.transpose(avg_color[0:3,np.newaxis])
            if namafile==data_sudah_matang or namafile==data_belum_matang:
                training.extend(data_warna)
            x+=1
    return training
def testing(namafile):  # extracts features from the testing data
    testing=[]
    x=0  # counter: only the first 50 files are used
    for filename in namafile:
        if x<50:
            print(filename)
            myimg = cv.imread(filename)
            avg_color_per_row = np.average(myimg, axis=0)
            avg_color = np.average(avg_color_per_row, axis=0)
            data_warna=np.transpose(avg_color[0:3,np.newaxis])
            if namafile==data_testing_belum_matang or namafile==data_testing_matang:
                testing.extend(data_warna)
            x+=1
    return testing
def klasifikasi_fungsi(namafile):  # builds the class labels for the training data
klasifikasi=[]
for filename in namafile:
if namafile==data_belum_matang:
klasifikasi.append(1.0)
else:
klasifikasi.append(2.0)
return klasifikasi
def prediksi_testing(namafile):  # stores the known labels of the testing data so accuracy can be measured later
testing_predic=[]
for filename in namafile:
if namafile==data_testing_belum_matang:
testing_predic.append(1.0)
else:
testing_predic.append(2.0)
return testing_predic
def pemersatu_path(filename,filename1):  # merges two lists of testing file paths
tampilan_gambar=[]
for i in filename:
tampilan_gambar.append(i)
for j in filename1:
tampilan_gambar.append(j)
return tampilan_gambar
def show_gambar(nama,new):  # displays each test image with its predicted ripeness label
    pathfileindex=np.size(nama)
    res=new
    for i in range(pathfileindex):
        img = cv.imread("{}".format(nama[i]))
        plt.title(nama_pisang(res[i]))
        plt.imshow(cv.cvtColor(img,cv.COLOR_BGR2RGB))
        plt.show()
def nama_pisang(value):  # maps a predicted label to a ripeness name
    if value == 1.0:
        title="unripe banana"
    else:
        title="ripe banana"
    return title
data_sudah_matang=sorted(glob('banana/matang/*.png'), key=lambda name: int(name[21:-4]))  # training directory for ripe bananas
data_belum_matang=sorted(glob('banana/mentah/*.jpg'), key=lambda name: int(name[21:-4]))  # training directory for unripe bananas
data_testing_matang=sorted(glob('banana/test/test_matang/*.png'), key=lambda name: int(name[34:-4]))  # testing directory for ripe bananas
data_testing_belum_matang=sorted(glob('banana/test/test_mentah/*.jpg'), key=lambda name: int(name[34:-4]))  # testing directory for unripe bananas
trainings.extend(training(data_sudah_matang))  # add the extracted features of the ripe training images
trainings.extend(training(data_belum_matang))  # add the extracted features of the unripe training images
testings.extend(testing(data_testing_matang))  # add the extracted features of the ripe testing images
testings.extend(testing(data_testing_belum_matang))  # add the extracted features of the unripe testing images
klasifikasis.extend(klasifikasi_fungsi(data_sudah_matang))  # label the objects numerically: 2 = ripe, 1 = unripe
klasifikasis.extend(klasifikasi_fungsi(data_belum_matang))
testing_data.extend(prediksi_testing(data_testing_matang))  # label the ripe and unripe testing data
testing_data.extend(prediksi_testing(data_testing_belum_matang))
data_average_testing=pd.DataFrame(testings).astype(np.float32)  # store the extracted features in a DataFrame for easier processing
data_average_training=pd.DataFrame(trainings).astype(np.float32)
new=KNN()
pemersatu_alamat=pemersatu_path(data_testing_matang, data_testing_belum_matang)
show_gambar(pemersatu_alamat,new)
|
def copy_n_times(filepath,repetitions):
    with open(filepath,"r") as getdata:
        content = getdata.read()
    # open the file once in append mode and write the content n times
    with open(filepath,"a") as mytext:
        for i in range(repetitions):
            mytext.write("\n")
            mytext.write(content)
copy_n_times("files/data.txt",3)
|
"""
Longest common subsequence problem
"""
import json
from tokenizer import tokenize
def tokenize_by_lines(text: str) -> tuple:
"""
Splits a text into sentences, sentences – into tokens,
converts the tokens into lowercase, removes punctuation
:param text: the initial text
:return: a list of sentences with lowercase tokens without punctuation
e.g. text = 'I have a cat.\nHis name is Bruno'
--> (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'bruno'))
"""
if not isinstance(text, str):
return ()
tokens = []
sentences = text.split('.')
for sentence in sentences:
# tokenized = [item for item in tokenize(sentence)]
tokenized = list(tokenize(sentence))
token = tuple(tokenized)
if len(token) > 1:
tokens.append(token)
return tuple(tokens)
def create_zero_matrix(rows: int, columns: int) -> list:
"""
Creates a matrix rows * columns where each element is zero
:param rows: a number of rows
:param columns: a number of columns
:return: a matrix with 0s
e.g. rows = 2, columns = 2
--> [[0, 0], [0, 0]]
"""
if isinstance(rows, int) and isinstance(columns, int) and not (isinstance(rows, bool) or isinstance(columns, bool)):
if rows < 1 or columns < 1:
return []
else:
return []
matrix = []
for _ in range(rows):
row = [0 for _ in range(columns)]
matrix.append(row)
return matrix
def fill_lcs_matrix(first_sentence_tokens: tuple, second_sentence_tokens: tuple) -> list:
"""
Fills a longest common subsequence matrix using the Needleman–Wunsch algorithm
:param first_sentence_tokens: a tuple of tokens
:param second_sentence_tokens: a tuple of tokens
:return: a lcs matrix
"""
if not isinstance(first_sentence_tokens, tuple) or not isinstance(second_sentence_tokens, tuple):
return []
if not len(first_sentence_tokens) > 1 or not len(second_sentence_tokens) > 1:
return []
if not isinstance(first_sentence_tokens[0], str) or not isinstance(second_sentence_tokens[0], str):
return []
rows = len(first_sentence_tokens)
columns = len(second_sentence_tokens)
matrix = create_zero_matrix(rows, columns)
if len(matrix) < 1:
return []
    for row in range(rows):
        for column in range(columns):
            if first_sentence_tokens[row] == second_sentence_tokens[column]:
                # a match extends the lcs of the two shorter prefixes by one
                matrix[row][column] = (matrix[row - 1][column - 1] if row and column else 0) + 1
            else:
                matrix[row][column] = max(matrix[row - 1][column] if row else 0,
                                          matrix[row][column - 1] if column else 0)
    return matrix
def find_lcs_length(first_sentence_tokens: tuple, second_sentence_tokens: tuple, plagiarism_threshold: float) -> int:
"""
Finds a length of the longest common subsequence using the Needleman–Wunsch algorithm
When a length is less than the threshold, it becomes 0
:param first_sentence_tokens: a tuple of tokens
:param second_sentence_tokens: a tuple of tokens
:param plagiarism_threshold: a threshold
:return: a length of the longest common subsequence
"""
matrix = fill_lcs_matrix(first_sentence_tokens, second_sentence_tokens)
if not matrix:
if first_sentence_tokens == () or second_sentence_tokens == ():
return 0
return -1
if not isinstance(plagiarism_threshold, float):
return -1
if plagiarism_threshold > 1 or plagiarism_threshold < 0:
return -1
if matrix[-1][-1]/len(second_sentence_tokens) >= plagiarism_threshold:
return matrix[-1][-1]
return 0
def find_lcs(first_sentence_tokens: tuple, second_sentence_tokens: tuple, lcs_matrix: list) -> tuple:
"""
Finds the longest common subsequence itself using the Needleman–Wunsch algorithm
:param first_sentence_tokens: a tuple of tokens
:param second_sentence_tokens: a tuple of tokens
:param lcs_matrix: a filled lcs matrix
:return: the longest common subsequence
"""
row = -1
column = -1
c_s = []
inputs = [(first_sentence_tokens, tuple, str, str),
(second_sentence_tokens, tuple, str, str), (lcs_matrix, list, list, int)]
for given_input in inputs:
if not isinstance(given_input[0], given_input[1]):
return ()
if not len(given_input[0]) > 0:
return ()
if not isinstance(given_input[0][0], given_input[2]):
return ()
if not isinstance(given_input[0][0][0], given_input[3]):
return ()
if lcs_matrix[-1][-1] == 0 or not(lcs_matrix[0][0] == 1 or lcs_matrix[0][0] == 0):
return ()
while abs(row) <= len(first_sentence_tokens) or abs(column) <= len(second_sentence_tokens):
if abs(column - 1) <= len(second_sentence_tokens):
if lcs_matrix[row][column] == lcs_matrix[row][column - 1]:
column = column - 1
continue
if abs(row - 1) <= len(first_sentence_tokens):
if lcs_matrix[row][column] == lcs_matrix[row - 1][column]:
row = row - 1
continue
c_s.append(first_sentence_tokens[row])
row = row - 1
column = column - 1
return tuple(c_s[::-1])
def calculate_plagiarism_score(lcs_length: int, suspicious_sentence_tokens: tuple) -> float:
"""
Calculates the plagiarism score
The score is the lcs length divided by the number of tokens in a suspicious sentence
:param lcs_length: a length of the longest common subsequence
:param suspicious_sentence_tokens: a tuple of tokens
:return: a score from 0 to 1, where 0 means no plagiarism, 1 – the texts are the same
"""
if not isinstance(lcs_length, int) or not isinstance(suspicious_sentence_tokens, tuple):
return -1
if not suspicious_sentence_tokens:
return 0.0
if len(suspicious_sentence_tokens) < lcs_length or lcs_length < 0 or isinstance(lcs_length, bool):
return -1
for token in suspicious_sentence_tokens:
if not isinstance(token, str):
return -1
return lcs_length/len(suspicious_sentence_tokens)
def calculate_text_plagiarism_score(original_text_tokens: tuple,
suspicious_text_tokens: tuple,
plagiarism_threshold=0.3) -> float:
"""
Calculates the plagiarism score: compares two texts line by line using lcs
The score is the sum of lcs values for each pair divided by the number of tokens in suspicious text
At the same time, a value of lcs is compared with a threshold (e.g. 0.3)
:param original_text_tokens: a tuple of sentences with tokens
:param suspicious_text_tokens: a tuple of sentences with tokens
:param plagiarism_threshold: a threshold
:return: a score from 0 to 1, where 0 means no plagiarism, 1 – the texts are the same
"""
if not isinstance(original_text_tokens, tuple) or not isinstance(suspicious_text_tokens, tuple):
return -1
if len(original_text_tokens) < len(suspicious_text_tokens):
# original = [line for line in original_text_tokens]
original = list(original_text_tokens)
for _ in range(len(suspicious_text_tokens) - len(original_text_tokens)):
            original.append(())
original_text_tokens = tuple(original)
scores = []
for i, tokens in enumerate(suspicious_text_tokens):
lcs_length = find_lcs_length(original_text_tokens[i], tokens, plagiarism_threshold)
scores.append(calculate_plagiarism_score(lcs_length, tokens))
return sum(scores)/len(scores)
def find_diff_in_sentence(original_sentence_tokens: tuple, suspicious_sentence_tokens: tuple, lcs: tuple) -> tuple:
"""
Finds words not present in lcs.
:param original_sentence_tokens: a tuple of tokens
:param suspicious_sentence_tokens: a tuple of tokens
:param lcs: a longest common subsequence
:return: a tuple with tuples of indexes
"""
def get_diff(sentence: list, lcs: tuple) -> tuple:
length = len(sentence)
basal_index = -1
output = []
for item in lcs:
basal_index += 1
splitted = ' '.join(sentence).split(item)
chunk = splitted[0]
if chunk:
index_first = basal_index
index_last = basal_index + len(chunk.split())
output.extend([index_first, index_last])
sentence = (item).join(splitted[1:]).split()
if item == lcs[-1]:
if sentence:
output.extend([length - len(sentence), length])
basal_index = max(basal_index + len(chunk.split()), basal_index)
return tuple(output)
for given_input in [original_sentence_tokens, suspicious_sentence_tokens, lcs]:
if not isinstance(given_input, tuple):
return ()
if len(given_input) > 0:
if not isinstance(given_input[0], str):
return ()
if not lcs:
output = []
for sentence in [original_sentence_tokens, suspicious_sentence_tokens]:
if not sentence:
output.append(())
else:
output.append((0, len(sentence)))
return tuple(output)
output_first = get_diff(list(original_sentence_tokens), lcs)
output_second = get_diff(list(suspicious_sentence_tokens), lcs)
return (output_first, output_second)
def accumulate_diff_stats(original_text_tokens: tuple, suspicious_text_tokens: tuple, plagiarism_threshold=0.3) -> dict:
"""
Accumulates the main statistics for pairs of sentences in texts:
lcs_length, plagiarism_score and indexes of differences
:param original_text_tokens: a tuple of sentences with tokens
:param suspicious_text_tokens: a tuple of sentences with tokens
:return: a dictionary of main statistics for each pair of sentences
including average text plagiarism, sentence plagiarism for each sentence and lcs lengths for each sentence
{'text_plagiarism': int,
'sentence_plagiarism': list,
'sentence_lcs_length': list,
'difference_indexes': list}
"""
    addition = [() for _ in range(len(suspicious_text_tokens) - len(original_text_tokens))]
if addition:
original_text_tokens = list(original_text_tokens)
original_text_tokens.extend(addition)
original_text_tokens = tuple(original_text_tokens)
stats = dict()
stats['text_plagiarism'] = calculate_text_plagiarism_score(original_text_tokens,
suspicious_text_tokens,
plagiarism_threshold)
for param in ['sentence_plagiarism', 'sentence_lcs_length', 'difference_indexes']:
stats[param] = []
for i, tokens in enumerate(suspicious_text_tokens):
length = find_lcs_length(original_text_tokens[i], tokens, plagiarism_threshold)
stats['sentence_plagiarism'].append(calculate_plagiarism_score(length, tokens))
stats['sentence_lcs_length'].append(length)
stats['difference_indexes'].append(find_diff_in_sentence(original_text_tokens[i], tokens,
find_lcs(original_text_tokens[i], tokens, fill_lcs_matrix(original_text_tokens[i],
tokens))))
return stats
def create_diff_report(original_text_tokens: tuple, suspicious_text_tokens: tuple, accumulated_diff_stats: dict) -> str:
"""
Creates a diff report for two texts comparing them line by line
:param original_text_tokens: a tuple of sentences with tokens
:param suspicious_text_tokens: a tuple of sentences with tokens
:param accumulated_diff_stats: a dictionary with statistics for each pair of sentences
:return: a report
"""
result = []
length = accumulated_diff_stats['sentence_lcs_length']
for i, item in enumerate(suspicious_text_tokens):
lines = []
try:
for number, sentence in enumerate([original_text_tokens[i], item]):
dif = accumulated_diff_stats['difference_indexes'][i][number]
line = ''
for index, word in enumerate(sentence):
if index in dif:
line += ' |'
line = line + ' ' + word
if len(sentence) in dif:
line += ' |'
lines.append(line)
result.append(f'''- {lines[0]}
+ {lines[1]}
lcs = {length[i]}, plagiarism = {accumulated_diff_stats['sentence_plagiarism'][i] * 100}%''')
except IndexError:
result.append(f'''-
+ {' '.join(list(suspicious_text_tokens[i]))}
lcs = {length[i]}, plagiarism = {accumulated_diff_stats['sentence_plagiarism'][i] * 100}%\n''')
result.append(f'Text average plagiarism (words): {accumulated_diff_stats["text_plagiarism"] * 100}%')
print('\n'.join(result))
return '\n'.join(result)
def find_lcs_length_optimized(first_sentence_tokens: tuple, second_sentence_tokens: tuple,
                              plagiarism_threshold: float) -> int:
    """
    Finds a length of the longest common subsequence using an optimized algorithm
    When a length is less than the threshold, it becomes 0
    :param first_sentence_tokens: a tuple of tokens
    :param second_sentence_tokens: a tuple of tokens
    :param plagiarism_threshold: a threshold
    :return: a length of the longest common subsequence
    """
    if not isinstance(first_sentence_tokens, tuple) or not isinstance(second_sentence_tokens, tuple):
        return -1
    if not len(first_sentence_tokens) > 1 or not len(second_sentence_tokens) > 1:
        return -1
    if not isinstance(first_sentence_tokens[0], str) or not isinstance(second_sentence_tokens[0], str):
        return -1
    # keep only the previous row of the dp matrix to save memory
    previous = [0] * (len(second_sentence_tokens) + 1)
    for token in first_sentence_tokens:
        current = [0] * (len(second_sentence_tokens) + 1)
        for j, other_token in enumerate(second_sentence_tokens):
            if token == other_token:
                current[j + 1] = previous[j] + 1
            else:
                current[j + 1] = max(previous[j + 1], current[j])
        previous = current
    lcs = previous[-1]
    if lcs / len(second_sentence_tokens) < plagiarism_threshold:
        return 0
    return lcs
def get_tokens_id(path_to_file: str, i_d: dict, last_index: int) -> tuple:
with open(path_to_file, encoding='utf-8') as file:
tokens = []
for line in file:
addition = []
for token in line.split():
try:
addition.append(i_d[token])
except KeyError:
i_d[token] = last_index + 1
addition.append(i_d[token])
last_index += 1
tokens.extend(addition)
return tuple(tokens)
def tokenize_big_file(path_to_file: str) -> tuple:
"""
Reads, tokenizes and transforms a big file into a numeric form
:param path_to_file: a path
:return: a tuple with ids
"""
with open('indexes.json', 'r', encoding='utf-8') as file:
i_d = json.load(file)
return get_tokens_id(path_to_file, i_d, len(i_d))
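if __name__ == '__main__':
    # Small self-check (illustrative): two three-token sentences sharing two tokens
    # in order have an lcs of length 2 and a plagiarism score of 2/3.
    first = ('the', 'cat', 'sat')
    second = ('the', 'dog', 'sat')
    lcs_len = find_lcs_length(first, second, plagiarism_threshold=0.3)
    assert lcs_len == 2
    assert abs(calculate_plagiarism_score(lcs_len, second) - 2 / 3) < 1e-9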
|
# -*- coding: utf-8 -*-
import re
from pycolorname.color_system import ColorSystem
class Wikipedia(ColorSystem):
def __init__(self, *args, **kwargs):
ColorSystem.__init__(self, *args, **kwargs)
self.load()
def refresh(self):
full_data = self.request(
'GET',
"https://en.wikipedia.org/wiki/List_of_RAL_colors")
trs = full_data.find_all('tr')
data = {}
style_regex = re.compile(r'.*background *: *'
r'(?P<rgb_hex>[0-9a-fA-F#]+).*')
for tr in trs:
tds = tr.find_all('td')
# The tds are in the order:
# RAL code, colored box, L, a, b, german name, english name, desc
if len(tds) != 8:
continue
name = "{0} ({1})".format(tds[0].text.strip(), tds[6].text.strip())
rgb_hex = re.findall(style_regex, tds[1]['style'])[0]
color = self.hex_to_rgb(rgb_hex)
data[name] = color
return data
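# Illustrative usage (a sketch; requires network access and the pycolorname base class):
# wiki = Wikipedia()
# colors = wiki.refresh()
# colors['RAL 3020 (Traffic red)']  # hypothetical key; keys look like "CODE (english name)"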
|
end = 0
ok = False
lst = []
while end < 9:
i = list(input("Enter {} line of sudoku: ".format(end+1)))
end += 1
lst.append(i)
check_list = [chr(x + ord('0')) for x in range(1, 10)]
trans_lst = [[lst[j][i] for j in range(len(lst))] for i in range(len(lst[0]))]
for rows in lst:
if sorted(rows) == check_list:
ok = True
else:
ok = False
break
if ok:
for cols in trans_lst:
if sorted(cols) == check_list:
ok = True
else:
ok = False
break
if ok:
    # check each 3x3 block
    for box_row in range(0, 9, 3):
        for box_col in range(0, 9, 3):
            block = [lst[box_row + r][box_col + c] for r in range(3) for c in range(3)]
            if sorted(block) != check_list:
                ok = False
                break
        if not ok:
            break
if ok:
print("ok")
else:
print("not ok")
|
import random
#number_per_file = 100
dir = 'data/generation-projet-test/'
def save_test_file():
with open(dir+'ask_for_recipe-newP-newV-test.bio','r') as f:
a1 = f.read()
text_a1 = a1.split('\n\n')
with open(dir+'ask_for_recipe-newP-oldV-test.bio','r') as f:
a2 = f.read()
text_a2 = a2.split('\n\n')
with open(dir+'ask_for_recipe-oldP-newV-test.bio','r') as f:
a3 = f.read()
text_a3 = a3.split('\n\n')
with open(dir+'give_cat-ingredients-newP-newV-test.bio','r') as f:
b1 = f.read()
text_b1 = b1.split('\n\n')
with open(dir+'give_cat-ingredients-newP-oldV-test.bio','r') as f:
b2 = f.read()
text_b2 = b2.split('\n\n')
with open(dir+'give_cat-ingredients-oldP-newV-test.bio','r') as f:
b3 = f.read()
text_b3 = b3.split('\n\n')
with open(dir+'give_ingredients-newP-newV-test.bio','r') as f:
c1 = f.read()
text_c1 = c1.split('\n\n')
with open(dir+'give_ingredients-newP-oldV-test.bio','r') as f:
c2 = f.read()
text_c2 = c2.split('\n\n')
with open(dir+'give_ingredients-oldP-newV-test.bio','r') as f:
c3 = f.read()
text_c3 = c3.split('\n\n')
text = []
text.extend(text_a1)
text.extend(text_a2)
text.extend(text_a3)
text.extend(text_b1)
text.extend(text_b2)
text.extend(text_b3)
text.extend(text_c1)
text.extend(text_c2)
text.extend(text_c3)
random.shuffle(text)
text_s = ''
for i in text:
text_s = text_s + i + '\n\n'
print('Total Number of examples in test_file: ' + str(len(text)))
print("Number of enamples of type 'ask_for_recipe_newP_newV': " + str(len(text_a1)))
print("Number of enamples of type 'ask_for_recipe_newP_oldV': " + str(len(text_a2)))
print("Number of enamples of type 'ask_for_recipe_oldP_newV': " + str(len(text_a3)))
print("Number of enamples of type 'give_cat-ingredients_newP_newV': " + str(len(text_b1)))
print("Number of enamples of type 'give_cat-ingredients_newP_oldV': " + str(len(text_b2)))
print("Number of enamples of type 'give_cat-ingredients_oldP_newV': " + str(len(text_b3)))
print("Number of enamples of type 'give_ingredients_newP_newV': " + str(len(text_c1)))
print("Number of enamples of type 'give_ingredients_newP_oldV': " + str(len(text_c2)))
print("Number of enamples of type 'give_ingredients_oldP_newV': " + str(len(text_c3)))
print("saved to 'data/test.iob'")
f = open('data/test.iob', 'w')
f.write(text_s)
f.close()
def save_test_file_newP_newV():
with open(dir+'ask_for_recipe-newP-newV-test.bio','r') as f:
a1 = f.read()
text_a1 = a1.split('\n\n')
with open(dir+'give_cat-ingredients-newP-newV-test.bio','r') as f:
b1 = f.read()
text_b1 = b1.split('\n\n')
with open(dir+'give_ingredients-newP-newV-test.bio','r') as f:
c1 = f.read()
text_c1 = c1.split('\n\n')
text = []
text.extend(text_a1)
text.extend(text_b1)
text.extend(text_c1)
random.shuffle(text)
text_s = ''
for i in text:
text_s = text_s + i + '\n\n'
print('Total Number of examples in test_file: ' + str(len(text)))
print("Number of enamples of type 'ask_for_recipe_newP_newV': " + str(len(text_a1)))
print("Number of enamples of type 'give_cat-ingredients_newP_newV': " + str(len(text_b1)))
print("Number of enamples of type 'give_ingredients_newP_newV': " + str(len(text_c1)))
print("saved to 'data/test.iob'")
f = open('data/test.iob', 'w')
f.write(text_s)
f.close()
def save_test_file_newP_oldV():
with open(dir + 'ask_for_recipe-newP-oldV-test.bio', 'r') as f:
a1 = f.read()
text_a1 = a1.split('\n\n')#[:number_per_file]
with open(dir+'give_cat-ingredients-newP-oldV-test.bio', 'r') as f:
b1 = f.read()
text_b1 = b1.split('\n\n')
with open(dir+'give_ingredients-newP-oldV-test.bio', 'r') as f:
c1 = f.read()
text_c1 = c1.split('\n\n')#[:number_per_file]
text = []
text.extend(text_a1)
text.extend(text_b1)
text.extend(text_c1)
random.shuffle(text)
text_s = ''
for i in text:
text_s = text_s + i + '\n\n'
print('Total Number of examples in test_file: ' + str(len(text)))
print("Number of enamples of type 'ask_for_recipe_newP_oldV': " + str(len(text_a1)))
print("Number of enamples of type 'give_cat-ingredients_newP_oldV': " + str(len(text_b1)))
print("Number of enamples of type 'give_ingredients_newP_oldV': " + str(len(text_c1)))
print("saved to 'data/test.iob'")
f = open('data/test.iob', 'w')
f.write(text_s)
f.close()
def save_test_file_oldP_newV():
with open(dir + 'ask_for_recipe-oldP-newV-test.bio', 'r') as f:
a1 = f.read()
text_a1 = a1.split('\n\n')#[:number_per_file]
with open(dir+'give_cat-ingredients-oldP-newV-test.bio', 'r') as f:
b1 = f.read()
text_b1 = b1.split('\n\n')
with open(dir+'give_ingredients-oldP-newV-test.bio', 'r') as f:
c1 = f.read()
text_c1 = c1.split('\n\n')#[:number_per_file]
text = []
text.extend(text_a1)
text.extend(text_b1)
text.extend(text_c1)
random.shuffle(text)
text_s = ''
for i in text:
text_s = text_s + i + '\n\n'
    print('Total Number of examples in test_file: ' + str(len(text)))
    print("Number of examples of type 'ask_for_recipe_oldP_newV': " + str(len(text_a1)))
    print("Number of examples of type 'give_cat-ingredients_oldP_newV': " + str(len(text_b1)))
    print("Number of examples of type 'give_ingredients_oldP_newV': " + str(len(text_c1)))
print("saved to 'data/test.iob'")
f = open('data/test.iob', 'w')
f.write(text_s)
f.close()
def save_test_file_little():
with open(dir+'give_cat-ingredients-newP-newV-test.bio','r') as f:
b1 = f.read()
text_b1 = b1.split('\n\n')
with open(dir+'give_cat-ingredients-newP-oldV-test.bio','r') as f:
b2 = f.read()
text_b2 = b2.split('\n\n')
with open(dir+'give_cat-ingredients-oldP-newV-test.bio','r') as f:
b3 = f.read()
text_b3 = b3.split('\n\n')
text = []
text.extend(text_b1)
text.extend(text_b2)
text.extend(text_b3)
random.shuffle(text)
text_s = ''
for i in text:
text_s = text_s + i + '\n\n'
    print('Total Number of examples in test_file: ' + str(len(text)))
    print("Number of examples of type 'give_cat-ingredients_newP_newV': " + str(len(text_b1)))
    print("Number of examples of type 'give_cat-ingredients_newP_oldV': " + str(len(text_b2)))
    print("Number of examples of type 'give_cat-ingredients_oldP_newV': " + str(len(text_b3)))
print("saved to 'data/test.iob'")
f = open('data/test.iob', 'w')
f.write(text_s)
f.close()
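# A minimal consolidation sketch (illustrative, not part of the original script):
# every save_test_file_* variant above follows the same read/concatenate/shuffle/
# write pattern, so they could share one helper like this hypothetical one.
def _build_test_file(filenames, out_path='data/test.iob'):
    text = []
    for name in filenames:
        with open(dir + name, 'r') as f:
            text.extend(f.read().split('\n\n'))
    random.shuffle(text)
    with open(out_path, 'w') as f:
        f.write(''.join(block + '\n\n' for block in text))
    print('Total Number of examples in test_file: ' + str(len(text)))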
|
# virtualenv
# GUI programming
# Python supports many third-party GUI libraries.
# Tk is a graphics toolkit.
# In a GUI, every Button, Label, input box, etc. is a widget; a Frame is a widget that can contain other widgets.
# The pack() method places a widget into its parent container and lays it out. pack is the simplest layout manager; grid supports more complex layouts.
# In the createWidgets method we create a label and a button; the quit method exits the application.
from tkinter import *
import tkinter.messagebox as messagebox
class Application(Frame):
def __init__(self, master=None):
Frame.__init__(self, master)
self.pack()
self.createWidgets()
def createWidgets(self):
self.helloLabel = Label(self, text='Hello, world')
self.helloLabel.pack()
self.quitButton = Button(self, text='Quit', command = self.quit)
self.quitButton.pack()
app = Application()
app.master.title('hello,world')
app.mainloop()
class Appli(Frame):
def __init__(self, master=None):
Frame.__init__(self, master)
self.pack()
self.createWidgets()
def createWidgets(self):
self.nameInput = Entry(self)
self.nameInput.pack()
self.alertButton = Button(self, text='hello', command=self.hello)
self.alertButton.pack()
def hello(self):
name = self.nameInput.get() or 'world'
messagebox.showinfo('Message', 'Hello, %s' % name)
app2 = Appli()
app2.master.title('hello world')
app2.mainloop()
# wxWidgets, Qt, GTK
# Network programming
# Introduction to TCP/IP
# The IP protocol is responsible for sending data from one computer to another over the network. Data is split into small chunks
# and sent out as IP packets. The TCP protocol is built on top of IP: it establishes a reliable connection between two computers and guarantees that packets arrive in order.
# TCP establishes the connection with a handshake, numbers each IP packet so the peer receives them in order, and automatically retransmits lost packets.
# Many higher-level protocols are built on TCP, e.g. the browser's HTTP protocol and the SMTP protocol for sending mail.
# Besides the payload, an IP packet carries the source and destination IP addresses and the source and destination ports.
# TCP programming
import socket
# Create a socket. AF_INET selects IPv4 (AF_INET6 is IPv6); SOCK_STREAM selects the stream-oriented TCP protocol.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('www.sina.com.cn', 80))  # establish the connection; note the argument is a tuple
s.send(b'GET / HTTP/1.1\r\nHost: www.sina.com.cn\r\nConnection: close\r\n\r\n')  # ask Sina to return its home page
# A TCP connection is a bidirectional channel: both sides can send data to each other at the same time.
buffer = []
while True:
d = s.recv(1024)
if d:
buffer.append(d)
else:
break
data = b''.join(buffer)
s.close()
header, html = data.split(b'\r\n\r\n', 1)
print(header.decode('utf-8'))
with open('sina.html', 'wb') as f:
f.write(html)
# UDP programming
# UDP is a connectionless protocol.
# No connection setup is needed: knowing the peer's IP address and port is enough to send a packet. UDP transport is unreliable, but fast.
# UDP also uses a client and a server.
Sudp= socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # SOCK_DGRAM selects the UDP socket type
Sudp.bind(('127.0.0.1', 9999))
print('bind udp on 9999')
while True:
    data, addr=Sudp.recvfrom(1024)  # recvfrom returns the data plus the client's address and port; the server replies with sendto
    print('received from %s:%s'% addr)
    Sudp.sendto(b'hello, %s'% data, addr)
# A UDP client does not call connect; it sends straight to the server with sendto
# (the server loop above never returns, so in practice server and client run as separate processes).
Cudp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
for data in [b'superMan', b'superStar',b'SuperGod']:
    Cudp.sendto(data, ('127.0.0.1', 9999))
    print(Cudp.recv(1024).decode('utf-8'))
Cudp.close()
|
from info.info import TestData
from pages.BasePage import BasePage
from selenium.webdriver.common.by import By
from time import sleep
class LoginPage(BasePage):
"""Locators"""
EMAIL_FIELD = (By.ID, "login-email")
PASSWORD_FIELD = (By.ID, "login-password")
NEXT_BUTTON = (By.XPATH, '//*[@id="login-form"]/button')
SIGNUP_LINK = (By.LINK_TEXT, "Sign up here.")
TRYAGAIN_BUTTON = (By.XPATH, '/html/body/div/div[2]/div/div/div/button[2]')
YAHOO_BUTTON = (By.XPATH, '/html/body/div/div[2]/div/div/div/button[1]/span')
HELP_LINK = (By.LINK_TEXT, "Help")
PRIVACY_LINK = (By.LINK_TEXT, "Privacy")
TERMS_LINK = (By.LINK_TEXT, "Terms")
def __init__(self, driver):
super().__init__(driver)
self.driver.get(TestData.LOGIN_URL)
def fill_email_field(self, email):
self.send(self.EMAIL_FIELD, email)
def click_next_button(self):
self.click(self.NEXT_BUTTON)
def click_help_link(self):
self.click(self.HELP_LINK)
def click_privacy_link(self):
self.click(self.PRIVACY_LINK)
def click_terms_link(self):
self.click(self.TERMS_LINK)
def fill_password_field(self, password):
self.send(self.PASSWORD_FIELD, password)
def click_signup_link(self):
self.click(self.SIGNUP_LINK)
    def try_again_button(self):
        if self.element_located(self.TRYAGAIN_BUTTON, 10) and self.element_clickable(self.TRYAGAIN_BUTTON, 10):
            self.click(self.TRYAGAIN_BUTTON)
    def yahoo_button(self):
        if self.element_located(self.YAHOO_BUTTON, 10) and self.element_clickable(self.YAHOO_BUTTON, 10):
            self.click(self.YAHOO_BUTTON)
def log_in(self, email, password):
self.fill_email_field(email)
self.click_next_button()
        result: bool = self.element_located(self.PASSWORD_FIELD)
        if result:
            self.element_clickable(self.PASSWORD_FIELD)
            self.fill_password_field(password)
            self.click_next_button()
            sleep(5)
        return result
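# Illustrative usage (a sketch; the driver setup and TestData credential fields
# are assumptions, not part of this page object):
# from selenium import webdriver
# driver = webdriver.Chrome()
# page = LoginPage(driver)
# logged_in = page.log_in(TestData.EMAIL, TestData.PASSWORD)  # hypothetical fields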
|
"""
Microframework for composing iterator pipelines
"""
from io import StringIO
class Pipe(object):
"""
Pipe creator
Initializes pipeline. Other pipeline filters can be attached using pipe.
Pipeline filter is callable object having at least one argument for iterator.
>>> cat << (1,2,3) | foreach(lambda item: item+1) > list
[2, 3, 4]
>>> Pipe((1,2,3)).join(foreach(lambda item: item+1)).apply(list)
[2, 3, 4]
"""
def __init__(self, pipe=None):
self.pipe = pipe
if self.pipe:
self.pipe = iter(pipe)
def join(self, right):
if not self.pipe:
return Pipe(right)
self.pipe = right(self.pipe)
return self
__or__ = join
def __iter__(self):
return self.pipe
def apply(self, *p):
res = self.pipe
for func in p:
res = func(res)
return res
# self > other
__gt__ = apply
def __call__(self, pipe):
return Pipe(pipe)
# self << other
__lshift__ = __call__
cat = Pipe()
# filters
def foreach(func):
def process(pipe):
for item in pipe:
yield func(item)
return process
def tee(func):
"""
>>> items = [1, 2, 3]
>>> target = []
>>> list(tee(target.append)(items))
[1, 2, 3]
>>> target
[1, 2, 3]
"""
def process(pipe):
for obj in pipe:
func(obj)
yield obj
return process
def null(pipe):
"""
>>> null([1,2,3])
"""
for obj in pipe:
pass
def skip(head=0, tail=0):
"""
>>> list(skip(head=1,tail=2)([1,2,3,4,5]))
[2, 3]
>>> list(skip(head=3)([1,2,3,4,5]))
[4, 5]
>>> list(skip(tail=3)([1,2,3,4,5]))
[1, 2]
"""
def process(pipe):
buf = []
for i, line in enumerate(pipe):
if i < head:
continue
if tail > len(buf):
buf.append(line)
continue
buf.append(line)
yield buf.pop(0)
return process
def head(count):
"""
>>> list(head(count=2)([1,2,3,4,5]))
[1, 2]
"""
def process(pipe):
for i, line in enumerate(pipe):
if i < count:
yield line
return process
def tail(count):
"""
>>> list(tail(count=2)([1,2,3,4,5]))
[4, 5]
"""
def process(pipe):
buf = []
for line in pipe:
if len(buf) >= count:
buf.pop(0)
buf.append(line)
for line in buf:
yield line
return process
def split(sep='|', cols=None, proc=str.strip):
"""
>>> items = ['a | b ', 'c |d']
>>> list(split()(items))
[['a', 'b'], ['c', 'd']]
>>> list(split(cols=['f1', 'f2'])(items))
[{'f1': 'a', 'f2': 'b'}, {'f1': 'c', 'f2': 'd'}]
"""
def process(pipe):
for line in pipe:
row = line.split(sep)
if proc:
                row = list(map(proc, row))
if cols:
row = dict(zip(cols, row))
yield row
return process
def transform(mapping):
"""
>>> items = [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]
>>> res = list(transform({'a': 'b', 'b': 'a', 'c': lambda r: r['a']+r['b']})(items))
    >>> res == [{'a': 2, 'b': 1, 'c': 3}, {'a': 4, 'b': 3, 'c': 7}]
    True
"""
def process(pipeline):
for row in pipeline:
if isinstance(mapping, dict):
row = dict((col, value(row) if callable(value) else row[value])
for col, value in mapping.items())
elif isinstance(mapping, (tuple, list)):
row = [value(row) if callable(value) else row[value]
for i,value in enumerate(mapping)]
yield row
return process
def join(sep='\t', cols=None, proc=None, prefix='', suffix=''):
"""
>>> items = [['a', 'b'], ['c', 'd']]
>>> list(join(sep=',')(items))
['a,b', 'c,d']
>>> list(join(sep=',', cols=[1,0])(items))
['b,a', 'd,c']
>>> items = [{'f1': 'a', 'f2': 'b'}, {'f1': 'c', 'f2': 'd'}]
>>> list(join(sep='|', cols=['f2', 'f1'])(items))
['b|a', 'd|c']
"""
def process(pipe):
for row in pipe:
if cols:
row = [row[col] for col in cols]
if proc:
                row = list(map(proc, row))
yield "{prefix}{line}{suffix}".format(
line=sep.join(row),
prefix=prefix,
suffix=suffix
)
return process
def header(*p):
"""
>>> items = ['c', 'd']
>>> list(header('a', 'b')(items))
['a', 'b', 'c', 'd']
"""
def process(pipe):
for item in p:
yield item
for item in pipe:
yield item
return process
def footer(*p):
"""
>>> items = ['a', 'b']
>>> list(footer('c', 'd')(items))
['a', 'b', 'c', 'd']
"""
def process(pipe):
for item in pipe:
yield item
for item in p:
yield item
return process
def load_data(db, table, columns=None, clean=True, before=None, after=None):
"""
loads pipeline to table
db - cursor object
table - table name to copy data to
columns - table columns to use
clean - if true, table is truncated before load
before - called before execution without arguments
after - called afrer execution without arguments
"""
def process(pipe):
try:
cur = db.cursor()
if before:
before()
f = StringIO('\n'.join(pipe))
if clean:
cur.execute('truncate table %s' % table)
cur.copy_from(f, table, columns=columns)
if after:
after()
db.commit()
except:
db.rollback()
raise
return process
if __name__ == "__main__":
import doctest
doctest.testmod()
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import utils
# Apply unfold operation to input in order to prepare it to be processed against a sliding kernel whose shape
# is passed as argument.
def unfold_map2d(input, kernel_height, kernel_width):
# Before performing an operation between an input and a sliding kernel we need to unfold the input, i.e. the
# windows on which the kernel is going to be applied are extracted and set apart. For this purpose, the kernel
# shape is passed as argument to the operation. The single extracted windows are reshaped by the unfold operation
# to rank 1 vectors. The output of F.unfold(input, (kernel_height, kernel_width)).transpose(1, 2) is a
# tensor structured as follows: the first dimension is the batch dimension; the second dimension is the slide
# dimension, i.e. each element is a window extracted at a different offset (and reshaped to a rank 1 vector);
# the third dimension is a scalar within said vector.
inp_unf = F.unfold(input, (kernel_height, kernel_width)).transpose(1, 2)
# Now we need to reshape our tensors to the actual shape that we want in output, which is the following: the
# first dimension is the batch dimension, the second dimension is the output channels dimension, the third and
# fourth are height and width dimensions (obtained by splitting the former third dimension, the slide dimension,
# representing a linear offset within the input map, into two new dimensions representing height and width), the
# fifth is the window components dimension, corresponding to the elements of a window extracted from the input with
# the unfold operation (reshaped to rank 1 vectors). The resulting tensor is then returned.
inp_unf = inp_unf.view(
input.size(0), # Batch dimension
1, # Output channels dimension
input.size(2) - kernel_height + 1, # Height dimension
input.size(3) - kernel_width + 1, # Width dimension
-1 # Filter/window dimension
)
return inp_unf
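# Quick shape check for unfold_map2d (an illustrative sketch, not part of the module):
#   x = torch.randn(8, 3, 10, 12)   # (batch, channels, height, width)
#   unfold_map2d(x, 4, 5).shape     # -> (8, 1, 7, 8, 60)
#   i.e. (batch, 1, 10 - 4 + 1, 12 - 5 + 1, 3 * 4 * 5)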
# Custom vectorial function representing sum of an input with a sliding kernel, just like convolution is multiplication
# by a sliding kernel (as an analogy think convolution as a kernel_mult2d)
def kernel_sum2d(input, kernel):
# In order to perform the sum with the sliding kernel we first need to unfold the input. The resulting tensor will
# have the following structure: the first dimension is the batch dimension, the second dimension is the output
# channels dimension, the third and fourth are height and width dimensions, the fifth is the filter/window
# components dimension, corresponding to the elements of a window extracted from the input with the unfold
# operation and equivalently to the elements of a filter (reshaped to rank 1 vectors)
inp_unf = unfold_map2d(input, kernel.size(2), kernel.size(3))
# At this point the two tensors can be summed. The kernel is reshaped by unsqueezing singleton dimensions along
# the batch dimension and the height and width dimensions. By exploiting broadcasting, it happens that the inp_unf
# tensor is broadcast over the output channels dimension (since its shape along this dimension is 1) and therefore
# it is automatically processed against the different filters of the kernel. In the same way, the kernel is
# broadcast along the first dimension (and thus automatically processed against the different inputs along
# the batch dimension) and along the third and fourth dimensions (and thus automatically processed against
# different windows extracted from the image at different height and width offsets).
out = inp_unf + kernel.view(1, kernel.size(0), 1, 1, -1)
return out
# Test the implementation of the kernel_sum2d function
def test_kernelsum():
x = torch.randn(
8, # Batch dimension
3, # Input channels dimension
10, # Height dimension
12 # Width dimension
)
w = torch.randn(
6, # Output channels dimension
3, # Input channels dimension
4, # Height dimension
5 # Width dimension
)
output = torch.empty(
x.shape[0], # Batch dimension
w.shape[0], # Output channels dimension
x.shape[2] - w.shape[2] + 1, # Height dimension
x.shape[3] - w.shape[3] + 1, # Width dimension
w.shape[1] * w.shape[2] * w.shape[3] # Filter dimension
)
# Cross-validate vectorial implementation with for-loop implementation
for batch in range(0, x.shape[0]): # Loop over batch dimension
for outchn in range(0, w.shape[0]): # Loop over output channel dimension
for i in range(0, x.shape[2] - w.shape[2] + 1): # Loop over height dimension
for j in range(0, x.shape[3] - w.shape[3] + 1): # Loop over width dimension
output[batch, outchn, i, j, :] = (
x[batch, :, i:i + w.shape[2], j:j + w.shape[3]] + w[outchn, :, :, :]).view(-1)
out = kernel_sum2d(x, w)
    print(output.equal(out))  # Should print True
# Compute product between input and sliding kernel
def kernel_mult2d(x, w, b=None):
return F.conv2d(x, w, b)
# Projection of input on weight vectors
def vector_proj2d(x, w, bias=None):
# Compute scalar product with sliding kernel
prod = kernel_mult2d(x, w)
# Divide by the norm of the weight vector to obtain the projection
norm_w = torch.norm(w.view(w.size(0), -1), p=2, dim=1).view(1, -1, 1, 1)
norm_w += (norm_w == 0).float() # Prevent divisions by zero
if bias is None: return prod / norm_w
return prod / norm_w + bias.view(1, -1, 1, 1)
# Projection of input on weight vector clipped between 0 and +inf
def clp_vector_proj2d(x, w, bias=None):
return vector_proj2d(x, w, bias).clamp(0)
# Sigmoid similarity
def sig_sim2d(x, w, bias=None):
proj = vector_proj2d(x, w, bias)
# return torch.sigmoid((proj - proj.mean())/proj.std())
return torch.sigmoid(proj)
# Cosine similarity between an input map and a sliding kernel
def cos_sim2d(x, w, bias=None):
proj = vector_proj2d(x, w)
# Divide by the norm of the input to obtain the cosine similarity
x_unf = unfold_map2d(x, w.size(2), w.size(3))
norm_x = torch.norm(x_unf, p=2, dim=4)
norm_x += (norm_x == 0).float() # Prevent divisions by zero
if bias is None: return proj / norm_x
return (proj / norm_x + bias.view(1, -1, 1, 1)).clamp(-1, 1)
# Cosine similarity clipped between 0 and 1
def clp_cos_sim2d(x, w, bias=None):
return cos_sim2d(x, w, bias).clamp(0)
# Cosine similarity remapped to 0, 1
def raised_cos2d(x, w, bias=None):
return (cos_sim2d(x, w, bias) + 1) / 2
# Returns function that computes raised cosine power p
def raised_cos2d_pow(p=2):
def raised_cos2d_pow_p(x, w, bias=None):
if bias is None: return raised_cos2d(x, w).pow(p)
return (raised_cos2d(x, w).pow(p) + bias.view(1, -1, 1, 1)).clamp(0, 1)
return raised_cos2d_pow_p
# Softmax on weight vector projection activation function
def proj_smax2d(x, w, bias=None):
e_pow_y = torch.exp(vector_proj2d(x, w, bias))
    return e_pow_y / e_pow_y.sum(1, keepdim=True)
# Response of a gaussian activation function
def gauss(x, w, sigma=None):
d = torch.norm(kernel_sum2d(x, -w), p=2, dim=4)
if sigma is None: return torch.exp(
-d.pow(2) / (2 * utils.shape2size(tuple(w[0].size())))) # heuristic: use number of dimensions as variance
# if sigma is None: return torch.exp(-d.pow(2) / (2 * torch.norm(w.view(w.size(0), 1, -1) - w.view(1, w.size(0), -1), p=2, dim=2).max().pow(2)/w.size(0))) # heuristic: normalization condition
# if sigma is None: return torch.exp(-d.pow(2) / (2 * d.mean().pow(2)))
return torch.exp(-d.pow(2) / (2 * (sigma.view(1, -1, 1, 1).pow(2))))
# Returns lambda function for exponentially decreasing learning rate scheduling
def sched_exp(tau=1000, eta_min=0.01):
gamma = torch.exp(torch.tensor(-1. / tau)).item()
return lambda eta: (eta * gamma).clamp(eta_min)
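# Usage sketch for sched_exp (assumes only torch): each call multiplies the current
# rate by exp(-1/tau), so after about 5*tau steps the rate has decayed by exp(-5)
# and is clamped at the eta_min floor.
def test_schedexp():
    sched = sched_exp(tau=1000, eta_min=0.01)
    eta = torch.tensor(0.1)
    for _ in range(5000):
        eta = sched(eta)
    print(eta.item())  # Should print 0.01, since 0.1 * exp(-5) < 0.01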
# This module represents a layer of convolutional neurons that are trained with a Hebbian-WTA rule
class HebbianMap2d(nn.Module):
# Types of learning rules
RULE_BASE = 'base' # delta_w = eta * lfb * (x - w)
RULE_HEBB = 'hebb' # delta_w = eta * y * lfb * (x - w)
# Types of LFB kernels
LFB_GAUSS = 'gauss'
LFB_DoG = 'DoG'
LFB_EXP = 'exp'
LFB_DoE = 'DoE'
def __init__(self,
in_channels,
out_channels,
kernel_size,
competitive=True,
random_abstention=False,
lfb_value=0,
similarity=raised_cos2d_pow(2),
out=vector_proj2d,
weight_upd_rule='hebb',
eta=0.1,
lr_schedule=None,
tau=1000):
super(HebbianMap2d, self).__init__()
# Init weights
if hasattr(kernel_size, '__len__') and len(kernel_size) == 1: kernel_size = kernel_size[0]
if not hasattr(kernel_size, '__len__'): kernel_size = [kernel_size, kernel_size]
stdv = 1 / (in_channels * kernel_size[0] * kernel_size[1]) ** 0.5
self.register_buffer('weight', torch.empty(out_channels, in_channels, kernel_size[0], kernel_size[1]))
        nn.init.uniform_(self.weight, -stdv, stdv)  # Same initialization used by default PyTorch conv modules (from "Efficient BackProp", LeCun et al.)
# Enable/disable features as random abstention, competitive learning, lateral feedback
self.competitive = competitive
self.random_abstention = competitive and random_abstention
self.lfb_on = competitive and isinstance(lfb_value, str)
self.lfb_value = lfb_value
# Set output function, similarity function and learning rule
self.similarity = similarity
self.out = out
self.teacher_signal = None # Teacher signal for supervised training
self.weight_upd_rule = weight_upd_rule
        # Initial learning rate and LR scheduling policy. The LR is wrapped in a registered buffer so that we can save/load it
self.register_buffer('eta', torch.tensor(eta))
self.lr_schedule = lr_schedule # LR scheduling policy
# Init variables for statistics collection
if self.random_abstention:
self.register_buffer('victories_count', torch.zeros(out_channels))
else:
self.register_buffer('victories_count', None)
def set_teacher_signal(self, y):
self.teacher_signal = y
def forward(self, x):
y = self.out(x, self.weight)
if self.training: self.update(x)
return y
def update(self, x):
# Prepare the inputs
y = self.similarity(x, self.weight)
t = self.teacher_signal
if t is not None: t = t.unsqueeze(2).unsqueeze(3) * torch.ones_like(y, device=y.device)
y = y.permute(0, 2, 3, 1).contiguous().view(-1, self.weight.size(0))
if t is not None: t = t.permute(0, 2, 3, 1).contiguous().view(-1, self.weight.size(0))
x_unf = unfold_map2d(x, self.weight.size(2), self.weight.size(3))
x_unf = x_unf.permute(0, 2, 3, 1, 4).contiguous().view(y.size(0), 1, -1)
# Random abstention
if self.random_abstention:
abst_prob = self.victories_count / (self.victories_count.max() + y.size(0) / y.size(1)).clamp(1)
scores = y * (torch.rand_like(abst_prob, device=y.device) >= abst_prob).float().unsqueeze(0)
else:
scores = y
# Competition. The returned winner_mask is a bitmap telling where a neuron won and where one lost.
if self.competitive:
if t is not None: scores *= t
winner_mask = (scores == scores.max(1, keepdim=True)[0]).float()
            if self.random_abstention:  # Update statistics if using random abstention
winner_mask_sum = winner_mask.sum(0) # Number of inputs over which a neuron won
self.victories_count += winner_mask_sum
self.victories_count -= self.victories_count.min().item()
else:
winner_mask = torch.ones_like(y, device=y.device)
# Lateral feedback
        if self.lfb_on:
            # NOTE: the lateral-feedback kernel computation is missing from this
            # excerpt (self.lfb_kernel is never initialized), so fail loudly instead
            # of silently using a placeholder value.
            raise NotImplementedError("LFB kernels are not implemented in this excerpt")
else:
lfb_out = winner_mask
if self.competitive:
lfb_out[lfb_out == 0] = self.lfb_value
elif t is not None:
lfb_out = t
# Compute step modulation coefficient
r = lfb_out # RULE_BASE
        if self.weight_upd_rule == self.RULE_HEBB: r *= y
# Compute delta
r_abs = r.abs()
r_sign = r.sign()
delta_w = r_abs.unsqueeze(2) * (r_sign.unsqueeze(2) * x_unf - self.weight.view(1, self.weight.size(0), -1))
# Since we use batches of inputs, we need to aggregate the different update steps of each kernel in a unique
        # update. We do this by taking the weighted average of the steps, the weights being the r coefficients that
# determine the length of each step
r_sum = r_abs.sum(0)
r_sum += (r_sum == 0).float() # Prevent divisions by zero
delta_w_avg = (delta_w * r_abs.unsqueeze(2)).sum(0) / r_sum.unsqueeze(1)
# Apply delta
self.weight += self.eta * delta_w_avg.view_as(self.weight)
# LFB kernel shrinking and LR schedule
if self.lfb_on: self.lfb_kernel = self.lfb_kernel.pow(self.alpha)
if self.lr_schedule is not None: self.eta = self.lr_schedule(self.eta)
# Generate a batch of random inputs for testing
def gen_batch(centers, batch_size, win_height, win_width):
# Generate an input "image" by first generating patches as random perturbations on the cluster centers and then
# concatenating them in the horizontal and vertical dimensions. Repeat to generate a batch.
batch = torch.empty(0)
for j in range(batch_size): # Loop to generate batch
image = torch.empty(0)
for k in range(win_height): # Loop to concat image rows vertically
row = torch.empty(0)
for l in range(win_width): # Loop to concat patches horizontally
# Generate an input patch by perturbing a cluster center
index = int(torch.floor(torch.rand(1) * centers.size(0)).item())
patch = centers[index] + 0.1 * torch.randn_like(centers[index])
                # Concatenate patch horizontally to the image row
row = torch.cat((row, patch), 2)
# Concatenate row to the image vertically
image = torch.cat((image, row), 1)
# Concatenate the image to the batch
batch = torch.cat((batch, image.unsqueeze(0)), 0)
return batch
# Test for the batch generation function
def test_genbatch():
# Generate centers around which clusters are built
centers = torch.randn(6, 3, 4, 5)
# Generate a batch of inputs around the centers
batch = gen_batch(centers, 10, 2, 2)
# Check that the batch size is correct (just to be sure)
print(batch.size()) # Should print 10x3x8x10
# Test the implementation of the HebbianMap2d
def test_hebbianmap():
# Function for printing summary information
def print_results(model, centers):
print('\n' + '#' * 79 + '\n')
responses = model(centers).squeeze()
top_act, closest_neurons = responses.max(1)
for i in range(responses.size(0)): print(
"Closest neuron to center " + str(i) + ": " + str(closest_neurons[i].item()) + ", output: " + str(
top_act[i].item()))
print()
top_act, closest_centers = responses.max(0)
for i in range(responses.size(1)): print(
"Closest center to neuron " + str(i) + ": " + str(closest_centers[i].item()) + ", output: " + str(
top_act[i].item()))
print('\n' + '#' * 79 + '\n')
torch.random.manual_seed(3)
kernel_shape = (6, 3, 4, 5)
num_centers = 6
num_iter = 2000
batch_size = 10
win_height = 2
win_width = 2
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = HebbianMap2d(
in_channels=kernel_shape[1],
        out_channels=kernel_shape[0],
kernel_size=[kernel_shape[2], kernel_shape[3]],
competitive=True,
random_abstention=False,
lfb_value=0,
similarity=raised_cos2d_pow(2),
out=cos_sim2d,
weight_upd_rule=HebbianMap2d.RULE_BASE,
eta=0.1,
lr_schedule=sched_exp(1000, 0.01),
tau=1000
)
model.eval()
model.to(device)
# Generate centers around which clusters are built
centers = torch.randn(num_centers, *kernel_shape[1:4])
# Check the distance between the centers and the randomly initialized weight vectors
print_results(model, centers)
# Train the model: generate a batch of inputs and feed it to the model, repeat for the desired number of iterations
model.train()
for i in range(num_iter):
batch = gen_batch(centers, batch_size, win_height, win_width)
batch = batch.to(device)
model(batch)
model.eval()
# Verify that the weight vectors of the model have converged to the cluster centers
print_results(model, centers)
if __name__ == '__main__':
test_kernelsum()
test_genbatch()
test_hebbianmap()
|
from numpy.random import poisson
from random import randint
from math import ceil,floor
for team in range(4,7):
if team == 4:
        initialTime = 15
elif team == 5:
initialTime = 10
elif team == 6:
initialTime = 5
workMins = 0
wait = 0
trucks = 0
while workMins < 480 or trucks > 0:
trucks += poisson(2)
nextHour = workMins + 60
for i in range(trucks):
trucks -= 1
time = randint(initialTime, initialTime + 10)
wait += time * trucks
workMins += time
if workMins >= nextHour:
break
if workMins <= nextHour:
workMins = nextHour
else:
wait += trucks * (workMins - nextHour)
workSalary = team * ((workMins/60.0) * 25)
waitCost = (wait/60.0) * 50
print("Number of workers:",team)
print("Final cost: $", "{0:.2f}".format(workSalary+waitCost))
print("Hours worked:", "{0:.2f}".format(workMins/60.0))
print("\tSalary: $", "{0:.2f}".format(workSalary))
print("Truck waiting time:", "{0:.2f}".format(wait/60))
print("\tTruck cost: $", "{0:.2f}".format(waitCost))
|
"""Hooks which override deep learning interfaces with remote execution functionality."""
from .base import BaseHook
from .keras import KerasHook
from .tensorflow import TensorflowHook
from .torch import TorchHook
__all__ = ['BaseHook', 'TorchHook', 'KerasHook', 'TensorflowHook']
|
from WebScraper import WebScraper
import argparse
def run_scraper(extract=None,name=None):
scraper = WebScraper()
if extract=='CDS':
data = scraper.cds_scrape()
        if name is not None:
print('CDS:',data[data.Name==name].Name.iloc[-1],'Price:',data[data.Name==name].Price.iloc[-1],'bps')
else:
print(data)
return data
if __name__=="__main__":
parser=argparse.ArgumentParser()
parser.add_argument("--extract",type=str)
parser.add_argument("--name",type=str)
args=parser.parse_args()
data=run_scraper(
extract=args.extract,
name=args.name
)
|
# -*- coding: utf-8 -*-
def ex2():
    s = input()
    tlist = s.split()
    # First Solution - Using Bubble sort
    for a in range(len(tlist) - 1):
        for b in range(1, len(tlist) - a):
            if int(tlist[b - 1]) > int(tlist[b]):
                tlist[b - 1], tlist[b] = tlist[b], tlist[b - 1]
    print(tlist[0])
    print(tlist[len(tlist) - 1])
    # Second Solution - track the min and max values directly
    """
    mini = int(tlist[0])
    maxi = int(tlist[0])
    for i in range(1, len(tlist)):
        if int(tlist[i]) < mini:
            mini = int(tlist[i])
        else:
            if maxi < int(tlist[i]):
                maxi = int(tlist[i])
    print(mini)
    print(maxi)
    """
if __name__ == '__main__':
    ex2()
|
"""Management command to create DKIM keys."""
import os
from django.core.management.base import BaseCommand
from django.utils.encoding import smart_str
from django.utils.translation import gettext as _
from modoboa.lib import sysutils
from modoboa.parameters import tools as param_tools
from .... import models
from .... import signals
from ....constants import DKIM_WRITE_ERROR, ALARM_OPENED
class Command(BaseCommand):  # Django discovers management commands by the name "Command"
"""Command class."""
def create_new_dkim_key(self, domain):
"""Create a new DKIM key."""
storage_dir = param_tools.get_global_parameter("dkim_keys_storage_dir")
pkey_path = os.path.join(storage_dir, "{}.pem".format(domain.name))
alarm_qset = domain.alarms.filter(internal_name=DKIM_WRITE_ERROR)
if not os.access(storage_dir, os.W_OK):
if not alarm_qset.exists():
domain.alarms.create(
title=_("DKIM path non-writable"),
internal_name=DKIM_WRITE_ERROR)
else:
alarm = alarm_qset.first()
if alarm.status != ALARM_OPENED:
alarm.reopen()
return
elif alarm_qset.exists():
alarm_qset.first().close()
key_size = (
domain.dkim_key_length if domain.dkim_key_length
else self.default_key_length)
code, output = sysutils.exec_cmd(
"openssl genrsa -out {} {}".format(pkey_path, key_size))
        if code:
            print("Failed to generate DKIM private key for domain {}: {}"
                  .format(domain.name, smart_str(output)))
            return
domain.dkim_private_key_path = pkey_path
code, output = sysutils.exec_cmd(
"openssl rsa -in {} -pubout".format(pkey_path))
        if code:
            print("Failed to generate DKIM public key for domain {}: {}"
                  .format(domain.name, smart_str(output)))
            return
public_key = ""
for cpt, line in enumerate(smart_str(output).splitlines()):
if cpt == 0 or line.startswith("-----"):
continue
public_key += line
domain.dkim_public_key = public_key
domain.save(update_fields=["dkim_public_key", "dkim_private_key_path"])
def add_arguments(self, parser):
"""Add arguments to command."""
parser.add_argument(
"--domain", type=str, dest="domain", default="",
help="Domain target for keys generation."
)
def handle(self, *args, **options):
"""Entry point."""
self.default_key_length = param_tools.get_global_parameter(
"dkim_default_key_length")
if options["domain"] != "":
domain = models.Domain.objects.filter(name=options["domain"],
enable_dkim=True,
dkim_private_key_path="")
if domain.exists():
self.create_new_dkim_key(domain[0])
signals.new_dkim_keys.send(sender=self.__class__,
domains=domain)
return
qset = models.Domain.objects.filter(
enable_dkim=True, dkim_private_key_path="")
for domain in qset:
self.create_new_dkim_key(domain)
if qset.exists():
signals.new_dkim_keys.send(sender=self.__class__, domains=qset)
|
"""
===========================
How to use CutMix and MixUp
===========================
.. note::
    Try on `Colab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_cutmix_mixup.ipynb>`_
or :ref:`go to the end <sphx_glr_download_auto_examples_transforms_plot_cutmix_mixup.py>` to download the full example code.
:class:`~torchvision.transforms.v2.CutMix` and
:class:`~torchvision.transforms.v2.MixUp` are popular augmentation strategies
that can improve classification accuracy.
These transforms are slightly different from the rest of the Torchvision
transforms, because they expect
**batches** of samples as input, not individual images. In this example we'll
explain how to use them: after the ``DataLoader``, or as part of a collation
function.
"""
# %%
import torch
from torchvision.datasets import FakeData
from torchvision.transforms import v2
NUM_CLASSES = 100
# %%
# Pre-processing pipeline
# -----------------------
#
# We'll use a simple but typical image classification pipeline:
preproc = v2.Compose([
v2.PILToTensor(),
v2.RandomResizedCrop(size=(224, 224), antialias=True),
v2.RandomHorizontalFlip(p=0.5),
v2.ToDtype(torch.float32, scale=True), # to float32 in [0, 1]
v2.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)), # typically from ImageNet
])
dataset = FakeData(size=1000, num_classes=NUM_CLASSES, transform=preproc)
img, label = dataset[0]
print(f"{type(img) = }, {img.dtype = }, {img.shape = }, {label = }")
# %%
#
# One important thing to note is that neither CutMix nor MixUp are part of this
# pre-processing pipeline. We'll add them a bit later once we define the
# DataLoader. Just as a refresher, this is what the DataLoader and training loop
# would look like if we weren't using CutMix or MixUp:
from torch.utils.data import DataLoader
dataloader = DataLoader(dataset, batch_size=4, shuffle=True)
for images, labels in dataloader:
print(f"{images.shape = }, {labels.shape = }")
print(labels.dtype)
# <rest of the training loop here>
break
# %%
# Where to use MixUp and CutMix
# -----------------------------
#
# After the DataLoader
# ^^^^^^^^^^^^^^^^^^^^
#
# Now let's add CutMix and MixUp. The simplest way to do this is right after the
# DataLoader: the DataLoader has already batched the images and labels for us,
# and this is exactly what these transforms expect as input:
dataloader = DataLoader(dataset, batch_size=4, shuffle=True)
cutmix = v2.CutMix(num_classes=NUM_CLASSES)
mixup = v2.MixUp(num_classes=NUM_CLASSES)
cutmix_or_mixup = v2.RandomChoice([cutmix, mixup])
for images, labels in dataloader:
print(f"Before CutMix/MixUp: {images.shape = }, {labels.shape = }")
images, labels = cutmix_or_mixup(images, labels)
print(f"After CutMix/MixUp: {images.shape = }, {labels.shape = }")
# <rest of the training loop here>
break
# %%
#
# Note how the labels were also transformed: we went from a batched label of
# shape (batch_size,) to a tensor of shape (batch_size, num_classes). The
# transformed labels can still be passed as-is to a loss function like
# :func:`torch.nn.functional.cross_entropy`.
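#
# As a quick check (a sketch, not part of the original training loop), a dummy
# batch of logits can stand in for real model outputs:

logits = torch.randn(labels.shape[0], NUM_CLASSES)  # stand-in for model(images)
loss = torch.nn.functional.cross_entropy(logits, labels)
print(f"{loss = }")

# %%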
#
# As part of the collation function
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Passing the transforms after the DataLoader is the simplest way to use CutMix
# and MixUp, but one disadvantage is that it does not take advantage of the
# DataLoader multi-processing. For that, we can pass those transforms as part of
# the collation function (refer to the `PyTorch docs
# <https://pytorch.org/docs/stable/data.html#dataloader-collate-fn>`_ to learn
# more about collation).
from torch.utils.data import default_collate
def collate_fn(batch):
return cutmix_or_mixup(*default_collate(batch))
dataloader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=2, collate_fn=collate_fn)
for images, labels in dataloader:
print(f"{images.shape = }, {labels.shape = }")
# No need to call cutmix_or_mixup, it's already been called as part of the DataLoader!
# <rest of the training loop here>
break
# %%
# Non-standard input format
# -------------------------
#
# So far we've used a typical sample structure where we pass ``(images,
# labels)`` as inputs. MixUp and CutMix will magically work by default with most
# common sample structures: tuples where the second parameter is a tensor label,
# or dict with a "label[s]" key. Look at the documentation of the
# ``labels_getter`` parameter for more details.
#
# If your samples have a different structure, you can still use CutMix and MixUp
# by passing a callable to the ``labels_getter`` parameter. For example:
batch = {
"imgs": torch.rand(4, 3, 224, 224),
"target": {
"classes": torch.randint(0, NUM_CLASSES, size=(4,)),
"some_other_key": "this is going to be passed-through"
}
}
def labels_getter(batch):
return batch["target"]["classes"]
out = v2.CutMix(num_classes=NUM_CLASSES, labels_getter=labels_getter)(batch)
print(f"{out['imgs'].shape = }, {out['target']['classes'].shape = }")
|
def fatal(msg="***No error message provided***"):
    print("FATAL ERROR:", msg, "EXITING...", sep="\n")
    raise SystemExit(1)
|
from scipy.io import wavfile
import math
import cmath
import numpy as np
import matplotlib.pyplot as plt
###########
#DFT function
############
def dft(data, N):
out = np.zeros(shape = N, dtype=np.complex_)
for m in range (N):
for n in range(N):
out[m] += data[n] * ( np.cos(2 * cmath.pi * n * m / N) - (np.sin(2 * cmath.pi * n * m / N) * 1j))
return out
###########
#IDFT function
############
def idft(x, N):
out = np.zeros(shape = N, dtype=np.complex_)
    for n in range(N):
        for m in range(N):
out [n] += x[m] * (np.cos(2 * cmath.pi * n * m / N) + (np.sin(2 * cmath.pi * n * m / N) * 1j))
out[n] /= N
return out
# ##########
# #Trying idft and dft on a small signal
# ###########
# x = [0, 1, 2, 3]
# dft_out = dft (x,4)
# print "My DFT output: ", dft_out
# print "Numpy dft output: ", np.fft.fft(x)
# idft_out = idft(dft_out,4)
# print "My idft output: ", idft_out
# print "Numpy idft output: ", np.fft.ifft(np.fft.fft(x))
# mag = abs(dft_out)
# print "My Magnitude is: ", mag
# t1 = np.linspace(0.0,1.0,4)
# ##########
# #Trying wav file generation for small file
# ###########
# wavIn = np.zeros(shape = 4)
# print t1
# wavIn = idft_out.real.astype(np.int16)
# print wavIn
# wavfile.write("output1.wav", 4, wavIn)
###########
#Reading from wav file
############
rate, data = wavfile.read("try4.wav")
# ###########
# #GETTING THE DFT AND ITS MAGNITUDE
# ############
DFT = dft (data, rate)
magnitude = abs(DFT)
###########
#Plotting the magnitude
############
t = np.linspace(0.0,1.0,rate)
plt.figure(1)
plt.plot(t , magnitude)
plt.xlabel('Time')
plt.ylabel('Magnitude')
plt.grid(True)
plt.show()
# ###########
# #GETTING THE IDFT AND DOING THE OUTPUT WAV FILE
# ############
IDFT = idft(DFT, rate)
wavfile.write("output.wav", rate, IDFT.real)
|
from __future__ import annotations
import textwrap
from typing import (
    Any,
    AsyncIterator,
    Dict,
    Final,
    Sequence,
    Tuple,
    TypeVar,
)
from .output.types import FieldSpec, PaginatedResult
from .exceptions import BackendAPIVersionError
from .session import api_session
MAX_PAGE_SIZE: Final = 100
T = TypeVar('T')
async def execute_paginated_query(
root_field: str,
variables: Dict[str, Tuple[Any, str]],
fields: Sequence[FieldSpec],
*,
limit: int,
offset: int,
) -> PaginatedResult:
if limit > MAX_PAGE_SIZE:
raise ValueError(f"The page size cannot exceed {MAX_PAGE_SIZE}")
query = '''
query($limit:Int!, $offset:Int!, $var_decls) {
$root_field(
limit:$limit, offset:$offset, $var_args) {
items { $fields }
total_count
}
}'''
query = query.replace('$root_field', root_field)
query = query.replace('$fields', ' '.join(f.field_ref for f in fields))
query = query.replace(
'$var_decls',
', '.join(f'${key}: {value[1]}'
for key, value in variables.items()),
)
query = query.replace(
'$var_args',
', '.join(f'{key}:${key}'
for key in variables.keys()),
)
query = textwrap.dedent(query).strip()
var_values = {key: value[0] for key, value in variables.items()}
var_values['limit'] = limit
var_values['offset'] = offset
data = await api_session.get().Admin._query(query, var_values)
return PaginatedResult(
total_count=data[root_field]['total_count'],
items=data[root_field]['items'],
fields=fields,
)
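# A minimal illustration of the template substitution above (the field and variable
# names here are hypothetical, not taken from the client API). For
# root_field='agent_list', variables={'status': ('ALIVE', 'String')}, and fields whose
# field_refs are 'id' and 'status', the constructed query would be:
#
#   query($limit:Int!, $offset:Int!, $status: String) {
#     agent_list(
#       limit:$limit, offset:$offset, status:$status) {
#       items { id status }
#       total_count
#     }
#   }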
async def generate_paginated_results(
    root_field: str,
    variables: Dict[str, Tuple[Any, str]],
    fields: Sequence[FieldSpec],
    *,
    page_offset: int,
    page_size: int,
) -> AsyncIterator[PaginatedResult]:
    if page_size > MAX_PAGE_SIZE:
        raise ValueError(f"The page size cannot exceed {MAX_PAGE_SIZE}")
    if api_session.get().api_version < (6, '20210815'):
        if variables['filter'][0] is not None or variables['order'][0] is not None:
            raise BackendAPIVersionError(
                "filter and order arguments for paginated lists require v6.20210815 or later.",
            )
        # Remove the unsupported arguments to stay compatible with older managers.
        variables.pop('filter')
        variables.pop('order')
    offset = page_offset
    while True:
        result = await execute_paginated_query(
            root_field, variables, fields,
            limit=page_size, offset=offset,
        )
        yield result
        offset += page_size
        if offset >= result.total_count:
            break
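# Usage sketch (inside an active api_session; the root field and variables are
# illustrative, and `fields` is assumed to be a prepared Sequence[FieldSpec]):
#
#   async for page in generate_paginated_results(
#       'agent_list',
#       {'filter': (None, 'String'), 'order': (None, 'String')},
#       fields,
#       page_offset=0, page_size=20,
#   ):
#       process(page.items)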
|
'''
This lets you specify an experiment to run. You must provide it with:
- an experiment name
- the number of iterations to run for
- the constants file to use
- the statistics to write out
Each one of these may be a list. If you want to use the same statistics
or number of iterations, then they will be duplicated over and over
for each experimental run.
'''
import os
import sys
import shutil
import cPickle
from numpy import mean
import numpy as np
import pdb, sys
########################################################################
##########CONFIGURATION OPTIONS#########################################
########################################################################
# experiment_list is a list of experiments to perform. It must be a
# list.
pre_experiment_list = ['baseline','always_play','no_world',
'only_CD','only_CD_always_play',
'symmetric_payoff','long_genes',
'traitor_superpayoff','nonnegative_payoff',
'more_mutations','bigsim']
experiment_list = [x + '_1' for x in pre_experiment_list]
experiment_list += [x + '_2' for x in pre_experiment_list]
experiment_list += [x + '_3' for x in pre_experiment_list]
# iteration_list is a list of the number of iterations to run for each
# experiment.
iteration_list = [400,400,400,400,400,400,600,400,400,400,400]*3
# constants_list is a list of files containing constants.
constants_list = ['baseline','always_play','high_permeability',
'onlyCD','always_play_onlyCD',
'symmetric_payoff','long_genes','traitor_megapayoff',
'nonegative_payoff','more_mutations','bigsim']*3
# statistics_list is a list of all the statistics to write out each
# iteration. It must be a list, but may also be a list of lists.
statistics_list = ['mean_fitness', 'die_offs','num_agents','fitness',
'vengeful','sucker','collaborator','cruel',
'selective','traitor','retreating','popular',
'forgiving','prisoner','timid','nice','tot_games',
'per_game_fitness','mean_per_game_fitness',
'tot_games_played','cooperator','defector','quitter',
'optimality']
save_the_world = [True, False, False, False, True, False, True,
False, False, False,False] * 3
save_tile_stats = True
########################################################################
##########CONSTANT PARAMETERS###########################################
########################################################################
# DO NOT CHANGE THESE
home = os.getenv("HOME")
subdirs = ''
root = os.path.join(home, subdirs, 'evolution/experiments')
constant_dir = os.path.join(home, subdirs, 'evolution/constant_files') # the directory that contains all the constants
abs_root = os.path.join(home, subdirs, 'evolution/')
########################################################################
########################################################################
########################################################################
# elaborate the stuff into a list
def mkdir(directory):
if not os.path.exists(directory):
os.makedirs(directory)
mkdir(root)
if not type(iteration_list) == list:
iteration_list = [iteration_list]
if not type(constants_list) == list:
constants_list = [constants_list]
if not type(statistics_list[0]) == list:
statistics_list = [statistics_list]
if not type(save_the_world) == list:
save_the_world = [save_the_world]
if not type(save_tile_stats) == list:
save_tile_stats = [save_tile_stats]
if not len(experiment_list) == len(iteration_list):
iteration_list *= len(experiment_list)
if not len(experiment_list) == len(constants_list):
    constants_list *= len(experiment_list)
if not len(experiment_list) == len(statistics_list):
statistics_list *= len(experiment_list)
if not len(experiment_list) == len(save_the_world):
save_the_world *= len(experiment_list)
if not len(experiment_list) == len(save_tile_stats):
save_tile_stats *= len(experiment_list)
experiments = zip(experiment_list, iteration_list, constants_list, statistics_list, save_the_world, save_tile_stats)
def cleanModules():
    # removes all the goddamn modules from Python's goddamn namespace
    # WHY IS THIS SO HARD TO DO!?
modules = ['agent','constants','gameMaster','genome',
'tile','trust','world','constants_override']
for m in modules:
if m in sys.modules:
del(sys.modules[m])
for exp, iters, const, stats, save_world, save_tiles in experiments:
# create a directory for the files to go in
cleanModules()
# this is the best way to do the constant file reloading. i know,
# it's ridiculous, but whatever.
constant_file = os.path.join(constant_dir, const + '.py')
target_constant_file = os.path.join(abs_root, 'constants_override.py')
shutil.copy(constant_file, target_constant_file)
exp_root = os.path.join(root, exp)
exp_iter_root = os.path.join(exp_root, 'iterations')
if os.path.exists(exp_root):
continue
mkdir(exp_root)
mkdir(exp_iter_root)
# write out some parameters
f = open(os.path.join(exp_root, 'experiment_params'),'w')
f.write('Constants file: %s\n'%const)
f.write('Statistics: %s\n'%str(stats))
f.write('Iterations: %s\n'%str(iters))
module = __import__('constants_override', globals(), locals(), ['*'])
for k in dir(module):
if not hasattr(getattr(module, k), '__call__') and '__' not in k:
f.write('%s : %s\n'%(k, str(getattr(module, k))))
f.close()
import world
w = world.World()
for i in range(iters-1):
w.iterate()
w.iterate(regen_children=False)
for stat in stats:
if stat == 'mean_fitness':
fname = os.path.join(exp_root, stat)
np.save(fname, w.mean_fitness)
elif stat == 'mean_trust':
fname = os.path.join(exp_root, stat)
np.save(fname, w.mean_trust)
elif stat == 'die_offs':
fname = os.path.join(exp_root, stat)
np.save(fname, w.die_offs)
elif stat == 'mean_per_game_fitness':
fname = os.path.join(exp_root, stat)
np.save(fname, w.mean_per_game_fitness)
elif stat == 'tot_games_played':
fname = os.path.join(exp_root, stat)
np.save(fname, w.tot_games_played)
else:
            if save_tiles:
fname = os.path.join(exp_iter_root,stat)
np.save(fname, w.statistics[stat])
if stat in w.global_statistics:
fname = os.path.join(exp_root, 'global_'+stat)
np.save(fname, w.global_statistics[stat])
f = open(os.path.join(exp_root, 'lineages'),'w')
for x in w.agents:
f.write('%s\n'%x.parents)
f.close()
    if save_world:
# finally, pickle the world
world_dest = os.path.join(exp_root, 'world.pkl')
f = open(world_dest,'w')
cPickle.dump(w, f)
f.close()
|
from xlutils.copy import copy  # can only modify files in .xls format; writing to an .xlsx file corrupts the original
import os
import xlrd
def base_url(filename=None):
return os.path.join(os.path.dirname(__file__), filename)
work = xlrd.open_workbook(base_url('test1.xls'))  # instantiate the workbook object
print(work)
old_content = copy(work)  # copy the original xls
ws = old_content.get_sheet(0)  # get a sheet by index
ws.write(3, 3, 111)  # modify the content of the new xls file
old_content.save(base_url('test1.xls'))  # save after modifying
|
from setuptools import setup
setup(
name='Fisheye Webservice',
version='0.1',
    long_description="web service for conversion of fisheye videos",
packages=['Webservice'],
include_package_data=True,
zip_safe=False,
install_requires=['Flask', 'peewee', 'Flask-WTF', 'wtforms']
)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 8 13:16:19 2019
histogramm and timecourse data
@author: fabio
"""
import numpy as np
#import pandas as pd
import matplotlib.pyplot as plt
#from smaller_model_function_MtoU import simple_small as ss
from MtoU_model import simple as ss
import multiprocessing
#import time
import pickle
pool = multiprocessing.Pool(multiprocessing.cpu_count())
X_Y=[[153,80,40]]
X_Y_sm=[[182,80,40]]
X_Y_sl=[[191,80,40]]
X_Y_max=[[203,80,40]]
reps=10000
repeat=reps*X_Y
repeat_sm=reps*X_Y_sm
repeat_sl=reps*X_Y_sl
repeat_max=reps*X_Y_max
duration=201
if __name__ == '__main__':
status_small = pool.map(ss, repeat)
status_m = pool.map(ss, repeat_sm)
status_large = pool.map(ss, repeat_sl)
status_max = pool.map(ss, repeat_max)
# status_small = []
# for i in repeat:
# status_small.append(ss(i))
# print(status_small)
# #
# status_large = []
# for i in repeat:
# status_large.append(sl(i))
# status_m = []
# for i in repeat:
# status_m.append(sl(i))
#small system
reporters_diff_small = np.zeros([len(repeat),duration])
reporters_off_small = np.zeros([len(repeat),duration])
reporters_on_small = np.zeros([len(repeat),duration])
cenH_list_small = np.zeros([len(repeat),duration])
EcoRV_list_small = np.zeros([len(repeat),duration])
#medium system
reporters_diff_m = np.zeros([len(repeat),duration])
reporters_off_m = np.zeros([len(repeat),duration])
reporters_on_m = np.zeros([len(repeat),duration])
cenH_list_m = np.zeros([len(repeat),duration])
EcoRV_list_m = np.zeros([len(repeat),duration])
#large system
reporters_diff_large = np.zeros([len(repeat),duration])
reporters_off_large = np.zeros([len(repeat),duration])
reporters_on_large = np.zeros([len(repeat),duration])
cenH_list_large = np.zeros([len(repeat),duration])
EcoRV_list_large = np.zeros([len(repeat),duration])
#max system
reporters_diff_max = np.zeros([len(repeat),duration])
reporters_off_max = np.zeros([len(repeat),duration])
reporters_on_max = np.zeros([len(repeat),duration])
cenH_list_max = np.zeros([len(repeat),duration])
EcoRV_list_max = np.zeros([len(repeat),duration])
#wt system
cenH_average = np.zeros(len(repeat))
EcoRV_average = np.zeros(len(repeat))
#4.5kb insert system
cenH_average_m = np.zeros(len(repeat))
EcoRV_average_m = np.zeros(len(repeat))
#6kb insert system
cenH_average_large = np.zeros(len(repeat))
EcoRV_average_large = np.zeros(len(repeat))
for elt in range(len(repeat)):
cenH_small = np.array(status_small[elt][0])
EcoRV_small = np.array(status_small[elt][1])
# generate list with cenH and EcoRV states being both at different states (1)
reporter_diff_small = cenH_small != EcoRV_small
    # transform that vector into an int vector
reporter_diff_small = reporter_diff_small.astype(int)
# copy this vector into reporter_states vector
reporters_diff_small[elt]=reporter_diff_small
# generate list with cenH and EcoRV states being both switched off
reporter_off_small = np.zeros(len(cenH_small),'int')
for index in range(len(cenH_small)):
if cenH_small[index]==1 and EcoRV_small[index]==1:
reporter_off_small[index]=1
else:
reporter_off_small[index]=0
reporters_off_small[elt]=reporter_off_small
# generate list with cenH and EcoRV states being both switched on
reporter_on_small = np.zeros(len(cenH_small),'int')
for Index in range(len(cenH_small)):
if cenH_small[Index]==0 and EcoRV_small[Index]==0:
reporter_on_small[Index]=1
else:
reporter_on_small[Index]=0
reporters_on_small[elt]=reporter_on_small
#switch the values of the list (1 stands now for timepoint when reporter is on)
cenH_small=1-cenH_small
EcoRV_small=1-EcoRV_small
cenH_list_small[elt]=cenH_small
EcoRV_list_small[elt]=EcoRV_small
cenH_m = np.array(status_m[elt][0])
EcoRV_m = np.array(status_m[elt][1])
# generate list with cenH and EcoRV states being both at different states (1)
reporter_diff_m = cenH_m != EcoRV_m
    # transform that vector into an int vector
reporter_diff_m = reporter_diff_m.astype(int)
# copy this vector into reporter_states vector
reporters_diff_m[elt]=reporter_diff_m
# generate list with cenH and EcoRV states being both switched off
reporter_off_m = np.zeros(len(cenH_m),'int')
for index in range(len(cenH_m)):
if cenH_m[index]==1 and EcoRV_m[index]==1:
reporter_off_m[index]=1
else:
reporter_off_m[index]=0
reporters_off_m[elt]=reporter_off_m
# generate list with cenH and EcoRV states being both switched on
reporter_on_m = np.zeros(len(cenH_m),'int')
for Index in range(len(cenH_m)):
if cenH_m[Index]==0 and EcoRV_m[Index]==0:
reporter_on_m[Index]=1
else:
reporter_on_m[Index]=0
reporters_on_m[elt]=reporter_on_m
#switch the values of the list (1 stands now for timepoint when reporter is on)
cenH_m=1-cenH_m
EcoRV_m=1-EcoRV_m
cenH_list_m[elt]=cenH_m
EcoRV_list_m[elt]=EcoRV_m
cenH_large = np.array(status_large[elt][0])
EcoRV_large = np.array(status_large[elt][1])
# generate list with cenH and EcoRV states being both at different states (1)
reporter_diff_large = cenH_large != EcoRV_large
    # transform that vector into an int vector
reporter_diff_large = reporter_diff_large.astype(int)
# copy this vector into reporter_states vector
reporters_diff_large[elt]=reporter_diff_large
# generate list with cenH and EcoRV states being both switched off
reporter_off_large = np.zeros(len(cenH_large),'int')
for index in range(len(cenH_large)):
if cenH_large[index]==1 and EcoRV_large[index]==1:
reporter_off_large[index]=1
else:
reporter_off_large[index]=0
reporters_off_large[elt]=reporter_off_large
# generate list with cenH and EcoRV states being both switched on
reporter_on_large = np.zeros(len(cenH_large),'int')
for Index in range(len(cenH_large)):
if cenH_large[Index]==0 and EcoRV_large[Index]==0:
reporter_on_large[Index]=1
else:
reporter_on_large[Index]=0
reporters_on_large[elt]=reporter_on_large
#switch the values of the list (1 stands now for timepoint when reporter is on)
cenH_large=1-cenH_large
EcoRV_large=1-EcoRV_large
cenH_list_large[elt]=cenH_large
EcoRV_list_large[elt]=EcoRV_large
cenH_max = np.array(status_max[elt][0])
EcoRV_max = np.array(status_max[elt][1])
# generate list with cenH and EcoRV states being both at different states (1)
reporter_diff_max = cenH_max != EcoRV_max
    # transform that vector into an int vector
reporter_diff_max = reporter_diff_max.astype(int)
# copy this vector into reporter_states vector
reporters_diff_max[elt]=reporter_diff_max
# generate list with cenH and EcoRV states being both switched off
reporter_off_max = np.zeros(len(cenH_max),'int')
for index in range(len(cenH_max)):
if cenH_max[index]==1 and EcoRV_max[index]==1:
reporter_off_max[index]=1
else:
reporter_off_max[index]=0
reporters_off_max[elt]=reporter_off_max
# generate list with cenH and EcoRV states being both switched on
reporter_on_max = np.zeros(len(cenH_max),'int')
for Index in range(len(cenH_max)):
if cenH_max[Index]==0 and EcoRV_max[Index]==0:
reporter_on_max[Index]=1
else:
reporter_on_max[Index]=0
reporters_on_max[elt]=reporter_on_max
#switch the values of the list (1 stands now for timepoint when reporter is on)
cenH_max=1-cenH_max
EcoRV_max=1-EcoRV_max
cenH_list_max[elt]=cenH_max
EcoRV_list_max[elt]=EcoRV_max
print(cenH_small)
print(cenH_m)
print(cenH_large)
print(cenH_max)
    # calculate the average switch-off time of all system sizes
# #wt system
# CENH = np.array(status_small[elt][0])
# ECORV = np.array(status_small[elt][1])
# if 1 in CENH:
# # tell me when
# cenH_silenced = list(CENH).index(1)
# if 1 in ECORV:
# # tell me when
# EcoRV_silenced = list(ECORV).index(1)
# #4.5kb insert system
# CENH_m = np.array(status_m[elt][0])
# ECORV_m = np.array(status_m[elt][1])
# if 1 in CENH_m:
# # tell me when
# cenH_silenced_m = list(CENH_m).index(1)
# if 1 in ECORV_m:
# # tell me when
# EcoRV_silenced_m = list(ECORV_m).index(1)
# #6kb insert system
# CENH_large = np.array(status_large[elt][0])
# ECORV_large = np.array(status_large[elt][1])
# if 1 in CENH_large:
# # tell me when
# cenH_silenced_large = list(CENH_large).index(1)
# if 1 in ECORV_large:
# # tell me when
# EcoRV_silenced_large = list(ECORV_large).index(1)
# #wt system
# cenH_average[elt] = cenH_silenced
# EcoRV_average[elt] = EcoRV_silenced
# #4.5kb insert system
# cenH_average_m[elt] = cenH_silenced_m
# EcoRV_average_m[elt] = EcoRV_silenced_m
# #6kb insert system
# cenH_average_large[elt] = cenH_silenced_large
# EcoRV_average_large[elt] = EcoRV_silenced_large
# #wt system
# cenH_Average = sum(cenH_average)/len(repeat)
# EcoRV_Average = sum(EcoRV_average)/len(repeat)
# #4.5kb insert system
# cenH_Average_m = sum(cenH_average_m)/len(repeat)
# EcoRV_Average_m = sum(EcoRV_average_m)/len(repeat)
# #6kb insert system
# cenH_Average_large = sum(cenH_average_large)/len(repeat)
# EcoRV_Average_large = sum(EcoRV_average_large)/len(repeat)
# print(cenH_Average)
# print(cenH_Average_m)
# print(cenH_Average_large)
# print(EcoRV_Average)
# print(EcoRV_Average_m)
# print(EcoRV_Average_large)
diff_small = (sum(reporters_diff_small))/reps
off_small = (sum(reporters_off_small))/reps
on_small = (sum(reporters_on_small))/reps
diff_m = (sum(reporters_diff_m))/reps
off_m = (sum(reporters_off_m))/reps
on_m = (sum(reporters_on_m))/reps
diff_large = (sum(reporters_diff_large))/reps
off_large = (sum(reporters_off_large))/reps
on_large = (sum(reporters_on_large))/reps
diff_max = (sum(reporters_diff_max))/reps
off_max = (sum(reporters_off_max))/reps
on_max = (sum(reporters_on_max))/reps
#output
cenH_total_small = (sum(cenH_list_small))/reps
#
EcoRV_total_small = (sum(EcoRV_list_small))/reps
cenH_total_m = (sum(cenH_list_m))/reps
#
EcoRV_total_m = (sum(EcoRV_list_m))/reps
cenH_total_large = (sum(cenH_list_large))/reps
#
EcoRV_total_large = (sum(EcoRV_list_large))/reps
cenH_total_max = (sum(cenH_list_max))/reps
EcoRV_total_max = (sum(EcoRV_list_max))/reps
# save state_list
with open('cenH_small_1x.txt', 'wb') as F:
pickle.dump(cenH_total_small, F)
# save state_list
with open('cenH_m_1x.txt', 'wb') as F:
pickle.dump(cenH_total_m, F)
# save state_list
with open('cenH_large_1x.txt', 'wb') as F:
pickle.dump(cenH_total_large, F)
# save state_list
with open('cenH_max_1x.txt', 'wb') as F:
pickle.dump(cenH_total_max, F)
# save state_list
with open('EcoRV_small_1x.txt', 'wb') as F:
pickle.dump(EcoRV_total_small, F)
# save state_list
with open('EcoRV_m_1x.txt', 'wb') as F:
pickle.dump(EcoRV_total_m, F)
# save state_list
with open('EcoRV_large_1x.txt', 'wb') as F:
pickle.dump(EcoRV_total_large, F)
# save state_list
with open('EcoRV_max_1x.txt', 'wb') as F:
pickle.dump(EcoRV_total_max, F)
# ## save state_list
# with open('cenH_small_SAU24_new.txt', 'rb') as F:
# cenH_total_small = pickle.load(F)
# ## save state_list
# with open('cenH_m_SAU24_new.txt', 'rb') as F:
# cenH_total_m = pickle.load(F)
# ## save state_list
# with open('cenH_large_SAU24_new.txt', 'rb') as F:
# cenH_total_large = pickle.load(F)
# ## save state_list
# with open('cenH_max_SAU24_new.txt', 'rb') as F:
# cenH_total_max = pickle.load(F)
# ## save state_list
# with open('EcoRV_small_SAU24_new.txt', 'rb') as F:
# EcoRV_total_small = pickle.load(F)
# ## save state_list
# with open('EcoRV_m_SAU24_new.txt', 'rb') as F:
# EcoRV_total_m = pickle.load(F)
# ## save state_list
# with open('EcoRV_large_SAU24_new.txt', 'rb') as F:
# EcoRV_total_large = pickle.load(F)
# ## save state_list
# with open('EcoRV_max_SAU24_new.txt', 'rb') as F:
# EcoRV_total_max = pickle.load(F)
time = np.array(range(duration))
y_axis = np.array([cenH_total_small, EcoRV_total_small, cenH_total_m, EcoRV_total_m, cenH_total_large, EcoRV_total_large,cenH_total_max, EcoRV_total_max,])
#fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=((36, 12)))
fig, (ax1) = plt.subplots(nrows=1, ncols=1, figsize=((15, 10)))
#default line colors and styles
ax1.plot(time,EcoRV_total_small, color='yellowgreen', label='mCherry 23 kb region')
ax1.plot(time,cenH_total_small, color='cyan', label='cenH 23 kb region')
ax1.plot(time,EcoRV_total_m, color='black', label='mCherry 27.5 kb region')
#ax1.plot(time,cenH_total_m,'ro', label='cenH 24 kb region')
ax1.plot(time,EcoRV_total_large, color='red', label='mCherry 29 kb region')
#ax1.plot(time,cenH_total_large, color='blue', label='cenH 26 kb region')
ax1.plot(time,EcoRV_total_max, color='gold', label='mCherry 31 kb region')
#ax1.plot(time,cenH_total_max, color='purple', label='cenH 28 kb region')
ax1.legend(loc='upper left')
#ax1.set_ylabel("fraction of 'ON' cells", fontsize = 35)
#ax1.set_xlabel('t (generations)', fontsize = 35)
ax1.set_yscale('log')
ax1.tick_params(labelsize='30')
ax1.set_ylim([0.001,1])
ax1.set_xlim([1,200])
ax1.legend(fontsize='25')
plt.savefig("timecourse_big_1x.pdf")
# #fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=((36, 12)))
# fig, (ax1) = plt.subplots(nrows=1, ncols=1, figsize=((12, 10)))
# #default line colors and styles
# ax1.plot(time,EcoRV_total_small, color='yellowgreen', label='EcoRV 20 kb region')
# ax1.plot(time,cenH_total_small, color='cyan', label='cenH 20 kb region')
# ax1.plot(time,EcoRV_total_m, color='red', label='EcoRV 24 kb region')
# #ax1.plot(time,cenH_total_m,'ro', label='cenH 24 kb region')
# ax1.plot(time,EcoRV_total_large, color='black', label='EcoRV 26 kb region')
# #ax1.plot(time,cenH_total_large, color='blue', label='cenH 26 kb region')
# ax1.plot(time,EcoRV_total_max, color='gold', label='EcoRV 28 kb region')
# #ax1.plot(time,cenH_total_max, color='purple', label='cenH 28 kb region')
# #ax1.set_title('Combined debt growth over time')
# #ax1.legend(loc='upper left')
# ax1.set_ylabel('fraction of ''ON'' cells', fontsize = 25)
# ax1.set_xlabel('t (generations)', fontsize = 25)
# ax1.set_yscale('log')
# ax1.tick_params(labelsize='18')
# ax1.set_ylim([0.01,1])
# ax1.set_xlim([1,46])
# ax1.legend(fontsize='20')
#plt.savefig("timecourse_small_SUS55_g90")
# ax2.plot(time, off_small, color='k')
# ax2.plot(time, on_small, color='b')
# ax2.plot(time, diff_small, color='r')
# #ax1.set_title('Combined debt growth over time')
# #ax1.legend(loc='upper left')
# ax2.set_ylabel('fraction of cells (small system)', fontsize = 26)
# ax2.set_xlabel('t (generations)', fontsize = 25)
# ax2.tick_params(labelsize='18')
# ax2.set_ylim([0,1])
# ax2.set_xlim([1,100])
# ax3.plot(time, off_large, color='k')
# ax3.plot(time, on_large, color='b')
# ax3.plot(time, diff_large, color='r')
# #ax1.set_title('Combined debt growth over time')
# #ax1.legend(loc='upper left')
# ax3.set_ylabel('fraction of cells (large system)', fontsize = 25)
# ax3.set_xlabel('t (generations)', fontsize = 25)
# ax3.tick_params(labelsize='18')
# ax3.set_ylim([0,1])
# ax3.set_xlim([1,100])
|
import unittest
from katas.kyu_7.reverse_it import reverse_it
class ReverseItTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(reverse_it('Hello'), 'olleH')
def test_equals_2(self):
self.assertEqual(reverse_it(314159), 951413)
def test_equals_3(self):
self.assertEqual(reverse_it('314159'), '951413')
def test_equals_4(self):
self.assertEqual(reverse_it('123.123'), '321.321')
def test_equal_5(self):
func = lambda: 1
self.assertEqual(reverse_it(func), func)
def test_equal_6(self):
self.assertEqual(reverse_it([1, 2, 3, 4]), [1, 2, 3, 4])
|
from django.contrib import admin
from django.core.mail import EmailMessage
from .models import *
# Register your models here.
class OrderAdmin(admin.ModelAdmin):
def save_model(self, request, obj, form, change):
        if 'status' in form.changed_data and obj.status == 1:
            obj.save()
email = EmailMessage(
'Hello',
                'Your order has been sent to one of our couriers',
to=[obj.user.email])
email.send()
else:
super(OrderAdmin, self).save_model(request, obj, form, change)
admin.site.register(Order, OrderAdmin)
admin.site.register(OrderToProduct)
|
import theano.tensor as T
import theano
import numpy as np
from braindecode.veganlasagne.objectives import tied_losses
import lasagne
from numpy.random import RandomState
def test_tied_losses():
# Regression Test, expected values never checked for correctness ....
n_classes = 3
n_sample_preds = 2
n_pairs =1
lasagne.random.set_rng(RandomState(83347))
preds_sym = T.fmatrix()
loss = tied_losses(preds_sym, n_sample_preds, n_classes, n_pairs)
loss_fn = theano.function([preds_sym], loss)
# First example, both predictions identical, can only lead to one result
assert np.allclose([[ 0.89794558]],
loss_fn(np.array([[0.6,0.3,0.1],[0.6,0.3,0.1]], dtype=np.float32)))
assert np.allclose([[ 0.89794558]],
loss_fn(np.array([[0.6,0.3,0.1],[0.6,0.3,0.1]], dtype=np.float32)))
# Second example, two different predictions, can lead to different results
assert np.allclose([[ 1.46424174]],
loss_fn(np.array([[0.1,0.6,0.3],[0.6,0.3,0.1]],
dtype=np.float32)))
assert np.allclose([[ 1.65519595]],
loss_fn(np.array([[0.1,0.6,0.3],[0.6,0.3,0.1]],
dtype=np.float32)))
def test_tied_losses_multiple_pairs():
# Regression Test, expected values never checked for correctness ....
n_classes = 2
n_sample_preds = 4
n_pairs =2
lasagne.random.set_rng(RandomState(329489393))
preds = np.array([[0.1,0.9],[0.2,0.8],[0.3,0.7],[0.2,0.8],
[0.4,0.6],[0.4,0.6],[0.4,0.6],[0.4,0.6],
[0.6,0.4],[0.4,0.6],[0.2,0.8],[0.5,0.5]], dtype=np.float32)
preds_sym = T.fmatrix()
loss = tied_losses(preds_sym, n_sample_preds, n_classes, n_pairs)
loss_fn = theano.function([preds_sym], loss)
assert np.allclose([[ 0.63903177, 0.36177287],
[ 0.67301154, 0.67301154],
[ 0.59191853, 0.69314712]],
loss_fn(preds))
assert np.allclose([[ 0.54480541, 0.52613449],
[ 0.67301154, 0.67301154],
[ 0.71355808, 0.7776612 ]],
loss_fn(preds))
assert np.allclose([[ 0.63903177, 0.54480541],
[ 0.67301154, 0.67301154],
[ 0.59191853, 0.71355808]],
loss_fn(preds))
|
import pickle, uuid, base64, hashlib, os
# DEVELOPER: https://github.com/undefinedvalue0103/nullcore-1.0/
database = {}
def save_database(database):
pickle.dump(database, open('database.pickle', 'wb'))
def load_database():
    try:
        return pickle.load(open('database.pickle', 'rb'))
    except (FileNotFoundError, EOFError, pickle.UnpicklingError):
        save_database({})
        return load_database()
def setValue(key, value):
database = load_database()
database[key] = value
save_database(database)
def deleteValue(key):
database = load_database()
    del database[key]
save_database(database)
def getValue(key):
return load_database().get(key)
def random_key():
    return base64.encodebytes(  # encodestring was removed in Python 3.9
        bytes.fromhex(
            hashlib.md5(
                bytes(
                    str(
                        uuid.uuid4()), 'utf-8')).hexdigest()[:16])).decode('utf-8').replace('\n', '')
def set_random(value):
key = random_key()
setValue(key, value)
return key
def size(self=None):
return os.stat('database.pickle').st_size
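# Usage sketch (assumes the working directory is writable, since the store lives in
# database.pickle next to the script):
if __name__ == '__main__':
    token = set_random({'user': 'alice'})  # store a value under a random key
    print(getValue(token))                 # -> {'user': 'alice'}
    setValue('counter', 1)
    print(getValue('counter'))             # -> 1
    print(size(), 'bytes on disk')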
|
from __future__ import print_function
from sys import argv
def trim_broadcast(path):
    # Keep only the first line for each distinct leading key, writing to <path>.out
    with open(path) as fp, open(path + '.out', 'w') as f:
        key = ""
        for line in fp:
            if len(line) > 10:
                temp = line.split()[0]
                if not key == temp:
                    f.write(line)
                    key = temp
for i in range(1, len(argv)):
    trim_broadcast(argv[i])
|
import amath.ext._rounding as r
ROUND_TRUNC = 'ROUND_TRUNC' # Round towards 0
ROUND_CEILING = 'ROUND_CEILING' # Round towards Infinity
ROUND_FLOOR = 'ROUND_FLOOR' # Round towards -Infinity
ROUND_UP = 'ROUND_UP' # Round away from zero
ROUND_HALF_UP = 'ROUND_HALF_UP' # Round to nearest with ties going towards Infinity
ROUND_HALF_AWAY = 'ROUND_HALF_AWAY' # Round to nearest with ties going away from 0
ROUND_HALF_EVEN = 'ROUND_HALF_EVEN' # Round to nearest with ties going to nearest even
ROUND_HALF_IN = 'ROUND_HALF_IN' # Round to nearest with ties going towards 0
ROUND_HALF_DOWN = 'ROUND_HALF_DOWN' # Round to nearest with ties going towards -Infinity
def round(n, d=0, *, tp=ROUND_HALF_AWAY):
if tp == ROUND_TRUNC:
return r.trunc(n, d)
if tp == ROUND_CEILING:
return r.ceil(n, d)
if tp == ROUND_FLOOR:
return r.floor(n, d)
if tp == ROUND_UP:
return r.away(n, d)
if tp == ROUND_HALF_UP:
return r.half_up(n, d)
if tp == ROUND_HALF_AWAY:
return r.half_away(n, d)
if tp == ROUND_HALF_EVEN:
return r.half_even(n, d)
if tp == ROUND_HALF_IN:
return r.half_in(n, d)
    if tp == ROUND_HALF_DOWN:
        return r.half_down(n, d)
    raise ValueError(f"Unknown rounding type: {tp!r}")
def floor(n, d=0):
return round(n, d, tp=ROUND_FLOOR)
def ceil(n, d=0):
return round(n, d, tp=ROUND_CEILING)
def trunc(n, d=0):
return round(n, d, tp=ROUND_TRUNC)
def chop(x, max=1e-10):
if type(x) is complex:
if abs(x.imag) <= max:
x = x.real
if abs(x.real) <= max:
x = x.imag
return x
return x if abs(x) > max else 0
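# Usage sketch: chop() is self-contained Python, while round() and its aliases
# dispatch to the amath.ext._rounding backend (assumed importable); the expected
# outputs below follow from the documented tie-breaking rules.
if __name__ == '__main__':
    print(chop(1e-12))                     # -> 0 (below the 1e-10 threshold)
    print(chop(3 + 1e-12j))                # -> 3.0 (negligible imaginary part dropped)
    print(round(2.5))                      # ROUND_HALF_AWAY default -> 3
    print(round(2.5, tp=ROUND_HALF_EVEN))  # banker's rounding -> 2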
|
from django.contrib.admin.sites import site
from django.contrib import admin
from leds.models import Led
# Register your models here.
site.register(Led)
|
import asyncio
import aiomysql
magnet_head = 'magnet:?xt=urn:btih:'
async def test_example(loop):
pool = await aiomysql.create_pool(host='127.0.0.1', port=3306,
user='root', password='xuexi520',
db='database_test', loop=loop)
async with pool.acquire() as conn:
async with conn.cursor() as cur:
# await cur.execute("INSERT INTO table_test VALUES('%s', '%s', '%s', '%s')" % ('174.110.220.255', '44787', 'B5EA0A22225644CFB745E4668397D45706C4F18C', 'get_peers'))
# await cur.execute("create table table_test (ip varchar(20), port varchar(5), infohash char(40) primary key not null, type varchar(20))")
# await cur.execute("drop table table_test")
# await cur.execute("DELETE FROM table_test WHERE port='44787'")
await cur.execute("select infohash from table_test where type='announce_peer'")
await conn.commit()
            r = await cur.fetchall()
for i in r:
link = magnet_head + i[0]
print(link)
pool.close()
await pool.wait_closed()
loop = asyncio.get_event_loop()
loop.run_until_complete(test_example(loop))
|
import json
from django.core.management import BaseCommand
from documentos.models import BaseDocument
from gerente.datatxt_helpers import Datatxt
from documentos.models import Node, Frame
class Command(BaseCommand):
help = 'Convert old style goal standard into a new one'
def handle(self, *args, **options):
dt = Datatxt()
for document in BaseDocument.objects.all():
gs = document.goal_standard
            for class_name, value in gs.items():
mc = Node.objects.get_or_create(name=class_name)[0]
for frame in value['frames']:
annotations = {}
res = dt.nex(frame)
if res.ok:
annotations = res.json().get('annotations')
Frame.objects.create(
node=mc,
text=frame,
annotations=annotations
)
|
import logging
import sqlite3
def query_db():
"""
    A function that uses a preconstructed query to retrieve data from a SQLite DB instance.
Parameters: None
Returns: A list of results
"""
conn = sqlite3.connect('pets.db')
cursor = conn.cursor()
sql_query = """
SELECT
person.first_name AS 'PersonFirst',
person.last_name AS 'PersonLast',
person.age AS 'PersonAge',
pet.name AS 'PetName',
pet.breed AS 'PetBreed',
pet.age AS 'PetAge',
pet.dead AS 'PetDead',
person_pet.person_id AS 'PersonID',
person_pet.pet_id AS 'PetID'
FROM person_pet
JOIN person ON person_pet.person_id = person.id
JOIN pet ON person_pet.pet_id = pet.id;
"""
cursor.execute(sql_query)
query_result = cursor.fetchall()
header = [i[0] for i in cursor.description]
return [header, query_result]
def value_getter(index_map, key, tup):
    """
    A simple key/value getter.
    Parameters:
        index_map(dict)
        key(str)
        tup(tuple)
    Returns: The value in `tup` at the index that `index_map` assigns to `key`.
    """
    index = index_map[key]
    return tup[index]
def print_person_info(tup_info):
"""
A print function that prints the persons name and age.
Parameters:
tup_info(tuple)
Prints: the person info
"""
(name, age) = tup_info
print(f"{name}, {age} years old")
def print_pet_owner_details(owner, pet_details):
"""
A print function that prints the owner and the pet details.
Parameters:
owner:(str)
pet_details:(str)
Prints: owner and pet details.
"""
print(f"{owner} {pet_details}")
def print_results(query_result, key_dict):
"""
The main print function that triggers printing results.
Parameters:
query_result(list)
key_dict(dict)
Prints:
Calls print_person_info and print_pet_owner_details
"""
head = query_result[0]
person_name = f"{value_getter(key_dict, 'PersonFirst', head)} {value_getter(key_dict, 'PersonLast', head)}"
person_age = f"{value_getter(key_dict, 'PersonAge', head)}"
print_person_info((person_name, person_age))
for pet in query_result:
pet_info = f"owned {value_getter(key_dict, 'PetName', pet)}, a {value_getter(key_dict, 'PetBreed', pet)}, that was {value_getter(key_dict, 'PetAge', pet)} years old."
print_pet_owner_details(person_name, pet_info)
def find_person(query_results, keys_dict, user_input):
"""
A function that loops over the query_results and filters the list with the personId
keyed in by the query user.
Parameters:
query_results(list)
keys_dict(dict)
user_input(int)
Returns: A filtered list.
"""
person_id = keys_dict['PersonID']
filtered_list = filter(lambda x: x[person_id] == user_input, query_results)
return list(filtered_list)
def get_keys(header_list):
"""
A utility function that creates an index dictionary for key/value lookup.
Parameters:
header_list(list)
Returns: A dict with header keys as the keys and integer values denoting the index value
within the result list.
"""
config_fields = [
'PersonFirst',
'PersonLast',
'PersonAge',
'PetName',
'PetBreed',
'PetAge',
'PetDead',
'PersonID',
'PetID'
]
return { k:header_list.index(k) for k in config_fields }
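# Example (illustrative): for the header returned by query_db(), this yields
# {'PersonFirst': 0, 'PersonLast': 1, ..., 'PetID': 8}, so
# value_getter(keys, 'PetName', row) reads row[keys['PetName']].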
def safe_int_checker(int_str):
"""
A function that checks if the string is actually an int. used for the CLI.
Parameters:
int_str(str): A string representing an int.
Returns:
        A tuple with a boolean as the first item and a value if it's successfully cast, or None if it isn't.
"""
try:
num = int(int_str)
return (True, num)
except ValueError:
return (False, None)
def print_error(num):
"""
    A print function that prints an error when the entered personId does not exist.
    Parameters:
    num(int)
    Prints: An error message, and logs the user's input.
"""
print(f'Sorry the personId of {num} does not exist')
logging.error(f'Error processing <{num}>')
def main():
"""
The primary function of this application.
Parameters:
None
Logs:
        An error if the entered ID is non-numerical or does not exist.
"""
logging.basicConfig(filename='errors.log',
level=logging.ERROR, format='%(message)s')
logging.getLogger('assignment10')
CLI = True
while CLI:
keyed = input('\nPlease enter a numerical ID\n')
(is_int, cast_num) = safe_int_checker(keyed)
        if is_int and cast_num > -1:
db_query = query_db()
[header, db_data] = db_query
header_index_dict = get_keys(header)
plucked_values = find_person(db_data, header_index_dict, cast_num)
            if not plucked_values:
                print_error(cast_num)
            else:
                print_results(plucked_values, header_index_dict)
        elif not is_int:
print('It looks like you entered a non-numerical key, please enter in a numerical key')
logging.error(f'Error processing <{keyed}>')
else:
CLI = False
print('CLI Exiting....')
            raise SystemExit
if __name__ == '__main__':
main()
|
import pymysql.cursors
import requests
import re
import uuid
from bs4 import BeautifulSoup
from time import sleep
# The connection values below are placeholders for redacted credentials; substitute your own.
connection = pymysql.connect(host='hostname',
                             user='username',
                             password='password',
                             db='dbname',
                             charset='utf8',
                             cursorclass=pymysql.cursors.DictCursor)
cur = connection.cursor()
cur.execute('select * from new_variables_card_data;')
#cur.execute('select new_variables_card_id,image_uri,redirect_name_id,redirect_image_uri,col_num,version from new_variables_card_data;')
card = cur.fetchall()
#uri = cur.fetchall()
for i in range(1, 39000):  # number of cards
print(i)
nameid = card[i]["new_variables_card_id"]
uri = card[i]["image_uri"]
col = card[i]["col_num"]
exp = card[i]["version"]
target_url1 = uri
redirectid = card[i]["redirect_name_id"]
redirecturi = card[i]["redirect_image_uri"]
target_url2 = redirecturi
#print(target_url)
print(nameid)
print(exp)
r = requests.get(target_url1)
soup = BeautifulSoup(r.text, 'lxml')
#print(nameid)
#print(uri)
#print(redirecturi)
#if not uri==None:
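    # save the front-face image as <version><col_num>f.jpeg; the back face below gets the 'b' suffix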
with open(str('/home/satokoichiro/ダウンロード/mtg_img/') + str(exp) + str(col) + str('f') + str('.jpeg'),'wb') as file:
file.write(r.content)
sleep(1)
    if redirecturi == '':
        pass
    elif redirecturi is not None:
r = requests.get(target_url2)
soup = BeautifulSoup(r.text, 'lxml')
with open(str('/home/satokoichiro/ダウンロード/mtg_img/') + str(exp) + str(col) + str('b') + str('.jpeg'),'wb') as file:
file.write(r.content)
sleep(1)
|
from flask import Flask,render_template,url_for,redirect,request,json,jsonify
from flask_sqlalchemy import SQLAlchemy
import datetime
from bs4 import BeautifulSoup
import requests
import re
from urllib.request import urlopen
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:1346536639@127.0.0.1/password?charset=utf8mb4'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.secret_key = 'aodong'
app.debug=True
db=SQLAlchemy(app)
class Allpasswords(db.Model):
__tablename__='allpasswords'
id=db.Column(db.Integer,primary_key=True)
name=db.Column(db.String(20),nullable=False)
userid=db.Column(db.String(30),nullable=False)
password=db.Column(db.String(30),nullable=False)
weburl=db.Column(db.String(300),nullable=False)
iconurl=db.Column(db.String(100),nullable=False)
createtime=db.Column(db.String(50),nullable=False)
changetime=db.Column(db.String(50),nullable=False)
class Collections(db.Model):
__tablename__='collections'
id=db.Column(db.Integer,primary_key=True)
name=db.Column(db.String(20),nullable=False)
userid=db.Column(db.String(30),nullable=False)
password=db.Column(db.String(30),nullable=False)
weburl=db.Column(db.String(300),nullable=False)
iconurl=db.Column(db.String(50),nullable=False)
@app.route('/mypassword',methods=['GET','POST'])
def indexpage():
if request.method=="GET":
return render_template('index.html')
@app.route('/getallpassword',methods=['POST'])
def getallpassword():
if request.method=="POST":
allinformation=Allpasswords.query.all()
alllist=[]
for i in range(len(allinformation)):
alllist.append([allinformation[i].name,allinformation[i].userid,allinformation[i].iconurl])
return jsonify(alllist)
@app.route('/getdetail',methods=['POST'])
def getdetail():
if request.method=="POST":
webname=json.loads(request.form.get('mywebname'))
print(webname)
detal=Allpasswords.query.filter_by(name=webname).first()
detallist=[webname,detal.userid,detal.password,detal.weburl,detal.iconurl]
detalincollect=Collections.query.filter_by(name=webname).first()
        if detalincollect is None:
detallist.append(0)
else:
detallist.append(1)
detallist.append(detal.createtime)
detallist.append(detal.changetime)
return jsonify(detallist)
@app.route('/getdelete',methods=['POST'])
def getdelete():
if request.method=="POST":
webname=json.loads(request.form.get('webname'))
webobj=Allpasswords.query.filter_by(name=webname).first()
db.session.delete(webobj)
db.session.commit()
print("删除成功")
        if Collections.query.filter_by(name=webname).first() is not None:
db.session.delete(Collections.query.filter_by(name=webname).first())
db.session.commit()
return ''
# newpassword=Allpasswords(name=webname,userid=username,password=password,weburl=weburl,iconurl=icourl)
@app.route('/getcollect',methods=['POST'])
def getcollect():
if request.method=="POST":
webname=json.loads(request.form.get('webname'))
flag=json.loads(request.form.get('flag'))
if flag=="1":
            # add to favorites
passwordobj=Allpasswords.query.filter_by(name=webname).first()
userid=passwordobj.userid
password=passwordobj.password
weburl=passwordobj.weburl
iconurl=passwordobj.iconurl
newcollect=Collections(name=webname,userid=userid,password=password,weburl=weburl,iconurl=iconurl)
db.session.add(newcollect)
db.session.commit()
else:
deleteobj=Collections.query.filter_by(name=webname).first()
db.session.delete(deleteobj)
db.session.commit()
return ''
@app.route('/getitchange',methods=['POST'])
def getitchange():
if request.method=='POST':
name=json.loads(request.form.get('name'))
webname=json.loads(request.form.get('newwebname'))
username=json.loads(request.form.get('newusername'))
password=json.loads(request.form.get('newpassword'))
weburl=json.loads(request.form.get('newweburl'))
changeitem=Allpasswords.query.filter_by(name=name).first()
changeitem.name=webname
changeitem.userid=username
changeitem.password=password
changeitem.weburl=weburl
nowtime=str(datetime.datetime.now())[:16]
changeitem.changetime=nowtime
db.session.commit()
        if Collections.query.filter_by(name=name).first() is not None:
changeincollect=Collections.query.filter_by(name=name).first()
changeincollect.name=webname
changeincollect.userid=username
changeincollect.password=password
changeincollect.weburl=weburl
db.session.commit()
return ''
@app.route('/getcollectlist',methods=['POST'])
def getcollectlist():
if request.method=="POST":
collectlist=[]
allcollect=Collections.query.all()
for i in range(len(allcollect)):
collectlist.append([allcollect[i].name,allcollect[i].userid,allcollect[i].password,allcollect[i].weburl,allcollect[i].iconurl])
return jsonify(collectlist)
@app.route('/getsearch',methods=['POST'])
def getsearch():
if request.method=="POST":
content=json.loads(request.form.get('content'))
searchlist=[]
alllist=Allpasswords.query.all()
for i in range(len(alllist)):
searchstr=str(alllist[i].name)+" "+str(alllist[i].userid)+" "+str(alllist[i].password)+" "+str(alllist[i].weburl)+" "+str(alllist[i].iconurl)
if content in searchstr:
searchlist.append([alllist[i].name,alllist[i].userid,alllist[i].iconurl])
return jsonify(searchlist)
@app.route('/addpassword',methods=['POST'])
def addpassword():
if request.method=="POST":
webname=json.loads(request.form.get('webname'))
username=json.loads(request.form.get('username'))
password=json.loads(request.form.get('password'))
weburl=json.loads(request.form.get('weburl'))
icourl=""
if weburl!="":
            # fetch the site icon
html=urlopen(weburl)
            bsobj=BeautifulSoup(html.read(), 'html.parser')
ico=bsobj.find("link",{'href':re.compile('//.*ico')})
            if ico is None:
                # try an alternative selector
icoofref=bsobj.find("link",{'rel':'icon'})
                if icoofref is None:
icourl=""
else:
icourl=icoofref['href']
else:
icourl=ico['href']
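            # a relative icon URL (no 'com'/'cn') gets prefixed with the site URL, minus any trailing slash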
if ('com' not in icourl) and ('cn' not in icourl):
newweburl=""
for i in range(len(weburl)):
if i != len(weburl)-1:
newweburl+=weburl[i]
else:
if weburl[i] !='/':
newweburl+=weburl[i]
icourl=newweburl+icourl
        if icourl is None:
icourl=""
createtime=str(datetime.datetime.now())[:16]
changetime=createtime
newpassword=Allpasswords(name=webname,userid=username,password=password,weburl=weburl,iconurl=icourl,createtime=createtime,changetime=changetime)
db.session.add(newpassword)
db.session.commit()
return ''
if __name__ == '__main__':
db.create_all()
app.run()
|
import pandas as pd
import urllib
import requests
from bs4 import BeautifulSoup
import concurrent.futures
from time import sleep
import re
from multiprocessing import Pool
from queue import Queue  # Python 3 module name
import sys
from collections import defaultdict
headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:42.0) Gecko/20100101 Firefox/42.0",\
"Referer": "http://www.wikiart.org/en/paintings-by-style",\
"Host": "www.wikiart.org",\
"Origin": "www.wikiart.org"}
gallery_home_page='http://www.wikiart.org'
painting_descriptors = ['Title','Artist','Completion Date','Style','Period','Genre','Technique','Material','Gallery','Tags']
painting_lives_at = ['link_to','local_jpeg','jpeg_url']
painting_compo = ['from']
image_df_cols = painting_descriptors + painting_lives_at + painting_compo
def to_file_name(its_link):
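    # e.g. ".../en/some-artist/some-title" -> "some-artist__some-title" (illustrative URL)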
return "__".join(its_link.split("/")[-2:])
def to_soup(a_link):
#print "In soup"
s = requests.Session()
s.headers.update(headers)
response = s.get(a_link)
    soup = BeautifulSoup(response.content, 'html.parser', from_encoding='UTF-8')
return soup
class ImageScrape(object):
def __init__(self,gallery_type):
self.gallery_type = gallery_type
self.gallery_page = gallery_home_page + "/en/paintings-by-style/" + self.gallery_type
self.paintings_df = pd.DataFrame(columns=image_df_cols)
def build_df(self):
total_paintings_scraped = 0
for a_page in self._collect_pages():
sleep(5)
for a_painting_link in self._collect_links_to_paintings(a_page):
sleep(2)
a_painting_df = self._scrape_painting_link(a_painting_link)
self.paintings_df = self.paintings_df.append(a_painting_df,ignore_index=True)
total_paintings_scraped += 1
if total_paintings_scraped >= 2500:
                    print(self.gallery_type, "Limit reached")
break
self.paintings_df['from'] = self.gallery_type
def save_locally(self,df_file,links_file):
#Save pandas dataframe locally
df = self.paintings_df
df.to_pickle(df_file)
with open(links_file, 'w') as f:
for a_jpeg_url,a_jpeg_name in zip(list(self.paintings_df['jpeg_url']),list(self.paintings_df['local_jpeg'])):
                f.write(a_jpeg_url + ',' + a_jpeg_name + '\n')
def _collect_pages(self):
soup = to_soup(self.gallery_page)
page_num_list = [int(x.text) for x in soup.select('.pager-items a') if x.text != 'Next' and x.text != 'Previous' and x.text != '...']
if page_num_list:
max_page_num = max(page_num_list)
else:
max_page_num = 1
return [self.gallery_page+'/'+ str(i) for i in range(1,max_page_num+1)]
def _collect_links_to_paintings(self,a_page):
soup = to_soup(a_page)
return [gallery_home_page + a_painting['href'] for a_painting in soup.select('.rimage')]
def _scrape_painting_link(self,a_painting_link):
soup = to_soup(a_painting_link)
painting_dict = dict(zip(image_df_cols,[[""]]*len(image_df_cols)))
try:
print "======================= Started ======================",a_painting_link
painting_dict['link_to'] = [a_painting_link]
painting_dict['local_jpeg'] = [to_file_name(a_painting_link)]
my_jpeg_url_from = soup.select('#paintingImage')[0]
print ">>>>"
painting_dict['jpeg_url'] = [soup.select('#paintingImage')[0]['href']]
painting_dict['Title'] = [soup.find('h1').findChildren()[0].getText()]
for a_row in soup.select('.DataProfileBox p'):
#print a_row
if re.search(':', a_row.getText()):
k,v = "".join(a_row.getText().splitlines()).split(":")
k,v = k.strip(),v.strip()
#print k,v
if k and k in image_df_cols:
painting_dict[k] = [v]
print "======================== Finished ======================",a_painting_link
        except Exception:
            print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
            print("Raised exception, ", a_painting_link)
            print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
return pd.DataFrame(data=painting_dict,columns=image_df_cols)
else:
return pd.DataFrame(data=painting_dict,columns=image_df_cols)
def call_image_scrape(scrape_style):
a_scraper = ImageScrape(scrape_style)
a_scraper.build_df()
a_scraper.save_locally(scrape_style+'.pkl', scrape_style+'_links.txt')
return (scrape_style+'.pkl',scrape_style+'_links.txt')
if __name__ == '__main__':
#print call_image_scrape('pointillism')
styles_to_scrape_parallel = \
["photorealism", "contemporary-realism", "american-realism", "hyper-realism", "post-impressionism", "pointillism", "cloisonnism", "fauvism", "intimism", "cubism", "cubo-futurism", "cubo-expressionism", "tubism", "transavantgarde", "transautomatism", "mechanistic-cubism", "futurism", "abstract-art", "abstract-expressionism","realism","impressionism"]
with concurrent.futures.ProcessPoolExecutor() as executor:
for a_style, style_files in zip(styles_to_scrape_parallel, executor.map(call_image_scrape, styles_to_scrape_parallel)):
print('%s is DONE: %s' % (a_style, style_files))
#call_image_scrape('photorealism')
|
import json
import ssl
import urllib.request
import sqlite3
# Ignore SSL certificate errors
context = ssl.create_default_context()
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
connection = sqlite3.connect("db.sqlite3")
cur = connection.cursor()
cur.execute("DROP TABLE IF EXISTS Games")
cur.execute(
"""CREATE TABLE IF NOT EXISTS Games(id TEXT, title TEXT,
thumbnail TEXT,short_description TEXT, game_url TEXT,
genre TEXT,platform TEXT, publisher TEXT, developer TEXT,
release_date TEXT);"""
)
api_endpoint = "https://www.freetogame.com/api/games"
response_data = urllib.request.urlopen(api_endpoint, context=context)
data = response_data.read().decode()
json_data = json.loads(data)
# json_dump = json.dumps(json_data, indent=4)
# print (json_dump)
for game in json_data:
id = game["id"]
title = game["title"]
thumbnail = game["thumbnail"]
short_description = game["short_description"]
game_url = game["game_url"]
genre = game["genre"]
platform = game["platform"]
publisher = game["publisher"]
developer = game["developer"]
release_date = game["release_date"]
cur.execute(
"INSERT INTO Games VALUES (?,?,?,?,?,?,?,?,?,?)",
(
id,
title,
thumbnail,
short_description,
game_url,
genre,
platform,
publisher,
developer,
release_date,
),
)
connection.commit()
|
import logging, json, argparse, yaml
from prometheus_client.core import CounterMetricFamily, GaugeMetricFamily
from helpers import global_config, logs
import config_generator
import exporter
logger = logging.getLogger('openhab_exporter')
def main():
import helpers.cmdline_args # pylint: disable=unused-import
if global_config.ACTION == 'gen_config':
config_generator.run()
elif global_config.ACTION == 'run_exporter':
exporter.run()
if __name__ == '__main__':
main()
|
import dash
import dash_table
import pandas as pd
from django_plotly_dash import DjangoDash
from .nutrienti import cibo_mangiato, identifica
import datetime
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from django.contrib.auth.models import User
import numpy as np
import pymongo
from pymongo import MongoClient
import ssl
# TODO: tidy all of this up
client = pymongo.MongoClient("mongodb+srv://armonia_amministrazione:uanrimcoantitao2l0i2c1a@clusterarmonia.ukpoq.mongodb.net/local?retryWrites=true&w=majority",ssl_cert_reqs=ssl.CERT_NONE)
#db = client.test
db = client['ArmoniaBot']
col = db['AlimentiDB']
prove = col.find({},{'Nome':1})
nomi =[]
for pro in prove:
nomi.append(pro['Nome'])
#data['Nome'].append(pro['Nome'])
#df = cibo_mangiato('Pranzo','AlessioNone',datetime.datetime(2021,7,15))
#print(df)
df = pd.DataFrame()
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = DjangoDash('Cibo2', external_stylesheets=external_stylesheets)
app.layout = html.Div([
html.P(html.Strong('Colazione')),
dash_table.DataTable(
id='table-colazione',
columns=[{"name": i, "id": i} for i in df.columns],
data=df.to_dict('records'),
export_format='xlsx',
export_headers='display',
merge_duplicate_headers=True,
style_as_list_view=True,
style_header={'backgroundColor': 'rgb(30, 30, 30)'},
style_cell={
'backgroundColor': 'rgb(50, 50, 50)',
'color': 'white'
},
),
# dcc.DatePickerSingle(
# id='my-date-picker-single1',
# min_date_allowed=datetime.datetime(2020, 1, 1),
# max_date_allowed=datetime.datetime(2026, 1, 1),
# initial_visible_month=datetime.datetime(2017, 8, 5),
# date=datetime.date.today()#datetime(2017, 8, 25)
# ),
html.P(html.Strong('Pranzo')),
dash_table.DataTable(
id='table-pranzo',
columns=[{"name": i, "id": i} for i in df.columns],
data=df.to_dict('records'),
export_format='xlsx',
export_headers='display',
merge_duplicate_headers=True,
),
# dcc.DatePickerSingle(
# id='my-date-picker-single2',
# min_date_allowed=datetime.datetime(2020, 1, 1),
# max_date_allowed=datetime.datetime(2026, 1, 1),
# initial_visible_month=datetime.datetime(2017, 8, 5),
# date=datetime.date.today()#datetime(2017, 8, 25)
# ),
html.P(html.Strong('Cena')),
dash_table.DataTable(
id='table-cena',
columns=[{"name": i, "id": i} for i in df.columns],
data=df.to_dict('records'),
export_format='xlsx',
export_headers='display',
merge_duplicate_headers=True
),
# dcc.DatePickerSingle(
# id='my-date-picker-single3',
# min_date_allowed=datetime.datetime(2020, 1, 1),
# max_date_allowed=datetime.datetime(2026, 1, 1),
# initial_visible_month=datetime.datetime(2017, 8, 5),
# date=datetime.date.today()#datetime(2017, 8, 25)
# ),
html.P(html.Strong('Merenda')),
dash_table.DataTable(
id='table-merenda',
columns=[{"name": i, "id": i} for i in df.columns],
data=df.to_dict('records'),
export_format='xlsx',
export_headers='display',
merge_duplicate_headers=True
),
html.P(html.Strong('Seleziona la data:')),
dcc.DatePickerSingle(
id='my-date-picker-single4',
min_date_allowed=datetime.datetime(2020, 1, 1),
max_date_allowed=datetime.datetime(2026, 1, 1),
initial_visible_month=datetime.datetime(2017, 8, 5),
date=datetime.date.today()#datetime(2017, 8, 25)
),
html.P(html.Strong('Utenti')),
dcc.Dropdown(
id='slider-radio-user',
options=[
{"label": user, "value": user}
for user in np.sort(np.unique(nomi))#data.Nome.unique())
],
value='AlessioNone'
),
])
@app.callback(
Output('table-colazione', 'data'),
Output('table-colazione', 'columns'),
[Input('my-date-picker-single4', 'date'),
Input('slider-radio-user', 'value'),])
def display_colazione(value, user):
print(value)
dt =datetime.datetime.strptime(value, '%Y-%m-%d')
#dt = datetime.datetime.combine(value, datetime.datetime.min.time())
print(dt)
df = cibo_mangiato('Colazione', user, dt)
print(df)
return [df.to_dict('records'),[{"name": i, "id": i} for i in df.columns]]
#raise PreventUpdate
@app.callback(
Output('table-pranzo', 'data'),
Output('table-pranzo', 'columns'),
[Input('my-date-picker-single4', 'date'),
Input('slider-radio-user', 'value'),])
def display_pranzo(value, user):
print(value)
dt =datetime.datetime.strptime(value, '%Y-%m-%d')
#dt = datetime.datetime.combine(value, datetime.datetime.min.time())
print(dt)
df = cibo_mangiato('Pranzo', user, dt)
print(df)
return [df.to_dict('records'),[{"name": i, "id": i} for i in df.columns]]
@app.callback(
Output('table-cena', 'data'),
Output('table-cena', 'columns'),
[Input('my-date-picker-single4', 'date'),
Input('slider-radio-user', 'value'),])
def display_cena(value, user):
print(value)
dt =datetime.datetime.strptime(value, '%Y-%m-%d')
#dt = datetime.datetime.combine(value, datetime.datetime.min.time())
print(dt)
df = cibo_mangiato('Cena', user, dt)
print(df)
return [df.to_dict('records'),[{"name": i, "id": i} for i in df.columns]]
@app.callback(
Output('table-merenda', 'data'),
Output('table-merenda', 'columns'),
[Input('my-date-picker-single4', 'date'),
Input('slider-radio-user', 'value'),])
def display_merenda(value, user):
print(value)
dt =datetime.datetime.strptime(value, '%Y-%m-%d')
#dt = datetime.datetime.combine(value, datetime.datetime.min.time())
print(dt)
df = cibo_mangiato('Merenda', user, dt)
print(df)
return [df.to_dict('records'),[{"name": i, "id": i} for i in df.columns]]
if __name__ == '__main__':
app.run_server(debug=True)
|
import socket
import sys
# Create a UDP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_address = ('task.cstag.ca', 4547)
messages = ["HELP", "LOGIN MTECH GITRDONE", "STATUS", "START", "STOP", "LOGOUT"]
print "Connected.... Enter commands now..."
# for message in messages:
while True:
    try:
        # Send data
        message = input()
        # print(type(message))
        if message not in messages:
            print("Invalid command. Please try again.")
            continue
        sent = sock.sendto(message.encode(), server_address)
        # Receive response
        # print('waiting to receive')
        data, server = sock.recvfrom(4096)
        print(data.decode())
    except Exception:
        pass
|
# STAGE CONTROL
# AUTHOR: Jamie Lake
# EMAIL: jdl62@cam.ac.uk
# All units should be in relation to mm
# i.e. x = [mm], d' = [mm/s], d'' = [mm/s^2].
# If you try to run this on another PC, it is
# likely that the stage will not be recognised
# you will need to check which COM channel the OS
# has assigned the USB device corresponding to
# the DLC controller.
import sys
import os
import time
import clr
import platform
assert (platform.architecture()[0] == "64bit"), "Before continuing, you need to change the system path to " \
"the Newport DLL. The current path is for 64-bit systems only."
sys.path.append(r'C:\Windows\Microsoft.NET\assembly\GAC_64\Newport.DLS.CommandInterface\v4.0_1.0.0.4__90ac4f829985d2bf')
clr.AddReference("Newport.DLS.CommandInterface")
from CommandInterfaceDLS import *
from System import Double, String
global aDLS, instrumentPort, statusBits, errorBits, controllerState, scriptName
scriptName = os.path.basename(__file__)
# Find the instrument, COM4 for this laptop (assigned by Windows)
instrumentPort = "COM4"
# Create an instance of DLS
aDLS = DLS()
def start():
# Connect to instrument
# OpenInstrument() written about in the manual, refer to it for more info
connectaDLS = aDLS.OpenInstrument(instrumentPort)
if connectaDLS == 0:
return print(scriptName + ": Open port " + instrumentPort + ": Success")
else:
return print(scriptName + ": Open port " + instrumentPort + ": Failure", connectaDLS)
def stopMotion():
ST, STError = aDLS.ST('')
if ST == 0:
time.sleep(5)
return print(os.path.basename(__file__)+': The stage has stopped moving')
else:
return print(os.path.basename(__file__)+': Error:', STError)
def resetStage():
RS, RSError = aDLS.RS('')
if RS == 0:
time.sleep(5)
print(scriptName+': Controller reset')
else:
print(scriptName+': Error:', RSError)
def checkControllerNotInitialized():
# Can be called in ANY state
# Returns True if there are no other errors
# and the controller state is any state NOT INITIALIZED (NI)
# i.e. NI after reset, NI after CONFIGURATION ...
GetTS, GetTSStatusBits, GetTSErrorBits, GetTSControllerStatus, fill = aDLS.TS('', '', '', '')
if GetTS == 0:
if GetTSControllerStatus == "0A" or GetTSControllerStatus == "0B" or GetTSControllerStatus == "0C" or \
GetTSControllerStatus == "0D" or GetTSControllerStatus == "0E" or GetTSControllerStatus == "0F" or \
GetTSControllerStatus == "10" or GetTSControllerStatus == "11" or GetTSControllerStatus == "12" or \
GetTSControllerStatus == "12" or GetTSControllerStatus == "13":
return True
else:
return False
else:
return print("The status of the controller was unable to be ascertained")
def checkControllerInitializing():
# Can be called in ANY state
# Returns True if there are no other errors
# and the controller state is in INITIALIZING state.
# Otherwise return False.
GetTS, GetTSStatusBits, GetTSErrorBits, GetTSControllerStatus, fill = aDLS.TS('', '', '', '')
if GetTS == 0:
if GetTSControllerStatus == "1E" or GetTSControllerStatus == "1F":
return True
else:
return False
else:
return print("The status of the controller was unable to be ascertained")
def checkControllerHoming():
# Can be called in ANY state
# Returns True if there are no other errors
# and the controller state is "HOMING: launch by USB" or
# "HOMING: launch by Remote control". Otherwise return False.
GetTS, GetTSStatusBits, GetTSErrorBits, GetTSControllerStatus, fill = aDLS.TS('', '', '', '')
if GetTS == 0:
if GetTSControllerStatus == "32" or GetTSControllerStatus == "33":
return True
else:
return False
else:
return print("The status of the controller was unable to be ascertained")
def initialize():
# Can only be called in NOT INITIALIZED states
# Initialization shouldn't take longer than 4 seconds...
# Therefore, sleep for around 5 seconds.
if checkControllerNotInitialized() is True:
initializeaDLS, initializeaDLSError = aDLS.IE('')
if initializeaDLS == 0:
while checkControllerInitializing() is True:
time.sleep(0)
return print("Initialization successful")
else:
return print("Error: ", initializeaDLSError)
else:
return print("Controller must be in NOT INITIALIZED state")
def home():
# Home stage
homeaDLS, homeaDLSError = aDLS.OR('')
if homeaDLS == 0:
while checkControllerHoming() is True:
time.sleep(0)
return print("Homing successful")
else:
return print("Error:", homeaDLSError)
def setAcceleration(SetAc):
resultSetAc, errorSetAc = aDLS.AC_Set(Double(SetAc), '')
if resultSetAc == 0:
return print(scriptName+': Acceleration set to:', SetAc)
else:
return print(scriptName+': Error:', errorSetAc)
def setVelocity(SetVA):
resultSetVA, errorSetVA = aDLS.VA_Set(Double(SetVA),'')
if resultSetVA == 0:
return print(scriptName+': Positioner velocity set to:', SetVA)
else:
return print(scriptName+': Error:', errorSetVA)
def checkControllerReady():
GetTS, GetTSStatusBits, GetTSErrorBits, GetTSControllerStatus, fill = aDLS.TS('', '', '', '')
if GetTS == 0:
if GetTSControllerStatus == "46" or GetTSControllerStatus == "47" or GetTSControllerStatus == "48" \
or GetTSControllerStatus == "49":
return True
else:
return False
else:
return print(scriptName+": The status of the controller was unable to be ascertained")
def checkControllerMoving():
# Can be called in ANY state
# Returns True if there are no other errors
# and the controller state is "MOVING". Otherwise return False.
GetTS, GetTSStatusBits, GetTSErrorBits, GetTSControllerStatus, fill = aDLS.TS('', '', '', '')
if GetTS == 0:
if GetTSControllerStatus == "3C":
return True
else:
return False
else:
return print("The status of the controller was unable to be ascertained")
def moveAbs(absMove):
SetPA, SetPAError = aDLS.PA_Set(absMove, '')
if SetPA == 0:
while checkControllerMoving() is True and checkControllerReady() is not True:
time.sleep(0)
return print('The stage will move to the absolute position:', absMove)
else:
return print('Error:', SetPAError)
def exit():
# Close the instrument and inform user
# ALWAYS MOVE FROM 0, OR HOMING ERRORS OCCUR
setAcceleration(30)
setVelocity(30)
moveAbs(62.5)
aDLS.CloseInstrument()
return print(scriptName+':', instrumentPort + " is disconnected.")
def main():
start()
readyState = checkControllerReady()
if readyState is True:
exit()
return
else:
if checkControllerMoving() is True:
stopMotion()
else:
resetStage()
initialize()
home()
        readyState = checkControllerReady()  # re-check; the value captured before homing is stale
        if readyState is True:
print("The controller is now ready.")
exit()
else:
print("Uh-oh! The stage hasn't homed properly. It isn't ready!")
if __name__ == "__main__":
main()
|
numero = int(input("Enter a number: "))
calculo = numero % 2
if calculo == 0:
    print("even")
else:
    print("odd")
|
from flask import render_template, request
from flask_login import login_required, current_user
from flask.helpers import flash
from sqlalchemy.orm import query
from models.Modelos import *
from flask_sqlalchemy import SQLAlchemy
from functools import reduce
import os.path, sys, shutil
db = SQLAlchemy()  # our ORM
'''
Function that runs when the search button is pressed. It looks up the
products whose name matches what the user typed.
'''
@login_required
def buscar_producto():
if request.method != 'POST':
return render_template('usuario/inicio_usuario.html')
producto = get_producto(request.form['search'])
if producto == []:
flash("No se encontró ningún producto con esas características")
return render_template('usuario/inicio_usuario.html')
else:
return render_template('usuario/resultado_busqueda.html')
# Redirects to the page with the search results.
@login_required
def resultado_busqueda():
return render_template('producto/resultado_busqueda.html', resultados=get_producto(request.form['search']))
'''
Redirects to the page for viewing the product selected from the list of
results obtained.
'''
@login_required
def ver_articulo_buscado():
return render_template('producto/ver_articulo_buscado.html', producto=get_producto(request.form['search']))
# Shows an article when the user is a buyer.
@login_required
def ver_articulo_comprador():
    # get the product's information
    id_producto = request.form['id_producto']
    # and the buyer who is viewing the product
    try:
        correo_comprador = request.form['correo_comprador']
        # if a review of the product was submitted, try to save it
        guarda_resenia(request.form['resenia'], request.form['numero_estrellas'], correo_comprador, id_producto)
    except Exception as e:
        pass
    # look up the requested product
    producto = db.session.query(Producto).filter(Producto.id_producto == id_producto).one()
    # once the product is loaded, load its images
    imagen = db.session.query(Imagen).filter(Imagen.id_producto == id_producto).first()
    # and its purchases, to obtain the reviews and rating
    (compras,promedio_estrellas) = get_compras_con_reseña(id_producto)
return render_template('producto/ver_articulo_comprador.html', producto = producto, compras = compras, promedio_estrellas=promedio_estrellas, imagen = imagen)
# Shows an article when the user is a seller.
@login_required
def ver_articulo_vendedor():
    # get the product's information
    id_producto = request.form['id_producto']
    # look up the requested product
    producto = db.session.query(Producto).filter(Producto.id_producto == id_producto).one()
    # and its purchases, to obtain the reviews and rating
    (compras,promedio_estrellas) = get_compras_con_reseña(id_producto)
    # once the product is loaded, load its image
    imagen = db.session.query(Imagen).filter(Imagen.id_producto == id_producto).first()
return render_template('producto/ver_articulo_vendedor.html', producto = producto, compras = compras, promedio_estrellas=promedio_estrellas, url_img = imagen)
# Function that loads every product belonging to a seller.
@login_required
def productos_vendedor():
    # get the ID of the currently logged-in seller
    id_vendedor = current_user.correo
    """
    Query the database: we take the Producto and Imagen tables and do a LEFT
    JOIN, so that a product without an image can still be displayed.
    """
    # Parameterized query (%s placeholder) instead of string concatenation, to avoid SQL injection.
    productos = db.engine.execute("""SELECT producto.correo_vendedor as correo_vendedor, producto.id_producto as id_producto,
                                nombre,precio , cantidad, detalles, descripcion, estado, ruta
                                FROM producto
                                Left JOIN imagen
                                ON producto.id_producto = imagen.id_producto WHERE producto.correo_vendedor = %s""", (str(id_vendedor),))
    # render the page with the seller's data
    return render_template('producto/productos_vendedor.html', producto = productos)
###---------------------------------- HELPER METHODS ----------------------------------###
# Helper method that returns the purchases that have a comment, plus the
# average star rating for a specific product.
def get_compras_con_reseña(id_producto):
compras = db.session.query(Compra, Usuario).join(Usuario, Compra.correo_comprador == Usuario.correo)\
.filter(Compra.id_producto == id_producto).all()
    compras_con_opinion = list(filter(lambda compra: compra.Compra.comentario is not None, compras))
    compras_con_calificacion = list(filter(lambda compra: compra.Compra.numero_estrellas is not None and compra.Compra.numero_estrellas != 0, compras))
promedio_estrellas = 0
if len(compras_con_calificacion) != 0:
promedio_estrellas = reduce(lambda acc, compra: compra.Compra.numero_estrellas + acc
if compra.Compra.numero_estrellas != None
else acc, compras_con_calificacion, 0)/len(compras_con_calificacion)
return (compras_con_opinion,promedio_estrellas)
# Helper method for saving reviews
def guarda_resenia(comentario, numero_estrellas, correo_comprador, id_producto):
    # find every purchase of this product made by the buyer
    compras = db.session.query(Compra).filter_by(correo_comprador=correo_comprador,id_producto=id_producto).all()
    for compra in compras:
        # iterate them looking for one that has no comment yet
        if not compra.comentario:
            # if so, add the review and finish
compra.comentario = comentario
compra.numero_estrellas = numero_estrellas
db.session.commit()
return
flash('Por favor, compre el producto')
'''
Helper method that returns a list of all the products matching the name
given by the user.
'''
def get_producto(nombre):
    # Parameterized LIKE query instead of string concatenation, to avoid SQL injection.
    producto = db.engine.execute("""SELECT producto.correo_vendedor as correo_vendedor, producto.id_producto as id_producto,
                                nombre,precio , cantidad, detalles, descripcion, estado, ruta
                                FROM producto
                                Left JOIN imagen
                                ON producto.id_producto = imagen.id_producto WHERE nombre LIKE %s""", ('%' + nombre + '%',))
    return producto
|
# Generated by Django 3.2.4 on 2021-06-29 14:27
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('img', models.ImageField(default='assets/images/product_03.jpg', upload_to='img/course_list')),
('price', models.DecimalField(decimal_places=2, max_digits=10)),
('name', models.CharField(default='', max_length=100)),
('desc', models.TextField(blank=True, max_length=1000, null=True)),
('url', models.CharField(max_length=200)),
],
),
]
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import re
from pants.testutil.pants_integration_test import run_pants, setup_tmpdir
def test_counters_and_histograms() -> None:
# To get the cache read histogram, we must ensure the cache is populated, so we run twice.
with setup_tmpdir(
{"src/py/app.py": "print(0)\n", "src/py/BUILD": "python_sources()"}
) as tmpdir:
argv = [
"--backend-packages=['pants.backend.python', 'pants.backend.python.lint.black']",
"--plugins=hdrhistogram",
"--stats-log",
"lint",
f"{tmpdir}::",
]
run_pants(argv).assert_success()
result = run_pants(argv)
result.assert_success()
assert "Counters:" in result.stderr
assert re.search(r"local_cache_requests: \d", result.stderr)
assert "remote_cache_requests: 0" in result.stderr
assert "Observation histogram summaries" in result.stderr
assert "Summary of `local_store_read_blob_size` observation histogram:" in result.stderr
assert re.search(r"min: \d", result.stderr)
assert re.search(r"p25: \d", result.stderr)
assert re.search(r"p99: \d", result.stderr)
def test_memory_summary() -> None:
result = run_pants(["--stats-memory-summary", "--version"])
result.assert_success()
assert "Memory summary" in result.stderr
assert "pants.engine.unions.UnionMembership" in result.stderr
def test_warn_if_no_histograms() -> None:
result = run_pants(["--stats-log", "roots"])
result.assert_success()
assert "Counters:" in result.stderr
assert "Please run with `--plugins=hdrhistogram`" in result.stderr
assert "Observation histogram summaries:" not in result.stderr
|
class Solution(object):
def titleToNumber(self, s):
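        # Treat the title as a base-26 number with digits 'A'=1 ... 'Z'=26.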
total = 0
for i in range(len(s)):
multiple = len(s) - 1 - i
val = ord(s[i]) - ord('A') + 1
total += val * 26**(multiple)
return total
print(Solution().titleToNumber('A'))#1
print(Solution().titleToNumber('AA'))#27
print(Solution().titleToNumber('ZA'))#677
|
import re
from helper import *
from collections import defaultdict
IFACE = "eth1"
ZERO = "0.00"
# Class for parsing ethstats output
#
# Gives the mean and stdev of network utilization
class EthstatsParser:
def __init__(self, filename, iface=IFACE):
self.f = filename
self.iface = iface
self.lines = open(filename).readlines()
self.parse()
def parse_line(self, line):
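        # An ethstats line is assumed to look like "eth1:  12.34 Mb/s In  ...  56.78 Mb/s Out"; fields 0 and 3 after the colon are taken as the in/out rates.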
re_spaces = re.compile(r'\s+')
iface, rest = line.split(":")
iface = iface.strip()
data = re_spaces.split(rest.strip())
return {"iface": iface, "in":data[0], "out":data[3]}
def parse(self):
ret = dict()
util = []
for line in self.lines:
d = self.parse_line(line)
if d["iface"] == self.iface and d["out"] != ZERO:
util.append(float(d["out"]))
L = len(util)
# NOTE: Ignore first few seconds of output
util = util[15:]
self.util = util
return util
def summary(self):
return dict(mean=mean(self.util), stdev=stdev(self.util))
|
d = {"a": 1, "b": 2, "c": 3}
try:
print(d["d"]) # 기본적으로 실행하는 구문
except KeyError:
print("KeyError!") # KeyError가 발생했을 때 실행하는 구문
|
import re
class Assembler:
@staticmethod
def assemble(program, start_address):
program = program.split("\n")
labels = {}
curr_address = start_address
supported_instructions = [ "LW", "SW", "JMP", "BEQ", "JALR", "RET", "ADD", "SUB", "ADDI", "NAND", "MUL", "HALT" ]
new_program = []
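        # First pass: record each label's address, strip the label, and advance the address by 2 per instruction.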
for line in program:
line = re.sub(", +",",",line)
line = re.sub(" +$","",line)
line = re.sub(" +",",",line)
l = line.split(",")
_l = l[0]
if _l not in supported_instructions:
labels[_l] = curr_address
new_program.append(",".join(l[1:]))
else:
new_program.append(",".join(l))
curr_address += 2
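        # Second pass: resolve BEQ label operands into PC-relative offsets.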
curr_address = start_address
_new_program = []
for line in new_program:
l = line.split(",")
if l[0] == "BEQ":
if l[-1] in labels:
l[-1] = str(labels[l[-1]] - curr_address - 2)
_new_program.append(",".join(l))
curr_address += 2
return "\n".join(_new_program)
|
from tkinter import *
before = [100, 100]
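# previous click position; each click draws a line from here to the new point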
def line(x1, y1, x2, y2):
global canvas
canvas.create_line(x1, y1, x2, y2)
def on_click(event):
global canvas
global before
x = event.x
y = event.y
line(before[0], before[1], x, y)
before[0] = x
before[1] = y
tk = Tk()
canvas = Canvas(tk, width=500, height=500)
canvas.pack()
canvas.bind("<Button-1>", on_click)
canvas.mainloop()
|
from flask_restful import request, reqparse
from Camel import CamelResource
from Camel.auth import is_authenticated
from Camel import config
from os import path
from pathlib import Path
from uuid import uuid4
class Attachment(CamelResource):
def __init__(self):
upload_conf = config['uploads']
self.reqparse = reqparse.RequestParser()
self.tmp_uploads = Path(upload_conf['TMP'])
self.tmp_uploads.mkdir(parents=True, exist_ok=True)
##POST arguments
self.reqparse.add_argument('uuid', type = str)
def post(self):
if not is_authenticated():
return "Admin only", 401
uploadedFile = request.files['file']
uuid = str(uuid4())
target = str(self.tmp_uploads.joinpath(uuid))
uploadedFile.save(target)
return {'uuid': uuid}
|
class LinkedQ():
def __init__(self):
self.first = None
self.last = None
self.size = 0
    def __str__(self):
        """Return the queue's elements as a string (tip: see the course FAQ, under Help)."""
        values = []
        node = self.first
        while node is not None:
            values.append(str(node.value))
            node = node.next
        return " ".join(values)
def put(self,x):
"""Stoppar in x sist i kön """
        node = Node(x)
        if self.isEmpty():
            # the first node is a special case
            self.first = node
            self.last = node
        else:
            self.last.next = node
            self.last = node
self.size += 1
def get(self):
"""Plockar ut och returnerar det som står först i kön """
firstValue = self.first.value
self.first = self.first.next
self.size -= 1
return firstValue
def isEmpty(self):
"""Returnerar True om kön är tom, False annars """
        return self.first is None
class Node:
def __init__(self, value):
self.value = value
self.next = None
def __str__(self):
return "NodeClass" + str(self.value)
|
"""An example to check if BackPACK' first-order extensions are working for ResNets."""
from test.core.derivatives.utils import classification_targets
import torch
from backpack import backpack, extend, extensions
from .automated_test import check_sizes, check_values
def autograd_individual_gradients(X, y, model, loss_func):
"""Individual gradients via for loop with automatic differentiation.
Args:
X (torch.Tensor): Mini-batch of shape `(N, *)`
        y (torch.Tensor): Labels for `X`
model (torch.nn.Module): Model for forward pass
loss_func (torch.nn.Module): Loss function for model prediction
Returns:
[torch.Tensor]: Individual gradients for samples in the mini-batch
with respect to the model parameters. Arranged in the same order
as `model.parameters()`.
"""
N = X.shape[0]
reduction_factor = _get_reduction_factor(X, y, model, loss_func)
individual_gradients = [
torch.zeros(N, *p.shape).to(X.device) for p in model.parameters()
]
for n in range(N):
x_n = X[n].unsqueeze(0)
y_n = y[n].unsqueeze(0)
f_n = model(x_n)
l_n = loss_func(f_n, y_n) / reduction_factor
g_n = torch.autograd.grad(l_n, model.parameters())
for idx, g in enumerate(g_n):
individual_gradients[idx][n] = g
return individual_gradients
def _get_reduction_factor(X, y, model, loss_func):
"""Return reduction factor of loss function."""
N = X.shape[0]
x_0 = X[0].unsqueeze(0)
y_0 = y[0].unsqueeze(0)
x_0_repeated = x_0.repeat([N if pos == 0 else 1 for pos, _ in enumerate(X.shape)])
y_0_repeated = y_0.repeat([N if pos == 0 else 1 for pos, _ in enumerate(y.shape)])
individual_loss = loss_func(model(x_0), y_0)
reduced_loss = loss_func(model(x_0_repeated), y_0_repeated)
return (N * individual_loss / reduced_loss).item()
def backpack_individual_gradients(X, y, model, loss_func):
"""Individual gradients with BackPACK.
Args:
X (torch.Tensor): Mini-batch of shape `(N, *)`
        y (torch.Tensor): Labels for `X`
model (torch.nn.Module): Model for forward pass
loss_func (torch.nn.Module): Loss function for model prediction
Returns:
[torch.Tensor]: Individual gradients for samples in the mini-batch
with respect to the model parameters. Arranged in the same order
as `model.parameters()`.
"""
model = extend(model)
loss_func = extend(loss_func)
loss = loss_func(model(X), y)
with backpack(extensions.BatchGrad()):
loss.backward()
individual_gradients = [p.grad_batch for p in model.parameters()]
return individual_gradients
class Identity(torch.nn.Module):
"""Identity operation."""
def forward(self, input):
return input
class Parallel(torch.nn.Sequential):
"""Feed input to multiple modules, sum the result.
|-----|
| -> | f_1 | -> |
| |-----| |
| |
| |-----| |
x ->| -> | f_2 | -> + -> f₁(x) + f₂(x) + ...
| |-----| |
| |
| |-----| |
| -> | ... | -> |
|-----|
"""
def forward(self, input):
"""Process input with all modules, sum the output."""
for idx, module in enumerate(self.children()):
if idx == 0:
output = module(input)
else:
output = output + module(input)
return output
def test_individual_gradients_simple_resnet():
"""Individual gradients for a simple ResNet with autodiff and BackPACK."""
# batch size, feature dimension
N, D = 2, 5
# classification
C = 3
X = torch.rand(N, D)
y = classification_targets((N,), num_classes=C)
model = Parallel(Identity(), torch.nn.Linear(D, D, bias=True))
loss_func = torch.nn.CrossEntropyLoss(reduction="sum")
result_autograd = autograd_individual_gradients(X, y, model, loss_func)
result_backpack = backpack_individual_gradients(X, y, model, loss_func)
check_sizes(result_autograd, result_backpack)
check_values(result_autograd, result_backpack)
|
import pickle
import argparse
from genotype import *
from animat import *
from single_tmaze import *
from evolutionary_search import *
if __name__ == '__main__':
parser = argparse.ArgumentParser("experiment.py")
parser.add_argument("-s", "--src", help = "src pickle for population")
parser.add_argument("-d", "--dst", help = "dst pickle for population")
parser.add_argument("-g", "--ng", help = "number of generations", default = 100)
parser.add_argument("-p", "--np", help = "number of population", default = 300)
parser.add_argument("-n", "--nn", help = "number of neurons", default = 2)
args = parser.parse_args()
pop = None
if args.src is not None:
with open(args.src, 'rb') as f:
pop = pickle.load(f)
print('Load POPULATION from ' + args.src)
n_neu = int(args.nn) if pop is None else pop[0].n_neuron
n_pop = int(args.np) if pop is None else len(pop)
es = EvolutionarySearch(SingleTMaze, Animat, n_neu, n_pop)
es.pop = es.pop if pop is None else pop
pop = es.run(int(args.ng))
best = sorted(pop, key = lambda g:g.fitness)[-1]
best_animat = Animat(best)
print(best.fitness)
print(es.env.evaluate(best_animat))
print(best_animat.weight)
print(best_animat.rule)
    if args.dst is not None:
with open(args.dst, 'wb') as f:
pickle.dump(pop, f)
print('Dump POPULATION to ' + args.dst)
|
# -*- coding: utf-8 -*-
'''
Connects to the WAMP router and runs the Users WAMP component.
'''
if __name__ == '__main__':
import sys
import logging
import inject
inject.configure()
sys.path.insert(0, '../python')
logging.basicConfig(level=logging.DEBUG)
from autobahn.asyncio.wamp import ApplicationRunner
from actions.users.users import UsersWamp
from model.registry import Registry
reg = inject.instance(Registry)
registry = reg.getRegistry('wamp')
url = registry.get('url')
realm = registry.get('realm')
debug = registry.get('debug')
runner = ApplicationRunner(url=url, realm=realm)
runner.run(UsersWamp)
|
# -*- coding: utf-8 -*-
import os
from django.conf import settings
class CorretorException(Exception):
    def _limitar_texto_msg(self, msg, questao=None):
        """
        Given an error-message text and an (assessment) question, strips the
        leading part of file paths so users cannot take advantage of them.
        This turns the text:
        /home/arruda/projetos/amao/AMAO/media/123456/avaliacao-slug/questao-slug/fontes/main.c
        into:
        .../123456/avaliacao-slug/questao-slug/fontes/main.c
        """
# from Avaliacao.Questao.models import path_base
# base = path_base(questao=questao.questao,aluno=questao.avaliacao.aluno,avaliacao=questao.avaliacao)
base_abs = settings.MEDIA_ROOT
# os.path.join(settings.MEDIA_ROOT,base)
return msg.replace(base_abs,'...')
    def __init__(self, msg, questao=None):
        Exception.__init__(self, msg)
        self.message = self._limitar_texto_msg(msg, questao=questao)
class LockException(CorretorException):
pass
class CompiladorException(CorretorException):
pass
class ExecutorException(CorretorException):
pass
class ComparadorException(CorretorException):
pass
class Corretor(object):
"""
Uma classe que representa um corretor, e seu metodos.
"""
def __init__(self):
pass
# self.nome = "Corretor"
# self.descricao = "Corretor de Base"
# class Meta:
# app_label = 'Corretor'
# abstract = True
    def pre_compilar(self,**kwargs):
        """Called before compiling.
        """
        return
    def compilar(self, **kwargs):
        """Called when the files need to be compiled.
        """
        return
    def pos_compilar(self,**kwargs):
        """Called after compiling.
        """
        return
    def compilar_completo(self,**kwargs):
        """Calls all the compile methods in the correct order.
        """
        return self.pre_compilar(**kwargs),self.compilar(**kwargs),self.pos_compilar(**kwargs)
    def pre_executar(self,**kwargs):
        """Called before executing.
        """
        return
    def executar(self, **kwargs):
        """Called when a program needs to be executed.
        """
        return
    def pos_executar(self,**kwargs):
        """Called after executing.
        """
        return
    def executar_completo(self,**kwargs):
        """Calls all the execute methods in the correct order.
        """
        return self.pre_executar(**kwargs),self.executar(**kwargs),self.pos_executar(**kwargs)
    def pre_comparar(self,**kwargs):
        """Called before comparing.
        """
        return
    def comparar(self, **kwargs):
        """Called when the result needs to be compared against an answer key.
        """
        return
    def pos_comparar(self,**kwargs):
        """Called after comparing.
        """
        return
    def comparar_completo(self,**kwargs):
        """Calls all the compare methods in the correct order.
        """
        return self.pre_comparar(**kwargs),self.comparar(**kwargs),self.pos_comparar(**kwargs)
    def avaliar(self, **kwargs):
        """Called when an evaluation beyond the comparison is needed.
        """
        return
    def corrigir(self, **kwargs):
        """Called to perform the grading.
        This method calls the other methods needed to do so, taking an assessment question as its main parameter.
        """
        return
    def nota(self,**kwargs):
        """Called to produce the grade for a question.
        """
        return
|
"""
Given an integer array nums and an integer k, return true if nums has a continuous subarray of size at least two whose elements sum up to a multiple of k, or false otherwise.
An integer x is a multiple of k if there exists an integer n such that x = n * k. 0 is always a multiple of k.
Example 1:
Input: nums = [23,2,4,6,7], k = 6
Output: true
Explanation: [2, 4] is a continuous subarray of size 2 whose elements sum up to 6.
Example 2:
Input: nums = [23,2,6,4,7], k = 6
Output: true
Explanation: [23, 2, 6, 4, 7] is an continuous subarray of size 5 whose elements sum up to 42.
42 is a multiple of 6 because 42 = 7 * 6 and 7 is an integer.
"""
from typing import List

class Solution:
    def checkSubarraySum(self, nums: List[int], k: int) -> bool:
        # Two prefix sums with the same remainder mod k bound a subarray whose
        # sum is a multiple of k; remainder 0 is seeded at index -1 so a
        # qualifying prefix itself counts.
        hmap = { 0:-1 }
        totals = 0
        for i, n in enumerate(nums):
            totals += n
            rem = totals % k
            if rem not in hmap:
                hmap[rem] = i
            elif i - hmap[rem] > 1:
                # the matching prefix ends at least two positions back
                return True
        return False
|
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
""" NeTV2 Platform Definition
This is a non-core platform. To use it, you'll need to set your LUNA_PLATFORM variable:
> export LUNA_PLATFORM="luna.gateware.platform.netv2:NeTV2Platform"
The NeTV2 has a fixed pull-up resistor on D-; which prevents it from being used as a
FS device. To use the platform for full-speed USB, you'll need to move the resistor
populated as R23 over to R24.
"""
import os
import subprocess
from amaranth import *
from amaranth.build import *
from amaranth.vendor.xilinx_7series import Xilinx7SeriesPlatform
from amaranth_boards.resources import *
from ..interface.pipe import AsyncPIPEInterface
from ..interface.serdes_phy import XC7GTPSerDesPIPE
from .core import LUNAPlatform
class NeTV2ClockDomainGenerator(Elaboratable):
""" Clock/Reset Controller for the NeTV2. """
def __init__(self, *, clock_frequencies=None, clock_signal_name=None):
pass
def elaborate(self, platform):
m = Module()
# Create our domains; but don't do anything else for them, for now.
m.domains.usb = ClockDomain()
m.domains.usb_io = ClockDomain()
m.domains.sync = ClockDomain()
m.domains.ss = ClockDomain()
m.domains.fast = ClockDomain()
# Grab our main clock.
clk50 = platform.request(platform.default_clk)
# USB2 PLL connections.
clk12 = Signal()
clk48 = Signal()
usb2_locked = Signal()
usb2_feedback = Signal()
m.submodules.usb2_pll = Instance("PLLE2_ADV",
p_BANDWIDTH = "OPTIMIZED",
p_COMPENSATION = "ZHOLD",
p_STARTUP_WAIT = "FALSE",
p_DIVCLK_DIVIDE = 1,
p_CLKFBOUT_MULT = 24,
p_CLKFBOUT_PHASE = 0.000,
p_CLKOUT0_DIVIDE = 100,
p_CLKOUT0_PHASE = 0.000,
p_CLKOUT0_DUTY_CYCLE = 0.500,
p_CLKOUT1_DIVIDE = 25,
p_CLKOUT1_PHASE = 0.000,
p_CLKOUT1_DUTY_CYCLE = 0.500,
p_CLKIN1_PERIOD = 20.000,
i_CLKFBIN = usb2_feedback,
o_CLKFBOUT = usb2_feedback,
i_CLKIN1 = clk50,
o_CLKOUT0 = clk12,
o_CLKOUT1 = clk48,
o_LOCKED = usb2_locked,
)
# USB3 PLL connections.
clk16 = Signal()
clk125 = Signal()
clk250 = Signal()
usb3_locked = Signal()
usb3_feedback = Signal()
m.submodules.usb3_pll = Instance("PLLE2_ADV",
p_BANDWIDTH = "OPTIMIZED",
p_COMPENSATION = "ZHOLD",
p_STARTUP_WAIT = "FALSE",
p_DIVCLK_DIVIDE = 1,
p_CLKFBOUT_MULT = 20, # VCO = 1000 MHz
p_CLKFBOUT_PHASE = 0.000,
p_CLKOUT0_DIVIDE = 4, # CLKOUT0 = 250 MHz (1000/4)
p_CLKOUT0_PHASE = 0.000,
p_CLKOUT0_DUTY_CYCLE = 0.500,
p_CLKOUT1_DIVIDE = 8, # CLKOUT1 = 125 MHz (1000/8)
p_CLKOUT1_PHASE = 0.000,
p_CLKOUT1_DUTY_CYCLE = 0.500,
p_CLKOUT2_DIVIDE = 64, # CLKOUT2 = 16 MHz (1000/64)
p_CLKOUT2_PHASE = 0.000,
p_CLKOUT2_DUTY_CYCLE = 0.500,
p_CLKIN1_PERIOD = 20.000,
i_CLKFBIN = usb3_feedback,
o_CLKFBOUT = usb3_feedback,
i_CLKIN1 = clk50,
o_CLKOUT0 = clk250,
o_CLKOUT1 = clk125,
o_CLKOUT2 = clk16,
o_LOCKED = usb3_locked,
)
# Connect up our clock domains.
m.d.comb += [
ClockSignal("usb") .eq(clk12),
ClockSignal("usb_io") .eq(clk48),
ClockSignal("sync") .eq(clk125),
ClockSignal("ss") .eq(clk125),
ClockSignal("fast") .eq(clk250),
ResetSignal("usb") .eq(~usb2_locked),
ResetSignal("usb_io") .eq(~usb2_locked),
ResetSignal("sync") .eq(~usb3_locked),
ResetSignal("ss") .eq(~usb3_locked),
ResetSignal("fast") .eq(~usb3_locked),
]
return m
class NeTV2SuperSpeedPHY(AsyncPIPEInterface):
""" Superspeed PHY configuration for the NeTV2. """
SS_FREQUENCY = 125e6
FAST_FREQUENCY = 250e6
def __init__(self, platform):
# Grab the I/O that implements our SerDes interface...
serdes_io = platform.request("serdes", dir={'tx':"-", 'rx':"-"})
# Use it to create our soft PHY...
serdes_phy = XC7GTPSerDesPIPE(
tx_pads = serdes_io.tx,
rx_pads = serdes_io.rx,
refclk_frequency = self.FAST_FREQUENCY,
ss_clock_frequency = self.SS_FREQUENCY,
)
# ... and bring the PHY interface signals to the MAC domain.
super().__init__(serdes_phy, width=4, domain="ss")
def elaborate(self, platform):
m = super().elaborate(platform)
# Patch in our soft PHY as a submodule.
m.submodules.phy = self.phy
# Drive the PHY reference clock with our fast generated clock.
m.d.comb += self.clk.eq(ClockSignal("fast"))
# This board does not have a way to detect Vbus, so assume it's always present.
m.d.comb += self.phy.power_present.eq(1)
return m
class NeTV2Platform(Xilinx7SeriesPlatform, LUNAPlatform):
""" Board description for the NeTV2. """
name = "NeTV2"
device = "xc7a35t"
package = "fgg484"
speed = "2"
default_clk = "clk50"
# Provide the type that'll be used to create our clock domains.
clock_domain_generator = NeTV2ClockDomainGenerator
# Use our direct USB connection for USB2, and our SerDes for USB3.
default_usb_connection = "usb"
default_usb3_phy = NeTV2SuperSpeedPHY
#
# I/O resources.
#
resources = [
Resource("clk50", 0, Pins("J19"), Attrs(IOSTANDARD="LVCMOS33"), Clock(50e6)),
# R/G leds
*LEDResources(pins="M21 N20 L21 AA21 R19 M16", attrs=Attrs(IOSTANDARD="LVCMOS33"), invert=True),
# Comms
#DirectUSBResource(0, d_p="C14", d_n="C15", attrs=Attrs(IOSTANDARD="LVCMOS33")),
# XXX
DirectUSBResource(0, d_p="A15", d_n="A14", pullup="C17", attrs=Attrs(IOSTANDARD="LVCMOS33")),
UARTResource(0, rx="E13", tx="E14", attrs=Attrs(IOSTANDARD="LVCMOS33")),
# PCIe gold fingers (for USB3)
Resource("serdes", 0,
Subsignal("tx", DiffPairs("D5", "C5")),
Subsignal("rx", DiffPairs("D11", "C11")),
),
# User I/O (labeled "hax")
Resource("user_io", 0, Pins("B15"), Attrs(IOSTANDARD="LVCMOS33")),
Resource("user_io", 1, Pins("B16"), Attrs(IOSTANDARD="LVCMOS33")),
Resource("user_io", 2, Pins("B13"), Attrs(IOSTANDARD="LVCMOS33")),
Resource("user_io", 3, Pins("A15"), Attrs(IOSTANDARD="LVCMOS33")),
Resource("user_io", 4, Pins("A16"), Attrs(IOSTANDARD="LVCMOS33")),
Resource("user_io", 5, Pins("A13"), Attrs(IOSTANDARD="LVCMOS33")),
Resource("user_io", 6, Pins("A14"), Attrs(IOSTANDARD="LVCMOS33")),
Resource("user_io", 7, Pins("B17"), Attrs(IOSTANDARD="LVCMOS33")),
Resource("user_io", 8, Pins("A18"), Attrs(IOSTANDARD="LVCMOS33")),
Resource("user_io", 9, Pins("C17"), Attrs(IOSTANDARD="LVCMOS33")),
]
connectors = []
def toolchain_prepare(self, fragment, name, **kwargs):
extra_constraints = [
            # Allow us to drive our SerDes from FPGA fabric.
"set_property SEVERITY {Warning} [get_drc_checks REQP-49]"
]
overrides = {
"script_before_bitstream":
"set_property BITSTREAM.GENERAL.COMPRESS TRUE [current_design]",
"add_constraints": "\n".join(extra_constraints)
}
return super().toolchain_prepare(fragment, name, **overrides, **kwargs)
#
# FIXME: figure out a better tool to use for running with the NeTV attached
# to a raspberry pi
#
def toolchain_program(self, products, name):
xc3sprog = os.environ.get("XC3SPROG", "xc3sprog")
with products.extract("{}.bit".format(name)) as bitstream_file:
subprocess.check_call([xc3sprog, "-c", "ft4232h", bitstream_file])
|
__author__ = "Narwhale"
import json
try:
with open('num.txt', 'r') as f_obj:
num = json.load(f_obj)
except FileNotFoundError:
    num = input('Please enter your favorite number: ')
with open('num.txt', 'w') as f_obj:
json.dump(num, f_obj)
    print("I know your favorite new number! It's %s" % num)
else:
    print("I know your favorite old number! It's %s" % num)
|
# First project of the Joyita
puwis_dogs = ['Joyita','Maya','Pelusa','Tofy','Popi', 'intruso','intrusa']
print ('Lindas Dogs are:')
for x in puwis_dogs:
    if x == "intrusa":
        print("You are not her dog")
    else:
        print(x)
print("Intruso you got caught")
print("muajajaja")
|
from django.urls import path
from .views import *
urlpatterns = [
    path('', blogPosts.as_view(), name='blog_posts_url'),
    path('post/create/', CreatePost.as_view(), name='blog_create_post_url'),
    path('post/<slug:slug>/', blogPost.as_view(), name='blog_post_url'),
    path('post/<slug:slug>/edit/', EditPost.as_view(), name='blog_post_edit_url'),
    path('post/<slug:slug>/del/', DelPost.as_view(), name='blog_post_del_url'),
    path('tags/', blogTags.as_view(), name='blog_tags_url'),
    path('tag/create/', CreateTag.as_view(), name='blog_tag_create_url'),
    path('tag/<slug:slug>/', blogTag.as_view(), name='blog_tag_url'),
    path('tag/<slug:slug>/edit/', EditTag.as_view(), name='blog_tag_edit_url'),
    path('tag/<slug:slug>/del/', DelTag.as_view(), name='blog_tag_del_url'),
]
|
import numpy as np
from cntk.layers import Convolution, MaxPooling, Dropout, Dense
from cntk.initializer import glorot_uniform
from cntk.ops import input_variable, relu, softmax
from common.functor import Functor
class MnistModel(Functor):
def __init__(self):
super(MnistModel, self).__init__()
#self.param_bounds = {
# #'mdl_conv1a_nf': (6, 128),
# #'mdl_conv1b_nf': (6, 128),
# #'mdl_conv2a_nf': (6, 128),
# #'mdl_conv2b_nf': (6, 128),
# #'mdl_fc1_nh': (10, 500),
# 'mdl_drop2a_p': (0.0, 0.25),
# 'mdl_drop2b_p': (0.0, 0.25),
# 'mdl_drop3_p': (0.0, 0.50)}
self.img_h = 28
self.img_w = 28
self.n_dim = 10
def __call__(self,
num_classes=10,
act_type=relu,
mdl_conv1a_nf=40,
mdl_conv1b_nf=60,
mdl_conv2a_nf=50,
mdl_conv2b_nf=75,
mdl_fc1_nh=75,
mdl_drop2a_p=0.033,
mdl_drop2b_p=0.097,
mdl_drop3_p=0.412,
**kwargs):
input_var = input_variable((1, self.img_h, self.img_w), np.float32)
label_var = input_variable((self.n_dim), np.float32)
conv1a = Convolution(filter_shape=(3, 3), num_filters=int(mdl_conv1a_nf), activation=act_type, init=glorot_uniform(), pad=True, name='conv1a')(input_var)
conv1b = Convolution(filter_shape=(3, 3), num_filters=int(mdl_conv1b_nf), activation=act_type, init=glorot_uniform(), pad=True, name='conv1b')(conv1a)
pool1 = MaxPooling(filter_shape=(2, 2), strides=(2, 2), name='pool1')(conv1b)
conv2a = Convolution(filter_shape=(3, 3), num_filters=int(mdl_conv2a_nf), activation=act_type, init=glorot_uniform(), pad=True, name='conv2a')(pool1)
drop2a = Dropout(prob=mdl_drop2a_p, name="drop2a")(conv2a)
conv2b = Convolution(filter_shape=(3, 3), num_filters=int(mdl_conv2b_nf), activation=act_type, init=glorot_uniform(), pad=True, name='conv2b')(drop2a)
        drop2b = Dropout(prob=mdl_drop2b_p, name="drop2b")(conv2b)
pool2 = MaxPooling(filter_shape=(2, 2), strides=(2, 2), name='pool2')(drop2b)
fc1 = Dense(shape=int(mdl_fc1_nh), init=glorot_uniform(), activation=act_type, name='fc1')(pool2)
drop3 = Dropout(prob=mdl_drop3_p, name="drop3")(fc1)
#fc2 = Dense(shape=num_classes, init=glorot_uniform(), activation=softmax, name='fc2')(drop3)
fc2 = Dense(shape=num_classes, init=glorot_uniform(), activation=None, name='fc2')(drop3)
return input_var, label_var, fc2
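# --- Usage sketch (not part of the original file) ---
# A minimal smoke test of the graph construction above, assuming CNTK is
# installed and common.functor.Functor is importable.
if __name__ == '__main__':
    model = MnistModel()
    input_var, label_var, logits = model(num_classes=10)
    print(logits)  # the CNTK Function producing the 10-way logits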
|
#!/usr/bin/python
import sys
import ssl
import boto
import time
import operator
from collections import defaultdict
def convert_to_gb(size):
return float(size)/(1024*1024*1024)
def convert_to_mb(size):
return float(size)/(1024*1024)
def main():
start_time = time.time()
if hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
s3_bucket = sys.argv[1]
key_prefix = sys.argv[2]
s3conn = boto.connect_s3()
bucket = s3conn.get_bucket(s3_bucket)
rs = bucket.list(key_prefix)
total_size = 0
dir_data = defaultdict(int)
print "\nDirectory List for %s:\n" % key_prefix
for key in rs:
total_size += key.size
key_path = str(key.name).split(key_prefix)[1]
split_key_path = key_path.split('/')
if len(split_key_path) > 2:
dir_name = split_key_path[1]
dir_data[dir_name] += key.size
dir_data_sorted = sorted(dir_data.items(), key=operator.itemgetter(1), \
reverse=True)
for dir_name, dir_size in dir_data_sorted:
dir_size_mb = convert_to_mb(dir_size)
        if dir_size_mb > 50:
            print("%s: %.2f MB" % (dir_name, dir_size_mb))
    print("\nSummary\n" + "-" * 30 + "\n")
    print("Total size: %s (%.2f GB)" % (total_size, convert_to_gb(total_size)))
    print("Time taken: %s seconds" % (time.time() - start_time))
return 0
if __name__ == '__main__':
if len(sys.argv) < 3:
print "Usage: {0} \"S3_BUCKET\" \"KEY_PREFIX\" (Ex:{0} my_bucket production/new_customer)".format(sys.argv[0])
sys.exit(0)
sys.exit(main())
|
#from rest_framework import serializers
from api.models import Challenge
from rest_framework_json_api import serializers
from rest_framework_json_api.relations import ResourceRelatedField
class ChallengeSerializer(serializers.ModelSerializer):
"""
Sets fields for the rest api to serialize in the Challenge model.
"""
class Meta:
model = Challenge
fields = ('id', 'title', 'points', 'description', 'solved', 'numsolved')
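# --- Usage sketch (not part of the original file) ---
# Hypothetical round-trip, assuming at least one Challenge row exists:
#
#   challenge = Challenge.objects.first()
#   data = ChallengeSerializer(challenge).data
#   # data -> {'id': ..., 'title': ..., 'points': ..., 'description': ...,
#   #          'solved': ..., 'numsolved': ...}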
|
from pathlib import Path
import numpy as np
import pandas as pd
from keras_preprocessing.image import img_to_array, load_img
from tensorflow.python.keras.utils.data_utils import Sequence
def idx_to_name(id_: int):
return f"{id_:06d}.jpg"
def name_to_idx(name: str):
return int(name[:-4])
def find_range_of_partition(partition_path: str, partition: int):
partition_df = pd.read_csv(partition_path)
sub_df = partition_df[partition_df.partition == partition]
start = name_to_idx(sub_df.iloc[0].image_id)
end = name_to_idx(sub_df.iloc[-1].image_id)
return start, end
class CelebaSequence(Sequence):
def __init__(self, partition: int, batch: int, img_path: str, attrs_path: str, partition_path: str):
self.image_dir = Path(img_path)
self.attrs_df = pd.read_csv(attrs_path)
self.start_idx, self.end_idx = find_range_of_partition(partition_path, partition)
self.batch = batch
def __len__(self):
return int(np.ceil((self.end_idx - self.start_idx + 1) / float(self.batch)))
    def __getitem__(self, idx):
        # Image ids for this partition run from start_idx to end_idx inclusive;
        # the original hard-coded a 1-based start, which only holds for partition 0.
        first = self.start_idx + idx * self.batch
        last = min(first + self.batch, self.end_idx + 1)
        indices = np.arange(first, last)
        x = np.array([img_to_array(load_img(self.image_dir / idx_to_name(i))) for i in indices])
        y = np.array([self.attrs_df[self.attrs_df.image_id == idx_to_name(i)].Male.item() for i in indices])
        x /= 255
        y = (y + 1) / 2  # map {-1, 1} labels to {0, 1}
        return x, y
def __iter__(self):
for item in (self[i] for i in range(len(self))):
yield item
class FiniteCelebaSequence(CelebaSequence):
def __iter__(self): # run just one epoch
for item in (self[i] for i in range(len(self))):
yield item
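# --- Usage sketch (not part of the original file; paths are assumptions) ---
# Typical CelebA layout: aligned images plus the attribute and partition CSVs.
if __name__ == '__main__':
    seq = CelebaSequence(
        partition=0,  # 0 = train, 1 = val, 2 = test in list_eval_partition.csv
        batch=32,
        img_path='celeba/img_align_celeba',
        attrs_path='celeba/list_attr_celeba.csv',
        partition_path='celeba/list_eval_partition.csv',
    )
    x, y = seq[0]
    print(x.shape, y.shape)  # e.g. (32, 218, 178, 3) (32,)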
|
# 1st level Model Structure: Equation Block
# import sys
# sys.path.append('..')
# this module defines the rules for constructing an energy block in the master block
# this is the global component set import, so that all modules use the same set
from global_sets.component import m
from pyomo import environ as pe
# define MPCC_beta_NCP rule
def P_NCP_block_rule(block):
#------------------------------LOCAL VARIABLES------------------------------
block.s_L = pe.Var(within=pe.NonNegativeReals,initialize=1000)#,bounds=(0,1))
block.s_V = pe.Var(within=pe.NonNegativeReals,initialize=0)
#-----------------------------LOCAL parameters------------------------------
block.epi = pe.Param(initialize=1e-4,mutable=True)
print('>','Importing MPCC_P_NCP Blocks......')
print('>','Adding the following local variable:')
print('-'*50)
for i in block.component_objects(pe.Var,active=True):
print('|',i)
for i in block.component_objects(pe.Param,active=True):
print('|',i)
print('-'*50)
    print('>','Adding complementarity constraint, splitting pressure used in VLE')
print('')
#------------------------------MPCC equations-------------------------------
def s_L_complementarity_rule(block):
return (sum(block.parent_block().L[s] for s in block.parent_block().outlet) + block.s_L) \
== ((sum(block.parent_block().L[s] for s in block.parent_block().outlet) - block.s_L)**2+block.epi)**0.5
block.s_L_complementarity_con = pe.Constraint(rule=s_L_complementarity_rule)
def s_V_complementarity_rule(block):
return (sum(block.parent_block().V[s] for s in block.parent_block().outlet) + block.s_V) \
== ((sum(block.parent_block().V[s] for s in block.parent_block().outlet) - block.s_V)**2+block.epi)**0.5
block.s_V_complementarity_con = pe.Constraint(rule=s_V_complementarity_rule)
#-----------------------------Global equations------------------------------
block.parent_block().VLE_block.del_component(block.parent_block().VLE_block.pressure_equal_con)
def pressure_equal_rule(block):
return block.parent_block().VLE_block.P_VLE - block.parent_block().P == block.s_L - block.s_V
block.pressure_equal_con = pe.Constraint(rule=pressure_equal_rule)
# define MPCC_beta_Reg rule
def P_Reg_block_rule(block):
#------------------------------LOCAL VARIABLES------------------------------
block.s_L = pe.Var(within=pe.NonNegativeReals,initialize=0)#,bounds=(0,1))
block.s_V = pe.Var(within=pe.NonNegativeReals,initialize=0)
#-----------------------------LOCAL parameters------------------------------
block.epi = pe.Param(initialize=1e-3,mutable=True)
print('>','Importing MPCC_P_Reg Blocks......')
print('>','Adding the following local variable:')
print('-'*50)
for i in block.component_objects(pe.Var,active=True):
print('|',i)
for i in block.component_objects(pe.Param,active=True):
print('|',i)
print('-'*50)
    print('>','Adding complementarity constraint, splitting pressure used in VLE')
print('')
#------------------------------MPCC equations-------------------------------
def s_L_complementarity_rule(block):
return sum(block.parent_block().L[s] for s in block.parent_block().outlet) * block.s_L \
<= block.epi
block.s_L_complementarity_con = pe.Constraint(rule=s_L_complementarity_rule)
def s_V_complementarity_rule(block):
return sum(block.parent_block().V[s] for s in block.parent_block().outlet) * block.s_V <= block.epi
block.s_V_complementarity_con = pe.Constraint(rule=s_V_complementarity_rule)
#-----------------------------Global equations------------------------------
block.parent_block().VLE_block.del_component(block.parent_block().VLE_block.pressure_equal_con)
def pressure_equal_rule(block):
return block.parent_block().VLE_block.P_VLE - block.parent_block().P == block.s_L - block.s_V
block.pressure_equal_con = pe.Constraint(rule=pressure_equal_rule)
# define MPCC_beta_penalty_function rule
def P_pf_block_rule(block):
#------------------------------LOCAL VARIABLES------------------------------
block.s_L = pe.Var(within=pe.NonNegativeReals,initialize=0)#,bounds=(0,1))
block.s_V = pe.Var(within=pe.NonNegativeReals,initialize=0)
block.pf = pe.Var(within=pe.NonNegativeReals,initialize=0)
#-----------------------------LOCAL parameters------------------------------
block.rho = pe.Param(initialize=1,mutable=True)
    print('>','Importing MPCC_P_pf Blocks......')
print('>','Adding the following local variable:')
print('-'*50)
for i in block.component_objects(pe.Var,active=True):
print('|',i)
for i in block.component_objects(pe.Param,active=True):
print('|',i)
print('-'*50)
    print('>','Splitting pressure used in VLE')
print('')
#------------------------------MPCC equations-------------------------------
def penalty_rule(block):
return block.pf >= block.rho*(sum(block.parent_block().L[s] for s in block.parent_block().outlet)*block.s_L \
+ sum(block.parent_block().V[s] for s in block.parent_block().outlet)*block.s_V)
block.penalty_con = pe.Constraint(rule=penalty_rule)
#-----------------------------Global equations------------------------------
block.parent_block().VLE_block.del_component(block.parent_block().VLE_block.pressure_equal_con)
def pressure_equal_rule(block):
return block.parent_block().VLE_block.P_VLE - block.parent_block().P == block.s_L - block.s_V
block.pressure_equal_con = pe.Constraint(rule=pressure_equal_rule)
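# --- Usage sketch (not part of the original file) ---
# These rules are meant to be attached to a parent block that already defines
# L, V, outlet and a VLE_block exposing P_VLE and pressure_equal_con; the
# parent model below is an assumption, not something this module provides.
#
#   parent.mpcc = pe.Block(rule=P_NCP_block_rule)
#
# The NCP smoothing can then be tightened between solves by shrinking epi:
#
#   parent.mpcc.epi = 1e-6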
|
import bleach
import markdown as md
from bleach.linkifier import LinkifyFilter
from django import template
allowed_tags = [
"a",
"abbr",
"acronym",
"b",
"blockquote",
"br",
"code",
"em",
"h1",
"h2",
"h3",
"h4",
"h5",
"h6",
"hr",
"i",
"li",
"ol",
"p",
"pre",
"strong",
"ul",
]
register = template.Library()
cleaner = bleach.Cleaner(tags=allowed_tags, filters=[LinkifyFilter])
@register.filter(is_safe=True)
def markdown(value):
if not value:
return ""
return cleaner.clean(md.markdown(value))
@register.tag()
def markdownify(parser, token):
nodelist = parser.parse(("endmarkdownify",))
parser.delete_first_token()
return Markdownify(nodelist)
class Markdownify(template.Node):
def __init__(self, nodelist):
self.nodelist = nodelist
    def render(self, context):
        output = self.nodelist.render(context)
        # markdown() already converts and cleans, so a second clean is redundant.
        return markdown(output)
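# --- Usage sketch (not part of the original file) ---
# In a Django template, assuming this module lives in a templatetags/ package
# (the load name markdown_tags is hypothetical):
#
#   {% load markdown_tags %}
#   {{ post.body|markdown }}
#   {% markdownify %}**bold** and [a link](https://example.com){% endmarkdownify %}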
|
import cv2
import numpy as np
from abc import ABC, abstractmethod
class VideoReader(ABC):
def __init__(self) -> None:
self.path = None
@abstractmethod
def _read_file(self):
raise NotImplementedError('Read File Method is not implemented')
@abstractmethod
def _read_frames(self):
        raise NotImplementedError('Read Frames Method is not implemented')
@abstractmethod
def __getitem__(self,key):
raise NotImplementedError('Get Item Method is not implemented')
@abstractmethod
def __len__(self):
pass
class AviReader(VideoReader):
def __init__(self,path,mode=None) -> None:
self.path = path
self.mode = mode
self.frames = self._read_frames()
def _read_file(self):
"""
This method reads avi files to opencv video capture object
Only .avi extensions are supported
:return: OpenCV "cap" object
"""
extension = self.path.split('.')[-1]
if extension!='avi':
raise Exception("Invalid Format")
return cv2.VideoCapture(self.path)
    def _read_frames(self):
        """
        This method reads frames from a video capture object and holds them in memory
        :return: frames as a list (or a numpy array when mode == "np")
        """
        cap = self._read_file()
        frame_list = []
        while True:
            ret, frame = cap.read()
            if ret:
                frame_list.append(np.array(frame))
            else:
                break
        cap.release()
        if self.mode == "np":
            frame_list = np.array(frame_list)
        return frame_list
def __getitem__(self,key):
return self.frames[key]
    def resolution(self):
        if len(self.frames) == 0:
            raise Exception("Avi file not loaded")
        # np.asarray is a no-op when mode == "np"; otherwise it stacks the list
        return np.asarray(self.frames).shape
    def __len__(self):
        return len(self.frames)
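# --- Usage sketch (not part of the original file; the path is an assumption) ---
if __name__ == '__main__':
    reader = AviReader('clip.avi', mode='np')
    print(len(reader), 'frames, resolution:', reader.resolution())
    first_frame = reader[0]  # one BGR frame as a numpy array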
|
line_stuff = list(map(int, input().split()))
average = sum(line_stuff) / len(line_stuff)
# keep only the values strictly above the average, largest first
more_than_average = sorted((i for i in line_stuff if i > average), reverse=True)
if not more_than_average:
    print("No")
else:
    # print at most the top five such values
    print(*more_than_average[:5])
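# --- Example run (illustrative) ---
#   input : 1 2 3 4 5 6 7 8 9 10
#   average is 5.5, so the values above it, sorted descending, are printed:
#   output: 10 9 8 7 6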
|
#!/usr/bin/env python
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.autograd import Variable
from tensorboardX import SummaryWriter
import argparse, os, sys, subprocess
import setproctitle, colorama
import numpy as np
from tqdm import tqdm
from glob import glob
from os.path import *
import models, losses, datasets
from utils import flow_utils, tools
# fp32 copy of parameters for update
global param_copy
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--start_epoch", type=int, default=1)
parser.add_argument("--total_epochs", type=int, default=10000)
parser.add_argument("--batch_size", "-b", type=int, default=8, help="Batch size")
parser.add_argument(
"--train_n_batches",
type=int,
default=-1,
help="Number of min-batches per epoch. If < 0, it will be determined by training_dataloader",
)
parser.add_argument(
"--crop_size",
type=int,
nargs="+",
default=[256, 256],
help="Spatial dimension to crop training samples for training",
)
parser.add_argument("--gradient_clip", type=float, default=None)
parser.add_argument(
"--schedule_lr_frequency", type=int, default=0, help="in number of iterations (0 for no schedule)"
)
parser.add_argument("--schedule_lr_fraction", type=float, default=10)
parser.add_argument("--rgb_max", type=float, default=255.0)
parser.add_argument("--number_workers", "-nw", "--num_workers", type=int, default=8)
parser.add_argument("--number_gpus", "-ng", type=int, default=-1, help="number of GPUs to use")
parser.add_argument("--no_cuda", action="store_true")
parser.add_argument("--seed", type=int, default=1)
parser.add_argument("--name", default="run", type=str, help="a name to append to the save directory")
parser.add_argument("--save", "-s", default="./work", type=str, help="directory for saving")
parser.add_argument("--validation_frequency", type=int, default=5, help="validate every n epochs")
parser.add_argument("--validation_n_batches", type=int, default=-1)
parser.add_argument(
"--render_validation",
action="store_true",
help="run inference (save flows to file) and every validation_frequency epoch",
)
parser.add_argument("--inference", action="store_true")
parser.add_argument(
"--inference_visualize", action="store_true", help="visualize the optical flow during inference"
)
parser.add_argument(
"--inference_size",
type=int,
nargs="+",
default=[-1, -1],
help="spatial size divisible by 64. default (-1,-1) - largest possible valid size would be used",
)
parser.add_argument("--inference_batch_size", type=int, default=1)
parser.add_argument("--inference_n_batches", type=int, default=-1)
parser.add_argument("--save_flow", action="store_true", help="save predicted flows to file")
parser.add_argument(
"--resume", default="", type=str, metavar="PATH", help="path to latest checkpoint (default: none)"
)
parser.add_argument("--log_frequency", "--summ_iter", type=int, default=1, help="Log every n batches")
parser.add_argument("--skip_training", action="store_true")
parser.add_argument("--skip_validation", action="store_true")
parser.add_argument("--fp16", action="store_true", help="Run model in pseudo-fp16 mode (fp16 storage fp32 math).")
parser.add_argument(
"--fp16_scale",
type=float,
default=1024.0,
help="Loss scaling, positive power of 2 values can improve fp16 convergence.",
)
tools.add_arguments_for_module(parser, models, argument_for_class="model", default="FlowNet2")
tools.add_arguments_for_module(parser, losses, argument_for_class="loss", default="L1Loss")
tools.add_arguments_for_module(
parser, torch.optim, argument_for_class="optimizer", default="Adam", skip_params=["params"]
)
tools.add_arguments_for_module(
parser,
datasets,
argument_for_class="training_dataset",
default="MpiSintelFinal",
skip_params=["is_cropped"],
parameter_defaults={"root": "./MPI-Sintel/flow/training"},
)
tools.add_arguments_for_module(
parser,
datasets,
argument_for_class="validation_dataset",
default="MpiSintelClean",
skip_params=["is_cropped"],
parameter_defaults={"root": "./MPI-Sintel/flow/training", "replicates": 1},
)
tools.add_arguments_for_module(
parser,
datasets,
argument_for_class="inference_dataset",
default="MpiSintelClean",
skip_params=["is_cropped"],
parameter_defaults={"root": "./MPI-Sintel/flow/training", "replicates": 1},
)
main_dir = os.path.dirname(os.path.realpath(__file__))
os.chdir(main_dir)
# Parse the official arguments
with tools.TimerBlock("Parsing Arguments") as block:
args = parser.parse_args()
if args.number_gpus < 0:
args.number_gpus = torch.cuda.device_count()
        # Get argument defaults (hashtag #thisisahack)
parser.add_argument("--IGNORE", action="store_true")
defaults = vars(parser.parse_args(["--IGNORE"]))
# Print all arguments, color the non-defaults
for argument, value in sorted(vars(args).items()):
reset = colorama.Style.RESET_ALL
color = reset if value == defaults[argument] else colorama.Fore.MAGENTA
block.log("{}{}: {}{}".format(color, argument, value, reset))
args.model_class = tools.module_to_dict(models)[args.model]
args.optimizer_class = tools.module_to_dict(torch.optim)[args.optimizer]
args.loss_class = tools.module_to_dict(losses)[args.loss]
args.training_dataset_class = tools.module_to_dict(datasets)[args.training_dataset]
args.validation_dataset_class = tools.module_to_dict(datasets)[args.validation_dataset]
args.inference_dataset_class = tools.module_to_dict(datasets)[args.inference_dataset]
args.cuda = not args.no_cuda and torch.cuda.is_available()
args.current_hash = subprocess.check_output(["git", "rev-parse", "HEAD"]).rstrip()
args.log_file = join(args.save, "args.txt")
# dict to collect activation gradients (for training debug purpose)
args.grads = {}
if args.inference:
args.skip_validation = True
args.skip_training = True
args.total_epochs = 1
args.inference_dir = "{}/inference".format(args.save)
print("Source Code")
print((" Current Git Hash: {}\n".format(args.current_hash)))
# Change the title for `top` and `pkill` commands
setproctitle.setproctitle(args.save)
# Dynamically load the dataset class with parameters passed in via "--argument_[param]=[value]" arguments
with tools.TimerBlock("Initializing Datasets") as block:
args.effective_batch_size = args.batch_size * args.number_gpus
args.effective_inference_batch_size = args.inference_batch_size * args.number_gpus
args.effective_number_workers = args.number_workers * args.number_gpus
gpuargs = (
{"num_workers": args.effective_number_workers, "pin_memory": True, "drop_last": True} if args.cuda else {}
)
inf_gpuargs = gpuargs.copy()
inf_gpuargs["num_workers"] = args.number_workers
if exists(args.training_dataset_root):
train_dataset = args.training_dataset_class(args, True, **tools.kwargs_from_args(args, "training_dataset"))
block.log("Training Dataset: {}".format(args.training_dataset))
block.log("Training Input: {}".format(" ".join([str([d for d in x.size()]) for x in train_dataset[0][0]])))
block.log(
"Training Targets: {}".format(" ".join([str([d for d in x.size()]) for x in train_dataset[0][1]]))
)
train_loader = DataLoader(train_dataset, batch_size=args.effective_batch_size, shuffle=True, **gpuargs)
if exists(args.validation_dataset_root):
validation_dataset = args.validation_dataset_class(
args, True, **tools.kwargs_from_args(args, "validation_dataset")
)
block.log("Validation Dataset: {}".format(args.validation_dataset))
block.log(
"Validation Input: {}".format(" ".join([str([d for d in x.size()]) for x in validation_dataset[0][0]]))
)
block.log(
"Validation Targets: {}".format(
" ".join([str([d for d in x.size()]) for x in validation_dataset[0][1]])
)
)
validation_loader = DataLoader(
validation_dataset, batch_size=args.effective_batch_size, shuffle=False, **gpuargs
)
if exists(args.inference_dataset_root):
inference_dataset = args.inference_dataset_class(
args, False, **tools.kwargs_from_args(args, "inference_dataset")
)
block.log("Inference Dataset: {}".format(args.inference_dataset))
block.log(
"Inference Input: {}".format(" ".join([str([d for d in x.size()]) for x in inference_dataset[0][0]]))
)
block.log(
"Inference Targets: {}".format(" ".join([str([d for d in x.size()]) for x in inference_dataset[0][1]]))
)
inference_loader = DataLoader(
inference_dataset, batch_size=args.effective_inference_batch_size, shuffle=False, **inf_gpuargs
)
# Dynamically load model and loss class with parameters passed in via "--model_[param]=[value]" or "--loss_[param]=[value]" arguments
with tools.TimerBlock("Building {} model".format(args.model)) as block:
class ModelAndLoss(nn.Module):
def __init__(self, args):
super(ModelAndLoss, self).__init__()
kwargs = tools.kwargs_from_args(args, "model")
self.model = args.model_class(args, **kwargs)
kwargs = tools.kwargs_from_args(args, "loss")
self.loss = args.loss_class(args, **kwargs)
def forward(self, data, target, inference=False):
output = self.model(data)
loss_values = self.loss(output, target)
if not inference:
return loss_values
else:
return loss_values, output
model_and_loss = ModelAndLoss(args)
block.log("Effective Batch Size: {}".format(args.effective_batch_size))
block.log(
"Number of parameters: {}".format(
sum([p.data.nelement() if p.requires_grad else 0 for p in model_and_loss.parameters()])
)
)
        # assign model and loss to CUDA, or wrap them with DataParallel
if args.cuda and (args.number_gpus > 0) and args.fp16:
block.log("Parallelizing")
model_and_loss = nn.parallel.DataParallel(model_and_loss, device_ids=list(range(args.number_gpus)))
block.log("Initializing CUDA")
model_and_loss = model_and_loss.cuda().half()
torch.cuda.manual_seed(args.seed)
param_copy = [param.clone().type(torch.cuda.FloatTensor).detach() for param in model_and_loss.parameters()]
elif args.cuda and args.number_gpus > 0:
block.log("Initializing CUDA")
model_and_loss = model_and_loss.cuda()
block.log("Parallelizing")
model_and_loss = nn.parallel.DataParallel(model_and_loss, device_ids=list(range(args.number_gpus)))
torch.cuda.manual_seed(args.seed)
else:
block.log("CUDA not being used")
torch.manual_seed(args.seed)
# Load weights if needed, otherwise randomly initialize
if args.resume and os.path.isfile(args.resume):
block.log("Loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
if not args.inference:
args.start_epoch = checkpoint["epoch"]
best_err = checkpoint["best_EPE"]
model_and_loss.module.model.load_state_dict(checkpoint["state_dict"])
block.log("Loaded checkpoint '{}' (at epoch {})".format(args.resume, checkpoint["epoch"]))
elif args.resume and args.inference:
block.log("No checkpoint found at '{}'".format(args.resume))
quit()
else:
block.log("Random initialization")
block.log("Initializing save directory: {}".format(args.save))
if not os.path.exists(args.save):
os.makedirs(args.save)
train_logger = SummaryWriter(log_dir=os.path.join(args.save, "train"), comment="training")
validation_logger = SummaryWriter(log_dir=os.path.join(args.save, "validation"), comment="validation")
# Dynamically load the optimizer with parameters passed in via "--optimizer_[param]=[value]" arguments
with tools.TimerBlock("Initializing {} Optimizer".format(args.optimizer)) as block:
kwargs = tools.kwargs_from_args(args, "optimizer")
if args.fp16:
optimizer = args.optimizer_class([p for p in param_copy if p.requires_grad], **kwargs)
else:
optimizer = args.optimizer_class([p for p in model_and_loss.parameters() if p.requires_grad], **kwargs)
for param, default in list(kwargs.items()):
block.log("{} = {} ({})".format(param, default, type(default)))
# Log all arguments to file
for argument, value in sorted(vars(args).items()):
block.log2file(args.log_file, "{}: {}".format(argument, value))
    # Reusable function for training and validation
def train(args, epoch, start_iteration, data_loader, model, optimizer, logger, is_validate=False, offset=0):
statistics = []
total_loss = 0
if is_validate:
model.eval()
title = "Validating Epoch {}".format(epoch)
args.validation_n_batches = np.inf if args.validation_n_batches < 0 else args.validation_n_batches
progress = tqdm(
tools.IteratorTimer(data_loader),
ncols=100,
total=np.minimum(len(data_loader), args.validation_n_batches),
leave=True,
position=offset,
desc=title,
)
else:
model.train()
title = "Training Epoch {}".format(epoch)
args.train_n_batches = np.inf if args.train_n_batches < 0 else args.train_n_batches
progress = tqdm(
tools.IteratorTimer(data_loader),
ncols=120,
total=np.minimum(len(data_loader), args.train_n_batches),
smoothing=0.9,
miniters=1,
leave=True,
position=offset,
desc=title,
)
last_log_time = progress._time()
for batch_idx, (data, target) in enumerate(progress):
data, target = [Variable(d) for d in data], [Variable(t) for t in target]
if args.cuda and args.number_gpus == 1:
data, target = [d.cuda(non_blocking=True) for d in data], [t.cuda(non_blocking=True) for t in target]
optimizer.zero_grad() if not is_validate else None
losses = model(data[0], target[0])
losses = [torch.mean(loss_value) for loss_value in losses]
loss_val = losses[0] # Collect first loss for weight update
total_loss += loss_val.item()
loss_values = [v.item() for v in losses]
            # gather loss_labels; a direct return leads to a recursion limit error as it looks for variables to gather
loss_labels = list(model.module.loss.loss_labels)
assert not np.isnan(total_loss)
if not is_validate and args.fp16:
loss_val.backward()
if args.gradient_clip:
torch.nn.utils.clip_grad_norm(model.parameters(), args.gradient_clip)
params = list(model.parameters())
for i in range(len(params)):
param_copy[i].grad = params[i].grad.clone().type_as(params[i]).detach()
                    param_copy[i].grad.mul_(1.0 / args.fp16_scale)  # undo the fp16 loss scaling
optimizer.step()
for i in range(len(params)):
params[i].data.copy_(param_copy[i].data)
elif not is_validate:
loss_val.backward()
if args.gradient_clip:
torch.nn.utils.clip_grad_norm(model.parameters(), args.gradient_clip)
optimizer.step()
# Update hyperparameters if needed
global_iteration = start_iteration + batch_idx
if not is_validate:
tools.update_hyperparameter_schedule(args, epoch, global_iteration, optimizer)
loss_labels.append("lr")
loss_values.append(optimizer.param_groups[0]["lr"])
loss_labels.append("load")
loss_values.append(progress.iterable.last_duration)
# Print out statistics
statistics.append(loss_values)
title = "{} Epoch {}".format("Validating" if is_validate else "Training", epoch)
progress.set_description(title + " " + tools.format_dictionary_of_losses(loss_labels, statistics[-1]))
if (((global_iteration + 1) % args.log_frequency) == 0 and not is_validate) or (
is_validate and batch_idx == args.validation_n_batches - 1
):
global_iteration = global_iteration if not is_validate else start_iteration
logger.add_scalar(
"batch logs per second", len(statistics) / (progress._time() - last_log_time), global_iteration
)
last_log_time = progress._time()
all_losses = np.array(statistics)
for i, key in enumerate(loss_labels):
logger.add_scalar("average batch " + str(key), all_losses[:, i].mean(), global_iteration)
logger.add_histogram(str(key), all_losses[:, i], global_iteration)
# Reset Summary
statistics = []
if is_validate and (batch_idx == args.validation_n_batches):
break
if (not is_validate) and (batch_idx == (args.train_n_batches)):
break
progress.close()
return total_loss / float(batch_idx + 1), (batch_idx + 1)
# Reusable function for inference
def inference(args, epoch, data_loader, model, offset=0):
model.eval()
if args.save_flow or args.render_validation:
flow_folder = "{}/inference/{}.epoch-{}-flow-field".format(args.save, args.name.replace("/", "."), epoch)
if not os.path.exists(flow_folder):
os.makedirs(flow_folder)
# visualization folder
if args.inference_visualize:
flow_vis_folder = "{}/inference/{}.epoch-{}-flow-vis".format(args.save, args.name.replace("/", "."), epoch)
if not os.path.exists(flow_vis_folder):
os.makedirs(flow_vis_folder)
args.inference_n_batches = np.inf if args.inference_n_batches < 0 else args.inference_n_batches
progress = tqdm(
data_loader,
ncols=100,
total=np.minimum(len(data_loader), args.inference_n_batches),
desc="Inferencing ",
leave=True,
position=offset,
)
statistics = []
total_loss = 0
for batch_idx, (data, target) in enumerate(progress):
if args.cuda:
data, target = [d.cuda(non_blocking=True) for d in data], [t.cuda(non_blocking=True) for t in target]
data, target = [Variable(d) for d in data], [Variable(t) for t in target]
            # when ground-truth flows are not available for inference_dataset,
            # the targets are set to all zeros. Thus, losses are actually L1 or L2 norms of the computed optical flows,
            # depending on the type of loss norm passed in
with torch.no_grad():
losses, output = model(data[0], target[0], inference=True)
losses = [torch.mean(loss_value) for loss_value in losses]
loss_val = losses[0] # Collect first loss for weight update
total_loss += loss_val.item()
loss_values = [v.item() for v in losses]
            # gather loss_labels; a direct return leads to a recursion limit error as it looks for variables to gather
loss_labels = list(model.module.loss.loss_labels)
statistics.append(loss_values)
# import IPython; IPython.embed()
if args.save_flow or args.render_validation:
for i in range(args.inference_batch_size):
_pflow = output[i].data.cpu().numpy().transpose(1, 2, 0)
flow_utils.writeFlow(
join(flow_folder, "%06d.flo" % (batch_idx * args.inference_batch_size + i)), _pflow
)
# You can comment out the plt block in visulize_flow_file() for real-time visualization
if args.inference_visualize:
flow_utils.visulize_flow_file(
join(flow_folder, "%06d.flo" % (batch_idx * args.inference_batch_size + i)),
flow_vis_folder,
)
progress.set_description(
"Inference Averages for Epoch {}: ".format(epoch)
+ tools.format_dictionary_of_losses(loss_labels, np.array(statistics).mean(axis=0))
)
progress.update(1)
if batch_idx == (args.inference_n_batches - 1):
break
progress.close()
return
# Primary epoch loop
best_err = 1e8
progress = tqdm(
list(range(args.start_epoch, args.total_epochs + 1)),
miniters=1,
ncols=100,
desc="Overall Progress",
leave=True,
position=0,
)
offset = 1
last_epoch_time = progress._time()
global_iteration = 0
for epoch in progress:
if args.inference or (args.render_validation and ((epoch - 1) % args.validation_frequency) == 0):
stats = inference(
args=args, epoch=epoch - 1, data_loader=inference_loader, model=model_and_loss, offset=offset
)
offset += 1
if not args.skip_validation and ((epoch - 1) % args.validation_frequency) == 0:
validation_loss, _ = train(
args=args,
epoch=epoch - 1,
start_iteration=global_iteration,
data_loader=validation_loader,
model=model_and_loss,
optimizer=optimizer,
logger=validation_logger,
is_validate=True,
offset=offset,
)
offset += 1
is_best = False
if validation_loss < best_err:
best_err = validation_loss
is_best = True
checkpoint_progress = tqdm(ncols=100, desc="Saving Checkpoint", position=offset)
tools.save_checkpoint(
{
"arch": args.model,
"epoch": epoch,
"state_dict": model_and_loss.module.model.state_dict(),
"best_EPE": best_err,
},
is_best,
args.save,
args.model,
)
checkpoint_progress.update(1)
checkpoint_progress.close()
offset += 1
if not args.skip_training:
train_loss, iterations = train(
args=args,
epoch=epoch,
start_iteration=global_iteration,
data_loader=train_loader,
model=model_and_loss,
optimizer=optimizer,
logger=train_logger,
offset=offset,
)
global_iteration += iterations
offset += 1
# save checkpoint after every validation_frequency number of epochs
if ((epoch - 1) % args.validation_frequency) == 0:
checkpoint_progress = tqdm(ncols=100, desc="Saving Checkpoint", position=offset)
tools.save_checkpoint(
{
"arch": args.model,
"epoch": epoch,
"state_dict": model_and_loss.module.model.state_dict(),
"best_EPE": train_loss,
},
False,
args.save,
args.model,
filename="train-checkpoint.pth.tar",
)
checkpoint_progress.update(1)
checkpoint_progress.close()
train_logger.add_scalar("seconds per epoch", progress._time() - last_epoch_time, epoch)
last_epoch_time = progress._time()
print("\n")
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from django.core.mail import send_mass_mail
from django.template.loader import render_to_string
from django.contrib.sites.models import Site
from .models import Album, AlbumImageRelation, Image
from .utils import (
search_tweets_by_hashtag, get_original_image_url_from_tweet,
get_tweet_id, get_tweet_url, get_image_from_url,
)
# todo: consider helpful logger naming
logger = logging.getLogger(__name__)
def import_photo_from_tweet(tweet, album_instance):
"""
Import a single photo from a single tweet data (received with twitter api).
:param tweet: dict tweet data.
:param album_instance: .models.Album instance
:return: int or None, None if nothing was imported, image_instance.pk in case of
successful import
"""
tweet_id = get_tweet_id(tweet)
tweet_url = get_tweet_url(tweet)
original_image_url = get_original_image_url_from_tweet(tweet)
# check that we have image url
if original_image_url is None:
logger.debug('Skipping: No original_image_url found for tweet {}'.format(tweet_url))
return None
# validate uniqueness
album_image_relation = AlbumImageRelation.objects.filter(album=album_instance, image__original_image_url=original_image_url)
if album_image_relation.exists():
logger.debug('Skipping duplicate image entry for tweet {}'.format(tweet_url))
return None
# check if we need to fetch an image
try:
image_instance = Image.objects.get(original_image_url=original_image_url)
logger.debug(
'Found existing Image in the database, (pk={}, url={} '.format(
image_instance.id, original_image_url))
except Image.DoesNotExist:
image_instance = None
# if there is no previously imported image - create one
if image_instance is None:
logger.debug('Fetching the image file from url {}'.format(original_image_url))
image_django_file = get_image_from_url(original_image_url)
logger.debug('Creating new Image entry for url {}'.format(original_image_url))
image_instance = Image.objects.create(image_file=image_django_file,
original_image_url=original_image_url)
logger.debug('Creating new Album to Image relation for tweet: {}'.format(tweet_url))
album_instance.image_relations.create(
image=image_instance,
tweet_id=tweet_id,
tweet_url=tweet_url)
return image_instance.pk
def import_photos_for_album(api, album_name, limit=100):
"""
Imports photos from twitter by searching tweets with hash tag that is the
same as album name. This function will search twitter, fetch photos and create
corresponding entries in the database and notify the managers and the admin
with import results.
:param api: Twython instance, twitter api connection
:param album_name: str album name - the hash tag without the '#' symbol
:param limit: int limit twitter search results
:return: list of imported photos pks
"""
logger.info('Starting import for album name "{}"'.format(album_name))
try:
logger.debug('Getting the album instance by name')
album_instance = Album.objects.get(name=album_name)
except Album.DoesNotExist as e:
logger.error(
            'No album instance found in the database for name {}'.format(album_name))
return []
hash_tag = '#{}'.format(album_name)
    # check if there were previous imports; if there are, we only
    # need the latest tweet id.
    # Also limit the query to 1 record, and only the tweet_id field.
last_imported_tweet_id_for_album = (
album_instance.image_relations
.all()
.order_by('-tweet_id')
.values_list('tweet_id')[:1])
if last_imported_tweet_id_for_album:
# if there were previous imports - use appropriate twitter id
last_imported_tweet_id = last_imported_tweet_id_for_album[0][0]
logger.debug(
'Found last imported tweet_id from previous import: {}'.format(
last_imported_tweet_id))
else:
logger.debug(
'No previous imports found for album {}'.format(
album_name))
last_imported_tweet_id = None
logger.debug(
'search_tweets_by_hashtag.\n'
'\thash tag: {hash_tag}\n'
'\tlimit: {limit}\n'
'\tsince_id: {since_id}\n'
'\timage_only: {image_only}'.format(
hash_tag=hash_tag,
limit=limit,
since_id=last_imported_tweet_id,
image_only=True
))
search_results = search_tweets_by_hashtag(
api=api,
hash_tag=hash_tag,
limit=limit,
since_id=last_imported_tweet_id,
image_only=True
)
logger.debug('Got {} search results after the query'.format(
len(search_results)))
# Process the search results
successful_imports_pks = []
for tweet in search_results:
image_pk = import_photo_from_tweet(tweet, album_instance=album_instance)
if image_pk is not None:
successful_imports_pks.append(image_pk)
# log results
if successful_imports_pks:
logger.debug('Successfully imported {} photo(s)'.format(
len(successful_imports_pks)))
logger.debug('Imported images pks: \n{}'.format(
str(successful_imports_pks)))
else:
logger.debug('No new images were imported.')
return successful_imports_pks
def construct_notification_emails(subject_template_name, body_template_name,
album_name, photo_pks_list, from_email,
recipients):
"""
Construct email messages to notify recipients about import results.
:param subject_template_name: str path to subject template
:param body_template_name: str path to body template
    :param album_name: str name of the album, will be reflected in the subject
:param photo_pks_list: list of photo pks to build urls
:param from_email: str email address to be used in from_email field
    :param recipients: list of recipient email addresses
:return: list of tuples suitable for usage in send_mass_email
"""
photos = Image.objects.filter(pk__in=photo_pks_list)
album = Album.objects.get(name=album_name)
site = Site.objects.get_current()
context_dict = {
'photos': photos,
'album_name': album_name,
'album_total_photos_count': album.images.count(),
'number_of_photos': len(photos),
'album_url': 'http://{}{}'.format(site.domain,
album.get_absolute_url()),
}
subject = render_to_string(subject_template_name, context=context_dict)
# clean the subject, must be one line
subject = subject.replace('\n', '').replace('\r', '').strip()
body = render_to_string(body_template_name, context_dict)
prepared_data = [(subject, body, from_email, (to_email,)) for to_email in recipients]
return prepared_data
def send_email_notifications(subject_template_name, body_template_name,
album_name, photo_pks_list, from_email, recipients):
"""
Constructs and sends import email notifications to recipients.
:param subject_template_name: str path to subject template
:param body_template_name: str path to body template
    :param album_name: str name of the album, will be reflected in the subject
:param photo_pks_list: list of photo pks to build urls
:param from_email: str email address to be used in from_email field
    :param recipients: list of recipient email addresses
:return: number of sent emails
"""
data = construct_notification_emails(
subject_template_name=subject_template_name,
body_template_name=body_template_name,
album_name=album_name,
photo_pks_list=photo_pks_list,
from_email=from_email,
recipients=recipients,
)
return send_mass_mail(data)
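# --- Usage sketch (not part of the original file; credentials, album name and
# --- template paths are placeholders) ---
#
#   from twython import Twython
#   api = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
#   imported_pks = import_photos_for_album(api, album_name='sunsets', limit=100)
#   if imported_pks:
#       send_email_notifications(
#           subject_template_name='emails/import_subject.txt',
#           body_template_name='emails/import_body.txt',
#           album_name='sunsets', photo_pks_list=imported_pks,
#           from_email='noreply@example.com', recipients=['admin@example.com'])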
|
# -*- coding: utf-8 -*-
import sys
sys.path.append('../../../python')
import inject, logging
import psycopg2
from model.config import Config
''' configure the injector with the appropriate variables '''
def config_injector(binder):
binder.bind(Config,Config('firmware-config.cfg'))
inject.configure(config_injector)
config = inject.instance(Config)
def getDb():
global config
return psycopg2.connect(host=config.configs['database_host'], dbname=config.configs['database_database'], user=config.configs['database_user'], password=config.configs['database_password'])
logging.getLogger().setLevel(logging.INFO)
from model.users.users import Users
from model.credentials.credentials import UserPassword
if __name__ == '__main__':
conn = getDb()
try:
users = inject.instance(Users)
us = users.listUsers(conn)
for u in us:
logging.info(u)
''' conn.commit() '''
finally:
conn.close()
|
# -*- coding: utf-8 -*-
val = float(input())
total = val
ced = 100
totced = 0
tipo = 'nota'
print('NOTAS:')
while True:
if(total >= ced):
total -= ced
totced += 1
else:
print(f'{totced} {tipo}(s) de R$ {ced:.2f}')
if(ced == 100):
ced = 50
elif(ced == 50):
ced = 20
elif(ced == 20):
ced = 10
elif(ced == 10):
ced = 5
elif(ced == 5):
ced = 2
elif(ced == 2):
ced = 1
print('MOEDAS:')
tipo = 'moeda'
elif(ced == 1):
ced = 0.50
elif(ced == 0.50):
ced = 0.25
elif(ced == 0.25):
ced = 0.10
elif(ced == 0.10):
ced = 0.05
elif(ced == 0.05):
ced = 0.01
else: break
totced = 0
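# --- Example run (illustrative) ---
# The output strings are in Portuguese, matching what looks like a fixed
# judge-style output format (e.g. URI 1021), so they are left untranslated.
#   input : 101.50
#   output:
#     NOTAS:
#     1 nota(s) de R$ 100.00
#     0 nota(s) de R$ 50.00
#     0 nota(s) de R$ 20.00
#     0 nota(s) de R$ 10.00
#     0 nota(s) de R$ 5.00
#     0 nota(s) de R$ 2.00
#     MOEDAS:
#     1 moeda(s) de R$ 1.00
#     1 moeda(s) de R$ 0.50
#     0 moeda(s) de R$ 0.25
#     0 moeda(s) de R$ 0.10
#     0 moeda(s) de R$ 0.05
#     0 moeda(s) de R$ 0.01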
|
# -*- coding: utf-8 -*-
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def rangeSumBST(self, root, L, R):
if root is None:
return 0
result = 0
if L <= root.val <= R:
result += root.val
if L <= root.val:
result += self.rangeSumBST(root.left, L, R)
if root.val <= R:
result += self.rangeSumBST(root.right, L, R)
return result
if __name__ == "__main__":
solution = Solution()
t0_0 = TreeNode(10)
t0_1 = TreeNode(5)
t0_2 = TreeNode(15)
t0_3 = TreeNode(3)
t0_4 = TreeNode(7)
t0_5 = TreeNode(18)
t0_2.right = t0_5
t0_1.right = t0_4
t0_1.left = t0_3
t0_0.right = t0_2
t0_0.left = t0_1
assert 32 == solution.rangeSumBST(t0_0, 7, 15)
t1_0 = TreeNode(10)
t1_1 = TreeNode(5)
t1_2 = TreeNode(15)
t1_3 = TreeNode(3)
t1_4 = TreeNode(7)
t1_5 = TreeNode(13)
t1_6 = TreeNode(18)
t1_7 = TreeNode(1)
t1_8 = TreeNode(6)
t1_4.left = t1_8
t1_3.left = t1_7
t1_2.right = t1_6
t1_2.left = t1_5
t1_1.right = t1_4
t1_1.left = t1_3
t1_0.right = t1_2
t1_0.left = t1_1
assert 23 == solution.rangeSumBST(t1_0, 6, 10)
|