text stringlengths 8 6.05M |
|---|
import logging
from datetime import datetime
from typing import Optional, Dict, Sequence, List
from waitlist.utility.swagger.eve import ESIResponse
logger = logging.getLogger(__name__)
class SearchResponse(ESIResponse):
    """Wrapper around an ESI /search/ response.

    Holds the raw category -> id-list payload and offers typed accessors
    for the common categories.
    """

    def __init__(self, expires: datetime, status_code: int, error: Optional[str],
                 data: Optional[Dict[str, Sequence[int]]]) -> None:
        super().__init__(expires, status_code, error)
        # Raw search payload mapping a category name (e.g. 'character')
        # to the matching ids; None on an error/empty response.
        self.data: Optional[Dict[str, Sequence[int]]] = data

    def character_ids(self) -> Optional[Sequence[int]]:
        """Ids found in the 'character' category, or None if absent."""
        return self.__get_ids('character')

    def inventory_type_ids(self) -> Optional[Sequence[int]]:
        """Ids found in the 'inventory_type' category, or None if absent."""
        return self.__get_ids('inventory_type')

    def ids(self, types: Sequence[str]) -> List[int]:
        """Flattened list of ids across the given categories.

        Categories with no results are silently skipped.
        """
        result: List[int] = []
        for type_name in types:
            r = self.__get_ids(type_name)
            if r is not None:
                result.extend(r)
        return result

    def __get_ids(self, name: str) -> Optional[Sequence[int]]:
        # Fix: `data` may legitimately be None (see the Optional annotation
        # in __init__); `name not in None` previously raised TypeError.
        if self.data is None or name not in self.data:
            return None
        return self.data[name]
|
# -*- coding: utf-8 -*-
import sys
from project_ui import Ui_MainWindow
import cv2 as cv
import numpy as np
import glob
import os
from PyQt5.QtWidgets import QMainWindow, QApplication
class MainWindow(QMainWindow, Ui_MainWindow):
    """Main window for the OpenCV homework: camera calibration (1.x),
    augmented-reality cube projection (2.1), 2D image transforms (3.x)
    and stereo disparity (4.x).  Button handlers are wired in onBindingUI.
    """

    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        self.setupUi(self)
        self.onBindingUI()

    # Write your code below
    # UI components are defined in hw1_ui.py, please take a look.
    # You can also open hw1.ui by qt-designer to check ui components.
    def onBindingUI(self):
        """Connect every UI button to its click handler."""
        self.btn1_1.clicked.connect(self.on_btn1_1_click)
        self.btn1_2.clicked.connect(self.on_btn1_2_click)
        self.btn1_3.clicked.connect(self.on_btn1_3_click)
        self.btn1_4.clicked.connect(self.on_btn1_4_click)
        self.btn2_1.clicked.connect(self.on_btn2_1_click)
        self.btn3_1.clicked.connect(self.on_btn3_1_click)
        self.btn3_2.clicked.connect(self.on_btn3_2_click)
        self.btn4_1.clicked.connect(self.on_btn4_1_click)
        self.btn4_2.clicked.connect(self.on_btn4_2_click)

    # button for problem 1.1
    def on_btn1_1_click(self):
        """1.1: detect and display chessboard corners in every calibration image."""
        # add your code here
        # termination criteria
        criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
        # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
        objp = np.zeros((8*11,3), np.float32)
        objp[:,:2] = np.mgrid[0:11,0:8].T.reshape(-1,2)
        # Arrays to store object points and image points from all the images.
        objpoints = [] # 3d point in real world space
        imgpoints = [] # 2d points in image plane.
        images= glob.glob('./images/CameraCalibration/*.bmp')
        i=1
        for fname in images:
            img = cv.imread(fname)
            gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
            # Find the chess board corners
            # NOTE(review): pattern size is (8,11) here but (11,8) in every
            # other handler — confirm which orientation matches the boards.
            ret, corners = cv.findChessboardCorners(gray, (8,11),None)
            # If found, add object points, image points (after refining them)
            i=i+1
            if ret == True:
                objpoints.append(objp)
                corners2 = cv.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
                imgpoints.append(corners2)
                # Draw and display the corners
                img = cv.drawChessboardCorners(img, (8,11), corners2,ret)
                # chr(i) gives each image its own (cryptically named) window.
                cv.namedWindow(chr(i),cv.WINDOW_GUI_NORMAL )
                cv.imshow(chr(i),img)
                cv.waitKey(500)
        cv.destroyAllWindows()

    def on_btn1_2_click(self):
        """1.2: calibrate the camera and print the 3x3 intrinsic matrix."""
        # termination criteria
        criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
        # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
        objp = np.zeros((8*11,3), np.float32)
        objp[:,:2] = np.mgrid[0:11,0:8].T.reshape(-1,2)
        # Arrays to store object points and image points from all the images.
        objpoints = [] # 3d point in real world space
        imgpoints = [] # 2d points in image plane.
        images = glob.glob('./images/CameraCalibration/*.bmp')
        i=1
        for fname in images:
            img = cv.imread(fname)
            gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
            # Find the chess board corners
            ret, corners = cv.findChessboardCorners(gray, (11,8),None)
            # If found, add object points, image points (after refining them)
            if ret == True:
                objpoints.append(objp)
                corners2 = cv.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
                imgpoints.append(corners2)
                # Draw and display the corners
                img = cv.drawChessboardCorners(img, (11,8), corners2,ret)
                # cv.namedWindow('img',cv.WINDOW_GUI_NORMAL )
        ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)
        # Print mtx in a MATLAB-like "[a , b , c ; ...]" layout.
        # NOTE(review): row ends are detected by comparing VALUES against
        # mtx[0][2]/mtx[1][2]/mtx[2][2] — this breaks if a value repeats.
        for entry in mtx:
            print('[',end='')
            for entry1 in entry:
                if entry1==mtx[0][2] or entry1==mtx[1][2]:
                    print('%f' % entry1,';')
                elif entry1==mtx[2][2]:
                    print('%f' % entry1,';]')
                else:
                    print('%f'% entry1,',',end='')
        cv.waitKey(500)
        cv.destroyAllWindows()

    def on_btn1_3_click(self):
        """1.3: print the 3x4 extrinsic matrix [R|t] for the image selected
        in the combo box."""
        # cboxImgNum to access to the ui object
        current = self.cboxImgNum.currentText()
        criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
        # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
        objp = np.zeros((8*11,3), np.float32)
        objp[:,:2] = np.mgrid[0:11,0:8].T.reshape(-1,2)
        # Arrays to store object points and image points from all the images.
        objpoints = [] # 3d point in real world space
        imgpoints = [] # 2d points in image plane.
        path = './images/CameraCalibration/'+repr(int(current))+'.bmp'
        images = glob.glob('./images/CameraCalibration/'+repr(int(current))+'.bmp')
        i=1
        for fname in images:
            img = cv.imread(fname)
            gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
            # Find the chess board corners
            ret, corners = cv.findChessboardCorners(gray, (11,8),None)
            # If found, add object points, image points (after refining them)
            if ret == True:
                objpoints.append(objp)
                corners2 = cv.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
                imgpoints.append(corners2)
                # Draw and display the corners
                img = cv.drawChessboardCorners(img, (11,8), corners2,ret)
        ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)
        # Rodrigues converts the rotation vector into a 3x3 rotation matrix;
        # stacking the translation column yields the extrinsic matrix.
        R_matrix,J = cv.Rodrigues(rvecs[0])
        Extrinsic = np.hstack((R_matrix,tvecs[0]))
        print('[',end='')
        for entry in Extrinsic:
            for entry1 in entry:
                # Value-based end-of-row detection, as in on_btn1_2_click.
                if entry1==Extrinsic[0][3] or entry1==Extrinsic[1][3]:
                    print(entry1,';')
                elif entry1 == Extrinsic[2][-1]:
                    print(entry1,';]')
                else:
                    print(entry1,',',end='')

    def on_btn1_4_click(self):
        """1.4: calibrate and print the five distortion coefficients."""
        # cboxImgNum to access to the ui object
        criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
        # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
        objp = np.zeros((8*11,3), np.float32)
        objp[:,:2] = np.mgrid[0:11,0:8].T.reshape(-1,2)
        # Arrays to store object points and image points from all the images.
        objpoints = [] # 3d point in real world space
        imgpoints = [] # 2d points in image plane.
        images = glob.glob('./images/CameraCalibration/*.bmp')
        i=1
        for fname in images:
            img = cv.imread(fname)
            gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
            # Find the chess board corners
            ret, corners = cv.findChessboardCorners(gray, (11,8),None)
            # If found, add object points, image points (after refining them)
            if ret == True:
                objpoints.append(objp)
                corners2 = cv.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
                imgpoints.append(corners2)
                # Draw and display the corners
                img = cv.drawChessboardCorners(img, (11,8), corners2,ret)
        ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)
        # NOTE(review): leftover debug counter — prints 1 once.
        print(i)
        i=i+1
        # dist is a 1x5 row: k1, k2, p1, p2, k3.
        print('[',end='')
        for entry in dist:
            for entry1 in entry:
                if entry1==entry[4]:
                    print(entry1,']')
                else:
                    print(entry1,',',end='')

    def on_btn2_1_click(self):
        """2.1: project a cube onto each chessboard image (augmented
        reality), write the frames to a video and play it back."""
        def draw(img, corners, imgpts):
            # Draw the 8 projected cube corners as base, pillars and top.
            imgpts = np.int32(imgpts).reshape(-1,2)
            # draw ground floor in green
            img = cv.drawContours(img, [imgpts[:4]],-1,(0,0,255),10)
            # draw pillars in blue color
            for i,j in zip(range(4),range(4,8)):
                img = cv.line(img, tuple(imgpts[i]), tuple(imgpts[j]),(0,0,255),10)
            # draw top layer in red color
            img = cv.drawContours(img, [imgpts[4:]],-1,(0,0,255),10)
            return img
        criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
        objp = np.zeros((8*11,3), np.float32)
        objp[:,:2] = np.mgrid[0:11,0:8].T.reshape(-1,2)
        # Cube corners in board coordinates (z = -2 is "above" the board).
        axis = np.float32([[0,0,0], [0,2,0], [2,2,0], [2,0,0],
                           [0,0,-2],[0,2,-2],[2,2,-2],[2,0,-2] ])
        objpoints = [] # 3d point in real world space
        imgpoints = [] # 2d points in image plane.
        # First pass: gather correspondences and calibrate.
        for fname in glob.glob('./images/2_1/*.bmp'):
            img = cv.imread(fname)
            gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
            ret, corners = cv.findChessboardCorners(gray, (11,8),None)
            if ret == True:
                objpoints.append(objp)
                corners2 = cv.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
                imgpoints.append(corners2)
        ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)
        # Second pass: estimate each board's pose and draw the cube.
        V_img=[]
        for fname in glob.glob('./images/2_1/*.bmp'):
            img = cv.imread(fname)
            gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
            ret, corners = cv.findChessboardCorners(gray, (11,8),None)
            if ret == True:
                corners2 = cv.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
                # Find the rotation and translation vectors.
                x,rvecs, tvecs, inliers = cv.solvePnPRansac(objp, corners2, mtx, dist)
                # project 3D points to image plane
                imgpts, jac = cv.projectPoints(axis, rvecs, tvecs, mtx, dist)
                img = draw(img,corners2,imgpts)
                V_img.append(img)
        h, w, l = V_img[0].shape
        fourcc = cv.VideoWriter_fourcc(*'XVID')
        # 0x7634706d is the 'mp4v' fourcc; 2 fps output.
        videoWriter = cv.VideoWriter('./images/v0.avi', 0x7634706d, 2, (w,h))
        for i in range(0,5):
            videoWriter.write(V_img[i])
        # NOTE(review): the video is re-opened for playback before
        # videoWriter.release() is called — confirm the file is flushed.
        capture = cv.VideoCapture("./images/v0.avi")
        if capture.isOpened():
            while True:
                ret, prev = capture.read()
                if ret==True:
                    cv.imshow('video', prev)
                else:
                    break
                # ESC (27) stops the playback early.
                if cv.waitKey(20)==27:
                    break
        # cap = cv.VideoCapture('./images/v0.mp4')
        # print(type(cap))
        # ret, frame = cap.read()
        # g = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        # cv.imshow('frame',videoWriter)
        videoWriter.release()
        cv.destroyAllWindows()

    def on_btn3_1_click(self):
        """3.1: translate, then rotate & scale the test image using the
        values typed into the UI fields."""
        # edtAngle, edtScale. edtTx, edtTy to access to the ui object
        Angle = self.edtAngle.text()
        Scale = self.edtScale.text()
        Tx = float(self.edtTx.text())
        Ty = float(self.edtTy.text())
        img = cv.imread('./images/OriginalTransform.png')
        # 2x3 affine matrix for a pure translation by (Tx, Ty).
        H = np.float32([[1,0,Tx],[0,1,Ty]])
        rows,cols = img.shape[:2]
        # NOTE(review): warpAffine's dsize is (width, height); passing
        # (rows, cols) is only correct for square images — confirm.
        res = cv.warpAffine(img,H,(rows,cols))
        # rotate & Scale]
        rows,cols = res.shape[:2]
        # Pivot (130,125) follows the translated image content.
        M = cv.getRotationMatrix2D((130+Tx,125+Ty),float(Angle),float(Scale))
        res = cv.warpAffine(res,M,(rows,cols))
        cv.imshow('img',res)

    def on_btn3_2_click(self):
        """3.2: let the user click the four corners of the document, then
        warp it to a fronto-parallel view."""
        imgpoints=[]
        # Target corner positions (clockwise) in the rectified output.
        objpoints = [[20,20],[450,20],[450,450],[20,450]]
        def draw_circle(event,x,y,flags,param):
            # Mouse callback: collect clicks; after the 4th, do the warp.
            if event == cv.EVENT_LBUTTONDOWN:
                nonlocal imgpoints
                imgpoints.append([x,y])
                if len(imgpoints)==4:
                    pts1 = np.float32(imgpoints)
                    pts2 = np.float32(objpoints)
                    M = cv.getPerspectiveTransform(pts1,pts2)
                    dst = cv.warpPerspective(img,M,(430,430))
                    cv.imshow('new image', dst)
        img = cv.imread('./images/OriginalPerspective.png')
        cv.namedWindow('image')
        cv.imshow('image',img)
        cv.setMouseCallback('image',draw_circle)

    def on_btn4_1_click(self):
        """4.1: compute and show a stereo disparity map (no L-R check)."""
        # Flag 0 loads both images as grayscale.
        imgL = cv.imread('./images/imL.png',0)
        imgR = cv.imread('./images/imR.png',0)
        stereo = cv.StereoSGBM_create(numDisparities=48, blockSize=3, disp12MaxDiff=0) #0~47/window size:3x3, block size>5?
        disparity = stereo.compute(imgL,imgR)
        # res = cv.convertScaleAbs(disparity)
        # res = cv.cvtColor(disparity, cv.COLOR_BGR2RGB)
        # Rescale the fixed-point disparity into a viewable 8-bit image.
        normalizedImg = np.zeros((800, 800))
        normalizedImg = cv.normalize(disparity, normalizedImg, 0, 255, cv.NORM_MINMAX,cv.CV_8U)
        cv.imshow('Without L-R Disparity check',normalizedImg)
        # plt.imshow(disparity,'gray')
        # plt.show()

    def on_btn4_2_click(self):
        """4.2: disparity maps with and without the left-right consistency
        check, plus a view marking every differing pixel in red."""
        imgL = cv.imread('./images/imL.png',0)
        imgR = cv.imread('./images/imR.png',0)
        cv.imshow('o',imgL)
        stereo = cv.StereoSGBM_create(numDisparities=48, blockSize=3, disp12MaxDiff=0) #0~47/window size:3x3, block size>5?
        disparity = stereo.compute(imgL,imgR)
        # disp12MaxDiff=2 enables the left-right consistency check.
        stereo_with = cv.StereoSGBM_create(numDisparities=48, blockSize=3, disp12MaxDiff=2)
        disparity_with = stereo_with.compute(imgL, imgR)
        # res = cv.convertScaleAbs(disparity)
        # res = cv.cvtColor(disparity, cv.COLOR_BGR2RGB)
        normalizedImg = np.zeros((800, 800))
        normalizedImg = cv.normalize(disparity, normalizedImg, 0, 255, cv.NORM_MINMAX,cv.CV_8U)
        cv.imshow('Without L-R Disparity',normalizedImg)
        normalizedImg_with = np.zeros((800, 800))
        normalizedImg_with = cv.normalize(disparity_with, normalizedImg_with, 0, 255, cv.NORM_MINMAX,cv.CV_8U)
        cv.imshow('With L-R Disparity',normalizedImg_with)
        # Pixels where the two maps disagree get painted red (BGR order).
        diff = cv.absdiff(normalizedImg, normalizedImg_with)
        (x,y) = np.where(diff>0)
        backtorgb = cv.cvtColor(normalizedImg_with,cv.COLOR_GRAY2RGB)
        for i in range(len(x)):
            backtorgb[x[i],y[i]]=(0, 0, 255)
        cv.imshow('Mark the diff.', backtorgb)
        # print(type(normalizedImg_with[0,0]))
        # normalizedImg_with[x,y] = (0,0,255)
        # cv.imwrite('./images/temp.jpg',diff)
        # mask = cv.imread('./images/temp.jpg')
        # mask = cv.cvtColor(mask, cv.COLOR_BGR2GRAY)
        # th = 1
        # imask = mask>th
        # canvas = np.zeros_like(normalizedImg_with, np.uint8)
        # canvas[imask] = normalizedImg_with[imask]
        # canvas.setTo(new Scalar(0,0,255))
        # print(canvas)
        # output = cv.bitwise_and(normalizedImg_with, normalizedImg_with, mask = canvas)
        # colors = {"red": [0.1,0.,0.], "blue": [0.,0.,0.1]}
        # colored_mask = np.multiply(mask, colors["red"])
        # normalizedImg_with = normalizedImg_with+colored_mask
        # cv.imshow('i',output)
### ### ###
if __name__ == "__main__":
    # Standard Qt bootstrap: create the application, show the main window
    # and hand control to the Qt event loop until the window closes.
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())
|
# -*- coding: utf-8 -*-
from collections import Counter
class Solution:
    def intersect(self, nums1, nums2):
        """Multiset intersection of two integer lists.

        Each value appears min(count in nums1, count in nums2) times,
        grouped by its first occurrence in nums1.
        """
        common = Counter(nums1) & Counter(nums2)
        return list(common.elements())
if __name__ == "__main__":
    # Smoke test: the multiset intersection keeps duplicate values.
    solution = Solution()
    assert [2, 2] == solution.intersect([1, 2, 2, 1], [2, 2])
|
from django.contrib import admin
# Register your models here.
from .models import Dessert
# Expose the Dessert model in the Django admin with the default ModelAdmin.
admin.site.register(Dessert)
|
from game import Game
from model.components.ai.monster import BasicMonster, StunnedMonster, FrozenMonster, ConfusedMonster
from model.config import config
from model.entities.party.player import Player
from model.maps.area_map import AreaMap
import pytest
from unittest.mock import Mock
def setup_module(module):
    """Module-level pytest hook: build the Game singleton with a player
    and a 9x9 area map shared by every test class below."""
    Game()
    Game.instance.player = Player()
    Game.instance.area_map = AreaMap(9, 9)
class TestBasicMonster:
    """BasicMonster chases the player while visible: move when far,
    attack when adjacent, optionally wander when out of sight."""

    @pytest.fixture
    def ai(self):
        # Owner is a Mock; place it on the single visible tile so the AI
        # believes it can see the player.
        mock_ai = BasicMonster(Mock())
        visible_tile = (1, 1)
        Game.instance.renderer = Mock(visible_tiles=[visible_tile])
        mock_ai.owner.x, mock_ai.owner.y = visible_tile
        # Register mock fighters for both player and monster.
        Game.instance.fighter_system.set(Game.instance.player, Mock(hp=5))
        Game.instance.fighter_system.set(mock_ai.owner, Mock())
        yield mock_ai

    def test_take_turn_moves_towards_player_when_in_sight(self, ai):
        # Distance 4 > melee range, so the monster should close in.
        ai.owner.distance_to.return_value = 4
        ai.take_turn()
        assert ai.owner.move_towards.called

    def test_take_turn_attacks_player_when_close_enough(self, ai):
        # Adjacent (distance 1): the monster's fighter attacks the player.
        ai.owner.distance_to.return_value = 1
        ai.take_turn()
        Game.instance.fighter_system.get(ai.owner).attack.assert_called_with(Game.instance.player)

    # This test only exists when the config enables out-of-sight wandering.
    if config.data.enemies.randomlyWalkWhenOutOfSight:
        def test_take_turn_randomly_walks_if_out_of_sight(self, ai, monkeypatch):
            Game.instance.renderer.visible_tiles = []
            mock_walker = Mock()
            # Swap the RandomWalker class so construction/usage can be asserted.
            monkeypatch.setattr('model.components.ai.monster.RandomWalker', mock_walker)
            ai.take_turn()
            assert mock_walker.called
            assert mock_walker().walk.called
class TestStunnedMonster:
    """StunnedMonster counts down num_turns, showing the remaining turns
    as its display character, then restores the owner's first letter."""

    @pytest.fixture
    def ai(self):
        yield StunnedMonster(Mock())

    def test_take_turn_stays_stunned_for_num_turns(self, ai):
        ai.num_turns = num_turns = 5
        ai.owner.name = 'tiger'
        for i in range(num_turns, 1, -1):
            old_num_turns = ai.num_turns
            ai.take_turn()
            # Display char shows the last digit of the remaining turns.
            assert ai.owner.char == str(i-1)[-1]
            assert ai.num_turns == old_num_turns - 1
        # Final turn: stun expires and the char reverts to the name's initial.
        ai.take_turn()
        assert ai.num_turns == 0
        assert ai.owner.char == 't'
class TestFrozenMonster:
    """FrozenMonster behaves like a stun, but shatters (dies) when damage
    brings it to half of max_hp or below."""

    @pytest.fixture
    def ai(self):
        mock_owner = Mock()
        Game.instance.fighter_system.set(mock_owner, Mock())
        mock_ai = FrozenMonster(mock_owner)
        # max_hp 1000 makes the half-health threshold exactly 500.
        mock_ai.owner_fighter.max_hp = 1000
        yield mock_ai

    def test_take_turn_stays_stunned_for_num_turns(self, ai):
        # Frozen shares the stun countdown, so reuse the stun test body.
        TestStunnedMonster.test_take_turn_stays_stunned_for_num_turns(self, ai)

    def test_new_take_damage_strategy_kills_if_equal_half_health(self, ai):
        # 500 - 50 = 450 <= 500 (half of 1000): the monster dies outright.
        ai.owner_fighter.hp = 500
        ai.new_take_damage_strategy(50)
        assert ai.owner_fighter.die.called

    def test_new_take_damage_strategy_kills_if_below_half_health(self, ai):
        ai.owner_fighter.hp = 499
        ai.new_take_damage_strategy(50)
        assert ai.owner_fighter.die.called

    def test_new_take_damage_strategy_damages_normally_if_above_half_health(self, ai):
        # 501 - 50 = 451 is above... NOTE(review): threshold semantics are
        # owned by FrozenMonster; this only checks the default path is used.
        ai.owner_fighter.hp = 501
        ai.new_take_damage_strategy(50)
        assert ai.owner_fighter.default_take_damage_strategy.called

    def test_cleanup_resets_attack_strategy(self, ai):
        # While frozen, the fighter uses the override; cleanup restores it.
        assert ai.owner_fighter.take_damage_strategy == ai.new_take_damage_strategy
        ai.cleanup()
        assert ai.owner_fighter.take_damage_strategy == ai.owner_fighter.default_take_damage_strategy
class TestConfusedMonster:
    """ConfusedMonster random-walks for num_turns turns, then recovers."""

    @pytest.fixture
    def ai(self):
        owner = Mock()
        owner.name = 'mock_owner'
        mock_ai = ConfusedMonster(owner)
        # Replace the walker so walk() calls can be asserted per turn.
        mock_ai.walker = Mock()
        yield mock_ai

    def test_take_turn_randomly_walks_when_confused_for_num_turns(self, ai):
        ai.num_turns = num_turns = 5
        for i in range(num_turns - 1):
            old_num_turns = ai.num_turns
            ai.take_turn()
            assert ai.walker.walk.called
            # Reset so the next iteration asserts a fresh walk() call.
            ai.walker.walk.reset_mock()
            assert ai.num_turns == old_num_turns - 1
        ai.take_turn()
        assert ai.num_turns == 0
|
# Dev entry point: run the schoolDemo Flask app with the debug reloader.
from schoolDemo import app
# NOTE(review): hard-coded secret key in source — move to config/env
# before any production deployment.
app.secret_key= "gerileboLTD"
app.run(debug=True)
|
import numpy as np
def interpolateNewton(x,y,order=5,x0 = None, appError = 1e-8,numDigits = 4):
    """Newton divided-difference interpolation.

    6 inputs: 2 lists necessary and 4 has default:
        list (or numpy array) :x         :the input x values for all points
        list (or numpy array) :y         :the input y values for all points (length of y should equal length of x)
        integer               :order     :number of terms of the Newton polynomial (by default 5)
        float                 :x0        :the required x to get the value of the function (by default None)
        float                 :appError  :approximate error magnitude to stop evaluating (by default 10^(-8))
        integer               :numDigits :number of digits after floating point (by default 4)
    3 outputs (as a list):
        string :sout   :the function written as a string (e.g. 0.2 + 0.4 * ( x - 1 ) * ( x - 2 ) ...)
        if given x0:
            float :sumOut :the value of the function at x0
        if given x0 and the error can be determined (number of points more than the order):
            float :error  :the value of the approximate error at x0

    Raises ValueError for mismatched lengths, an order larger than the
    number of points, or an x0 outside the sampled range.
    """
    x = np.round(np.array(x,dtype=np.float64),numDigits)
    y = np.round(np.array(y,dtype=np.float64),numDigits)
    givenX = True
    if len(x) != len(y):
        raise ValueError("Length of X doesn't equal length of Y")
    if order > len(y):
        raise ValueError('Order is more than number of the points')
    if x0 is None:  # idiom fix: identity comparison with None
        x0 = 0
        givenX = False
    elif x0 > np.max(x) or x0 < np.min(x):
        raise ValueError('Required x is out of the given x range')
    else:
        # Reorder the points by their (signed) distance to x0.
        # NOTE(review): argsort on the signed difference sorts by x value,
        # not by closeness — confirm whether abs() ordering was intended.
        diffX = x - x0
        diffIndx = diffX.argsort()
        x = x[diffIndx]
        y = y[diffIndx]
    # Divided-difference table: fout[i][j] is f[x_i, ..., x_{i+j}].
    fout = np.zeros((len(y),len(y)),dtype=np.float64)
    fout[:,0] = y
    for j in range(1,len(y)):
        for i in range(len(y)-j):
            fout[i][j] = ( fout[i+1][j-1] - fout[i][j-1] ) / ( x[i+j] - x[i] )
    sout = '{1:.{0}f} '.format(numDigits,y[0])
    sumOut = y[0]
    for i in range(1,order):
        termout = ' + {1:.{0}f}'.format(numDigits,fout[0][i])
        term = fout[0][i]
        for j in range(i):
            termout += ' * ( x - {1:.{0}f} )'.format(numDigits,x[j])
            term *= (x0 - x[j])
        # Fix: compare the term's MAGNITUDE against the tolerance — a large
        # negative term previously aborted the series early and returned a
        # badly truncated value.
        if (givenX and abs(term) < appError):
            return [sout,sumOut,term]
        sout += termout
        sumOut += term
    if givenX:
        if order < len(y):
            # Next divided-difference term estimates the truncation error.
            error = fout[0][order]
            for j in range(order):
                error *= (x0 - x[j])
            return [sout,sumOut,error]
        else:
            return [sout,sumOut]
    return [sout]
#print(interpolateNewton([1,4,6,5,7],[0,1.386294,1.791759,1.609438,1.82],appError=0.1))
def interpolationLagrange(x,y,x0 = None,numDigits = 4):
    """Lagrange interpolation.

    4 inputs: 2 lists necessary and 2 has default:
        list (or numpy array) :x         :the input x values for all points
        list (or numpy array) :y         :the input y values for all points (length of y should equal length of x)
        float                 :x0        :the required x to get the value of the function (by default None)
        integer               :numDigits :number of digits after floating point (by default 4)
    2 outputs (as a list):
        string :sout :the function written as a string (sum of basis polynomials)
        if given x0:
            float :sumOut :the value of the function at x0

    Raises ValueError for mismatched lengths or an x0 outside the range.
    """
    x = np.round(np.array(x,dtype=np.float64),numDigits)
    y = np.round(np.array(y,dtype=np.float64),numDigits)
    givenX = True
    if len(x) != len(y):
        raise ValueError("Length of X doesn't equal length of Y")
    if x0 is None:  # idiom fix: identity comparison with None
        x0 = 0
        givenX = False
    elif x0 > np.max(x) or x0 < np.min(x):
        raise ValueError('Required x is out of the given x range')
    sout = ''
    sumOut = 0
    for i in range(len(y)):
        # product accumulates y_i * L_i(x0); down folds the basis
        # denominator into the printed coefficient.
        product = y[i]
        upS = ''
        down = y[i]
        for j in range(len(y)):
            if i != j:
                product *= (x0 - x[j])/(x[i] - x[j])
                upS += '( X - {1:.{0}f} )'.format(numDigits,x[j])
                down /= (x[i] - x[j])
        sout += ' + {0} * {2:.{1}f}'.format(upS,numDigits,down)
        sumOut += product
    # Strip the leading ' + ' separator.
    sout = sout[3:]
    if givenX:
        return [sout,sumOut]
    return [sout]
#inr = interpolationLagrange([1,4,6,5,7],[0,1.386294,1.791759,1.609438,1.82],2)
#print(type(inr),len(inr)) |
# Program: echo numbers typed by the user until 0 is entered.
# NOTE(review): the original Portuguese header said "print from 1 up to a
# number typed by the user", which is not what the loop below does — confirm.
x = 1
print('o numero é: %d' %x)
# Sentinel loop: keep reading until the user types 0.
ler = 1
while ler != 0:
    ler = int(input('Digite um novo número: '))
    print('o número é: %d' %ler)
|
#!/usr/bin/env python3
# ----------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016, Heiko Möllerke
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ----------------------------------------------------------------------
from collections import namedtuple
from math import sqrt, cos, sin
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# The Vector
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class Vector(namedtuple('Vector', 'x y')):
    """An immutable two-dimensional vector for a Cartesian coordinate
    system.  Being a namedtuple, it unpacks like a plain ``(x, y)`` pair
    and inherits tuple equality and hashing."""

    def __new__(cls, x, y):
        return tuple.__new__(cls, [x, y])

    @property
    def magnitude(self):  # -> Number
        """Euclidean length of the vector."""
        return sqrt(self.x ** 2 + self.y ** 2)

    # --- basic arithmetic -------------------------------------------------

    def __neg__(self):  # -> Vector
        return type(self)(-self.x, -self.y)

    def __add__(self, other):  # -> Vector
        ox, oy = other
        return type(self)(self.x + ox, self.y + oy)

    def __sub__(self, other):  # -> Vector
        ox, oy = other
        return type(self)(self.x - ox, self.y - oy)

    def __mul__(self, scalar):  # -> Vector
        return type(self)(self.x * scalar, self.y * scalar)

    def __truediv__(self, scalar):  # -> Vector
        return type(self)(self.x / scalar, self.y / scalar)

    def __floordiv__(self, scalar):  # -> Vector
        return type(self)(self.x // scalar, self.y // scalar)

    def __radd__(self, other):
        return self + other

    def __rsub__(self, other):
        return -self + other

    def __rmul__(self, scalar):
        return self * scalar

    def scalar_product(self, other):
        """Dot product of this vector with *other* (any 2-sequence)."""
        ox, oy = other
        return self.x * ox + self.y * oy

    # --- ordering ---------------------------------------------------------
    # Equality comes from the namedtuple base; < and > compare lengths.

    def __lt__(self, other):  # -> Bool
        return self.magnitude < Vector(*other).magnitude

    def __gt__(self, other):  # -> Bool
        return self.magnitude > Vector(*other).magnitude

    # --- specialized operations -------------------------------------------

    def normalized(self):  # -> Vector
        """Unit vector (magnitude == 1) pointing the same way."""
        return self / self.magnitude

    def resized(self, magnitude):  # -> Vector
        """Vector with the same direction but the given length."""
        return self.normalized() * magnitude

    def rotated(self, radians):  # -> Vector
        """Vector rotated counter-clockwise by *radians*."""
        c, s = cos(radians), sin(radians)
        return type(self)(self.x * c - self.y * s,
                          self.x * s + self.y * c)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Some usefull default-Vectors
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Handy unit vectors for the four axis-aligned directions.
UP = Vector(0, 1)
DOWN = -UP
LEFT = Vector(-1, 0)
RIGHT = -LEFT
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Shapes (only a rectangle right now)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class ParaxialRectangle:
    """An axis-aligned rectangle stored as bottom-left corner plus size.

    Every edge, corner and the center is exposed as a read/write property,
    and point/rectangle intersection tests are provided for collision
    detection.  Width and height are kept non-negative by _normalize().
    """

    def __init__(self, bottom_left, size):
        # bottom_left and size are any 2-sequences (x, y) / (width, height).
        self._x, self._y = bottom_left
        self._width, self._height = size
        self._normalize()

    def _normalize(self):
        """Ensures, width and height yield non-negative values to reduce
        complexity of other methods. """
        # A negative size flips the anchor corner so that (_x, _y) is
        # always the true bottom-left.
        self._x = min(self._x, self._x + self._width)
        self._y = min(self._y, self._y + self._height)
        self._width = abs(self._width)
        self._height = abs(self._height)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Inquiry
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    @property
    def width(self):  # -> Number
        return self._width

    @property
    def height(self):  # -> Number
        return self._height

    @property
    def left(self):  # -> Number
        return self._x

    @property
    def right(self):  # -> Number
        return self._x + self._width

    @property
    def top(self):  # -> Number
        return self._y + self._height

    @property
    def bottom(self):  # -> Number
        return self._y

    @property
    def top_left(self):  # -> Vector
        return Vector(self.left, self.top)

    @property
    def top_right(self):  # -> Vector
        return Vector(self.right, self.top)

    @property
    def bottom_left(self):  # -> Vector
        return Vector(self.left, self.bottom)

    @property
    def bottom_right(self):  # -> Vector
        return Vector(self.right, self.bottom)

    @property
    def center(self):  # -> Vector
        return Vector(self.left + self.width / 2, self.bottom + self.height / 2)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Manipulation
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    @width.setter
    def width(self, value):
        # Re-normalize: a negative width moves the anchor corner.
        self._width = value
        self._normalize()

    @height.setter
    def height(self, value):
        self._height = value
        self._normalize()

    @left.setter
    def left(self, value):
        # Edge setters translate the rectangle; the size never changes.
        self._x = value

    @right.setter
    def right(self, value):
        self._x = value - self._width

    @top.setter
    def top(self, value):
        self._y = value - self._height

    @bottom.setter
    def bottom(self, value):
        self._y = value

    @top_left.setter
    def top_left(self, point):
        self.left, self.top = point

    @top_right.setter
    def top_right(self, point):
        self.right, self.top = point

    @bottom_left.setter
    def bottom_left(self, point):
        self.left, self.bottom = point

    @bottom_right.setter
    def bottom_right(self, point):
        self.right, self.bottom = point

    @center.setter
    def center(self, point):
        x, y = point
        self.left = x - self.width / 2
        self.bottom = y - self.height / 2

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Intersection testing (for collision-detection etc.)
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def intersects_point(self, point):  # -> Bool
        """Tests if a point intersects this rectangle. """
        # Edges are inclusive: a point exactly on the border intersects.
        x, y = point
        return self.left <= x <= self.right and self.bottom <= y <= self.top

    def intersects_rect(self, other):  # -> Bool
        """Tests if this and another paraxial rectangle intersect each
        other. """
        # Two boxes overlap iff their projections overlap on both axes.
        return (
            intervals_intersect(self.left, self.right, other.left, other.right)
            and
            intervals_intersect(self.bottom, self.top, other.bottom, other.top)
        )
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Utility functions
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def intervals_intersect(a1, a2, b1, b2):  # -> Bool
    """Tests if two closed intervals [a1, a2] and [b1, b2] intersect.

    Both intervals must be given low-to-high.  Invalid bounds now raise
    ValueError instead of relying on `assert`, which is silently stripped
    when Python runs with optimizations (-O).
    """
    if a1 > a2 or b1 > b2:
        raise ValueError('interval bounds must satisfy a1 <= a2 and b1 <= b2')
    # Overlap iff each interval starts before the other one ends.
    return a1 <= b2 and b1 <= a2
|
# test file
def get_integer(m):
    """Prompt the user with *m* and return the typed value as an int."""
    return int(input(m))
def get_string(m):
    """Prompt the user with *m* and return the raw typed string."""
    return input(m)
def double_loop_print(items=()):
    """Print each sub-list with its index, then each element of that
    sub-list with its own index.

    Fix: the original body read an undefined global ``L`` and therefore
    raised NameError on every call; the data is now an (optional)
    parameter, so calling with no arguments simply prints nothing.
    """
    for i, sub in enumerate(items):
        print("{}:{}".format(i, sub))
        for j, element in enumerate(sub):
            print("{}:{}".format(j, element))
def main():
    """Print a small fixed table of people: name --- hair colour --- age."""
    people = [
        ["Nia", "Blonde", 16],
        ["Tommi", "Blonde", 16],
        ["Grace", "Brown", 16],
        ["Rebecca", "Black", 16],
        ["Paige", "Brown", 16]
    ]
    # print(my_list)
    # {:10} pads each text column to 10 chars; {:<10} left-aligns the age
    # (numbers are right-aligned by default).
    row_template = "{:10} --- {:10} --- {:<10}"
    for name, hair, age in people:
        print(row_template.format(name, hair, age))
main() |
from django.http import (
HttpResponse, HttpResponseRedirect
)
from django.shortcuts import (
render, redirect
)
from .forms import (
QueryForm, TrackingForm, FeedbackForm
)
from django.views import View
from django.contrib import messages
from django.db import models
from django.contrib.auth import get_user_model
from .models import QueryUpdate
def get_anonymous_user():
    """Return the shared fallback 'anonymous.user' account, creating it on
    first use; used to attribute submissions from unauthenticated visitors."""
    return get_user_model().objects.get_or_create(username='anonymous.user')[0]
class Node:
    """One link in a right-skewed chain of query updates.

    ``val`` holds the payload stored at this node; ``right`` holds
    whatever follows it (None for a leaf).
    """

    def __init__(self, key):
        self.val, self.right = key, None
def createRightSkewedTree(hasUpdates,queryUpdateForQuery,queryUpdate):
    """Recursively build trees of query updates.

    For each update in ``queryUpdateForQuery`` a Node is created; its
    children are the updates in ``queryUpdate`` whose ``updated_query_id``
    points back at it.  Returns a list of root Nodes, or None when
    ``hasUpdates`` is falsy.

    NOTE(review): despite the name, ``tree.right`` is assigned a LIST of
    child Nodes (the recursive return value), not a single Node — confirm
    the template consuming this structure expects that.
    """
    if hasUpdates:
        treeList = []
        for update in queryUpdateForQuery:
            tree = Node(update)
            # Children reference their parent via updated_query_id.
            childUpdates = queryUpdate.filter(updated_query_id = update.query_update_id)
            tree.right = createRightSkewedTree(len(childUpdates)!=0, childUpdates, queryUpdate)
            treeList.append(tree)
        return treeList
    return None
class Feedback(View):
    """Feedback form: GET renders the form (pre-filled for logged-in
    users), POST validates and stores the feedback."""

    def get(self, request, *args, **kwargs):
        default_data = {}
        if request.user.is_authenticated:
            # Pre-fill contact fields from the logged-in user's profile.
            default_data = {'email': request.user.email,
                            'first_name':request.user.first_name,
                            'last_name':request.user.last_name,
                            }
            form = FeedbackForm(default_data)
            # NOTE(review): authenticated users get the *query* template here
            # while anonymous users get the feedback template below —
            # confirm this is intentional and not a copy-paste slip.
            return render(request, "contact/contactQuery.html", {'form': form})
        form=FeedbackForm()
        return render(request, "contact/feedbackForm.html", {'form': form})

    def post(self, request, *args, **kwargs):
        form = FeedbackForm(request.POST)
        if form.is_valid():
            query = form.save(commit=False)
            # Feedback is always attributed to the shared anonymous account.
            query.user = get_anonymous_user()
            query.save()
            successMessage = "We have successfully received your feedback."
            messages.success(request,successMessage)
            return redirect('querysuccess')
        # Invalid form: re-render with field errors.
        return render(request, "contact/feedbackForm.html", {'form': form})
class TrackingView(View):
    """Query-status tracking: GET shows the tracking form, POST looks up
    every update recorded against the submitted tracking id and builds
    the update trees for display."""

    def get(self, request, *args, **kwargs):
        form=TrackingForm()
        return render(request, "contact/trackingform.html", {'form': form})

    def post(self, request, *args, **kwargs):
        form = TrackingForm(request.POST)
        result = {}
        result['hasUpdates'] = 0
        if form.is_valid():
            tracking_id = form.cleaned_data.get("tracking_id")
            queryUpdate = QueryUpdate.objects.all()
            # Updates attached to this query, newest first.
            queryUpdateForQuery = queryUpdate.filter(query_id=tracking_id).order_by('-update_date')
            hasUpdates = len(queryUpdateForQuery) != 0
            result['hasUpdates'] = int(hasUpdates)
            result['updates'] = queryUpdateForQuery
            rightSkewedTreeList = ''
            if hasUpdates:
                ## for update in queryUpdateForQuery:
                treeList = createRightSkewedTree(hasUpdates,queryUpdateForQuery, queryUpdate)
                rightSkewedTreeList = treeList
            result['rightSkewedTreeList']=rightSkewedTreeList
            ## for update in queryUpdateForQuery:
            ##     print(update.update_date)
        # NOTE(review): debug print left in — remove before production.
        print(result)
        return render(request, "contact/trackingform.html", {'form': form, 'result':result})
class QueryView(View):
    """Contact-query form: GET renders it (pre-filled for logged-in
    users), POST validates, saves and hands back a tracking ticket id."""

    def get(self, request, *args, **kwargs):
        default_data = {}
        if request.user.is_authenticated:
            # Pre-fill contact fields from the logged-in user's profile.
            default_data = {'email': request.user.email,
                            'first_name': request.user.first_name,
                            'last_name': request.user.last_name,
                            }
            form = QueryForm(default_data)
            return render(request, "contact/contactQuery.html", {'form': form})
        form = QueryForm()
        return render(request, "contact/contactQuery.html", {'form': form})

    def post(self, request, *args, **kwargs):
        form = QueryForm(request.POST)
        if form.is_valid():
            query = form.save(commit=False)
            # Attribute the query to the current user, or to the shared
            # anonymous account for unauthenticated visitors.
            if request.user.is_authenticated:
                query.user = request.user
            else:
                # Fix: removed a leftover debug print that also hit the
                # database a second time via get_anonymous_user().
                query.user = get_anonymous_user()
            query.save()
            # Fix: "successfuly" -> "successfully" in the user-facing message.
            successMessage = f"We have successfully received your query, Use Ticket Id:{query.query_id} to track the status of your query."
            messages.success(request, successMessage)
            return redirect('querysuccess')
        # Invalid form: re-render with field errors.
        return render(request, "contact/contactQuery.html", {'form': form})
class SuccessView(View):
    """Render the static confirmation page shown after a submission."""

    def get(self, request, *args, **kwargs):
        template_name = "contact/successPage.html"
        return render(request, template_name)
|
def _first_positions(values, k):
    """Return 1-based positions of the first occurrence of k distinct values."""
    seen = {}
    for pos, value in enumerate(values, start=1):
        if value not in seen:
            seen[value] = pos
            # Stop as soon as k distinct values have been located.
            if len(seen) == k:
                break
    return list(seen.values())


# Read n, k and the array; print any k indices holding distinct values.
n, k = map(int, input().split())
a = list(map(int, input().split()))
if len(set(a)) < k:
    print('NO')
else:
    print('YES')
    # Single O(n) pass replaces the original's repeated a.index() calls,
    # which made the answer construction O(n * k).
    print(*_first_positions(a, k))
import link_checker
# Entry point: delegate to the link_checker package's web-access runner.
# NOTE(review): assumes link_checker.web_access exposes a start() callable —
# confirm against the package.
if __name__ == "__main__":
    link_checker.web_access.start()
from __future__ import absolute_import
from celery import Celery
import os
import random
import time
from datetime import datetime
from flask import Flask, request, session, flash, redirect, url_for, jsonify
# Flask application plus a Celery instance sharing the app's import name;
# Celery settings are loaded from the local celeryconfig module.
app = Flask(__name__)
celery = Celery(app.name)
celery.config_from_object('celeryconfig')
# bind=True is required so Celery passes the task instance as `self`;
# without it the plain-decorated function would receive no task instance
# and `self.request.id` would fail at runtime.
@celery.task(bind=True)
def spark_job_task(self):
    """Submit the word-count Spark job and return a completed-status dict."""
    task_id = self.request.id
    master_path = 'local[2]'
    project_dir = "/home/ken/Desktop/"
    spark_code_path = project_dir + "wordcount.py"
    # NOTE(review): os.system with interpolated strings; the pieces are
    # constants here, but subprocess.run([...]) would be safer if they
    # ever become user-controlled.
    os.system("/usr/local/spark-1.4.0-bin-hadoop2.6/bin/spark-submit --master %s %s %s"
              % (master_path, spark_code_path, task_id))
    return {'current': 100, 'total': 100, 'status': 'Task Completed!', 'result': 10}
@app.route('/', methods=['GET'])
def index():
    """Simple liveness endpoint; the route only accepts GET."""
    return "Hello World!" if request.method == 'GET' else None
# Bug fix: `methods` must be an iterable of method names. The original
# string "POST" was iterated character-by-character by Flask, registering
# bogus methods 'P', 'O', 'S', 'T' instead of POST.
@app.route('/sparktask', methods=['POST'])
def sparktask():
    """Kick off the Spark job asynchronously; 202 with a status-poll Location."""
    task = spark_job_task.apply_async()
    # NOTE(review): url_for('taskstatus', ...) requires a `taskstatus`
    # endpoint, which is not defined in this file — confirm it exists.
    return jsonify({}), 202, {'Location': url_for('taskstatus', task_id=task.id)}
# Run the Flask development server when executed directly.
if __name__== '__main__':
    app.run(debug=True)
|
# ============LICENSE_START=======================================================
# Copyright (c) 2018-2021 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
from pysnmp.hlapi import *
from pysnmp import debug
# debug.setLogger(debug.Debug('msgproc'))
# Emit ten copies of each of two example traps to the local trap receiver
# on port 6164.
for i in range(10):
    # First flavour: an explicit varbind list (no NotificationType wrapper)
    # sent with a non-standard community string.
    errorIndication, errorStatus, errorIndex, varbinds = next(
        sendNotification(
            SnmpEngine(),
            CommunityData("not_public"),
            UdpTransportTarget(("localhost", 6164)),
            ContextData(),
            "trap",
            [
                ObjectType(ObjectIdentity(".1.3.6.1.4.1.999.1"),
                           OctetString("test trap - ignore")),
                ObjectType(ObjectIdentity(".1.3.6.1.4.1.999.2"),
                           OctetString("ONAP pytest trap")),
            ],
        )
    )
    if errorIndication:
        print(errorIndication)
    else:
        print("successfully sent first trap example, number %d" % i)

for i in range(10):
    # Second flavour: a NotificationType with extra varbinds attached,
    # sent with the standard "public" community string.
    errorIndication, errorStatus, errorIndex, varbinds = next(
        sendNotification(
            SnmpEngine(),
            CommunityData("public"),
            UdpTransportTarget(("localhost", 6164)),
            ContextData(),
            "trap",
            NotificationType(ObjectIdentity(".1.3.6.1.4.1.74.2.46.12.1.1")).addVarBinds(
                (".1.3.6.1.4.1.999.1", OctetString("ONAP pytest trap - ignore (varbind 1)")),
                (".1.3.6.1.4.1.999.2", OctetString("ONAP pytest trap - ignore (varbind 2)")),
            ),
        )
    )
    if errorIndication:
        print(errorIndication)
    else:
        print("successfully sent second trap example, number %d" % i)
|
import db_handler
import move_cs_folders
import service_handler
import configparser
import logging
import logging.config
import datetime
import os
import sys
# Logging setup plus runtime settings for the Clubspire update run.
logging.config.fileConfig("logging.conf")
logging.info("Getting data from configuration file")
conf = configparser.ConfigParser()
conf.read('main.conf')

TODAY = datetime.date.today()
# Windows-style paths rooted at the JBoss installation (KeyError if unset).
JBOSS_HOME = os.environ['JBOSS_HOME']
CS_SERVER_BACKUP = JBOSS_HOME + '\\..\\Updates'
CS_SERVER_OLD = JBOSS_HOME + '\\server\\default\\deploy\\'
CS_SERVER_NEW = '..\\update\\data\\jboss\\'
OLD_CLIENT = JBOSS_HOME + '\\..\\client'
NEW_CLIENT = '..\\update\\data\\files\\client'
# Deployable artefacts moved between the deploy, backup and update folders.
FOLDERS = ['Clubspire.ear', 'ovladani.sar', 'ovladaniVstupu.sar']
OLD_FILES_PATH = [CS_SERVER_OLD + folder for folder in FOLDERS]
NEW_FILES_PATH = [CS_SERVER_NEW + folder for folder in FOLDERS]
DB_NAME = conf.get('database', 'name')
DB_USER = conf.get('database', 'user')
DB_PASSWORD = conf.get('database', 'password')
CS_VERSION = conf.get('server', 'version')
SERVICE = 'clubspire'
JBOSS_PROCESS = 'JBossService.exe'
# Retry counters for the (currently disabled) restart loops below.
CONNECTION_LOOP = 0
CONNECTION_LOOP2 = 0
if __name__ == '__main__':
    try:
        # All update steps below are currently commented out (staged work).
        # The explicit `pass` keeps the `try` body non-empty — without at
        # least one statement here the file fails to parse entirely
        # (SyntaxError: expected an indented block).
        pass
        # ok -->
        # logging.info("Stopping clubspire service and dependent services.")
        # status = service_handler.service_status(SERVICE)
        # if status == 4:
        #     # - Stop CS server
        #     service_handler.stop_service(SERVICE)
        # else:
        #     print(f'{SERVICE} is already stopped')
        #     logging.info(f'{SERVICE} is already stopped')
        # logging.info("Creating backup folder with actual date.")
        # if not os.path.exists(CS_SERVER_BACKUP):
        #     os.mkdir(CS_SERVER_BACKUP)
        # if not os.path.exists(CS_SERVER_BACKUP + '\\' + str(TODAY)):
        #     os.mkdir(CS_SERVER_BACKUP + '\\' + str(TODAY))
        # # logging.info("Creating db backup")
        # # db_handler.backup(DB_NAME, DB_USER, TODAY, CS_SERVER_BACKUP)
        # # - Moving folders from deploy to backup in Updates foder
        # logging.info("Backuping CS server...")
        # move_cs_folders.move_to_backup(CS_SERVER_BACKUP, TODAY, OLD_FILES_PATH)
        # logging.info("CS server backuped.")
        # # - Moving new folders from update folder to deploy folder
        # logging.info("Updating CS server...")
        # move_cs_folders.move_from_update(CS_SERVER_NEW, FOLDERS, CS_SERVER_OLD)
        # logging.info("CS server updated.")
        # - Moving old CS client to backup
        # logging.info("Moving CS client to backup folder with actual date...")
        # move_cs_folders.move_old_client(OLD_CLIENT, CS_SERVER_BACKUP, TODAY)
        # logging.info("CS client backuped.")
        # - Updating CS client
        # logging.info("Updating CS client...")
        # move_cs_folders.move_new_client(NEW_CLIENT, JBOSS_HOME)
        # logging.info("CS client updated")
        # - runs db create/update/alter/delete query from db_updates folder by version as a loop per line
        # logging.info("Updating db with query by line...")
        # db_handler.update_by_query(DB_NAME, DB_USER, DB_PASSWORD, CS_VERSION)
        # logging.info("DB updated with query.")
        # - runs db function update/create query from db_updates_func folder by version as a loop per file
        # logging.info("Updating db with function by file...")
        # db_handler.update_by_file(DB_NAME, DB_USER, DB_PASSWORD, CS_VERSION)
        # logging.info("DB updated with functions.")
        # ok <--
        # Make binary update here !!!
        # - Checking clubspire if running.
        # CLUBSPIRE_SERVICE = get_service('clubspire')
        # if service_status(CLUBSPIRE_SERVICE) == 1:
        #     print("Clubspire is stopped, starting Clubspire...")
        #     print(datetime.datetime.now().strftime("%Y-%m-%d %H-%M-%S"))
        #     while CONNECTION_LOOP < 20:
        #         start_service('clubspire')
        #         time.sleep(10)
        #         clubspire_process_id = get_pid(JBOSS_PROCESS)
        #         established = check_connection(clubspire_process_id)
        #         if established == "1":
        #             print(datetime.datetime.now().strftime("%Y-%m-%d %H-%M-%S"))
        #             stop_service('clubspire')
        #         else:
        #             break
        #         CONNECTION_LOOP += 1
        #     print(datetime.datetime.now().strftime("%Y-%m-%d %H-%M-%S"))
        #     print('Loop restarting clubspire ended successfully...')
        #     # Starting Webclient
        #     start_service('clubspire-webclient')
        #     print('All services are running properly now.')
        # elif service_status(CLUBSPIRE_SERVICE) == 4:
        #     print('Clubspire service is running, verify is comunication is ok.')
        #     while CONNECTION_LOOP2 < 20:
        #         clubspire_process_id = get_pid(JBOSS_PROCESS)
        #         established = check_connection(clubspire_process_id)
        #         if established == "1":
        #             print(datetime.datetime.now().strftime("%Y-%m-%d %H-%M-%S"))
        #             stop_service('clubspire')
        #             time.sleep(10)
        #             start_service('clubspire')
        #         else:
        #             break
        #         CONNECTION_LOOP2 += 1
        #     print(datetime.datetime.now().strftime("%Y-%m-%d %H-%M-%S"))
        #     print('Loop restarting clubspire ended successfully...')
        # - Starting Webclient
        # start_service('clubspire-webclient')
        # print('All services are running properly.')
    except Exception as error:
        # Top-level boundary: log the full traceback, echo to console.
        logging.exception('Error in the main script occured!')
        print(error)
#main.py
from flask import Flask, jsonify, request
from db import get_songs, add_songs
app = Flask(__name__)
@app.route('/', methods=['POST', 'GET'])
def padre():
    """List songs on GET; add songs from the request JSON body on POST."""
    if request.method == 'POST':
        if not request.is_json:
            return jsonify({"msg": "Falta JSON en la solicitud"}), 400
        # Bug fix: the module imports get_songs/add_songs from db, but the
        # handler called undefined add_padre/get_padre (NameError at runtime).
        add_songs(request.get_json())
        return 'Padre Agregado'
    return get_songs()
# Start the development server when run as a script.
if __name__ == '__main__':
    app.run()
|
# Tally laser-tag scores for two teams from tagin.txt, writing team totals
# to tagout.txt.
# NOTE(review): this script contains several likely bugs, flagged inline;
# the intended behavior is unclear from the code alone, so nothing has
# been changed — the notes below should be resolved with the author.
i = 0
no = 0
y = 0
z = 0
f = 0
y = 0  # NOTE(review): duplicate initialisation of y
redteam = []
blueteam = []


def program():
    # NOTE(review): `none` is declared global but never defined or used.
    global i, no, y, redteam, blueteam, none, z, f
    # NOTE(review): neither file handle is closed — prefer `with open(...)`.
    fr = open("tagout.txt", "w")
    file = open("tagin.txt", "r")
    r = file.readlines()
    for line in r:
        # NOTE(review): `no` is never incremented, so every iteration
        # re-parses the first line instead of the current `line`.
        word = r[no].split(' ')
        y += 1
        # NOTE(review): word[y][0] takes the first character of the y-th
        # token; y grows once per line, so this walks off the end of the
        # token list for longer inputs — confirm the intended indexing.
        check1 = int(word[y][0])
        z = 0
        # NOTE(review): appending to redteam while looping against
        # len(redteam) re-extends the bound; a match can loop indefinitely.
        # Also, both teams start empty, so these loops never execute and
        # the totals written below are always "0 0".
        while len(redteam) > z:
            if check1 == redteam[z]:
                mar = int(word[y][1])
                redteam.append(mar)
            z += 1
        f = 0
        while len(blueteam) > f:
            if check1 == blueteam[f]:
                mar = int(word[y][1])
                blueteam.append(mar)
            f += 1
    # Write the per-team counts as "red blue" on a single line.
    redteamtotal = len(redteam)
    blueteamtotal = len(blueteam)
    redteamtotal = str(redteamtotal)
    blueteamtotal = str(blueteamtotal)
    fr.write(redteamtotal + " " + blueteamtotal)


program()
|
# Copyright (c) 2021 Mahdi Biparva, mahdi.biparva@gmail.com
# miTorch: Medical Imaging with PyTorch
# Deep Learning Package for 3D medical imaging in PyTorch
# Implemented by Mahdi Biparva, April 2021
# Brain Imaging Lab, Sunnybrook Research Institute (SRI)
import torch
import numbers
import random
from . import functional_mitorch as F
import collections
import sys
import nibabel as nib
from data.VolSet import c3d_labels
import numpy as np
from data.ABC_utils import Transformable, Randomizable
import itertools
import utils.MONAI_data as mn
import utils.Torchio as tio
# On Python < 3.3 the collection ABCs lived at the top of `collections`;
# alias a single pair of names for the rest of the module.
if sys.version_info < (3, 3):
    Sequence = collections.Sequence
    Iterable = collections.Iterable
else:
    Sequence = collections.abc.Sequence
    Iterable = collections.abc.Iterable

# Public names exported by this transforms module.
# NOTE(review): classes defined below such as RandomBrightness,
# RandomContrast and RandomGamma are not listed — confirm the omission
# is intentional.
__all__ = [
    'RandomOrientationTo',
    'RandomResampleTomm',
    'RandomCropImageVolume',
    'RandomResizedCropImageVolume',
    'ResizeImageVolume',
    'CenterCropImageVolume',
    'NormalizeMeanStdVolume',
    'NormalizeMinMaxVolume',
    'ToTensorImageVolume',
    'RandomFlipImageVolume',
    'PadVolume',
    'PadToSizeVolume',
]
def generate_all_possible_orients(labels_set):
    """Return every orientation code derivable from *labels_set*.

    `labels_set` is a sequence of axis-label pairs, e.g.
    (('L','R'), ('P','A'), ('I','S')); the result contains every string
    formed by picking one label per axis in every axis order (48 codes
    for three axes).

    Bug fix: the original ignored `labels_set` and always read the
    module-level `c3d_labels`; all visible call sites pass `c3d_labels`,
    so existing behavior is unchanged.
    """
    return list(map(
        lambda x: ''.join(x),
        itertools.chain(*map(
            lambda x: itertools.product(*x),
            itertools.permutations(labels_set)
        ))
    ))
# noinspection PyTypeChecker
class RandomOrientationTo(Randomizable):
    """Reorient (image, annot, meta) volumes to a target axis code.

    When randomization is active, the target orientation is re-drawn per
    call from `orientation_set` (all codes derivable from `c3d_labels`
    by default).
    """

    def __init__(self, target_orient, orientation_set=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        c3d_labels_str = ''.join([''.join(i) for i in c3d_labels])
        assert isinstance(target_orient, str), 'target_orient must be string'
        assert all([i in c3d_labels_str for i in target_orient]), \
            'letters in target_orient must be in {}'.format(c3d_labels_str)
        if orientation_set:
            assert isinstance(orientation_set, Iterable)
        self.target_orient = target_orient.upper()
        # Bug fix: a caller-supplied orientation_set was validated but never
        # stored, leaving self.orientation_set undefined and breaking
        # randomize_params; store it, falling back to every possible code.
        if orientation_set is None:
            self.orientation_set = generate_all_possible_orients(c3d_labels)
        else:
            self.orientation_set = list(orientation_set)

    def randomize_params(self, volume):
        # Draw a fresh target orientation for this call.
        self.target_orient = self.orientation_set[
            torch.randint(
                len(self.orientation_set),
                (1,)
            )
        ]

    @staticmethod
    def apply_orient(tensor, orient_trans):
        # nibabel operates on numpy arrays; make the result contiguous
        # again before wrapping it back into a torch tensor.
        return torch.from_numpy(
            np.ascontiguousarray(
                nib.apply_orientation(tensor, orient_trans)
            )
        )

    def apply(self, volume):
        """Reorient image and annotation; updates meta affine/size/spacing."""
        image, annot, meta = volume
        affine = torch.tensor(meta['affine'], dtype=torch.float)
        affine = affine.reshape(4, 4)
        orient_source = nib.io_orientation(affine)
        orient_final = nib.orientations.axcodes2ornt(self.target_orient, labels=c3d_labels)
        orient_trans = nib.orientations.ornt_transform(orient_source, orient_final)
        orient_trans_3d = orient_trans.copy()
        orient_trans[:, 0] += 1  # we skip the channel dimension
        orient_trans = np.concatenate([np.array([[0, 1]]), orient_trans])
        orient_identical = np.array(list(zip(range(4), [1]*4)))
        if not np.all(orient_trans == orient_identical):  # skip identity transforms
            image = self.apply_orient(image, orient_trans)
            annot = self.apply_orient(annot, orient_trans)
            image_shape = image.shape[1:]
            inv_affine_trans = nib.orientations.inv_ornt_aff(orient_trans_3d, image_shape)
            affine = affine.mm(torch.from_numpy(inv_affine_trans).float())
            meta['affine'] = affine.flatten().tolist()
            # update size and spacing using affine and shape
            meta['size'] = tuple(image.shape[1:])
            dim = meta["dimension"]
            RZS = affine[:dim, :dim].numpy()
            zooms = np.sqrt(np.sum(RZS * RZS, axis=0))
            zooms[zooms == 0] = 1
            meta['spacing'] = zooms
            meta["direction"] = list((RZS / zooms).flatten())
        return (
            image,
            annot,
            meta
        )

    def __repr__(self):
        return self.__class__.__name__ + '(target_orient={0})'.format(self.target_orient)
class RandomResampleTomm(Randomizable):
    """Resample (image, annot, meta) volumes to a target voxel spacing (mm).

    When randomized, the target spacing is jittered multiplicatively
    around `target_spacing` by up to ±`target_spacing_scale`.
    """

    def __init__(self, target_spacing=(1, 1, 1), target_spacing_scale=(0.2, 0.2, 0.2),
                 interpolation='trilinear', *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.interpolation = interpolation
        assert isinstance(target_spacing, (tuple, list)) and len(target_spacing) == 3
        # Bug fix: the original wrote `assert isinstance(...), len(...) == 3`,
        # which makes the length check the assert *message* (a no-op) rather
        # than part of the condition.
        assert isinstance(target_spacing_scale, (tuple, list)) and len(target_spacing_scale) == 3
        self.target_spacing = self.target_spacing_constant = torch.tensor(target_spacing, dtype=torch.float32)
        self.target_spacing_scale = torch.tensor(target_spacing_scale, dtype=torch.float32)

    def randomize_params(self, volume):
        # Uniform jitter in [-scale, +scale], applied multiplicatively.
        self.target_spacing = ((torch.rand(3) - 1/2) * 2 * self.target_spacing_scale + 1) * self.target_spacing_constant

    def apply(self, volume):
        """Resample image/annotation; updates meta size, spacing and affine."""
        image, annot, meta = volume
        size = torch.tensor(meta['size'], dtype=torch.float)
        spacing = torch.tensor(meta['spacing'], dtype=torch.float)
        spacing /= self.target_spacing  # per-axis ratio current/target
        iso1mm = torch.tensor((1, 1, 1), dtype=torch.float32)
        # Skip resampling when the ratio is (close to) 1 on every axis.
        if (spacing == iso1mm).all().item() or torch.allclose(spacing, iso1mm, rtol=1e-3, atol=0):
            return volume
        # size = (size * spacing).floor().int().tolist()[::-1]  # reverse size since F.resize works in DxHxW space
        size = (size * spacing).floor().int().tolist()
        image, annot = (
            F.resize(image, size, self.interpolation),
            F.resize(annot, size, 'nearest'),  # nearest keeps labels discrete
        )
        # meta['size'] = size[::-1]
        meta['size'] = size
        meta['spacing'] = self.target_spacing.tolist()
        dim = meta["dimension"]
        affine_ = torch.tensor(meta['affine'], dtype=torch.float)
        affine_ = affine_.reshape(4, 4)
        affine = affine_[:dim, :dim]
        # affine[affine != 0] = torch.tensor(meta['spacing'])  # this assumes affine has not rotation just scaling
        # Rescale the affine diagonal to the new spacing, preserving sign.
        affine[range(len(affine)), range(len(affine))] = torch.tensor(meta['spacing']) * affine.diagonal().sign()
        meta['affine'] = affine_.flatten().tolist()
        return image, annot, meta
# noinspection PyMissingConstructor,PyTypeChecker
class RandomCropImageVolume(Randomizable):
    """Randomly crop a fixed-size sub-volume from (image, annot, meta).

    The crop location is randomized per call via randomize_params; use
    CenterCropImageVolume for a deterministic crop instead.
    """

    def __init__(self, size, *args, **kwargs):
        # Parameter randomization (prand) is mandatory for this transform.
        if 'prand' in kwargs and not kwargs['prand']:
            raise ValueError('If you want to turn prand off, use CenterCropImageVolume instead. '
                             'This one does crop location randomization by default')
        kwargs['prand'] = True  # we always set prand to True for this particular transform
        super().__init__(*args, **kwargs)
        # An int size is promoted to a cubic (d, h, w) crop.
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size), int(size))
        else:
            self.size = size
        assert self.prand, 'If you want to turn prand off, use CenterCropImageVolume instead.' \
                           'This one does crop location randomization by default'
        # Crop location drawn by randomize_params before each apply().
        self.rloc = None

    def randomize_params(self, volume):
        image_shape = volume[0].shape  # image, annot, meta
        self.rloc = self.get_params(image_shape, self.size)

    @staticmethod
    def get_params(volume_shape, output_size):
        """Get parameters for ``crop`` for a random crop.
        Args:
            volume_shape (Torch Tensor): Shape of the volume to be cropped.
            output_size (tuple): Expected output size of the crop.
        Returns:
            tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
        """
        d, h, w = volume_shape[1:]
        td, th, tw = output_size
        # Nothing to randomize when the volume already matches the target.
        if d == td and h == th and w == tw:
            return 0, 0, 0, d, h, w
        k = random.randint(0, d - td)
        i = random.randint(0, h - th)
        j = random.randint(0, w - tw)
        return k, i, j, td, th, tw

    def apply(self, volume):
        """
        Args:
            volume (tuple(torch.tensor, torch.tensor, dict)): Image, mask volumes to be cropped. Size is (C, T, H, W)
        Returns:
            torch.tensor: randomly cropped/resized image volume.
            size is (C, T, OH, OW)
        """
        image, annot, meta = volume
        k, i, j, d, h, w = self.rloc
        # Image and mask are cropped with identical parameters to stay aligned.
        return (
            F.crop(image, k, i, j, d, h, w),
            F.crop(annot, k, i, j, d, h, w),
            meta
        )

    def __repr__(self):
        return self.__class__.__name__ + '(size={0})'.format(self.size)
class RandomCropImageVolumeConditional(RandomCropImageVolume):
    """Random crop that retries until the annotation mask is non-trivial.

    Re-crops up to `num_attemps` times (the misspelled key is part of the
    accepted kwargs) until annot.sum() exceeds `threshold`; the last
    attempt is returned regardless of whether the condition was met.
    """

    def __init__(self, size, *args, **kwargs):
        super().__init__(size, *args, **kwargs)
        # NOTE(review): these kwargs are also forwarded to the parent above;
        # confirm Randomizable tolerates the extra keys.
        self.num_attemps = kwargs.get('num_attemps', 10)
        self.threshold = kwargs.get('threshold', 0)
        assert 0 < self.num_attemps, 'num of attempts must be > 0'
        assert 0 <= self.threshold, 'threshold must be >= 0'

    def __call__(self, volume):
        image = annot = meta = None
        # Each super().__call__ re-randomizes the crop location.
        for _ in range(self.num_attemps):
            image, annot, meta = super().__call__(volume)
            if annot.sum() > self.threshold:
                break
        return image, annot, meta
# noinspection PyMissingConstructor
class RandomResizedCropImageVolume(Randomizable):
    """Randomly crop a scaled sub-volume and resize it back to `size`.

    The crop extent is a random fraction (within `scale`) of each side,
    either shared across the three sides (`uni_scale`) or drawn
    independently per side.
    """

    def __init__(self, size, scale=(0.80, 1.0), interpolation='trilinear', uni_scale=True, *args, **kwargs):
        # Parameter randomization (prand) is mandatory for this transform.
        if 'prand' in kwargs and not kwargs['prand']:
            raise ValueError('If you want to turn prand off, use CenterCropImageVolume instead. '
                             'This one does crop location randomization by default')
        kwargs['prand'] = True  # we always set prand to True for ths particular transform
        super().__init__(*args, **kwargs)
        assert isinstance(scale, tuple) and len(scale) == 2, 'scale is not defined right'
        assert 0 < scale[0] < scale[1] <= 1, 'scale must fall in (lower_range, upper_range)'
        assert isinstance(uni_scale, bool), 'iso_crop is bool'
        if isinstance(size, tuple):
            assert len(size) == 3, "size should be tuple (depth, height, width)"
            self.size = size
        else:
            self.size = tuple([int(size)]*3)
        self.interpolation = interpolation
        self.scale = scale
        self.uni_scale = uni_scale
        # Crop parameters drawn by randomize_params before each apply().
        self.rloc = None

    def randomize_params(self, volume):
        image_shape = volume[0].shape  # image, annot, meta
        self.rloc = self.get_params(image_shape, self.scale, self.uni_scale)

    @staticmethod
    def get_params(volume_shape, scale, uni_scale):
        """Get parameters for ``crop`` for a random crop.
        Args:
            volume_shape (Torch Tensor): Shape of the volume to be cropped.
            scale (tuple): Expected output size of the crop.
            uni_scale: uniformly scale all three sides
        Returns:
            tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
        """
        d, h, w = volume_shape[1:]
        if uni_scale:
            # One scale factor shared by all three sides.
            scale_rnd = random.uniform(*scale)
            td, th, tw = (torch.tensor(volume_shape[1:]) * scale_rnd).round().int().tolist()
        else:
            # Independent per-side extents, each within [scale[0], scale[1]].
            td_l, th_l, tw_l = (torch.tensor(volume_shape[1:]) * scale[0]).round().int().tolist()
            td_u, th_u, tw_u = (torch.tensor(volume_shape[1:]) * scale[1]).round().int().tolist()
            td = random.randint(td_l, td_u)
            th = random.randint(th_l, th_u)
            tw = random.randint(tw_l, tw_u)
        k = random.randint(0, d - td)
        i = random.randint(0, h - th)
        j = random.randint(0, w - tw)
        return k, i, j, td, th, tw

    def apply(self, volume):
        """
        Args:
            volume (tuple(torch.tensor, torch.tensor, dict)): Image and mask volumes to be cropped. Size is (C, T, H, W)
        Returns:
            torch.tensor: randomly cropped/resized image volume.
            size is (C, T, H, W)
        """
        image, annot, meta = volume
        k, i, j, d, h, w = self.rloc
        # Image and mask share crop parameters; only interpolation differs.
        return (
            F.resized_crop(image, k, i, j, d, h, w, self.size, self.interpolation),
            F.resized_crop(annot, k, i, j, d, h, w, self.size, 'nearest'),
            meta
        )

    def __repr__(self):
        return self.__class__.__name__ + \
            '(size={0}, interpolation_mode={1}, scale={2})'.format(
                self.size, self.interpolation, self.scale
            )
# noinspection PyTypeChecker
class ResizeImageVolume(Transformable):
    """Resize the input image volume to the given size.

    Args:
        size (sequence or int): Desired output size. If a sequence (d, h, w),
            the output matches it exactly; if an int, the smaller edge of the
            volume is matched to it, preserving aspect ratio.
        scale_factor (float): alternative to `size`; scales every side.
        interpolation (str): interpolation mode for the image (default
            'trilinear'); annotations always use 'nearest'.
        min_side (bool): forwarded to F.resize.
        ignore_depth (bool): forwarded to F.resize; only meaningful when
            the target size is an int.
    """

    def __init__(self, size=None, scale_factor=None, interpolation='trilinear', min_side=True, ignore_depth=False):
        assert size or scale_factor, 'either size or scale_factor must be given'
        assert isinstance(min_side, bool)
        assert isinstance(ignore_depth, bool)
        if size:
            assert isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 3)
        if scale_factor:
            assert isinstance(scale_factor, float)
        if isinstance(size, Iterable) and len(size) == 3 and ignore_depth:
            print('warning: ignore_depth is valid when target_size is int')
        self.scale_factor = scale_factor
        self.size = size
        self.interpolation = interpolation
        self.min_side = min_side
        self.ignore_depth = ignore_depth

    def apply(self, volume):
        """Resize image and annotation; updates meta['size'] in place."""
        image, annot, meta = volume
        assert image.shape[1:] == annot.shape[1:]
        if self.scale_factor:
            spatial = torch.tensor(image.shape[1:], dtype=torch.float)
            target = (spatial * self.scale_factor).floor().int().tolist()
        else:
            target = self.size
        resized_image = F.resize(image, target, self.interpolation,
                                 self.min_side, self.ignore_depth)
        resized_annot = F.resize(annot, target, 'nearest',
                                 self.min_side, self.ignore_depth)
        meta['size'] = tuple(resized_image.shape[1:])
        return resized_image, resized_annot, meta

    def __repr__(self):
        return '{}(size={}, interpolation={})'.format(
            self.__class__.__name__, self.size, self.interpolation)
# noinspection PyTypeChecker
class CenterCropImageVolume(Transformable):
    """Deterministically crop the central region of a volume tuple."""

    def __init__(self, crop_size):
        # An int crop_size is promoted to a cubic (d, h, w) crop.
        if isinstance(crop_size, numbers.Number):
            crop_size = (int(crop_size),) * 3
        self.crop_size = crop_size

    def apply(self, volume):
        """Center-crop image and annotation alike; meta passes through."""
        image, annot, meta = volume
        cropped_image = F.center_crop(image, self.crop_size)
        cropped_annot = F.center_crop(annot, self.crop_size)
        return cropped_image, cropped_annot, meta

    def __repr__(self):
        return '{}(crop_size={})'.format(self.__class__.__name__, self.crop_size)
class NormalizeMeanStdVolume(Transformable):
    """Normalize the image by mean subtraction and std division.

    Args:
        mean: per-channel means subtracted from the image.
        std: per-channel standard deviations divided out.
        inplace: whether F.normalize may modify the image in place.
    """

    def __init__(self, mean, std, inplace=False):
        self.mean = mean
        self.std = std
        self.inplace = inplace

    def apply(self, volume):
        """Normalize the image only; annotation and meta pass through."""
        image, annot, meta = volume
        normalized = F.normalize(image, self.mean, self.std, self.inplace)
        return normalized, annot, meta

    def __repr__(self):
        return '{}(mean={}, std={}, inplace={})'.format(
            self.__class__.__name__, self.mean, self.std, self.inplace)
class NormalizeMinMaxVolume(Transformable):
    """Min/max-normalize the image volume.

    Args:
        max_div (bool): whether to divide by the maximum after the
            minimum has been subtracted.
        inplace (bool): whether F.normalize_minmax may modify in place.
    """

    def __init__(self, max_div=True, inplace=False):
        self.max_div = max_div
        self.inplace = inplace

    def apply(self, volume):
        """Normalize the image only; annotation and meta pass through."""
        image, annot, meta = volume
        normalized = F.normalize_minmax(image, self.max_div, self.inplace)
        return normalized, annot, meta

    def __repr__(self):
        return '{}(max_div={}, inplace={})'.format(
            self.__class__.__name__, self.max_div, self.inplace)
class ToTensorImageVolume(Transformable):
    """Convert the image volume to float tensor form via F.to_tensor."""

    def apply(self, volume):
        """Apply F.to_tensor to the image; annotation and meta pass through."""
        image, annot, meta = volume
        return F.to_tensor(image), annot, meta

    def __repr__(self):
        return self.__class__.__name__
class RandomFlipImageVolume(Transformable):
    """
    Flip the image volume along the given direction with a given probability

    Args:
        p (float): probability of the volume being flipped. Default value is 0.5
        dim (int): axis to flip along; a negative value selects a random
            axis in [0, 2] on every call.
    """

    def __init__(self, p=0.5, dim=3):
        self.p = p
        self.dim = dim

    def apply(self, volume):
        """
        Args:
            volume (tuple(torch.tensor, torch.tensor, dict)): Image and mask volumes to be cropped. Size is (C, T, H, W)
        Return:
            volume (tuple(torch.tensor, torch.tensor, dict)): Output image and mask volumes. Size is (C, T, H, W)
        """
        image, annot, meta = volume
        dim = self.dim
        if self.dim < 0:
            # NOTE(review): the random axis is drawn in [0, 2]; whether axis 0
            # addresses the channel dimension depends on F.flip — confirm.
            dim = random.randint(0, 2)
        if random.random() < self.p:
            # Image and mask are flipped together to stay aligned.
            image = F.flip(image, dim)
            annot = F.flip(annot, dim)
        return (
            image,
            annot,
            meta
        )

    def __repr__(self):
        return self.__class__.__name__ + "(p={0})".format(self.p)
class PadVolume(Transformable):
    """Pad a (image, annot, meta) volume on all sides.

    Args:
        padding (Number or tuple): per-border padding; an int pads every
            border, a 2/4/6-tuple pads left/right, +top/bottom, +front/back.
        fill: constant fill value for the image (the annotation is always
            padded with 0); only used when padding_mode == 'constant'.
        padding_mode: 'constant', 'reflect', 'replicate' or 'circular'
            (see torch.nn.functional.pad for details).
    """

    def __init__(self, padding, fill=0, padding_mode='constant'):
        assert isinstance(padding, (numbers.Number, tuple))
        assert isinstance(fill, (numbers.Number, str, tuple))
        assert padding_mode in ['constant', 'reflect', 'replicate', 'circular']
        if isinstance(padding, Sequence) and len(padding) not in [2, 4, 6]:
            raise ValueError("Padding must be an int or a 2, 4, or 6 element tuple, not a " +
                             "{} element tuple".format(len(padding)))
        self.padding = padding
        self.fill = fill
        self.padding_mode = padding_mode

    def apply(self, volume):
        """Pad image and annotation alike; meta passes through unchanged."""
        image, annot, meta = volume
        padded_image = F.pad(image, self.padding, self.fill, self.padding_mode)
        # TODO assumes bg is always zero, change it
        padded_annot = F.pad(annot, self.padding, 0, self.padding_mode)
        return padded_image, padded_annot, meta

    def __repr__(self):
        return '{}(padding={}, fill={}, padding_mode={})'.format(
            self.__class__.__name__, self.padding, self.fill, self.padding_mode)
class PadToSizeVolume(Transformable):
    """Pad the given Torch Tensor Volume on all sides to have the given size.
    Args:
        target_size (Number or tuple): Target size to be padded to. If a single int is provided this
            is used to pad all borders. Otherwise, a tuple of length 3 is needed to se the target size of the volume.
            A -1 entry keeps that side's current size (auto-fill).
        fill (int or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of
            length K, it is used to fill all of the K channels respectively.
            This value is only used when the padding_mode is constant
        padding_mode: Type of padding. Should be: 'constant', 'reflect', 'replicate' or 'circular',
            'mean', 'median', 'min', 'max'. Default is constant.
            check torch.nn.functional.pad for further details
    """

    def __init__(self, target_size, fill=0, padding_mode='constant'):
        assert isinstance(target_size, (numbers.Number, tuple))
        assert isinstance(fill, (numbers.Number, str, tuple))
        assert padding_mode in ['constant', 'reflect', 'replicate', 'circular',
                                'mean', 'median', 'min', 'max'
                                ]
        if isinstance(target_size, Sequence) and not len(target_size) == 3:
            raise ValueError("Size must be an int or a 3 element tuple, not a " +
                             "{} element tuple".format(len(target_size)))
        if isinstance(target_size, numbers.Number):
            target_size = tuple([target_size]*3)
        self.target_size = torch.tensor(target_size)
        # At least one side must be concrete: an all-(-1) target is
        # meaningless (pure auto-fill would be a no-op).
        # NOTE(review): the error text "Maximum must be < 3." reads oddly —
        # confirm the intended wording.
        if isinstance(target_size, Sequence) and (self.target_size == -1).all():
            raise ValueError("all of the target size cannot set to auto_fill (-1). "
                             "Maximum must be < 3.")
        self.fill = fill
        self.padding_mode = padding_mode

    def apply(self, volume):
        """
        Args:
            volume (Torch Tensor): Volume to be padded.
        Returns:
            Torch Tensor: Padded volume.
        """
        target_size = self.target_size.clone()
        auto_fill_ind = target_size == -1
        image, annot, meta = volume
        image_size = torch.tensor(image.shape[1:])  # index 0 is the channel
        # Auto-fill (-1) sides inherit the image's current extent.
        target_size[auto_fill_ind] = image_size[auto_fill_ind]
        assert (image_size <= target_size).all()
        size_offset = target_size - image_size
        # Split the slack evenly; the 'after' side takes the odd voxel.
        padding_before = size_offset // 2
        padding_after = size_offset - padding_before
        # torch.nn.functional.pad expects pairs starting at the last
        # dimension, hence the flips before interleaving before/after.
        padding = tuple(torch.stack((padding_before.flip(0), padding_after.flip(0))).T.flatten().tolist())
        fill = self.fill
        padding_mode = self.padding_mode
        # Statistic modes become constant padding with the image statistic.
        if self.padding_mode in ('mean', 'median', 'min', 'max'):
            fill = getattr(image, self.padding_mode)().item()
            padding_mode = 'constant'
        return (
            F.pad(image, padding, fill, padding_mode),
            F.pad(annot, padding, 0, padding_mode),  # TODO assumes bg is always zero, change it
            meta
        )

    def __repr__(self):
        return self.__class__.__name__ + '(target_size={0}, fill={1}, padding_mode={2})'.\
            format(self.target_size, self.fill, self.padding_mode)
class RandomBrightness(Randomizable):
    """Shift image brightness by a fraction of its intensity range.

    `value` in (-0.5, 0.5) is scaled by each image's (or channel's)
    intensity range; positive values brighten, negative values darken.
    """

    def __init__(self, value, channel_wise=True, *args, **kwargs):
        super().__init__(*args, **kwargs)
        assert isinstance(value, float), 'value must be float'
        assert -0.5 < value < +0.5, 'value must be between [-0.5, +0.5]'
        assert isinstance(channel_wise, bool), 'channel_wise is bool'
        self.channel_wise = channel_wise
        self.value = value

    def randomize_params(self, volume):
        # Draw a fresh relative value uniformly from (-0.5, 0.5).
        self.value = torch.rand(1).item() - 1/2

    def update_value(self, image):
        # Scale the relative value by the image's intensity range.
        img_min, img_max = image.min().item(), image.max().item()
        img_range = img_max - img_min
        return img_range * self.value

    def find_ranges(self, image, value):
        """Return (input_range, output_range) implementing the shift."""
        img_min, img_max = image.min().item(), image.max().item()
        if value > 0:
            input_range, output_range = (img_min, img_max), (img_min + value, img_max)
        else:
            input_range, output_range = (img_min, img_max), (img_min, img_max + value)
        return input_range, output_range

    def apply(self, volume):
        image, annot, meta = volume
        for i in range(len(image)):
            if self.channel_wise:
                image_i = image[i]
            else:
                image_i = image
            value = self.update_value(image_i)
            if not value == 0:
                input_range, output_range = self.find_ranges(image_i, value)
                image_i = F.scale_tensor_intensity(image_i, input_range, output_range)
            if self.channel_wise:
                image[i] = image_i
                # NOTE(review): re-randomizes after every channel so each
                # channel gets an independent value — confirm this is the
                # intended interaction with Randomizable.randomize.
                self.randomize(image)
            else:
                # Whole-image mode touches the tensor once, then exits.
                image = image_i
                break
        return image, annot, meta
class RandomContrast(RandomBrightness):
    """Contrast variant of RandomBrightness: shrinks or stretches the
    intensity window instead of shifting it."""

    def __init__(self, value, channel_wise=True, *args, **kwargs):
        super().__init__(value, channel_wise, *args, **kwargs)

    def find_ranges(self, image, value):
        """Return (input_range, output_range) implementing the contrast change."""
        lo, hi = image.min().item(), image.max().item()
        if value < 0:
            # Negative value: widen the output window (reduce contrast).
            return (lo, hi), (lo - value, hi + value)
        # Positive value: clip the input window (increase contrast).
        return (lo + value, hi - value), (lo, hi)
class RandomContrastChannelWise(Transformable):
    """Apply an independent RandomContrast transform per image channel.

    NOTE(review): extends Transformable but never calls super().__init__ —
    confirm the base class requires no initialisation.
    """

    def __init__(self, value, *args, **kwargs):
        assert isinstance(value, (list, tuple)), 'value must be sequential'
        assert len(value) > 1, 'use RandomContrast if len(sigma) < 2'
        self.num_channels = len(value)
        # One single-channel RandomContrast per configured value.
        self.transform = [
            RandomContrast(v, channel_wise=False, *args, **kwargs)
            for v in value
        ]

    def apply(self, volume):
        image, annot, meta = volume
        assert len(image) == len(annot) == self.num_channels, f'number of channels do not match: ' \
            f'{len(image)}, {len(annot)}, {self.num_channels}'
        for i in range(len(image)):
            image_i = image[i]
            # Each channel is wrapped as a degenerate (image, None, None)
            # volume so the per-channel transform can be reused unchanged.
            volume_i = (image_i, None, None)
            transform_i = self.transform[i]
            image_i, _, _ = transform_i(volume_i)
            image[i] = image_i
        return image, annot, meta
class RandomGamma(Randomizable):
    """Gamma-correct the image with a (randomisable) exponent."""

    # Upper bound for the random exponent; larger powers are numerically
    # unstable (see the assertion message below).
    POWER_MAX = 6

    def __init__(self, value, channel_wise=True, *args, **kwargs):
        super().__init__(*args, **kwargs)
        assert isinstance(value, float), 'value must be float'
        assert 0 < value, 'value must be greater than zero'
        assert value < self.POWER_MAX, 'large value greater than 6 are not recommended for numerical stability'
        assert isinstance(channel_wise, bool), 'channel_wise is bool'
        # If True, each channel is corrected independently.
        self.channel_wise = channel_wise
        self.value = value

    def randomize_params(self, volume):
        # to be equal we pick randomly under or over contrast
        # NOTE(review): value_under is in [0, 1) and value_over in
        # [0, POWER_MAX), so value_over can also fall below 1 — the
        # under/over split is not strict; confirm intent.
        value_under = torch.rand(1).item()
        value_over = torch.rand(1).item() * self.POWER_MAX
        self.value = random.choice((value_under, value_over))

    def apply(self, volume):
        image, annot, meta = volume
        for i in range(len(image)):
            if self.channel_wise:
                image_i = image[i]
            else:
                # Whole-tensor correction; loop exits after one pass.
                image_i = image
            image_i = F.gamma_correction(image_i, self.value)
            if self.channel_wise:
                image[i] = image_i
            else:
                image = image_i
                break
        return image, annot, meta
class LogCorrection(Transformable):  # TODO might be interesting to randomize inverse
    """Apply (optionally inverse) logarithmic intensity correction."""

    def __init__(self, inverse=False, channel_wise=True):
        assert isinstance(inverse, bool), 'inverse must be bool'
        assert isinstance(channel_wise, bool), 'channel_wise is bool'
        self.channel_wise = channel_wise
        self.inverse = inverse

    def apply(self, volume):
        image, annot, meta = volume
        if self.channel_wise:
            # Correct each channel independently.
            for idx in range(len(image)):
                image[idx] = F.log_correction(image[idx], self.inverse)
        elif len(image):
            # Single correction over the whole tensor (no-op on an empty
            # tensor, matching the original loop which never ran).
            image = F.log_correction(image, self.inverse)
        return image, annot, meta
class SigmoidCorrection(Transformable):  # TODO might be interesting to randomize inverse
    """Apply (optionally inverse) sigmoid intensity correction."""

    def __init__(self, inverse=False, gain=10, cutoff=0.5, channel_wise=True):
        assert isinstance(inverse, bool), 'inverse must be bool'
        assert 0 < cutoff <= 1, 'cutoff is between [0, 1]'
        assert isinstance(channel_wise, bool), 'channel_wise is bool'
        self.channel_wise = channel_wise
        self.inverse = inverse
        # Steepness and midpoint of the sigmoid.
        self.gain = gain
        self.cutoff = cutoff

    def apply(self, volume):
        image, annot, meta = volume
        if self.channel_wise:
            # Correct each channel independently.
            for idx in range(len(image)):
                image[idx] = F.sigmoid_correction(image[idx], self.inverse, self.gain, self.cutoff)
        elif len(image):
            # Single correction over the whole tensor (no-op on an empty
            # tensor, matching the original loop which never ran).
            image = F.sigmoid_correction(image, self.inverse, self.gain, self.cutoff)
        return image, annot, meta
class HistEqual(Transformable):
    """Histogram-equalise image intensities."""

    def __init__(self, num_bins=256, channel_wise=True):
        assert isinstance(num_bins, int), 'num_bins must be int'
        assert 0 < num_bins
        assert isinstance(channel_wise, bool), 'channel_wise is bool'
        self.channel_wise = channel_wise
        # Number of histogram bins used by the equalisation.
        self.num_bins = num_bins

    def apply(self, volume):
        image, annot, meta = volume
        if self.channel_wise:
            # Equalise each channel independently.
            for idx in range(len(image)):
                image[idx] = F.equalize_hist(image[idx], self.num_bins)
        elif len(image):
            # Single equalisation over the whole tensor (no-op on an empty
            # tensor, matching the original loop which never ran).
            image = F.equalize_hist(image, self.num_bins)
        return image, annot, meta
class AdditiveNoise(Randomizable):
    """Add random noise of a configurable distribution to the image."""

    # Supported noise distributions.
    NOISE_TYPE = ('gaussian', 'rician', 'rayleigh',)
    # Upper bound for the randomised sigma.
    MAX_SIGMA = 1.5

    def __init__(self, sigma, noise_type='gaussian', randomize_type=False, out_of_bound_mode='normalize',
                 channel_wise=True, *args, **kwargs):
        super().__init__(*args, **kwargs)
        assert isinstance(sigma, float), 'sigma must be float'
        assert 0 < sigma, 'sigma must be greater than zero'
        assert noise_type in self.NOISE_TYPE, 'unknown noise type'
        assert out_of_bound_mode in ('normalize', 'clamp',), 'undefined out_of_bound_mode'
        assert isinstance(channel_wise, bool), 'channel_wise is bool'
        self.channel_wise = channel_wise
        self.sigma = sigma
        self.noise_type = noise_type
        # If True, the noise distribution itself is re-drawn on randomisation.
        self.randomize_type = randomize_type
        # How out-of-range values are handled after noise is added.
        self.out_of_bound_mode = out_of_bound_mode

    def randomize_params(self, volume):
        # Fresh sigma in [0, MAX_SIGMA); optionally a fresh distribution.
        self.sigma = torch.rand(1).item() * self.MAX_SIGMA
        if self.randomize_type:
            self.noise_type = random.choice(self.NOISE_TYPE)

    def apply(self, volume):
        image, annot, meta = volume
        for i in range(len(image)):
            if self.channel_wise:
                image_i = image[i]
            else:
                # Whole-tensor noise; loop exits after one pass.
                image_i = image
            image_i = F.additive_noise(image_i, self.sigma, self.noise_type, self.out_of_bound_mode)
            if self.channel_wise:
                image[i] = image_i
                # NOTE(review): randomize() is invoked after every channel so
                # each channel appears to get its own sigma/type — confirm
                # against Randomizable.randomize.
                self.randomize(image)
            else:
                image = image_i
                break
        return image, annot, meta
class AdditiveNoiseChannelWise(Transformable):
    """Apply an independently-configured AdditiveNoise to every channel."""

    def __init__(self, sigma, noise_type='gaussian', randomize_type=False, out_of_bound_mode='normalize',
                 *args, **kwargs):
        assert isinstance(sigma, (list, tuple)), 'sigma must be sequential'
        assert len(sigma) > 1, 'use AdditiveNoise if len(sigma) < 2'
        self.num_channels = len(sigma)
        # One single-channel noise transform per configured sigma.
        self.transform = [
            AdditiveNoise(s, channel_wise=False, noise_type=noise_type, randomize_type=randomize_type,
                          out_of_bound_mode=out_of_bound_mode, *args, **kwargs)
            for s in sigma
        ]

    def apply(self, volume):
        image, annot, meta = volume
        assert len(image) == self.num_channels, f'number of channels do not match {len(image)} == {self.num_channels}'
        for idx, channel_transform in enumerate(self.transform):
            # Annotation and metadata are not forwarded, so they stay untouched.
            transformed, _, _ = channel_transform((image[idx], None, None))
            image[idx] = transformed
        return image, annot, meta
class PresetMotionArtifact(Transformable):
    def __init__(self, time, delta=None, direction=None, pixels=True, theta=None, seq=None,
                 degrees=True, mode='bilinear', padding_mode='zeros', align_corners=False):
        """
        Apply a preset motion artifact to an image volume. This class wraps the apply_motion_from_affine_params function.
        Args:
            volume (torch.Tensor): Volume to be transformed and resampled. Must be 4D
                with a channel dimension i.e. (C, D, H, W).
            time (float): Time at which the motion occurs during scanning. Should be between [0.5, 1), where 0
                represents the beginning of the scan and 1 represents the end. Time >= 0.5 assures that the
                most prominent object in the image is in the original position of the image so that ground truth
                annotations don't need to be adjusted.
            delta (int, float, list, tuple, np.ndarray, torch.Tensor, optional): Can either be a number (int or float)
                which specifies the magnitude of translation along a single axis, or a list, tuple, array
                or tensor which specifies the translation components for all three directions. Default is None.
            direction (str, optional): Specifies the direction of translation if delta is an int or float. Must be
                either 'x', 'y' or 'z', corresponding to one of three array axes. If off-axis translation is desired,
                please specify delta as a length-3 item of translation components. Default is None.
            pixels (bool, optional): If True, the magnitude of translation is specified in pixels, as opposed to
                units of half the input tensor (see pytorch grid_sample for details). Default is True.
            theta (int, float, list, tuple, np.ndarray, torch.Tensor, optional): Can either be a number (int or float)
                which specifies the angle of rotation about a single axis, or a list, tuple, array or tensor which specifies
                a set of three Euler angles for rotation. Default is None.
            seq (str): Must be specified if theta is provided. Specifies sequence of axes for rotations. Up to 3 characters
                belonging to the set {'X', 'Y', 'Z'} for intrinsic rotations, or {'x', 'y', 'z'} for extrinsic rotations.
                Extrinsic and intrinsic rotations cannot be mixed in one function call. This description is repeated from
                the documentation for scipy.spatial.transform.Rotation.from_euler. Default is None.
            degrees (bool, optional): If True, then the given angles are assumed to be in degrees.
                This description is repeated from the documentation for
                scipy.spatial.transform.Rotation.from_euler. Default is True.
            mode (str, optional): Interpolation mode to calculate output values 'bilinear' | 'nearest'.
                Note that for 3D image input the interpolation mode used internally by torch is
                actually trilinear. Default is 'bilinear'
            padding_mode (str, optional): Padding mode for outside grid values 'zeros' | 'border' | 'reflection'.
                Default is 'zeros'. See torch documentation for more details.
            align_corners (bool, optional): See torch documentation for details. Default is False.
        Returns:
            volume (torch.Tensor): Motion-artifacted image. Shape is the same as the input.
        """
        # fix: the original assertion messages claimed the valid range was
        # [0.0, 1.0) while the check enforces [0.5, 1.0).
        assert isinstance(time, float), 'time must be a float.'
        assert 0.5 <= time < 1.0, 'time must be float between 0.5 (inclusive) and 1.0 (exclusive).'
        if delta is not None:
            assert isinstance(delta, (int, float, list, tuple, np.ndarray, torch.Tensor)), \
                'delta should be an int, float, list, tuple, array, or tensor.'
            if isinstance(delta, (int, float)):
                assert direction is not None, 'If delta is int or float, direction must be specified.'
                assert isinstance(direction, str), 'Translation direction must be "x", "y", or "z".'
                # fix: membership in the *string* 'xyz' accepted '' (and 'xy');
                # check against a tuple of the three valid single letters.
                assert direction in ('x', 'y', 'z'), 'Translation direction must be "x", "y", or "z".'
            else:
                if isinstance(delta, (np.ndarray, torch.Tensor)):
                    assert len(delta.shape) == 1, 'If delta is an array or tensor it must have a single dimension.'
                assert len(delta) == 3, 'If delta is a list, tuple, array or tensor it must have length 3.'
        assert isinstance(pixels, bool), 'pixels must be either True or False'
        if theta is not None:
            assert isinstance(theta, (int, float, list, tuple, np.ndarray, torch.Tensor)), \
                'theta should be an int, float, list, tuple, array, or tensor.'
            assert seq is not None, 'If theta is int or float, seq must be specified.'
            assert isinstance(seq, str), 'seq argument must be a string.'
            if isinstance(theta, (int, float)):
                assert len(seq) == 1, 'If theta is int or float, seq must be "x", "y", or "z".'
                # fix: same empty/substring issue as `direction` above.
                assert seq in ('x', 'y', 'z'), 'If theta is int or float, seq must be "x", "y", or "z".'
            else:
                assert len(seq) == 3, \
                    'If theta is list, tuple, array or tensor, ' \
                    'seq must be a length-3 string containing letters "x", "y", and "z".'
                # NOTE(review): only lowercase (extrinsic) letters are accepted
                # even though the docstring also mentions 'X','Y','Z' — confirm.
                assert all([letter in 'xyz' for letter in seq]), 'All letters in seq must be "x", "y", or "z".'
                if isinstance(theta, (np.ndarray, torch.Tensor)):
                    assert len(theta.shape) == 1, 'If theta is an array or tensor it must have a single dimension.'
                assert len(theta) == 3, 'If theta is a list, tuple, array or tensor it must have length 3.'
        assert isinstance(degrees, bool), 'Degrees must be a boolean.'
        assert isinstance(mode, str), 'Mode must be "bilinear" or "nearest"'
        assert mode in ('bilinear', 'nearest'), 'Mode must be "bilinear" or "nearest"'
        assert isinstance(padding_mode, str), 'Padding mode must be either "zeros", "border" or "reflection"'
        assert padding_mode in ('zeros', 'border', 'reflection'), 'Padding mode must be ' \
                                                                 'either "zeros", "border" or "reflection"'
        assert isinstance(align_corners, bool), 'align_corners must be True or False'
        self.time = time
        self.delta = delta
        self.direction = direction
        self.pixels = pixels
        self.theta = theta
        self.seq = seq
        self.degrees = degrees
        self.mode = mode
        self.padding_mode = padding_mode
        self.align_corners = align_corners

    def apply(self, volume):
        # The artifact is applied to the image only; the annotation passes
        # through untouched (motion occurring at time >= 0.5 keeps the main
        # object in place, see the constructor docstring).
        image, annot, meta = volume
        assert image.shape[1:] == annot.shape[1:], 'Image and ground-truth annotation should have the same shape.'
        image = F.k_space_motion_artifact(
            image, self.time, delta=self.delta, direction=self.direction, pixels=self.pixels, theta=self.theta,
            seq=self.seq, degrees=self.degrees, mode=self.mode, padding_mode=self.padding_mode,
            align_corners=self.align_corners
        )
        return image, annot, meta
class OneHotAnnot(Transformable):
    """One-hot encode the annotation tensor along *dim*."""

    def __init__(self, num_classes: int, dtype: torch.dtype = torch.float, dim: int = 0, ignore_background=True):
        assert isinstance(num_classes, int), 'num_classes must be int'
        assert isinstance(dim, int), 'dim must be int'
        assert 1 <= num_classes
        assert 0 <= dim
        assert isinstance(ignore_background, bool)
        self.num_classes = num_classes
        self.dtype = dtype
        self.dim = dim
        self.ignore_background = ignore_background
        if self.ignore_background:
            # Reserve an extra class slot; background will be ignored later.
            self.num_classes += 1

    def apply(self, volume):
        image, annot, meta = volume
        encoded = F.one_hot(
            labels=annot,
            num_classes=self.num_classes,
            dtype=self.dtype,
            dim=self.dim,
        )
        return image, encoded, meta
class Affine(Transformable):
    """Apply the same MONAI affine to both image and annotation."""

    def __init__(self, **kwargs):
        # Configuration is forwarded verbatim to monai.transforms.Affine.
        self.transform = mn.Affine(**kwargs)

    def apply(self, volume):
        image, annot, meta = volume
        # One shared transform keeps image and annotation aligned.
        image = self.transform(image)
        annot = self.transform(annot)
        return image, annot, meta
class AffineRotate(Transformable):
    """Rotate image and annotation together via monai.transforms.Affine."""

    def __init__(self, **kwargs):
        assert 'rotate_params' in kwargs, 'rotate parameters are undefined'
        self.transform = mn.Affine(**kwargs)

    def apply(self, volume):
        image, annot, meta = volume
        # One shared transform keeps image and annotation aligned.
        image = self.transform(image)
        annot = self.transform(annot)
        return image, annot, meta
class AffineShear(Transformable):
    """Shear image and annotation together via monai.transforms.Affine."""

    def __init__(self, **kwargs):
        assert 'shear_params' in kwargs, 'shear parameters are undefined'
        self.transform = mn.Affine(**kwargs)

    def apply(self, volume):
        image, annot, meta = volume
        # One shared transform keeps image and annotation aligned.
        image = self.transform(image)
        annot = self.transform(annot)
        return image, annot, meta
class AffineTranslate(Transformable):
    """Translate image and annotation together via monai.transforms.Affine."""

    def __init__(self, **kwargs):
        assert 'translate_params' in kwargs, 'translate parameters are undefined'
        self.transform = mn.Affine(**kwargs)

    def apply(self, volume):
        image, annot, meta = volume
        # One shared transform keeps image and annotation aligned.
        image = self.transform(image)
        annot = self.transform(annot)
        return image, annot, meta
class AffineScale(Transformable):
    """Scale image and annotation together via monai.transforms.Affine."""

    def __init__(self, **kwargs):
        assert 'scale_params' in kwargs, 'scale parameters are undefined'
        self.transform = mn.Affine(**kwargs)

    def apply(self, volume):
        image, annot, meta = volume
        # One shared transform keeps image and annotation aligned.
        image = self.transform(image)
        annot = self.transform(annot)
        return image, annot, meta
class Spike(Transformable):
    """Add k-space spike artifacts to the image via torchio.Spike.

    Annotation and metadata pass through untouched.
    """

    def __init__(self, **kwargs):
        self.transform = tio.Spike(**kwargs)

    def apply(self, volume):
        image, annot, meta = volume
        image = self.transform(image)
        return image, annot, meta
class RandomSpike(Transformable):
    """Add random k-space spike artifacts via torchio.RandomSpike.

    Annotation and metadata pass through untouched.
    """

    def __init__(self, **kwargs):
        self.transform = tio.RandomSpike(**kwargs)

    def apply(self, volume):
        image, annot, meta = volume
        image = self.transform(image)
        return image, annot, meta
class Ghosting(Transformable):
    """Add MRI ghosting artifacts via torchio.Ghosting.

    Annotation and metadata pass through untouched.
    """

    def __init__(self, **kwargs):
        self.transform = tio.Ghosting(**kwargs)

    def apply(self, volume):
        image, annot, meta = volume
        image = self.transform(image)
        return image, annot, meta
class Blur(Transformable):
    """Blur the image via torchio.Blur.

    Annotation and metadata pass through untouched.
    """

    def __init__(self, **kwargs):
        self.transform = tio.Blur(**kwargs)

    def apply(self, volume):
        image, annot, meta = volume
        image = self.transform(image)
        return image, annot, meta
class BiasField(Transformable):
    """Apply an MRI bias field to the image via torchio.BiasField.

    Annotation and metadata pass through untouched.
    """

    def __init__(self, **kwargs):
        self.transform = tio.BiasField(**kwargs)

    def apply(self, volume):
        image, annot, meta = volume
        image = self.transform(image)
        return image, annot, meta
class Swap(Transformable):
    """Randomly swap image patches via torchio.RandomSwap.

    Annotation and metadata pass through untouched.
    """

    def __init__(self, **kwargs):
        self.transform = tio.RandomSwap(**kwargs)

    def apply(self, volume):
        image, annot, meta = volume
        image = self.transform(image)
        return image, annot, meta
class MONAITransformVolume(Transformable):
    """Adapter that runs a MONAI array transform on the image only.

    The transform class is instantiated once in the constructor; apply()
    feeds it the image as a numpy array and converts the result back to a
    tensor. Annotation and metadata are returned unchanged.
    """

    def __init__(self, transform, *args, **kwargs):
        self.transform = transform(*args, **kwargs)

    def apply(self, volume):
        image, annot, meta = volume
        transformed = self.transform(image.numpy())
        return torch.as_tensor(transformed), annot, meta
class NormalizeMeanStdSingleVolume(MONAITransformVolume):
    """Normalise image intensities via mn.NormalizeIntensity."""

    def __init__(self, *args, **kwargs):
        super().__init__(mn.NormalizeIntensity, *args, **kwargs)
class ScaleIntensityRangeVolume(MONAITransformVolume):
    """Rescale image intensities via mn.ScaleIntensityRange."""

    def __init__(self, *args, **kwargs):
        super().__init__(mn.ScaleIntensityRange, *args, **kwargs)
class ScaleIntensityRangePercentilesVolume(MONAITransformVolume):
    """Percentile-based intensity rescaling via mn.ScaleIntensityRangePercentiles."""

    def __init__(self, *args, **kwargs):
        super().__init__(mn.ScaleIntensityRangePercentiles, *args, **kwargs)
class MaskIntensityVolume(MONAITransformVolume):
    """Mask image intensities with the annotation via mn.MaskIntensity."""

    def __init__(self, *args, **kwargs):
        super().__init__(mn.MaskIntensity, *args, **kwargs)

    def apply(self, volume):
        image, annot, meta = volume
        # Unlike the base class, the annotation is passed as the mask.
        masked = self.transform(image.numpy(), annot.numpy())
        return torch.as_tensor(masked), annot, meta
class CropForegroundVolume(MONAITransformVolume):
    """Crop the image to its foreground via mn.CropForeground."""

    def __init__(self, *args, **kwargs):
        super().__init__(mn.CropForeground, *args, **kwargs)
class DivisiblePadVolume(MONAITransformVolume):
    """Pad the image to divisible spatial sizes via mn.DivisiblePad."""

    def __init__(self, *args, **kwargs):
        super().__init__(mn.DivisiblePad, *args, **kwargs)
class ConcatAnnot2ImgVolume(Transformable):
    """
    concatenate the first to last channels of annot to the last channel of image.
    num_task_masks_cat is added to meta to track back the number of channels concatenated
    """

    def __init__(self, num_channels=1):
        # Number of leading annotation channels to move into the image.
        self.num_channels = num_channels

    def apply(self, volume):
        """
        Args:
            volume (tuple(torch.tensor, torch.tensor, dict)): Image and mask volumes to be cropped. Size is (C, T, H, W)
        Return:
            volume (tuple(torch.tensor, torch.tensor, dict)): Output image and mask volumes. Size is (C, T, H, W)
        """
        image, annot, meta = volume
        assert annot.ndim == 4, 'improper annotation tensor is passed'
        assert 0 <= abs(self.num_channels) <= len(annot), 'improper annotation tensor is passed'
        if 'num_task_masks_cat' not in meta:
            meta['num_task_masks_cat'] = 0
        # Split the annotation: the first num_channels become "task masks"
        # that travel with the image; the remainder stays as the annotation.
        task_masks, subtask_mask = annot[:self.num_channels], annot[self.num_channels:]
        if not len(task_masks):
            # NOTE(review): with num_channels <= 0 the first slice is empty
            # and the two halves are swapped, i.e. the tail is concatenated
            # instead — confirm this is the intended semantics.
            task_masks, subtask_mask = subtask_mask, task_masks
        image = torch.cat((image, task_masks), dim=0)
        # Annotation becomes None when every channel was moved.
        annot = None if not len(subtask_mask) else subtask_mask
        # Record how many channels were moved so ConcatImg2AnnotVolume can
        # split them back out later.
        meta['num_task_masks_cat'] += len(task_masks)
        return (
            image,
            annot,
            meta
        )

    def __repr__(self):
        return self.__class__.__name__
class ConcatImg2AnnotVolume(Transformable):
    """
    concatenate the last num_task_masks_cat channels of image to the first of annot
    """

    def __init__(self, num_channels=1):
        assert num_channels > 0, 'undefined values for num_channels'
        # Number of trailing image channels to move back into the annotation.
        self.num_channels = num_channels

    def apply(self, volume):
        """
        Args:
            volume (tuple(torch.tensor, torch.tensor, dict)): Image and mask volumes to be cropped. Size is (C, T, H, W)
        Return:
            volume (tuple(torch.tensor, torch.tensor, dict)): Output image and mask volumes. Size is (C, T, H, W)
        """
        image, annot, meta = volume
        if annot is not None:
            assert annot.ndim == 4, 'improper annotation tensor is passed'
        # Only valid after ConcatAnnot2ImgVolume stashed channels into the image.
        assert 'num_task_masks_cat' in meta and meta['num_task_masks_cat'] > 0
        assert self.num_channels <= meta['num_task_masks_cat'] and self.num_channels < len(image)
        num_channels = self.num_channels
        # Peel the trailing task-mask channels off the image ...
        image, task_masks = image[:-num_channels], image[-num_channels:]
        # ... and put them back in front of the annotation (or make them
        # the annotation when none exists).
        annot = task_masks if annot is None else torch.cat((task_masks, annot), dim=0)
        meta['num_task_masks_cat'] -= len(task_masks)
        if not meta['num_task_masks_cat']:
            # All stashed channels restored; drop the bookkeeping key.
            meta.pop('num_task_masks_cat')
        return (
            image,
            annot,
            meta
        )

    def __repr__(self):
        return self.__class__.__name__
class AnisotropyVolume(Transformable):
    r"""Downsample an image along an axis and upsample to initial space.
    This transform simulates an image that has been acquired using anisotropic
    spacing and resampled back to its original spacing.
    Similar to the work by Billot et al.: `Partial Volume Segmentation of Brain
    MRI Scans of any Resolution and
    Contrast <https://link.springer.com/chapter/10.1007/978-3-030-59728-3_18>`_.
    Args:
        axes: Axis or tuple of axes along which the image will be downsampled.
        downsampling: Downsampling factor :math:`m \gt 1`. If a tuple
            :math:`(a, b)` is provided then :math:`m \sim \mathcal{U}(a, b)`.
        interpolation_mode (str): algorithm used for upsampling:
            ``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` |
            ``'trilinear'`` | ``'area'``. Default: ``'nearest'``
    """

    def __init__(self, axes, downsampling, interpolation_mode='linear', annot=False):
        assert isinstance(axes, (list, tuple))
        assert len(axes) in (1, 2, 3)
        # fix: the original compared list(axes) == list(set(axes)), which
        # false-fails for unique-but-unsorted axes (set iteration order);
        # only uniqueness matters here.
        assert len(set(axes)) == len(axes)
        # fix: the original wrote `assert isinstance(...), 1 < downsampling <= 5`,
        # making the range check the assertion *message* — it was never
        # evaluated. Check both conditions explicitly.
        assert isinstance(downsampling, float), 'downsampling must be float'
        assert 1 < downsampling <= 5, 'downsampling must be in (1, 5]'
        assert isinstance(interpolation_mode, str)
        assert isinstance(annot, bool)
        self.axes = sorted(axes)
        self.downsampling = downsampling
        self.interpolation_mode = interpolation_mode
        # If True, the annotation is resampled alongside the image.
        self.annot = annot

    def apply(self, volume):
        """
        Args:
            volume (tuple(torch.tensor, torch.tensor, dict)): Image and mask volumes to be cropped. Size is (C, T, H, W)
        Return:
            volume (tuple(torch.tensor, torch.tensor, dict)): Output image and mask volumes. Size is (C, T, H, W)
        """
        image, annot, meta = volume
        ref_shape_tensor = torch.tensor(image.shape[1:])
        target_shape_tensor = (ref_shape_tensor / self.downsampling).int()
        for i in range(image.ndim - 1):  # first dim is always channels
            if i not in self.axes:
                # Untouched axes keep their original size.
                target_shape_tensor[i] = ref_shape_tensor[i]
        # Down- then up-sample to introduce anisotropic blur.
        image = F.resize(image, list(target_shape_tensor), self.interpolation_mode)
        image = F.resize(image, list(ref_shape_tensor), self.interpolation_mode)
        if self.annot:
            # fix: pass plain lists like the image branch (the original
            # passed raw tensors to F.resize here).
            annot = F.resize(annot, list(target_shape_tensor), 'nearest')
            annot = F.resize(annot, list(ref_shape_tensor), 'nearest')
        return (
            image,
            annot,
            meta
        )

    def __repr__(self):
        return self.__class__.__name__
class ElasticDeformationVolume(Transformable):
    """Elastically deform image and annotation via torchio.ElasticDeformation."""

    def __init__(self, *args, **kwargs):
        self.transform = tio.ElasticDeformation(*args, **kwargs)

    def apply(self, volume):
        image, annot, meta = volume
        # TODO pass Subject instance
        image = self.transform(image)
        annot = self.transform(annot)
        return image, annot, meta
class MotionVolume(Transformable):
    """Add motion artifacts to the image via torchio.Motion.

    Annotation and metadata pass through untouched.
    """

    def __init__(self, **kwargs):
        self.transform = tio.Motion(**kwargs)

    def apply(self, volume):
        image, annot, meta = volume
        # TODO pass Subject instance
        image = self.transform(image)
        return image, annot, meta
class ZoomVolume(MONAITransformVolume):
    """Zoom the image volume via mn.Zoom."""

    def __init__(self, *args, **kwargs):
        super().__init__(mn.Zoom, *args, **kwargs)
|
"""
TextWriterクラスのテスト
"""
import os
import sys
from unittest import TestCase
import pickle
# srcの下をパスに追加
sys.path.append(os.path.join(os.getcwd(), 'src'))
from fig_package.text_writer import TextWriter
class TestTextWriter(TestCase):
    """Tests for the TextWriter class."""

    def setUp(self):
        """Per-test setup: start with empty cleanup queues."""
        self.del_file_list = []  # files to delete in tearDown
        self.del_dir_list = []   # directories to delete in tearDown
        return

    def tearDown(self):
        """Per-test teardown: remove artifacts queued by the test."""
        # Delete files first ...
        for f in self.del_file_list:
            os.remove(f)
        # ... then the (now empty) directories.
        for d in self.del_dir_list:
            os.rmdir(d)
        return

    def test_1_no_file(self):
        """
        Output is written successfully.
        """
        # Input file: pickled ynf data shipped with the test fixtures.
        ynf_file1 = \
            os.path.join( \
                os.path.dirname(__file__), \
                '../../data/ynf/A3yoko.ynf')
        os.makedirs(
            os.path.abspath(os.path.join(ynf_file1, os.pardir)),
            exist_ok=True)
        # Output file written by TextWriter.
        text_file1 = \
            os.path.join( \
                os.path.dirname(__file__), \
                '../../data/tmp/test1.txt')
        os.makedirs(
            os.path.abspath(os.path.join(text_file1, os.pardir)),
            exist_ok=True)
        # Load and deserialize the fixture.
        ynf = None
        with open(ynf_file1, 'rb') as f:
            ynf = pickle.load(f)
        w = TextWriter(file_path=text_file1, overwrite=True)
        w.write(ynf)
|
"""
A simple Python based hangman solver.
One flaw with this program is that it uses letter based frequency analysis, as opposed to
a word based analysis. Hence, it implicitly weights all words as equally likely to appear
in a hangman game -- additional data weighting the actual frequency of words that appear
in the context of a hangman game could be used to improve this model.
"""
# vocabulary stored in dictionary sorted by length
from vocabulary import sorted_words
##### PT 1: Find the words that might match. #####
def get_possible_words(input_word, wrong_chars, candidate_words):
    """Given an input word in the format 'p??ho?', wrong characters in
    dct form {'e': True, 'f': True, 'g': True} and a starting list of
    candidate words, returns a list of possible word matches."""
    # A candidate survives only if its revealed letters line up AND it
    # contains none of the known-wrong letters (checked in that order).
    return [
        word for word in candidate_words
        if matching_letters(input_word, word)
        and contains_no_wrong_letters(wrong_chars, word)
    ]
# helper functions
def matching_letters(input_word, guess):
    """Given an input word in form 'p??ho?' of the same length as the candidate guess,
    returns whether the known letters in the input word match the corresponding indices
    of the candidate guess"""
    for pos, known in enumerate(input_word):
        # '?' is a wildcard; anything else must match exactly.
        if known != '?' and known != guess[pos]:
            return False
    return True
def contains_no_wrong_letters(wrong_chars, guess):
    """Given a dictionary of wrong char guesses, determines whether or not a candidate word
    contains any letters known to be wrong."""
    # Check only the distinct letters of the guess.
    distinct_chars = set(guess)
    return all(char not in wrong_chars for char in distinct_chars)
##### PT 2: Find the optimal words to try. #####
def get_top_letters(input_word, possible_words):
    """Given an input word guess, and a list of potential words that match up with this word guess,
    determines which letters to try next.
    Returns list of letter tuples in the format
    (letter, probability of matching), sorted by prob accuracy."""
    # all candidates come in lower char form only, so no need to lower again.
    counts = {}
    for word in possible_words:
        # Each word votes once per distinct letter that is not already
        # revealed in the guess (repeats within a word count only once).
        unseen = {char for char in word if char not in input_word}
        for char in unseen:
            counts[char] = counts.get(char, 0) + 1
    ranked = sorted(counts.items(), key=lambda pair: pair[1], reverse=True)
    return [(char, float(n) / len(possible_words)) for char, n in ranked]
##### PT 3: Several run options. #####
def print_message(input_word, top_letters, possible_words):
    """Print the current guess, the best next letters, and all candidate words.

    Fix: the original used Python-2-only print *statements*; single-argument
    print(...) calls produce identical output on both Python 2 and 3.
    """
    print("\nYour current guess: {guess}".format(guess=input_word.upper()))
    print("\nTop letters to guess next:\n")
    # Show at most the five most promising letters.
    for letter in top_letters[:5]:
        print("{char}: {prob}% probability of match".format(char=letter[0].upper(), prob=round(letter[1]*100, 2)))
    print("\nPossible words (total {word_count}):\n".format(word_count=len(possible_words)))
    for candidate in possible_words:
        print(candidate)
def run(input_word, wrong_chars):
    """input_word = string formatted as 'p??ho?' with '?'s for missing values;
    wrong_chars = any iterative of wrong_chars
    prints top letters to try.
    """
    # Normalise casing of the guess.
    input_word = input_word.lower()
    # Accept any iterable of wrong letters; convert to the dict form the
    # matching helpers expect.
    if not isinstance(wrong_chars, dict):
        wrong_chars = {char: True for char in wrong_chars}
    # Only words of exactly the right length can match.
    candidates = sorted_words[len(input_word)]
    matches = get_possible_words(input_word, wrong_chars, candidates)
    ranked_letters = get_top_letters(input_word, matches)
    print_message(input_word, ranked_letters, matches)
def run_interactive():
    """Prompt for the current guess and the known-wrong letters, then run the solver.

    Fix: raw_input() only exists on Python 2; fall back to input() so the
    prompt also works on Python 3 (behaviour on Python 2 is unchanged).
    """
    try:
        read_line = raw_input  # Python 2
    except NameError:
        read_line = input  # Python 3
    input_word = read_line("Enter your hangman guess here. Enter words you know, and use ? for words you don't know. e.g. 'h?e???' ").lower().strip("'")
    wrong_chars = read_line("What words have you tried and gotten wrong before? enter with no spaces, e.g. 'acrq' ").lower().strip("'")
    run(input_word, wrong_chars)
##### PT 4: Testing #####
if __name__ == '__main__':
    # Launch the interactive prompt only when run as a script.
    run_interactive()
import unittest
from katas.kyu_7.highest_profit import min_max
class HighestProfitTestCase(unittest.TestCase):
    """Unit tests for the min_max kata solution."""

    def test_equals(self):
        result = min_max([1, 2, 3, 4, 5])
        self.assertEqual(result, [1, 5])

    def test_equals_2(self):
        result = min_max([2334454, 5])
        self.assertEqual(result, [5, 2334454])

    def test_equals_3(self):
        # A single element is both the minimum and the maximum.
        result = min_max([1])
        self.assertEqual(result, [1, 1])
|
#!/usr/bin/python
# Project Euler 15: count the monotonic (right/down only) lattice paths
# from the top-left to the bottom-right corner of a 20x20 grid
# (21 x 21 grid points).
size = 21


def count_paths(n):
    """Return the number of right/down paths across an n x n grid of points.

    Replaces the original memoised double recursion (which needed a global
    grid and ~2n recursion depth) with a single-row dynamic programme:
    row[j] holds the number of paths from column j of the current row to
    the bottom-right corner.
    """
    # Bottom row: exactly one path (straight right) from every point.
    row = [1] * n
    for _ in range(n - 1):
        # Fold in the next row up: paths(j) = paths below + paths to the right.
        for j in range(n - 2, -1, -1):
            row[j] += row[j + 1]
    return row[0]


# Same output as the original script: the path count for the full grid.
print(count_paths(size))
import random
class Node:
    """A single node of a binary search tree."""

    def __init__(self, val):
        self.data = val
        # Children start empty; links are wired up by binary_insert.
        self.l_child = None
        self.r_child = None
def binary_insert(root, node):
    """Insert *node* into the BST rooted at *root* and return the root.

    Fix: the original did `root = node` when root was None, which only
    rebound the local name — inserting into an empty tree silently dropped
    the node. Returning the (possibly new) root lets callers write
    `root = binary_insert(root, node)`; existing callers that ignore the
    return value are unaffected.
    """
    if root is None:
        return node
    if root.data > node.data:
        if root.l_child is None:
            root.l_child = node
        else:
            binary_insert(root.l_child, node)
    else:
        if root.r_child is None:
            root.r_child = node
        else:
            binary_insert(root.r_child, node)
    return root
def binary_search(root, val):
    """Return the node holding *val* in the BST, or None when absent."""
    # Iterative descent instead of recursion: walk left for smaller
    # values, right for larger, until found or the subtree runs out.
    current = root
    while current is not None and current.data != val:
        current = current.l_child if val < current.data else current.r_child
    return current
def in_order_print(root):
    """Print every value of the tree in (left, node, right) order."""
    # Explicit-stack traversal instead of recursion.
    stack = []
    node = root
    while stack or node:
        # Dive as far left as possible ...
        while node:
            stack.append(node)
            node = node.l_child
        # ... then visit and move to the right subtree.
        node = stack.pop()
        print(node.data)
        node = node.r_child
def pre_order_print(root):
    """Print every value of the tree in (node, left, right) order."""
    # Explicit-stack traversal; push right before left so the left
    # subtree is visited first.
    stack = [root] if root else []
    while stack:
        node = stack.pop()
        print(node.data)
        if node.r_child:
            stack.append(node.r_child)
        if node.l_child:
            stack.append(node.l_child)
# r = Node(3)
# for i in range(100):
# binary_insert(r, Node(random.randrange(100)))
#
# # in_order_print(r)
# # print("----")
# # print(binary_search(r, 10))
def combine(n, k):
    """Return all k-element combinations of 1..n, in lexicographic order.

    Uses itertools.combinations instead of the original hand-rolled
    build-and-extend loop; the ordering is identical for k >= 1.
    Fix: for k == 0 (n > 1) the original returned single-element lists
    including n + 1 — nonsense output; the correct answer is [[]]
    (one empty combination).
    """
    from itertools import combinations
    if n == 0 or k > n:
        return []
    if n == 1:
        return [[1]]
    return [list(combo) for combo in combinations(range(1, n + 1), k)]
# print(combine(5,6))
# Build the power set of {1, ..., 20}: every k-combination for each k,
# with the empty set appended last.
n = 20
L = []
for i in range(1, n+1):
    for x in combine(n, i):
        L.append(x)
L.append([])
# NOTE: prints all 2**20 subsets (the doubled parentheses are redundant).
print((L))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 13 21:33:52 2018
@author: brandinho
"""
import numpy as np
from pokerCombinatorics import calcProbs, findHandStatus
def fetchProbabilityArray(hand, table, currentHandRank):
    """
    the array has the following probabilities:
    [straight flush, four of a kind, full house, flush, straight, three of a kind, two pair, pair]
    """
    # Rank names ordered worst -> best; note probabilityArray above is the
    # reverse order (best -> worst), with index 0 = straight flush.
    rankTypes = ["Pair", "Two Pair", "Three of a Kind", "Straight", "Flush", "Full House", "Four of a Kind", "Straight Flush"]
    probabilityArray = np.ones(8)
    numCardsOnTable = len(table)
    # Map board size to the betting street.
    # NOTE(review): board sizes 1 and 2 leave cardsOnTable unassigned and
    # would raise UnboundLocalError below — presumably unreachable in
    # Texas hold'em; confirm with callers.
    if numCardsOnTable == 0:
        cardsOnTable = "PreFlop"
    elif numCardsOnTable == 3:
        cardsOnTable = "Flop"
    elif numCardsOnTable == 4:
        cardsOnTable = "Turn"
    elif numCardsOnTable == 5:
        cardsOnTable = "River"
    handStatus = findHandStatus(hand, table)
    if cardsOnTable == "River":
        # No cards to come: every rank better than the current one
        # (currentHandRank 1 appears to be the best) gets probability 0.
        probabilityArray[:(currentHandRank - 1)] = 0
    else:
        if currentHandRank == 1:
            # Already holding the top rank: nothing better to draw to.
            return probabilityArray
        elif currentHandRank == 2:
            probabilityArray[0] = 0
            return probabilityArray
        elif currentHandRank >= 3:
            # Compute draw probabilities for each rank better than the
            # current one, filling indices (currentHandRank-2) down to 0.
            index = currentHandRank - 1
            tempRankTypes = rankTypes[-(index):]
            for rank in tempRankTypes:
                probabilityArray[(index-1)] = calcProbs(hand, rank, cardsOnTable, handStatus)
                index -= 1
    return probabilityArray
def simulateProbability(hand, table, deck, simulations):
    """Monte-Carlo estimate of win/tie probability for *hand*.

    Each simulation reshuffles the deck, deals a random opponent hand,
    completes the board to the river, and asks the deck who wins.

    Returns:
        np.ndarray of shape (simulations, 2): cumulative win probability
        (column 0) and tie probability (column 1) after each simulation.
    """
    winner_simulation_counter = 0
    tie_simulation_counter = 0
    probability_array = np.zeros((simulations, 2))
    for sim in range(simulations):
        deck.shuffleDeck()
        # Our hole cards and the known board cannot be dealt again.
        cards_to_remove = list(hand) + table
        clean_shuffled_deck = [x for x in deck.currentDeck if x not in cards_to_remove]
        deck.table = table
        ### We are holding our hands constant but randomly selecting a hand for our opponent from the shuffled deck ###
        theoretical_hands = np.array([list(hand), clean_shuffled_deck[:2]])
        clean_shuffled_deck = clean_shuffled_deck[2:]
        ### We add randomly selected cards to the board from the shuffled deck to complete the river ###
        deck.table = deck.table + clean_shuffled_deck[:(5 - len(table))]
        winner = deck.whoWins(theoretical_hands)[1]
        if winner == 0:
            winner_simulation_counter += 1
        elif winner is None:  # fix: identity comparison for None (was `== None`)
            tie_simulation_counter += 1
        # Running frequencies after (sim + 1) simulations.
        probability_array[sim, 0] = winner_simulation_counter / (sim + 1)
        probability_array[sim, 1] = tie_simulation_counter / (sim + 1)
    return probability_array
def statusDictToInputArray(statusDict, hand_or_table, cards, tableDeck):
    """Encode a hand/board status dictionary as a numeric model-input vector.

    Args:
        statusDict: dict of hand/board state flags ('Pair', 'straightGap',
            'NumSuited', 'Triple', 'TwoPair', 'FullHouse',
            'StraightRunnerRunner', 'StraightSingleRunner').
        hand_or_table: "Hand" for the two hole cards, "Table" for the board.
        cards: card strings formatted "<rank>_<suit>" (e.g. "10_hearts").
        tableDeck: deck object; its `table` attribute is overwritten with
            `cards` and `evaluateHand([])` is called to rank the board
            (side effect, preserved from the original implementation).

    Returns:
        5-element ndarray for "Hand", 12-element ndarray for "Table",
        None for any other `hand_or_table` value (original behavior).
    """
    if len(cards) > 0:
        pair_status = 1 if statusDict['Pair'] else 0
        # straightGap -> closeness score; gaps > 3 and the -1 "no draw"
        # sentinel both score 0.2.  The dict fallback also covers values the
        # original elif chain missed, which used to raise NameError.
        gap_scores = {0: 1, 1: 0.8, 2: 0.6, 3: 0.4}
        straight_gap_status = gap_scores.get(statusDict['straightGap'], 0.2)
        # Extract the numeric ranks from the "<rank>_<suit>" card strings.
        handNumbers = [int(card.split("_")[0]) for card in cards]
        if hand_or_table == "Hand":
            suited_status = 1 if statusDict['NumSuited'] == 2 else 0
            # Normalise ranks by the ace (14) so the features lie in (0, 1].
            card1_status = handNumbers[0] / 14
            card2_status = handNumbers[1] / 14
        if hand_or_table == "Table":
            suited_scores = {4: 0.75, 3: 0.5, 2: 0.25, 1: 0}
            if statusDict['NumSuited'] > 4:
                suited_status = 1
            else:
                # NumSuited == 0 falls back to 0 (used to raise NameError).
                suited_status = suited_scores.get(statusDict['NumSuited'], 0)
        triple_status = 1 if statusDict['Triple'] else 0
        two_pair_status = 1 if statusDict['TwoPair'] else 0
        full_house_status = 1 if statusDict['FullHouse'] else 0
        runner_runner_status = 1 if statusDict['StraightRunnerRunner'] else 0
        single_runner_status = 1 if statusDict['StraightSingleRunner'] else 0
        # Rank the board itself and one-hot encode the strong made-on-board
        # ranks; anything else (including no made hand) encodes as all zeros.
        tableDeck.table = cards
        table_rank, _ = tableDeck.evaluateHand([])
        one_hot_ranks = ["straight", "flush", "four of a kind", "straight flush"]
        additional_table_status = [1 if table_rank == rank else 0
                                   for rank in one_hot_ranks]
    else:
        # No cards yet (e.g. pre-flop board): every feature is zero.
        pair_status = 0
        suited_status = 0
        straight_gap_status = 0
        card1_status = 0
        card2_status = 0
        triple_status = 0
        two_pair_status = 0
        full_house_status = 0
        runner_runner_status = 0
        single_runner_status = 0
        additional_table_status = [0, 0, 0, 0]
    if hand_or_table == "Hand":
        return np.array([pair_status, suited_status, straight_gap_status,
                         card1_status, card2_status])
    elif hand_or_table == "Table":
        return np.array([pair_status, suited_status, straight_gap_status,
                         triple_status, two_pair_status, full_house_status,
                         runner_runner_status, single_runner_status]
                        + additional_table_status)
|
# -*- coding: utf-8 -*-
# Copyright 2020 Ali Akhtari <https://github.com/AliAkhtari78>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import requests
class Request:
    """Build a `requests.Session` preconfigured with cookies and proxies.

    Cookies are read from a Netscape-format cookies.txt file, headers and
    proxies are taken verbatim from the constructor arguments.
    """

    def __init__(self, cookie_file: str = None, headers: dict = None, proxy: dict = None):
        """Initialize the request configuration.

        Args:
            cookie_file: path to a Netscape cookies.txt file, or None.
            headers: optional dict of HTTP headers (currently not applied
                by request(); kept for future use).
            proxy: optional dict of proxies compatible with requests.

        Raises:
            OSError: if `cookie_file` is given but cannot be read.
        """
        if cookie_file is None:
            self.cookie = None
        else:
            self.cookie_file = cookie_file
            self.cookie = self._parse_cookie_file()
        # Bugfix: always define the attribute; the original left
        # `self.headers` unset when `headers` was None, so any later access
        # raised AttributeError.
        self.headers = headers
        self.proxy = proxy

    def _parse_cookie_file(self) -> dict:
        """Parse a Netscape cookies.txt file into a name -> value dict.

        Comment lines (starting with '#'), blank lines and malformed lines
        with fewer than seven tab-separated fields are skipped; the original
        raised IndexError on blank/short lines.  NOTE(review): lines prefixed
        '#HttpOnly_' are also skipped by this rule — confirm that is intended.
        """
        cookies = {}
        with open(self.cookie_file, 'r') as fp:
            for line in fp:
                if line.startswith('#'):
                    continue
                # Netscape format: domain, flag, path, secure, expiry,
                # name, value — tab separated.
                line_fields = line.strip().split('\t')
                if len(line_fields) >= 7:
                    cookies[line_fields[5]] = line_fields[6]
        return cookies

    def request(self) -> "requests.Session":
        """Create a requests session with the configured cookies and proxies.

        NOTE: `self.headers` is intentionally not applied here (the header
        update was commented out in the original implementation).
        """
        request_session = requests.Session()
        if self.cookie is not None:
            request_session.cookies.update(self.cookie)
        if self.proxy is not None:
            request_session.proxies.update(self.proxy)
        return request_session
|
# -*- coding: utf-8 -*-
"""Tests for Safari Cookies (Cookies.binarycookies) files."""
import io
import os
import unittest
from dtformats import errors
from dtformats import safari_cookies
from tests import test_lib
class BinaryCookiesFileTest(test_lib.BaseTestCase):
  """Safari Cookies (Cookies.binarycookies) file tests."""

  # pylint: disable=protected-access

  # A well-formed file header: the b'cook' signature (0x63 0x6f 0x6f 0x6b)
  # followed by the big-endian page count and per-page size table.
  _FILE_HEADER_DATA = bytes(bytearray([
      0x63, 0x6f, 0x6f, 0x6b, 0x00, 0x00, 0x00, 0x1d, 0x00, 0x00, 0x00, 0x76,
      0x00, 0x00, 0x00, 0x95, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x01, 0x31,
      0x00, 0x00, 0x00, 0xcd, 0x00, 0x00, 0x06, 0xc9, 0x00, 0x00, 0x02, 0x34,
      0x00, 0x00, 0x01, 0x2c, 0x00, 0x00, 0x02, 0x08, 0x00, 0x00, 0x03, 0x3d,
      0x00, 0x00, 0x00, 0xa2, 0x00, 0x00, 0x04, 0x08, 0x00, 0x00, 0x03, 0x58,
      0x00, 0x00, 0x02, 0x4e, 0x00, 0x00, 0x02, 0x88, 0x00, 0x00, 0x05, 0x6e,
      0x00, 0x00, 0x01, 0x05, 0x00, 0x00, 0x01, 0x20, 0x00, 0x00, 0x00, 0xd3,
      0x00, 0x00, 0x00, 0x76, 0x00, 0x00, 0x01, 0x8d, 0x00, 0x00, 0x00, 0xf4,
      0x00, 0x00, 0x01, 0x95, 0x00, 0x00, 0x00, 0x6a, 0x00, 0x00, 0x00, 0x77,
      0x00, 0x00, 0x00, 0xed, 0x00, 0x00, 0x00, 0xa6, 0x00, 0x00, 0x00, 0xa3,
      0x00, 0x00, 0x00, 0x76]))

  # The same header with the 4 signature bytes replaced by 0xff, used to
  # exercise the ParseError path for an unsupported signature.
  _FILE_HEADER_DATA_BAD_SIGNATURE = bytes(bytearray([
      0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x1d, 0x00, 0x00, 0x00, 0x76,
      0x00, 0x00, 0x00, 0x95, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x01, 0x31,
      0x00, 0x00, 0x00, 0xcd, 0x00, 0x00, 0x06, 0xc9, 0x00, 0x00, 0x02, 0x34,
      0x00, 0x00, 0x01, 0x2c, 0x00, 0x00, 0x02, 0x08, 0x00, 0x00, 0x03, 0x3d,
      0x00, 0x00, 0x00, 0xa2, 0x00, 0x00, 0x04, 0x08, 0x00, 0x00, 0x03, 0x58,
      0x00, 0x00, 0x02, 0x4e, 0x00, 0x00, 0x02, 0x88, 0x00, 0x00, 0x05, 0x6e,
      0x00, 0x00, 0x01, 0x05, 0x00, 0x00, 0x01, 0x20, 0x00, 0x00, 0x00, 0xd3,
      0x00, 0x00, 0x00, 0x76, 0x00, 0x00, 0x01, 0x8d, 0x00, 0x00, 0x00, 0xf4,
      0x00, 0x00, 0x01, 0x95, 0x00, 0x00, 0x00, 0x6a, 0x00, 0x00, 0x00, 0x77,
      0x00, 0x00, 0x00, 0xed, 0x00, 0x00, 0x00, 0xa6, 0x00, 0x00, 0x00, 0xa3,
      0x00, 0x00, 0x00, 0x76]))

  # Page data containing a single cookie record (the trailing ASCII bytes
  # spell an "SWID" cookie for the ".go.com" domain with path "/").
  _PAGE_DATA = bytes(bytearray([
      0x6c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x62, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00,
      0x6a, 0x00, 0x00, 0x00, 0x3d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x77, 0xe5, 0x94, 0xce, 0x41,
      0x00, 0x00, 0x00, 0x70, 0x2d, 0x8b, 0xb7, 0x41, 0x53, 0x57, 0x49, 0x44,
      0x00, 0x43, 0x42, 0x45, 0x43, 0x37, 0x46, 0x30, 0x42, 0x2d, 0x43, 0x36,
      0x34, 0x45, 0x2d, 0x34, 0x32, 0x39, 0x30, 0x2d, 0x38, 0x37, 0x33, 0x45,
      0x2d, 0x31, 0x42, 0x31, 0x38, 0x33, 0x30, 0x33, 0x31, 0x33, 0x39, 0x35,
      0x44, 0x00, 0x2e, 0x67, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x00, 0x2f, 0x00]))

  def testDebugPrintFileHeader(self):
    """Tests the _DebugPrintFileHeader function."""
    output_writer = test_lib.TestOutputWriter()
    test_file = safari_cookies.BinaryCookiesFile(output_writer=output_writer)

    data_type_map = test_file._GetDataTypeMap('binarycookies_file_header')
    file_header = data_type_map.CreateStructureValues(
        number_of_pages=1,
        signature=b'cook')

    test_file._DebugPrintFileHeader(file_header)

  def testDebugPrintRecordHeader(self):
    """Tests the _DebugPrintRecordHeader function."""
    output_writer = test_lib.TestOutputWriter()
    test_file = safari_cookies.BinaryCookiesFile(output_writer=output_writer)

    data_type_map = test_file._GetDataTypeMap('binarycookies_record_header')
    record_header = data_type_map.CreateStructureValues(
        creation_time=0,
        expiration_time=1,
        flags=2,
        name_offset=3,
        path_offset=4,
        size=5,
        unknown1=6,
        unknown2=7,
        unknown3=8,
        url_offset=9,
        value_offset=10)

    test_file._DebugPrintRecordHeader(record_header)

  def testReadCString(self):
    """Tests the _ReadCString function."""
    output_writer = test_lib.TestOutputWriter()
    test_file = safari_cookies.BinaryCookiesFile(output_writer=output_writer)

    page_data = b'string\x00'
    cstring = test_file._ReadCString(page_data, 0)
    self.assertEqual(cstring, 'string')

    # Truncating the terminating NUL byte must raise a ParseError.
    with self.assertRaises(errors.ParseError):
      test_file._ReadCString(page_data[:-1], 0)

  def testReadFileFooter(self):
    """Tests the _ReadFileFooter function."""
    output_writer = test_lib.TestOutputWriter()
    test_file = safari_cookies.BinaryCookiesFile(output_writer=output_writer)

    test_file_path = self._GetTestFilePath(['Cookies.binarycookies'])
    self._SkipIfPathNotExists(test_file_path)

    with open(test_file_path, 'rb') as file_object:
      # The footer is the last 8 bytes of the file.
      file_object.seek(-8, os.SEEK_END)
      test_file._ReadFileFooter(file_object)

  def testReadFileHeader(self):
    """Tests the _ReadFileHeader function."""
    output_writer = test_lib.TestOutputWriter()
    test_file = safari_cookies.BinaryCookiesFile(output_writer=output_writer)

    file_object = io.BytesIO(self._FILE_HEADER_DATA)
    test_file._ReadFileHeader(file_object)

    # A header with a wrong signature must raise a ParseError.
    file_object = io.BytesIO(self._FILE_HEADER_DATA_BAD_SIGNATURE)
    with self.assertRaises(errors.ParseError):
      test_file._ReadFileHeader(file_object)

    # A truncated header must raise a ParseError.
    file_object = io.BytesIO(self._FILE_HEADER_DATA[:-1])
    with self.assertRaises(errors.ParseError):
      test_file._ReadFileHeader(file_object)

  def testReadPage(self):
    """Tests the _ReadPage function."""
    output_writer = test_lib.TestOutputWriter()
    test_file = safari_cookies.BinaryCookiesFile(output_writer=output_writer)

    test_file_path = self._GetTestFilePath(['Cookies.binarycookies'])
    self._SkipIfPathNotExists(test_file_path)

    with open(test_file_path, 'rb') as file_object:
      # Reading the header populates _page_sizes, needed to read a page.
      test_file._ReadFileHeader(file_object)
      test_file._ReadPage(
          file_object, file_object.tell(), test_file._page_sizes[0])

    # TODO: test errors.ParseError exception being raised.

  def testReadPages(self):
    """Tests the _ReadPages function."""
    output_writer = test_lib.TestOutputWriter()
    test_file = safari_cookies.BinaryCookiesFile(output_writer=output_writer)

    test_file_path = self._GetTestFilePath(['Cookies.binarycookies'])
    self._SkipIfPathNotExists(test_file_path)

    with open(test_file_path, 'rb') as file_object:
      test_file._ReadFileHeader(file_object)
      test_file._ReadPages(file_object)

  def testReadRecord(self):
    """Tests the _ReadRecord function."""
    output_writer = test_lib.TestOutputWriter()
    test_file = safari_cookies.BinaryCookiesFile(output_writer=output_writer)

    test_file._ReadRecord(self._PAGE_DATA, 0)

    # A truncated record must raise a ParseError.
    with self.assertRaises(errors.ParseError):
      test_file._ReadRecord(self._PAGE_DATA[:-1], 0)

  def testReadFileObject(self):
    """Tests the ReadFileObject function."""
    output_writer = test_lib.TestOutputWriter()
    test_file = safari_cookies.BinaryCookiesFile(
        debug=True, output_writer=output_writer)

    test_file_path = self._GetTestFilePath(['Cookies.binarycookies'])
    self._SkipIfPathNotExists(test_file_path)

    test_file.Open(test_file_path)
if __name__ == '__main__':
  # Allow running the tests in this module directly.
  unittest.main()
|
from XML_parser import XMLParser
from keras.callbacks import ModelCheckpoint, CSVLogger
from image_generator import ImageGenerator
from models import simpler_CNN
from utils import split_data
from utils import get_labels
# --- Experiment configuration ------------------------------------------------
dataset_name = 'german_open_2017'
batch_size = 30
num_epochs = 250
input_shape = (48, 48, 3)  # (height, width, channels) fed to the CNN
trained_models_path = '../trained_models/object_models/simpler_CNN'
ground_truth_path = '../datasets/german_open_dataset/annotations/'
images_path = '../datasets/german_open_dataset/images/'
labels = get_labels(dataset_name)
#num_classes = len(list(labels.keys()))
num_classes = 7  # hard-coded; presumably equals len(labels) — TODO confirm
use_bounding_boxes = True

# --- Data loading and train/validation split ---------------------------------
data_loader = XMLParser(ground_truth_path, dataset_name,
                        use_bounding_boxes=use_bounding_boxes)
ground_truth_data = data_loader.get_data()
train_keys, val_keys = split_data(ground_truth_data,
                                  training_ratio=.6,
                                  do_shuffle=True)
image_generator = ImageGenerator(ground_truth_data, batch_size, input_shape[:2],
                                 train_keys, val_keys, None,
                                 path_prefix=images_path,
                                 vertical_flip_probability=0,
                                 do_random_crop=False,
                                 use_bounding_boxes=use_bounding_boxes)

# --- Model definition and compilation ----------------------------------------
model = simpler_CNN(input_shape, num_classes)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
print(model.summary())
print('Number classes: ', num_classes)
print('Classes:', labels)
print('Number of training samples:', len(train_keys))
print('Number of validation samples:', len(val_keys))

# --- Training with per-epoch checkpoints and CSV logging ---------------------
# Checkpoint filenames embed the epoch number and validation loss.
model_names = trained_models_path + '.{epoch:02d}-{val_loss:.2f}.hdf5'
model_checkpoint = ModelCheckpoint(model_names,
                                   monitor='val_loss',
                                   verbose=1,
                                   save_best_only=False,
                                   save_weights_only=False)
csv_logger = CSVLogger('log_files/classifier_training.log')
model.fit_generator(image_generator.flow(mode='train'),
                    #steps_per_epoch=int(len(train_keys)/batch_size),
                    steps_per_epoch=200,  # fixed step count, not a full pass
                    epochs=num_epochs, verbose=1,
                    callbacks=[csv_logger, model_checkpoint],
                    validation_data= image_generator.flow('val'),
                    validation_steps=int(len(val_keys)/batch_size))
|
import numpy as np
import tensorflow as tf
import random as rn
import os
import pandas as pd
from keras.callbacks import ModelCheckpoint
from keras import backend as K
import keras
from keras.layers.normalization import BatchNormalization
#from keras_layer_normalization import LayerNormalization
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation, concatenate
from keras.layers import Bidirectional, GlobalMaxPool1D
from keras.models import Model, Sequential
from keras.callbacks import EarlyStopping
from keras import initializers, regularizers, constraints, optimizers, layers
from gensim.models import Word2Vec
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import nltk
from nltk.stem import PorterStemmer
from nltk.tokenize.treebank import TreebankWordDetokenizer, TreebankWordTokenizer
from nltk.corpus import stopwords
import time
embed_size = 30  # embedding vector length
max_features = 25000  # number of unique words (columns of the embedding matrix)
maxlen = 86  # overwritten below once the real maximum tweet length is known

### Train your own word embeddings ###
print('Loading data...')
tsla30 = pd.read_csv('C://Users/crefsgaard/Desktop/speciale/data/tsla30nycsv.csv', encoding = 'latin1',lineterminator='\n')

### PRE-PROCESS TEXT DATA ###
tokenizer = TreebankWordTokenizer()
tsla30['text'] = [tokenizer.tokenize(tweet) for tweet in tsla30['text']]

# Find the longest tweet and set maxlen to its length.
maxlen = 0
for i in range(len(tsla30['text'])):
    a = len(tsla30['text'][i])
    if a > maxlen:
        maxlen = a
maxlen = maxlen+2  # max number of words in a tweet (+2 slack)

# Stemming
stemmer = PorterStemmer()
tsla30['text'] = tsla30['text'].apply(lambda x: [stemmer.stem(y) for y in x])

# Calculating unique occurrences: DF maps word -> set of tweet indices
# containing that word.
DF = {}
for tweet in range(len(tsla30['text'])):
    tokens = tsla30['text'][tweet]
    for w in tokens:
        try:
            DF[w].add(tweet)
        except:
            DF[w] = {tweet}

# Collapse each posting set to its document frequency (tweet count per word).
for i in DF:
    DF[i] = len(DF[i])
total_vocab = [x for x in DF]

# Calculating frequencies (fraction of tweets containing each word).
DF2 = DF.copy()
for w in DF2:
    DF2[w] = DF2[w]/len(tsla30)
#sorted_dict = sorted(DF2.items(), key=lambda kv: kv[1])

# Low-frequency stopwords: words occurring in fewer than 5 tweets.
lowfreq_stopWords = []
for word in DF:
    if DF[word] < 5:
        lowfreq_stopWords.append(word)
# NOTE(review): only a 5000-word slice of the low-frequency list is removed
# here — confirm this is intentional (perhaps a memory/runtime workaround).
test = lowfreq_stopWords[40000:45000]

print('removing stopwords')
start = time.time()
tsla30['text'] = [[word for word in tweet if not word in test] for tweet in tsla30['text']]
end = time.time()
print(end-start)
### FINISHED ###

### Word totals ###
# total_vocab: 58909
# lowfreq length: 40939
# total vocab after removing freq words: 17,970
# Detokenizing -> file -> load -> tokenize takes us from 17,970 to 18,057.
# Just removing freq words, detokenizing and tokenizing in Keras gives
# 17,997 unique words.
# corpus specific: 3
# nltk stopwords: 115
# total: 118
# With all stopwords filtered out there are 17,947 unique words. There should
# have been 18,057-118=17,939, but a few stopwords do not occur in our tweets.

# Write the texts, cleaned of stopwords, to file.
tsla_TEST = tsla30.copy()

# After removing non-frequent words, detokenize and write to .csv
detokenizer = TreebankWordDetokenizer()
tsla30['text'] = [detokenizer.detokenize(tweet) for tweet in tsla30['text']]
os.chdir('C://Users/crefsgaard/Desktop/speciale/data')
tsla30.to_csv('nonfreqRemoved.csv', encoding="latin1", index = False)
### Load the file with non-frequent words removed, then strip nltk and
### corpus-specific stopwords. ###
tesla = pd.read_csv('C://Users/Morten/Desktop/Speciale/Python/nonfreqRemoved.csv', encoding = 'latin1',lineterminator='\n')

# Tokenize
tokenizer = TreebankWordTokenizer()
tesla['text'] = [tokenizer.tokenize(tweet) for tweet in tesla['text']]

# Hard-coded copy of nltk's English stopword list.
nltk_stopwords = ['ourselves', 'hers', 'between', 'yourself', 'but', 'again', 'there', 'about', 'once', 'during', 'very', 'having', 'with', 'they', 'own', 'an',
                  'be', 'some', 'for', 'do', 'its', 'yours', 'such', 'into', 'of', 'most', 'itself', 'other', 'off', 'is', 's', 'am', 'or', 'who', 'as', 'from',
                  'him', 'each', 'the', 'themselves', 'until', 'below', 'are', 'we', 'these', 'your', 'his', 'through', 'don', 'me', 'were', 'her', 'more', 'himself',
                  'this', 'should', 'our', 'their', 'while', 'both', 'to', 'ours', 'had', 'she', 'all', 'when', 'at', 'any', 'before', 'them', 'same', 'and', 'been',
                  'have', 'in', 'will', 'on', 'does', 'yourselves', 'then', 'that', 'because', 'what', 'why', 'so', 'can', 'did', 'he', 'you', 'herself', 'has', 'just',
                  'where', 'too', 'only', 'myself', 'which', 'those', 'i', 'after', 'few', 'whom', 'being', 'if', 'theirs', 'my', 'a', 'by', 'doing', 'it', 'how',
                  'further', 'was', 'here', 'than']

# Stem the stopwords so they match the already-stemmed tweet tokens.
stemmer = PorterStemmer()
nltk_stopwords = [stemmer.stem(x) for x in nltk_stopwords]

# Calculating unique occurrences (same document-frequency build as above).
DF = {}
for tweet in range(len(tesla['text'])):
    tokens = tesla['text'][tweet]
    for w in tokens:
        try:
            DF[w].add(tweet)
        except:
            DF[w] = {tweet}

# Collapse each posting set to its document frequency.
for i in DF:
    DF[i] = len(DF[i])
total_vocab = [x for x in DF]

# Calculating frequencies
DF2 = DF.copy()
for w in DF2:
    DF2[w] = DF2[w]/len(tesla)

# All stopwords: corpus-specific words present in >50% of tweets, plus the
# stemmed nltk list.
all_stopWords = []
for word in DF2:
    if DF2[word] > 0.5:
        all_stopWords.append(word)
for word in nltk_stopwords:
    all_stopWords.append(word)

print('removing corpus specific and predefined stopwords')
start = time.time()
tesla['text'] = [[word for word in tweet if not word in all_stopWords] for tweet in tesla['text']]
end = time.time()
print(end-start)

# Compute maxlen (length of the longest cleaned tweet, +1 slack).
maxlen = 0
for tweet in tesla['text']:
    b = len(tweet)
    if b > maxlen:
        maxlen = b
maxlen=maxlen+1
#############################
### Train Word2Vec model ###
start = time.time()
# Skip-gram (sg=1) with negative sampling; only words seen in >= 5 tweets.
w2v = Word2Vec(tesla['text'], size=embed_size, min_count=5, window=5, sg = 1, negative=15, iter=10)
len(list(w2v.wv.vocab))  # number of words embeddings were trained for
end = time.time()
print(end-start)

# Get trained embeddings and check similarities.
word_vectors = w2v.wv
result = word_vectors.similar_by_word("market")
print("Most similar to 'model':\n", result[:3])

# Map words to indexes.
word2id = {k: v.index for k, v in word_vectors.vocab.items()}

# Save trained embeddings.
os.chdir('C://Users/Morten/Desktop/Speciale/Python')
word_vectors.save_word2vec_format('trainedEmb.txt', binary=False)
### FINISHED ###

# Write tokenized sentences back to real sentences so they can be
# number-tokenized with Keras later.
detokenizer = TreebankWordDetokenizer()
tesla['text'] = [detokenizer.detokenize(tweet) for tweet in tesla['text']]

# Normalize input data and split to train/test.
x = tesla.copy()
#x = x.sample(frac=1).reset_index(drop=True)  # so x is the sampled data
x=x.sort_values(by='timestamp.1')
y = x['Movement']
x = x[['text', 'followers_count', 'vol_lag', 'verified', 'move_lag\r\r']]
#x = x[['text', 'followers_count', 'vol_lag', 'move_lag\r\r']]
x['followers_count'] = (x['followers_count']-x['followers_count'].mean())/x['followers_count'].std()  # standardise column
x['vol_lag'] = (x['vol_lag']-x['vol_lag'].mean())/x['vol_lag'].std()  # standardise column
x['move_lag\r\r'] = (x['move_lag\r\r']-x['move_lag\r\r'].mean())/x['move_lag\r\r'].std()  # standardise column

# Keep the timestamps of the test half for later chart plotting.
timestamp =tesla.copy()
timestamp =timestamp.sort_values(by='timestamp.1')
timestamp = timestamp['timestamp.1']
# NOTE(review): 194102 is the hard-coded chronological train/test split row —
# confirm it matches the dataset size.
timestamp = timestamp[194102:]
buffer, timestamp = train_test_split(timestamp, test_size=0.5, random_state=42)
list(x.columns.values)

# Chronological split at row 194102; only the training part is shuffled
# (with a fixed seed so x and y stay aligned).
x1 = x[:194102]
x2 = x[194102:]
y1 = y[:194102]
y2 = y[194102:]
x1 = x1.sample(frac=1,random_state=42)
y1 = y1.sample(frac=1,random_state=42)
x = x1.append(x2)
y = y1.append(y2)

# Tokenize words to numbers, apply padding and define the final dataset.
print('Tokenize data...')
tokenizer = Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(list(x['text']))
list_tokenized = tokenizer.texts_to_sequences(x['text'])
x_lstm = pad_sequences(list_tokenized, maxlen=maxlen)
x_nn = x[['followers_count', 'vol_lag', 'verified', 'move_lag\r\r']]
x#_nn = x[['followers_count', 'vol_lag', 'move_lag\r\r']]

# Train / validation / test split: same 194102 boundary, then the test half
# is split 50/50 into validation and test (fixed seed keeps the lstm and nn
# inputs aligned).
x_train_nn = x_nn[:194102]
x_train_lstm= x_lstm[:194102]
y_train = y[:194102]
x_test_lstm_shuf = x_lstm[194102:]
x_test_shuf = x_nn[194102:]
y_test_shuf = y[194102:]
x_val_nn, x_test_nn, y_val, y_test = train_test_split(x_test_shuf, y_test_shuf, test_size=0.5, random_state=42)
x_val_lstm, x_test_lstm, y_val, y_test = train_test_split(x_test_lstm_shuf, y_test_shuf, test_size=0.5, random_state=42)
#x_train_nn = x_train_nn.append(x_val_nn)
#x_train_lstm = np.append(x_train_lstm,x_val_lstm,axis=0)
#y_train = y_train.append(y_val)
#x_train_nn, x_test_nn, y_train, y_test = train_test_split(x_nn, y, test_size=0.1, random_state=42)
#x_train_lstm, x_test_lstm, y_train, y_test = train_test_split(x_lstm, y, test_size=0.1, random_state=42)
print(len(x_train_nn), 'train sequences')
print(len(x_test_nn), 'test sequences')

print('Load our own pre-trained word embeddings...')
EMBEDDING_FILE = 'C://Users/Morten/Desktop/Speciale/Python/trainedEmb.txt'
# Read the pre-trained word embeddings and build the embedding matrix.
def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')
# word -> vector lookup built from the saved word2vec text file.
embeddings_index = dict(get_coefs(*o.strip().split()) for o in open(EMBEDDING_FILE, encoding="utf-8"))
all_embs = np.stack(embeddings_index.values())
emb_mean,emb_std = all_embs.mean(), all_embs.std()
emb_mean,emb_std
word_index = tokenizer.word_index  # unique words in the data, word -> index
nb_words = min(max_features, len(word_index))  # cap vocabulary at the desired maximum
# Randomly initialise the embedding matrix with the pre-trained statistics,
# then overwrite the rows for which we have a trained vector.
embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size))
for word, i in word_index.items():
    if i >= max_features: continue
    embedding_vector = embeddings_index.get(word)
    # Keras word indices start at 1, hence the i-1 row offset.
    if embedding_vector is not None: embedding_matrix[i-1] = embedding_vector

# TODO
# Next run: NN with 3 layers - look up what a sensible neuron count could be.
# Look at BATCH and LAYER normalization.
# Consider which arguments to add to the LSTM and the NN.
# Figure out how to plot training loss and validation loss.
print('defining model')

### BiLSTM with NN on top ###
#http://digital-thinking.de/deep-learning-combining-numerical-and-text-features-in-deep-neural-networks/
# Loss function reference: https://codeburst.io/neural-networks-for-algorithmic-trading-volatility-forecasting-and-custom-loss-functions-c030e316ea7e
# The loss function must be written with the Keras backend so it can be used
# in the keras/tensorflow networks later.
def stock_loss(y_true, y_pred):
    """Asymmetric trading loss (Keras backend tensors).

    When the prediction disagrees with the true move in sign, the sample is
    penalised `alpha` times the summed magnitudes; otherwise the plain
    absolute error is used.
    """
    alpha = 2.
    wrong_sign = K.less(y_true * y_pred, 0)
    sign_penalty = alpha * (K.abs(y_true) + K.abs(y_pred))
    plain_error = K.abs(y_true - y_pred)
    per_sample = K.switch(wrong_sign, sign_penalty, plain_error)
    return K.mean(per_sample, axis=-1)
# Add an extra all-zero row at index 0 of the embedding matrix (the original
# comment says "for bias"; presumably this is the padding-index row — TODO
# confirm).
newrow = np.array([np.zeros(embed_size)])
A = np.append(newrow, embedding_matrix, axis = 0)

# Text branch: embedding -> bidirectional LSTM -> single linear unit.
model = Sequential()
nlp_input = Input(shape=(maxlen,), name = 'nlp_input')
meta_input = Input(shape=(4,), name = 'meta_input')
emb = Embedding(nb_words+1, embed_size, weights = [A])(nlp_input)
nlp_out = Bidirectional(LSTM(maxlen,
                             activation = 'tanh',
                             use_bias = True,
                             unit_forget_bias = True,
                             kernel_initializer = keras.initializers.glorot_uniform(seed =62)))(emb)
nlp_out = Dense(1, activation = 'linear', kernel_initializer = keras.initializers.RandomNormal(mean=0, stddev=0.05,seed=32),bias_initializer='zeros')(nlp_out)
# Concatenate the text score with the numeric (meta) features, then run the
# combined vector through a small batch-normalised feed-forward stack.
x = keras.layers.concatenate([nlp_out, meta_input])
#x = concatenate([nlp_out, meta_input])
x = BatchNormalization()(x)
x = Dense(4, activation='relu', kernel_initializer = keras.initializers.RandomNormal(mean=0, stddev=0.25,seed=32))(x)
x = BatchNormalization()(x)
x = Dense(3, activation='relu', kernel_initializer = keras.initializers.RandomNormal(mean=0, stddev=0.30,seed=32))(x)
x = BatchNormalization()(x)
#x = Dense(4, activation='relu', kernel_initializer = keras.initializers.RandomNormal(mean=0, stddev=0.25,seed=32), kernel_regularizer=regularizers.l2(0.005))(x)
#x = Dropout(0.25)(x)
#x = Dense(2, activation='relu', kernel_initializer = keras.initializers.RandomNormal(mean=0, stddev=0.23,seed=42))(x)
#x = BatchNormalization()(x)
#x = Dropout(0.25)(x)
#x = Dense(3, activation='relu', kernel_initializer = keras.initializers.RandomNormal(mean=0, stddev=0.25,seed=45), kernel_regularizer=regularizers.l2(0.001))(x)
#x = BatchNormalization()(x)
#x = Dropout(0.25)(x)
#x = Dense(3, activation='relu', kernel_initializer = keras.initializers.RandomNormal(mean=0, stddev=0.22,seed=44))(x)
#x = Dense(5, activation='relu', kernel_initializer = keras.initializers.RandomNormal(mean=0, stddev=0.3,seed=43))(x)
x = Dense(2, activation='relu', kernel_initializer = keras.initializers.RandomNormal(mean=0, stddev=0.30,seed=41))(x)
x = BatchNormalization()(x)
x = Dense(1, activation='linear', kernel_initializer = keras.initializers.RandomNormal(mean=0,stddev=0.30,seed=52))(x)
model_lstmnn = Model(inputs=[nlp_input, meta_input], outputs=[x])

print('compile and train model')
start = time.time()
model_lstmnn.compile(loss=stock_loss,optimizer= keras.optimizers.Adam(lr=0.0005, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False))
# Checkpoint only the weights with the best validation loss.
filepath="C://Users/Morten/Desktop/Speciale/Python/weights.best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]
model_lstmnn.fit(x=[x_train_lstm, x_train_nn], y=y_train, batch_size=1024,epochs=75,validation_data=([x_val_lstm, x_val_nn], y_val), shuffle=True,callbacks=callbacks_list)
end = time.time()
print((end-start)/3600)  # hours

# Things to try if it does not work:
# more std.devs
# dropout
# lambda (regularisation strength)
# architecture
# What about embedding size e.g. 50? Check.
# Without pre-trained embeddings?
print('make predictions and calculate returns')
y_lstmnn_pred = model_lstmnn.predict([x_test_lstm, x_test_nn], batch_size=1024, verbose = 1)
#np.unique(y_lstmnn_pred)

# Store results as plain vectors.
y_true = np.array(y_test)
y_pred = y_lstmnn_pred
y_pred.min()
y_pred.max()
y_pred.mean()
len(np.unique(y_pred))

# Compute returns: elementwise product of the prediction (position size)
# and the realised movement, summed over the test set.
a = np.multiply(y_pred.T, y_true).T
a.sum()
### FINISHED ###
# Call back the model: rebuild the exact same architecture (verbatim copy of
# the definition above) so the checkpointed weights can be loaded into it.
model = Sequential()
nlp_input = Input(shape=(maxlen,), name = 'nlp_input')
meta_input = Input(shape=(4,), name = 'meta_input')
emb = Embedding(nb_words+1, embed_size, weights = [A])(nlp_input)
nlp_out = Bidirectional(LSTM(maxlen,
                             activation = 'tanh',
                             use_bias = True,
                             unit_forget_bias = True,
                             kernel_initializer = keras.initializers.glorot_uniform(seed =62)))(emb)
nlp_out = Dense(1, activation = 'linear', kernel_initializer = keras.initializers.RandomNormal(mean=0, stddev=0.05,seed=32),bias_initializer='zeros')(nlp_out)
x = keras.layers.concatenate([nlp_out, meta_input])
#x = concatenate([nlp_out, meta_input])
x = BatchNormalization()(x)
x = Dense(4, activation='relu', kernel_initializer = keras.initializers.RandomNormal(mean=0, stddev=0.25,seed=32))(x)
x = BatchNormalization()(x)
x = Dense(3, activation='relu', kernel_initializer = keras.initializers.RandomNormal(mean=0, stddev=0.30,seed=32))(x)
x = BatchNormalization()(x)
#x = Dense(4, activation='relu', kernel_initializer = keras.initializers.RandomNormal(mean=0, stddev=0.25,seed=32), kernel_regularizer=regularizers.l2(0.005))(x)
#x = Dropout(0.25)(x)
#x = Dense(2, activation='relu', kernel_initializer = keras.initializers.RandomNormal(mean=0, stddev=0.23,seed=42))(x)
#x = BatchNormalization()(x)
#x = Dropout(0.25)(x)
#x = Dense(3, activation='relu', kernel_initializer = keras.initializers.RandomNormal(mean=0, stddev=0.25,seed=45), kernel_regularizer=regularizers.l2(0.001))(x)
#x = BatchNormalization()(x)
#x = Dropout(0.25)(x)
#x = Dense(3, activation='relu', kernel_initializer = keras.initializers.RandomNormal(mean=0, stddev=0.22,seed=44))(x)
#x = Dense(5, activation='relu', kernel_initializer = keras.initializers.RandomNormal(mean=0, stddev=0.3,seed=43))(x)
x = Dense(2, activation='relu', kernel_initializer = keras.initializers.RandomNormal(mean=0, stddev=0.30,seed=41))(x)
x = BatchNormalization()(x)
x = Dense(1, activation='linear', kernel_initializer = keras.initializers.RandomNormal(mean=0,stddev=0.30,seed=52))(x)
model_lstmnn = Model(inputs=[nlp_input, meta_input], outputs=[x])
# END OF COPIED MODEL DEFINITION

# Load the best checkpointed weights and evaluate on the test set.
model_lstmnn.load_weights('C://Users/Morten/Desktop/Speciale/Python/weights.best.hdf5')
model_lstmnn.compile(loss=stock_loss,optimizer= keras.optimizers.Adam(lr=0.0002, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False))
y_lstmnn_pred = model_lstmnn.predict([x_test_lstm, x_test_nn], batch_size=1024, verbose = 1)

# Store results as plain vectors.
y_true = np.array(y_test)
y_pred = y_lstmnn_pred
y_pred.min()
y_pred.max()
y_pred.mean()
len(np.unique(y_pred))

# Compute returns.
a = np.multiply(y_pred.T, y_true).T
a.sum()
### FINISHED ###
np.sum(y_pred)

# Export y_true, y_pred and timestamp for chart development.
# Save y_pred to a file if the result is sensible.
######## SANDBOX ######
### Training Multilayer Neural Network ###
# NOTE(review): x_nn_train / y_nn_train / x_nn_test are not defined earlier
# in this file — this sandbox section presumably ran against an older split.
model_nn = Sequential()
model_nn.add(Dense(8, input_dim=x_nn_train.shape[1], activation = 'relu'))
model_nn.add(Dense(5, activation = 'relu'))
model_nn.add(Dense(1, activation = None))
# Compile model
model_nn.compile(loss='mean_squared_error', optimizer = 'adam', metrics = ['mean_squared_error'])
# Fit model
model_nn.fit(x_nn_train, y_nn_train, batch_size=32, epochs = 10, validation_split=0.2)
# Predictions
y_nn_pred = model_nn.predict(x_nn_test, batch_size=32, verbose=1)
### FINISHED ###

# LSTM without pre-trained embeddings.
inp = Input(shape=(maxlen,))
x = Embedding(nb_words+1, embed_size)(inp)
x = Bidirectional(LSTM(100, return_sequences=True, dropout=0.25, recurrent_dropout=0.1))(x)
x = GlobalMaxPool1D()(x)
x = Dense(100, activation="tanh")(x)
x = Dropout(0.25)(x)
x = Dense(1, activation=None)(x)
model = Model(inputs=inp, outputs=x)
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mean_squared_error'])
# NOTE(review): x_tr / x_te are also undefined in this file.
model.fit(x_tr, y_train, batch_size=32, epochs=1, validation_split=0.2)
# Returns an array of predictions.
y_lstm_pred = model.predict([x_te], batch_size=32, verbose=1)
y_NN_input = y_lstm_pred
### FINISHED ###

# LSTM with pre-trained embeddings.
print('Bidirectional LSTM...')
inp = Input(shape=(maxlen,))
x = Embedding(nb_words, embed_size, weights=[embedding_matrix])(inp)
x = Bidirectional(LSTM(100, return_sequences=True, dropout=0.25, recurrent_dropout=0.1))(x)
x = GlobalMaxPool1D()(x)
x = Dense(100, activation="relu")(x)
x = Dropout(0.25)(x)
x = Dense(1, activation=None)(x)
model = Model(inputs=inp, outputs=x)
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mean_squared_error'])
model.fit(x_tr, y_train, batch_size=32, epochs=1)
### FINISHED ###
###### TSNE plot ########
def tsne_plot(model):
    """Project a word2vec model's vocabulary to 2-D with t-SNE and plot it.

    Bug fix: the original ignored its ``model`` argument and read the global
    ``w2v`` instead, so it could only ever plot one specific model.
    """
    labels = []
    tokens = []
    # One embedding vector (and its word label) per vocabulary entry.
    for word in model.wv.vocab:
        tokens.append(model[word])
        labels.append(word)
    tsne_w2v = TSNE(perplexity=30, n_components=2, init='pca', n_iter=1000, random_state=23)
    new_values = tsne_w2v.fit_transform(tokens)
    x = []
    y = []
    for value in new_values:
        x.append(value[0])
        y.append(value[1])
    plt.figure(figsize=(16, 16))
    for i in range(len(x)):
        plt.scatter(x[i], y[i])
        plt.annotate(labels[i],
                     xy=(x[i], y[i]),
                     xytext=(5, 2),
                     textcoords='offset points',
                     ha='right',
                     va='bottom')
    plt.show()
# Visualize the trained word2vec vocabulary with t-SNE.
tsne_plot(w2v)
############################
# Reference on checkpointing Keras models during training:
#https://machinelearningmastery.com/check-point-deep-learning-models-keras/
|
from interface import *
|
#!/usr/bin/env python
#
# First create a config file in either /etc or ~/
# Contents should be as follows
#
# -------------------------------------------------
# [global]
# default = https://gitlab.com
# ssl_verify = true
# timeout = 5
#
# [gitlab]
# url = https://gitlab.com
# private_token = <your private gitlab token here>
# api_version = 4
# -------------------------------------------------
#
# Then you must set an environment variable..
#
# PYTHON_GITLAB_CFG=~/.python-gitlab.cfg
#
# ..or..
#
# PYTHON_GITLAB_CFG=/etc/python-gitlab.cfg
#
# .. depending on where you created the config file.
#
# USAGE:
#
# mrpatcount.py 24118165 859
#
# Where:
#
# 24118165 is the project ID for ..
# https://gitlab.com/redhat/rhel/src/kernel/rhel-8
#
# 859 is an MR ID number.
#
import sys
import gitlab
# Command-line arguments: GitLab project ID and merge-request IID.
# sys.argv entries are already strings, so the str() wrappers were redundant.
project_id = sys.argv[1]
mr_id = sys.argv[2]

# Connect using the [gitlab] section of the python-gitlab config file
# (located via the PYTHON_GITLAB_CFG environment variable; see header).
gl = gitlab.Gitlab.from_config('gitlab')
gl_project = gl.projects.get(project_id)
mr = gl_project.mergerequests.get(mr_id)

# Print the number of commits contained in the merge request.
ncommits = len(mr.commits())
print(ncommits)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 23 17:47:11 2020
@author: shihyu
"""
import pandas as pd
from venn import venn
import matplotlib.pyplot as plt
# Read the three gene lists (one per column) from the Excel workbook and
# drop empty cells so each column becomes a clean set of gene names.
gene_file = pd.read_excel(r'Input/Genelist.xlsx')
SetA = set(gene_file.iloc[:, 0].dropna())
SetB = set(gene_file.iloc[:, 1].dropna())
SetC = set(gene_file.iloc[:, 2].dropna())

# Draw a 3-way Venn diagram of the lists.
genelist = {"A": SetA, "B": SetB, "C": SetC}
venn(genelist)
plt.show()

# Genes common to all three lists.
# Bug fix: the original referenced an undefined name ``cgmh``; the evident
# intent is the intersection of the three sets.
intersec = SetA.intersection(SetB).intersection(SetC)
import numpy as np
from keras.layers import Input,Dense, Activation
from keras.models import Model
from keras.utils.generic_utils import get_custom_objects
def XOR_train_fully_keras():
    """Train a small fully-connected Keras network on the XOR truth table.

    Prints the raw predictions and the predictions rounded to two decimals.
    """
    # Since Q1 (XOR) only asks for drawing, in the code, I used Keras/TF codes.
    x = np.array([[0, 0],
                  [0, 1],
                  [1, 0],
                  [1, 1]])
    y = np.array([0, 1, 1, 0])
    # Renamed 'input' -> 'inp' so the built-in input() is not shadowed.
    inp = Input(shape=(2,))
    hidden1 = Dense(10, activation='relu')(inp)
    hidden2 = Dense(10, activation='relu')(hidden1)
    out = Dense(1, activation='linear')(hidden2)
    model = Model(inputs=[inp], outputs=[out])
    model.compile(optimizer='adam', loss='mse', loss_weights=[1])
    model.fit(x=[x], y=[y], batch_size=4, epochs=800)
    y_p = model.predict(x)
    print(y_p)
    y_p = np.round(y_p, 2)
    print(y_p)
def XOR_check_drawing():
    """Hand-evaluate the two-layer XOR perceptron network from the drawing.

    Runs a single input sample through hard-coded weight vectors using a
    step activation (>= 0 -> 1), prints the network output and returns it.

    Returns:
        int: output of the final perceptron (0 or 1).
    """
    # First column is the bias input (always 1).
    x = np.array([[1, 0, 0],
                  [1, 0, 1],
                  [1, 1, 0],
                  [1, 1, 1]])
    # XOR labels, kept for reference (unused in this single-sample check).
    y = np.array([0, 1, 1, 0])
    # Weight vectors; the first entry of each is the bias weight.
    w1 = np.array([-1, 1, -1])
    w2 = np.array([-1, -1, 1])
    w3 = np.array([-1, 1, 1])
    # Renamed 'input' -> 'sample' so the built-in input() is not shadowed.
    sample = x[0, :]
    # Layer 1: two perceptrons with step activation.
    percept1_out = 1 if np.dot(sample, w1) >= 0 else 0
    percept2_out = 1 if np.dot(sample, w2) >= 0 else 0
    layer1_out = np.array([1, percept1_out, percept2_out])
    # Layer 2: single output perceptron.
    out = 1 if np.dot(layer1_out, w3) >= 0 else 0
    # Print output of the custom neural net; also return it for callers.
    print(out)
    return out


if __name__ == '__main__':
    XOR_check_drawing()
|
from django.urls import path
from . import views
# URL routes for the chatbot app: landing page, chatbot UI, and the
# endpoint that receives new chat messages.
urlpatterns = [
    path('',views.home,name='home'),
    path('chatbot', views.chatbot,name='chatbot'),
    path('addmsg',views.addmsg,name='addmsg'),
]
|
from flask import Flask, request, session, redirect, send_from_directory
import json
from demandresponse import UserManager, PermissionManager
# Flask application; static assets are served under /static.
app = Flask(__name__, static_url_path="/static")

# Load deployment configuration. Fix: close the file promptly (the original
# left the handle open for the GC) and use json.load on the file object.
with open("data/cfg.json", "r") as cfg_file:
    config = json.load(cfg_file)

app.config["SECRET_KEY"] = config["SECRET_KEY"]

# Singletons backed by JSON files: account store and permission table.
USERMANAGER = UserManager(config["USERS_JSON_FN"])
PERMSMANAGER = PermissionManager(config["AUTH_JSON_FN"])

# Maps account type -> dashboard index page filename.
ACC_INDEX_PAGES = config["ACCTYPE_INDEXPAGES"]

# Everything needed has been copied out; drop the raw config dict.
del config
def Authed(auth_str, acc_type=None):
    """Return True if the current session may perform ``auth_str``.

    Anonymous actions are always allowed.  Otherwise (when ``acc_type`` is
    given) the session must hold a user that the permission table allows,
    with a matching account type.

    NOTE(review): when ``acc_type`` is None the outer condition skips the
    permission check entirely for non-anonymous actions — confirm that this
    is intended before relying on it.
    """
    # Idiom fix: 'is not None' / 'is None' instead of '!= None' / '== None';
    # the redundant 'acc_type != None' inside the branch (always true there)
    # has been dropped. Behavior is unchanged.
    if not PERMSMANAGER.is_anonymous_action(auth_str) and acc_type is not None:
        if "USER" not in session:
            return False
        session_user = session["USER"]
        if session_user is None or not PERMSMANAGER.user_permitted(session_user, auth_str) or session_user["acc_type"] != acc_type:
            return False
    return True
@app.route("/")
def home():
    """Landing page: logged-in users go to the dashboard, others get index.html."""
    if "USER" in session: return redirect("/dashboard")
    return app.send_static_file("index.html")
@app.route("/login", methods=["POST"])
def login():
    """Validate posted credentials and store the user object in the session."""
    loginData = request.form
    u, p = loginData["txtUsername"], loginData["txtPassword"]
    if USERMANAGER.valid_user(u, p):
        session["USER"] = USERMANAGER.get_user_by_username(u)
        return "Success!", 200
    else:
        return "Incorrect username or password", 403
@app.route("/logout", methods=["GET"])
def logout():
    """Drop the logged-in user from the session (if any) and go home."""
    if "USER" in session:
        del session["USER"]
    return redirect("/")
@app.route("/dashboard", defaults={'file': None})
@app.route("/dashboard/<path:file>", methods=["GET"])
def dashboard(file):
    """Serve dashboard assets; with no path, serve the account-type index page."""
    if not Authed("/dashboard"): return redirect("/")
    if file == None:
        user = session["USER"]
        # Every account type maps to its own index page; unknown types 404.
        if user["acc_type"] not in ACC_INDEX_PAGES:
            return f"Index webpage not found for account type {user['acc_type']}", 404
        return send_from_directory("static/dashboard", ACC_INDEX_PAGES[user["acc_type"]])
    return send_from_directory("static/dashboard", file)
@app.route("/dashboard/api/<endpoint>", methods=["GET", "POST"])
def dashboard_api(endpoint):
    """Placeholder API endpoint; currently always answers with an empty 200.

    Bug fix: the original ``return 200`` is not a valid Flask response
    (views must return a body, optionally with a status code), so it raised
    a TypeError whenever Authed() succeeded.
    """
    if Authed(endpoint): return "", 200
    return "", 200
@app.route("/public/<path:path>", methods=["GET"])
def public(path):
    """Serve a file from the public static directory (requested path is logged)."""
    print(f"Path: {path}")
    return send_from_directory("static/public", path)
# Run the server
if __name__ == "__main__":
    # Development server on all interfaces, port 8080.
    app.run("0.0.0.0", 8080)
#!/usr/bin/env python3
import sys
import math
import json
import time
from pymongo import MongoClient
import requests
from pprint import pformat
from pprint import pprint
# NVD REST API v1.0 endpoints: one for a single CVE, one for paged queries.
SINGLE_ENDPOINT = "https://services.nvd.nist.gov/rest/json/cve/1.0"
COLLECTION_ENDPOINT = "https://services.nvd.nist.gov/rest/json/cves/1.0"
# Local MongoDB used as the mirror store.
client = MongoClient('mongodb://localhost:27017/')
db = client.cves # The database is called cves.
cves = db.cves # The database has only one collection with cves.
def getSingleCVE(cve):
    """Fetch one CVE record from the NVD single-CVE endpoint; return parsed JSON."""
    response = requests.get("/".join([SINGLE_ENDPOINT, cve]))
    return response.json()
def getCVECollection(params):
    """Query the paged CVE collection endpoint and return parsed JSON.

    Logs the effective request URL and HTTP status for debugging.
    """
    response = requests.get(COLLECTION_ENDPOINT, params=params)
    print(f"Request URL is {response.url}")
    print(f"Status code: {response.status_code}")
    return response.json()
def main():
    """Download CVEs published 2010-2020 from NVD, one page at a time.

    The first request uses resultsPerPage=0 only to learn the total count;
    subsequent requests walk the collection in pages of ``pageSize``.
    Parsed entries are shaped for MongoDB insertion (insert commented out).
    """
    pageSize = 1024
    params = {
        "startIndex": 0,
        "resultsPerPage": 0,
        # NVD v1.0 API timestamp format.
        "pubStartDate": "2010-01-01T00:00:00:000 UTC-00:00",
        "pubEndDate": "2021-01-01T00:00:00:000 UTC-00:00",
    }
    # Get total number of results.
    cveRawCollection = getCVECollection(params)
    totalResults = cveRawCollection["totalResults"]
    print(f"Total number of results is {totalResults}")
    pageCount = math.ceil(totalResults / pageSize)
    print(f"Number of pages is equal to {pageCount}")
    # Be polite to the rate-limited public API before paging through it.
    time.sleep(10)
    print("Gathering info from NVD...")  # typo fix: was "Gatering"
    start = time.time()
    params["resultsPerPage"] = pageSize
    for pageNo in range(pageCount):
        params["startIndex"] = pageNo * pageSize
        parsedCVEList = []
        cveRawCollection = getCVECollection(params)
        for cve in cveRawCollection["result"]["CVE_Items"]:
            # Renamed 'id' -> 'cve_id' so the built-in id() is not shadowed.
            cve_id = cve["cve"]["CVE_data_meta"]["ID"].lower()
            parsedCVEList.append({
                "_id": cve_id,
                "nvd": cve,
            })
        # cves.insert_many(parsedCVEList)
    end = time.time()
    elapsed = end - start
    print(f"Done. Time elapsed: {elapsed}")  # typo fix: was "elpased"
    # with open("nvd_cves_collection_2010_2020.json", "w") as f:
    #     json.dump(parsedCVEList, f)


if __name__ == "__main__":
    main()
|
import json
import numpy as np
import ktrain
# Module-level predictor handle, populated by load_saved_artifacts().
__model = None


def get_predicted_sentiment(review):
    """Return the loaded model's sentiment prediction for one review string."""
    return __model.predict([review])
def load_saved_artifacts():
    """Load the ktrain predictor from ./artifacts into the module global (idempotent)."""
    print("loading saved artifacts...start")
    global __model
    if __model is None:
        __model = ktrain.load_predictor('./artifacts')
    print("loading saved artifacts...done")
# Manual smoke test: load the model, then classify an obviously positive word.
if __name__ == '__main__':
    load_saved_artifacts()
    print(get_predicted_sentiment('wonderful'))
|
from django.db import models
from edtech.models.questions import Question
from djutil.models import TimeStampedModel
from edtech.models.mixins import DefaultPermissions
from edtech.models.test_series import TestSeries
class QuestionTestSeries(TimeStampedModel, DefaultPermissions):
    """Join model linking a Question to a TestSeries."""

    # on_delete made explicit: CASCADE was the implicit default before
    # Django 2.0 made the argument mandatory, so behavior is unchanged.
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    test_series = models.ForeignKey(TestSeries, on_delete=models.CASCADE)
|
# User-facing status and error messages.
# Typo fixes in the message text only; the constant names (including the
# misspelled PASSWORD_EXPIERD) are kept so existing imports keep working.
LOGGEDOUT_SCSS_MSG = "User Logged out successfully"
LOGIN_SCSS_MSG = "User Logged in successfully"
INVALID_PASS = "Password not valid"  # was "Passowrd not valid"
INVALID_USER = "User does not exist"  # was "User dose not exsists"
INVALID_SESSION = "Session Invalid"
INVALID_REQUEST = "Not a valid request"
BAD_REQUEST = "Bad request"
PASSWORD_EXPIERD = "Password Expired"  # was "Password Expierd"
def concat(n, m):
    """Concatenate the decimal digits of n and m into one int (12, 34 -> 1234)."""
    return int(str(n) + str(m))


def large_pair(n, m):
    """Return the larger of the two digit-concatenations of n and m.

    The original was left unfinished (a dangling ``if(`` and ``while``);
    this completes the evident intent: compare concat(n, m) against
    concat(m, n) and return the bigger number.
    """
    n_1 = concat(n, m)
    n_2 = concat(m, n)
    return n_1 if n_1 >= n_2 else n_2
def readNumber(line, index):
    """Parse a (possibly fractional) decimal literal starting at ``index``.

    Returns a NUMBER token and the index of the first unconsumed character.
    """
    value = 0
    # Integer part.
    while index < len(line) and line[index].isdigit():
        value = value * 10 + int(line[index])
        index += 1
    # Optional fractional part after a '.'.
    if index < len(line) and line[index] == '.':
        index += 1
        scale = 0.1
        while index < len(line) and line[index].isdigit():
            value += int(line[index]) * scale
            scale /= 10
            index += 1
    return {'type': 'NUMBER', 'number': value}, index
def readPlus(line, index):
    """Consume the '+' at ``index`` and return a PLUS token."""
    return {'type': 'PLUS'}, index + 1
def readMinus(line, index):
    """Consume the '-' at ``index``.

    When the '-' directly follows '*' or '/' it is a unary sign (e.g. "2*-3"):
    the following number is parsed and negated.  Otherwise a plain MINUS
    token is produced.
    """
    follows_muldiv = index > 0 and line[index - 1] in ['*', '/']
    if follows_muldiv:
        token, next_index = readNumber(line, index + 1)
        token['number'] = -token['number']
        return token, next_index
    return {'type': 'MINUS'}, index + 1
def readMul(line, index):
    """Consume '*' and return a MUL token."""
    return {'type': 'MUL'}, index + 1


def readDiv(line, index):
    """Consume '/' and return a DIV token."""
    return {'type': 'DIV'}, index + 1


def readL(line, index):
    """Consume '(' and return a LEFT token (not yet dispatched by tokenize)."""
    return {'type': 'LEFT'}, index + 1


def readR(line, index):
    """Consume ')' and return a RIGHT token (not yet dispatched by tokenize)."""
    return {'type': 'RIGHT'}, index + 1
def tokenize(line):
    """Convert an arithmetic expression string into a list of tokens.

    Recognizes numbers and the + - * / operators; any other character
    aborts the program.  NOTE(review): readL/readR exist but '(' and ')'
    are not dispatched here, so parentheses hit the invalid-character path.
    """
    tokens = []
    index = 0
    while index < len(line):
        if line[index].isdigit():
            (token, index) = readNumber(line, index)
        elif line[index] == '+':
            (token, index) = readPlus(line, index)
        elif line[index] == '-':
            (token, index) = readMinus(line, index)
        elif line[index] == '*':
            (token, index) = readMul(line, index)
        elif line[index] == '/':
            (token, index) = readDiv(line, index)
        else:
            print('Invalid character found: ' + line[index])
            exit(1)
        tokens.append(token)
    return tokens
def evaluateplusminus(tokens):
    """Evaluate a token list containing only NUMBER, PLUS and MINUS tokens.

    Bug fix: the original inserted a dummy '+' token into the caller's
    list, mutating the argument as a side effect; the dummy is now added
    to a fresh list instead.
    """
    tokens = [{'type': 'PLUS'}] + tokens  # leading dummy '+' simplifies the loop
    answer = 0
    index = 1
    while index < len(tokens):
        if tokens[index]['type'] == 'NUMBER':
            # The token before a number decides add vs. subtract.
            if tokens[index - 1]['type'] == 'PLUS':
                answer += tokens[index]['number']
            elif tokens[index - 1]['type'] == 'MINUS':
                answer -= tokens[index]['number']
            else:
                print('Invalid syntax:', tokens[index - 1])
                exit(1)
        index += 1
    return answer
def evaluatemuldiv(tokens):
    """Collapse all '*'/'/' runs into single NUMBER tokens, then add/subtract.

    Walks the token list; whenever a NUMBER is followed by MUL/DIV tokens,
    the whole run is folded into one value.  The reduced list is then handed
    to evaluateplusminus() for the lower-precedence pass.
    """
    plusminustokens=[]
    index=0
    while index < len(tokens):
        smallans=None
        if tokens[index]['type']=='NUMBER':
            smallans=tokens[index]['number']
            index+=1
            # Fold the following chain of * and / operations into smallans.
            while index < len(tokens) and tokens[index]['type'] in ['MUL','DIV']:
                assert(index+1<len(tokens))
                if tokens[index]['type'] == 'MUL':
                    smallans *= tokens[index+1]['number']
                elif tokens[index]['type'] == 'DIV':
                    smallans /= tokens[index+1]['number']
                index+=2
        if smallans==None:
            # Current token was '+'/'-': pass it through unchanged.
            plusminustokens.append(tokens[index])
            index+=1
        else:
            plusminustokens.append({'type':'NUMBER','number':smallans})
    return evaluateplusminus(plusminustokens)
def evaluate(tokens):
    """Evaluate a full token list (precedence is handled by evaluatemuldiv)."""
    return evaluatemuldiv(tokens)
def test(line):
    """Compare this evaluator against Python's eval() for one expression."""
    tokens = tokenize(line)
    actualAnswer = evaluate(tokens)
    # eval() is safe here: inputs are only the hard-coded test expressions.
    expectedAnswer = eval(line)
    if abs(actualAnswer - expectedAnswer) < 1e-8:
        print("PASS! (%s = %f)" % (line, expectedAnswer))
    else:
        print("FAIL! (%s should be %f but was %f)" % (line, expectedAnswer, actualAnswer))
# Add more tests to this function :)
def runTest():
    """Run the built-in expression test suite (results are printed)."""
    print("==== Test started! ====")
    test("0.3")
    test("1+2")
    test("1.0+2.1-3")
    test("3.0+4*2-1/5*2")
    test("3.0+4*2-1/5*2/3")
    test("3.0+4*2-1/5*4*5")
    test("-3.0+4*-2-1/5*2")  # extension: multiplying by a negative number
    test("-3.0+4*2-1/-5*2")  # extension: dividing by a negative number
    print("==== Test finished! ====\n")


runTest()

# Interactive read-eval-print loop.
while True:
    print('> ', end="")
    line = input()
    tokens = tokenize(line)
    answer = evaluate(tokens)
    print("answer = %f\n" % answer)
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-05-26 16:47
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds the nullable University.size_group FK
    pointing at backend.UniSizeGroup (reverse accessor 'unis')."""

    dependencies = [
        ('backend', '0012_unisizegroup'),
    ]

    operations = [
        migrations.AddField(
            model_name='university',
            name='size_group',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='unis', to='backend.UniSizeGroup'),
        ),
    ]
|
__author__ = 'Elisabetta Ronchieri'
import sys
import datetime
import time
import os
import simplejson
#import check_testplan as ctp
import utils
def set_inpt_fn(n_df, n_dfn, path='', subdir=True):
    '''Set Input filename (ifn), Back filename (bfn) and Destination filename (dfn).

    A fresh UUID is baked into every generated name so concurrent runs do
    not collide (the commented-out lines show the older timestamp scheme).
    Returns the tuple (ifn, dfn, bfn).

    NOTE(review): when n_df is falsy, dfn is never assigned and the final
    return raises NameError - confirm callers always pass a true n_df.
    '''
    #t=datetime.datetime.now()
    #ts=str(time.mktime(t.timetuple()))
    # NOTE(review): 'id' shadows the builtin; left as-is in this
    # documentation-only pass.
    id = utils.get_uuid()
    #ifn = path + '/tstorm-input-file-' + ts + '.txt'
    #bfn = path + '/tstorm-back-input-file-' + ts + '.txt'
    ifn = path + '/tstorm-input-file-' + id + '.txt'
    bfn = path + '/tstorm-back-input-file-' + id + '.txt'
    if n_df:
        # Destination name supplied by the caller.
        if '/' in n_dfn:
            # Rebuild each directory component with the UUID appended.
            dfn = '/'
            tmp_d = os.path.dirname(n_dfn).split('/')[1:]
            for x in tmp_d:
                dfn = dfn + x + id + '/'
            #dfn = dfn + x + ts + '/'
            #dfn = dfn + os.path.basename(n_dfn) + '.' + ts
            dfn = dfn + os.path.basename(n_dfn) + '.' + id
        else:
            # No directory part: generate a default output name, optionally
            # nested in two UUID-suffixed subdirectories.
            if subdir:
                dfn = '/a'+ id + '/b' + id + '/tstorm-output-file-' + id + '.txt'
                #dfn = '/a'+ ts + '/b' + ts + '/tstorm-output-file-' + ts + '.txt'
            else:
                dfn = '/tstorm-output-file-' + id + '.txt'
                #dfn = '/tstorm-output-file-' + ts + '.txt'
    return ifn,dfn,bfn
def get_json_file_information(file_name = 'tstorm-tp.json'):
    '''Get Test Plan Information from the configuration file of testplan.

    Exits with status 2 when the JSON cannot be parsed.  (Python 2 syntax:
    "except ValueError, err" and print statements.)
    '''
    json_file = get_configuration_file(file_name)
    try:
        tp_info=simplejson.load(open(json_file,'r'))
    except ValueError, err:
        print "Wrong json file: %s" % err
        sys.exit(2)
    return tp_info
def get_configuration_paths():
    '''Return the candidate directories that may contain configuration files.

    For each of the three etc/tstorm subtrees, both one and two levels above
    the script's own directory are tried, in that order.
    '''
    base = os.path.dirname(sys.argv[0])
    suffixes = ["etc/tstorm/",
                "etc/tstorm/sanity/",
                "etc/tstorm/common/"]
    paths = []
    for suffix in suffixes:
        paths.append(os.path.join(base, "../", suffix))
        paths.append(os.path.join(base, "../", "../", suffix))
    return paths
def configuration_path_exists():
    '''Return True only when every candidate configuration directory exists.

    Stops at the first missing directory; prints each directory that does
    exist (Python 2 print statement).
    '''
    result=True
    paths = get_configuration_paths()
    for x in paths:
        if not os.path.isdir(x):
            #print 'path %s does not exist ' % x
            result=False
            break
        else:
            print 'path %s exist ' % x
    return result
def get_configuration_path(file_name='map_tests_ids.json'):
    '''Return the directory component of the given configuration file name.'''
    return os.path.dirname(file_name)
def configuration_file_exists(file_name='map_tests_ids.json'):
    '''Return True if file_name exists in any candidate configuration path.'''
    for directory in get_configuration_paths():
        if os.path.isfile(os.path.join(directory, file_name)):
            return True
    #print 'res %s is for %s ' % (result,file_name)
    return False
def get_configuration_file(file_name='map_tests_ids.json'):
    '''Return the first candidate path holding file_name, or '' if none.

    The directory and file name are concatenated directly (candidate paths
    already end with '/'), matching the original behavior.
    '''
    for directory in get_configuration_paths():
        if os.path.isfile(os.path.join(directory, file_name)):
            return directory + file_name
    return ''
def print_json_file_template(file_name = 'tstorm-tp.json.template'):
    '''Print Test Plan Information from the configuration template file of testplan.

    Exits with status 2 on I/O or any other error (Python 2 print statements).
    '''
    json_file = get_configuration_file(file_name)
    try:
        fl=open(json_file,'r')
        json_lines=fl.readlines()
        for line in json_lines:
            print line
        fl.close()
    except IOError:
        print "I/O error"
        sys.exit(2)
    except:
        print "Unexpected error:", sys.exc_info()[0]
        sys.exit(2)
def print_configuration_file_template(file_name = 'tstorm.ini.template'):
    '''Print Test Configuration Information from the configuration template file.

    NOTE(review): the whole implementation is commented out, so calling
    this is currently a no-op; see print_json_file_template for the
    intended shape.
    '''
    #json_file = get_configuration_file(file_name)
    #try:
    #    fl=open(json_file,'r')
    #    json_lines=fl.readlines()
    #    for line in json_lines:
    #        print line
    #    fl.close()
    #except IOError:
    #    print "I/O error"
    #    sys.exit(2)
    #except:
    #    print "Unexpected error:", sys.exc_info()[0]
    #    sys.exit(2)
#def is_json_file_valid(tp_info):
# '''Check validity of the test plan conf file'''
# result=False
# a=ctp.CheckTestplan()
# kw=a.get_key_word()
# tp_categories=a.get_test_plan_categories()
# available_methods=a.get_test_suites()
# for x in tp_info:
# if x == kw:
# result=True
# break
# try:
# for x in tp_info[kw]:
# if x in tp_categories:
# result=True
# for y in tp_info[x]:
# if y not in available_methods:
# return False
# else:
# return False
# except KeyError:
# return False
# return result
def is_tests_sequence_valid(ts_info, uid):
    '''Check that every test in ts_info appears as a first element in uid.

    Bug fix: the original never reset its flag between iterations, so once
    one test was found, later unknown tests could no longer invalidate the
    sequence.  An empty ts_info is still reported invalid, matching the
    original behavior.
    '''
    if not ts_info:
        return False
    for x in ts_info:
        if not any(x == y[0] for y in uid):
            return False
    return True
def file_exists(file_name):
    '''Return True if file_name refers to an existing regular file.'''
    return os.path.isfile(file_name)
def get_custom_configuration_file(file_name):
    '''Return file_name unchanged when it already contains a path separator;
    otherwise prefix it with the current working directory.'''
    if '/' in file_name:
        return file_name
    return os.getcwd() + '/' + file_name
def get_tests_sequence(file_name):
    '''Read a tests-sequence file: one test name per line, blanks skipped.

    Fix: uses a with-statement so the file handle is closed even if the
    read raises (the original relied on an explicit close that an error
    would skip).
    '''
    with open(file_name, "r") as in_file:
        text = in_file.read()
    sequence = []
    for x in text.split('\n'):
        r = x.strip()
        if r != '':
            sequence.append(r)
    return sequence
|
import torch
import torch.nn as nn
import torch.nn.init as torch_init
from torch.autograd import Variable
class bLSTM(nn.Module):
    """(Optionally bidirectional) LSTM followed by a per-timestep linear layer.

    Expected config keys: input_dim, hidden_dim, output_dim, layers,
    dropout, bidirectional, batch_size, cuda.

    Bug fix: the original stored the 'bidirectional' flag but ignored it
    when sizing the default hidden/cell states and the output layer, so
    bidirectional=True crashed with shape mismatches.
    """

    def __init__(self, config):
        super(bLSTM, self).__init__()
        self.is_bidirectional = config['bidirectional']
        # A bidirectional RNN has two directions per layer.
        num_directions = 2 if self.is_bidirectional else 1
        self.lstm = nn.LSTM(config['input_dim'], config['hidden_dim'], config['layers'],
                            batch_first=True, dropout=config['dropout'],
                            bidirectional=config['bidirectional'])
        # LSTM output is hidden_dim per direction.
        self.output_layer = nn.Linear(config['hidden_dim'] * num_directions, config['output_dim'])
        # Default zero states: (layers * directions, batch, hidden).
        self.initialC = torch.zeros(config['layers'] * num_directions, config['batch_size'], config['hidden_dim'])
        self.initialH = torch.zeros(config['layers'] * num_directions, config['batch_size'], config['hidden_dim'])
        if config['cuda']:
            self.initialC = self.initialC.cuda()
            self.initialH = self.initialH.cuda()

    def forward(self, sequence, h0=None, c0=None):
        """Run the LSTM over a (batch, seq_len, input_dim) tensor.

        Returns (output, (ht, ct)) where output has shape
        (batch, seq_len, output_dim).  Zero states are used when h0/c0 are
        not supplied.
        """
        if h0 is None:
            h0 = self.initialH
        if c0 is None:
            c0 = self.initialC
        hidden_output, (ht, ct) = self.lstm(sequence, (h0, c0))
        output = self.output_layer(hidden_output)
        return output, (ht, ct)
class bGRU(nn.Module):
    """(Optionally bidirectional) GRU followed by a per-timestep linear layer.

    Bug fix: as with bLSTM, the 'bidirectional' flag is now reflected in
    the default hidden-state and output-layer sizes (the original ignored
    it, crashing for bidirectional=True).
    """

    def __init__(self, config):
        super(bGRU, self).__init__()
        self.is_bidirectional = config['bidirectional']
        num_directions = 2 if self.is_bidirectional else 1
        self.gru = nn.GRU(config['input_dim'], config['hidden_dim'], config['layers'],
                          batch_first=True, dropout=config['dropout'],
                          bidirectional=config['bidirectional'])
        # GRU output is hidden_dim per direction.
        self.output_layer = nn.Linear(config['hidden_dim'] * num_directions, config['output_dim'])
        # Default zero hidden state: (layers * directions, batch, hidden).
        self.initialH = torch.zeros(config['layers'] * num_directions, config['batch_size'], config['hidden_dim'])
        if config['cuda']:
            self.initialH = self.initialH.cuda()

    def forward(self, sequence, h0=None):
        """Run the GRU over (batch, seq_len, input_dim); returns (output, ht)."""
        if h0 is None:
            h0 = self.initialH
        hidden_output, ht = self.gru(sequence, h0)
        output = self.output_layer(hidden_output)
        return output, ht
#!/usr/bin/python
from timeit import Timer
# Benchmarks a few queries against a local MySQL/sakila-derived schema
# (Python 2 script: print statements, MySQLdb driver).
# Query by indexed column, 100 repetitions.
t = Timer('cur.execute("SELECT SQL_NO_CACHE * FROM `ticket` WHERE theater_id = 3;");cur.fetchall()', 'import MySQLdb;db = MySQLdb.connect(host="localhost", user="root", passwd="", db="sakila");cur = db.cursor()')
print "theater_id %0.3f sec " % t.timeit(100)
# Date-range join, single repetition.
t = Timer('cur.execute("SELECT COUNT(*) FROM `ticket` ti INNER JOIN `show` sh ON ti.show_show_id = sh.show_id WHERE start_date > now() AND start_date < DATE_ADD(now(), INTERVAL 1 YEAR);");cur.fetchall()', 'import MySQLdb;db = MySQLdb.connect(host="localhost", user="root", passwd="", db="sakila");cur = db.cursor()')
print "date %0.3f sec " % t.timeit(1)
# Index experiments used alongside these timings:
# DROP INDEX `theater_id` ON ticket;
# CREATE INDEX start_date_index ON `show` (start_date) USING BTREE;
# DROP INDEX `idx_title` ON `film`;
# DROP INDEX `idx_title` ON `film`;
# LIKE '%the%' cannot use the title index (leading wildcard)...
t = Timer('cur.execute("SELECT SQL_NO_CACHE fi.title FROM `ticket` ti INNER JOIN `show` sh ON sh.show_id = ti.show_show_id LEFT JOIN `film` fi ON fi.film_id = sh.film_film_id WHERE fi.title LIKE \'%the%\'");cur.fetchall()', 'import MySQLdb;db = MySQLdb.connect(host="localhost", user="root", passwd="", db="sakila");cur = db.cursor()')
print "the+ %0.3f sec " % t.timeit(10)
# ...while LIKE 'the%' (prefix match) can.
t = Timer('cur.execute("SELECT SQL_NO_CACHE fi.title FROM `ticket` ti INNER JOIN `show` sh ON sh.show_id = ti.show_show_id LEFT JOIN `film` fi ON fi.film_id = sh.film_film_id WHERE fi.title LIKE \'the%\'");cur.fetchall()', 'import MySQLdb;db = MySQLdb.connect(host="localhost", user="root", passwd="", db="sakila");cur = db.cursor()')
print "+the+ %0.3f sec " % t.timeit(10)
|
# -*- coding: cp936 -*-
# Leave-one-out cross-validation of a hand-written K-nearest-neighbours
# classifier (K=3 and K=5) on the colon gene-expression data set.
# Python 2 script; the original Chinese comments are translated to English
# below, but Chinese text inside print statements is runtime output and is
# kept unchanged.
row = 2000 #feature dimensionality of the data
col = 62 #number of samples
train = [] #training data set
label = [] #labels
distance = [] #distances
inputdata = [] #the sample currently being predicted
equal_k3_cnt= 0 #number of correct predictions when K=3
equal_k5_cnt = 0 #number of correct predictions when K=5
cnt = 0 #sample counter
#Read the data in gecolon_data.txt into train
with open('gecolon_data.txt', 'r') as f:
    for line in f:
        train.append(map(float, line.split()))
# NOTE(review): 'f.close' lacks parentheses so close() is never actually
# called; harmless, since the with-block already closed the file.
f.close
#Leave-one-out: treat each sample in turn as the one to predict
for myline in xrange(col):
    #Read the data in gecolon_label.txt into label
    with open('gecolon_label.txt', 'r') as f:
        for line in f:
            label.append(map(float, line.split()))
    f.close
    #Copy the held-out sample's column into inputdata
    for j in xrange(row):
        inputdata.append(train[j][myline])
    #Zero-fill the distance array
    for b in range(col):
        distance.append(0)
    #Compute the Euclidean distance to every other sample
    for i in range(col):
        if(i == myline):
            continue
        for j in range(row):
            distance[i] += (train[j][i] - inputdata[j]) ** 2
        distance[i] = distance[i] ** (1.0 / 2)
    #Bubble-sort distances ascending, swapping labels in lockstep
    for i in range(col - 1):
        for j in range(col - i - 1):
            if distance[j] > distance[j + 1]:
                distance[j], distance[j + 1] = distance[j + 1], distance[j]
                label[0][j], label[0][j + 1] = label[0][j + 1], label[0][j]
    mylabel_k3 = 0
    mylabel_k5 = 0
    # (Translation of the note below:) the held-out sample was not removed
    # from the training set, so the smallest distance is always the sample
    # itself (distance 0).  After sorting, index 0 must therefore be
    # skipped: K=3 uses indices 1-3, K=5 uses indices 1-5.
    '''
    因为我将被预测的样本直接从训练的样本中拿出来
    但是训练样本里的被测试样本没有被删除
    所以距离最小的肯定是被预测样本本身的那组数据,距离为0
    所以经过排序之后,必须舍弃掉距离数组中第一个元素,即值为0的元素
    所以k=3时选择下标为1-3的元素,k=5时选择下标为1-5的元素
    '''
    #Validation for K = 3
    cnt += 1
    print "样本%d:" % cnt
    label_one_cnt = 0
    label_two_cnt = 0
    #Majority label among the 3 nearest neighbours
    for i in range(1, 4):
        if label[0][i] == 1:
            label_one_cnt += 1
        else:
            label_two_cnt += 1
    if label_one_cnt > label_two_cnt:
        mylabel_k3 = 1
    else:
        mylabel_k3 = 2
    #Compare the predicted label with the true one
    if label[0][0] == mylabel_k3:
        equal_k3_cnt += 1
        print '当K=3,预测得到的label = %d,准确率为100%%' % int(mylabel_k3)
    else:
        print "当K=3,预测得到的label = %d,准确率为0%%" % int(mylabel_k3)
    #Validation for K = 5
    label_one_cnt = 0
    label_two_cnt = 0
    #Majority label among the 5 nearest neighbours
    for i in range(1, 6):
        if label[0][i] == 1:
            label_one_cnt += 1
        else:
            label_two_cnt += 1
    #Determine the predicted label
    if label_one_cnt > label_two_cnt:
        mylabel_k5 = 1
    else:
        mylabel_k5 = 2
    #Record per-sample prediction accuracy
    if label[0][0] == mylabel_k5:
        equal_k5_cnt += 1
        print "当K=5,预测得到的label = %d,准确率为100%%" % int(mylabel_k5)
    else:
        print "当K=5,预测得到的label = %d,准确率为0%%" % int(mylabel_k5)
    print
    #Reset the per-iteration buffers
    inputdata = []
    distance = []
    label = []
#Compute the overall accuracy
equal_k3_acc = (equal_k3_cnt * 100.0)/col
equal_k5_acc = (equal_k5_cnt * 100.0)/col
print "经过62个样本的统计:"
print "当K=3,loocv的准确度为%.2f%%" % equal_k3_acc
print "当K=5,loocv的准确度为%.2f%%" % equal_k5_acc
# NOTE(review): busy-wait that pins a CPU core just to keep the console
# window open; raw_input() would be gentler.
while True:
    pass
|
# unit tests for initial_buffering.py
from initial_buffering import *
def test_segment_should_store_size_and_duration():
    """Segment keeps the size and duration it was constructed with."""
    segment = Segment(123000, 5)
    assert segment.duration == 5
    assert segment.size == 123000


def test_playlist_should_store_segments():
    """Playlist exposes its segments in the order given."""
    segment_1 = Segment(123000, 5)
    segment_2 = Segment(102000, 5)
    playlist = Playlist([segment_1, segment_2])
    assert len(playlist.segments) == 2
    assert playlist.segments[0] == segment_1
    assert playlist.segments[1] == segment_2


def test_initial_segment():
    """calculate_initial_segment returns the index to start playback from
    for a given bandwidth."""
    segment_1 = Segment(1000, 5) #1600bps
    segment_2 = Segment(1150, 5) #1840bps
    segment_3 = Segment(1200, 5) #1920bps
    segment_4 = Segment(1100, 5) #1760bps
    playlist = Playlist([segment_1, segment_2, segment_3, segment_4])
    assert calculate_initial_segment(1600, playlist) == 3
    assert calculate_initial_segment(1920, playlist) == 0
    assert calculate_initial_segment(1800, playlist) == 0


def test_initial_segment_bigger_playlist():
    """Same as above, on a longer playlist with decreasing bitrates."""
    segment_1 = Segment(1200, 5) #1920bps
    segment_2 = Segment(1150, 5) #1840bps
    segment_3 = Segment(1100, 5) #1760bps
    segment_4 = Segment(1000, 5) #1600bps
    segment_5 = Segment(800, 5) #1280bps
    segment_6 = Segment(600, 5) #960bps
    segment_7 = Segment(300, 5) #480bps
    playlist = Playlist([segment_1, segment_2, segment_3, segment_4, segment_5, segment_6, segment_7])
    assert calculate_initial_segment(1920, playlist) == 0
    assert calculate_initial_segment(1840, playlist) == 2
    assert calculate_initial_segment(1600, playlist) == 4
    assert calculate_initial_segment(600, playlist) == 6


def test_calculate_download_time():
    """Download time depends on size and bandwidth only, not on duration."""
    # no matter the duration of segment on this case :)
    segment_1 = Segment(1000, 4)
    segment_2 = Segment(1000, 8)
    segment_3 = Segment(1000, 12)
    bandwidth = 1000
    assert calculate_download_time(bandwidth, segment_1) == 8
    assert calculate_download_time(bandwidth, segment_2) == 8
    assert calculate_download_time(bandwidth, segment_3) == 8


def test_calculate_startup_buffer():
    """Startup buffer is the wait (seconds) before playback can begin."""
    segment_1 = Segment(1000, 5) #1600bps
    segment_2 = Segment(1150, 5) #1840bps
    segment_3 = Segment(1200, 5) #1920bps
    segment_4 = Segment(1100, 5) #1760bps
    playlist = Playlist([segment_1, segment_2, segment_3, segment_4])
    assert calculate_startup_buffer(1600, playlist) == 16.75
    assert calculate_startup_buffer(1920, playlist) == 0
    assert calculate_startup_buffer(1000, playlist) == 26.8
|
import json
from datetime import datetime
from django.template.loader import render_to_string
from django.urls import reverse
class Omitable(object):
    """
    Marker value: the field is empty and its key should be omitted from the
    serialized output.

    If you're thinking: "What is that crazy german guy doing?", well the answer
    is simple: I didn't want to pull in the whole of rest framework and hacked
    some code. Now it escalated. If you wanna change all of that crap to DRF,
    feel free :)
    """
class BaseSerializerMany(object):
    """Serialize a list of objects to a JSON array string.

    Subclasses define ``fields`` (a tuple of field names).  For each field,
    a method ``attr_<field>(obj)`` is used when present; otherwise the
    attribute is read straight off the object.  A returned ``Omitable``
    drops the key from the output.
    """

    def __init__(self, objs, context=None):
        # Bug fix: 'context={}' was a shared mutable default argument; every
        # instance created without a context shared the same dict.
        self.objs = objs
        self.context = {} if context is None else context

    @property
    def json(self):
        """Return the serialized objects as JSON (non-ASCII preserved)."""
        out = []
        for obj in self.objs:
            oo = {}
            for field in self.fields:
                try:
                    v = getattr(self, "attr_" + field)(obj)
                    if not isinstance(v, Omitable):
                        oo[field] = v
                except AttributeError:
                    # No attr_<field> serializer method: use the raw attribute.
                    oo[field] = getattr(obj, field)
            out.append(oo)
        return json.dumps(out, ensure_ascii=False)
class EventsSerializer(BaseSerializerMany):
    """Serializer for calendar events: name, url, date, location, cancelled."""

    fields = ("name", "url", "date", "location", "cancelled")

    def attr_date(self, obj):
        """Render the event date as human-readable strings plus ISO timestamps."""
        o = {
            "human": render_to_string("osmcal/date.l10n.txt", {"event": obj}).strip(),
            "human_short": render_to_string("osmcal/date-short.l10n.txt", {"event": obj}).strip(),
            "whole_day": obj.whole_day,
            "start": obj.start_localized.isoformat(),
        }
        # 'end' is optional: only emitted when the event has an end time.
        if obj.end:
            o["end"] = obj.end_localized.isoformat()
        return o

    def attr_location(self, obj):
        """Return location details, or Omitable so the key is dropped entirely."""
        if not obj.location:
            return Omitable()
        o = {
            "short": obj.location_text,
            "detailed": obj.location_detailed_addr,
            "coords": [obj.location.x, obj.location.y],
        }
        # Venue name is optional.
        if obj.location_name:
            o["venue"] = obj.location_name
        return o

    def attr_url(self, obj):
        """Return the event URL, absolute when a request is in the context."""
        rel_url = reverse("event", args=[obj.id])
        try:
            return self.context["request"].build_absolute_uri(rel_url)
        except KeyError:
            # No request in context (e.g. background job): relative URL.
            return rel_url

    def attr_cancelled(self, obj):
        """Emit 'cancelled' only for cancelled events; otherwise omit the key."""
        if not obj.cancelled:
            return Omitable()
        return obj.cancelled
|
def hashing(myS):
    """Return a position-weighted character-code hash of the string.

    Character i (1-based) contributes i * ord(char) to the sum.
    """
    return sum(position * ord(char) for position, char in enumerate(myS, start=1))
|
from Dynamics import Dynamics
from Main.Robot import Robot
# This class is a stub for the haptic controller.
class HapticController(object):
    """Placeholder: no behavior implemented yet."""

    def __init__(self):
        pass


# Here is some sample code to get the mass matrix
# Dynamics (Python 2 script: note the print statement below)
r = Robot()
M = Dynamics.make_mass_matrix(r)
print M
#coding=utf-8
import discord
import re
from secure import DISCORD_TOKEN
from settings import *
from bot.modules.redisClient import startRedisConnection
redisConn=startRedisConnection()
from bot.modules import backgroundTasks
from bot.modules import errors
import logging
from bot.modules.discordUtils import safeSend
from bot.modules.utils import getDisplayName
logger = logging.getLogger(__name__)
logger.info("Starting bot")
class VRCBot(discord.Client):
    def __init__(self, *args, **kwargs):
        """Initialize the Discord client plus per-instance chat state."""
        super().__init__(*args, **kwargs)
        # Channel the bot most recently interacted with (None until set).
        self.current_channel=None
        # NOTE(review): semantics of msg_level are not visible in this file;
        # presumably a verbosity/level threshold - confirm before documenting.
        self.msg_level=4
    async def on_ready(self):
        """Gateway ready: seed Redis state, start background tasks, log a summary."""
        # Make sure the subscribed-channels key exists before tasks read it.
        if not await redisConn.exists("subscribedChannels"):
            await redisConn.safeSet("subscribedChannels", [], True)
        # Launch the notification poller and the cleanup ("reaper") loops.
        self.loop.create_task(backgroundTasks.notificationTask(self))
        self.loop.create_task(backgroundTasks.reaper(self))
        subbedChannelsDict = await redisConn.getSubscribedChannelIDs()
        totalSubbed = len(subbedChannelsDict["list"])
        totalGuilds = len(self.guilds)
        totalUsers = len(self.users)
        logger.info(f"Username: {self.user.name}")
        logger.info(f"ClientID: {self.user.id}")
        logger.info(f"Connected to {totalGuilds} guilds")
        logger.info(f"Connected to {totalSubbed} subscribed channels")
        logger.info(f"Serving {totalUsers} users")
        logger.info("Bot ready")
async def on_message(self, message):
if message.author == self.user:
return
#------------------------privilege judge-------------------------
try:
userIsManager = message.author.permissions_in(message.channel).manage_channels
except AttributeError:
# Happens if user has no roles
userIsManager = False
#------------------------privilege judge-------------------------
message.content = message.content.lower()
#-----------------------------------------------------addd channel-----------------------------------------------------
if message.content.startswith(PREFIX + "addchannel"):
if not userIsManager:
replyMsg="You don't have permission to addchannel"
return await safeSend(message.channel,text=replyMsg)
# Add channel ID to subbed channels
replyMsg = "This channel has been added to the launch notification service"
await redisConn.setDefaultConfig(message.channel.id)
subbedChannelsDict = await redisConn.getSubscribedChannelIDs()
if subbedChannelsDict["err"]:
# return here so nothing else is executed
return await safeSend(message.channel, embed=errors.dbErrorEmbed)
subbedChannelIDs = subbedChannelsDict["list"]
if message.channel.id not in subbedChannelIDs:
subbedChannelIDs.append(message.channel.id)
ret = await redisConn.safeSet("subscribedChannels", subbedChannelIDs, True)
if not ret:
return await safeSend(message.channel, embed=errors.dbErrorEmbed)
else:
replyMsg = "This channel is already subscribed to the launch notification service"
await safeSend(message.channel, text=replyMsg)
#-------------------------------------------------------remove channel--------------------------------------------------
elif message.content.startswith(PREFIX + "removechannel"):
if not userIsManager:
replyMsg="You don't have permission to removechannel"
return await safeSend(message.channel,text=replyMsg)
# Remove channel ID from subbed channels
replyMsg = "This channel has been removed from the launch notification service"
subbedChannelsDict = await redisConn.getSubscribedChannelIDs()
if subbedChannelsDict["err"]:
# return here so nothing else is executed
return await safeSend(message.channel, embed=errors.dbErrorEmbed)
subbedChannelIDs = subbedChannelsDict["list"]
try:
# No duplicate elements in the list so remove(value) will always work
subbedChannelIDs.remove(message.channel.id)
ret = await redisConn.safeSet("subscribedChannels", subbedChannelIDs, True)
if not ret:
return await safeSend(message.channel, embed=errors.dbErrorEmbed)
except ValueError:
replyMsg = "This channel was not previously subscribed to the launch notification service"
await safeSend(message.channel, text=replyMsg)
#-------------------------------------------addping------------------------------------------
# Add/remove ping commands
elif message.content.startswith(PREFIX + "add "):
channelID = message.channel.id
friendsToMetion=[f for f in message.content.split(" ")[1:] if not f.startswith("<@")]
if "" in friendsToMetion:
friendsToMetion.remove("")
rolesToMention=re.findall('(<@!?\d+>)',message.content)
if not rolesToMention:
rolesToMention.append(message.author.mention)
if not friendsToMetion:
replyMsg = "Invalid input for add command"
else:
rolesToDisplay=await getDisplayName(self,rolesToMention)
replyMsg = "Add friend(s) {} to mention {}".format(' '.join(friendsToMetion)," ".join(rolesToDisplay))
new={}
for f in friendsToMetion:
new[f]=rolesToMention
channelConfig = await redisConn.getChannelConfig(channelID)
mentions=channelConfig.get('mentions',{})
mentions.update(new)
channelConfig['mentions']=mentions
ret =await redisConn.safeSet(REDIS_KEY_DISCORD_CHANNEL_CONFIG%message.channel.id,channelConfig,serialize=True)
if not ret:
return await safeSend(message.channel, embed=errors.dbErrorEmbed)
await safeSend(message.channel, text=replyMsg)
elif message.content.startswith(PREFIX + "rm "):
channelID = message.channel.id
friendsToRemove = [f for f in message.content.split(" ")[1:] if not f.startswith("<@")]
if "" in friendsToRemove:
friendsToRemove.remove("")
rolesToRemove = re.findall('(<@!?\d+>)', message.content)
if not rolesToRemove:
rolesToRemove.append(message.author.mention)
if not friendsToRemove:
replyMsg = "Invalid input for rm command"
else:
successed=[]
keyNotExists=[]
rolesNotExists=[]
pop=[]
channelConfig = await redisConn.getChannelConfig(channelID)
try:
for f in friendsToRemove:
for r in rolesToRemove:
if f in channelConfig['mentions'].keys():
if r in channelConfig['mentions'][f]:
channelConfig['mentions'][f].remove(r)
successed.append((f,r))
else:
if channelConfig['mentions'][f]==[]:
channelConfig['mentions'].pop(f)
pop.append(f)
else:
rolesNotExists.append((f,r))
else:
keyNotExists.append((f,r))
ret = await redisConn.safeSet(REDIS_KEY_DISCORD_CHANNEL_CONFIG % channelID, channelConfig, serialize=True)
if not ret:
return await safeSend(message.channel, embed=errors.dbErrorEmbed)
replyMsg =""
for f,r in successed:
replyMsg+=f"Successfully removed {f}'s mention for {r}.\n"
for f,r in keyNotExists:
replyMsg+=f"Failed to remove {f}'s mention for {r},no friends named {f}.\n"
for f,r in rolesNotExists:
replyMsg+=f"{f} has already removed for {r}.\n"
for f in pop:
replyMsg+=f"{f} has been deleted in config.\n"
except TypeError and KeyError as e:
replyMsg = "An error occurred when removing mentions"
await safeSend(message.channel, text=replyMsg)
# -------------------------------------------end------------------------------------------
elif userIsManager and message.content.startswith(PREFIX+"show"):
if message.content.split(" ")[1]=="config":
try:
map={}
config=await redisConn.getChannelConfig(message.channel.id)
for friend, roles in config['mentions'].items():
for mention in roles:
if mention not in map.keys():
ret=await getDisplayName(self,mention)
if ret:
map[mention] =ret
config_text=str(config)
for id,displayName in map.items():
config_text=config_text.replace(id,displayName)
replyMsg="This channel's config is :\n"+config_text
except Exception as e:
replyMsg="An error occurred when showing config"
await safeSend(message.channel, text=replyMsg)
if message.content.split(" ")[1] == "online":
results=[]
ret=await redisConn.scanAll(REDIS_KEY_UID.replace("%s","*"))
if ret:
for key in ret:
User = await redisConn.safeGet(key, deserialize=True)
if User:
results.append(User)
onlines="\n".join([User.displayName for User in results])
replyMsg="Total Count {}:\n{}".format(len(ret),onlines)
else:
replyMsg="An error occured when show online users"
await safeSend(message.channel,text=replyMsg)
elif userIsManager and message.content.startswith(PREFIX+"set"):
channelID=message.channel.id
channelConfig = await redisConn.getChannelConfig(channelID)
key,value,*args=message.content.split(" ")[1:]
try:
if (key=="level" or key=="atlevel") and 0<=int(value)<=4:
channelConfig.update({key:int(value)})
elif key=="verbose":
if value=="true":
channelConfig.update({key:True})
elif value=="false":
channelConfig.update({key:False})
elif key=='mentions':
return await safeSend(message.channel, text="Please use !add command to set mentions")
else:
return await safeSend(message.channel, text='No such configuration property.')
replyMsg=f"set config {key}={value}"
ret = await redisConn.safeSet(REDIS_KEY_DISCORD_CHANNEL_CONFIG % message.channel.id, channelConfig,
serialize=True)
if not ret:
return await safeSend(message.channel, embed=errors.dbErrorEmbed)
except Exception as e:
replyMsg="set command error"
await safeSend(message.channel, text=replyMsg)
elif userIsManager and message.content.startswith(PREFIX+"restore default"):
replyMsg="This channel's configuration has been restored to default"
ret=await redisConn.setDefaultConfig(message.channel.id)
if not ret:
return await safeSend(message.channel, embed=errors.dbErrorEmbed)
await safeSend(message.channel, text=replyMsg)
# Entry point: construct the client and start the (blocking) event loop.
if __name__ == '__main__':
    bot = VRCBot()
    bot.run(DISCORD_TOKEN)
from __future__ import division
from sys import argv
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from math import exp,log
from scipy.misc import factorial
from time import sleep
import argparse
import matplotlib as mpl
import string
'''
This module is imported by the "leiva_2step" script and its only purpose is to plot things. There are various different variations: a whole series of individual png files plotting one variable after another (function plotter), or a stacked set of some of the most important plots "column_plotter". "int_plotter" is just how the individual interactions E0prime, gprime, delta1prime vary as a function of occupation x.
Please refer to the documentation for the leiva_2step script for more information about the other variables.
'''
# var_list = ['x','xmobile','dS','dSmob','S','V','mu','dxdmu', 'dxmobdmu', 'dH', 'H', 'dG', 'G', 'n1', 'n2'] # All the plots
# var_names = {'x':'x','xmobile':'x$_{r}$','dS':'dS/dx','dSmob':'dS/dx$_{r}$','S':'S','V':'V','mu':r'$\mu$','dxdmu':r'dx/d$\mu$', 'dxmobdmu':r'dx$_{r}$/d$\mu$', 'dH':'dH/dx', 'H':'H', 'dG':'dG/dx', 'G':'G', 'n1':'n1', 'n2':'n2'} # Presentation of variables on plots.
var_list = ['x','dS','S','VkT','VV','mu','dxdmu','dmudx','dH','delta1','g','E0','G','H'] # All the plots
var_names = {'x':'x','dS':'dS/dx','S':'S','VkT':'V/kT','VV':'V/V','mu':r'$\mu$','dxdmu':r'dx/d$\mu$','dmudx':r'd$\mu$/dx','dH':'d$H$/d$x$','delta1':'delta1','g':'g','E0':'e0','G':'G','H':'H'} # Presentation of variables on plots.
# units = {'x':'','xmobile':'','dS' : 'J mol$^{-1}$ K$^{-1}$', 'dSmob' : 'J mol$^{-1}$ K$^{-1}$', 'S' : 'J mol$^{-1}$ K$^{-1}$', 'V' : 'V vs. Li/Li$^{+}$', 'mu' : 'eV', 'dxdmu' : 'eV$^{-1}$', 'dxmobdmu' : 'eV$^{-1}$', 'dH' : 'kJ mol$^{-1}$', 'H' : 'kJ mol$^{-1}$', 'dG' : 'kJ mol$^{-1}$', 'G' : 'kJ mol$^{-1}$', 'n1' : '', 'n2' : ''} # Units for all the plots.
units = {'x':'','dS' : '$2Mk$', 'S' : '$2Mk$', 'VkT' : 'kT','VV' : 'V vs. Li', 'mu' : 'eV', 'dxdmu' : 'eV$^{-1}$','dmudx':'a.u.', 'dH' : 'a.u.','delta1':'kT','delta2':'kT','g':'kT','E0':'kT','G':'G','H':'H'} # Units for all the plots.
# plt.style.use('classic')
# Global matplotlib styling shared by every figure this module produces:
# slightly thicker lines, larger font and inward-pointing ticks.
mpl.rcParams['lines.linewidth'] = 1.5
font = {'size': 16}
mpl.rc('font', **font)
mpl.rcParams['xtick.direction'] = 'in'
mpl.rcParams['ytick.direction'] = 'in'
class Plotting():
def __init__(self,df_dict,long_dict):
self.df_dict = df_dict
print self.df_dict
self.long_dict = long_dict
def plotter(self,loc_value):
for key,value_list in self.long_dict.iteritems():
print 'key,value_list', key, value_list
if key == 'n1':
for value in value_list:
suffix = value[0].split('_')[-1]
plt.plot(self.df_dict['x'],self.df_dict[key+'_'+str(value[1])], label=str(value[1]) + ', n1',linewidth=1.5)
plt.plot(self.df_dict[value[1]]['x'],self.df_dict[key+'_'+str(value[1])], label=str(value[1]) + ', n2',linewidth=1.5)
elif key != 'n2':
for value in value_list:
# plt.plot(df_dict[value[1]]['x'],df_dict[value[1]][str(value[0])], label=str(value[1]))
plt.plot(self.df_dict['x'],self.df_dict[key+'_'+str(value[1])], label=str(value),linewidth=1.5)
plt.xlabel('$x$')
plt.ylabel(str(var_names[key]) + ' / ' + str(units[key]))
if key != 'S':
plt.legend(loc=loc_value,fontsize=16)
# plt.ylim([0,8])
plt.savefig('output/%svsx.png'% str(key),dpi=300)
plt.clf()
if key == 'n1':
for value in value_list:
suffix = value[0].split('_')[-1]
plt.plot(self.df_dict[value[1]]['n1_' + suffix], self.df_dict[value[1]]['H_' + suffix], label=str(value[1]) + ', n1',linewidth=0.75)
plt.plot(self.df_dict[value[1]]['n2_' + suffix], self.df_dict[value[1]]['H_' + suffix], label=str(value[1]) + ', n2',linewidth=0.75)
plt.xlabel('H / kJ mol-1')
plt.ylabel(str(var_names[key]) + ' / ' + str(units[key]))
# voltages=self.long_dict['VV']
# voltage_list=[]
# for entry in voltages:
# voltage_list.append(entry[0])
# print voltage_list
# for key,value_list in self.long_dict.iteritems():
# for n,value in enumerate(value_list):
# plt.plot(self.df_dict[value[1]][voltage_list[n]][1:-2],self.df_dict[value[1]][str(value[0])][1:-2], label=str(value[1]),linewidth=1.5)
# plt.xlabel('E / V vs. Li')
# plt.xlim(0.05,0.25)
# plt.ylabel(str(var_names[key]) + ' / ' + str(units[key]))
# plt.legend(loc=loc_value,fontsize=16)
# plt.ylim([0,8])
# plt.savefig('output/%svsV.png'% str(key),dpi=300)
# plt.clf()
def alt_plotter(self,loc):
for key, df in self.df_dict.iteritems():
plt.plot(df['V_'+str(key)],df['dxdmu_'+str(key)],label=key)
plt.legend(loc=0)
plt.show()
print list(df_dict)
def twobytwo(self,var):
f, ((ax1, ax2),(ax3, ax4)) = plt.subplots(2,2, sharex='col', figsize =(7,7))
axes=(ax1,ax3,ax2,ax4)
var=str(var)
for k in sorted(self.df_dict,key=lambda(k):float(k)):
df=self.df_dict[k]
ax1.plot(df['VV_' + str(k)],df['x'],label='alpha4'+ '= %.3g' % float(k))
ax3.plot(df['VV_' + str(k)],df['dxdmu_'+str(k)],label='alpha4' + '= %.3g' % float(k))
ax2.plot(df['x'],df['dH_'+str(k)],label='alpha4'+'= %.3g' % float(k))
ax4.plot(df['x'],df['dS_'+str(k)],label='alpha4'+ '= %.3g' % float(k))
ax1.set_ylabel('Voltage / V vs. Li')
ax3.set_ylabel('d$x$/d$V$ (arb. units)')
# ax1.get_yaxis().set_label_coords(-0.18,0.5)
# ax2.get_yaxis().set_label_coords(1.22,0.5)
# ax3.get_yaxis().set_label_coords(-0.18,0.5)
# ax4.get_yaxis().set_label_coords(-5,5)
ax2.yaxis.set_label_position('right')
ax4.yaxis.set_label_position('right')
ax2.yaxis.tick_right()
ax4.yaxis.tick_right()
# ax1.xaxis.tick_top()
# ax3.xaxis.tick_top()
# ax2.yaxis.set_ticks_position('both')
# ax4.yaxis.set_ticks_position('both')
# ax1.xaxis.set_ticks_position('both')
# ax3.xaxis.set_ticks_position('both')
ax2.set_ylabel('d$H$/d$x$ / 2Mk')
ax4.set_ylabel('d$S$/d$x$ / 2Mk')
ax4.set_ylim([-5,5])
# ax2.set_ylim([-0.5,6.5])
# ax1.set_yticks(np.arange(3.7,4.3,0.1))
ax1.set_xlim([0.05,0.25])
ax3.set_xlim([0.05,0.25])
ax3.set_xlabel('Voltage vs. Li / V')
# ax1.xaxis.set_label_position('top')
# ax2.xaxis.set_label_position('top')
ax4.set_xlabel('Li content, $x$')
# handles,labels = ax1.get_legend_handles_labels()
# labels,handles = zip(*sorted(zip(labels,handles), key=lambda t:t[0])
ax4.legend(loc=0,fontsize=10,handletextpad=0.1)
for n,ax in enumerate(axes):
if ax == ax1 or ax == ax3:
ax.text(0.83,0.87,'('+string.ascii_lowercase[n]+')',transform=ax.transAxes,size=15,weight='demi')
else:
ax.text(0.05,0.87,'('+string.ascii_lowercase[n]+')',transform=ax.transAxes,size=15,weight='demi')
# ax1.set_ylim([3.65,4.25])
# ax3.set_ylim([-0.5,10.5])
plt.tight_layout()
f.subplots_adjust(hspace=0)
plt.savefig('output/fig2by2.png',dpi=400)
plt.show()
def getkey(self,dict_iteritems):
print dict_iteritems
return(float(dict_iteritems[0].split('_')[1]))
def int_plotter(self):
keys = list(self.df_dict)
for interaction in ['delta1','g','E0']:
for value in keys:
df = self.df_dict[value]
plt.plot(df['x'],df[interaction + '_' + value],label=interaction + ' = ' + value)
plt.legend(fontsize=16)
plt.savefig('interaction_' + interaction + '.png')
def column_plot(self):
f, ((ax1, ax2,ax3)) = plt.subplots(3,1, figsize=(4.5,9),sharex='col')
axes = (ax1, ax2, ax3)
print list(self.long_dict.keys())
key_vals = [k.split('_')[1] for k in self.df_dict.keys() if k.startswith('delta1')]
print self.df_dict.keys(), 'keys!'
for k in key_vals:
lab = '%.3f' % (float(k))
# lab = '$J_{2}$ = ' +k
ax1.plot(self.df_dict['VV_'+str(lab)],self.df_dict['x'],label=lab,linewidth=0.75)
ax2.plot(self.df_dict['VV_'+str(lab)],self.df_dict['dxdmu_'+str(lab)],label=lab,linewidth=0.75)
ax3.plot(self.df_dict['VV_'+str(lab)],self.df_dict['dS_'+str(lab)],label=lab,linewidth=0.75)
ax1.set_ylabel('Li content, $x$')
ax2.set_ylabel('d$x$/d$V$ / kT')
ax3.set_ylabel('d$S$/d$x$ / 2Mk')
ax1.get_yaxis().set_label_coords(-0.1,0.5)
ax2.get_yaxis().set_label_coords(-0.1,0.5)
ax3.get_yaxis().set_label_coords(-0.1,0.5)
ax1.yaxis.set_label_position('right')
ax1.yaxis.tick_right()
ax2.yaxis.set_label_position('right')
ax2.yaxis.tick_right()
ax3.yaxis.set_label_position('right')
ax3.yaxis.tick_right()
# ax1.set_ylim([-0.05,1.05])
# ax2.set_ylim([-0.05,11.3])
# ax3.set_ylim([-25,38])
for n,ax in enumerate(axes):
if ax == ax1:
ax.text(0.03,0.1,'('+string.ascii_lowercase[n]+')',transform=ax.transAxes,size=15,weight='demi')
else:
ax.text(0.03,0.87,'('+string.ascii_lowercase[n]+')',transform=ax.transAxes,size=15,weight='demi')
# ax1.set_xlim(0.05,0.25)
# ax2.set_xlim(0.05,0.25)
# ax3.set_xlim(0.05,0.25)
ax3.set_xlabel('x')
ax1.legend(loc=0,fontsize=13)
plt.tight_layout()
f.subplots_adjust(hspace=0)
plt.savefig('output/column_fig.png',dpi=400)
plt.show()
def double_plot(self):
f, ((ax1, ax2),(ax3,ax4)) = plt.subplots(2,2, figsize=(9,9),sharex='col')
axes = (ax1, ax2, ax3, ax4)
print list(self.long_dict.keys())
for k, df in sorted(self.df_dict.iteritems(),key=self.getkey):
lab = '%.3f' % (float(k))
# lab = '$J_{2}$ = ' +k
ax1.plot(df['VV_' + str(k)],df['x'],label=lab,linewidth=0.75)
ax2.plot(df['VV_' + str(k)],df['dxdmu_'+str(k)],label=lab,linewidth=0.75)
ax3.plot(df['x'],df['dS_'+str(k)],label=lab,linewidth=0.75)
ax4.plot(df['x'],df['dS_'+str(k)],label=lab,linewidth=0.75)
ax1.set_ylabel('Li content, $x$')
ax2.set_ylabel('d$x$/d$V$ / kT')
ax3.set_ylabel('d$S$/d$x$ / 2Mk')
ax1.get_yaxis().set_label_coords(-0.1,0.5)
ax2.get_yaxis().set_label_coords(-0.1,0.5)
ax3.get_yaxis().set_label_coords(-0.1,0.5)
ax1.yaxis.set_label_position('right')
ax1.yaxis.tick_right()
ax2.yaxis.set_label_position('right')
ax2.yaxis.tick_right()
ax3.yaxis.set_label_position('right')
ax3.yaxis.tick_right()
# ax1.set_ylim([-0.05,1.05])
# ax2.set_ylim([-0.05,11.3])
# ax3.set_ylim([-25,38])
for n,ax in enumerate(axes):
if ax == ax1:
ax.text(0.03,0.1,'('+string.ascii_lowercase[n]+')',transform=ax.transAxes,size=15,weight='demi')
else:
ax.text(0.03,0.87,'('+string.ascii_lowercase[n]+')',transform=ax.transAxes,size=15,weight='demi')
ax1.set_xlim(0.05,0.25)
ax2.set_xlim(0.05,0.25)
ax3.set_xlim(0.05,0.25)
ax3.set_xlabel('Voltage / V vs. Li')
ax1.legend(loc=0,fontsize=13)
plt.tight_layout()
f.subplots_adjust(hspace=0)
plt.savefig('output/column_fig.png',dpi=400)
plt.show()
'''
def sub_plotter_voltage(df_dict,long_dict):
f, ((ax1, ax2)) = plt.subplots(1,2, sharex='col', figsize =(7,4))
axes=(ax1,ax2)
print list(long_dict.keys())
for key, df in sorted(df_dict.iteritems()):
ax1.plot(df['V_' + str(key)],df['dxdmu_'+str(key)],label='$J_{1}=$'+'%.3g' % float(key))
ax2.plot(df['V_' + str(key)],df['dS_'+str(key)],label='$J_{1}=$'+ '%.3g' % float(key))
# ax1.get_yaxis().set_label_coords(-0.5,20.5)
# ax2.get_yaxis().set_label_coords(-35,38)
ax2.yaxis.set_label_position('right')
ax2.yaxis.tick_right()
ax1.set_xticks(np.arange(3.8,4.3,0.1))
ax2.set_xticks(np.arange(3.8,4.3,0.1))
ax1.legend(loc=0,fontsize=10.5,handletextpad=0.1)
ax1.text(0.83,0.87,'(a)',transform=ax1.transAxes,size=15,weight='demi')
ax2.text(0.08,0.87,'(b)',transform=ax2.transAxes,size=15,weight='demi')
for axis in axes:
axis.xaxis.set_ticks_position('both')
axis.yaxis.set_ticks_position('both')
ax1.set_xlim([3.75,4.25])
ax2.set_xlim([3.75,4.25])
ax1.set_ylim([-0.05,12.5])
ax2.set_ylim([-35,38])
ax1.set_xlabel('Voltage vs. Li/Li$^{+}$ (V)')
ax2.set_xlabel('Voltage vs. Li/Li$^{+}$ (V)')
ax1.set_ylabel('d$x$/d$V$ (V$^{-1}$)')
ax2.set_ylabel('d$S$/d$x$ (J mol$^{-1}$ K$^{-1}$)')
plt.tight_layout()
plt.savefig('output/modelfig1_voltage.png',dpi=400)
plt.show()
def sub_plotter_rem(df_dict,long_dict):
f, ((ax1, ax2),(ax3, ax4)) = plt.subplots(2,2, sharex='col', figsize =(7,6))
axes=(ax1,ax3,ax2,ax4)
print list(long_dict.keys())
for key, df in sorted(df_dict.iteritems()):
ax1.plot(df['xmobile'],df['V_' + str(key)],label='$J_{1}=$'+key)
ax3.plot(df['xmobile'],df['S_' +str(key)],label='$J_{1}=$'+key)
ax2.plot(df['V_' + str(key)],df['dxmobdmu_'+str(key)],label='$J_{1}=$'+key)
ax4.plot(df['V_' + str(key)],df['dSmob_'+str(key)],label='$J_{1}=$'+key)
ax1.set_ylabel('Voltage vs. Li/Li$^{+}$ (V)')
ax3.set_ylabel('S (J mol$^{-1}$ K$^{-1}$)')
ax2.set_ylabel('d$x_{r}$/d$V$ (V$^{-1}$)')
ax1.get_yaxis().set_label_coords(-0.18,0.5)
ax2.get_yaxis().set_label_coords(1.22,0.5)
ax3.get_yaxis().set_label_coords(-0.18,0.5)
ax4.get_yaxis().set_label_coords(1.22,0.5)
ax2.yaxis.set_label_position('right')
ax2.yaxis.tick_right()
ax4.yaxis.tick_right()
for axis in axes:
axis.xaxis.set_ticks_position('both')
axis.yaxis.set_ticks_position('both')
ax4.yaxis.set_label_position('right')
ax4.set_ylabel('d$S$/d$x_{r}$ (J mol$^{-1}$ K$^{-1}$)')
ax4.set_xticks(np.arange(3.8,4.3,0.1))
ax4.set_ylim([-35,38])
ax2.set_ylim([-0.5,10.5])
ax1.set_yticks(np.arange(3.8,4.3,0.1))
ax3.set_xlabel('Removable Li content, $x_{r}$$')
ax4.set_xlabel('Voltage vs. Li/Li$^{+}$ (V)')
ax1.legend(loc=0,fontsize=10.5,handletextpad=0.1)
for n,ax in enumerate(axes):
if ax == ax1 or ax == ax3:
ax.text(0.83,0.87,'('+string.ascii_lowercase[n]+')',transform=ax.transAxes,size=15,weight='demi')
else:
ax.text(0.05,0.87,'('+string.ascii_lowercase[n]+')',transform=ax.transAxes,size=15,weight='demi')
ax1.set_ylim([3.75,4.25])
ax3.set_ylim([-0.5,6.5])
# plt.tight_layout()
f.subplots_adjust(hspace=0)
plt.savefig('output/modelfig1.png',dpi=400)
plt.show()
def double_column_plot(df_dict,long_dict):
f, ((ax1, ax2),(ax3,ax4),(ax5,ax6)) = plt.subplots(3,2, figsize=(7,8), sharex='col')
axes=(ax1,ax2,ax3,ax4,ax5,ax6)
print list(long_dict.keys())
for k, df in sorted(df_dict.iteritems(),key=getkey):
overlit_frac = float(k)
lab = '$y = $' + '%.1f' % overlit_frac
ax1.plot(df['V_' + str(k)],df['x'],label=lab)
ax3.plot(df['V_' + str(k)],df['dxdmu_'+str(k)],label=lab)
ax5.plot(df['V_' + str(k)],df['dS_'+str(k)],label=lab)
ax2.plot(df['V_' + str(k)],df['xmobile'],label=lab)
ax4.plot(df['V_' + str(k)],df['dxmobdmu_'+str(k)],label=lab)
ax6.plot(df['V_' + str(k)],df['dSmob_'+str(k)],label=lab)
ax1.set_ylabel('Total 8a Li, $x$')
ax3.set_ylabel('d$x$/d$V$ (V$^{-1}$)')
ax5.set_ylabel('d$S$/d$x$ (J mol$^{-1}$ K$^{-1}$)')
ax2.set_ylabel('Removable Li, $x_{r}$')
ax4.set_ylabel('d$x_{r}$/d$V$ (V$^{-1}$)')
ax6.set_ylabel('d$S$/d$x_{r}$ (J mol$^{-1}$ K$^{-1}$)')
ax1.get_yaxis().set_label_coords(-0.15,0.5)
ax2.get_yaxis().set_label_coords(1.18,0.5)
ax3.get_yaxis().set_label_coords(-0.15,0.5)
ax4.get_yaxis().set_label_coords(1.18,0.5)
ax5.get_yaxis().set_label_coords(-0.15,0.5)
ax6.get_yaxis().set_label_coords(1.18,0.5)
for axis in axes:
axis.xaxis.set_ticks_position('both')
axis.yaxis.set_ticks_position('both')
axis.set_xticks(np.arange(3.9,4.3,0.1))
ax1.set_ylim([-0.05,1.05])
ax3.set_ylim([-0.05,11])
ax5.set_ylim([-25,38])
ax2.set_ylim([-0.05,1.05])
ax4.set_ylim([-0.05,11])
ax6.set_ylim([-25,38])
ax2.yaxis.set_label_position('right')
ax4.yaxis.set_label_position('right')
ax6.yaxis.set_label_position('right')
ax2.yaxis.tick_right()
ax4.yaxis.tick_right()
ax6.yaxis.tick_right()
ax2.yaxis.set_ticks_position('both')
ax4.yaxis.set_ticks_position('both')
ax6.yaxis.set_ticks_position('both')
# ax3.set_xticks(np.arange(3.8,4.3,0.0.05))
ax5.set_xlabel('Voltage vs. Li/Li$^{+}$ (V)')
ax6.set_xlabel('Voltage vs. Li/Li$^{+}$ (V)')
ax2.legend(loc=0,fontsize=10.5,handletextpad=0.1)
for n,ax in enumerate(axes):
if ax == ax1 or ax == ax2:
ax.text(0.05,0.1,'('+string.ascii_lowercase[n]+')',transform=ax.transAxes,size=15,weight='demi')
else:
ax.text(0.05,0.87,'('+string.ascii_lowercase[n]+')',transform=ax.transAxes,size=15,weight='demi')
plt.tight_layout()
f.subplots_adjust(hspace=0)
plt.savefig('output/double_column_fig.png',dpi=400)
plt.show()
'''
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 17 08:44:00 2017
@author: ian
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import DataIO as io
# L4-processed half-hourly OzFlux data for the Gatum Pasture site, 2016.
path = ('/home/ian/OzFlux/Sites/GatumPasture/Data/Processed/2016/'
'GatumPasture_2016_L4.nc')
# Pull soil water (Sws), latent heat (Fe), shortwave down (Fsd), vapour
# pressure deficit (VPD) and net radiation (Fn) into a pandas DataFrame.
df = io.OzFluxQCnc_to_data_structure(path,
var_list = ['Sws', 'Fe', 'Fsd', 'VPD',
'Fn'],
output_structure = 'pandas')
# Drop nocturnal and missing data
df = df[df.Fsd > 5]
#df.drop('Fsd', axis = 1, inplace = True)
df.dropna(inplace = True)
# Evapotranspiration from latent heat flux; presumably W m-2 -> mm d-1 via
# 86.4 / latent heat of vaporisation (~2260 kJ kg-1) -- TODO confirm units.
df['ET'] = df.Fe * 86.4 / 2260
# Bin VPD into 30 quantile categories, then average within each bin.
df['VPD_cat'] = pd.qcut(df.VPD, 30, labels = np.linspace(1, 30, 30))
vpd_df = df.groupby(df.VPD_cat).mean()
sws_df = df.groupby('Sws').mean()
# Two stacked panels: evaporative fraction vs Sws (top) and vs VPD (bottom),
# each with a secondary axis for the complementary driver.
fig, (ax1, ax2) = plt.subplots(2, 1, figsize = (12, 12))
fig.patch.set_facecolor('white')
ax1.xaxis.set_ticks_position('bottom')
ax1.spines['top'].set_visible(False)
ax1.tick_params(axis = 'x', labelsize = 14)
ax1.tick_params(axis = 'y', labelsize = 14)
ax1.set_xlabel('$Sws\/\/(m^3\/m^{-3})$', fontsize = 18)
ax1.set_ylabel('$EF\/\/(F_e\//F_n)$', fontsize = 18)
ax1b = ax1.twinx()
ax1b.spines['top'].set_visible(False)
ax1b.tick_params(axis = 'y', labelsize = 14)
ax1b.set_ylabel('$VPD\/\/(kPa)$', fontsize = 18)
ax2.xaxis.set_ticks_position('bottom')
ax2.spines['top'].set_visible(False)
ax2.tick_params(axis = 'x', labelsize = 14)
ax2.tick_params(axis = 'y', labelsize = 14)
ax2.set_xlabel('$VPD\/\/(kPa)$', fontsize = 18)
ax2.set_ylabel('$EF\/\/(F_e\//F_n)$', fontsize = 18)
ax2b = ax2.twinx()
ax2b.spines['top'].set_visible(False)
ax2b.tick_params(axis = 'y', labelsize = 14)
ax2b.set_ylabel('$Sws\/\/(m^3\/m^{-3})$', fontsize = 18)
ser_1 = ax1.plot(sws_df.index, sws_df.Fe / sws_df.Fn,
label = 'EF', lw = 2, color = 'black')
ser_1b = ax1b.plot(sws_df.index, sws_df.VPD,
label = 'VPD', ls = ':', lw = 2, color = 'black')
# Merge the two axes' line handles into a single legend.
a1_ser = ser_1 + ser_1b
labs = [ser.get_label() for ser in a1_ser]
ax1.legend(a1_ser, labs, frameon = False, fontsize = 18, loc = [0.8, 0.2])
# NOTE(review): the extra "* 1800 / 2260" factor makes this panel differ from
# the EF plotted above - confirm which definition is intended.
ser_2 = ax2.plot(vpd_df.VPD, vpd_df.Fe / vpd_df.Fn * 1800 / 2260,
label = 'EF', lw = 2, color = 'black')
ser_2b = ax2b.plot(vpd_df.VPD, vpd_df.Sws,
label = 'Sws', ls = ':', lw = 2, color = 'black')
a2_ser = ser_2 + ser_2b
labs = [ser.get_label() for ser in a2_ser]
ax2.legend(a2_ser, labs, frameon = False, fontsize = 18, loc = [0.8, 0.2])
|
# Write a program that computes the sum of the squares of the numbers in the
# list numbers. For example a call with numbers = [2, 3, 4] should print
# 4+9+16 which is 29.
numbers = [2, 1, 2]
# The built-in sum over a generator replaces the manual accumulator loop,
# and "total" describes the value better than "count" did.
total = sum(value ** 2 for value in numbers)
print(numbers, "which is", total)
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 23:34:54 2020
@author: sumant
"""
# Student Progress Report: read six subject marks interactively, then print
# the per-subject marks, the total and the average percentage (out of 600).
print("Welcome")
sub = ["telugu","hindi","english","maths","science","social"]
# One integer mark per subject, prompted in order.
marks = [int(input(f"Enter {name} marks: ")) for name in sub]
total = sum(marks)
avg = (total/600)*100
print("")
print("Details of Student Progress...!")
print("Student marks : ")
for name, mark in zip(sub, marks):
    print(f"{name} : {mark}")
print("Student Total Marks: ",total)
print("Student Average: ",round(avg,2))
#!/usr/bin/env python
"""
Author: Patrick Monnahan
Purpose: This script generates commands for svtools genotype and svtools copynumber. These commands are meant to be run subsequent to generating a merged vcf via svtools lsort followed by lmerge. copynumber must be run subsequently to genotype and will run only if the latter executes successfully. The corresponding shell script to run these commands is SVtools_Genotype.sh.
Takes the following arguments:
-b : REQUIRED: Full path to directory with input bam files used for speedseq sv
-o : REQUIRED: Full path to output directory. Results for svtools genotype and svtools copynumber will be stored within subdirectories gt and cn, respectively. If these already exist and contain output from a previous run, these files will be overwritten.
-v : REQUIRED: merged vcf file resulting from svtools lmerge. The portion of the prefix denoting the reference should match the specification in the bam file names.
-c : REQUIRED: path to coordinates file created via "create_coordinates -i merged.vcf -o coord_file"
-w : window size in which CNVnator will calculate depth
-s : path to speedseq directory
"""
# Import all necessary modules
import os
import argparse
import time
# Specify arguments to be read from the command line
parser = argparse.ArgumentParser(description='This script generates commands for svtools genotype and svtools copynumber. These commands are meant to be run subsequent to generating a merged vcf via svtools lsort followed by lmerge. copynumber must be run subsequently to genotype and will run only if the latter executes successfully. The corresponding shell script to run these commands is SVtools_Genotype.sh.')
parser.add_argument('-b', type=str, metavar='bam_directory', required=True, help='Full path to directory with input bam files used for speedseq sv')
parser.add_argument('-o', type=str, metavar='output_directory', required=True, help='Full path to output directory. Results for svtools genotype and svtools copynumber will be stored within subdirectories gt and cn, respectively. If these already exist and contain output from a previous run, these files will be overwritten')
# parser.add_argument('-l', type=str, metavar='lumpy_output_directory', required=True, help='Directory that contians the output from lumpy/speedseq sv')
parser.add_argument('-v', type=str, metavar='merged_vcf', required=True, help='merged vcf file resulting from svtools lmerge. The portion of the prefix denoting the reference should match the specification in the bam file names.')
parser.add_argument('-c', type=str, metavar='coord_file', required=True, help='path to coordinates file created via "create_coordinates -i merged.vcf -o coord_file"')
parser.add_argument('-w', type=str, metavar='window_size', default="100", help='window size in which CNVnator will calculate depth')
parser.add_argument('-s', type=str, metavar='path_to_speedseq_directory', default="/home/hirschc1/pmonnaha/software/speedseq/")
args = parser.parse_args()
vcf = args.v
# Prepare necessary paths: normalise every directory argument to end in "/".
if not args.o.endswith("/"): args.o += "/"
if not os.path.exists(args.o):
    print("Output directory does not exist. Making output directories")
    os.mkdir(args.o)
if os.path.exists(args.o + "gt") or os.path.exists(args.o + "cn"):  # Check if necessary subdirectory structure already exists
    print("Output subdirectories already exist. !!Running the resulting commands will overwrite files present in these subdirectories!!")
else:
    os.mkdir(args.o + "gt")  # Create subdirectories if they don't already exist
    os.mkdir(args.o + "cn")
if not args.s.endswith("/"): args.s += "/"
if not args.b.endswith("/"): args.b += "/"
# Main loop to write commands, one per full bam that matches the reference.
for bam in os.listdir(args.b):
    if bam.endswith(".bam") and "splt" not in bam and "disc" not in bam:  # We only want the full bams, not the discordant or split read bams
        # Naming convention assumed: <sample>_<ref>.bam -- TODO confirm.
        ref = bam.split("_")[1].split(".")[0]
        sample = bam.split("_")[0]
        if ref in vcf:  # Only use the bams that pertain to the same reference genome in the merged vcf file
            # BUG FIX: bam.strip(".bam") removed the *characters* {'.','b','a','m'}
            # from both ends (mangling names like "abc_ref.bam"); slicing off the
            # 4-character suffix removes exactly the ".bam" extension.
            stem = bam[:-len(".bam")]
            cmd = ''
            if args.v.endswith(".gz"): cmd += 'z'  # use zcat for gzipped vcfs
            cmd += "cat " + args.v + " | vawk --header '{ $6=\".\"; print }' | svtools genotype -B " + args.b + bam + " -l " + args.b + bam + r".json | sed 's/PR...=[0-9\.e,-]*\(;\)\{0,1\}\(\t\)\{0,1\}/\2/g' - > " + args.o + "gt/" + stem + ".vcf && svtools copynumber --cnvnator " + args.s + "bin/cnvnator -s " + sample + " -w " + args.w + " -r " + args.o + stem + "_tmp/cnvnator-temp/" + bam + ".hist.root -c " + args.c + " -i " + args.o + "gt/" + stem + ".vcf > " + args.o + "cn/" + stem + ".vcf"
            print(cmd)
|
from excel_handler import create_workbook, worksheet_timeline, worksheet_users, worksheet_places
from fullArchive import get_all
# Target account and full-archive search window to export.
username = 'user'
start_time = "2006-03-21T00:00:00.000Z"
end_time = "2021-05-31T00:00:00.000Z"
max_results = 500  # results per request page -- presumably the API maximum; confirm
# One workbook per account, with separate sheets for tweets, users and places.
workbook = create_workbook(username + '.xlsx')
tweets, users, places = get_all(username, max_results, start_time, end_time)
worksheet_timeline(tweets, workbook, username)
worksheet_users(users, workbook, 'users_' + username)
worksheet_places(places, workbook, 'places_' + username)
workbook.close()
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 24 13:19:18 2015
@author: HSH
"""
class Solution(object):
    def generateMatrix(self, n):
        """Return an n x n matrix filled with 1..n*n in clockwise spiral order.

        :type n: int
        :rtype: List[List[int]]
        """
        grid = [[0] * n for _ in range(n)]
        counter = 1
        # Fill one concentric ring per iteration, working inwards.
        for layer in range(n // 2):
            hi = n - 1 - layer
            # Top edge, left to right (excluding the last column).
            for col in range(layer, hi):
                grid[layer][col] = counter
                counter += 1
            # Right edge, top to bottom (excluding the last row).
            for row in range(layer, hi):
                grid[row][hi] = counter
                counter += 1
            # Bottom edge, right to left (excluding the first column).
            for col in range(hi, layer, -1):
                grid[hi][col] = counter
                counter += 1
            # Left edge, bottom to top (excluding the first row).
            for row in range(hi, layer, -1):
                grid[row][layer] = counter
                counter += 1
        # Odd n leaves a single centre cell untouched by the ring loop.
        if n % 2 == 1:
            mid = n // 2
            grid[mid][mid] = counter
        return grid
# Rotate ply file to normalized position
# based at points on symmetry axis
# MB
import math
from math import *
import sys
import numpy
# Main function
def main():
    """Normalize a .ply mesh: parse it, shift it to the origin, and rotate it
    into a canonical orientation based on user-supplied symmetry points.

    Command line: <ply file> <symmetry points file> <output ply file>
    """
    ply_path = sys.argv[1]
    sym_path = sys.argv[2]
    # Parse header metadata: header length, vertex/face counts, colour flag.
    header_len, vertex_count, face_count, color_count = extracting_header(ply_path)
    # The symmetry points seed the vertex list so they get transformed too.
    sym_points, vertices = extracting_symmetry_points(sym_path)
    # Append every mesh vertex (and its colour, when present) to the lists.
    vertices, colors = extracting_coordinates(ply_path,
                                              header_len, vertex_count, face_count,
                                              vertices, color_count)
    # Translate so the third symmetry point becomes the origin; this kicks
    # off the z/x/y rotation chain, which writes the rotated points to disk.
    shift_to_zero(sym_points, vertices)
    # Reassemble the final .ply file around the rotated coordinates.
    write_ply_file(ply_path, header_len, vertex_count, face_count,
                   colors, vertices)
# Function to extract all the values of the header of the ply file
def extracting_header(name_file_ply):
    """Parse the header of an ASCII .ply file.

    Returns ``(var_header, var_vertex_nm, var_face_nm, var_col)``:
    the 0-based index of the ``end_header`` line, the vertex count and the
    face count (both as strings, exactly as written in the file), and the
    number of colour channels declared (0 when there are no colours).
    Fields missing from a malformed header come back as None instead of
    raising UnboundLocalError as the previous version did.
    """
    var_col = 0   # number of colour properties (red/green/blue) found
    count = 0     # index of the current non-empty header line
    var_header = var_vertex_nm = var_face_nm = None
    # 'with' guarantees the handle is closed even if parsing raises.
    with open(name_file_ply) as file_ply:
        # Headers are short; 40 lines is a generous upper bound.
        for _ in range(0, 40):
            readheader = file_ply.readline().strip().split()
            if len(readheader) == 0:
                continue
            if readheader[0] == 'end_header':
                var_header = count
                break  # nothing after end_header belongs to the header
            # Count the colour channels declared as vertex properties.
            if 'red' in readheader:
                var_col += 1
            if 'blue' in readheader:
                var_col += 1
            if 'green' in readheader:
                var_col += 1
            # 'element' lines carry the vertex / face counts.
            if readheader[0] == "element" and readheader[1] == "vertex":
                var_vertex_nm = readheader[2]
            if readheader[0] == "element" and readheader[1] == "face":
                var_face_nm = readheader[2]
            count += 1
    return var_header, var_vertex_nm, var_face_nm, var_col
# Function for extracting the values of the symmetry points file
def extracting_symmetry_points(sym_points_file):
    """Read the symmetry-points file.

    Each line holds one point as whitespace-separated fields. Returns
    ``(listsym, listtotal_vertex)`` — two independent lists of the parsed
    points; the second list is later extended with the mesh vertices.
    (Previous version parsed every line three times and left an unused
    local, and did not close the file on error.)
    """
    listsym = []           # the symmetry points themselves
    listtotal_vertex = []  # seeded with the symmetry points; vertices appended later
    with open(sym_points_file) as sympoints:
        for raw_line in sympoints:
            parts = raw_line.strip().split()
            listsym.append(parts)
            # Append a copy so the two lists never share mutable entries,
            # matching the original behavior of splitting twice.
            listtotal_vertex.append(list(parts))
    return listsym, listtotal_vertex
# Function to extract the coordinates of the ply file
def extracting_coordinates(name_file_ply, var_header, var_vertex_nm, var_face_nm,
                           listtotal_vertex, var_col):
    """Collect the vertex coordinates (and colours, if any) of a .ply file.

    Vertex lines sit between the header and the face section. For each one,
    fields 0-2 (x, y, z, as strings) are appended to ``listtotal_vertex``;
    when ``var_col`` is non-zero, fields 3-5 are appended to a colour list.

    Returns ``(listtotal_vertex, listtotal_colors)``.
    (Previous version assigned the None result of list.append to throwaway
    names and never closed the file on error.)
    """
    listtotal_colors = []
    first_vertex = int(var_header) + 1                  # index of first vertex line
    last_vertex = int(var_header) + int(var_vertex_nm)  # index of last vertex line
    total_lines = int(var_header) + int(var_vertex_nm) + int(var_face_nm) + 1
    with open(name_file_ply) as ply_file:
        for line_no in range(total_lines):
            fields = ply_file.readline().strip().split()
            if first_vertex <= line_no <= last_vertex:
                listtotal_vertex.append(fields[0:3])   # x, y, z as strings
                if var_col != 0:
                    listtotal_colors.append(fields[3:6])
    return listtotal_vertex, listtotal_colors
# Function to calculate the values in the Z rotation matrix
def Rz_matrix(z_angle):
    """Return the 3x3 rotation matrix for *z_angle* degrees about the Z axis."""
    c = cos(math.radians(z_angle))
    s = sin(math.radians(z_angle))
    return [[c, -s, 0.0],
            [s, c, 0.0],
            [0.0, 0.0, 1.0]]
# Function to calculate the new coordinates rotated with Z rotation matrix
def Z_rotation(point2, z_angle):
    """Return 3-vector *point2* rotated by *z_angle* degrees about the Z axis."""
    matrix = Rz_matrix(z_angle)
    # Matrix-vector product: one output component per matrix row.
    return [sum([matrix[row][col] * point2[col] for col in range(3)])
            for row in range(3)]
# Function to calculate the values in the X rotation matrix
def Rx_matrix(x_angle):
    """Return the 3x3 rotation matrix for *x_angle* degrees about the X axis."""
    c = cos(math.radians(x_angle))
    s = sin(math.radians(x_angle))
    return [[1, 0, 0],
            [0, c, -s],
            [0, s, c]]
# Function to calculate the new coordinates rotated with X rotation matrix
def X_rotation(point3, x_angle):
    """Return 3-vector *point3* rotated by *x_angle* degrees about the X axis."""
    matrix = Rx_matrix(x_angle)
    # Matrix-vector product: one output component per matrix row.
    return [sum([matrix[row][col] * point3[col] for col in range(3)])
            for row in range(3)]
# Function to calculate the values in the Y rotation matrix
def Ry_matrix(y_angle):
    """Return the 3x3 rotation matrix for *y_angle* degrees about the Y axis."""
    c = cos(math.radians(y_angle))
    s = sin(math.radians(y_angle))
    return [[c, 0.0, s],
            [0.0, 1.0, 0.0],
            [-s, 0.0, c]]
# Function to calculate the new coordinates rotated with Y rotation matrix
def Y_rotation(point4, y_angle):
    """Return 3-vector *point4* rotated by *y_angle* degrees about the Y axis."""
    matrix = Ry_matrix(y_angle)
    # Matrix-vector product: one output component per matrix row.
    return [sum([matrix[row][col] * point4[col] for col in range(3)])
            for row in range(3)]
#Function to shift the object to the zeropoint
def shift_to_zero(listsym, listtotal_vertex):
    """Translate every vertex so the third symmetry point becomes the origin,
    then hand the shifted point cloud to the Z-axis rotation step."""
    zeropoint = listsym[2]  # third symmetry point defines the new origin
    zero_x = float(zeropoint[0])
    zero_y = float(zeropoint[1])
    zero_z = float(zeropoint[2])
    shifted = []
    for vertex in listtotal_vertex:
        shifted.append([float(vertex[0]) - zero_x,
                        float(vertex[1]) - zero_y,
                        float(vertex[2]) - zero_z])
    # Continue the pipeline: rotate around the Z axis next.
    rotate_z_axis(shifted)
# Function for rotating the object around z axis
def rotate_z_axis(listdata3):
    """Rotate the point cloud about the Z axis so symmetry points 1 and 3
    become vertically aligned, then pass the result to rotate_x_axis.

    ``listdata3`` is a list of [x, y, z] floats whose first entries are the
    symmetry points (see extracting_symmetry_points / shift_to_zero).
    """
    # If object is upside down, 180 degrees rotation around the Z axis is needed.
    listdatatemp = []
    # check if object is upside down
    if listdata3[0][1] < listdata3[2][1]:
        angle = 180
        # rotate 180 degrees.
        for coordinates in range(0,len(listdata3)):
            listdatatemp.append(Z_rotation(listdata3[coordinates], angle))
        listdata3 = listdatatemp # new coordinates
    # calculate angle rotation z
    # NOTE(review): raises ZeroDivisionError when points 1 and 3 share the
    # same y coordinate (len_z_b == 0) -- confirm inputs exclude that case.
    len_z_a = listdata3[0][0] - listdata3[2][0]
    len_z_b = listdata3[0][1] - listdata3[2][1]
    z_angle = (math.degrees(math.atan(len_z_a/len_z_b)))
    # calculate new coordinates with rotation matrix of Z
    listdata4 = []
    for coordinates in range(0, len(listdata3)):
        listdata4.append(Z_rotation(listdata3[coordinates], z_angle)) # add new coordinates to list
    listdata3 = []
    # to function 'rotate x axis'
    rotate_x_axis(listdata4)
# Function for rotating the object around x axis
def rotate_x_axis(listdata4):
    """Rotate the point cloud about the X axis, then pass the result on to
    rotate_y_axis.
    """
    #calculate angle rotation x
    # NOTE(review): the two [2][0] indices look suspicious -- by symmetry
    # with the z/y steps one would expect listdata4[2][2] and listdata4[2][1]
    # here; confirm against the intended geometry before changing anything.
    len_x_a = listdata4[0][2] - listdata4[2][0]
    len_x_b = listdata4[0][1] - listdata4[2][0]
    x_angle = -(math.degrees(math.atan(len_x_a/len_x_b)))
    # calculate new coordinates with rotation matrix of X
    listdata5 = []
    for coordinates in range(0, len(listdata4)):
        listdata5.append(X_rotation(listdata4[coordinates], x_angle)) # add new coordinates to list
    listdata4 = []
    # to function 'rotate y axis'
    rotate_y_axis(listdata5)
# Function for rotating the object around y axis
def rotate_y_axis(listdata5):
    """Rotate the point cloud about the Y axis (flipping it 180 degrees when
    it faces backwards), then write the result via write_new_coordinates.

    Indexes points [1], [2] and [3], so the list must start with at least
    four symmetry points.
    """
    #calculate angle rotation y
    # NOTE(review): ZeroDivisionError when the two z coordinates coincide
    # (len_y_b == 0) -- confirm inputs exclude that case.
    len_y_a = (listdata5[1][0] - listdata5[2][0])
    len_y_b = (listdata5[1][2] - listdata5[2][2])
    y_angle = -(math.degrees(math.atan(len_y_a/len_y_b)))
    # calculate new coordinates with rotation matrix of Y
    listdata6 = []
    for coordinates in range(0, len(listdata5)):
        listdata6.append(Y_rotation(listdata5[coordinates], y_angle))
    listdata5 = []
    #Rotate 180 degrees around y axis when object is backwards.#
    listdatatemp = []
    if listdata6[1][0] < listdata6[3][0]: #point sym2_x < point sym 3
        angle = 180
        for coordinates in range(0,len(listdata6)):
            listdatatemp.append(Y_rotation(listdata6[coordinates], angle)) # add new coordinates to list
        listdata6 = listdatatemp
    # to function 'write new coordinates'
    write_new_coordinates(listdata6)
# Function write the new coordinates to outputfile.
def write_new_coordinates(listdata6):
    """Write the rotated coordinates to 'outputrotate_points.ply', one
    "%.7f %.7f %.7f" line per point.
    """
    # 'with' guarantees the file is flushed and closed; the previous version
    # never closed the handle, risking truncated output on interpreter exit.
    with open('outputrotate_points.ply', 'w') as output4:
        for point in listdata6:
            output4.write("%.7f %.7f %.7f\n" % (point[0], point[1], point[2]))
# Function to write the new ply file with
def write_ply_file(name_file_ply, var_header, var_vertex_nm, var_face_nm,
                   listtotal_colors, listtotal_vertex):
    """Assemble the output .ply file (path taken from sys.argv[3]).

    Copies the header of the original file verbatim, substitutes the rotated
    coordinates from 'outputrotate_points.ply' (re-attaching the original
    per-vertex colours), and copies the face section through unchanged.
    """
    # 'with' closes all three handles even on error; the previous version
    # leaked the points file and the original ply file.
    with open(sys.argv[3], 'w') as outfile_rotating2, \
            open('outputrotate_points.ply', 'r') as pointsfile, \
            open(name_file_ply) as file2:
        counter = 0  # index into listtotal_colors
        for line in range(0, (int(var_header) + int(var_vertex_nm) + int(var_face_nm) + 1)):
            line2 = file2.readline()
            if line <= (int(var_header)):  # copy the header verbatim
                outfile_rotating2.write('%s' % (line2,))
            if line == int(var_header):  # header done: emit rotated vertices
                line2 = file2.readline()
                for vertex in range(0, len(listtotal_vertex)):
                    line3 = pointsfile.readline().strip()
                    # NOTE(review): skips the first 4 entries -- presumably
                    # the symmetry points prepended to the vertex list, not
                    # real mesh vertices; confirm the sym file always holds 4.
                    if vertex >= 4:
                        outfile_rotating2.write('%s %s %s %s\n' %
                                                (line3, listtotal_colors[counter][0],
                                                 listtotal_colors[counter][1],
                                                 listtotal_colors[counter][2]))
                        counter += 1
            if line == (int(var_vertex_nm) + int(var_header) - 1):  # face section
                for face in range(0, int(var_face_nm)):
                    line2 = file2.readline()
                    outfile_rotating2.write('%s' % (line2,))
                counter = 0
main()
|
# -*- coding: utf-8 -*-
def compute(a, b):
    """Print and return the number of non-overlapping occurrences of
    substring *b* in string *a* (str.count semantics)."""
    occurrences = a.count(b)
    print("{:} occurs {:} time(s)".format(b, occurrences))
    return occurrences
# Read the haystack string and the needle from stdin, then report the count.
a=input()
b=input()
compute(a,b)
import ast
class Attachment(object):
    """Value object describing a single attachment slot.

    Attributes are exposed through validating properties:
      * AttachmentName / AttachmentLabel -- must be str.
      * IsTypeFile / IsTypeImage / IsOptional -- must parse as bool via
        ast.literal_eval, so real booleans or the strings "True"/"False"
        are accepted.
    Invalid values raise ValueError.
    """
    def __init__(self):
        # BUG FIX: this was misspelled __inti__, so it never ran and reading
        # any property before its setter raised AttributeError.
        self._AttachmentName = ""
        self._AttachmentLabel = ""
        self._IsTypeFile = False
        self._IsTypeImage = False
        self._IsOptional = False
    @staticmethod
    def _as_bool(value, field_name):
        # Shared validator for the three boolean properties: parse the value
        # as a Python literal and require a genuine bool.
        parsed = ast.literal_eval(str(value))
        if not isinstance(parsed, bool):
            raise ValueError('%s must be type Boolean' % field_name)
        return parsed
    @property
    def AttachmentName(self):
        return self._AttachmentName
    @AttachmentName.setter
    def AttachmentName(self, attachmentName):
        if type(attachmentName) is not str:
            raise ValueError('AttachmentName must be type String')
        self._AttachmentName = str(attachmentName)
    @property
    def AttachmentLabel(self):
        return self._AttachmentLabel
    @AttachmentLabel.setter
    def AttachmentLabel(self, attachmentLabel):
        if type(attachmentLabel) is not str:
            raise ValueError('AttachmentLabel must be type String')
        self._AttachmentLabel = str(attachmentLabel)
    @property
    def IsTypeFile(self):
        return self._IsTypeFile
    @IsTypeFile.setter
    def IsTypeFile(self, isTypeFile):
        self._IsTypeFile = self._as_bool(isTypeFile, 'IsTypeFile')
    @property
    def IsTypeImage(self):
        return self._IsTypeImage
    @IsTypeImage.setter
    def IsTypeImage(self, isTypeImage):
        self._IsTypeImage = self._as_bool(isTypeImage, 'IsTypeImage')
    @property
    def IsOptional(self):
        return self._IsOptional
    @IsOptional.setter
    def IsOptional(self, isOptional):
        self._IsOptional = self._as_bool(isOptional, 'IsOptional')
|
import datetime
import io
import re
from unittest import mock
import arrow
import freezegun
import pytest
from botocore.exceptions import ClientError
import keg_storage.backends as backends
from keg_storage.backends.base import (
FileMode,
FileNotFoundInStorageError,
ListEntry,
ShareLinkOperation,
)
@mock.patch('keg_storage.backends.s3.boto3', autospec=True, spec_set=True)
class TestS3Storage:
    """Unit tests for the S3 storage backend.

    The class-level patch replaces boto3 for every test method (received as
    ``m_boto``), so no real AWS calls are made: tests drive the backend and
    assert against the mocked client's calls and canned return values.
    """
    def test_init_sets_up_correctly(self, m_boto):
        """Constructor forwards credentials/region into a boto3 session."""
        s3 = backends.S3Storage('bucket', aws_region='us-east-1', aws_access_key_id='key',
                                aws_secret_access_key='secret', name='test')
        assert s3.name == 'test'
        assert s3.bucket == 'bucket'
        m_boto.session.Session.assert_called_once_with(
            aws_access_key_id='key',
            aws_secret_access_key='secret',
            profile_name=None,
            region_name='us-east-1'
        )
    def test_list(self, m_boto):
        """A single non-truncated response is mapped to ListEntry objects."""
        s3 = backends.S3Storage('bucket', aws_region='us-east-1')
        s3.client.list_objects_v2.return_value = {
            'IsTruncated': False,
            'Contents': [
                {
                    'Key': 'file-1.wps',
                    'LastModified': datetime.datetime(2019, 8, 26, 15, 30, 1),
                    'Size': 10 * 1024
                },
                {
                    'Key': 'file-2.rm',
                    'LastModified': datetime.datetime(2019, 8, 26, 15, 30, 2),
                    'Size': 20 * 1024
                },
            ]
        }
        results = s3.list('foo/bar')
        s3.client.list_objects_v2.assert_called_once_with(
            Bucket='bucket',
            Prefix='foo/bar'
        )
        assert results == [
            ListEntry(
                name='file-1.wps',
                last_modified=arrow.get(2019, 8, 26, 15, 30, 1),
                size=10240
            ),
            ListEntry(
                name='file-2.rm',
                last_modified=arrow.get(2019, 8, 26, 15, 30, 2),
                size=20480
            )
        ]
    def test_list_pagenated(self, m_boto):
        """list() follows continuation tokens across truncated responses."""
        s3 = backends.S3Storage('bucket', aws_region='us-east-1')
        s3.client.list_objects_v2.side_effect = [
            {
                'IsTruncated': True,
                'NextContinuationToken': 'next-token-1',
                'Contents': [
                    {
                        'Key': 'file-1.wps',
                        'LastModified': datetime.datetime(2019, 8, 26, 15, 30, 1),
                        'Size': 10 * 1024
                    },
                    {
                        'Key': 'file-2.rm',
                        'LastModified': datetime.datetime(2019, 8, 26, 15, 30, 2),
                        'Size': 20 * 1024
                    },
                ]
            },
            {
                'IsTruncated': True,
                'NextContinuationToken': 'next-token-2',
                'Contents': [
                    {
                        'Key': 'file-3.rar',
                        'LastModified': datetime.datetime(2019, 8, 26, 15, 30, 3),
                        'Size': 5 * 1024
                    },
                ]
            },
            {
                'IsTruncated': False,
                'Contents': [
                    {
                        'Key': 'file-4.rtf',
                        'LastModified': datetime.datetime(2019, 8, 26, 15, 30, 4),
                        'Size': 1024
                    },
                ]
            },
        ]
        results = s3.list('foo/bar')
        # Each follow-up call must pass the token from the previous response.
        assert s3.client.list_objects_v2.call_args_list == [
            mock.call(Bucket='bucket', Prefix='foo/bar'),
            mock.call(Bucket='bucket', Prefix='foo/bar', ContinuationToken='next-token-1'),
            mock.call(Bucket='bucket', Prefix='foo/bar', ContinuationToken='next-token-2'),
        ]
        assert results == [
            ListEntry(
                name='file-1.wps',
                last_modified=arrow.get(2019, 8, 26, 15, 30, 1),
                size=10240
            ),
            ListEntry(
                name='file-2.rm',
                last_modified=arrow.get(2019, 8, 26, 15, 30, 2),
                size=20480
            ),
            ListEntry(
                name='file-3.rar',
                last_modified=arrow.get(2019, 8, 26, 15, 30, 3),
                size=5120
            ),
            ListEntry(
                name='file-4.rtf',
                last_modified=arrow.get(2019, 8, 26, 15, 30, 4),
                size=1024
            ),
        ]
    def test_delete(self, m_boto):
        """delete() issues a single delete_object call."""
        s3 = backends.S3Storage('bucket', aws_region='us-east-1')
        s3.delete('foo/bar')
        s3.client.delete_object.assert_called_once_with(
            Bucket='bucket',
            Key='foo/bar'
        )
    def test_open_read(self, m_boto):
        """open() in read mode returns an S3Reader backed by get_object."""
        s3 = backends.S3Storage('bucket', aws_region='us-east-1')
        result = s3.open('foo/bar', FileMode.read)
        assert isinstance(result, backends.s3.S3Reader)
        s3.client.get_object.assert_called_once_with(Bucket='bucket', Key='foo/bar')
    def test_open_write(self, m_boto):
        """open() in write mode returns an S3Writer."""
        s3 = backends.S3Storage('bucket', aws_region='us-east-1')
        result = s3.open('foo/bar', FileMode.write)
        assert isinstance(result, backends.s3.S3Writer)
    def test_open_read_write(self, m_boto):
        """Read+write and empty modes are rejected with clear errors."""
        s3 = backends.S3Storage("bucket", aws_region="us-east-1")
        with pytest.raises(
            NotImplementedError, match=re.escape("Read+write mode not supported by the S3 backend")
        ):
            s3.open("foo/bar", FileMode.read | FileMode.write)
        with pytest.raises(
            ValueError,
            match=re.escape("Unsupported mode. Accepted modes are FileMode.read or FileMode.write"),
        ):
            s3.open("foo/bar", FileMode(0))
    def test_read_operations(self, m_boto):
        """Sequential reads consume the body stream; close() follows exit."""
        s3 = backends.S3Storage('bucket', aws_region='us-east-1')
        body_obj = io.BytesIO(b'a' * 100)
        s3.client.get_object.return_value = {
            'Body': body_obj
        }
        with s3.open('foo/bar', FileMode.read) as fp:
            assert fp.read(1) == b'a'
            assert fp.read(2) == b'aa'
            assert fp.read(37) == b'a' * 37
            assert fp.read(50) == b'a' * 50
        s3.client.get_object.assert_called_once_with(Bucket='bucket', Key='foo/bar')
        assert fp.reader.closed is True
    def test_read_not_found(self, m_boto):
        """A NoSuchKey client error maps to FileNotFoundInStorageError."""
        s3 = backends.S3Storage('bucket', aws_region='us-east-1')
        s3.client.get_object.side_effect = ClientError({'Error': {'Code': 'NoSuchKey'}}, 'foo')
        with pytest.raises(FileNotFoundInStorageError) as exc:
            s3.open('foo/bar', FileMode.read)
        assert exc.value.filename == 'foo/bar'
        assert str(exc.value.storage_type) == 'S3Storage'
    def test_write_operations(self, m_boto):
        """Writes buffer to chunk_size, upload parts lazily, then complete."""
        m_client = mock.MagicMock()
        m_client.create_multipart_upload.return_value = {'UploadId': 'upload-id'}
        m_client.upload_part.side_effect = [{'ETag': f'etag-{x}'} for x in range(5)]
        with backends.s3.S3Writer('bucket', 'foo/bar', m_client, chunk_size=100) as fp:
            # The multipart upload is created only once the buffer overflows.
            m_client.create_multipart_upload.assert_not_called()
            fp.write(b'a')
            m_client.create_multipart_upload.assert_not_called()
            m_client.upload_part.assert_not_called()
            fp.write(b'b' * 100)
            m_client.create_multipart_upload.assert_called_once_with(
                Bucket='bucket',
                Key='foo/bar'
            )
            m_client.upload_part.assert_called_once_with(
                Bucket='bucket',
                Key='foo/bar',
                PartNumber=1,
                UploadId='upload-id',
                Body=b'a' + b'b' * 99
            )
            # test a write bigger than the buffer size
            fp.write(b'c' * 200)
            m_client.upload_part.assert_any_call(
                Bucket='bucket',
                Key='foo/bar',
                PartNumber=2,
                UploadId='upload-id',
                Body=b'b' + b'c' * 99
            )
            m_client.upload_part.assert_any_call(
                Bucket='bucket',
                Key='foo/bar',
                PartNumber=3,
                UploadId='upload-id',
                Body=b'c' * 100
            )
            m_client.complete_multipart_upload.assert_not_called()
        # Leaving the context flushes the final partial part and completes.
        m_client.upload_part.assert_called_with(
            Bucket='bucket',
            Key='foo/bar',
            PartNumber=4,
            UploadId='upload-id',
            Body=b'c'
        )
        m_client.complete_multipart_upload.assert_called_once_with(
            Bucket='bucket',
            Key='foo/bar',
            UploadId='upload-id',
            MultipartUpload={
                'Parts': [
                    {
                        'PartNumber': 1,
                        'ETag': 'etag-0',
                    },
                    {
                        'PartNumber': 2,
                        'ETag': 'etag-1',
                    },
                    {
                        'PartNumber': 3,
                        'ETag': 'etag-2',
                    },
                    {
                        'PartNumber': 4,
                        'ETag': 'etag-3',
                    },
                ]
            }
        )
    def test_copy(self, m_boto):
        """copy() issues a single server-side copy_object call."""
        s3 = backends.S3Storage('bucket', aws_region='us-east-1')
        s3.copy('foo/bar', 'foo/baz')
        s3.client.copy_object.assert_called_once_with(
            CopySource={
                'Bucket': 'bucket',
                'Key': 'foo/bar'
            },
            Bucket='bucket',
            Key='foo/baz'
        )
    def test_write_abort(self, m_boto):
        """abort() is a no-op before the upload exists, otherwise aborts it."""
        m_client = mock.MagicMock()
        m_client.create_multipart_upload.return_value = {'UploadId': 'upload-id'}
        m_client.upload_part.return_value = {'ETag': 'etag-0'}
        with backends.s3.S3Writer('bucket', 'foo/bar', m_client, chunk_size=100) as fp:
            fp.write(b'a' * 99)  # fill the buffer but don't trigger a flush
            fp.abort()
        # We haven't created the upload yet so aborting should do nothing
        m_client.abort_multipart_upload.assert_not_called()
        with backends.s3.S3Writer('bucket', 'foo/bar', m_client, chunk_size=100) as fp:
            fp.write(b'a' * 100)  # Force a buffer flush
            fp.abort()
        m_client.complete_multipart_upload.assert_not_called()
        m_client.abort_multipart_upload.assert_called_once_with(
            Bucket='bucket',
            Key='foo/bar',
            UploadId='upload-id'
        )
    def test_write_flushes(self, m_boto):
        """Exiting the writer flushes buffered data and completes the upload."""
        m_client = mock.MagicMock()
        m_client.create_multipart_upload.return_value = {'UploadId': 'upload-id'}
        m_client.upload_part.return_value = {'ETag': 'etag-0'}
        with backends.s3.S3Writer('bucket', 'foo/bar', m_client, chunk_size=100) as fp:
            fp.write(b'a' * 99)
        m_client.create_multipart_upload.assert_called()
        m_client.upload_part.assert_called()
        m_client.complete_multipart_upload.assert_called()
    def test_link_to_bad_operation(self, m_boto):
        """Combined operations cannot be expressed in one presigned URL."""
        s3 = backends.S3Storage('bucket', aws_region='us-east-1')
        with pytest.raises(NotImplementedError,
                           match='S3 backends cannot generate a link for multiple operations'):
            s3.link_to(
                path='foo/bar',
                operation=ShareLinkOperation.download | ShareLinkOperation.upload,
                expire=arrow.utcnow().shift(hours=1)
            )
        assert not s3.client.generate_presigned_url.called
    @pytest.mark.parametrize('op,method,extra_params', [
        (ShareLinkOperation.download, 'get_object', {}),
        (ShareLinkOperation.upload, 'put_object', {'ContentType': 'application/octet-stream'}),
        (ShareLinkOperation.remove, 'delete_object', {}),
    ])
    @freezegun.freeze_time('2020-04-27')
    def test_link_to_success(self, m_boto, op, method, extra_params):
        """Each share operation maps to the matching presigned client method."""
        s3 = backends.S3Storage('bucket', aws_region='us-east-1')
        s3.client.generate_presigned_url.return_value = 'https://localhost/foo'
        result = s3.link_to(path='foo/bar', operation=op, expire=arrow.get(2020, 4, 27, 1))
        assert result == 'https://localhost/foo'
        s3.client.generate_presigned_url.assert_called_once_with(
            ClientMethod=method,
            ExpiresIn=3600,
            Params={'Bucket': 'bucket', 'Key': 'foo/bar', **extra_params}
        )
    @freezegun.freeze_time('2020-04-27')
    def test_link_to_download_output_path(self, m_boto):
        """output_path adds a Content-Disposition filename to download links."""
        op = ShareLinkOperation.download
        method = 'get_object'
        extra_params = {'ResponseContentDisposition': 'attachment;filename=myfile.txt'}
        s3 = backends.S3Storage('bucket', aws_region='us-east-1')
        s3.client.generate_presigned_url.return_value = 'https://localhost/foo'
        result = s3.link_to(
            path='foo/bar',
            operation=op,
            expire=arrow.get(2020, 4, 27, 1),
            output_path='myfile.txt'
        )
        assert result == 'https://localhost/foo'
        s3.client.generate_presigned_url.assert_called_once_with(
            ClientMethod=method,
            ExpiresIn=3600,
            Params={'Bucket': 'bucket', 'Key': 'foo/bar', **extra_params}
        )
    @freezegun.freeze_time('2020-04-27')
    def test_link_to_specific_content_type(self, m_boto):
        """content_type overrides the response content type of the link."""
        op = ShareLinkOperation.download
        method = 'get_object'
        extra_params = {'ResponseContentType': 'image/png'}
        s3 = backends.S3Storage('bucket', aws_region='us-east-1')
        s3.client.generate_presigned_url.return_value = 'https://localhost/foo'
        result = s3.link_to(
            path='foo/bar',
            operation=op,
            expire=arrow.get(2020, 4, 27, 1),
            content_type='image/png',
        )
        assert result == 'https://localhost/foo'
        s3.client.generate_presigned_url.assert_called_once_with(
            ClientMethod=method,
            ExpiresIn=3600,
            Params={'Bucket': 'bucket', 'Key': 'foo/bar', **extra_params}
        )
|
from rest_framework import serializers
from .models import Level, LevelPackage, PackageUserRelation
class LevelDetailedRetrieveSerializer(serializers.ModelSerializer):
    """Full representation of a Level, exposing every gameplay field."""
    class Meta:
        model = Level
        fields = [
            'id', 'name', 'time',
            'date', 'singer', 'song_name',
            'msg_count', 'code', 'notif_sender',
            'notif_msg', 'default_phone_number',
            'hint_msg', 'incoming_call_number',
            'incoming_call_name', 'clipboard_msg',
            'second_name', 'second_text', 'hint_1',
            'hint_2', 'type', 'image', 'cover',
            'incoming_call_image', 'hint_count',
            'pin_count', 'index', 'notif',
            'hint', 'passed', 'incoming_call', 'clipboard',
            'second_notif', 'contact_name', 'contact_number',
        ]
class LevelSimpleRetrieveSerializer(serializers.ModelSerializer):
    """Compact Level representation for list views (id, name, artwork only)."""
    class Meta:
        model = Level
        fields = [
            'id', 'name', 'image', 'cover',
        ]
class PackageSimpleRetrieveSerializer(serializers.ModelSerializer):
    """Package summary with its levels nested in compact form."""
    levels = LevelSimpleRetrieveSerializer(many=True)
    class Meta:
        model = LevelPackage
        fields = ['pk', 'name', 'price', 'image', 'levels']
class PackageDetailedRetrieveSerializer(serializers.ModelSerializer):
    """Package representation with its levels nested in full detail."""
    levels = LevelDetailedRetrieveSerializer(many=True)
    class Meta:
        model = LevelPackage
        fields = ['pk', 'name', 'price', 'image', 'levels']
class UserPackageDetailSerializer(serializers.ModelSerializer):
    """User-to-package ownership row with the package nested in full detail."""
    package = PackageDetailedRetrieveSerializer()
    class Meta:
        model = PackageUserRelation
        fields = ['pk', 'user_profile', 'package', ]
class UserPackageCreateSerializer(serializers.ModelSerializer):
    """Purchase a package for the user supplied in serializer context.

    validate() rejects duplicate ownership and insufficient funds, and
    deducts the price as a side effect; create() then records the ownership.
    """
    class Meta:
        model = PackageUserRelation
        fields = ['package', ]
    def validate(self, attrs):
        # The view must supply the purchasing profile via serializer context.
        user_profile = self.context.get('user_profile')
        pack = attrs.get('package')
        try:
            # NOTE(review): this filters by the relation's own pk against the
            # package's pk -- looks like it should be get(package__pk=pack.pk);
            # confirm against the PackageUserRelation model.
            user_profile.pur.get(pk=pack.pk)
            # Found an existing relation: the ValidationError below is NOT
            # caught by the DoesNotExist handler and propagates to the caller.
            raise serializers.ValidationError("You have this pack")
        except PackageUserRelation.DoesNotExist:
            if user_profile.coins < pack.price:
                raise serializers.ValidationError("Not enough coins")
            else:
                # Deduct the price immediately; creation happens in create().
                user_profile.coins -= pack.price
                user_profile.save()
        return attrs
    def create(self, validated_data):
        pur = PackageUserRelation(user_profile=self.context.get('user_profile'), package=validated_data.get('package'))
        pur.save()
        return pur
|
from sympy.ntheory import sieve
from collections import Counter
from itertools import combinations
# Project Euler 70: find n < 10^7 minimizing n/phi(n) such that phi(n) is a
# digit permutation of n. Searching products of two 4-digit primes keeps
# phi trivial to compute (phi(p*q) = (p-1)*(q-1) for distinct primes).
solution = 0
best_ratio = float("inf")  # renamed: previous code shadowed the builtin 'min'

def is_perm(x, y):
    """Return True when x and y contain the same multiset of digits."""
    return Counter(str(x)) == Counter(str(y))

primes = sieve.primerange(10**3, 10**4)
for x, y in combinations(primes, 2):
    n = x * y
    if n < 10**7:
        fi = (x - 1) * (y - 1)  # phi(x*y) for distinct primes x, y
        q = n / fi
        # Cheap ratio check first; permutation test only when it could win.
        if q < best_ratio and is_perm(n, fi):
            solution, best_ratio = n, q
print(solution)
|
import threading
from peewee import Database, ExceptionWrapper, basestring
from peewee import sort_models_topologically, merge_dict
from peewee import OperationalError
from peewee import (RESULTS_NAIVE, RESULTS_TUPLES, RESULTS_DICTS,
RESULTS_AGGREGATE_MODELS, RESULTS_MODELS)
from peewee import SQL, R, Clause, fn, binary_construct
from peewee import logger
from .context import _aio_atomic, aio_transaction, aio_savepoint
from .result import (AioNaiveQueryResultWrapper, AioModelQueryResultWrapper,
AioTuplesQueryResultWrapper, AioDictQueryResultWrapper,
AioAggregateQueryResultWrapper)
# remove this one, just use autocommit arg in db.execute_sql
# in case of a transaction, the connection should be bounded
# to the atomic/transaction context manager
class AioConnection(object):
    """Wrapper around a pooled asynchronous DB-API connection.

    Used as an async context manager: entering acquires a raw connection
    from the pool (via *acquirer*), exiting releases it. Mirrors peewee's
    autocommit / autorollback semantics around execute_sql().
    """
    def __init__(self, acquirer, exception_wrapper,
                 autocommit=None, autorollback=None):
        self.autocommit = autocommit
        self.autorollback = autorollback
        self.acquirer = acquirer  # async context manager yielding a raw connection
        self.closed = True
        self.conn = None  # raw connection, populated in __aenter__
        self.context_stack = []
        self.transactions = []  # stack of active transaction contexts
        self.exception_wrapper = exception_wrapper  # TODO: remove
    def transaction_depth(self):
        """Return the number of currently nested transactions."""
        return len(self.transactions)
    def push_transaction(self, transaction):
        """Push *transaction* onto the nesting stack."""
        self.transactions.append(transaction)
    def pop_transaction(self):
        """Pop and return the innermost transaction."""
        return self.transactions.pop()
    async def execute_sql(self, sql, params=None, require_commit=True):
        """Execute *sql* and return the cursor.

        Rolls back on error when both autorollback and autocommit are set;
        commits on success when require_commit and autocommit are set.
        """
        logger.debug((sql, params))
        with self.exception_wrapper:
            cursor = await self.conn.cursor()
            try:
                await cursor.execute(sql, params or ())
            except Exception:
                if self.autorollback and self.autocommit:
                    await self.rollback()
                raise
            else:
                if require_commit and self.autocommit:
                    await self.commit()
            return cursor
    async def __aenter__(self):
        # Acquire the raw connection from the pool for the 'async with' body.
        self.conn = await self.acquirer.__aenter__()
        return self
    async def __aexit__(self, exc_type, exc_val, exc_tb):
        # Release the raw connection back to the pool.
        await self.acquirer.__aexit__(exc_type, exc_val, exc_tb)
    async def begin(self):
        pass
    def commit(self):
        # NOTE(review): defined sync but awaited in execute_sql -- works only
        # if conn.commit() returns an awaitable (async driver); confirm.
        with self.exception_wrapper:
            return self.conn.commit()
    def rollback(self):
        # NOTE(review): same sync/await interplay as commit() above.
        with self.exception_wrapper:
            return self.conn.rollback()
    # def close(self):
    #     # self.conn_pool.release(conn)
    #     return self.conn.close()
    def transaction(self, transaction_type=None):
        """Return a transaction context manager bound to this connection."""
        return aio_transaction(self, transaction_type)
    commit_on_success = property(transaction)
    def savepoint(self, sid=None):
        # NOTE(review): AioConnection never defines ``self.savepoints``, so
        # this raises AttributeError rather than NotImplementedError --
        # confirm whether the flag should come from the database instance.
        if not self.savepoints:
            raise NotImplementedError
        return aio_savepoint(self, sid)
class AioDatabase(Database):
def begin(self):
raise NotImplementedError
def commit(self):
raise NotImplementedError
def rollback(self):
raise NotImplementedError
def get_cursor(self):
raise NotImplementedError
def get_tables(self, schema=None):
raise NotImplementedError
def get_indexes(self, table, schema=None):
raise NotImplementedError
def get_columns(self, table, schema=None):
raise NotImplementedError
def get_primary_keys(self, table, schema=None):
raise NotImplementedError
def get_foreign_keys(self, table, schema=None):
raise NotImplementedError
def sequence_exists(self, seq):
raise NotImplementedError
def transaction_depth(self):
raise NotImplementedError
def __init__(self, database, threadlocals=True, autocommit=True,
fields=None, ops=None, autorollback=False,
**connect_kwargs):
self.connect_kwargs = {}
self.closed = True
self.init(database, **connect_kwargs)
self.pool = None
self.autocommit = autocommit
self.autorollback = autorollback
self.use_speedups = False
self.field_overrides = merge_dict(self.field_overrides, fields or {})
self.op_overrides = merge_dict(self.op_overrides, ops or {})
self.exception_wrapper = ExceptionWrapper(self.exceptions)
def is_closed(self):
return self.closed
def get_conn(self):
if self.closed:
raise OperationalError('Database pool has not been initialized')
return AioConnection(self.pool.acquire(),
autocommit=self.autocommit,
autorollback=self.autorollback,
exception_wrapper=self.exception_wrapper)
async def close(self):
if self.deferred:
raise Exception('Error, database not properly initialized '
'before closing connection')
with self.exception_wrapper:
if not self.closed and self.pool:
self.pool.close()
self.closed = True
await self.pool.wait_closed()
async def connect(self, safe=True):
if self.deferred:
raise OperationalError('Database has not been initialized')
if not self.closed:
if safe:
return
raise OperationalError('Connection already open')
with self.exception_wrapper:
self.pool = await self._connect(self.database,
**self.connect_kwargs)
self.closed = False
def get_result_wrapper(self, wrapper_type):
if wrapper_type == RESULTS_NAIVE:
return AioNaiveQueryResultWrapper
elif wrapper_type == RESULTS_MODELS:
return AioModelQueryResultWrapper
elif wrapper_type == RESULTS_TUPLES:
return AioTuplesQueryResultWrapper
elif wrapper_type == RESULTS_DICTS:
return AioDictQueryResultWrapper
elif wrapper_type == RESULTS_AGGREGATE_MODELS:
return AioAggregateQueryResultWrapper
else:
return AioNaiveQueryResultWrapper
def atomic(self, transaction_type=None):
return _aio_atomic(self.get_conn(), transaction_type)
def transaction(self, transaction_type=None):
return aio_transaction(self, transaction_type)
commit_on_success = property(transaction)
# def savepoint(self, sid=None):
# if not self.savepoints:
# raise NotImplementedError
# return aio_savepoint(self, sid)
async def create_table(self, model_class, safe=False):
qc = self.compiler()
async with self.get_conn() as conn:
args = qc.create_table(model_class, safe)
return await conn.execute_sql(*args)
async def create_tables(self, models, safe=False):
await create_model_tables(models, fail_silently=safe)
async def create_index(self, model_class, fields, unique=False):
qc = self.compiler()
if not isinstance(fields, (list, tuple)):
raise ValueError('Fields passed to "create_index" must be a list '
'or tuple: "%s"' % fields)
fobjs = [model_class._meta.fields[f]
if isinstance(f, basestring) else f
for f in fields]
async with self.get_conn() as conn:
args = qc.create_index(model_class, fobjs, unique)
return await conn.execute_sql(*args)
async def drop_index(self, model_class, fields, safe=False):
qc = self.compiler()
if not isinstance(fields, (list, tuple)):
raise ValueError('Fields passed to "drop_index" must be a list '
'or tuple: "%s"' % fields)
fobjs = [model_class._meta.fields[f]
if isinstance(f, basestring) else f
for f in fields]
async with self.get_conn() as conn:
args = qc.drop_index(model_class, fobjs, safe)
return await conn.execute_sql(*args)
async def create_foreign_key(self, model_class, field, constraint=None):
qc = self.compiler()
async with self.get_conn() as conn:
args = qc.create_foreign_key(model_class, field, constraint)
return await conn.execute_sql(*args)
async def create_sequence(self, seq):
if self.sequences:
qc = self.compiler()
async with self.get_conn() as conn:
return await conn.execute_sql(*qc.create_sequence(seq))
async def drop_table(self, model_class, fail_silently=False, cascade=False):
qc = self.compiler()
if cascade and not self.drop_cascade:
raise ValueError('Database does not support DROP TABLE..CASCADE.')
async with self.get_conn() as conn:
args = qc.drop_table(model_class, fail_silently, cascade)
return await conn.execute_sql(*args)
async def drop_tables(self, models, safe=False, cascade=False):
await drop_model_tables(models, fail_silently=safe, cascade=cascade)
async def truncate_table(self, model_class, restart_identity=False,
cascade=False):
qc = self.compiler()
async with self.get_conn() as conn:
args = qc.truncate_table(model_class, restart_identity, cascade)
return await conn.execute_sql(*args)
async def truncate_tables(self, models, restart_identity=False,
cascade=False):
for model in reversed(sort_models_topologically(models)):
await model.truncate_table(restart_identity, cascade)
async def drop_sequence(self, seq):
if self.sequences:
qc = self.compiler()
async with self.get_conn() as conn:
return await conn.execute_sql(*qc.drop_sequence(seq))
    async def execute_sql(self, sql, params=None, require_commit=True):
        """Run a raw SQL statement on a pooled connection and return the
        driver's result; `require_commit` is forwarded to the connection."""
        async with self.get_conn() as conn:
            return await conn.execute_sql(sql, params,
                                          require_commit=require_commit)
    def extract_date(self, date_part, date_field):
        """Build an ``EXTRACT(<date_part> FROM <date_field>)`` expression."""
        return fn.EXTRACT(Clause(date_part, R('FROM'), date_field))
    def truncate_date(self, date_part, date_field):
        """Build a ``DATE_TRUNC(<date_part>, <date_field>)`` expression."""
        return fn.DATE_TRUNC(date_part, date_field)
    def default_insert_clause(self, model_class):
        """SQL emitted when inserting a row with no explicit column values."""
        return SQL('DEFAULT VALUES')
    def get_noop_sql(self):
        """A valid query that returns no rows (used as a no-op statement)."""
        return 'SELECT 0 WHERE 0'
    def get_binary_type(self):
        """Return the callable used to wrap binary/blob parameter values
        (``binary_construct`` — defined elsewhere in this module)."""
        return binary_construct
async def create_model_tables(models, **create_table_kwargs):
    """Create tables for all given models, parents before children
    (topological order), forwarding extra kwargs to ``create_table``."""
    for model in sort_models_topologically(models):
        await model.create_table(**create_table_kwargs)
async def drop_model_tables(models, **drop_table_kwargs):
    """Drop tables for all given models, children before parents
    (reverse topological order), forwarding extra kwargs to ``drop_table``."""
    for model in reversed(sort_models_topologically(models)):
        await model.drop_table(**drop_table_kwargs)
|
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from models import Base, User
# Create the schema (if missing) and insert one sample user.
engine = create_engine('sqlite:///user.db', echo=False)
#Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
try:
    # NOTE(review): re-running this script inserts a duplicate row unless
    # User.name is declared unique — confirm against models.py.
    user1 = User(name='user1', fullname='Ed Jones', nickname='ed')
    session.add(user1)
    session.commit()
finally:
    # The original leaked the session/connection; always release it.
    session.close()
# -*- coding: utf-8 -*-
import scrapy
from headerchange.user_agents import agents
import random
import json
class HeadervalidationSpider(scrapy.Spider):
    """Spider that requests httpbin.org/ip several times and prints each
    response body, so rotated headers/IPs can be inspected by eye."""
    name = 'headervalidation'

    def start_requests(self):
        # Five identical requests; dont_filter bypasses the dupe filter
        # so all of them are actually sent.
        target = 'http://httpbin.org/ip'
        for _ in range(5):
            yield scrapy.Request(url=target, dont_filter=True)

    def parse(self, response):
        banner = '*' * 20
        print(banner)
        print(response.text)
        # print(json.loads(response.body_as_unicode()).get('headers').get('User-Agent'))
        print(banner)
|
import random
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
from class_2_3.trust_system.agent import DummyAgent
from class_2_3.trust_system.environment import Environment
Agent = DummyAgent

NUM_AGENTS = 10  # Number of agents
random.seed(0)   # fixed seed: same population on every run

# One agent per graph node, each with a random initial value.
agents = [Agent(random.random()) for _ in range(NUM_AGENTS)]

# create a complete graph,
# see https://networkx.github.io/documentation/stable/reference/generators.html for other generators
graph = nx.complete_graph(NUM_AGENTS)
env = Environment(graph)
env.add_agents(agents)
# random.seed(time.time()) # uncomment if you want different experiments on same graph

n_rounds = 100
all_scores = np.zeros((n_rounds, 2))
for round_idx in range(n_rounds):
    tally = [0, 0]  # [delegations that succeeded, delegations that failed]
    for agent in env.nodes:
        if agent.delegate():
            tally[0] += 1
        else:
            tally[1] += 1
    all_scores[round_idx] = tally

# Scatter of success vs failure counts per round.
fig, ax = plt.subplots()
ax.plot(all_scores[:, 0], all_scores[:, 1], 'o', alpha=0.05, markersize=15)
plt.show()
|
import pandas as pd
import math
"""
Point A to B continuous path finder
-----------------------------------
step 1 : Import csv file
step 2 : get first and last point (latitude,longitude)
step 3 : find the slope ratio by using equation (first latitude-last latitud2)/(first latitude - last latitude)
step 4 : slope ratio is used for straight line
step 5 : appending straight line path by changing latitude of out of line to first latitude
step 6 : find distance of all point (lantitude/Longitude)
step 7 : sort the distance of all points for generate continuous path
step 8 : export the data as new csv file
"""
class GeoLocator():
    """Builds a continuous point-to-point path from a CSV of coordinates.

    See the module notes above for the step-by-step algorithm.
    """

    def __init__(self):
        pass

    def get_csv(self, filename):
        """Load the CSV file into a DataFrame."""
        return pd.read_csv(filename)

    def get_first_last_lat_long_data(self, df_data):
        """Return the first and last rows (the path endpoints)."""
        return df_data.iloc[0], df_data.iloc[-1]

    def get_slope_ratio(self, from_lat, from_long, to_lat, to_long):
        """Return the slope (delta latitude / delta longitude) between two points.

        NOTE(review): raises ZeroDivisionError when both longitudes are
        equal (a vertical path) — callers should guard against that.
        """
        slope_ratio = (from_lat - to_lat) / (from_long - to_long)
        return slope_ratio

    def distance(self, from_lat, from_long, to_lat, to_long):
        """Great-circle distance in kilometres between two lat/long points
        (spherical law of cosines)."""
        if from_lat == to_lat and from_long == to_long:
            return 0
        radlat1 = math.pi * from_lat / 180
        radlat2 = math.pi * to_lat / 180
        theta = from_long - to_long
        radtheta = math.pi * theta / 180
        dist = math.sin(radlat1) * math.sin(radlat2) + math.cos(radlat1) * math.cos(radlat2) * math.cos(radtheta)
        # Clamp to the acos() domain: floating-point error can push the
        # value slightly outside [-1, 1].  The original clamped only the
        # +1 side, so near-antipodal points could raise a math domain error.
        if dist > 1:
            dist = 1
        elif dist < -1:
            dist = -1
        dist = math.acos(dist)
        dist = dist * 180 / math.pi      # radians -> degrees of arc
        dist = dist * 60 * 1.1515        # degrees -> statute miles
        dist = dist * 1.609344           # miles -> kilometres
        return dist

    def export_csv(self, df_data):
        """Write 'exported_details.csv': the input points (deduplicated by
        distance from the start), with off-line points projected onto the
        straight line, sorted by distance from the first point."""
        df_first, df_last = self.get_first_last_lat_long_data(df_data)
        dbl_slope_ratio = self.get_slope_ratio(df_first['latitude'], df_first['longitude'], df_last['latitude'], df_last['longitude'])
        dct_data = {'latitude': [df_first['latitude']], 'longitude': [df_first['longitude']], 'distance': [0]}
        for ind, row in df_data.iterrows():
            if ind != 0:
                current_ratio = self.get_slope_ratio(df_first['latitude'], df_first['longitude'], row['latitude'], row['longitude'])
                distance = self.distance(df_first['latitude'], df_first['longitude'], row['latitude'], row['longitude'])
                if distance not in dct_data['distance']:
                    dct_data['latitude'].append(row['latitude'])
                    dct_data['longitude'].append(row['longitude'])
                    dct_data['distance'].append(distance)
                if round(dbl_slope_ratio, 2) != round(current_ratio, 2):
                    # Off the straight line: project back by keeping the
                    # starting latitude (step 5 of the module notes).
                    distance = self.distance(df_first['latitude'], df_first['longitude'], df_first['latitude'], row['longitude'])
                    if distance not in dct_data['distance']:
                        dct_data['latitude'].append(df_first['latitude'])
                        dct_data['longitude'].append(row['longitude'])
                        dct_data['distance'].append(distance)
        df_exp_data = pd.DataFrame(dct_data)
        df_exp_data = df_exp_data.sort_values(by=['distance'])
        df_exp_data.to_csv('exported_details.csv', index=False)
if __name__ == '__main__':
obj_geo = GeoLocator()
df_data = obj_geo.get_csv('latitude_longitude_details.csv')
obj_geo.export_csv(df_data)
|
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import csv
import json
import sys
# In[11]:
# CLI: <chat transcript .txt> <roster .csv> <output .json>
input_file_txt = sys.argv[1]
input_file_csv = sys.argv[2]
output_file = sys.argv[3]

# Collect the distinct speaker names from the chat transcript.
# Each line looks like "<timestamp> <Speaker Name>: <message>".
people_chat_lst = []
with open(input_file_txt, 'r') as all_chat_info:
    for line in all_chat_info:
        useful_info = line.replace("\n", "").split()
        chat_info = " ".join(useful_info[1:])
        people = chat_info.split(":")[0]
        if people not in people_chat_lst:
            people_chat_lst.append(people)

# Roster rows are "Last, First",<location>; flip to "First Last" and keep
# only people who never spoke in the chat.
csv_lst = []
with open(input_file_csv) as csv_reading:
    read = csv.reader(csv_reading, delimiter=',')
    header = next(read, None)  # skip the header row; None on an empty file
    if header is not None:
        for row in read:
            name_list = row[0].split(", ")
            name = name_list[1] + " " + name_list[0]
            if name not in people_chat_lst:
                csv_lst.append({"Name": name, "Participating from": row[1]})

with open(output_file, 'w') as output:
    json.dump(csv_lst, output)

# Read the file back and echo it as a sanity check.
with open(output_file) as f:
    data = json.load(f)
print(data)
# In[12]:
# In[ ]:
|
# Read "s x" (s: declared length, unused; x: value to search for), then the
# list itself, and report whether x occurs in it.
s, x = map(int, input().split())
values = list(map(int, input().split()))
# Membership test replaces the original manual flag-setting loop.
print("yes" if x in values else "no")
|
#!/usr/bin/env python3
import turtle
import os
#lets u do some basic graphics for beginners best (inbuilt)
# ---- window ----
wn = turtle.Screen() #creating the window
wn.title("Ping Pong You Vs The Computer Game ") #window title
wn.bgcolor("black")
wn.setup(width=800,height =600)
#wn.tracer(0) #stops window from updating so as to increase game speed
#paddle a (left, computer-controlled)
paddle_a = turtle.Turtle()
paddle_a.speed(10) #sets animation speed
paddle_a.shape("square") #default 20*20 px
paddle_a.shapesize(stretch_wid=5,stretch_len=1) #stretch to a 100x20 paddle
paddle_a.color("white")
paddle_a.penup() #to not draw lines as it moves
paddle_a.goto(-350,0)
speed = 5  # ball animation speed; decreases as the player scores hits
#paddle b (right, player-controlled with arrow keys)
paddle_b = turtle.Turtle()
paddle_b.speed(0) #sets speed to max possible speed
paddle_b.shape("square") #default 20*20 px
paddle_b.shapesize(stretch_wid=5,stretch_len=1) #stretch to 5 times 20 and
paddle_b.color("white")
paddle_b.penup() #to not draw lines as it moves
paddle_b.goto(350 ,0)
#ball
ball = turtle.Turtle()
ball.speed(speed) #animation speed
ball.shape("circle") #default 20*20 px
ball.color("white")
ball.penup() #to not draw lines as it moves
ball.goto(0,0)
ball.dx = 2 #delta x : every time ball moves it moves by 2 px right(as +ve2)
ball.dy = 2 #delta y : every time ball moves it moves by 2 px up(as +ve2)
miss = 0  # times the player missed the ball
hit = 0   # times the player returned the ball
#pen: invisible turtle used only to draw the scoreboard text
pen = turtle.Turtle()
pen.speed(0) #animation speed
pen.color("white")
pen.penup()
pen.hideturtle() #to hide it as we only want to see its text that it writes
pen.goto(0,260)
pen.write("Miss : {} Hit : {} Level : {} ".format(miss,hit,(5-speed)),align="center",font=("Courier",24,"normal"))
#score
#movement helper functions follow
def paddle_a_up():
    """Move the left (computer) paddle 20 px up."""
    paddle_a.sety(paddle_a.ycor() + 20)
def paddle_a_down():
    """Move the left (computer) paddle 20 px down."""
    paddle_a.sety(paddle_a.ycor() - 20)
def paddle_b_up():
    """Move the right (player) paddle 20 px up."""
    paddle_b.sety(paddle_b.ycor() + 20)
def paddle_b_down():
    """Move the right (player) paddle 20 px down."""
    paddle_b.sety(paddle_b.ycor() - 20)
"""def ball_up():
ball.sety(ball.ycor() + ball.dy)
def ball_down():
ball.sety(ball.ycor() - ball.dy)
def ball_left():
ball.setx(ball.xcor() - ball.dx)
def ball_right():
ball.setx(ball.xcor() + ball.dx)"""
#keyboard binding
wn.listen() #listen keyboard input
"""wn.onkeypress(paddle_a_up,"w") #on pressing w call function paddle_a_up
wn.onkeypress(paddle_a_down,"s")"""
wn.onkeypress(paddle_b_up,"Up")
wn.onkeypress(paddle_b_down,"Down")
"""wn.onkeypress(ball_up,"8")
wn.onkeypress(ball_left,"4")
wn.onkeypress(ball_right,"6")
wn.onkeypress(ball_down,"5")"""
#main game loop: move the ball, run the computer paddle AI, bounce off
#walls/paddles, and keep score.  Runs until the window is closed.
while True:
    wn.update() #everytime the loop runs it updates the window
    #move the ball one step along its velocity vector
    ball.setx(ball.xcor() + ball.dx)
    ball.sety(ball.ycor() + ball.dy)
    # Computer AI: while the ball travels toward the left half, track it
    # vertically (only when it is more than 40 px off the paddle centre).
    if ball.xcor() < 0 and ball.dx == -2 :
        #print(ball.ycor()," ",paddle_a.ycor())
        if(ball.ycor() < paddle_a.ycor() - 40):
            #print("i was here")
            paddle_a_down()
            #print(ball.ycor()," ",paddle_a.ycor())
        elif(ball.ycor() > paddle_a.ycor() + 40):
            paddle_a_up()
            #print(ball.ycor()," y ",paddle_a.ycor())
    #border checking: bounce off the top and bottom walls
    if ball.ycor() > 290:
        os.system("aplay pong.wav&")
        ball.sety(290)
        #print("no up")
        ball.dy *= -1 #reverses the ball direction
    if ball.ycor() < -290:
        os.system("aplay pong.wav&")
        ball.sety(-290)
        #print("no down")
        ball.dy *= -1
    # Ball passed the player's (right) paddle: count a miss and restart.
    if ball.xcor() > 350:
        os.system("aplay crash.wav&")
        #ball.hideturtle()
        ball.setpos(0,0)
        #ball.showturtle()
        ball.dx *= -1
        miss +=1
        pen.clear()
        pen.write("Miss : {} Hit : {} Level : {} ".format(miss,hit,(5-speed)),align="center",font=("Courier",24,"normal"))
    # Ball passed the computer's (left) paddle: just restart, no scoring.
    if ball.xcor() < -350:
        os.system("aplay crash.wav&")
        #ball.hideturtle()
        ball.setpos(0,0)
        #ball.showturtle()
        ball.dx *= -1
        #score_b +=1
        #pen.clear()
        #pen.write("Player A : {} Player B : {}".format(score_a,score_b),align="center",font=("Courier",24,"normal"))
    # Clamp both paddles inside the window.
    if paddle_a.ycor() > 250:
        paddle_a.sety(250)
    if paddle_a.ycor() < -250:
        paddle_a.sety(-250)
    if paddle_b.ycor() > 250:
        paddle_b.sety(250)
    if paddle_b.ycor() < -250:
        paddle_b.sety(-250)
    #paddle and ball collision; every 5 hits the game speeds up one level
    if (ball.xcor() > 340 and ball.xcor() < 350) and (ball.ycor() < paddle_b.ycor() + 40 and ball.ycor() > paddle_b.ycor() - 40):
        os.system("aplay pong.wav&")
        hit+=1
        if hit % 5 == 0 :
            speed -=1
            print(speed)
            if speed <= -1 :
                wn.tracer(0)
            else :
                ball.speed(speed)
        pen.clear()
        pen.write("Miss : {} Hit : {} Level : {} ".format(miss,hit,(5-speed)),align="center",font=("Courier",24,"normal"))
        ball.setx(340)
        #print("here")
        ball.dx *= -1
    elif (ball.xcor() < -340 and ball.xcor() > -350) and (ball.ycor() < paddle_a.ycor() + 40 and ball.ycor() > paddle_a.ycor() - 40):
        os.system("aplay pong.wav&")
        ball.setx(-340)
        #print("t here")
        ball.dx *= -1
def solution(A):
    """Return the first value (in order of first occurrence) that appears
    an odd number of times in A, or None when there is none.

    The original toggled a 1/-1 flag per value and shadowed the builtin
    ``hash``; a plain occurrence count expresses the same thing directly.
    """
    counts = {}
    for num in A:
        counts[num] = counts.get(num, 0) + 1
    # dicts preserve insertion order, so this matches the original's
    # "first inserted key whose flag is 1" behavior.
    for key, cnt in counts.items():
        if cnt % 2 == 1:
            return key
from urllib.parse import urljoin
import sys
import requests
from ex02_bearer import bearer_token_for_namespace
import stacksmith
def get_app_details(namespace, token, app):
    """GET the Stacksmith details for *app* in *namespace* and return the
    decoded JSON body; asserts on any non-200 response."""
    path = 'ns/{ns}/apps/{app}'.format(ns=namespace, app=app)
    endpoint = urljoin(stacksmith.url, path)
    headers = {'authorization': token}
    response = requests.get(endpoint, headers=headers)
    assert response.status_code == 200, (
        'Failed to fetch details for app "{app}": {error}'.format(
            app=app, error=response.json()['error']))
    return response.json()
def main(args):
    """
    Use the Stacksmith API to fetch details of an application.
    """
    if len(args) < 2:
        print('Must specify an app ID')
        sys.exit(1)
    app_id = args[1]
    print('Fetching details for app "{app}"'.format(app=app_id))
    ns = stacksmith.namespace
    token = bearer_token_for_namespace(ns)
    details = get_app_details(ns, token, app_id)
    print(details)
if __name__ == "__main__":
main(sys.argv)
|
#!/usr/bin/env python
#
# Copyright (c) 2012, Jake Marsh (http://jakemmarsh.com)
#
# license: GNU LGPL
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# As a guide I took the following code: http://iamtrask.github.io/2015/07/12/basic-python-network/
# Add visualization of neural network :)
import math, random, string
random.seed(0)
## ================================================================
# calculate a random number a <= rand < b
def rand(a, b):
    """Return a pseudo-random float in the half-open interval [a, b)."""
    span = b - a
    return a + span * random.random()
def makeMatrix(I, J, fill = 0.0):
    """Return an I x J matrix as a list of I independent rows, each a
    list of J copies of *fill* (e.g. makeMatrix(4, 3) -> 4 rows of
    [0.0, 0.0, 0.0])."""
    # Comprehension replaces the manual append loop; each iteration still
    # builds a fresh row list, so rows do not alias each other.
    return [[fill] * J for _ in range(I)]
####Value of the activation function parameter: 0.5
def sigmoid(x):
    """Activation function: hyperbolic tangent (output range -1..1)."""
    # tanh is a little nicer than the standard 1/(1+e^-x)
    return math.tanh(x)
# derivative of our sigmoid function, in terms of the output (i.e. y)
def dsigmoid(y):
    """Derivative of tanh expressed via the *output* y: d/dx tanh = 1 - y^2."""
    return 1.0 - y * y
## ================================================================
class NeuralNetwork:
    """Fully-connected 3-layer perceptron (input+bias, hidden, output)
    trained by plain backpropagation with momentum.

    Written for Python 2 (print statements).  The very verbose inline
    comments trace one concrete run with 3 inputs / 3 hidden / 1 output
    and were kept as-is.
    """
    def __init__(self, inputNodes, hiddenNodes, outputNodes):
        """Allocate activations and weight matrices; weights start random."""
        # number of input, hidden, and output nodes
        self.inputNodes = inputNodes + 1 # +1 for bias node
        self.hiddenNodes = hiddenNodes
        self.outputNodes = outputNodes
        # activations for nodes
        self.inputActivation = [1.0]*self.inputNodes #>>> len(c) is 4; c = [1.0]*4 gives c = [1.0, 1.0, 1.0, 1.0]
        self.hiddenActivation = [1.0]*self.hiddenNodes#>>> len(d) is 3; d = [1.0]*3 gives d = [1.0, 1.0, 1.0]
        self.outputActivation = [1.0]*self.outputNodes#>>> len(e) is 1; e = [1.0]*1 gives e = [1.0]
        # create weights
        self.inputWeight = makeMatrix(self.inputNodes, self.hiddenNodes) #len(m) is 4; m = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
        self.outputWeight = makeMatrix(self.hiddenNodes, self.outputNodes)#len(n) is 1; n = [[0.0], [0.0], [0.0]]
        # set them to random values
        for i in range(self.inputNodes): #loops 4 times
            for j in range(self.hiddenNodes): #loops 3 times
                self.inputWeight[i][j] = rand(-0.2, 0.2) #it's m but with rand(-0.2, 0.2)
        print "self.inputWeight fed with rand(-0.2, 0.2): ", self.inputWeight #len(m) is 4
        #self.inputWeight fed with rand(-0.2, 0.2):
        #[[0.13776874061001926, 0.10318176117612099, -0.031771367667662004],
        # [-0.09643329988281467, 0.004509888547443414, -0.03802634501983429],
        # [0.11351943561390904, -0.07867490956842903, -0.009361218339057675],
        # [0.03335281578201249, 0.16324515407813406, 0.0018747423269561136]]
        for j in range(self.hiddenNodes): #loops 3 times
            for k in range(self.outputNodes): #loops 1 time
                self.outputWeight[j][k] = rand(-2.0, 2.0) #it's m but with rand(-0.2, 0.2)
        print "self.outputWeight fed with rand(-0.2, 0.2): ", self.outputWeight #len(n) is 1
        # self.outputWeight fed with rand(-0.2, 0.2):
        #[[-0.8726486224011847], [1.0232168166288957], [0.4734759867013265]]
        # last change in weights for momentum
        self.ci = makeMatrix(self.inputNodes, self.hiddenNodes) #m = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
        print "self.ci last change for momentum: ", self.ci
        # self.ci last change for momentum:
        #[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
        self.co = makeMatrix(self.hiddenNodes, self.outputNodes) #n = [[0.0], [0.0], [0.0]]
        print "self.co last change for momentum: ", self.co
        # self.co last change for momentum:
        #[[0.0], [0.0], [0.0]]
    # Results:
    # self.inputWeight fed with rand(-0.2, 0.2): [[0.13776874061001926, 0.10318176117612099, -0.031771367667662004], [-0.09643329988281467, 0.004509888547443414, -0.03802634501983429], [0.11351943561390904, -0.07867490956842903, -0.009361218339057675], [0.03335281578201249, 0.16324515407813406, 0.0018747423269561136]]
    # self.outputWeight fed with rand(-0.2, 0.2): [[-0.8726486224011847], [1.0232168166288957], [0.4734759867013265]]
    # self.ci last change for momentum: [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
    # self.co last change for momentum: [[0.0], [0.0], [0.0]]
    #http://cs231n.github.io/neural-networks-1/
    #Analysis: train calls update(Line 290)
    # update uses self.inputWeight and self.outputWeightchanges
    # update changes self.inputActivation, self.hiddenActivation, self.outputActivation
    # update returns self.outputActivation (as an array)
    # train calls backPropagate
    # backPropagate modifies self.inputWeight and self.outputWeight
    # self.inputWeight and self.outputWeight are used in update
    # repeats itself 1000 times. Then in the analyzer code we call test
    # test calls update (Line 260)
    # update uses self.inputWeight and self.outputWeightchanges (self.inputWeight and self.outputWeight were modified by backpropagate)
    # update changes self.inputActivation, self.hiddenActivation, self.outputActivation
    # update returns self.outputActivation (as an array)
    #That is how train and test relate to each other, through self.inputWeight and self.outputWeight
    #because backpropagate inside train modifies self.inputWeight and self.outputWeight
    #and then self.inputWeight and self.outputWeight are used by update which is called inside test
    def update(self, inputs): #input is [531.9904153999998, 524.052386, 539.172466]
        """Forward pass: load *inputs*, propagate through the hidden layer,
        and return a copy of the output activations."""
        #print "inputs of update function from NeuralNetwork: ", inputs #[531.9904153999998, 524.052386, 539.172466]
        #print "length of inputs of update function from NeuralNetwork: ", len(inputs) # len(inputs) is 3
        if len(inputs) != self.inputNodes-1: #if len(inputs) != 3
            raise ValueError('wrong number of inputs')
        # input activations
        for i in range(self.inputNodes-1): #loops 3 times (4-1 = 3 times)
            self.inputActivation[i] = inputs[i] #we modify all the values except the default value of the bias input node
        print "self.inputActivation inside update function: ", self.inputActivation[:]
        #self.inputActivation inside update function: [531.9904153999998, 524.052386, 539.172466, 1.0]
        #before, self.inputActivation was [1.0, 1.0, 1.0, 1.0]
        # hidden activations
        for j in range(self.hiddenNodes): #loops 3 times
            sum = 0.0
            for i in range(self.inputNodes): #loops 4 times
                sum = sum + self.inputActivation[i] * self.inputWeight[i][j] #len(m) is 4; m = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
                #self.inputActivation inside update function:
                #[531.9904153999998, 524.052386, 539.172466, 1.0]
                #self.inputWeight fed with rand(-0.2, 0.2):
                #[[0.13776874061001926, 0.10318176117612099, -0.031771367667662004],
                # [-0.09643329988281467, 0.004509888547443414, -0.03802634501983429],
                # [0.11351943561390904, -0.07867490956842903, -0.009361218339057675],
                # [0.03335281578201249, 0.16324515407813406, 0.0018747423269561136]]
                #Extract (summary)
                #j, loops 3 times
                #i, loops 4 times
                ##j=0
                # Then:
                #self.inputActivation = sia #len(sia) is 4
                # sia[0], sia[1], sia[2], sia[3]
                # m[0][0], m[1][0], m[2][0], m[3][0]
                #self.hiddenActivation[0] = sigmoid(sum) #part of the result
                ##j=1
                # Then:
                #self.inputActivation = sia #len(sia) is 4
                # sia[0], sia[1], sia[2], sia[3]
                # m[0][1], m[1][1], m[2][1], m[3][1]
                #self.hiddenActivation[1] = sigmoid(sum) #part of the result
                ##j=2
                # Then:
                #self.inputActivation = sia #len(sia) is 4
                # sia[0], sia[1], sia[2], sia[3]
                # m[0][2], m[1][2], m[2][2], m[3][2]
                #self.hiddenActivation[2] = sigmoid(sum) #part of the result
            self.hiddenActivation[j] = sigmoid(sum) #>>> len(d) is 3; d = [1.0]*3 gives d = [1.0, 1.0, 1.0]
            #before, self.inputActivation was [1.0, 1.0, 1.0]
        print "self.hiddenActivation inside update function: ", self.hiddenActivation[:]
        #self.hiddenActivation inside update function:
        #[1.0, 0.9999999999998125, -1.0]
        # output activations
        print "range(self.outputNodes): ", range(self.outputNodes) #range(self.outputNodes): [0]
        for k in range(self.outputNodes): #loops 1 time, k=0
            sum = 0.0
            for j in range(self.hiddenNodes): #loops 3 times
                sum = sum + self.hiddenActivation[j] * self.outputWeight[j][k]
            self.outputActivation[k] = sigmoid(sum) #>>> len(e) is 1; e = [1.0]*1 gives e = [1.0]
            #print "This is full self.outputWeight: ", self.outputWeight[:]
            #This is full self.outputWeight: [[-0.8726486224011847], [1.0232168166288957], [0.4734759867013265]]
            #before the update self.outputWeight was [[0.0], [0.0], [0.0]]
            # range(self.outputNodes): [0], k is always [0], loops 1 time
            # This is [j][k]: 0 0 #k from the outer loop, j from the inner loop
            # This is [j][k]: 1 0
            # This is [j][k]: 2 0
            # n[0][0], n[1][0], n[2][0] #len(n) is 1; n = [[0.0], [0.0], [0.0]]
        print "self.outputActivation inside update function: ", self.outputActivation[:]
        #self.outputActivation inside update function: [0.9606026038505812]
        #before update self.outputActivation was [1.0]
        return self.outputActivation[:] #len(self.outputActivation) is 1; self.outputActivation = e, e = [1.0]*1 gives e = [1.0]
    #Note: self.inputWeight and self.outputWeight are modified in backPropagate (also self.co and self.ci)
    def backPropagate(self, targets, N, M):
        """One backward pass: update both weight matrices toward *targets*
        (N = learning rate, M = momentum factor) and return the squared error."""
        if len(targets) != self.outputNodes: #len(targets) is 1, self.outputNodes = 1
            raise ValueError('wrong number of target values')
        # calculate error terms for output
        output_deltas = [0.0] * self.outputNodes #output_deltas = [0.0]*1 or [0.0]
        for k in range(self.outputNodes): #loops 1 time, k is always [0]
            error = targets[k]-self.outputActivation[k] #self.outputActivation[0] is [1.0]
            output_deltas[k] = dsigmoid(self.outputActivation[k]) * error #output_deltas = [0.0] with a different value rather than 0.0
        print "output deltas from backPropagate function: ", output_deltas #because of the loop with 1000 iterations
        #output deltas appears 1000 times with different values
        #example
        #output deltas from backPropagate function: [0.0025952033592540847]
        # calculate error terms for hidden
        hidden_deltas = [0.0] * self.hiddenNodes #hidden_deltas = [0.0]*3 or [0.0, 0.0, 0.0]
        for j in range(self.hiddenNodes): #loops 3 times
            error = 0.0
            for k in range(self.outputNodes): #loops 1 time, k is always [0]
                error = error + output_deltas[k]*self.outputWeight[j][k] #self.outputWeight is [[0.0], [0.0], [0.0]] #of course as update function is called it gives self.outputWeight different values rather than 0.0
            hidden_deltas[j] = dsigmoid(self.hiddenActivation[j]) * error #hidden_deltas = [0.0, 0.0, 0.0] (result from [0.0]*3) with different values rather than 0.0
            #hidden_Activation is [1.0, 1.0, 1.0] but with different values, from [1.0]*3 gives [1.0, 1.0, 1.0]
        print "hidden deltas from backPropagate function: ", hidden_deltas #because of the loop with 1000 iterations
        #hidden deltas appears 1000 times with different values
        #example
        #hidden deltas from backPropagate function: [-0.0, 1.7789468019844285e-15, -0.0]
        # update output weights
        for j in range(self.hiddenNodes): #loops 3 times
            for k in range(self.outputNodes): #loops 1 time, k is always [0]
                change = output_deltas[k]*self.hiddenActivation[j] #output_deltas = [0.0] with a different value rather than 0.0
                print "change for self.co :", change
                self.outputWeight[j][k] = self.outputWeight[j][k] + N*change + M*self.co[j][k] #self.outputWeight is [[0.0], [0.0], [0.0]] #of course as update function is called it gives self.outputWeight different values rather than 0.0
                self.co[j][k] = change #self.co is [[0.0], [0.0], [0.0]] and the float type zeros are substituted by new values, change
        print "self.co from backPropagate function: ", self.co #because of the loop with 1000 iterations
        #self.co appears 1000 times with different values
        #example
        #self.co from backPropagate function:
        #[[0.00013728964264772642], [0.00013728964264770067], [-0.00013728964264772642]]
        # update input weights
        for i in range(self.inputNodes): #loops 4 times (3+1=4)
            for j in range(self.hiddenNodes): #loops 3 times
                change = hidden_deltas[j]*self.inputActivation[i] #hidden_deltas = [0.0, 0.0, 0.0] (result from [0.0]*3) with different values rather than 0.0
                print "change for self.ci: ", change
                self.inputWeight[i][j] = self.inputWeight[i][j] + N*change + M*self.ci[i][j] #self.inputWeight has length 4 and is [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]. The float type zeros are substituted by new values
                self.ci[i][j] = change #self.ci has length 4 and is [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]. The float type zeros are substituted by new values, change
        print "self.ci from backPropagate function: ", self.ci #because of the loop with 1000 iterations
        #self.ci appears 1000 times with different values
        #example
        #self.ci from backPropagate function:
        #[[0.0, 5.599362764540085e-14, -0.0],
        # [0.0, 5.515812563334366e-14, -0.0],
        # [0.0, 5.674956056333596e-14, -0.0],
        # [0.0, 1.0525307604141634e-16, -0.0]]
        # calculate error
        error = 0.0
        for k in range(len(targets)): #loops 1 time, k is always [0]
            error = error + 0.5*(targets[k] - self.outputActivation[k])**2
        print "error, return value from backPropagate function: ", error
        return error #because of the loop with 1000 iterations
        #error appears 1000 times with different values
        #example (last value, 1000th value)
        #error, return value from backPropagate function: 3.42294633473e-05
    def test(self, inputNodes):
        """Run a forward pass on *inputNodes* and return the single output value."""
        print "This is self from input from test from NeuralNetwork: ", self
        print "This is the input from test from NeuralNetwork: ", inputNodes
        print(inputNodes, '->', self.update(inputNodes))
        print "test1 from NeuralNetwork.py (self.update(inputNodes): ", self.update(inputNodes) #returns an array with only one value
        print "test2 from NeuralNetwork.py (self.update(inputNodes)[0])", self.update(inputNodes)[0] #returns the value (only) without being inside the array
        return self.update(inputNodes)[0] #returns the value (only) without being inside the array
        #example: -0.3763290104856086
        #from function **test** from neuralNetwork.py. Line 134
        # ([699.7640014000001, 692.359985, 711.119995], '->', [-0.3763290104856086])
    def weights(self):
        """Print both weight matrices (debug helper)."""
        print('Input weights:')
        for i in range(self.inputNodes):
            print(self.inputWeight[i])
        print()
        print('Output weights:')
        for j in range(self.hiddenNodes):
            print(self.outputWeight[j])
    def train(self, patterns, iterations = 1000, N = 0.5, M = 0.1):
        """Train on *patterns* ([[inputs, targets], ...]) for *iterations*
        epochs of forward + backward passes."""
        # N: learning rate, M: momentum factor
        print "Test1 This is patterns: ", patterns
        #This is patterns (input of train function from NeuralNetwork):
        #[[[531.9904153999998, 524.052386, 539.172466], [1.0000000000000075]]]
        for i in range(iterations):
            error = 0.0
            print "Test2 This is patterns: ", patterns
            for p in patterns: #an array of 5 arrays (each array has two arrays)
                print "Test3 This is patterns: ", patterns
                print "This is p from patterns: ", p
                inputs = p[0] #three items
                print "This is p[0]: ", inputs
                targets = p[1] #one item
                print "This is targets: ", targets
                self.update(inputs)
                error = error + self.backPropagate(targets, N, M)
            if i % 100 == 0:
                print('error %-.5f' % error)
    #Output:
    # Test1 This is patterns: [[[531.9904153999998, 524.052386, 539.172466], [1.0000000000000075]]]
    # Test2 This is patterns: [[[531.9904153999998, 524.052386, 539.172466], [1.0000000000000075]]]
    # Test3 This is patterns: [[[531.9904153999998, 524.052386, 539.172466], [1.0000000000000075]]]
    # This is p from patterns: [[531.9904153999998, 524.052386, 539.172466], [1.0000000000000075]]
    # This is p[0]: [531.9904153999998, 524.052386, 539.172466]
    # This is targets: [1.0000000000000075]
|
# Exercise 12
# A power company bills its customers per kW consumed per hour, adds a
# 0.21 % tax, and currently applies a 3.7 % promotional discount on the
# total amount to pay.
kw = float (input ("Ingrese la cantidad de kw: "))
hora = float (input ("Ingrese la cantidad de horas: "))
consumo = kw * hora
impuesto = (0.21 / 100) * consumo
# BUG FIX: the original computed round(3.7 / 100) == 0, which silently
# zeroed the discount for every customer; dropping the rounding applies
# the intended 3.7 % to the taxed total.
descuento = (3.7 / 100) * (consumo + impuesto)
total_consumo = consumo + impuesto - descuento
print ("El importe a cobrar al cliente es de: " , total_consumo, "pesos")
|
import re
from Jumpscale import j
from Jumpscale.tools.threegit.ThreeGit import load_wiki
WIKIS = {"info_grid": "wiki.grid.tf", "info_foundation": "wiki.threefold.tf", "info_tokens": "wiki.tokens.tf"}
BRANCH = "development"
TF_WIKIS_LINKS = {
"info_grid": f"https://github.com/threefoldfoundation/info_grid/tree/{BRANCH}/docs",
"info_foundation": f"https://github.com/threefoldfoundation/info_foundation/tree/{BRANCH}/docs",
"info_tokens": f"https://github.com/threefoldfoundation/info_tokens/tree/{BRANCH}/docs",
}
class Package(j.baseclasses.threebot_package):
    """Threebot package that serves the ThreeFold wikis.

    ``start`` configures one openresty website per wiki and per port
    (80/443), then schedules background jobs that load each wiki's
    content from GitHub.
    """
    def start(self):
        # Configure an HTTP and an HTTPS website for every wiki domain.
        for wiki_name, wiki_domain in WIKIS.items():
            for port in (443, 80):
                website = self.openresty.websites.get(f"threefold_wiki_{wiki_name}_{port}")
                website.port = port
                website.ssl = port == 443
                website.domain = wiki_domain
                locations = website.locations.get(name=f"{wiki_name}_wiki_locations_{port}")
                include_location = locations.get_location_custom(f"include_{wiki_name}_wiki")
                # default website locations include wiki related locations
                # so include them
                default_website_name = self.openresty.get_from_port(port).name
                # NOTE: the indentation inside this f-string is part of the
                # generated nginx config; "/" requests are rewritten to the
                # wiki's path.
                include_location.config = f"""
            include {website.path_cfg_dir}/{default_website_name}_locations/*.conf;
            location / {{
                rewrite ^(.+) /wiki/{wiki_name};
            }}"""
                locations.configure()
                website.configure()
        # Load (or reload) each wiki's content from GitHub in the background.
        for name, link in TF_WIKIS_LINKS.items():
            j.servers.myjobs.schedule(load_wiki, wiki_name=name, wiki_path=link, reset=True)
|
# Read a number n from the user and print the sum of factorials
# 1! + 2! + ... + n!.  (30 points)
print("请输入一个数字")
num = int(input())
total = 0  # running sum of factorials (renamed from `sum`, which shadowed the builtin)
f = 1      # running factorial i!
for i in range(1, num + 1):
    f = f * i
    total += f
print("阶乘为", total)
class Solution(object):
    def myAtoi(self, s):
        """
        https://leetcode.com/problems/string-to-integer-atoi/

        Skip leading whitespace, read an optional sign and a run of
        ASCII digits, and clamp the result to the signed 32-bit range.
        """
        INT_MAX = 2 ** 31 - 1
        INT_MIN = -2 ** 31
        text = s.lstrip()
        if not text:
            return 0
        # Optional single leading sign.
        sign = 1
        idx = 0
        if text[0] == '-':
            sign = -1
            idx = 1
        elif text[0] == '+':
            idx = 1
        # Consume the maximal digit prefix.
        digits = []
        while idx < len(text) and '0' <= text[idx] <= '9':
            digits.append(text[idx])
            idx += 1
        if not digits:
            return 0
        value = sign * int(''.join(digits))
        if value > INT_MAX:
            return INT_MAX
        if value < INT_MIN:
            return INT_MIN
        return value
|
import unittest
from katas.beta.counting_array_elements import count
class CountTestCase(unittest.TestCase):
    """Tests for katas.beta.counting_array_elements.count."""

    def test_equals(self):
        # count() should tally occurrences of each distinct element.
        self.assertEqual(count(['a', 'a', 'b', 'b', 'b']), {'a': 2, 'b': 3})
|
import requests
import argparse
import pathlib
import hashlib
import sys
import os
from zeroconf import ServiceBrowser, Zeroconf
from concurrent.futures import Future
from requests.auth import HTTPDigestAuth
class ESPFinder:
    """Zeroconf service listener that resolves an ESP device's address.

    The first advertised ``_http._tcp`` service whose TXT ``espid``
    matches, or whose service/TXT name ends with ``-<espid>``, fulfils
    ``self.future`` with the device's first address.  ``wait(timeout)``
    blocks until then.
    """

    def __init__(self, espid):
        self.espid = espid
        self.expected_suffix = f'-{espid}'
        self.future = Future()
        self.wait = self.future.result

    def remove_service(self, zeroconf, type, name):
        # Service departures are irrelevant to discovery.
        pass

    def decode(self, value):
        """Best-effort UTF-8 decode of a raw TXT-record value."""
        return value.decode(errors='ignore', encoding='utf-8')

    def add_service(self, zeroconf, type, name):
        if self.future.done():
            return
        info = zeroconf.get_service_info(type, name)
        addresses = info.parsed_addresses()
        if not addresses:
            return
        host = addresses[0]
        properties = {self.decode(raw_key): self.decode(raw_val)
                      for raw_key, raw_val in info.properties.items()}
        # Match in order of confidence: explicit espid, service name
        # suffix, then friendly-name suffix.
        if properties.get('espid') == self.espid:
            self.future.set_result(host)
            return
        service_name = info.get_name()
        if service_name and service_name.endswith(self.expected_suffix):
            self.future.set_result(host)
            return
        friendly_name = properties.get('name')
        if friendly_name and friendly_name.endswith(self.expected_suffix):
            self.future.set_result(host)
def find_esp(espid, timeout=10):
    """Browse mDNS for up to *timeout* seconds and return the address of
    the ESP whose id is *espid*; raises on timeout (Future.result).

    The original never closed the Zeroconf instance, leaking its sockets
    and listener thread; it is now always closed.
    """
    zeroconf = Zeroconf()
    listener = ESPFinder(espid)
    # Keep a reference so the browser isn't garbage-collected mid-search.
    browser = ServiceBrowser(zeroconf, "_http._tcp.local.", listener)
    try:
        return listener.wait(timeout)
    finally:
        zeroconf.close()
def parse_args():
    """Parse the OTA uploader's command line.

    Credentials default to the OTA_USER / OTA_PASSWORD environment
    variables ('ota' / '' when unset).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--firmware')
    parser.add_argument('--espid')
    parser.add_argument('--host')
    parser.add_argument('--user', default=os.environ.get('OTA_USER', 'ota'))
    parser.add_argument('--password', default=os.environ.get('OTA_PASSWORD', ''))
    return parser.parse_args()
def main():
    """Resolve the target device, verify its identity, and upload firmware.

    Returns 1 on host/identity problems, 2 when no firmware file is given,
    None on success (or after a best-effort upload attempt).
    """
    args = parse_args()
    if not args.host:
        if not args.espid:
            print('host or espid must be specified')
            return 1
        args.host = find_esp(args.espid)
        print(f'address of {args.espid} is {args.host}')
    auth = HTTPDigestAuth(args.user, args.password)
    url = f'http://{args.host}/update'
    # Ask the device who it is before pushing anything.
    identity_response = requests.get(f'{url}/identity', auth=auth)
    identity_response.raise_for_status()
    device_id = identity_response.json()['id']
    if args.espid and args.espid != device_id:
        print(f'ESP ID mismatch: wanted {args.espid}, got {device_id}')
        return 1
    print(f'ESP ID verified: {device_id}')
    if not args.firmware:
        print('no firmware file specified')
        return 2
    firmware_bytes = pathlib.Path(args.firmware).read_bytes()
    fwhash = hashlib.md5(firmware_bytes).hexdigest()
    print(f'firmware hash is md5={fwhash}')
    # The device's OTA endpoint expects the MD5 field alongside the blob.
    upload_response = requests.post(url, auth=auth, files={
        "MD5": (None, fwhash),
        "firmware": ("firmware", firmware_bytes, "application/octet-stream"),
    })
    print(upload_response)
    print(upload_response.text)
if __name__ == '__main__':
    # Propagate a non-zero status code to the shell; success returns None.
    exit_code = main()
    if exit_code:
        sys.exit(exit_code)
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.shell import dependency_inference, shunit2_test_runner
from pants.backend.shell.goals import tailor, test
from pants.backend.shell.subsystems import shunit2
from pants.backend.shell.target_types import (
ShellCommandRunTarget,
ShellCommandTarget,
ShellCommandTestTarget,
ShellSourcesGeneratorTarget,
ShellSourceTarget,
Shunit2TestsGeneratorTarget,
Shunit2TestTarget,
)
from pants.backend.shell.target_types import rules as target_types_rules
from pants.backend.shell.util_rules import shell_command
def target_types():
    """Return the target types contributed by the shell backend."""
    return list((
        ShellCommandTarget,
        ShellCommandRunTarget,
        ShellCommandTestTarget,
        ShellSourcesGeneratorTarget,
        Shunit2TestsGeneratorTarget,
        ShellSourceTarget,
        Shunit2TestTarget,
    ))
def rules():
    """Return all rules registered by the shell backend."""
    combined = []
    for rule_source in (
        dependency_inference.rules,
        shell_command.rules,
        shunit2.rules,
        shunit2_test_runner.rules,
        tailor.rules,
        target_types_rules,
        test.rules,
    ):
        combined.extend(rule_source())
    return combined
|
"""Module containing database cli and models."""
from flask import Flask
from sqlalchemy import event
from sqlalchemy.engine import Engine
from .db import DB, MIGRATE
from .cli import register_cli_blueprint
def register_db(app: Flask):
    """Register the sqlalchemy db and alembic migrations with the flask app."""
    # Fall back to an on-disk sqlite database inside the instance folder when
    # no database URI has been configured explicitly.
    if not app.config.get("SQLALCHEMY_DATABASE_URI"):
        app.config[
            "SQLALCHEMY_DATABASE_URI"
        ] = f"sqlite:///{app.instance_path}/{app.import_name}.db"
    DB.init_app(app)
    app.logger.info(f'Connected to db "{app.config["SQLALCHEMY_DATABASE_URI"]}".')
    register_cli_blueprint(app)
    MIGRATE.init_app(app, DB)
    # Apply additional config for Sqlite databases
    if app.config.get("SQLALCHEMY_DATABASE_URI", "").startswith("sqlite://"):
        # NOTE(review): this listener attaches to the global Engine class, so
        # it fires for every engine connection in the process, not only this
        # app's engine — confirm intent if multiple apps share a process.
        @event.listens_for(Engine, "connect")
        def set_sqlite_pragma(dbapi_connection, connection_record):
            # SQLite leaves foreign-key enforcement off by default; enable it
            # per-connection unless explicitly opted out via SQLITE_FOREIGN_KEYS.
            if app.config.get("SQLITE_FOREIGN_KEYS", True):
                cursor = dbapi_connection.cursor()
                cursor.execute("PRAGMA foreign_keys=ON")
                cursor.close()
|
import tkinter as tk
import numpy as np
import random
import time
import datetime
import threading
import Adafruit_DHT
from time import sleep # Library will let us put in delays
import RPi.GPIO as GPIO
# DHT22 sensor wiring: data line on pin 4 (numbering as used by Adafruit_DHT).
pin = 4
sensor = Adafruit_DHT.DHT22
button1_pin=12 # Button 1 is connected to physical pin 12
GPIO.setmode(GPIO.BOARD) # Use Physical Pin Numbering Scheme
GPIO.setup(button1_pin,GPIO.IN,pull_up_down=GPIO.PUD_UP)
# Make button1_pin an input, Activate Pull UP Resistor
def tick():
    """Refresh the clock label with the current time and reschedule itself
    every 200 ms via Tk's event loop."""
    now = time.strftime('%H:%M:%S')
    clock.config(text=now)
    clock.after(200, tick)
def button_pressed_mech():
    """Poll the push button once per second; on a press, read the DHT22
    sensor and publish the measurements to the UI labels."""
    # Re-arm this poller one second from now.
    threading.Timer(1, button_pressed_mech).start()
    pressed = GPIO.input(button1_pin) == 0  # active-low: pull-up resistor
    if pressed:
        humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
        if humidity is None or temperature is None:
            print('Failed to get reading. Try again!')
        else:
            print('Temp={0:0.1f}*C Humidity={1:0.1f}%'.format(temperature, humidity))
            l_display.config(text=temperature)
            l_display1.config(text=humidity)
# Main window: a 2-column grid with a clock/header row and one row each for
# the temperature and humidity readouts.
mainwindow = tk.Tk()
mainwindow.geometry('640x340')
mainwindow.title("Sensor Data Live Feed ")
clock=tk.Label(mainwindow,font=("Arial",30), bg='green',fg="white")
clock.grid(row=0, column=0, padx=10, pady=10, sticky="nsew")
l_m=tk.Label(mainwindow,text="Sensor Data ",font=("Arial",30),fg="Black")
l_m.grid(row=0,column=1, padx=10, pady=10, sticky="nsew")
l_t=tk.Label(mainwindow, text="Temperature C",font=("Arial",25))
l_t.grid(row=1,column=0, padx=10, pady=10, sticky="nsew")
# l_display / l_display1 are updated by button_pressed_mech() with the
# latest sensor readings.
l_display=tk.Label(mainwindow,font=("Arial",25),fg="red")
l_display.grid(row=1,column=1, padx=10, pady=10, sticky="nsew")
# NOTE(review): label text says "Humidity C" — presumably "%" was intended.
l_h=tk.Label(mainwindow, text="Humidity C",font=("Arial",25))
l_h.grid(row=2,column=0, padx=10, pady=10, sticky="nsew")
l_display1=tk.Label(mainwindow,font=("Arial",25),fg="red")
l_display1.grid(row=2,column=1, padx=10, pady=10, sticky="nsew")
# Start the periodic clock refresh and button poller, then block in Tk's loop.
tick()
button_pressed_mech()
mainwindow.mainloop()
|
import random
import math
def split_data_set(data, fraction):
    """Randomly partition *data* into (train, test) frames.

    *fraction* of the rows go to the train set; the remainder to test.
    The sampling seed is itself drawn from the module-level RNG.
    """
    train_set = data.sample(frac=fraction, random_state=random.randrange(10))
    return train_set, data.drop(train_set.index)
def calculate_net(node, weights):
    """Dot each row of the 2-D list *node* with the vector *weights*.

    Returns one weighted sum per row.
    """
    return [
        sum(row[j] * weights[j] for j in range(len(weights)))
        for row in node
    ]
def calculate_net_bias(node, weights, biases):
    """Forward an input vector through one layer.

    Computes net_j = sum_k node[k] * weights[k][j] + biases[j] for each
    output unit j (weights is input-major: weights[k][j]).
    """
    nets = []
    for j in range(len(weights[0])):
        acc = 0
        for k in range(len(weights)):
            acc += node[k] * weights[k][j]
        acc += biases[j]
        nets.append(acc)
    return nets
def sigmoid(net):
    """Apply the logistic function to each element of *net* IN PLACE and
    return the same list object (callers rely on the aliasing)."""
    for idx, value in enumerate(net):
        net[idx] = 1 / (1 + math.exp(-value))
    return net
def encode_target_attribute(df):
    """Integer-encode the last (target) column of *df* in place.

    Each unique target value is mapped to 0..k-1 in order of first
    appearance. Returns (df, number_of_classes).
    """
    target_attribute = list(df.columns)[-1]
    mapping = {value: index
               for index, value in enumerate(df[target_attribute].unique())}
    df[target_attribute].replace(mapping, inplace=True)
    return df, len(mapping)
def split_dataset_to_train_and_test(df, split_rate):
    """Split *df* into (train_X, train_y, test_X, test_y).

    The last column of *df* is treated as the label; *split_rate* is the
    fraction of rows that go to the training set.
    """
    data_train, data_test = split_data_set(df, split_rate)
    label = list(df.columns)[-1]
    train_X, train_y = data_train.drop([label], axis=1), data_train[label]
    test_X, test_y = data_test.drop([label], axis=1), data_test[label]
    return train_X, train_y, test_X, test_y
def split_trainset_into_chunks(train_X, train_y, batch_size):
    """Slice the training data into full mini-batches of *batch_size* rows.

    Any trailing remainder smaller than a full batch is discarded, matching
    len(data) // batch_size chunks.
    """
    rows = train_X.values.tolist()
    labels = train_y.values.tolist()

    def full_batches(seq):
        # Only complete chunks; the tail remainder is dropped.
        limit = (len(seq) // batch_size) * batch_size
        return [seq[start:start + batch_size]
                for start in range(0, limit, batch_size)]

    return full_batches(rows), full_batches(labels)
def initialize_weight_and_biases(neuron):
    """Create per-layer parameters for the layer sizes in *neuron*.

    Weights for layer i have shape (neuron[i], neuron[i+1]) and are drawn
    uniformly from [-1, 1); biases start at zero.
    """
    layer_pairs = list(zip(neuron, neuron[1:]))
    weights = [[[0] * fan_out for _ in range(fan_in)]
               for fan_in, fan_out in layer_pairs]
    biases = [[0] * fan_out for _, fan_out in layer_pairs]
    # Fill the weights with uniform noise in [-1, 1).
    for layer in weights:
        for row in layer:
            for col in range(len(row)):
                row[col] = 2 * random.random() - 1
    return weights, biases
def predict(mlp, test_X):
    """Run a forward pass for every row of *test_X* and return the index of
    the most activated output unit for each row (argmax prediction).

    *mlp* is the [neuron, weights, biases] triple produced by myMLP().
    """
    neuron, weights, biases = mlp
    rows = test_X.values.tolist()
    pred_y = []
    for inputs in rows:
        layer_input = inputs
        final = None
        for layer in range(len(neuron) - 1):
            net = calculate_net_bias(layer_input, weights[layer], biases[layer])
            final = sigmoid(net)
            layer_input = final
        pred_y.append(final.index(max(final)))
    return pred_y
def myMLP(train_X, train_y, goal_idx, split_rate=0.9, learning_rate=0.05, max_iteration=600, hidden_layer=[3], batch_size=10):
    """Train a from-scratch multilayer perceptron with mini-batch SGD.

    Args:
        train_X: feature DataFrame; one input neuron per column.
        train_y: labels convertible to ints in range(goal_idx).
        goal_idx: number of output classes (size of the output layer).
        split_rate: unused here — NOTE(review): the split happens upstream in
            split_dataset_to_train_and_test; presumably kept for API symmetry.
        learning_rate: SGD step size.
        max_iteration: number of epochs over the mini-batches.
        hidden_layer: hidden layer sizes — NOTE(review): mutable default
            argument; harmless while never mutated, but fragile.
        batch_size: rows per mini-batch (trailing remainder is dropped).

    Returns:
        [neuron, weights, biases] in the format expected by predict().
    """
    # Define parameter
    neuron = [len(train_X.columns)] + hidden_layer + [goal_idx]
    # Splitting into list of mini-batch
    train_X_chunk_list, train_Y_chunk_list = split_trainset_into_chunks(
        train_X, train_y, batch_size)
    # Initialize weights and biases
    weights, biases = initialize_weight_and_biases(neuron)
    # NOTE(review): the epoch index `i` is immediately shadowed by the layer
    # loop's `i` below, so the epoch number is unavailable inside the body.
    for i in range(max_iteration):
        cost_total = 0
        for idx_batch in range(len(train_X_chunk_list)):
            for (idx, inputs) in enumerate(train_X_chunk_list[idx_batch]):
                # Forward pass. sigmoid() mutates its argument in place, so
                # `nets` ends up holding the activated values as well.
                nets = []
                outs = []
                for i in range(len(neuron) - 1):
                    if (i == 0):
                        n = calculate_net_bias(inputs, weights[i], biases[i])
                    else:
                        n = calculate_net_bias(
                            outs[i - 1], weights[i], biases[i])
                    nets.append(n)
                    o = sigmoid(n)
                    outs.append(o)
                # One-hot encode the label of this sample.
                target = [0 for i in range(neuron[-1])]
                target[int(train_Y_chunk_list[idx_batch][idx])] = 1
                # Cost function, Square Root Error
                error = 0
                for i in range(neuron[-1]):
                    error += (target[i] - outs[-1][i]) ** 2
                # NOTE(review): neuron[2] equals the output size only with a
                # single hidden layer; neuron[-1] was presumably intended.
                # cost_total is accumulated but never used or returned.
                cost_total += error * 1 / neuron[2]
                # Backward propagation
                deltas = [[] for i in range(len(neuron) - 1)]
                i = len(neuron) - 2
                while (i >= 0):
                    d = []
                    if (i == len(neuron) - 2):
                        # Output-layer delta: d(MSE)/d(net) for sigmoid units.
                        for j in range(neuron[i + 1]):
                            d.append(-1 * (target[j] - outs[i][j]) *
                                     outs[i][j] * (1 - outs[i][j]) * 2. / neuron[-1])
                    else:
                        # Hidden-layer delta: back-propagate through the next
                        # layer's weights.
                        out = calculate_net(weights[i + 1], deltas[i + 1])
                        for j in range(neuron[i + 1]):
                            d.append(out[j] * outs[i][j] * (1 - outs[i][j]))
                    deltas[i] = d
                    # SGD update for this layer's weights and biases.
                    for j in range(neuron[i]):
                        for k in range(neuron[i+1]):
                            if (i == 0):
                                weights[i][j][k] -= learning_rate * \
                                    (deltas[i][k] * inputs[j])
                            else:
                                weights[i][j][k] -= learning_rate * \
                                    (deltas[i][k] * outs[i - 1][j])
                            # NOTE(review): this sits inside the j loop, so
                            # each bias is decremented neuron[i] times per
                            # sample — possibly unintended.
                            biases[i][k] -= learning_rate * deltas[i][k]
                    i -= 1
    return [neuron, weights, biases]
|
import sys, traceback, os
import wmi
import subprocess
def decryptorTaskName():
    """Name of the decryptor executable to look for in the process table."""
    return 'DeArchiver'
def isDecryptorTaskRunningWithPID(pid):
    """Return True if a process whose name contains the decryptor task name
    is running under the given *pid* (queried via WMI)."""
    wanted = decryptorTaskName().lower()
    for process in wmi.WMI().Win32_Process():
        if wanted in process.Name.lower() and process.ProcessID == pid:
            return True
    return False
def killDecryptorWithPID(pid):
    """Terminate the decryptor process running under *pid*, if present."""
    wanted = decryptorTaskName().lower()
    for process in wmi.WMI().Win32_Process():
        if wanted in process.Name.lower() and process.ProcessID == pid:
            process.Terminate()
if __name__ == '__main__':
    # NOTE(review): this script uses Python 2 print statements; it will not
    # run under Python 3 without conversion.
    applicationPath = "C:\\Program Files (x86)\\MediaSeal\\DeArchiver\\DeArchiver.exe"
    if not os.path.exists(applicationPath):
        # NOTE(review): execution continues even when the path is missing, so
        # Popen below will fail — presumably an early exit was intended here.
        print 'no application path'
    process = subprocess.Popen([applicationPath, "F:\\Avid_DNxHD_Test_Movie-enc.mov", "F:\\temp\\test.mov"])
    # Poll until the decryptor exits, reporting each second-or-so whether it
    # is still visible in the WMI process table.
    while process.poll() == None:
        print isDecryptorTaskRunningWithPID(process.pid)
    out, err = process.communicate()
|
#!/usr/local/bin/ryu-manager
from os import environ
# Must be set before eventlet/zmq are imported so eventlet uses the green
# (non-blocking) ZMQ bindings.
environ['EVENTLET_ZMQ'] = '1'
# Hack to load parent module
from sys import path
path.append('..')
# Import the Template Controller
from base_controller.base_controller import base_controller
from eventlet.green import zmq
# Import the System and Name methods from the OS module
from os import system, name
# Import signal
import signal
import argparse
import time
from ryu.lib import hub
# Monkey-patch the stdlib with eventlet-green equivalents before the Ryu app
# machinery starts any threads.
hub.patch()
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import arp
from ryu.lib.packet import icmp
from ryu.lib.packet import ether_types
from ryu.ofproto import ofproto_v1_0
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ofproto_v1_4
from ryu.ofproto import ofproto_v1_5
from ryu.exception import RyuException
from ryu.lib import ofctl_v1_0
from ryu.lib import ofctl_v1_2
from ryu.lib import ofctl_v1_3
from ryu.lib import ofctl_v1_4
from ryu.lib import ofctl_v1_5
from ryu.app.wsgi import ControllerBase
from ryu.app.wsgi import Response
from ryu.app.wsgi import WSGIApplication
from collections import defaultdict
from sonar.nsb import nsb
# Map each OpenFlow wire-protocol version to its matching ofctl helper module
# so stats/desc requests can be issued version-appropriately (see
# ovs_ctl.get_current_ports).
supported_ofctl = {
    ofproto_v1_0.OFP_VERSION: ofctl_v1_0,
    ofproto_v1_2.OFP_VERSION: ofctl_v1_2,
    ofproto_v1_3.OFP_VERSION: ofctl_v1_3,
    ofproto_v1_4.OFP_VERSION: ofctl_v1_4,
    ofproto_v1_5.OFP_VERSION: ofctl_v1_5,
}
def cls():
    """Clear the terminal screen ('cls' on Windows, 'clear' elsewhere)."""
    command = 'cls' if name == 'nt' else 'clear'
    system(command)
class ovs_controller(base_controller):
    """Controller backend that installs and removes traffic slices on OVS.

    Requests arrive through the base_controller messaging thread; each slice
    is realised as a pair of OpenFlow flows (forward and reverse) plus a QoS
    queue on every switch along the route, via the owning `ovs_ctl` Ryu app.
    """

    def post_init(self, **kwargs):
        """Complete initialisation once the base controller is constructed.

        Keyword Args:
            ovs: owning `ovs_ctl` Ryu app, providing access to the switches,
                the topology map, and the per-switch local-agent connections.
        """
        # Clear screen
        cls()
        self.ovs = kwargs.get('ovs')
        # Hold information about the slices: s_id -> {'route': ...}
        self.slice_list = {}

    def pre_exit(self):
        """Stop the controller server thread cleanly before process exit."""
        # Terminate the OVS SDR Controller Server
        self.shutdown_flag.set()
        # Join thread
        self.join()

    def get_topology(self, **kwargs):
        """Return the static switch topology known to the Ryu app."""
        return True, {'topology': self.ovs.topology}

    def create_slice(self, **kwargs):
        """Install forward and reverse flows (with QoS queues) for a slice.

        Keyword Args:
            s_id: slice identifier; an existing id updates the stored route.
            route: dict with 'switches' hops, 'ipv4_src'/'ipv4_dst' and
                netmasks, and optional 'min_rate' queue reservation.

        Returns:
            (True, {'host': destination_ip}) on success, (False, reason)
            otherwise.
        """
        single = time.time()  # wall-clock start for the timing log lines
        # Extract parameters from keyword arguments
        s_id = kwargs.get('s_id', None)
        route = kwargs.get('route', None)
        print('create s_id ', s_id, 'route ', route)
        # An existing slice ID is treated as an update of its route.
        if s_id in self.slice_list:
            print('updating slice ', s_id)
        # Check for validity of the route
        if not route:
            print('did not work, took', + (time.time() - single)*1000, 'ms')
            return False, 'Missing route'
        # Add the slice to the slice list
        self.slice_list[s_id] = {
            'route': route }
        # Iterate over the list of switches
        for switch in route['switches']:
            # Get the datapath
            datapath = self.ovs.switches[switch['node']]
            # Extract the datapath parameters
            dpid = datapath.id
            ofproto = datapath.ofproto
            parser = datapath.ofproto_parser
            # Forward-direction queue on the egress port.
            queue_fw = self.define_queue(route, datapath, switch['out_port'])
            # Creating ingress match and actions which will be send to ovs-switch
            match = parser.OFPMatch(
                eth_type=switch['eth_type'],
                in_port=(switch['in_port']),
                ipv4_src=(route['ipv4_src'], route['ipv4_src_netmask']),
                ipv4_dst=(route['ipv4_dst'], route['ipv4_dst_netmask'])
            )
            actions = [parser.OFPActionSetQueue(queue_fw), parser.OFPActionOutput(switch['out_port'])]
            # Add the flow to the switch
            self.ovs.add_flow(datapath, 10, match, actions)
            # Reverse-direction queue on the ingress port.
            queue_rv = self.define_queue(route, datapath, switch['in_port'])
            # Creating egress match and actions which will be send to ovs-switch
            match = parser.OFPMatch(
                eth_type=switch['eth_type'],
                in_port=(switch['out_port']),
                ipv4_src=(route['ipv4_dst'], route['ipv4_dst_netmask']),
                ipv4_dst=(route['ipv4_src'], route['ipv4_src_netmask'])
            )
            actions = [parser.OFPActionSetQueue(queue_rv), parser.OFPActionOutput(switch['in_port'])]
            # Add the flow to the switch
            self.ovs.add_flow(datapath, 10, match, actions)
        print('worked, took', + (time.time() - single)*1000, 'ms')
        return True, {'host': route['ipv4_dst']}

    def define_queue(self, route, datapath, port):
        """Create a QoS queue for *route* on *port* via the switch's local
        agent and reserve 'min_rate' bandwidth from the default queue."""
        connection = self.ovs.control[self.ovs.dpid_to_name[datapath.id]]
        queue = connection.create_queue(route, port)
        if 'min_rate' in route and route['min_rate'] is not None:
            connection.modify_default_queue(route['min_rate'], port)
        return queue

    def delete_slice(self, **kwargs):
        """Remove a slice's flows (optionally only one direction) and return
        the reserved bandwidth to the default queue.

        Keyword Args:
            s_id: identifier of an existing slice.
            route: route dict as passed to create_slice; each switch entry may
                carry 'direction' ('full', 'half-fw', or reverse-only).

        Returns:
            (True, {'s_id': s_id}) on success, (False, reason) otherwise.
        """
        # BUGFIX: `single` was previously undefined in this method, so the
        # missing-route branch below raised NameError instead of returning.
        single = time.time()
        # Extract parameters from keyword arguments
        s_id = kwargs.get('s_id', None)
        route = kwargs.get('route', None)
        print('delete s_id ', s_id, 'route ', route)
        # Check for validity of the slice ID
        if s_id not in self.slice_list:
            return False, 'Slice ID does not exist'
        # Check for validity of the route supposed to be deleted
        if not route:
            self._log('error: route not received, took', + (time.time() - single)*1000, 'ms')
            return False, 'Missing route'
        # For each switch in the route
        for switch in route['switches']:
            # Extract the datapath parameters
            datapath = self.ovs.switches[switch['node']]
            dpid = datapath.id
            ofproto = datapath.ofproto
            parser = datapath.ofproto_parser
            direction = None
            if 'direction' in switch:
                direction = switch['direction']
            match_fw = parser.OFPMatch(
                in_port=(switch['in_port']),
                eth_type=switch['eth_type'],
                ipv4_src=(route['ipv4_src'], route['ipv4_src_netmask']),
                ipv4_dst=(route['ipv4_dst'], route['ipv4_dst_netmask'])
            )
            match_rv = parser.OFPMatch(
                in_port=(switch['out_port']),
                eth_type=switch['eth_type'],
                ipv4_src=(route['ipv4_dst'], route['ipv4_dst_netmask']),
                ipv4_dst=(route['ipv4_src'], route['ipv4_src_netmask'])
            )
            # Added this clause to verify if the flow rules should be
            # deleted in the both directions
            if direction is None or direction == 'full':
                self.ovs.del_flow(datapath, match_fw)
                self.ovs.del_flow(datapath, match_rv)
            elif direction == 'half-fw':
                self.ovs.del_flow(datapath, match_fw)
            else:
                self.ovs.del_flow(datapath, match_rv)
            # Give the reserved bandwidth back on both ports.
            self.return_default_queue_reservation(datapath, route['min_rate'], switch['in_port'])
            self.return_default_queue_reservation(datapath, route['min_rate'], switch['out_port'])
        # BUGFIX: drop the bookkeeping entry so a deleted slice no longer
        # reads as existing (create_slice would otherwise log 'updating').
        self.slice_list.pop(s_id, None)
        # Return host and port -- TODO may drop port entirely
        return True, {'s_id': s_id}

    def return_default_queue_reservation(self, datapath, value, port):
        """Give *value* bandwidth back to the default queue on *port*
        (negative adjustment via the switch's local agent)."""
        connection = self.ovs.control[self.ovs.dpid_to_name[datapath.id]]
        if value is not None:
            connection.modify_default_queue(-value, port)
class ovs_ctl(app_manager.RyuApp):
    """Ryu OpenFlow 1.3 application managing a fixed 4-switch OVS fabric.

    Responsibilities visible in this class:
    - configure each switch on connect (flush flows, install table-miss rule),
    - learn MAC->port mappings from ARP packet-ins and install ARP flows,
    - collect port descriptions through version-appropriate ofctl requests,
    - host the `ovs_controller` server thread that receives slice requests.
    """
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
    # Make printing easier. TODO: Implement real logging
    def _log(self, *args, head=False):
        print("-" if head else '\t' ,*args)
    def __init__(self, *args, **kwargs):
        super(ovs_ctl, self).__init__(*args, **kwargs)
        # dpid -> {mac -> port} learned from ARP packet-ins.
        self.mac_to_port = {}
        # switch name ('s01'..) -> datapath object, filled on connect.
        self.switches = {}
        # Hard-coded datapath-id -> human name mapping for the deployment.
        self.dpid_to_name = {
            # Original environment -- uncomment the 3 lines below
            # 95536754289: 'h00',
            # 95535344413: 'h01',
            # 95542363502: 'h02'
            # ORCA first demo -- uncomment the 3 lines below
            # 95534111059: 'h00',
            # 95538556217: 'h01',
            # 95533205304: 'h02'
            # Virtual Machine SONAr -- uncomment the 5 lines below
            # 95532435104: 's01',
            # 95533179799: 's02',
            # 95532162947: 's03',
            # 95539282496: 's04',
            # 95533558180: 's05'
            # ORCA second demo -- uncomment the 5 lines below:
            # 95532594594: 's01',
            # 95534454058: 's02',
            # 95536781980: 's03',
            # 95531791552: 's04',
            # 47102661227: 's05'
            # 95532050795: 's05'
            # ORCA Final Demo
            int('000000163ea46de1', 16): 's01',
            int('000000163e1d4d1f', 16): 's02',
            int('0000000af789926b', 16): 's03',
            int('000000163e784ab7', 16): 's04'
        }
        # Static ring topology: node -> {neighbour -> local port number}.
        self.topology = defaultdict(dict)
        self.topology['s01']['s02'] = 1
        self.topology['s01']['s04'] = 2
        self.topology['s02']['s03'] = 1
        self.topology['s02']['s01'] = 2
        self.topology['s03']['s04'] = 1
        self.topology['s03']['s02'] = 2
        self.topology['s04']['s01'] = 1
        self.topology['s04']['s03'] = 2
        # Link speeds per direction (same keying as `topology`).
        self.speed = defaultdict(dict)
        self.speed['s01']['s02'] = 1000
        self.speed['s01']['s04'] = 1000
        self.speed['s02']['s03'] = 1000
        self.speed['s02']['s01'] = 1000
        self.speed['s03']['s04'] = 1000
        self.speed['s03']['s02'] = 1000
        self.speed['s04']['s01'] = 1000
        self.speed['s04']['s03'] = 1000
        # node -> list of physical port numbers, filled by desc stats replies.
        self.ports = {}
        # node -> ports excluded from ARP flooding (loop avoidance).
        self.arp_disabled_ports = self.ports_to_disable()
        # node -> local-agent (nsb) connection for queue management.
        self.control = {}
        # dpid -> {xid: (lock, msgs)} used by the ofctl stats plumbing.
        self.waiters = {}
        # Instantiate the OVS SDR Controller
        self.ovs_controller_thread = ovs_controller(
            name='OVS',
            req_header='ovs_req',  # Don't modify
            rep_header='ovs_rep',  # Don't modify
            create_msg='ovc_crs',
            request_msg='ovc_rrs',
            update_msg='ovc_urs',
            delete_msg='ovc_drs',
            topology_msg='ovc_trs',
            host=kwargs.get('host', '0.0.0.0'),
            port=kwargs.get('port', 3200),
            ovs=self
        )
        # Start the OVS SDR Controller Server
        self.ovs_controller_hub = hub.spawn(self.ovs_controller_thread.run)
        # Remaining switches to finish configuration; used for timing logs.
        self.count = len(self.topology)
        self.switch_config_count = {}
        self.single = {}
        self.st = time.time()
    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        """On switch connect: register it, install base flows, request port
        descriptions, and connect its local queue-management agent."""
        # Get the new switch
        datapath = ev.msg.datapath
        self.single[datapath.id] = time.time()
        # Add the new switch to the container
        self.switches[self.dpid_to_name[datapath.id]] = datapath
        # Send proactive rules to the switches
        self._base_start(datapath)
        self.get_current_ports(datapath)
        self.connect_local_agent(datapath)
    def connect_local_agent(self, datapath):
        """Open the nsb local-agent connection for a switch and reset its
        QoS queues; counts toward configuration completion."""
        connection = nsb(datapath)
        reset = connection.reset_queues()
        self.control[self.dpid_to_name[datapath.id]] = connection
        if reset:
            self.check_finished_config(datapath.id)
    def ports_to_disable(self):
        """Compute, per node, the ports NOT on a spanning tree of the static
        topology; flooding is suppressed on those ports to avoid ARP loops."""
        stp = defaultdict(dict)
        nodes = list(self.topology.keys())
        visited = []
        stack = []
        stack.append(nodes[0])
        # Breadth-first traversal building the spanning-tree edge set `stp`.
        while stack:
            current = stack[0]
            if current not in visited:
                visited.append(current)
                next = list(self.topology[current].keys())
                for n in next:
                    if n not in visited:
                        stack.append(n)
                        visited.append(n)
                        stp[current][n] = self.topology[current][n]
                        stp[n][current] = self.topology[n][current]
            stack.remove(current)
        # Any topology edge absent from the tree becomes a disabled port.
        b = {}
        for k in self.topology.keys():
            b[k] = []
            for l in list(self.topology.get(k).keys()):
                if l not in stp.get(k).keys():
                    b[k].append(self.topology[str(k)][str(l)])
        return b
    def get_current_ports(self, datapath):
        """Request the switch's port descriptions via the version-matching
        ofctl module; the reply lands in stats_reply_handler."""
        dpid = datapath.id
        try:
            ofctl = supported_ofctl.get(datapath.ofproto.OFP_VERSION)
            desc = ofctl.get_port_desc(datapath, self.waiters)
        except Exception as e:
            print(e)
            pass
    def _base_start(self, datapath):
        """Flush all flows on a newly connected switch and install the
        table-miss rule that punts packets to the controller."""
        # Extract the datapath parameters
        dpid = datapath.id
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        # Match all
        match = parser.OFPMatch({})
        # Delete all the existing flows
        self.del_flow(datapath, match)
        match = parser.OFPMatch()
        # Send and ask what to do to the controller
        actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
                                          ofproto.OFPCML_NO_BUFFER)]
        # Add the controller flow to the switch
        self.add_flow(datapath, 0, match, actions)
        # Output info message
        #self._log('Configured Switch ', self.dpid_to_name[dpid], head=True)
        self.check_finished_config(dpid)
    def add_flow(self, datapath, priority, match, actions, buffer_id=None):
        """Send an OFPFlowMod installing *actions* for *match* at *priority*,
        optionally releasing a buffered packet via *buffer_id*."""
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
                                             actions)]
        if buffer_id:
            mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
                                    priority=priority, match=match,
                                    instructions=inst)
        else:
            mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
                                    match=match, instructions=inst)
        datapath.send_msg(mod)
    def del_flow(self, datapath, match):
        """Delete every flow matching *match* on the switch (any port/group)."""
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        mod = parser.OFPFlowMod(
            datapath=datapath,
            match=match,
            out_port=ofproto.OFPP_ANY,
            out_group=ofproto.OFPG_ANY,
            command=ofproto.OFPFC_DELETE
        )
        datapath.send_msg(mod)
    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        """Learn MAC->port from ARP packet-ins and provision ARP paths.

        Non-ARP packets are ignored; slice traffic is handled entirely by the
        proactive flows installed by ovs_controller.
        """
        msg = ev.msg
        datapath = msg.datapath
        in_port = msg.match['in_port']
        parser = datapath.ofproto_parser
        dpid = datapath.id
        ofproto = datapath.ofproto
        pkt = packet.Packet(data=msg.data)
        pkt_ethernet = pkt.get_protocol(ethernet.ethernet)
        if not pkt_ethernet:
            return
        pkt_arp = pkt.get_protocol(arp.arp)
        pkt_icmp = pkt.get_protocol(icmp.icmp)
        # self.logger.info("packet in %s %s %s %s",
        #                  self.dpid_to_name[dpid],
        #                  pkt_ethernet.src,
        #                  pkt_ethernet.dst,
        #                  in_port)
        if not pkt_arp:
            return
        self.mac_to_port.setdefault(dpid, {})
        # learn a mac address to avoid FLOOD next time.
        self.mac_to_port[dpid][pkt_ethernet.src] = in_port
        if pkt_ethernet.dst in self.mac_to_port[dpid]:
            out_port = self.mac_to_port[dpid][pkt_ethernet.dst]
        else:
            out_port = ofproto.OFPP_FLOOD
        self.provision_paths(
            msg, in_port, pkt_ethernet.src, out_port, pkt_ethernet.dst, dpid)
    def provision_paths(self, msg, in_port, src, out_port, dst, dpid):
        """Forward/flood an ARP packet and install the matching ARP flow.

        Flooding is restricted to ports on the spanning tree (see
        ports_to_disable) to avoid broadcast loops in the ring topology.
        """
        parser = msg.datapath.ofproto_parser
        ofproto = msg.datapath.ofproto
        if out_port == ofproto.OFPP_FLOOD:
            # Can't flood safely before port info and the tree are known.
            if not self.ports or not self.arp_disabled_ports:
                return
            actions = []
            node = self.dpid_to_name.get(dpid)
            if node in self.ports:
                for port in self.ports[node]:
                    if port not in self.arp_disabled_ports[node] and port != in_port:
                        actions.append(parser.OFPActionOutput(port))
            else:
                return
        # print(self.mac_to_port)
        # install a flow to avoid packet_in next time
        if out_port != ofproto.OFPP_FLOOD:
            actions = [parser.OFPActionOutput(out_port)]
            match = parser.OFPMatch(
                eth_type=0x0806,  # 0x0806 = ARP packet
                in_port=in_port,
                eth_dst=dst,
                eth_src=src)
            # verify if we have a valid buffer_id, if yes avoid to send both
            # flow_mod & packet_out
            if msg.buffer_id != ofproto.OFP_NO_BUFFER:
                self.add_flow(msg.datapath, 1, match, actions, msg.buffer_id)
                return
            else:
                self.add_flow(msg.datapath, 1, match, actions)
        data = None
        if msg.buffer_id == ofproto.OFP_NO_BUFFER:
            data = msg.data
        out = parser.OFPPacketOut(datapath=msg.datapath, buffer_id=msg.buffer_id,
                                  in_port=in_port, actions=actions, data=data)
        msg.datapath.send_msg(out)
    @set_ev_cls([ofp_event.EventOFPStatsReply,
                 ofp_event.EventOFPDescStatsReply,
                 ofp_event.EventOFPFlowStatsReply,
                 ofp_event.EventOFPAggregateStatsReply,
                 ofp_event.EventOFPTableStatsReply,
                 ofp_event.EventOFPTableFeaturesStatsReply,
                 ofp_event.EventOFPPortStatsReply,
                 ofp_event.EventOFPQueueStatsReply,
                 ofp_event.EventOFPQueueDescStatsReply,
                 ofp_event.EventOFPMeterStatsReply,
                 ofp_event.EventOFPMeterFeaturesStatsReply,
                 ofp_event.EventOFPMeterConfigStatsReply,
                 ofp_event.EventOFPGroupStatsReply,
                 ofp_event.EventOFPGroupFeaturesStatsReply,
                 ofp_event.EventOFPGroupDescStatsReply,
                 ofp_event.EventOFPPortDescStatsReply
                 ], MAIN_DISPATCHER)
    def stats_reply_handler(self, ev):
        """Collect multipart stats replies for pending ofctl requests and
        wake up the waiting requester once the final part arrives."""
        msg = ev.msg
        dp = msg.datapath
        body = msg.body
        # Type 13 = port description reply; capture the port list.
        if msg.type == 13:
            self.desc_stats_reply_handler(dp, body)
        if dp.id not in self.waiters:
            return
        if msg.xid not in self.waiters[dp.id]:
            return
        lock, msgs = self.waiters[dp.id][msg.xid]
        msgs.append(msg)
        # Per-version "more replies follow" flag.
        flags = 0
        if dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
            flags = dp.ofproto.OFPSF_REPLY_MORE
        elif dp.ofproto.OFP_VERSION == ofproto_v1_2.OFP_VERSION:
            flags = dp.ofproto.OFPSF_REPLY_MORE
        elif dp.ofproto.OFP_VERSION >= ofproto_v1_3.OFP_VERSION:
            flags = dp.ofproto.OFPMPF_REPLY_MORE
        if msg.flags & flags:
            return
        del self.waiters[dp.id][msg.xid]
        lock.set()
    def desc_stats_reply_handler(self, dp, body):
        """Record a switch's physical port numbers from a port-desc reply
        (the OFPP_LOCAL pseudo-port 4294967294 is excluded)."""
        dpid = dp.id
        node = self.dpid_to_name.get(dpid)
        self.ports[node] = []
        for p in body:
            port = p.port_no
            if port != 4294967294:
                self.ports[node].append(port)
        #print('Switch', node, ' ports:', self.ports[node])
        self.check_finished_config(dpid)
    def check_finished_config(self, dpid):
        """Count configuration milestones per switch; after all three (base
        flows, port desc, queue reset) log timing, and log the grand total
        once every switch is done."""
        if dpid not in self.switch_config_count:
            self.switch_config_count[dpid] = 0
        self.switch_config_count[dpid] = self.switch_config_count[dpid] + 1
        if self.switch_config_count[dpid] == 3:
            self.count-= 1
            self._log('Configured Switch ', self.dpid_to_name[dpid], head=True)
            print('took', + (time.time() - self.single[dpid])*1000, 'ms')
            if (not self.count):
                self._log('total:', (time.time()-self.st)*1000, 'ms')
|
import random
import json
from os import walk
import operator
import re
#import lyricsgenius
# Collect the filenames directly inside ./songs (top level only).
filenames = []
for (dirpath, dirnames, filenames_list) in walk("./songs"):
    filenames.extend(filenames_list)
    break
# Load each song file (Genius-style JSON) and keep its lower-cased lyrics.
lyrics = []
for filename in filenames:
    try:
        file = open("./songs/"+filename)
        lines = file.readlines()
        json_str = ' '.join(lines)
        loaded_json = json.loads(json_str)
        song_lyrics = loaded_json['songs'][0]['lyrics'].lower()
        #replace all the \n's with " "
        while song_lyrics.find("\n") != -1:
            song_lyrics = song_lyrics.replace("\n", " ")
        lyrics.append(song_lyrics)
        file.close()
    # NOTE(review): bare except silently skips any unreadable/invalid file;
    # it also leaks the file handle when a later line raises.
    except:
        #print("ERROR: %s not found!" %filename)
        continue
# Build the Markov successor table: word -> list of following words
# (duplicates intentionally kept; their multiplicity encodes frequency).
wordsData = {} #word, nextWordList
for song_lyrics in lyrics:
    #song_lyrics = re.sub(rePattern, '', song_lyrics)
    words = song_lyrics.split(" ")
    counter = 0
    while counter < len(words) - 1:
        word1, word2 = words[counter], words[counter + 1]
        if len(word1) > 0 and len(word2) > 0:
            # Strip one trailing punctuation character from each word.
            if word1[-1] in ".,'!):(?": word1 = word1[:-1]
            if word2[-1] in ".,'!):(?": word2 = word2[:-1]
            if word1 in wordsData.keys():
                wordsData[word1].append(word2)
            else:
                wordsData[word1] = [word2]
        counter += 1
def getAvgLyricsLength():
    """Mean character length of the loaded songs (module-level `lyrics`)."""
    total = sum(map(len, lyrics))
    return int(total / len(lyrics))
def getNextWord(currentWord, second=False):
    """Return the most frequent successor of *currentWord* in the corpus.

    Looks up the module-level `wordsData` successor lists. Ties resolve to
    the successor seen first, matching dict insertion order. *second* is
    retained for interface compatibility (it was only consumed by a
    commented-out alternative strategy) and is currently ignored.

    Raises KeyError when *currentWord* has no recorded successors; callers
    swallow this in their try/except loop.
    """
    successor_counts = {}
    for successor in wordsData[currentWord]:
        # BUGFIX: the original initialised first occurrences to 0 instead of
        # 1; the argmax was unaffected (uniform off-by-one) but the counts
        # themselves were wrong.
        successor_counts[successor] = successor_counts.get(successor, 0) + 1
    # Pick the key with the highest count (first-inserted wins ties).
    return max(successor_counts.items(), key=operator.itemgetter(1))[0]
#let the starting word be randomly chosen
currentWord = random.choice(list(wordsData.keys()))
maxWords = getAvgLyricsLength()
prevWords = []
# Generate roughly one "average song" of words, greedily following the most
# frequent successor; if the chosen word appeared in the last 5 outputs,
# restart the chain from the most recent word to reduce short loops.
for x in range(maxWords):
    try:
        currentWord = getNextWord(currentWord)
        if len(prevWords) > 5:
            seq = prevWords[x-5:]
            if currentWord in seq:
                currentWord = getNextWord(seq[-1], second=True)
        prevWords.append(currentWord)
        print(currentWord, end=" ")
    # NOTE(review): bare except hides KeyErrors for words with no successor
    # (and any other error) and simply skips that step.
    except:
        continue
#!/usr/bin/env python
"""
::
run ~/opticks/ana/debug_buffer.py
"""
import os, numpy as np
np.set_printoptions(suppress=True)
# Default the event base to /tmp/$USER/opticks unless already configured.
os.environ.setdefault("OPTICKS_EVENT_BASE",os.path.expandvars("/tmp/$USER/opticks"))
# Debug "dg" buffer written by a G4OKTest run.
path = os.path.expandvars("$OPTICKS_EVENT_BASE/G4OKTest/evt/g4live/natural/1/dg.npy")
dg = np.load(path)
# Column (0,3) reinterpreted as uint32 carries the sensor index.
sensorIndex = dg[:,0,3].view(np.uint32)
#tid = dg[:,0,3].view(np.uint32)
# Keep only entries that landed on a sensor (index > 0).
sel = sensorIndex > 0
#sel = tid > 0x5000000 # for DYB this means landing (but not necessarily "hitting") a volume of the instanced PMT assembly
dgi = sensorIndex[sel]
dgs = dg[sel]
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import signal
import unittest
from simple.readwrite import SunnyDayTest
from TestUtils import ACCUMULO_HOME
import logging
log = logging.getLogger('test.auto')
class TabletServerHangs(SunnyDayTest):
    """System test: fake a disk stall on a tablet server mid-ingest and
    verify the client requeues mutations and ingestion completes once I/O
    resumes."""
    order = 25
    # connections should timeout quickly for faster tests
    settings = SunnyDayTest.settings.copy()
    settings['general.rpc.timeout'] = '5s'
    settings['instance.zookeeper.timeout'] = '15s'
    def start_tserver(self, host):
        """Start a tserver with fake_disk_failure.so preloaded so its I/O can
        be paused later by creating the flag file; returns the process."""
        log.info("Starting tserver we can pause with bad read/writes")
        libpath = '%s/test/system/auto/fake_disk_failure.so' % ACCUMULO_HOME
        # Preload the failure-injection library (Linux and macOS variants).
        os.environ['LD_PRELOAD'] = libpath
        os.environ['DYLD_INSERT_LIBRARIES'] = libpath
        os.environ['DYLD_FORCE_FLAT_NAMESPACE'] = 'true'
        self.stop = self.runOn(self.masterHost(), [self.accumulo_sh(), 'tserver'])
        # Unset immediately so only the tserver child inherits the preload.
        del os.environ['LD_PRELOAD']
        del os.environ['DYLD_FORCE_FLAT_NAMESPACE']
        del os.environ['DYLD_INSERT_LIBRARIES']
        # The injected library pauses I/O while this per-pid flag file exists.
        self.flagFile = os.getenv("HOME") + "/HOLD_IO_%d" % self.stop.pid
        log.debug("flag file is " + self.flagFile)
        return self.stop
    def runTest(self):
        """Ingest, stall the tserver's disk for 10 s, then verify recovery."""
        waitTime = self.waitTime()
        log.info("Waiting for ingest to stop")
        self.waitForStop(self.ingester, waitTime)
        MANY_ROWS = 500000
        self.ingester = self.ingest(self.masterHost(),
                                    MANY_ROWS,
                                    size=self.options.size)
        # wait for the ingester to get going
        for i in range(100):
            line = self.ingester.stdout.readline()
            if line == '' or line.find(' sent ') > 0:
                break
        log.info("Starting faking disk failure for tserver")
        fp = open(self.flagFile, "w+")
        fp.close()
        self.sleep(10)
        log.info("Ending faking disk failure for tserver")
        os.unlink(self.flagFile)
        # look for the log message that indicates a timeout
        out, err = self.waitForStop(self.ingester, waitTime)
        self.assert_(out.find('requeuing') >= 0)
        log.info("Verifying Ingestion")
        self.waitForStop(self.verify(self.masterHost(),
                                     MANY_ROWS,
                                     size=self.options.size),
                         waitTime)
        os.kill(self.stop.pid, signal.SIGHUP)
        # look for the log message that indicates the tablet server stopped for a while
        out, err = self.stop.communicate()
        self.assert_(err.find('sleeping\nsleeping\nsleeping\n') >= 0)
    def tearDown(self):
        """Best-effort cleanup of the I/O-hold flag file."""
        SunnyDayTest.tearDown(self)
        try:
            os.unlink(self.flagFile)
        except:
            pass
def suite():
    """Assemble the unittest suite containing the tserver-hang scenario."""
    tests = unittest.TestSuite()
    tests.addTest(TabletServerHangs())
    return tests
|
# -*- coding: utf-8 -*-
import regex
import logging
import sqlite3
from datetime import datetime
import config
import stuff
import get
from telegram.ext import Updater, MessageHandler, CommandHandler, Filters, PrefixHandler, CallbackContext
from telegram import TelegramError, ReplyKeyboardMarkup, ReplyKeyboardRemove, Update
import random
from time import time
from weather import WeatherGod
# hyviä ehdotuksia: krediitti ja vitsi
class TelegramBot:
    def __init__(self):
        """Wire up logging, all command/message handlers and the job queue,
        then start long polling.

        NOTE: constructing this object has side effects — it opens the
        log file, creates DB tables and starts polling immediately.
        """
        logging.basicConfig(filename='mobile.log', format='%(asctime)s - %(name)s - %(levelname)s - '
                                                          '%(message)s', filemode='w', level=logging.WARNING)
        updater = Updater(token=config.TOKEN, use_context=True)
        dispatcher = updater.dispatcher
        # Command name -> handler. Some commands are aliases for the same
        # method (e.g. 'quoteadd'/'addquote', 'value_of_content'/'voc').
        self.commands = {'wabu': self.wabu,
                         'kiitos': self.kiitos,
                         'sekseli': self.sekseli,
                         'poyta': self.poyta,
                         #'pöytä': self.poyta,
                         'insv': self.insv,
                         'quoteadd': self.quoteadd,
                         'addquote': self.quoteadd,
                         'quote': self.quote,
                         'viisaus': self.viisaus,
                         'saa': self.weather,
                         #'sää': self.weather,
                         'kuka': self.kuka,
                         'value_of_content': self.voc,
                         'voc': self.voc,
                         'cocktail': self.cocktail,
                         'episode_ix': self.episode_ix,
                         'kick': self.kick,
                         'leffa': self.leffa,
                         'voivoi': self.voivoi,
                         'fiilis': self.getFiilis,
                         'viikonloppu': self.viikonloppu,
                         'rudelf': self.rudelf
                         }
        for cmd, callback in self.commands.items():
            # Commands work with '!', '.' and '/' prefixes.
            dispatcher.add_handler(PrefixHandler(['!', '.', '/'], cmd, callback))
            dispatcher.add_handler(CommandHandler(cmd, callback))  # DO NOT REMOVE OR COMMENT OUT
        dispatcher.add_handler(MessageHandler(Filters.status_update.pinned_message, self.pinned))
        dispatcher.add_handler(MessageHandler(Filters.text, self.huuto))
        # TODO: add a textHandler function like commandsHandler but for plain text
        # TODO: move voc_add out of huuto() :DDD
        # TODO: add filters so that gifs and images also bump self.voc_msg
        dispatcher.job_queue.run_repeating(self.voc_check, interval=60, first=5)
        # Commands exempt from cooldown bookkeeping.
        self.noCooldown = (self.quoteadd, self.leffa, self.kick)
        self.users = {}  # user_id : unix timestamp
        self.voc_cmd = list()  # timestamps of recent command messages
        self.voc_msg = list()  # timestamps of recent ordinary messages
        get.create_tables()
        updater.start_polling()
        # updater.idle()
        logging.info('Botti käynnistetty')
    @staticmethod
    def wabu(update: Update, context: CallbackContext):
        """Reply with a countdown to Wappu; currently stubbed out.

        The countdown below is computed but unused — the detailed reply
        is disabled (kept as a string literal) and a placeholder is sent.
        """
        wabu = datetime(2021, 4, 15, 13)
        tanaan = datetime.now()
        erotus = wabu - tanaan
        # timedelta.seconds is only the sub-day remainder (0..86399).
        hours = erotus.seconds // 3600
        minutes = (erotus.seconds - hours*3600) // 60
        seconds = erotus.seconds - hours * 3600 - minutes * 60
        """
        context.bot.send_message(chat_id=update.message.chat_id,
                                 text=f'Wabun alkuun on {erotus.days} päivää, {hours} tuntia, {minutes} minuuttia ja'
                                      f' {seconds} sekuntia',
                                 disable_notification=True)
        """
        context.bot.send_message(chat_id=update.message.chat_id,
                                 text=f'Wappu on joskus',
                                 disable_notification=True)
@staticmethod
def episode_ix(update: Update, context: CallbackContext):
wabu = datetime(2019, 12, 20)
tanaan = datetime.now()
erotus = wabu - tanaan
context.bot.send_message(chat_id=update.message.chat_id,
text=f'Ensi-iltaan on mennyt jo kauan sitten.', disable_notification=True)
@staticmethod
def kiitos(update: Update, context: CallbackContext):
if update.message.reply_to_message is not None:
context.bot.send_message(chat_id=update.message.chat_id,
text=f'Kiitos {update.message.reply_to_message.from_user.first_name}!',
disable_notifications=True)
else:
context.bot.send_message(chat_id=update.message.chat_id, text='Kiitos Jori!', disable_notification=True)
@staticmethod
def voivoi(update: Update, context: CallbackContext):
if update.message.reply_to_message is not None:
context.bot.send_message(chat_id=update.message.chat_id,
text=f'voi voi {update.message.reply_to_message.from_user.first_name}😩😩😩',
disable_notifications=True)
else:
context.bot.send_message(chat_id=update.message.chat_id, text='voi voi Nuutti😩😩😩', disable_notification=True)
@staticmethod
def sekseli(update: Update, context: CallbackContext):
if update.message.chat_id == config.MOBILE_ID:
context.bot.forward_message(chat_id=update.message.chat_id, from_chat_id=config.MOBILE_ID,
message_id=316362, disable_notification=True)
@staticmethod
def poyta(update: Update, context: CallbackContext):
context.bot.send_animation(chat_id=update.message.chat_id, animation=config.desk, disable_notification=True)
@staticmethod
def insv(update: Update, context: CallbackContext):
context.bot.send_sticker(chat_id=update.message.chat_id, sticker=config.insv, disable_notification=True)
@staticmethod
def pinned(update: Update, context: CallbackContext):
try:
if update.message.pinned_message:
if update.message.chat_id == config.MOBILE_ID:
sql = "INSERT INTO pinned VALUES (?,?,?)"
pinned = (update.message.date.isoformat(), update.message.pinned_message.from_user.username,
update.message.pinned_message.text)
conn = sqlite3.connect(config.DB_FILE)
cur = conn.cursor()
cur.execute(sql, pinned)
conn.commit()
conn.close()
except KeyError:
return False
@staticmethod
def quoteadd(update: Update, context: CallbackContext):
r = regex.compile(r'\/quoteadd (.[^\s]+) (.+)')
match = r.match(update.message.text)
if match:
temp = (match[1], match[2], update.message.chat_id)
# tarkasta onko sitaatti jo lisätty joskus aiemmin
result = get.dbQuery("SELECT * FROM quotes WHERE quotee=? AND quote=? AND groupID=?", temp)
if len(result) != 0:
context.bot.send_message(chat_id=update.message.chat_id, text="Toi on jo niin kuultu...",
disable_notification=True)
return
quote = (datetime.now().strftime("%Y-%m-%d %H:%M:%S"), match[1],
match[2], update.message.from_user.username, update.message.chat_id)
conn = sqlite3.connect(config.DB_FILE)
cur = conn.cursor()
sql_insert = "INSERT INTO quotes VALUES (?,?,?,?,?)"
cur.execute(sql_insert, quote)
conn.commit()
conn.close()
context.bot.send_message(chat_id=update.message.chat_id, text="Sitaatti suhahti")
else:
context.bot.send_message(chat_id=update.message.chat_id,
text="Opi käyttämään komentoja pliide bliis!! (/quoteadd"
" <nimi> <sitaatti>)")
@staticmethod
def quote(update: Update, context: CallbackContext):
space = update.message.text.find(' ')
if space == -1:
quotes = get.dbQuery("SELECT * FROM quotes WHERE groupID=? ORDER BY RANDOM() LIMIT 1", (update.message.chat_id,))
if len(quotes) == 0:
context.bot.send_message(chat_id=update.message.chat_id, text='Yhtään sitaattia ei ole lisätty.')
else:
name = update.message.text[space + 1:]
quotes = get.dbQuery("""SELECT * FROM quotes WHERE LOWER(quotee)=? AND groupID=? ORDER BY RANDOM() LIMIT 1""",
(name.lower(),
update.message.chat_id))
if len(quotes) == 0:
context.bot.send_message(chat_id=update.message.chat_id, text='Ei löydy')
return
context.bot.send_message(chat_id=update.message.chat_id, text=f'"{quotes[0][2]}" -{quotes[0][1]}')
@staticmethod
def viisaus(update: Update, context: CallbackContext):
wisenings = get.dbQuery("SELECT * FROM sananlaskut ORDER BY RANDOM() LIMIT 1")
context.bot.send_message(chat_id=update.message.chat_id, text=wisenings[0][0])
@staticmethod
def kuka(update: Update, context: CallbackContext):
index = random.randint(0, len(config.MEMBERS)-1)
context.bot.send_message(chat_id=update.message.chat_id, text=config.MEMBERS[index])
    @staticmethod
    def weather(update: Update, context: CallbackContext):
        """Send a weather report for the city given after the command.

        The [5:] slice assumes a 4-character command plus one space
        (e.g. '/saa '); other prefixes/aliases would leave part of the
        command in the city string — TODO confirm intended aliases.
        """
        try:
            city = update.message.text[5:]
            weather = WeatherGod()
            context.bot.send_message(chat_id=update.message.chat_id,
                                     text=weather.generateWeatherReport(city))
        except AttributeError:
            # NOTE(review): presumably raised by WeatherGod for a missing or
            # unknown city — verify; fall back to a usage hint.
            context.bot.send_message(chat_id=update.message.chat_id,
                                     text="Komento vaatii parametrin >KAUPUNKI< \n"
                                          "Esim: /saa Hervanta ")
            return
    @staticmethod
    def kick(update: Update, context: CallbackContext):
        """Kick the *caller* and schedule an invite link back after 60 s."""
        try:
            context.bot.kickChatMember(update.message.chat.id, update.message.from_user.id)
            # Job payload consumed by TelegramBot.invite:
            # [chat_id, user_id, invite_link].
            context.job_queue.run_once(TelegramBot.invite, 60, context=[update.message.chat_id, update.message.from_user.id,
                                                                        update.message.chat.invite_link])
        except TelegramError:
            # e.g. the bot lacks admin rights or the target is an admin.
            context.bot.send_message(chat_id=update.message.chat_id, text="Vielä joku päivä...")
@staticmethod
def invite(update: Update, context: CallbackContext):
job = context.job
context.bot.unBanChatMember(chat_id=job.context[0], user_id=job.context[1])
context.bot.send_message(chat_id=job.context[1], text=job.context[2])
def voc(self, update: Update, context: CallbackContext):
if self.voc_calc():
context.bot.send_message(chat_id=update.message.chat_id, text="Value of content: Laskussa")
else:
context.bot.send_message(chat_id=update.message.chat_id, text="Value of content: Nousussa")
def voc_check(self, update: Update, context: CallbackContext):
now = time()
while len(self.voc_cmd) > 0:
if now - self.voc_cmd[0] > 7200:
self.voc_cmd.pop(0)
else:
break
while len(self.voc_msg) > 0:
if now - self.voc_msg[0] > 7200:
self.voc_msg.pop(0)
else:
return
def voc_add(self, update: Update):
if update.message.entities is None:
self.voc_msg.append(time())
for i in update.message.entities:
if i.type == 'bot_command':
self.voc_cmd.append(time())
else:
self.voc_msg.append(time())
def voc_calc(self):
now = time()
cmds = 0
for i in self.voc_cmd:
if now - i < 900:
cmds += 4
elif 900 < now - i < 1800:
cmds += 2
else:
cmds += 1
msgs = 2 * len(self.voc_msg)
# Minus 4 so that we dont count the calling /voc
return cmds - 4 > msgs
@staticmethod
def cocktail(update: Update, context: CallbackContext):
adj = get.dbQuery('''SELECT * FROM adjektiivit ORDER BY RANDOM() LIMIT 1''')[0][0].capitalize() # fetchall returns tuple in list
sub = get.dbQuery('''SELECT * FROM substantiivit ORDER BY RANDOM() LIMIT 1''')[0][0]
if update.message.text[0:12] == '/cocktail -n':
context.bot.send_message(chat_id=update.message.chat_id, text=f'{adj} {sub}', disable_notification=True)
return
# generate cocktail name
msg = str(adj) + " " + str(sub) + ":\n"
floor = random.randint(0, 1)
# generate spirit(s)
used = []
for i in range(random.randint(0, 3) * floor):
index = random.randint(0, len(stuff.spirits) - 1)
while index in used:
index = random.randint(0, len(stuff.spirits) - 1)
used.append(index)
rnd = stuff.spirits[index]
vol = str(random.randrange(2, 8, 2))
msg += "-" + vol + " " + "cl " + rnd + "\n"
# generate mixer(s)
used = []
if floor == 0:
# in case of no spirits, lift the floor to 1
# so recipe contains at least one mixer
floor = 1
for i in range(random.randint(floor, 3)):
index = random.randint(0, len(stuff.spirits) - 1)
while index in used:
index = random.randint(0, len(stuff.spirits) - 1)
used.append(index)
rnd = stuff.mixers[index]
vol = str(random.randrange(5, 20, 5))
msg += "-" + vol + " " + "cl " + rnd + "\n"
context.bot.send_message(chat_id=update.message.chat_id, text=msg)
    def huuto(self, update: Update, context: CallbackContext):
        """Plain-text handler: bookkeeping plus occasional shouted replies.

        Replies only when a random roll lands inside stuff.message AND the
        whole message is 'shouted' (no lowercase letters). The pattern uses
        the third-party `regex` module's POSIX class [[:lower:]], which the
        stdlib `re` module does not support.
        """
        rng = random.randint(0, 99)
        r = regex.compile(r"^(?![\W])[^[:lower:]]+$")
        self.voc_add(update)
        self.leffaReply(update, context)
        # rng doubles as reply-frequency gate and reply index.
        if rng >= len(stuff.message) or not r.match(update.message.text):
            return
        context.bot.send_message(chat_id=update.message.chat_id, text=stuff.message[rng], disable_notification=True)
@staticmethod
def leffa(update: Update, context: CallbackContext):
custom_keyboard = get.generateKeyboard()
reply_markup = ReplyKeyboardMarkup(get.build_menu(custom_keyboard, n_cols=2))
context.bot.send_message(chat_id=update.message.chat_id,
text="Leffoja",
reply_markup=reply_markup)
@staticmethod
def leffaReply(update: Update, context: CallbackContext):
if update.message.reply_to_message is None:
return
if update.message.reply_to_message.text != "Leffoja":
return
premiere = get.getMovie(update.message.text)
reply_markup = ReplyKeyboardRemove()
context.bot.send_message(chat_id=update.message.chat_id, text=f'Ensi-ilta on {premiere}', reply_markup=reply_markup)
@staticmethod
def getFiilis(update: Update, context: CallbackContext):
imgUrl = get.getImage()
if imgUrl != "":
context.bot.send_photo(chat_id=update.message.chat_id, photo=imgUrl)
else:
context.bot.send_message(chat_id=update.message.chat_id, text="Ei fiilistä")
@staticmethod
def viikonloppu(update: Update, context: CallbackContext):
context.bot.send_message(chat_id=update.message.chat_id,
text=f'On viiiiiikonloppu! https://youtu.be/vkVidHRkF88',
disable_notifications=True)
def rudelf(self, update: Update, context: CallbackContext):
if update.message.reply_to_message is False or update.message.reply_to_message.text is None:
return
# Capitalize
msg = update.message.reply_to_message.text[0].upper() + update.message.reply_to_message.text[1:]
for key, val in stuff.rudismit.items():
msg = regex.sub(regex.compile(key), val, msg)
if random.randint(0,9) < 3:
msg = msg + " 😅"
context.bot.send_message(chat_id=update.message.chat_id,
text=msg, disable_notification=True)
if __name__ == '__main__':
    # Constructing the bot has side effects: it starts polling immediately
    # (see TelegramBot.__init__).
    TelegramBot()
|
# Generated by Django 3.1.1 on 2020-09-30 18:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: drop TheodoTeam, extend Parliament1.

    Adds optional free-text 'dob' and 'pp' fields and makes 'id' an
    explicit AutoField primary key. Do not reorder operations — they must
    stay in sync with the recorded migration history.
    """

    dependencies = [
        ('scrapingApp', '0018_theodoteam'),
    ]

    operations = [
        migrations.DeleteModel(
            name='TheodoTeam',
        ),
        migrations.AddField(
            model_name='parliament1',
            name='dob',
            field=models.TextField(blank=True, max_length=15, null=True),
        ),
        migrations.AddField(
            model_name='parliament1',
            name='pp',
            field=models.TextField(blank=True, max_length=10, null=True),
        ),
        migrations.AlterField(
            model_name='parliament1',
            name='id',
            field=models.AutoField(primary_key=True, serialize=False),
        ),
    ]
|
#!/usr/bin/env python
# coding: utf-8
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
class Mail:
    """Minimal SMTP mail builder/sender.

    Usage: set_subject(), set_receiver(), add_text()/add_html()/add_file(),
    then send().
    """

    def __init__(self):
        self.sender = 'autosendmail@qq.com'
        self.smtpserver = 'smtp.qq.com'
        self.username = 'autosendmail@qq.com'
        # NOTE(review): credentials are hard-coded; move to config/env.
        self.password = 'autosendmail'
        # Initialize subject/receivers so send() fails predictably instead
        # of raising AttributeError when the setters were never called.
        self.subject = ''
        self.receivers = []
        self.content = []     # MIME body parts
        self.attachment = []  # MIME attachment parts

    def set_subject(self, subject):
        """Set the message subject line."""
        self.subject = subject

    def set_receiver(self, receivers):
        """Set the recipient address list (list of str)."""
        self.receivers = receivers

    def add_text(self, text):
        """Append a plain-text body part."""
        self.content.append(MIMEText(text, 'plain'))

    def add_html(self, html):
        """Append an HTML body part."""
        self.content.append(MIMEText(html, 'html'))

    def add_file(self, filename):
        """Attach the given file, base64-encoded."""
        filename = filename.replace("\\", "/")
        # BUG FIX: use a context manager so the file handle is closed even
        # if MIME construction fails (the original leaked it).
        with open(filename, 'rb') as fp:
            attach = MIMEText(fp.read(), 'base64', 'utf-8')
        attach["Content-Disposition"] = 'attachment; filename="' + filename.split("/")[-1] + '"'
        self.attachment.append(attach)

    def send(self):
        """Assemble the MIME message and send it via SMTP."""
        msg = MIMEMultipart('alternative')
        msg['Subject'] = self.subject
        msg['From'] = self.sender
        for part in self.content:
            msg.attach(part)
        for part in self.attachment:
            msg.attach(part)
        smtp = smtplib.SMTP()
        smtp.connect(self.smtpserver)
        smtp.login(self.username, self.password)
        smtp.sendmail(self.sender, self.receivers, msg.as_string())
        smtp.quit()
        smtp.close()  # no-op after quit(); kept as belt-and-braces cleanup
if __name__ == '__main__':
    # Demo: build a message with text, HTML and an attachment, then send
    # it. This sends real mail — run deliberately.
    subject = "python mail"
    receiver = ["autosendmail@taobao.com", "gongqingyi@qq.com"]
    text = "text Hi!\nHow are you?\nHere is the link you wanted:\nhttp://www.python.org"
    html = """\
<html>
<head></head>
<body>
<p>html Hi!<br>
How are you?<br>
Here is the <a href="http://www.python.org">link</a> you wanted.
</p>
</body>
</html>
"""
    filename = "/Users/gongqingyi/.vimrc"
    mail = Mail()
    mail.set_subject(subject)
    mail.set_receiver(receiver)
    mail.add_text(text)
    # NOTE(review): 'html' is built but never attached — presumably
    # mail.add_html(html) was intended here; confirm before adding.
    mail.add_file(filename)
    mail.send()
    print("finished!")
|
import numpy as np
from typing import Union, Optional
from .callback import Callback
class Bandit:
    """Base k-armed bandit: action set, reward sampling and step-size schedules.

    rewards: array-like where row a holds the possible reward values for
    action a (one is sampled uniformly by get_reward) — TODO confirm the
    exact shape with callers.
    """

    def __init__(self, k: int, rewards: np.ndarray):
        self.k = k
        self.actions = np.arange(k)
        self.rewards = rewards
        # Running trace o_n for the unbiased constant step size.
        # BUG FIX: this attribute was originally also named
        # 'unbiased_constant', shadowing the method below, so
        # getattr(..., 'unbiased_constant') returned a float and learn()
        # crashed when trying to call it.
        self._unbiased_trace = 0.0

    def constant(self, step_size: float, **kwargs) -> float:
        """Constant step size: always alpha."""
        return step_size

    def unbiased_constant(self, step_size: float, **kwargs) -> float:
        """Unbiased constant step size (Sutton & Barto, exercise 2.7).

        o_n = o_{n-1} + alpha * (1 - o_{n-1}); returns beta_n = alpha / o_n.
        BUG FIX: the update used (1 + o) instead of (1 - o), which is not
        the unbiased trick and makes the step size decay incorrectly.
        """
        self._unbiased_trace += step_size * (1 - self._unbiased_trace)
        return step_size / self._unbiased_trace

    def get_reward(self, a: int) -> float:
        """Sample a reward for action a uniformly from its reward set."""
        return np.random.choice(self.rewards[a])
class SimpleBandit(Bandit):
    """Action-value bandit with epsilon-greedy or UCB action selection."""

    def __init__(self, k: int, rewards: np.ndarray, **kwargs):
        super().__init__(k, rewards)
        self.n = np.zeros(k)  # per-action pull counts
        self.q = kwargs.get("q", np.zeros(k))  # action-value estimates

    def incremental(self, a: int, **kwargs):
        """Sample-average step size 1/N(a); requires n[a] > 0."""
        return 1 / self.n[a]

    def epsilon_greedy(self, eps: float) -> int:
        """Explore uniformly with probability eps, otherwise exploit argmax(q)."""
        if np.random.sample() < eps:
            return np.random.choice(self.actions)
        return np.argmax(self.q)

    def ucb(self, c: float, t: float) -> int:
        """Upper-confidence-bound selection; untried actions go first."""
        zeros = np.where(self.n == 0)[0]
        if zeros.size > 0:
            return np.random.choice(zeros)
        # Vectorized form of q[a] + c * sqrt(ln t / n[a]) over all actions.
        bounds = self.q + c * np.sqrt(np.log(t) / self.n)
        return np.argmax(bounds)

    def learn(self, time_steps: int,
              eps: float = None,
              c: float = None,
              alpha: float = 0.1,
              step_size: str = "incremental",
              callback: Optional[Callback] = None,
              **kwargs):
        """Run for time_steps steps; return the per-step reward array.

        step_size names a schedule method on the base class ('constant',
        'unbiased_constant') or on this class ('incremental').
        """
        rewards = np.zeros(time_steps)
        # Base-class schedules are looked up first, then this class's own.
        try:
            f = getattr(super(), step_size)
        except AttributeError:  # BUG FIX: was a bare 'except:'
            f = getattr(self, step_size)
        for t in range(time_steps):
            # Select action according to action selection strategy.
            a = self.epsilon_greedy(eps) if eps is not None else self.ucb(c, t)
            self.n[a] += 1
            r = self.get_reward(a)
            # Update step size, then the action-value estimate.
            alpha = f(a=a, step_size=alpha)
            self.q[a] += alpha * (r - self.q[a])
            rewards[t] = r
        return rewards
class GradientBandit(Bandit):
    """Gradient (preference-based) bandit with softmax action selection."""

    def __init__(self, k: int, rewards: np.ndarray, **kwargs):
        super().__init__(k, rewards)
        self.h = kwargs.get("h", np.zeros(k))  # action preferences

    def softmax(self, a: int) -> float:
        """Unnormalized softmax weight e^h[a]; the caller divides by the sum.

        NOTE(review): no max-subtraction, so large preferences can
        overflow — acceptable for small test problems.
        """
        return np.e ** (self.h[a])

    def gradient_ascent(self, a_t: int, r: float, baseline: float, alpha: float, p: np.ndarray) -> None:
        """One stochastic gradient step on the preferences."""
        for a in self.actions:
            if a == a_t:
                self.h[a] += alpha * (r - baseline) * (1 - p[a])
            else:
                self.h[a] -= alpha * (r - baseline) * p[a]

    def learn(self, time_steps: int,
              step_size: str = "constant",
              alpha: float = 0.1,
              callback: Optional[Callback] = None,
              **kwargs):
        """Run for time_steps steps; return the per-step reward array."""
        rewards = np.zeros(time_steps)
        # Baseline = incremental mean of rewards (removed unused r_sum).
        baseline = 0.0
        f = getattr(super(), step_size)
        for t in range(time_steps):
            # Softmax policy over the current preferences.
            denominator = np.sum([np.e ** (self.h[b]) for b in self.actions])
            p = np.asarray([self.softmax(x) for x in self.actions]) / denominator
            a = np.random.choice(self.actions, p=p)
            r = self.get_reward(a)
            if t == 0:
                baseline = r  # initialize to the first observed reward
            alpha = f(step_size=alpha)
            self.gradient_ascent(a, r, baseline, alpha, p)
            # Incremental mean update of the baseline.
            baseline += (1 / (t + 1)) * (r - baseline)
            rewards[t] = r
        return rewards
|
import numpy as np
import torch
import torch.nn.functional as F
from torch import optim
from torch.nn import CrossEntropyLoss
import cfg
import utils
def _train_epoch(model, epoch, dataloader, optimizer):
    """Run one training epoch, logging the loss of every batch."""
    model.train()  # set model to training mode
    criterion = CrossEntropyLoss()  # hoisted: no need to rebuild it per batch
    for batch_idx, (images, masks, _) in enumerate(dataloader, start=1):
        images = images.to(cfg.device)
        masks = masks.long().to(cfg.device)
        optimizer.zero_grad()
        output = model(images)
        loss = criterion(output, masks)
        loss.backward()
        optimizer.step()
        print(f'epoch [{epoch}/{cfg.max_epoch}]', end=' | ')
        print(f'train iter [{batch_idx}/{len(dataloader)}]', end=' | ')
        print(f'loss={loss.item():.5f}')
def _valid_epoch(model, epoch, dataloader, best_loss, results_dir):
    """Validate one epoch; save the model whenever the mean loss improves.

    Returns the best (lowest) mean validation loss seen so far.
    """
    model.eval()  # set model to evaluation mode
    total_val_loss = 0.
    best_val_loss = best_loss
    criterion = CrossEntropyLoss()  # hoisted out of the batch loop
    with torch.no_grad():
        for batch_idx, (images, masks, _) in enumerate(dataloader, start=1):
            images = images.to(cfg.device)
            masks = masks.long().to(cfg.device)
            output = model(images)
            loss = criterion(output, masks)
            total_val_loss += loss.item()
            print(f'epoch [{epoch}/{cfg.max_epoch}]', end=' | ')
            print(f'val iter [{batch_idx}/{len(dataloader)}]', end=' | ')
            print(f'val loss={loss.item():.5f}')
    mean_val_loss = total_val_loss / len(dataloader)
    if mean_val_loss < best_loss:
        # New best: persist the checkpoint.
        torch.save(model.state_dict(), results_dir.joinpath(cfg.best_model_path_name))
        best_val_loss = mean_val_loss
    print(f'epoch [{epoch}/{cfg.max_epoch}]', end=' | ')
    print(f'val', end=' | ')
    print(f'mean loss={mean_val_loss:.5f}', end=' | ')
    print(f'best loss={best_val_loss:.5f}')
    return best_val_loss
def train_val(model, train_dataloader, test_dataloader, results_dir):
    """Full training loop: train and validate every epoch, stepping the LR schedule."""
    optimizer = optim.Adam(model.parameters(), lr=cfg.lr)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=cfg.milestones, gamma=cfg.gamma)
    best_loss = float('inf')
    for epoch in range(cfg.max_epoch):
        _train_epoch(model, epoch, train_dataloader, optimizer)
        best_loss = _valid_epoch(model, epoch, test_dataloader, best_loss, results_dir)
        scheduler.step()
def test_batch(model, dataloader, results_dir):
    """Load the best checkpoint and write per-sample prediction maps.

    For each sample, saves the positive-class probability map as
    '<name>_prd_prob.npy' and a postprocessed binary mask as
    '<name>_prd_bin.png' in results_dir.
    """
    model_path = results_dir.joinpath(cfg.best_model_path_name)
    model.load_state_dict(torch.load(model_path))
    model.eval()
    with torch.no_grad():
        for images, masks, p_names in dataloader:
            images = images.to(cfg.device)
            # masks is unused below; kept for loader-interface parity.
            masks = masks.long().to(cfg.device)
            output = model(images)
            # Channel 1 is taken as the positive class — TODO confirm.
            positive_prob = F.softmax(output, dim=1)[:, 1]
            for iter_idx in range(output.size(0)):
                # Assumes the per-sample output covers cfg.w * cfg.h pixels — verify.
                prd_mask = positive_prob[iter_idx].detach().cpu().numpy().reshape((cfg.w, cfg.h))
                p_name = p_names[iter_idx]
                print(f'now testing {p_name}')
                np.save(results_dir.joinpath(f'{p_name}_prd_prob.npy'), prd_mask)
                utils.postprocess(prd_mask, results_dir.joinpath(f'{p_name}_prd_bin.png'))
|
import pyrebase
# Firebase project configuration.
# NOTE(review): the API key and service-account filename are committed in
# plain text — rotate these credentials and load them from the environment.
config = {
    "apiKey": "AIzaSyB3UTG878t8nyfUiw8zIhfyqb5Pqwp7S2I",
    "authDomain": "thebigtag-135f2.firebaseapp.com",
    "databaseURL": "https://thebigtag-135f2.firebaseio.com/",
    "storageBucket": "thebigtag-135f2.appspot.com",
    "serviceAccount": "thebigtag-135f2-firebase-adminsdk-f3yzd-32e7052d24.json"
}

firebase = pyrebase.initialize_app(config)
auth = firebase.auth()

# Sign in with a fixed test account.
email = 'testingraggawgwegewGA@gmail.com'
password = 'testing11111'
user = auth.sign_in_with_email_and_password(email, password)
print(user)

db = firebase.database()
name = "Tester"
# NOTE(review): this stores the plaintext password in the database — unsafe.
data = {
    "name": name,
    "email": email,
    "password": password
}
# Pass the user's idToken to the push method
results = db.child("users").push(data)
print(results)

# Dump every stored user record.
all_users = db.child("users").get()
for user in all_users.each():
    print(user.key())  # Morty
    print(user.val())
|
from django.db import models
class Quote(models.Model):
    """A sourced quotation with optional context."""

    quote_author = models.CharField(max_length=50)
    quote_body = models.TextField()
    context = models.CharField(max_length=240, blank=True)  # optional
    source = models.CharField(max_length=120, blank=True)   # optional
    created_at = models.DateTimeField(auto_now_add=True)    # set once on insert

    def __str__(self):
        """Admin/listing display: the author's name."""
        return self.quote_author
|
Problem: find the first non-repeating character in a string (return -1 if none exists).
Example input:
S = hello
Expected output: h
Solution:
from collections import Counter
class Solution:
    """First non-repeating character in a string."""

    def nonrepeatingCharacter(self, s):
        """Return the first character of s that occurs exactly once, else -1.

        O(n): one pass to count frequencies, one pass to find the answer.
        """
        freq = Counter(s)
        for ch in s:
            if freq[ch] == 1:
                return ch
        # (Removed an unreachable 'break' that followed the return.)
        return -1
|
from ncclient import manager
import sys
import xml.dom.minidom
HOST = '10.1.100.33'
PORT = 830
USER = 'cisco'
PASS = 'cisco'
FILE = 'get_interface_gigabit3.xml'


def get_configured_interfaces(xml_filter):
    """Return the running config filtered by the XML filter file's contents."""
    # BUG FIXES: 'FALSE' -> False, and look_for_keys was a bare name
    # (a NameError); both are keyword arguments of manager.connect.
    with manager.connect(host=HOST, port=PORT, username=USER, password=PASS,
                         hostkey_verify=False, device_params={'name': 'default'},
                         allow_agent=False, look_for_keys=False) as m:
        with open(xml_filter) as f:
            return m.get_config('running', f.read())


def main():
    """Print the <interfaces> subtree of the running configuration."""
    reply = get_configured_interfaces(FILE)
    dom = xml.dom.minidom.parseString(reply.xml)
    # BUG FIX: 'getElementsBy TagName' (stray space) was a syntax error.
    nodes = dom.getElementsByTagName("interfaces")
    print(nodes[0].toprettyxml())


if __name__ == '__main__':  # BUG FIX: was "_name_ == '_main_'"
    sys.exit(main())
|
from autodisc.explorers.randomexplorer import RandomExplorer
from autodisc.explorers.goalspaceexplorer import GoalSpaceExplorer
from autodisc.explorers.goalspacedensityexplorer import GoalSpaceDensityExplorer
from autodisc.explorers.onlinelearninggoalexplorer import OnlineLearningGoalExplorer
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.