import argparse
import re
import glob
import os
import numpy as np
import matplotlib.pyplot as plt
import sys
args = dict()
data = dict()
origDir = os.getcwd()
plt.style.use('ggplot')
# plt.style.use('grayscale')
# plt.style.use('fivethirtyeight')
print plt.style.available
numInst = re.compile('Number of Instructions: (\d+)')
numFTL = re.compile('Number of FTL Instructions: (\d+)')
typeCheckInfo = re.compile('TypeCheck\[(.+)\]= (\d+)')
fileNameInfo = re.compile('.*___(.+)\.log')
def drawMultiData():
numPoints = len(data)
widthNum = len(data[data.keys()[0]])
f, ax = plt.subplots()
ax.set_title("Something")
x = np.arange(numPoints)
y = dict()
labels = []
for i in range(0, widthNum):
y[i] = []
for filename in data.keys():
i = 0
labels.append(filename)
for category in data[filename].keys():
y[i].append(int(data[filename][category]))
i += 1
width = 0
color_cycle_length = len(plt.rcParams['axes.color_cycle'])
for i in range(0, widthNum):
ax.bar(x + width, y[i], .25,
color=plt.rcParams['axes.color_cycle'][i % color_cycle_length])
width += .25
ax.set_xticks(x + .25)
ax.set_xticklabels(labels, rotation='vertical')
os.chdir(origDir)
# plt.savefig(args.output + '.ps')
plt.show()
def drawStackedData():
numPoints = len(data)
stackNum = len(data[data.keys()[0]])
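    # assumes every log file contains the same set of categories; bars are stacked in sorted-category order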
f, ax = plt.subplots()
ax.set_title("Something")
x = np.arange(numPoints)
y = dict()
plots = dict()
labels = []
for i in range(0, stackNum):
y[i] = []
for filename in sorted(data.keys()):
i = 0
labels.append(parseFilename(filename))
for category in sorted(data[filename].keys()):
y[i].append(int(data[filename][category]))
i += 1
# this for finding the bottom
btm = dict()
total = [0] * numPoints
for i in range(0, stackNum):
btm[i] = total
total = [first + second for (first, second) in zip(total, y[i])]
# this is for drawing
width = .5
colors = plt.get_cmap('jet')(np.linspace(0, 1.0, stackNum))
# color_cycle_length = len(plt.rcParams['axes.color_cycle'])
for i in range(0, stackNum):
plots[i] = ax.bar(x, y[i], width, bottom=btm[i],
color=colors[i])
ax.set_xticks(x + width/2.)
# ax.set_xticklabels(labels, rotation=-45)
ax.set_xticklabels(labels, rotation='vertical')
# this is for the legend
plotList = []
for i in range(0, stackNum):
plotList.append(plots[i][0])
plt.legend(plotList, sorted(data[data.keys()[0]].keys()))
# now am drawing the plot
plt.show()
def drawMultiStackedData():
numPoints = len(data)
widthNum = len(data[data.keys()[0]])
stackNum = len(data[data.keys()[0]][data[data.keys()[0]].keys()[0]])
f, ax = plt.subplots()
ax.set_title("Something")
x = np.arange(numPoints)
labels = []
y = dict()
btm = dict()
for i in range(0, widthNum):
y[i] = dict()
btm[i] = dict()
for j in range(0, stackNum):
y[i][j] = []
i = 0
for tickName in data.keys():
labels.append(tickName)
for test in data[tickName].keys():
j = 0
for category in data[tickName][test].keys():
y[i][j].append(int(data[tickName][test][category]))
j += 1
i += 1
for i in range(0, widthNum):
total = [0] * numPoints
for j in range(0, stackNum):
btm[i][j] = total
total = [first + second for (first, second) in zip(total, y[i][j])]
width = 0
color_cycle_length = len(plt.rcParams['axes.color_cycle'])
for i in range(0, widthNum):
for j in range(0, stackNum):
ax.bar(x + width, y[i][j], .25, bottom=btm[i][j],
color=plt.rcParams['axes.color_cycle'][j % color_cycle_length])
width += .25 # this number is going to have to change
ax.set_xticks(x + width/2.)
ax.set_xticklabels(labels, rotation='vertical')
plt.show()
def initializeData(dictionary):
addElementToDictionary(dictionary, "ftl_count", 0)
addElementToDictionary(dictionary, "inst_count", 0)
def parseFilename(filename):
result = fileNameInfo.match(filename)
if result:
return result.group(1)
else:
print "oops"
return None
def parseData(line, dictionary):
result = typeCheckInfo.match(line)
if result:
addElementToDictionary(dictionary, result.group(1),
result.group(2))
def parseAndAdd(expression, line, dictionary, name):
result = expression.match(line)
if result:
addElementToDictionary(dictionary, name,
result.group(1))
def addElementToDictionary(dictionary, key, value):
dictionary[key] = value
def readCommandline():
global args
parser = argparse.ArgumentParser(prog='Plot generator')
parser.add_argument('folder', help='example')
parser.add_argument('output', help='output name')
parser.add_argument('-colors', dest='colors', help='example')
args = parser.parse_args()
def main():
global data
readCommandline()
os.chdir(args.folder)
for filename in glob.glob("*.log"):
addElementToDictionary(data, filename, dict())
# initializeData(data[filename])
with open(filename) as f:
for line in f:
parseData(line, data[filename])
# drawMultiData()
drawStackedData()
print data
if __name__ == '__main__':
main()
# print data
|
from django.db import models
# Create your models here.
class Store(models.Model):
store_name = models.CharField(max_length=255)
store_address = models.CharField(max_length=255)
store_phone = models.CharField(max_length=255)
store_website = models.CharField(max_length=255)
store_email = models.CharField(max_length=255)
store_hours = models.CharField(max_length=255)
class StoreAdmin(models.Model):
admin_email = models.CharField(max_length=255)
admin_password = models.CharField(max_length=255)
admin_storeid = models.ForeignKey(Store)
class ProductAlbum(models.Model):
album_name = models.CharField(max_length=255)
album_store_name = models.ForeignKey(Store)
    album_product_number = models.IntegerField()
album_photograph = models.CharField(max_length=255)
class ProductDetails(models.Model):
product_name = models.CharField(max_length=255)
product_image = models.CharField(max_length=255)
    product_price = models.IntegerField()
product_inStock = models.BooleanField()
product_album_name = models.ForeignKey(ProductAlbum)
|
#!/usr/bin/env python
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from random import randint
from main.page.base import *
import os, time, sys
class SearchBasePage(BasePage):
# page locator
_page = "index.pl"
# locators in header search
_input_search_loc = (By.XPATH, "//*[@id='search-keyword']")
_btn_search_loc = (By.CSS_SELECTOR, "button.btn-search")
# locators tab search
_tab_product_loc = (By.XPATH, "/html/body/div[1]/div[4]/div/div[1]/ul/li[1]/a")
_tab_catalog_loc = (By.XPATH, "//*[@id='tab-catalog']")
_tab_shop_loc = (By.XPATH, "//*[@id='tab-shop']")
_tab_all_shop_loc = (By.XPATH, "/html/body/div[1]/div[5]/div[1]/div/form/div/div[2]/div/div/a[1]")
_tab_gm_shop_loc = (By.XPATH, "/html/body/div[1]/div[5]/div[1]/div/form/div/div[2]/div/div/a[2]")
def open(self, site=""):
self._open(site, self._page)
def search(self, keyword=""):
try:
self.driver.find_element(*self._input_search_loc).clear()
self.driver.find_element(*self._input_search_loc).send_keys(keyword)
time.sleep(1)
self.driver.find_element(*self._btn_search_loc).click()
except Exception as inst:
print(inst)
def tab_product(self):
try:
self.driver.find_element(*self._tab_product_loc).click()
except Exception as ins:
print("Exception in tab product", ins)
def tab_shop(self):
try:
self.driver.find_element(*self._tab_shop_loc).click()
except Exception as ins:
print("Exception in tab shop!", ins)
def tab_catalog(self):
try:
self.driver.find_element(*self._tab_catalog_loc).click()
except Exception as ins:
print("Exception in tab catalog!", ins)
def tab_all_shop(self):
try:
self.driver.find_element(*self._tab_all_shop_loc).click()
except Exception as ins:
print("Exception in tab all shop!", ins)
def tab_gm_shop(self):
try:
self.driver.find_element(*self._tab_gm_shop_loc).click()
except Exception as inst:
print("Exception in tab GM shop!", ins)
def __str__(self):
return self.driver.title
|
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
import data.climate.window_generator as wg
# https://www.tensorflow.org/tutorials/structured_data/time_series
mpl.rcParams['figure.figsize'] = (8, 6)
mpl.rcParams['axes.grid'] = False
train_df = pd.read_csv("jena_climate_2009_2016_train.csv")
val_df = pd.read_csv("jena_climate_2009_2016_val.csv")
test_df = pd.read_csv("jena_climate_2009_2016_test.csv")
CONV_WIDTH = 3
LABEL_WIDTH = 24
INPUT_WIDTH = LABEL_WIDTH + (CONV_WIDTH - 1)
wide_conv_window = wg.WindowGenerator(train_df=train_df, val_df=val_df, test_df=test_df,
input_width=INPUT_WIDTH,
label_width=LABEL_WIDTH,
shift=1,
label_columns=['T (degC)'])
conv_model = tf.keras.Sequential([
tf.keras.layers.Conv1D(filters=32,
kernel_size=(CONV_WIDTH,),
activation='relu'),
tf.keras.layers.Dense(units=32, activation='relu'),
tf.keras.layers.Dense(units=1),
])
history = wg.compile_and_fit(conv_model, wide_conv_window)
conv_model.save("h5/cnn_32_26_19__32_24_1.h5")
wide_conv_window.plot(conv_model)
plt.show()
|
#!/usr/bin/env python
# Rips Roms from the Steam release of Colecovision Flashback
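# Byte ranges [start, end) of each ROM inside the AUTO container file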
ROMS = [{'OFFSETS': [0x01740, 0x0573F]},
{'OFFSETS': [0x05740, 0x0973F]},
{'OFFSETS': [0x09740, 0x0D73F]},
{'OFFSETS': [0x0D740, 0x1373F]},
{'OFFSETS': [0x13740, 0x1773F]},
{'OFFSETS': [0x17740, 0x1B73F]},
{'OFFSETS': [0x1B740, 0x1F73F]},
{'OFFSETS': [0x1F740, 0x2373F]},
{'OFFSETS': [0x23740, 0x2773F]},
{'OFFSETS': [0x27740, 0x2F73F]},
{'OFFSETS': [0x2F740, 0x3373F]},
{'OFFSETS': [0x33740, 0x3973F]},
{'OFFSETS': [0x39740, 0x3D73F]},
{'OFFSETS': [0x3D740, 0x4173F]},
{'OFFSETS': [0x41740, 0x4573F]},
{'OFFSETS': [0x45740, 0x4B73F]},
{'OFFSETS': [0x4B740, 0x5173F]},
{'OFFSETS': [0x51740, 0x5573F]},
{'OFFSETS': [0x55740, 0x5973F]},
{'OFFSETS': [0x59740, 0x6153F]},
{'OFFSETS': [0x61540, 0x6553F]},
{'OFFSETS': [0x65540, 0x6953F]},
{'OFFSETS': [0x69540, 0x6D53F]},
{'OFFSETS': [0x6D540, 0x7153F]},
{'OFFSETS': [0x71540, 0x7553F]},
{'OFFSETS': [0x75540, 0x7953F]},
{'OFFSETS': [0x79540, 0x7D53F]},
{'OFFSETS': [0x7D540, 0x8153F]},
{'OFFSETS': [0x81540, 0x8753F]},
{'OFFSETS': [0x87540, 0x8B53F]},
{'OFFSETS': [0x8B540, 0x8F53F]},
{'OFFSETS': [0x8F540, 0x9353F]},
{'OFFSETS': [0x93540, 0x9753F]},
{'OFFSETS': [0x97540, 0x9D53F]},
{'OFFSETS': [0x9D540, 0xA153F]},
{'OFFSETS': [0xA1540, 0xA553F]},
{'OFFSETS': [0xA5540, 0xA953F]},
{'OFFSETS': [0xA9540, 0xAF53F]},
{'OFFSETS': [0xAF540, 0xB353F]},
{'OFFSETS': [0xB3540, 0xB953F]}]
# Rom filenames
ROMFILE = [{'NAME': [str("AntarcticAdventure.cv")]},
{'NAME': [str("Aquattack.cv")]},
{'NAME': [str("BrainStrainers.cv")]},
{'NAME': [str("BumpnJump.cv")]},
{'NAME': [str("Choplifter.cv")]},
{'NAME': [str("CosmicAvenger.cv")]},
{'NAME': [str("Evolution.cv")]},
{'NAME': [str("Fathom.cv")]},
{'NAME': [str("FlipperSlipper.cv")]},
{'NAME': [str("FortuneBuilder.cv")]},
{'NAME': [str("FranticFreddy.cv")]},
{'NAME': [str("Frenzy.cv")]},
{'NAME': [str("GatewayToApshai.cv")]},
{'NAME': [str("GustBuster.cv")]},
{'NAME': [str("JumpmanJunior.cv")]},
{'NAME': [str("JungleHunt.cv")]},
{'NAME': [str("Miner2049er.cv")]},
{'NAME': [str("Moonsweeper.cv")]},
{'NAME': [str("MountainKing.cv")]},
{'NAME': [str("MsSpaceFury.cv")]},
{'NAME': [str("NovaBlast.cv")]},
{'NAME': [str("OilsWell.cv")]},
{'NAME': [str("OmegaRace.cv")]},
{'NAME': [str("PepperII.cv")]},
{'NAME': [str("QuintanaRoo.cv")]},
{'NAME': [str("Rolloverture.cv")]},
{'NAME': [str("SammyLightfoot.cv")]},
{'NAME': [str("SirLancelot.cv")]},
{'NAME': [str("Slurpy.cv")]},
{'NAME': [str("SpaceFury.cv")]},
{'NAME': [str("SpacePanic.cv")]},
{'NAME': [str("SquishemSam.cv")]},
{'NAME': [str("SuperCrossForce.cv")]},
{'NAME': [str("TheHeist.cv")]},
{'NAME': [str("Threshold.cv")]},
{'NAME': [str("TournamentTennis.cv")]},
{'NAME': [str("Venture.cv")]},
{'NAME': [str("WarRoom.cv")]},
{'NAME': [str("WingWar.cv")]},
{'NAME': [str("Zaxxon.cv")]}]
if __name__ == '__main__':
f = open("AUTO", "rb")
try:
autofile = f.read()
finally:
        f.close()
for i in range(0, 40):
for section in ['OFFSETS']:
if ROMS[i][section]:
                # the whole file is already in memory, so slice using the offsets directly
                start = ROMS[i][section][0]
                end = ROMS[i][section][1]
                game = autofile[start:end]
for section in ['NAME']:
if ROMFILE[i][section]:
romfilename = ROMFILE[i][section][0]
filename = open(romfilename, "wb")
try:
filename.write(game)
finally:
filename.close()
|
# import required modules
import pygame
import tkinter as tkr
from tkinter.filedialog import askdirectory
import os
# create the music player window
musicplayer = tkr.Tk()
# set a title for the window
musicplayer.title("my player")
# set the window dimensions (given as a quoted string)
musicplayer.geometry('400x350')
# ask the user to choose a music directory
directory = askdirectory()
# make the chosen directory the current working directory
os.chdir(directory)  # chdir = change directory
# build the song list from the directory contents
songlist = os.listdir()  # os.listdir() returns the file names in the current directory
# create the playlist widget
playlist = tkr.Listbox(musicplayer,font= "Cambria 14 bold",bg ="cyan2",selectmode= tkr.SINGLE)
# selectmode=SINGLE because only one song plays at a time
# insert every song into the playlist
for item in songlist:
pos = 0
playlist.insert(pos,item)
# initialise pygame and its mixer, which provide the play/stop/pause functions
pygame.init()
pygame.mixer.init()
def play():
    pygame.mixer.music.load(playlist.get(tkr.ACTIVE))  # tkr.ACTIVE refers to the currently selected song in the playlist
var.set(playlist.get(tkr.ACTIVE))
pygame.mixer.music.play()
def stop():
pygame.mixer.music.stop()
def pause():
pygame.mixer.music.pause()
def resume():
pygame.mixer.music.unpause()
# create the control buttons
Button_play =tkr.Button(musicplayer,height=3,width= 5, text = "play music",font= "Cambria 20 bold",command = play,bg="lime green",fg ="black")
# bg = background colour, fg = foreground colour
Button_stop = tkr.Button(musicplayer,height=3,width= 5, text = "stop music",font= "Cambria 20 bold",command = stop,bg="skyblue",fg ="black")
Button_pause = tkr.Button(musicplayer,height=3,width= 5, text = "pause music",font= "Cambria 20 bold",command = pause,bg="blue",fg ="black")
Button_resume = tkr.Button(musicplayer,height=3,width= 5, text = "resume music",font= "Cambria 20 bold",command = resume,bg="red",fg ="black")
Button_resume.pack(fill="x")  # fill="x" stretches the button to the full window width
Button_pause.pack(fill="x")
Button_stop.pack(fill="x")
Button_play.pack(fill="x")
playlist.pack(fill="both",expand = "yes")
# label showing the title of the current song
var = tkr.StringVar()  # StringVar holds a string; the default value is the empty string
song_title =tkr.Label(musicplayer,font = "Cambria 12 bold ",textvariable = var)
song_title.pack()
musicplayer.mainloop()
|
from testutil import *
import numpy as np
import smat # want module name too
from smat import *
import cPickle as pickle
import os,os.path
####################################################
# GLOBAL VARIABLES USED IN EACH TEST (as read-only)
n,m = 123,21
Z,_Z = None,None # Z = numpy.zeros(n,m), _Z = smat.zeros(n,m)
O,_O = None,None # ones
I,_I = None,None # identity
A,_A = None,None # random matrix 1 is whatever
B,_B = None,None # random matrix 2 is whatever
C,_C = None,None # random matrix 3 has non-zero values (good as a denominator)
W,_W = None,None # row vector taken from ravel'd C
def clear_test_matrices():
global Z, O, I, A, B, C, W
global _Z,_O,_I,_A,_B,_C,_W
Z, O, I, A, B, C, W = None, None, None, None, None, None, None
_Z,_O,_I,_A,_B,_C,_W= None, None, None, None, None, None, None
def alloc_test_matrices(dt):
global n,m
global Z, O, I, A, B, C, W
global _Z,_O,_I,_A,_B,_C,_W
Z = np.zeros((n,m),dt); _Z = zeros((n,m),dt);
O = np.ones((n,m),dt); _O = ones((n,m),dt);
I = np.eye(n,dtype=dt); _I = eye(n,dtype=dt);
A = make_rand(n,m,dt); A = A.astype(dt)
B = make_rand(n,m,dt); B = abs(B); B = B.astype(dt)
C = make_rand(n,m,dt); C = abs(C); C[C==0] += 1; C = C.astype(dt)
W = C.ravel()[10:n+10].reshape((-1,1))
_A = as_sarray(A)
_B = as_sarray(B)
_C = as_sarray(C)
_W = as_sarray(W)
assert_eq(_Z,Z)
#####################################################
def test_create(dt):
# Machine-created matrices should match numpy versions
assert_eq(_Z,Z)
assert_eq(_O,O)
assert_eq(_I,I)
assert_eq(_A,A)
assert_eq(_B,B)
assert_eq(_C,C)
assert_eq(_W,W)
# Properties.
assert _Z.size == Z.size
assert _Z.ndim == Z.ndim
assert _Z.shape== Z.shape
assert _Z.dtype== Z.dtype
assert _Z.nrow == Z.shape[0]
assert _Z.ncol == Z.shape[1]
# Create range-valued array.
assert_eq(arange(5,67),np.arange(5,67))
# Use _like creation functions.
_X = empty_like(_Z)
assert _X.shape == _Z.shape
assert _X.dtype == _Z.dtype
_X = empty_like(_Z,uint8)
assert _X.shape == _Z.shape
assert _X.dtype == uint8
_X = zeros_like(_O)
assert_eq(_X,_Z)
_X = ones_like(_Z)
assert_eq(_X,_O)
#####################################################
def test_copy(dt):
# Upload and then download from machine
assert_eq(as_sarray(A), A)
assert_eq(_A.copy(),A)
# Type casting.
assert _A.astype(float32).dtype == float32
if int32 in get_supported_dtypes():
assert _A.astype(int32).dtype == int32
#####################################################
ext_demo_dll = None
class c_clamp_args_t(Structure): # This same structure is defined in cuda_ext_clamp.cu
_fields_ = [("lo", c_double),
("hi", c_double)]
c_clamp_args_p = POINTER(c_clamp_args_t)
def load_extension_demo():
global ext_demo_dll
ext_demo_dll = load_extension("smat_ext_demo")
ext_demo_dll.api_lerp.declare( c_smat_p, [c_smat_p,c_smat_p,c_double]) # C, [A,B,alpha]
ext_demo_dll.api_clamp.declare( None, [c_smat_p,c_clamp_args_p]) # [A,(lo,hi)]
def unload_extension_demo():
global ext_demo_dll
unload_extension(ext_demo_dll)
ext_demo_dll = None
def lerp(A,B,alpha):
C_ptr = ext_demo_dll.api_lerp(A._ptr,B._ptr,c_double(alpha))
return sarray(C_ptr)
def clamp(A,lo,hi):
args = c_clamp_args_t(lo,hi)
ext_demo_dll.api_clamp(A._ptr,byref(args))
def test_smat_extension(dt):
load_extension_demo()
    # Function lerp(A,B,alpha) computes (1-alpha)*A + alpha*B and returns the result
_X = lerp(_A,_B,0.25)
X = (1-0.25)*A + 0.25*B
assert_close(_X,X)
# Function clamp(A,lo,hi) computes A[:] = maximum(lo,minimum(hi,A)) inplace
_X = _A.copy(); clamp(_X,-0.5,0.5)
X = A.copy(); X = np.maximum(-0.5,np.minimum(0.5,X))
assert_eq(_X,X)
unload_extension_demo()
#####################################################
def test_random(dt):
# Bernoulli random numbers
_A1 = bernoulli((n,m),0.5,dt)
_A2 = bernoulli((n,m),0.5,dt)
_A3 = bernoulli((n,m),0.2,dt)
assert_ne(_A1,_A2) # pretty pathetic test of randomness, but whatever
assert_ne(_A2,_A3)
assert_any(_A1 == 0)
assert_any(_A1 == 1)
assert_all(logical_or(_A1 == 1,_A1 == 0))
assert_all(nnz(_A1) > nnz(_A3)*1.1)
#####################################################
def test_random_int(dt):
# Integral random numbers
_A1 = rand(n,m,dt)
_A2 = rand(n,m,dt)
_A3 = rand(n,m,dt)
assert_ne(_A1,_A2) # pretty pathetic test of randomness, but whatever
assert_ne(_A2,_A3)
rand_seed(1234)
_A4 = rand(n,m,dt)
assert_ne(_A1,_A4)
rand_seed(1234)
_A5 = rand(n,m,dt)
assert_eq(_A4,_A5) # same random seed should give same random stream
#######################################################################
def test_random_float(dt):
# Floating point random numbers
_A1 = randn(n,m,dt)
_A2 = randn(n,m,dt)
_A3 = randn(n,m,dt)
assert_ne(_A1,_A2) # pretty pathetic test of randomness, but whatever
assert_ne(_A2,_A3)
rand_seed(1234)
_A4 = randn(n,m,dt)
assert_ne(_A1,_A4)
rand_seed(1234)
_A5 = randn(n,m,dt)
assert_eq(_A4,_A5) # same random seed should give same random stream
#######################################################################
def test_closeness(dt):
A1 = np.require(make_rand(n,m,dt)*1e-5,dtype=float32)
_A1 = asarray(A1)
assert allclose(A1,A1*(1+1e-6),rtol=1e-5,atol=0)
assert not allclose(A1,A1*(1+1e-4),rtol=1e-5,atol=0)
assert allclose(A1,A1+1e-6,rtol=0,atol=1e-5)
assert not allclose(A1,A1+1e-4,rtol=0,atol=1e-5)
#####################################################
def test_attributes():
"""Test setattr and getattr functions."""
A = empty((5,5))
A.setattr("foo",1)
A.setattr("bar",10)
assert A.foo == 1
assert A.bar == 10
del A.foo
assert A.bar == 10
del A.bar
#######################################################################
def test_serialize(dt):
"""
Tests that an smat array of any type can be
serialized to disk, including its attributes.
"""
A1 = rand(30,10,dtype=dt)
X1 = rand(256,5,dtype=dt)
X1.setattr("A",A1)
fname = "smat_unittest_serialize.pkl"
with open(fname,"wb") as file:
pickle.dump(X1,file)
with open(fname,"rb") as file:
X2 = pickle.load(file)
os.remove(fname)
assert isinstance(X2,sarray)
assert_eq(X1,X2)
assert(X2.hasattr("A")) # Make sure that attributes are also serialized
A2 = X2.getattr("A")
assert_eq(A1,A2)
#####################################################
def test_slicing(dt):
# Row slicing.
assert_eq(_A[0], A[0])
assert_eq(_A[0,:], A[0,:])
assert_eq(_A[11], A[11])
assert_eq(_A[11,:], A[11,:])
assert_eq(_A[-1], A[-1])
assert_eq(_A[-1,:], A[-1,:])
assert_eq(_A[:], A[:])
assert_eq(_A[:,:], A[:,:])
assert_eq(_A[:21], A[:21])
assert_eq(_A[:21,:], A[:21,:])
assert_eq(_A[-21:], A[-21:])
assert_eq(_A[-21:-16],A[-21:-16:])
assert_eq(_A[-21:,:], A[-21:,:])
assert_eq(_A[21:-21], A[21:-21:])
assert_eq(_A[21:-21,:],A[21:-21,:])
# Row slicing on a row vector
_a,a = _A[3,:],A[3:4,:]
assert_eq(_a, a)
assert_eq(_a[0], a[0])
# Column slicing.
assert_eq(_A[:,0], A[:,0:1])
assert_eq(_A[:,1], A[:,1:2])
assert_eq(_A[:,:5], A[:,:5])
assert_eq(_A[:,-1], A[:,-1:])
assert_eq(_A[:,-5], A[:,-5:-4])
assert_eq(_A[:,-5:], A[:,-5:])
assert_eq(_A[:,-5:-1], A[:,-5:-1])
# Column slicing on a column vector
_a,a = _A[:,3],A[:,3:4]
assert_eq(_a, a)
assert_eq(_a[:,0], a[:,0:1])
# Row + Column slicing.
assert_eq(_A[5,5], A[5,5])
assert_eq(_A[:5,5], A[:5,5:6])
assert_eq(_A[2:5,5], A[2:5,5:6])
assert_eq(_A[2:5,5:7], A[2:5,5:7])
assert_eq(_A[-6:,-10:], A[-6:,-10:])
# Row-sliced assignments.
_X,X = _A.copy(),A.copy(); _X[:] ,X[:] = 789 ,789; assert_eq(_X,X)
_X,X = _A.copy(),A.copy(); _X[:] ,X[:] = _B[:] ,B[:]; assert_eq(_X,X)
_X,X = _A.copy(),A.copy(); _X[0] ,X[0] = _B[0] ,B[0]; assert_eq(_X,X) # Broadcast copy.
_X,X = _A.copy(),A.copy(); _X[:] ,X[:] = _B[0] ,B[0]; assert_eq(_X,X) # Broadcast copy.
_X,X = _A.copy(),A.copy(); _X[:] ,X[:] = _W ,W; assert_eq(_X,X) # Broadcast copy.
_X,X = _A.copy(),A.copy(); _X[-1] ,X[-1] = _B[-1] ,B[-1]; assert_eq(_X,X)
_X,X = _A.copy(),A.copy(); _X[:11] ,X[:11] = _B[:11] ,B[:11]; assert_eq(_X,X)
_X,X = _A.copy(),A.copy(); _X[-11:],X[-11:] = _B[-11:],B[-11:]; assert_eq(_X,X)
# Col-sliced assignments.
# _X,X = _A.copy(),A.copy(); _X[:,0] ,X[:,0] = 789 ,789; assert_eq(_X,X) # Assigning const to column strided array not implemented
_X,X = _A.copy(),A.copy(); _X[:,0] ,X[:,0] = _B[:,0] ,B[:,0]; assert_eq(_X,X)
_X,X = _A.copy(),A.copy(); _X[:,1] ,X[:,1] = _B[:,1] ,B[:,1]; assert_eq(_X,X)
_X,X = _A.copy(),A.copy(); _X[:,:10] ,X[:,:10] = _B[:,:10] ,B[:,:10]; assert_eq(_X,X)
_X,X = _A.copy(),A.copy(); _X[:,:10] ,X[:,:10] = _B[:,:10] ,B[:,:10]; assert_eq(_X,X)
_X,X = _A.copy(),A.copy(); _X[:,10:-10] ,X[:,10:-10] = _B[:,10:-10] ,B[:,10:-10]; assert_eq(_X,X)
# Row+Col-sliced assignments.
_X,X = _A.copy(),A.copy(); _X[5:10,7:13] ,X[5:10,7:13] = _B[5:10,7:13] ,B[5:10,7:13]; assert_eq(_X,X)
#####################################################
def test_reshape(dt):
_X,X = _A[:45,:].copy(),A[:45,:].copy()
assert_eq(_X.reshape((7,135)),X.reshape((7,135)))
assert_eq(_X.reshape((-1,7)),X.reshape((-1,7)))
_Y,Y = _X[:9],X[:9]; _Y[:,:],Y[:,:] = 1,1
assert_eq(_Y,Y)
assert_eq(_X,X)
assert_eq(_X.reshape((7,135)),X.reshape((7,135)))
assert_eq(_X.reshape((135,-1)),X.reshape((135,-1)))
#####################################################
def test_transpose(dt):
assert_eq(transpose(_I),I)
assert_eq(transpose(_A.reshape((-1,1))),np.transpose(A.reshape((-1,1))))
assert_eq(transpose(_A.reshape((1,-1))),np.transpose(A.reshape((1,-1))))
assert_eq(transpose(_A.reshape((3,-1))),np.transpose(A.reshape((3,-1))))
assert_eq(transpose(_A.reshape((-1,3))),np.transpose(A.reshape((-1,3))))
assert_eq(transpose(_A),np.transpose(A))
assert_eq(transpose(_B),np.transpose(B))
assert_eq(_A.T, np.transpose(A))
assert_eq(_B.T, np.transpose(B))
#######################################################################
def test_dot(dt):
assert_eq(dot(_I,_I), I)
assert_close(dot(_A.reshape((1,-1)),_B.reshape((-1,1))), np.dot(A.reshape((1,-1)),B.reshape((-1,1))))
assert_close(dot(_A,_B.T) ,np.dot(A,B.T))
assert_close(dot_nt(_A,_B),np.dot(A,B.T))
assert_close(dot(_A.T,_B) ,np.dot(A.T,B))
assert_close(dot_tn(_A,_B),np.dot(A.T,B))
#######################################################################
def test_bitwise(dt):
# Bitwise and logical (NOT,AND,OR,XOR).
assert_eq(~_A, ~A)
assert_eq(_A | 0, A | 0)
assert_eq( 1 | _B, 1 | B)
assert_eq(_A | _B, A | B)
assert_eq(_A ^ 0, A ^ 0)
assert_eq( 1 ^ _B, 1 ^ B)
assert_eq(_A ^ _B, A ^ B)
assert_eq(_A & 0, A & 0)
assert_eq( 1 & _B, 1 & B)
assert_eq(_A & _B, A & B)
#######################################################################
def test_logical(dt):
# Logical operations (as opposed to bitwise)
assert_eq(logical_not(_A), np.logical_not(A))
assert_eq(logical_or(_A,_B), np.logical_or(A,B))
assert_eq(logical_and(_A,_B), np.logical_and(A,B))
#######################################################################
def test_modulo(dt):
_X,X = _A,A
_Y,Y = _C,C
if dt in dtypes_sint:
_X,X = abs(_X),abs(X) # cuda modulo for signed types differs from numpy,
_Y,Y = abs(_Y),abs(Y) # so don't compare that case
assert_eq(_X % 7, (X % np.asarray(7,dtype=dt)).astype(dt))
assert_eq( 7 % _Y, (np.asarray(7,dtype=dt) % Y).astype(dt))
assert_eq(_X % _Y, (X % Y).astype(dt))
#######################################################################
def test_naninf(dt):
_X = _A.copy(); _X[3] = np.nan; _X[5] = np.inf
X = A.copy(); X[3] = np.nan; X[5] = np.inf
assert_eq(isnan(_X), np.isnan(X))
assert_eq(isinf(_X), np.isinf(X))
assert_eq(isinf(_A/0),np.ones(A.shape,dtype=bool))
assert_eq(isnan(0*_A/0),np.ones(A.shape,dtype=bool))
#######################################################################
def test_math_float(dt):
Amin = A.min()
Amax = A.max()
A2 = (2*( A-Amin)/(Amax-Amin)-1)*.999
_A2 = (2*(_A-Amin)/(Amax-Amin)-1)*.999
assert_eq(clip(_A,0,1),np.clip(A,0,1))
assert_eq(abs(_O), np.abs(O))
assert_eq(abs(_A), np.abs(A))
assert_eq(square(_A), np.square(A))
assert_eq(round(_A), np.round(A))
assert_eq(floor(_A), np.floor(A))
assert_eq(ceil(_A), np.ceil(A))
assert_close(sin(_A), np.sin(A))
assert_close(cos(_A), np.cos(A))
assert_close(tan(_A), np.tan(A))
assert_close(arcsin(_A2), np.arcsin(A2))
assert_close(arccos(_A2), np.arccos(A2))
assert_close(arctan(_A2), np.arctan(A2))
assert_close(sinh(_A), np.sinh(A))
assert_close(cosh(_A), np.cosh(A))
assert_close(tanh(_A), np.tanh(A))
assert_close(arcsinh(_A2), np.arcsinh(A2))
assert_close(arccosh(1+abs(_A2)), np.arccosh(1+np.abs(A2)))
assert_close(arctanh(_A2), np.arctanh(A2))
assert_close(exp(_C), np.exp(C))
assert_close(exp2(_C), np.exp2(C))
assert_close(log(_C), np.log(C))
assert_close(log2(_C), np.log2(C))
assert_close(logistic(_A), 1 / (1 + np.exp(-A)))
# Handle sign and sqrt separately...
if dt == bool:
assert_eq(sign(_O), np.sign(np.asarray(O,dtype=uint8))) # numpy doesn't support sign on type bool
assert_eq(sign(_A), np.sign(np.asarray(A,dtype=uint8)))
else:
assert_eq(sign(_O), np.sign(O))
assert_eq(sign(_I), np.sign(I))
if dt in (int8,int16,int32,int64,float32,float64):
assert_eq(sign(-_I), np.sign(-I))
assert_eq(sign(_A), np.sign(A))
assert_eq(signbit(_O), np.signbit(O,out=np.empty(O.shape,dtype=dt)))
assert_eq(signbit(_I), np.signbit(I,out=np.empty(I.shape,dtype=dt)))
if dt in (int8,int16,int32,int64,float32,float64):
assert_eq(signbit(-_I), np.signbit(-I,out=np.empty(I.shape,dtype=dt)))
assert_eq(signbit(_A), np.signbit(A,out=np.empty(A.shape,dtype=dt)))
if dt in dtypes_float:
assert_close(sqrt(abs(_A)),np.sqrt(np.abs(A))) # numpy converts integer types to float16/float32/float64, and we don't want that.
#######################################################################
def test_reduce(dt):
X = np.asarray([[12.5],[1]])
_X = as_sarray(X)
assert_eq(sum(_X,axis=1),np.sum(X,axis=1).reshape((-1,1)))
# Operations that reduce in one or more dimensions.
reducers = [(max,np.max,assert_eq),
(min,np.min,assert_eq),
(sum,np.sum,assert_close),
(mean,np.mean,assert_close),
                (nnz, np.count_nonzero, assert_eq),  # numpy has no np.nnz; count_nonzero is the numpy equivalent
(any,np.any,assert_eq),
(all,np.all,assert_eq),
]
shapes = [_A.shape,(-1,1),(3,-1),(-1,3),(-1,7),(1,-1),(7,-1)]
for shape in shapes:
for sreduce,nreduce,check in reducers:
_X = _A.reshape(shape).copy(); _X.ravel()[5:100] = 0;
X = A.reshape(shape).copy(); X.ravel()[5:100] = 0;
assert_eq(_X,X)
check(sreduce(_X,axis=1), nreduce(X,axis=1).reshape((-1,1))) # reshape because we don't want to follow numpy's convention of turning all reduces into dimension-1 vector
check(sreduce(_X,axis=0), nreduce(X,axis=0).reshape((1,-1)))
check(sreduce(_X), nreduce(X))
#######################################################################
def test_trace(dt):
#assert_eq(trace(_I), np.trace(I)) # not yet implemented
pass
#######################################################################
def test_diff(dt):
for axis in (0,1):
if axis == 1: continue # TODO: axis=1 not yet implemented
for n in range(5):
assert_eq(diff(_A,n,axis=axis), np.diff(A,n,axis=axis))
#######################################################################
def test_repeat(dt):
for n in range(5): assert_eq(repeat(_A,n,axis=1), np.repeat(A,n,axis=1))
for n in range(5): assert_eq(repeat(_A,n), np.repeat(A,n).reshape((-1,1)))
# TODO: axis=0 not yet implemented
#######################################################################
def test_tile(dt):
for n in range(5): assert_eq(tile(_A,n,axis=1), np.tile(A,(1,n)))
for n in range(5): assert_eq(tile(_A,n), np.tile(A.reshape((-1,1)),n).reshape((-1,1)))
# TODO: axis=0 not yet implemented
#######################################################################
def test_arithmetic(dt):
# Arithmetic operators (+,-,*,/)
_X,X = _A,A
_Y,Y = _B,B
_D,D = _C,C
if dt in dtypes_sint:
_Y,Y = abs(_Y),abs(Y) # cuda/numpy differ on how signed integer types
_D,D = abs(_D),abs(D) # are rounded under division, so skip that comparison
assert_eq(_X+_Y, X+Y)
assert_eq(_X+_Y[5,:], X+Y[5,:]) # test broadcast of row vector
assert_eq(_X[0,:]+_Y, X[0,:]+Y) # test broadcast of row vector
assert_eq(_X+_W, X+W) # test broadcast of col vector
assert_eq(_X+3 , np.asarray(X+3,dtype=dt))
assert_eq(3+_X , np.asarray(3+X,dtype=dt))
assert_eq(_X-_Y, X-Y)
assert_eq(_X-_Y[5,:], X-Y[5,:])
assert_eq(_X[0,:]-_Y, X[0,:]-Y)
assert_eq(_X-_W, X-W)
assert_eq(_X-3 , X-np.asarray(3,dtype=dt))
assert_eq(3-_X , np.asarray(3,dtype=dt)-X)
assert_eq(_X*_Y, X*Y)
assert_eq(_X*_Y[5,:], X*Y[5,:])
assert_eq(_X[0,:]*_Y, X[0,:]*Y)
assert_eq(_X*_W, X*W)
assert_eq(_X*3 , X*np.asarray(3,dtype=dt))
assert_eq(3*_X , np.asarray(3,dtype=dt)*X)
assert_close(_Y/_D[5,:], Y/D[5,:])
assert_close(_Y[0,:]/_D, Y[0,:]/D)
assert_close(_Y/_W, Y/W)
assert_close(_Y/_D, np.asarray(Y/D,dtype=dt))
assert_close(_Y/3 , np.asarray(Y/np.asarray(3,dtype=dt),dtype=dt))
assert_close(3/_D , np.asarray(np.asarray(3,dtype=dt)/D,dtype=dt))
if dt != bool:
_X = _A.copy(); X = A.copy(); _X += 2; X += 2; assert_eq(_X,X)
_X = _A.copy(); X = A.copy(); _X += _C; X += C; assert_eq(_X,X)
_X = _A.copy(); X = A.copy(); _X -= 2; X -= 2; assert_eq(_X,X)
_X = _A.copy(); X = A.copy(); _X -= _C; X -= C; assert_eq(_X,X)
_X = _A.copy(); X = A.copy(); _X *= 2; X *= 2; assert_eq(_X,X)
_X = _A.copy(); X = A.copy(); _X *= _C; X *= C; assert_eq(_X,X)
_X = _A.copy(); X = A.copy(); _X *= 0; X *= 0; assert_eq(_X,X)
_X = _A.copy(); X = A.copy(); _X *= 1; X *= 1; assert_eq(_X,X)
_X = _A.copy(); X = A.copy(); _X /= 1; X /= 1; assert_eq(_X,X)
#######################################################################
def test_elemwise_minmax(dt):
# Elementwise minimum/maximum
assert_eq(maximum(_A, 9),np.maximum(A,np.asarray(9,dtype=dt)).astype(dt))
assert_eq(maximum( 9,_B),np.maximum(np.asarray(9,dtype=dt),B).astype(dt))
assert_eq(maximum(_A,_B),np.maximum(A,B))
assert_eq(minimum(_A, 9),np.minimum(A,np.asarray(9,dtype=dt)).astype(dt))
assert_eq(minimum( 9,_B),np.minimum(np.asarray(9,dtype=dt),B).astype(dt))
assert_eq(minimum(_A,_B),np.minimum(A,B))
#######################################################################
def test_pow(dt):
    if dt in [int64,uint64]: # Currently does not work well with int64 on compute capability 1.2 (no doubles)
return
# Power (**).
_X,X = abs(_A),np.abs(A);
_Y,Y = (_I[:21,:].reshape((-1,21))+1.2).astype(dt),(I[:21,:].reshape((-1,21))+1.2).astype(dt)
assert_close(_X**_Y, X**Y)
assert_close(_X**_Y[0,:], X**Y[0,:]) # broadcast
assert_close(_X**2.1 , (X**np.asarray(2.1,dtype=dt)).astype(dt))
assert_close(7**_Y , np.asarray(7**Y,dtype=dt))
#######################################################################
def test_softmax(dt):
assert_close(softmax(_A,axis=0),numpy_softmax(A,axis=0))
assert_close(softmax(_A,axis=1),numpy_softmax(A,axis=1))
#######################################################################
def test_apply_mask(dt):
for _ in range(5):
# smat version
_X = _A.copy()
_M = bernoulli(_A.shape, 0.8, dtype=np.bool)
_X[5:7] = np.nan
_M[5:7] = False
apply_mask(_X, _M)
# numpy version
X = A.copy()
X[5:7] = 0
X *= _M.asnumpy()
X[np.where(X==-0.)] = 0
# compare
assert_eq(_X, X)
#######################################################################
def test_memory_manager():
reset_backend()
#reset_backend(verbose=1,log=["heap"]) # for debugging, if there's a problem
size = 10*1024*1024 # 10 million element chunks
m = 1024
n = size/m
status0 = get_heap_status()
Y = ones((n,m),dtype=float32)
status1 = get_heap_status()
Y = None
status2 = get_heap_status()
Y = ones((n,m),dtype=float32)
status3 = get_heap_status()
Y = None
status4 = get_heap_status()
Y = ones((n,3*m//4),dtype=float32)
status5 = get_heap_status()
Y = None
status6 = get_heap_status()
assert status1.device_used >= status0.device_used + n*m # use >= n*m instead of == n*m because sanity checks/alignment constraints might allocate a few extra bytes
assert status1.device_committed >= status0.device_committed
assert status2.device_used == status0.device_used
assert status2.device_committed == status1.device_committed
assert status3.device_used == status1.device_used
assert status3.device_committed == status1.device_committed
assert status4.device_used == status0.device_used
assert status4.device_committed == status1.device_committed
assert status5.device_used < status1.device_used # allocated smaller array, but should use same block
assert status5.device_committed == status1.device_committed
assert status6.device_used == status0.device_used
assert status6.device_committed == status1.device_committed
for i in range(2): # try to alloc and free all memory, several times
# Each trial allocates (and continues to reference)
# enough matrix data to nearly fill the available device memory,
# then syncs with the machine.
mem = get_heap_status()
X = []
Y = ones((n,m),dtype=float32)
elem_to_alloc = int(mem.device_avail*0.9)/4
chunks_to_alloc = elem_to_alloc/size-2
for j in range(chunks_to_alloc):
X.append(ones((n,m),dtype=float32))
Y = Y + X[-1]
sync()
X = None
Y = None
sync()
reset_backend()
#######################################################################
def run_unittest(test,dtypes=None):
print rpad("%s..." % test.__name__.partition("_")[2],19),
if dtypes == None:
test()
else:
supported = get_supported_dtypes()
for dt in [bool, int8, int16, int32, int64,
uint8,uint16,uint32,uint64,float32,float64]:
if not dt in supported:
continue
print ("%3s" % dtype_short_name[dt] if dt in dtypes else " "),
if dt in dtypes:
alloc_test_matrices(dt)
test(dt)
clear_test_matrices()
print
#######################################################################
def unittest():
print '\n---------------------- UNIT TESTS -------------------------\n'
np.random.seed(42)
set_backend_options(randseed=42,verbose=0,sanitycheck=False)
run_unittest(test_memory_manager)
run_unittest(test_create ,dtypes_generic)
run_unittest(test_copy ,dtypes_generic)
run_unittest(test_random ,dtypes_generic)
run_unittest(test_random_int ,dtypes_integral)
run_unittest(test_random_float ,dtypes_float)
run_unittest(test_smat_extension,dtypes_float)
run_unittest(test_closeness ,dtypes_float)
run_unittest(test_attributes)
run_unittest(test_serialize ,dtypes_generic)
run_unittest(test_slicing ,dtypes_generic)
run_unittest(test_reshape ,dtypes_generic)
run_unittest(test_transpose ,dtypes_generic)
run_unittest(test_dot ,dtypes_float)
run_unittest(test_bitwise ,dtypes_integral)
run_unittest(test_logical ,dtypes_integral)
run_unittest(test_modulo ,dtypes_integral)
run_unittest(test_naninf ,dtypes_float)
run_unittest(test_math_float ,dtypes_float)
run_unittest(test_reduce ,dtypes_generic)
run_unittest(test_trace ,dtypes_generic)
run_unittest(test_diff ,dtypes_generic)
run_unittest(test_repeat ,dtypes_generic)
run_unittest(test_tile ,dtypes_generic)
run_unittest(test_arithmetic ,dtypes_generic)
run_unittest(test_elemwise_minmax,dtypes_generic)
run_unittest(test_pow ,dtypes_generic)
run_unittest(test_softmax ,dtypes_float)
run_unittest(test_apply_mask ,dtypes_float)
#run_unittest(test_repmul_iadd ,dtypes_float)
|
import xlrd
import math
class node:
def __init__(self,x,y,z,r):
self.x=x
self.y=y
self.z=z
self.r=r
def dist(self,other):
return math.sqrt((self.x-other.x)**2+(self.y-other.y)**2+(self.z-other.z)**2)
def neighbor(self,other):
dist=self.dist(other)
if self==other:
return False
if dist<self.r+other.r:
return True
else:
return False
def read_excel():
workbook=xlrd.open_workbook('聚集体形态.xlsx')
sheet1=workbook.sheet_by_index(0)
data=[]
for i in range(sheet1.nrows):
data.append(node(sheet1.cell(i,0).value,sheet1.cell(i,1).value,sheet1.cell(i,2).value,sheet1.cell(i,3).value))
return data
def max_dist(data):
max_distance=-1
max_index1=-1
max_index2=-1
for i in range(len(data)-1):
for j in range(i+1,len(data)):
dist=data[i].dist(data[j])
if dist>max_distance:
max_distance=dist
max_index1=i
max_index2=j
return max_distance,max_index1,max_index2
def build_graph(data):
graph=[]
for i in range(len(data)):
row=[]
for j in range(len(data)):
if i==j:
row.append(0)
continue
if data[i].neighbor(data[j]):
                row.append(1)
else:
row.append(float('inf'))
graph.append(row)
return graph
def dijkstra(graph,src,target):
    # use the size of the graph itself rather than the global `data`
    nodes=[i for i in range(len(graph))]
visited=[]
visited.append(src)
nodes.remove(src)
dis={src:0}
while nodes:
min_nodes = 500
min_index = -1
for v in visited:
for d in nodes:
if graph[src][v]!=float('inf') and graph[v][d]!=float('inf'):
new_dist=graph[src][v]+graph[v][d]
if graph[src][d]>new_dist:
graph[src][d]=new_dist
if graph[src][d]<min_nodes:
min_nodes=graph[src][d]
min_index = d
dis[min_index]=min_nodes
visited.append(min_index)
nodes.remove(min_index)
return dis[target]
if __name__=='__main__':
data=read_excel()
max_distance,max_index1,max_index2=max_dist(data)
graph=build_graph(data)
min_nodes=dijkstra(graph,max_index1,max_index2)
    results={'AB distance':max_distance,'points between A and B':min_nodes}
    f=open('结果.txt','w+')
    f.write(str(results))
    f.close()
|
import hashlib
import psycopg2
class User:
conn = psycopg2.connect("dbname=projnew user=postgres")
cur = conn.cursor()
success_messages = []
error_messages = []
ALREADY_SIGNED_IN = ["Hey! You're already signed in :)"]
permission_msgs = {"writer": ["Please, sign in before."], "admin": ["You have not enough permissions, please contact us to get it."]}
@staticmethod
def check_login(name, password):
User.check_presence(name, password)
hashed = hashlib.sha1(bytes(password, 'utf-8')).hexdigest()
if User.error_messages == []:
user = User.find(name, hashed)
return user
else:
return False
@staticmethod
def new(name, password):
if User.check_new_user(name, password):
hashed = hashlib.sha1(bytes(password, 'utf-8')).hexdigest()
User.cur.execute("INSERT INTO users (login, password_hash, is_admin) VALUES (%s, %s, false)", (name, hashed,))
User.conn.commit()
User.success_messages.append("You have successfully registered at articleHub!")
return True
else:
return False
@staticmethod
def check_new_user(name, password):
User.check_presence(name, password)
if (len(password) < 6) and (password != ""):
User.error_messages.append("The password is too short (at least 6 symbols required)!")
if len(User.error_messages) != 0:
return False
if User.user_exists(name):
User.error_messages.append("This user already exists! Choose another login.")
return False
else:
return True
@staticmethod
def check_presence(name, password):
if name == "":
User.error_messages.append("The username is empty!")
if password == "":
User.error_messages.append("The password is empty!")
@staticmethod
def user_exists(name):
User.cur.execute("SELECT login FROM users WHERE login = %s", (name,))
users_already_exists = User.cur.fetchall()
if len(users_already_exists) == 0:
return False
else:
return True
@staticmethod
def find(name, hashed_password):
User.cur.execute("SELECT login, is_admin FROM users WHERE login = %s and password_hash = %s", (name, hashed_password, ))
user = User.cur.fetchall()
if len(user) != 0:
return dict(username=user[0][0], hashed_password=hashed_password, is_admin=user[0][1])
else:
return False
@staticmethod
def is_admin(username):
if not username:
return False
User.cur.execute("SELECT is_admin FROM users WHERE login = %s", (username,))
is_admin = User.cur.fetchall()[0][0]
return is_admin
|
import numpy as np
import pylab as P
import ROOT
from ROOT import gROOT
gROOT.ProcessLine(".L /home/mage/PROSPECT/PROSPECT-G4-build/lib/libEventLib.so")
gROOT.ProcessLine(".L /home/mage/PROSPECT/PROSPECT-G4-Sec/include/Output/Event.hh")
histCell=ROOT.TH2D("Cell Ionization Hits","Cell Ionization Hits",14,0,14,10,0,10)
pmt1Hist=ROOT.TH2D("d","d",100,-1200,1200,100,-1200,1200)
pmt2Hist=ROOT.TH2D("d","d",100,-1200,1200,100,-1200,1200)
cellHitX=ROOT.TH1D("cellH","Cell's Hit",15,0,15)
cellHitY=ROOT.TH1D("cellH","Cell's Hit",11,0,11)
Nx=14
Ny=10
goodEvent=[]
count=0
tot=0
for k in xrange(0,1):
# if k!=1 and k!=9:
cryFile=ROOT.TFile("../cry_SCE"+str(k)+".root")
tree=ROOT.TTree()
tree=cryFile.Get("PG4")
ion=ROOT.IoniClusterEvent()
sp=ROOT.SecondaryParticleEvent()
tree.SetBranchAddress("ScIoni",ion)
tree.SetBranchAddress("SecParticle",sp)
entries=int(tree.GetEntries())
entries=10
for i in xrange(0,entries):
tree.GetEntry(i)
clust=ion.nIoniClusts
clus=ion.nIoniClusts
E=ion.EIoni
e=0
ylist=[]
xlist=[]
        t=0
tot+=1
if i ==0:
t=ion.clusts.At(0).t
for j in xrange(0,clus):
vert=ion.clusts.At(j)
time=vert.t-t
vol=vert.vol
pid=vert.PID
if vol>=0 and pid==13:
coord=vert.x
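                # map the linear cell index vol to (x, y) coordinates on the Nx x Ny detector grid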
x=vol%Nx
y=vol/(Nx)
vol2=x+(Nx)*y
# print "x: "+str(x)+" x coord " +str(coord[0])+" y: "+str(y)+"cm z coord "+str(coord[2])+"cm"+" vol :"+str(vol)
# print "x: "+str(x)+ " y: "+str(y)+" vol :"+str(vol)+" vol2: "+str(vol2)
# print "x coord "+str(coord[0])+"cm y coord "+str(coord[1])+"cm z coord "+str(coord[2])+"cm"+" time "+str(time)+"ns"
# print ""
if (y not in ylist):
ylist.append(y)
if (x not in xlist):
xlist.append(x)
if len(ylist) ==10 and len(xlist)==1:
count+=1
for j in xrange(0,10):
histCell.Fill(xlist[0],ylist[j])
# histCell.Fill(x,y)
# for j in xrange(0,clus):
# coord=vert.x
# x=vol%Nx
# y=vol/(Nx)
# vol=vert.vol
# pid=vert.PID
# # if vol>-1 and pid ==13:
# print "x :"+str(x)
# print "y :"+str(y)
# histCell.Fill(x,y)
# # print "event "+str(i)+" for file "+str(k)
# print "xlist: "+str(xlist)
# print "ylist: "+str(ylist)
cellHitY.Fill(len(ylist))
cellHitX.Fill(len(xlist))
print count
print tot
histCell.Draw("colz")
raw_input()
|
# -*- coding: utf-8 -*-
# @Author: Fallen
# @Date: 2020-04-19 14:47:28
# @Last Modified by: Fallen
# @Last Modified time: 2020-04-19 14:51:13
# def fib(num):
# if num == 1: return num
# else: num+fib(num-1)
def fib(num):
if num == 1 or num == 2: return 1
else:
return fib(num-2)+fib(num-1)
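# The recursive version above recomputes subproblems exponentially, so a call like
# fib(100) will not finish in practice. A minimal iterative sketch for comparison
# (fib_iter is an illustrative name, not part of the original script):
def fib_iter(num):
    a, b = 1, 1
    for _ in range(num - 2):
        a, b = b, a + b
    return b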
# Find the 100th Fibonacci number
# 1 1 2 3 5
def main():
print("Hello, World!")
print(fib(100))
if __name__ == "__main__":
main()
|
from survy import AnonymousSurvey
# Define a question and create an AnonymousSurvey object to represent the survey
question="What language did you first learn to speak?"
my_survey=AnonymousSurvey(question)
# Show the question and store the answers
my_survey.show_question()
print("Enter 'q' to exit \n")
while True:
response=input("Language:\n")
if response=='q':
break
my_survey.store_response(response)
# Show the survey results
print("\nThank you to everyone who participated in the survey!")
my_survey.show_results()
|
'''hopfield.py
Simulates a Hopfield network
CS443: Computational Neuroscience
Ethan, Cole, Alice
Project 2: Content Addressable Memory
'''
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import display, clear_output
import preprocessing as prep
class HopfieldNet():
'''A binary Hopfield Network that assumes that input components are encoded as bipolar values
(-1 or +1).
'''
def __init__(self, data, orig_width, orig_height):
'''HopfieldNet constructor
Parameters:
-----------
data: ndarray. shape=(N, M). Each data sample is a length M bipolar vector, meaning that
components are either -1 or +1. Example: [-1, -1, +1, -1, +1, ...]
orig_width : int. Original width of each image before it was flattened into a 1D vector.
If data are not images, this can be set to the vector length (number of features).
orig_height : int. Original height of each image before it was flattened into a 1D vector.
If data are not images, this can be set to 1.
TODO:
Initialize the following instance variables:
- self.num_samps
- self.num_neurons: equal to # features
- self.orig_width, self.orig_height
- self.energy_hist: Record of network energy at each step of the memory retrieval process.
Initially an empty Python list.
- self.wts: handled by `initialize_wts`
'''
self.num_samps = data.shape[0]
self.num_neurons = orig_width * orig_height
self.orig_width = orig_width
self.orig_height = orig_height
self.energy_hist = []
self.wts = self.initialize_wts(data)
def initialize_wts(self, data):
'''Weights are initialized by applying Hebb's Rule to all pairs of M components in each
data sample (creating a MxM matrix) and summing the matrix derived from each sample
together.
Parameters:
-----------
data: ndarray. shape=(N, M). Each data sample is a length M bipolar vector, meaning that
components are either -1 or +1. Example: [-1, -1, +1, -1, +1, ...]
Returns:
-----------
ndarray. shape=(M, M). Weight matrix between the M neurons in the Hopfield network.
There are no self-connections: wts(i, i) = 0 for all i.
NOTE: It might be helpful to average the weights over samples to avoid large weights.
'''
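        # Hebb's rule: accumulate the outer product x x^T over all samples, then zero the diagonal and average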
wts = np.zeros((self.num_neurons, self.num_neurons))
for i in range(self.num_samps):
vec = np.expand_dims(data[i, :], axis=0)
wts = wts + vec.T @ vec
for n in range(self.num_neurons):
wts[n, n] = 0
return wts/self.num_samps
def energy(self, netAct):
'''Computes the energy of the current network state / activation
See notebook for refresher on equation.
Parameters:
-----------
netAct: ndarray. shape=(num_neurons,)
Current activation of all the neurons in the network.
Returns:
-----------
float. The energy.
'''
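        # E = -1/2 * a^T W a  (vectorized form of the pairwise energy sum)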
netAct = np.squeeze(netAct) #we don't want to expand dims more than once
return -1/2*(np.sum(np.expand_dims(netAct, axis=0) @ self.wts @ np.expand_dims(netAct, axis=1)))
def predict(self, data, update_frac=0.1, tol=1e-15, verbose=False, show_dynamics=False):
''' Use each data sample in `data` to look up the associated memory stored in the network.
Parameters:
-----------
data: ndarray. shape=(num_test_samps, num_features)
Each data sample is a length M bipolar vector, meaning that components are either
-1 or +1. Example: [-1, -1, +1, -1, +1, ...].
May or may not be the training set.
update_frac: float. Proportion (fraction) of randomly selected neurons in the network
whose netAct we update on every time step.
(on different time steps, different random neurons get selected, but the same number)
tol: float. Convergence criterion. The network has converged onto a stable memory if
the difference between the energy on the current and previous time step is less than `tol`.
        verbose: boolean. You should only print diagnostic info when set to True. Minimal printouts
otherwise.
show_dynamics: boolean. If true, plot and update an image of the memory that the network is
retrieving on each time step.
Returns:
-----------
ndarray. shape=(num_test_samps, num_features)
            Retrieved memory for each data sample, in each case once the network has stabilized.
TODO:
        - Process the test data samples one-by-one, setting each one as the initial netAct, then
        on each time step only update the netAct of a random subset of neurons
        (size determined by `update_frac`; see notebook for refresher on update equation).
        Stop this netAct updating process once the network has stabilized, which is defined by the
        difference between the energy on the current and previous time step being less than `tol`.
- When running your code with `show_dynamics` set to True from a notebook, the output should be
a plot that updates as your netAct changes on every iteration of the loop.
If `show_dynamics` is true, create a figure and plotting axis using this code outside the
main update loop:
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
Inside, plot an image of the current netAct and make the title the current energy. Then after
your plotting code, add the following:
display(fig)
clear_output(wait=True)
plt.pause(<update interval in seconds>) # CHANGE THIS
NOTE: Your code should work even if num_test_samps=1.
'''
data = np.copy(data)
if np.ndim(data) < 2:
data = np.expand_dims(data, axis=0)
preds = np.zeros((data.shape[0], data.shape[1]))
for samp in range(data.shape[0]):
#set net activity to test sample
net_act = np.expand_dims(data[samp], 0)
#update energy_hist
energy = self.energy(net_act)
self.energy_hist.append(energy)
curr_energy = energy - 1
if show_dynamics:
fig = plt.figure(1)
ax = fig.add_subplot(1, 1, 1)
# fig.suptitle("Current Energy")
iterations = 0
#while energy is still changing
while abs(curr_energy - energy) > tol:
iterations += 1
#make random indices
inds = np.random.choice(np.arange(self.num_neurons), size=((int(update_frac*self.num_neurons))), replace=False)
#update neurons at selected indices using update rule
for i in inds:
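                    # update rule: a_i <- sign(sum_j w_ji * a_j), computed from the current state of all neurons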
net_act[:, i] = np.sign(np.sum(np.expand_dims(self.wts[:, i], 0) @ net_act.T))
#update energy
energy = curr_energy
curr_energy = self.energy(net_act)
self.energy_hist.append(curr_energy)
# plotting
if show_dynamics:
ax.set_title(str(curr_energy))
img = prep.vec2img(net_act, self.orig_width, self.orig_height)
ax.imshow(img[0], cmap='gray')
display(fig)
clear_output(wait=True)
plt.pause(.05)
if verbose:
print("iterations", iterations)
#stabilized net_act is the prediction
preds[samp, :] = net_act
return preds
|
def circle_circuit(diameter, pi = 3.14):
result = 2 * diameter * pi
return round(result, 2)
print(circle_circuit(5))
|
# -*- coding: utf-8 -*-
#from distutils.core import setup
from setuptools import setup
setup(
name = "duckdaq",
packages = ["duckdaq", "duckdaq.Filter", "duckdaq.Device", "duckdaq.Display"],
version = "0.1",
description = "Didactic lab software for the LabJack U3-HV",
author = "Ulrich Leutner",
author_email = "ulli@koid.org",
url = "https://duckdaq.readthedocs.org/en/latest/index.html",
download_url = "https://github.com/kaschpal/duckdaq/tarball/master",
install_requires = ["pyqtgraph>=0.9.8", "PySide>=1.2.0"],
keywords = ["labjack", "daq", "education", "physics"],
classifiers = [
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Development Status :: 3 - Alpha",
"Environment :: Other Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Operating System :: OS Independent",
"Topic :: Education",
"Topic :: Scientific/Engineering :: Physics"
],
long_description = """\
Duckdaq is educational software for data acquisition and analysis. The focus
lies on live analysis, both in class and in the lab.
The program aims to provide similar functionality to common didactic software
like Leybold CassyLab or PHYWE measure. The difference is that experiments
are not clicked together as in the programs mentioned above, because that
approach is inflexible, slow, inconvenient and provides no mechanism to extend the
software.
Just like in LabVIEW, data flow is the foundation of duckdaq. Data comes
from sources (called Measurements), is modified sample by sample by a cascade
of Filters (which, for example, convert voltage to temperature), and the result
then goes to a Display, for example a plotter.
Unlike in LabVIEW, you don't program graphically, but in the form of a Python
script.
Documentation: https://duckdaq.readthedocs.org/en/latest/index.html
"""
)
|
from sqlalchemy.exc import IntegrityError
from psycopg2.errors import UniqueViolation
from functools import partial
from multiprocess import Pool
from sql_utils import make_session_kw, select
from sys import argv
import gc
import utils
import random
from os import path
from glob import glob
import pandas as pd
from joblib import Parallel, delayed
import argparse
import re
from collections import namedtuple
schema = 'sim4'
outgroup_name = '4'
def make_tree(tstr):
t = utils.Tree(tstr)
t.set_outgroup(outgroup_name)
return t
stree_rx = re.compile(r't_([\d\.]+)_([\d\.]+)_([\d\.]+)')
filename_rx = re.compile(r'(t_\d.*)_(WAG|LG)(_ds\d+)?')
dirname_rx = re.compile('/?(\d+)bp_b(\d+)_g(\d+)')
def fn2nw(s):
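    # rebuild the 4-taxon species-tree newick string from the branch lengths encoded in the filename (t_<a>_<b>_<c>)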
ta, tb, tc = stree_rx.search(s).groups()
ibl = float(tb)-float(ta)
outgroup_bl = float(tc)-float(tb)
tstr = f'(4:{tc},(3:{tb},(1:{ta},2:{ta}):{ibl}):{outgroup_bl});'
return utils.nwstr(make_tree(tstr))
def value_mapper(d):
def f(X):
try:
return [d[x] for x in X]
except:
return d[X]
return f
Params = namedtuple(
'Params',
('filename',
'stree',
'stree_str',
'smodel',
'ds')
)
def parse_filename(fn):
s = path.basename(fn)
m = filename_rx.search(s)
try:
stree_str, smodel, ds = m.groups()
if ds:
ds = int(ds[3:]) - 1
except Exception as e:
print(fn)
print(m.groups())
raise e
return Params(filename=fn,
stree=fn2nw(stree_str),
stree_str=stree_str,
smodel=smodel,
ds=ds)
if __name__ == '__main__':
nprocs = int(argv[2])
with open('/N/u/bkrosenz/BigRed3/.ssh/db.pwd') as f:
password = f.read().strip()
print(argv)
session, conn = make_session_kw(username='bkrosenz_root',
password=password,
database='bkrosenz',
schema=schema,
port=5444,
host='10.79.161.8',
with_metadata=False # sasrdspp02.uits.iu.edu'
)
top2tid = pd.read_sql_table('topologies',
con=conn,
schema=schema,
columns=['topology', 'tid'],
index_col='topology'
)['tid'].to_dict()
RECOMB = False
dirname = argv[1]
if 'bp_b' in dirname:
try:
match = dirname_rx.search(dirname).groups()
slength, nblocks, ngenes = map(
int, match
)
except AttributeError as e:
print(dirname)
raise(e)
RECOMB = True
scf_table = 'rec_scf'
elif 'hetero' in dirname:
slength, ngenes = 500, 250
scf_table = 'heterotachy_scf'
elif '1_rate' in dirname:
slength, ngenes = 500, 250
scf_table = 'one_rate_scf'
else:
slength = int(re.search('(\d+)bp', dirname).group(1))
scf_table = 'nonrec_scf'
print(slength, scf_table, 'getting filenames')
dirname = utils.Path(dirname)
filenames = list(
filter(utils.is_nonempty,
(dirname / 'scf').glob('*_tree1.gz'))
)
random.shuffle(filenames)
print('reading species tree')
nw2sid = pd.read_sql_table('species_trees',
con=conn,
schema=schema,
columns=['sid', 'newick'],
index_col='newick'
)['sid'].to_dict()
csize = int(5000/nprocs)
sim_models = ('LG', 'WAG')
# TODO check file size, keep recmap in memory
stree = ds = smodel = recfile = None
with Pool(nprocs) as p:
for param in p.map(parse_filename, filenames):
written = 0
if param.stree != stree:
stree = param.stree
try:
sid = nw2sid[stree]
except KeyError:
print('{} not found in species_trees, skipping'.format(stree))
continue
try:
d = pd.read_csv(param.filename,
header=None,
sep='\t',
usecols=range(1, 5),
names=('top_1', 'top_2', 'top_3', 'nsites')
)
d['sid'] = sid
ds = param.ds
if RECOMB:
rfilename = dirname/'seqs' / \
f'{param.stree_str}_{param.smodel}.permutations.npy'
try:
recmap = utils.np.load(rfilename)
_, _, n = recmap.shape
d['tree_no'] = recmap[ds, :, :].T.tolist()
if (n != len(d)):
raise AttributeError
except FileNotFoundError:
print('no recfile found at ', rfilename)
except ValueError:
print(param)
d['ds_no'] = ds
else:
d['tree_no'] = d.index
d['sid'] = sid
d['sim_model'] = param.smodel
d['theta'] = 0.01
d['seq_length'] = slength
try:
d.to_sql(scf_table, conn,
schema=schema,
method='multi',
if_exists='append',
index=False)
written = len(d)
except (UniqueViolation, IntegrityError) as e:
print('Error: most likely from duplicate gene trees.',
param,
d.columns, sep='\t')
except Exception as e:
print('error writing', e, d)
print(param)
finally:
print('wrote {} scf datasets to sql'.format(
written))
gc.collect()
except OSError as e:
print(e, 'param:', param, sep='\n')
print('finished updating scf')
|
X=int(input())
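# Leap year: divisible by 4 but not by 100, or divisible by 400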
if (not X % 4 and X % 100) or not X % 400:
print("Високосный")
else:
print("Обычный")
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 16 18:03:03 2015
@author: Martin Nguyen
"""
TimeToVFTest = 11
FirstProgressionTarget = 21.0
SecondProgressionTarget = 18.0
ThirdProgressionTarget = 15.0
AgeNottoSurgery = 85
TimenotSideEffect = 2
from TreatmentBlock1Class import TreatmentBlock1
from TreatmentBlock2Class import TreatmentBlock2
from TreatmentBlock3Class import TreatmentBlock3
GraphPlan = {'A':['B','E'],'B': ['C'],'C':['D','E'],'D':['E'],'E':['F','G'],'F':['G'],'G':'Terminal'}
class Doctor(object):
def __init__(self,Attribute,params,medicalRecords):
self.PatientAttribute = Attribute
self.params = params
self.medicalRecords = medicalRecords
    # Main entry point: run the doctor logic for one patient visit
def ReturnAllDoctorValues (self):
self.IOPTargetSetting()
self.IOPandSideEffectEvaluation()
self.DoctorModule()
def DoctorModule(self):
self.InitializeCorrectTreatment()
if self.medicalRecords['ExitCode'] == True:
self.ChangeTreatmentPlan()
self.InitializeCorrectTreatment()
self.medicalRecords['ExitCode'] = False
self.medicalRecords['PatientVisits'] += 1
def IOPTargetSetting(self):
#the doctor module here is only called during VF Tests
if self.params['VFCountdown'] > TimeToVFTest:
self.SetCorrectIOPTarget()
self.medicalRecords['NumberVF'] +=1
self.params['VFCountdown'] = self.params['VFCountdown'] + self.params['time_next_visit']
def IOPandSideEffectEvaluation(self):
if self.medicalRecords['MedicationIntake'] > TimenotSideEffect :
self.params['SideEffect'] = 0
if self.PatientAttribute['IOP'] > self.PatientAttribute['IOPTarget']:
self.medicalRecords['TreatmentOverallStatus'] = 2
self.medicalRecords['ContinueTreatment'] = True
else:
            self.medicalRecords['ContinueTreatment'] = False
    # Helpers: track disease progression and set the IOP target accordingly
def SetCorrectIOPTarget(self):
if self.params['FirstProgression'] == 1 and self.PatientAttribute['CumulativeMDR'] > 2:
self.params['SecondProgression'] =1
self.PatientAttribute['CumulativeMDR'] = 0
elif self.PatientAttribute['CumulativeMDR'] > 2:
self.params['FirstProgression'] = 1
self.PatientAttribute['CumulativeMDR'] = 0
#########################################################
if self.params['Conversion'] == True:
if self.params['FirstProgression'] == 1 and self.params['SecondProgression'] == 1:
self.PatientAttribute['IOPTarget'] = ThirdProgressionTarget
elif self.params['FirstProgression'] == 1:
self.PatientAttribute['IOPTarget'] = SecondProgressionTarget
else:
self.PatientAttribute['IOPTarget'] = FirstProgressionTarget
self.params['VFCountdown'] = 0
def InitializeCorrectTreatment(self):
if self.medicalRecords['TreatmentBlock'] == 'A':
block = TreatmentBlock1(self.params,self.medicalRecords)
block.update()
del block
elif self.medicalRecords['TreatmentBlock'] == 'B':
block = TreatmentBlock2(self.PatientAttribute,self.params,self.medicalRecords)
block.update()
del block
elif self.medicalRecords['TreatmentBlock'] == 'C':
block = TreatmentBlock1(self.params,self.medicalRecords)
block.update()
del block
elif self.medicalRecords['TreatmentBlock'] == 'D':
block = TreatmentBlock2(self.PatientAttribute,self.params,self.medicalRecords)
block.update()
del block
elif self.medicalRecords['TreatmentBlock'] == 'E':
block = TreatmentBlock1(self.params,self.medicalRecords)
block.update()
del block
elif self.medicalRecords['TreatmentBlock'] == 'F':
block = TreatmentBlock3(self.PatientAttribute,self.params,self.medicalRecords)
block.updateImplant()
del block
elif self.medicalRecords['TreatmentBlock'] == 'G':
block = TreatmentBlock1(self.params,self.medicalRecords)
block.update()
del block
def ChangeTreatmentPlan(self):
key = self.medicalRecords['TreatmentBlock']
if key == 'B' or key == 'D' or key == 'F':
self.medicalRecords['TreatmentBlock'] = GraphPlan[key][0]
elif key == 'C':
if self.medicalRecords['TrabeculectomySuccess'] == True:
self.medicalRecords['TreatmentBlock'] = GraphPlan[key][0]
else:
self.medicalRecords['TreatmentBlock'] = GraphPlan[key][1]
else:
if self.PatientAttribute['Age'] < AgeNottoSurgery:
self.medicalRecords['TreatmentBlock'] = GraphPlan[key][0]
else:
self.medicalRecords['TreatmentBlock'] = GraphPlan[key][1]
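# Illustrative sketch (not part of the original model): ChangeTreatmentPlan walks
# the GraphPlan edges defined above. For example, a patient in block 'A' who is
# younger than AgeNottoSurgery moves to 'B' (GraphPlan['A'][0]), otherwise to 'E'
# (GraphPlan['A'][1]); from 'C', a successful trabeculectomy leads to 'D',
# otherwise to 'E'.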
|
# -*- coding: utf-8 -*-
__license__ = """
This file is part of **janitoo** project https://github.com/bibi21000/janitoo.
License : GPL(v3)
**janitoo** is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
**janitoo** is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with janitoo. If not, see http://www.gnu.org/licenses.
"""
__copyright__ = "Copyright © 2013-2014-2015-2016 Sébastien GALLET aka bibi21000"
__author__ = 'Sébastien GALLET aka bibi21000'
__email__ = 'bibi21000@gmail.com'
try:
__import__('pkg_resources').declare_namespace(__name__)
except Exception: # pragma: no cover
# bootstrapping
pass # pragma: no cover
import sys, os, errno
import time, datetime
import unittest
import threading
import json as mjson
import shutil
import mock
import platform
import pwd
import grp
import socket
import tempfile
from netifaces import interfaces, ifaddresses, AF_INET
from pkg_resources import iter_entry_points
from nose.plugins.skip import SkipTest
from janitoo.mqtt import MQTTClient
from janitoo.dhcp import JNTNetwork, HeartbeatMessage
from janitoo.utils import json_dumps, json_loads
from janitoo.utils import HADD_SEP, HADD
from janitoo.utils import TOPIC_HEARTBEAT
from janitoo.utils import TOPIC_NODES, TOPIC_NODES_REPLY, TOPIC_NODES_REQUEST
from janitoo.utils import TOPIC_BROADCAST_REPLY, TOPIC_BROADCAST_REQUEST
from janitoo.utils import TOPIC_VALUES_USER, TOPIC_VALUES_CONFIG, TOPIC_VALUES_SYSTEM, TOPIC_VALUES_BASIC
from janitoo.runner import jnt_parse_args
class JNTTBase(unittest.TestCase):
"""Grand mother
"""
path = '/tmp/janitoo_test'
broker_user = 'toto'
broker_password = 'toto'
@classmethod
def setUpClass(self):
self.skip = True
if 'NOSESKIP' in os.environ:
self.skip = eval(os.environ['NOSESKIP'])
if 'MANUALSKIP' in os.environ:
self.skipManual = eval(os.environ['MANUALSKIP'])
else:
self.skipManual = True
self.tmp_files = []
@classmethod
def tearDownClass(self):
try:
pass
#shutil.rmtree(self.path)
except OSError as exc: # Python >2.5
pass
def setUp(self):
try:
shutil.rmtree(self.path)
except OSError as exc: # Python >2.5
time.sleep(1.0)
try:
shutil.rmtree(self.path)
except OSError as exc: # Python >2.5
time.sleep(5.0)
try:
shutil.rmtree(self.path)
except OSError as exc: # Python >2.5
pass
os.makedirs(self.path)
os.makedirs(os.path.join(self.path, 'etc'))
os.makedirs(os.path.join(self.path, 'cache'))
os.makedirs(os.path.join(self.path, 'home'))
os.makedirs(os.path.join(self.path, 'log'))
os.makedirs(os.path.join(self.path, 'run'))
def tearDown(self):
try:
pass
#shutil.rmtree(self.path)
except OSError:
pass
@classmethod
def skipManualTest(self, message=''):
"""Skip a manual test (need human intervention)
"""
if self.skipManual:
raise SkipTest("%s" % ("manual test (%s)" % message))
@classmethod
def skipTest(self, message=''):
"""Skip a test
"""
raise SkipTest("%s" % (message))
@classmethod
def skipAllTests(self):
"""Skip a test when JANITOO_ALLTESTS is in env.
"""
if 'JANITOO_ALLTESTS' in os.environ:
raise SkipTest("%s" % ("Skipped on JANITOO_ALLTESTS"))
@classmethod
def onlyAllTests(self):
"""Run a test only when JANITOO_ALLTESTS is in env
"""
if 'JANITOO_ALLTESTS' not in os.environ:
raise SkipTest("%s" % ("Only on JANITOO_ALLTESTS"))
@classmethod
def skipTravisTest(self):
"""Skip a test on travis.
"""
if 'TRAVIS_OS_NAME' in os.environ:
raise SkipTest("%s" % ("Skipped on travis"))
@classmethod
def onlyTravisTest(self):
"""Run a test only on travis
"""
if 'TRAVIS_OS_NAME' not in os.environ:
raise SkipTest("%s" % ("Only on travis"))
@classmethod
def skipCircleTest(self):
"""Skip a test on circle
"""
if 'CIRCLE_USERNAME' in os.environ:
raise SkipTest("%s" % ("Skipped on circle"))
@classmethod
def onlyCircleTest(self):
"""Run a test only on circle
"""
if 'CIRCLE_USERNAME' not in os.environ:
raise SkipTest("%s" % ("Only on circle"))
@classmethod
def skipCITest(self):
"""Skip a test on continouos integration
"""
if 'TRAVIS_OS_NAME' in os.environ:
raise SkipTest("%s" % ("Skipped on Continuous Integration"))
if 'CIRCLE_USERNAME' in os.environ:
raise SkipTest("%s" % ("Skipped on Continuous Integration"))
@classmethod
def onlyCITest(self):
"""Run a test only on continuous integration
"""
if 'TRAVIS_OS_NAME' not in os.environ and \
'CIRCLE_USERNAME' not in os.environ:
raise SkipTest("%s" % ("Only on Continuous Integration"))
@classmethod
def skipDockerTest(self):
"""Skip a test on docker
"""
if 'JANITOO_DOCKER' in os.environ:
raise SkipTest("%s" % ("Skipped on Docker"))
@classmethod
def onlyDockerTest(self):
"""Run a test only on docker
"""
if 'JANITOO_DOCKER' not in os.environ:
raise SkipTest("%s" % ("Only on docker"))
@classmethod
def skipRasperryTest(self):
"""Skip a test when not on raspy
"""
if platform.machine().startswith('armv6'):
raise SkipTest("%s" % ('Skipped on Raspberry pi'))
@classmethod
def onlyRasperryTest(self):
"""Skip a test when not on raspy
"""
if not platform.machine().startswith('armv6'):
raise SkipTest("%s" % ('Only on a Raspberry pi'))
@classmethod
def skipNoPingTest(self, ip):
"""Skip a test when when no ping response
"""
response = os.system("ping -c 1 -w2 " + ip + " > /dev/null 2>&1")
if response != 0:
raise SkipTest("No ping response from %s" % (ip))
@classmethod
def wipTest(self, message=''):
"""Work In Progress test
"""
raise SkipTest("Work in progress : %s" % message)
def touchFile(self, path):
"""Touch a file
"""
with open(path, 'a'):
os.utime(path, None)
def rmFile(self, path):
"""Remove a file
"""
if os.path.isfile(path):
os.remove(path)
def assertFile(self, path):
"""Check a file exists
"""
print("Check file %s" % path)
self.assertTrue(os.path.isfile(path))
def assertDir(self, path):
"""Check a directory exists
"""
print("Check directory %s" % path)
self.assertTrue(os.path.isdir(path))
def assertUser(self, usr):
"""Check a user exists on the system
"""
print("Check user %s" % usr)
try:
pwd.getpwnam(usr)
res = True
except KeyError:
print(('User %s does not exist.' % usr))
res = False
self.assertTrue(res)
    def assertGroup(self, group):
        """Check a group exists on the system
        """
        print("Check group %s" % group)
        try:
            # look up the group name via the grp module imported above
            grp.getgrnam(group)
            res = True
        except KeyError:
            print(('Group %s does not exist.' % group))
            res = False
        self.assertTrue(res)
def assertDateInterval(self, which, dateref, delta=1):
"""
"""
print("Check date %s in interval : %s +/- %ss" % (which, dateref, delta))
self.assertTrue(which > dateref - datetime.timedelta(seconds=delta))
self.assertTrue(which < dateref + datetime.timedelta(seconds=delta))
def assertTCP(self, server='localhost', port=80):
"""
"""
# Create a TCP socket
try:
ip = socket.gethostbyname(server)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect_ex((ip, port))
            s.send(b'test')
s.close()
except socket.error as e:
raise AssertionError("Can't connect to %s(%s):%s"%(server, ip, port))
except socket.gaierror:
raise AssertionError("Can't connect to %s(%s):%s"%(server, 'unknown', port))
def assertFsmBoot(self, bus=None, state='booting', timeout=20):
"""Assert Finish State Machine can boot
"""
if bus is None:
self.skipTest("Can't test state of a None bus")
i = 0
while i<timeout*2 and bus.state == state:
time.sleep(0.5)
i += 1
print("Bus state %s"%bus.state)
self.assertNotEqual(state, bus.state)
def mkDir(self, path):
"""Create a directory
"""
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
def mkTempFile(self, prefix="tmp"):
"""Create a temporary file and return ist name
"""
tmpfile,tmpname = tempfile.mkstemp(prefix='janitoo_%s'%prefix)
self.tmp_files.append(tmpname)
os.close(tmpfile)
return tmpname
def cpTempFile(self, src):
"""Copy the path_src fil to a tmp file.
Return the path of the tmp file
"""
dst = None
try:
dst = self.mkTempFile()
shutil.copyfile(src, dst)
finally:
try:
src.close()
except Exception:
pass
try:
dst.close()
except Exception:
pass
return dst
def rmDir(self, path):
"""Remove a directory
"""
#try:
shutil.rmtree(path)
#except OSError as exc: # Python >2.5
# pass
def startServer(self):
pass
def stopServer(self):
pass
def getDataFile(self, path):
"""Retrieve a datafile. Look in the current dir and if not found look in the __file__ directory
"""
if os.path.isfile(path):
return path
        path = os.path.join(os.path.dirname(__file__), path)
if os.path.isfile(path):
return path
raise RuntimeError("[%s] : Can't find data file %s"%(self.__class__.__name__, path))
@property
def ip4Addresses(self):
"""Retrieve all ip4 adresses as a list
"""
ip_list = []
for interface in interfaces():
for link in ifaddresses(interface).get(AF_INET, ()):
ip_list.append(link['addr'])
print("Found ip4 addresses %s" % ip_list)
return ip_list
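# Illustrative sketch (not part of janitoo itself): test cases subclass JNTTBase
# and combine its skip helpers with the filesystem assertions, e.g.:
#
#     class MyServerTest(JNTTBase):
#         path = '/tmp/janitoo_mytest'
#         def test_config_written(self):
#             self.skipCITest()
#             self.touchFile(os.path.join(self.path, 'etc', 'my.conf'))
#             self.assertFile(os.path.join(self.path, 'etc', 'my.conf'))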
class JNTTDockerBase(JNTTBase):
"""Tests for servers on docker
"""
def setUp(self):
JNTTBase.onlyDockerTest()
JNTTBase.setUp(self)
#Definitions for database tests
try:
from sqlalchemy import Table, Column, String
from janitoo_db.base import Base, create_db_engine
DBCONFS = [
('Sqlite', {'dbconf':'sqlite:////tmp/janitoo_tests.sqlite'}),
('Mysql',{'dbconf':'mysql+pymysql://root:janitoo@localhost/janitoo_tests'}),
('Postgresql',{'dbconf':'postgresql://janitoo:janitoo@localhost/janitoo_tests'}),
]
alembic_version = Table('alembic_version', Base.metadata,
Column('version_num', String(32), nullable=False)
)
except ImportError:
pass
|
def checker_board(rows, cols):
    """Print a rows x cols checkerboard of '*' and ' ' characters."""
    board = []
for j in range(0,rows):
row=[]
for i in range(0,cols):
if j%2==0:
if i%2==0:
row+="*"
else:
row+=" "
else:
if i%2==0:
row+=" "
else:
row+="*"
board.append(row)
for i in board:
s=""
print s.join(i)
checker_board(7,8)
|
import json
import datetime
import time
import os
import dateutil.parser
import logging
import boto3
import re
import pymssql
import requests
from requests.auth import HTTPBasicAuth
region = 'us-east-1'
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
ec2 = boto3.resource('ec2', region_name=region)
ec2_client = boto3.client('ec2')
lex_client = boto3.client('lex-models')
cloudwatch = boto3.client('cloudwatch')
ambariUser = 'admin'
ambariPass = 'admin'
server = 'chatbottestdb.cniw8p6tx7sx.us-east-1.rds.amazonaws.com'
user = 'shubhamkackar'
password = 'arsenal1994'
'''
with open('AWS_Pricing.csv', 'rb') as price_chart:
reader = csv.reader(price_chart, delimiter=',')
price_chart_list = list(reader)
total_rows = len(price_chart_list)
'''
# Start an instance
def action_instance(intent_request):
instance_action = intent_request['currentIntent']['slots']['instance_actions']
instance_identifier = intent_request['currentIntent']['slots']['instance_identifiers']
'''if instance_identifier is None:
response_get_slot_type = lex_client.get_slot_type(name='instance_identifiers', version='$LATEST')
print response_get_slot_type
slot_values_present = []
for evals in response_get_slot_type[enumerationValues]:
slot_values_present.append(evals['value'])
print slot_values_present
user_input = intent_request['currentIntent']['inputTranscript'].split()
response_put_slot_type = lex_client.put_slot_type(name='instance_identifiers',enumerationValues=[{'value': 'ekta'}],checksum='0379e74f-1cbe-4a3a-8fd0-efeba73c608f')
instance_identifier = 'none' '''
#print (type(instance_action))
#print (type(instance_identifier))
#response_all_instances = ec2_client.describe_instances(Filters=[{'Name': 'tag:Name','Values': ['*'instance_identifier'*']}])
#print (response_all_instances)
response_describe = ec2_client.describe_instances(Filters=[{'Name': 'tag:Name','Values': ['*'+instance_identifier+'*']}])
print response_describe
words_show = ['show','list']
words_start = ['start']
words_stop = ['stop']
instance_ids = []
instance_names = []
total_instances = 0
for i in range(0, len(response_describe['Reservations'])):
for j in range(0, len(response_describe['Reservations'][i]['Instances'])):
instance_ids.append(response_describe['Reservations'][i]['Instances'][j]['InstanceId'])
if instance_action in words_show:
for i in range(0, len(response_describe['Reservations'])):
for j in range(0, len(response_describe['Reservations'][i]['Instances'])):
for k in range(0, len(response_describe['Reservations'][i]['Instances'][j]['Tags'])):
if(response_describe['Reservations'][i]['Instances'][j]['Tags'][k]['Key'] == 'Name'):
instance_names.append(response_describe['Reservations'][i]['Instances'][j]['Tags'][k]['Value'])
total_instances +=1
break
str1 = ' , \n'.join(instance_names)
print str1
output_message = 'There are a total of '+str(total_instances)+' Instances and they are as follows:-'+'\n'+str1
if instance_action in words_start:
response_action = ec2_client.start_instances(InstanceIds=instance_ids)
print('startAction')
output_message = 'The '+str(instance_identifier)+' instance/s you have requested has been '+str(instance_action)+'ed.'
if instance_action in words_stop:
response_action = ec2_client.stop_instances(InstanceIds=instance_ids)
print('stopAction')
#output_message = 'The instance you have requested has been started.'+instance_identifier+instance_action
output_message = 'The '+str(instance_identifier)+' instance/s you have requested has been '+str(instance_action)+'ped.'
#"Observed %s instances running at %s" % (num_instances, timestamp)
return close(
'Fulfilled',
{
'contentType': 'PlainText',
'content': output_message
}
)
# Greetings
def greetings(intent_request):
return elicit(
'Fulfilled',
{
'contentType': 'PlainText',
'content': 'Hi, I am LabRat, a Chatbot. I can help you with DataLake related queries.'
}
)
# environment Status
def environment_status(intent_request):
insts = ""
instances = ec2.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': ['running','stopped','terminated','pending','stopping','shutting-down']}])
for instance in instances:
launch_time = instance.launch_time
current_time = datetime.datetime.now(launch_time.tzinfo)
lt_delta = current_time - launch_time
running_time = str(lt_delta)
lt_Delta_hr = lt_delta.total_seconds()/3600
period = 60
if lt_Delta_hr > 360 and lt_Delta_hr < 1412 :
period = 300 * int(lt_delta.total_seconds()/1440)
elif lt_delta.total_seconds()/60 > 1412 :
period = 3600 * int(lt_delta.total_seconds()/1440)
results = cloudwatch.get_metric_statistics(Namespace='AWS/EC2', MetricName='CPUUtilization', Dimensions=[{'Name': 'InstanceId', 'Value': instance.id}], StartTime=launch_time, EndTime=current_time, Period=period, Statistics=['Average'])
length = len(results['Datapoints'])
if length == 0 : length = 1
sum_of_avg = 0
for datapoint in results['Datapoints'] :
sum_of_avg = sum_of_avg + datapoint['Average']
average = str(sum_of_avg / length) + '%'
insts = insts + instance.id + ' , ' + str(instance.launch_time) + ' , ' + running_time + ' , ' + average + ' \n '
print('Instance : ' + instance.id + ', Launch Time : ' + str(instance.launch_time) + ', Running Time : ' + running_time + ', CPU Utilization : ' + average)
return elicit(
'Fulfilled',
{
'contentType': 'PlainText',
'content': 'Here is the List of instances with CPU Utilization & Running time.' + insts
}
)
# Pricing Information
def pricing_information(intent_request):
insts = ""
instances = ec2.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': ['running','stopped','terminated','pending','stopping','shutting-down']}])
for instance in instances:
instance_type = instance.instance_type
launch_time = instance.launch_time
current_time = datetime.datetime.now(launch_time.tzinfo)
lt_delta = current_time - launch_time
running_time_hr = str(lt_delta)
price = 0.0
        # NOTE: total_rows / price_chart_list come from the AWS_Pricing.csv block
        # commented out near the top of this file; re-enable it before using this intent.
        for row_cnt in range(1, total_rows):
            if price_chart_list[row_cnt][0] == region and price_chart_list[row_cnt][1] == instance_type :
                price = float(price_chart_list[row_cnt][2]) * (lt_delta.total_seconds()/3600)
        #insts = insts + instance.id + ',' + str(instance.launch_time) + ',' + running_time_hr + ',' + average + '\n'
        print('Instance : ' + instance.id + ', Running Time : ' + running_time_hr + ', Instance Type : ' + instance_type + ', Price : ' + str(price))
return elicit(
'Fulfilled',
{
'contentType': 'PlainText',
'content': 'Here is the List of instances with CPU Utilization & Running time.' + insts
}
)
def close(fulfillment_state, message):
response = {
'dialogAction': {
'type': 'Close',
'fulfillmentState': fulfillment_state,
'message': message
}
}
return response
def elicit(fulfillment_state, message):
response = {
'dialogAction': {
'type': 'ElicitIntent',
'message': message
}
}
return response
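# Illustrative sketch (not part of the Lambda): close() and elicit() build the
# response envelope that Lex expects, e.g.
#   close('Fulfilled', {'contentType': 'PlainText', 'content': 'done'})
#   -> {'dialogAction': {'type': 'Close', 'fulfillmentState': 'Fulfilled',
#                        'message': {'contentType': 'PlainText', 'content': 'done'}}}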
# --- Intent handler ---
def dispatch(intent_request):
logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))
intent_name = intent_request['currentIntent']['name']
print(intent_request)
# Dispatch to your bot's intent handlers
if intent_name == 'action_instances':
return action_instance(intent_request)
elif intent_name == 'greetings':
return greetings(intent_request)
elif intent_name == 'environment_status':
return environment_status(intent_request)
    elif intent_name == 'pricing_information':
return pricing_information(intent_request)
elif intent_name == 'services_check':
return services_list(intent_request)
else:
return close(
'Fulfilled',
{
'contentType': 'PlainText',
                'content': 'Sorry! The request you are looking for is not supported in the current release.'
}
)
# list of services in Ambari
def services_list(intent_request):
instance_identifier = intent_request['currentIntent']['slots']['instance_identifier']
#which_stacks = intent_request['currentIntent']['slots']['which_stack']
response_describe = ec2_client.describe_instances(Filters=[{'Name': 'tag:Name','Values': ['*'+instance_identifier+'*']}])
print response_describe
words_show = ['show','list']
statck_list_dev = ['dev']
stack_list_int = ['integration','int','nonprod']
stack_list_prod = ['prod','prodstage']
words_start = ['start']
words_stop = ['stop']
instance_ids = []
instance_id = []
instance_names = []
instance_states = []
instance_states_1 = []
total_instances = 0
for i in range(0, len(response_describe['Reservations'])):
for j in range(0, len(response_describe['Reservations'][i]['Instances'])):
instance_ids.append(response_describe['Reservations'][i]['Instances'][j]['InstanceId'])
for i in range(0, len(response_describe['Reservations'])):
for j in range(0, len(response_describe['Reservations'][i]['Instances'])):
for k in range(0, len(response_describe['Reservations'][i]['Instances'][j]['Tags'])):
if('Ambari'in (response_describe['Reservations'][i]['Instances'][j]['Tags'][k]['Value']) or ('ambari'in (response_describe['Reservations'][i]['Instances'][j]['Tags'][k]['Value'] ))):
instance_id = response_describe['Reservations'][i]['Instances'][j]['InstanceId']
#print instance_id
str2 = instance_id
#print instance_id
#instance_names.append(response_describe['Reservations'][i]['Instances'][j]['Tags'][k]['Value'])
#total_instances +=1
#break
#str1 = ' , '+'\n'.join(instance_names)
#print 'wow'
print instance_id
print 'Not Now :('
print 'There are a total of '+str(total_instances)+' Instances and they are as follows:-'+'\n'+str2
check_service_list(instance_id)
#return check_service_list(instance_id)
return elicit(
'Fulfilled',
{
'contentType': 'PlainText',
'content': 'Working Now'
}
)
def check_service_list(ip):
    # initialise so the return below is safe even if the lookup fails
    service_list = []
    try:
        print("getting service list")
        response_instance = ec2_client.describe_instances( InstanceIds = [ip])
print("DNS: " + response_instance['Reservations'][0]['Instances'][0]['PrivateDnsName'])
base_url = 'https://'+response_instance['Reservations'][0]['Instances'][0]['PrivateDnsName']+':8080/api/v1/clusters'
r = requests.get(base_url, auth=HTTPBasicAuth(ambariUser, ambariPass), verify=False)
cluster_name = r.json()['items'][0]['Clusters']['cluster_name']
print ("cluster name")
print (cluster_name)
base_url_services = 'https://'+response_instance['Reservations'][0]['Instances'][0]['PrivateDnsName']+':8080/api/v1/clusters/'+cluster_name+'/services'
r_services = requests.get(base_url_services, auth=HTTPBasicAuth(ambariUser, ambariPass), verify=False)
print(r_services.json())
service_list = []
for i in range(0,len(r_services.json()['items'])):
service_list.append(r_services.json()['items'][i]['ServiceInfo']['service_name'])
print (service_list)
except Exception as e:
print(e)
return service_list
# --- Main handler ---
def lambda_handler(event, context):
# By default, treat the user request as coming from the America/New_York time zone.
os.environ['TZ'] = 'America/New_York'
time.tzset()
logger.debug('event.bot.name={}'.format(event['bot']['name']))
#conn = pymssql.connect(server, user, password, "chatbot")
#cursor = conn.cursor()
#cursor.execute('SELECT * FROM users')
#row = cursor.fetchone()
#while row:
# print("ID=%d, Name=%s" % (row[0], row[1]))
# row = cursor.fetchone()
return dispatch(event)
|
# -*- coding: utf-8 -*-
"""Renderer for JSON API v1.0."""
from collections import OrderedDict
from django.core.urlresolvers import reverse
from django.utils.encoding import force_text
from django.utils.six.moves.urllib.parse import urlparse, urlunparse
from rest_framework.renderers import JSONRenderer
from rest_framework.status import is_client_error, is_server_error
from ..renderers import BaseJsonApiTemplateHTMLRenderer
class JsonApiV10Renderer(JSONRenderer):
"""JSON API v1.0 renderer.
This is a partial implementation, focused on the current needs of the API.
For the full spec, see http://jsonapi.org/format/1.0/
"""
PAGINATION_KEYS = ('count', 'next', 'previous', 'results')
dict_class = OrderedDict
media_type = 'application/vnd.api+json'
namespace = 'v2'
def render(self, data, accepted_media_type=None, renderer_context=None):
"""Convert DRF native data to the JSON API v1.0 format."""
# Construct absolute URI for override path or request path (default)
response = renderer_context.get('response')
self.request = renderer_context['request']
self.request_uri = self.request.build_absolute_uri()
override_path = renderer_context.get('override_path')
resource_uri = self.request.build_absolute_uri(override_path)
fields_extra = renderer_context.get('fields_extra')
status_code = response and response.status_code
exception = response and getattr(response, 'exc', None)
is_err = is_client_error(status_code) or is_server_error(status_code)
as_relationship = renderer_context.get('as_relationship')
if data is None:
# Deleted items
converted = None
elif all([key in data for key in self.PAGINATION_KEYS]):
# Paginated object
converted = self.convert_paginated(
data, fields_extra, request_uri=self.request_uri)
elif is_err:
converted = self.convert_error(
data, exception, fields_extra, status_code)
elif self.request.method == 'OPTIONS':
converted = {'meta': data}
elif as_relationship:
relationship_name = as_relationship
if as_relationship not in data:
# Relationship to current resource in a historical viewset
# For example, historicalbrowsers/5/relationships/browser
# In this case, 'as_relationship' is the singular name
# (browser), but the link ID is in 'object_id'.
assert 'object_id' in data, (
'Expecting "%s" or object_id in data keys %s.'
% (as_relationship, list(data.keys())))
assert 'object_id' in fields_extra, (
'Expecting "object_id" in fields_extra.')
assert 'name' in fields_extra['object_id'], (
'Expecting "name" in fields_extra["object_id"].')
object_name = fields_extra['object_id']['name']
assert object_name == as_relationship, (
('Expecting fields_extra["object_id"]["name"] == "%s",'
' got "%s".') % (as_relationship, object_name))
relationship_name = 'object_id'
converted = self.convert_to_relationship_object(
relationship_name, data[relationship_name],
fields_extra[relationship_name],
resource_uri=resource_uri)
else:
converted = self.convert_document(
data, fields_extra, resource_uri=resource_uri,
request_uri=self.request_uri)
renderer_context['indent'] = 4
return super(JsonApiV10Renderer, self).render(
data=converted,
accepted_media_type=accepted_media_type,
renderer_context=renderer_context)
def convert_to_relationship_object(
self, name, raw_id, field_data, resource_uri, include_links=True):
"""Convert from IDs to a relationship object.
Partially implements the full spec at:
http://jsonapi.org/format/#document-resource-object-relationships
Expecting raw_id to be one of:
- None (an empty to-one link)
- A single ID (a to-one link)
- An empty array (an empty to-many link)
- An array of one or more IDs (a to-many link)
The format of raw_id should agree with field_data['link']
Return is a relationship object, such as this (include_links=True):
{
"data": {
"type": "features",
"id": "1",
},
"links": {
"self": "/api/v2/features/3/relationships/parent",
"related": "/api/v2/features/3/parent",
},
}
"""
relationship = self.dict_class()
if include_links:
# TODO: Use reverse instead of concat to construct links
attr_name = field_data.get('name', name)
endpoint = field_data.get('singular', attr_name)
scheme, netloc, path, params, query, fragment = urlparse(
resource_uri)
base_uri = urlunparse((scheme, netloc, path, '', '', ''))
relationship['links'] = self.dict_class((
('self', base_uri + '/relationships/' + endpoint),
('related', base_uri + '/' + endpoint),
))
link = field_data['link']
resource = field_data.get('resource', name)
if link in ('from_many', 'to_many'):
data = [
self.dict_class((('type', resource), ('id', force_text(pk))))
for pk in raw_id]
elif raw_id is None:
data = None
else:
data = self.dict_class(
(('type', resource), ('id', force_text(raw_id))))
relationship['data'] = data
return relationship
def construct_resource_uri(self, resource_type, resource_id, field_data):
singular = field_data.get('singular', resource_type[:-1])
pattern = '%s:%s-detail' % (self.namespace, singular.replace('_', ''))
url = reverse(pattern, kwargs={'pk': resource_id})
uri = self.request.build_absolute_uri(url)
return uri
def convert_document(
self, data, fields_extra, resource_uri=None, request_uri=None,
include_relationship_links=True):
"""Convert DRF data into a JSON API document.
Keyword Arguments:
data - dictionary of names to DRF native values
fields_extra - dictionary of field names to metadata
resource_uri - the full URI of this resource, or None to derive from
ID field and metadata
request_uri - the full URI of the request, or None to copy the
resource_uri
include_relationship_links - For relationships, include or omit the
links object
"""
# Parse the ID data
raw_data_id = data['id']
id_extra = fields_extra['id']
data_resource = id_extra['resource']
if raw_data_id is None:
data_id = None
else:
data_id = force_text(raw_data_id)
if not resource_uri and data_id is not None:
resource_uri = self.construct_resource_uri(
data_resource, data_id, id_extra)
links = self.dict_class()
if request_uri:
links['self'] = request_uri
else:
links['self'] = resource_uri
# Parse the remaining elements
relationships = self.dict_class()
attributes = self.dict_class()
view_extra = {}
for name, value in data.items():
field_extra = fields_extra.get(name, {})
link = field_extra.get('link')
is_archive_of = field_extra.get('is_archive_of')
attr_name = field_extra.get('name', name)
if name == 'id':
pass # Handled above
elif link:
relationship = self.convert_to_relationship_object(
name, value, field_extra,
resource_uri, include_links=include_relationship_links)
relationships[attr_name] = relationship
elif is_archive_of:
archive_data = self.convert_archive_object(
name, value, is_archive_of)
attributes['archive_data'] = archive_data
elif name == '_view_extra':
view_extra = self.convert_extra(value, field_extra)
else:
attributes[attr_name] = value
# Assemble the document
if data_resource and data_id:
out_data = self.dict_class((
('type', data_resource),
('id', data_id),
))
if attributes:
out_data['attributes'] = attributes
if relationships:
out_data['relationships'] = relationships
else:
out_data = None
document = self.dict_class((
('links', links),
('data', out_data),
))
for name, value in view_extra.items():
assert name not in document
document[name] = value
return document
def convert_object(
self, data, fields_extra, resource_uri=None, request_uri=None):
"""Convert DRF data into a JSON API document.
Keyword Arguments:
data - dictionary of names to DRF native values
fields_extra - dictionary of field names to metadata
resource_uri - the full URI of this resource, or None to derive from
ID field and metadata
request_uri - the full URI of the request, or None to copy the
resource_uri
"""
document = self.convert_document(
data, fields_extra, resource_uri=resource_uri,
request_uri=request_uri, include_relationship_links=False)
obj = document['data']
obj.setdefault(
'links', self.dict_class()).update(document['links'])
return obj
def convert_archive_object(self, name, data, serializer):
data.update(data.pop('links', {}))
archive_extra = serializer.get_fields_extra()
archive_data = self.convert_object(data, archive_extra)
return archive_data
def convert_error(self, data, exception, fields_extra, status_code):
error_list = []
errors = []
for name, value in data.items():
field_extra = fields_extra.get(name, {})
is_link = bool(field_extra.get('link'))
group = 'relationships' if is_link else 'attributes'
parameter = getattr(exception, 'parameter', None)
if name == 'detail':
fmt_error = self.dict_class((
('detail', value),
('status', str(status_code)),
))
if parameter is not None:
fmt_error['source'] = self.dict_class((
('parameter', parameter),
))
errors.append(fmt_error)
elif name == '_view_extra':
for rname, error_dict in value.items():
assert rname != 'meta'
for seq, seq_errors in error_dict.items():
if seq is None:
# TODO: diagnose how subject feature errors are
# getting into view_extra.
seq = 'subject'
for fieldname, error_list in seq_errors.items():
path = '/included.%s.%s.%s' % (
rname, seq, fieldname)
assert isinstance(error_list, list)
for error in error_list:
fmt_error = self.dict_class((
('detail', error),
('path', path),
('status', str(status_code)),
))
errors.append(fmt_error)
else:
for error in value:
fmt_error = self.dict_class((
('status', str(status_code)),
('detail', error),
('path', '/data/%s/%s' % (group, name)),
))
errors.append(fmt_error)
assert errors, data
return self.dict_class((('errors', errors),))
def convert_paginated(self, data, fields_extra, request_uri):
item_list = []
for item in data['results']:
converted = self.convert_object(item, fields_extra)
item_list.append(converted)
return self.dict_class((
('links', self.dict_class((
('self', request_uri),
('next', data.get('next', None)),
('prev', data.get('previous', None)),
))),
('data', item_list),
('meta', self.dict_class((
('count', data['count']),
))),
))
def convert_extra(self, data, field_extra):
extra = self.dict_class()
for resource_name, resource_value in data.items():
if resource_name == 'meta':
extra['meta'] = resource_value
else:
serializer = resource_value.serializer.child
fields_extra = serializer.get_fields_extra()
for raw_resource in resource_value:
resource = self.convert_object(raw_resource, fields_extra)
extra.setdefault('included', []).append(resource)
return extra
class JsonApiV10TemplateHTMLRenderer(BaseJsonApiTemplateHTMLRenderer):
"""Render to a template, but use JSON API format as context."""
json_api_renderer_class = JsonApiV10Renderer
def customize_context(self, context):
# Add a collection of types and IDs
collection = {}
for resource in context.get('included', []):
resource_id = resource['id']
resource_type = resource['type']
collection.setdefault(resource_type, {})[resource_id] = resource
main_id = context['data']['id']
main_type = context['data']['type']
collection.setdefault(main_type, {})[main_id] = context['data']
context['collection'] = collection
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import cv2
# import numpy as np
import glob
import os
files = glob.glob(sys.argv[1] + '*.png')
if not os.path.exists(sys.argv[1] + '/rgb/'):
os.makedirs(sys.argv[1] + '/rgb/')
for f in files:
image = cv2.imread(f)
image = cv2.cvtColor(image, cv2.COLOR_YCR_CB2BGR)
    cv2.imshow("img", image)
    cv2.waitKey(1)  # brief delay so the preview window actually refreshes
# print os.path.dirname(f), os.path.basename(f)
print os.path.dirname(f) + '/rgb/' + os.path.basename(f)
cv2.imwrite(os.path.dirname(f) + '/rgb/' + os.path.basename(f), image)
# cv2.waitKey()
|
class IrreversibleData(object):
"""
    Some template code cannot be converted automatically.
    This class stores information about such irreversible code.
"""
def __init__(self, fileName, lineStart, lineEnd, type, oldValue):
self.lineStart = lineStart
self.lineEnd = lineEnd
self.fileName = fileName
self.type = type
self.oldValue = oldValue
self.codeReplacment = ["-No replacment-"]
@property
def codeReplacment(self):
return self._codeReplacment
@codeReplacment.setter
def codeReplacment(self, replacment):
if not isinstance(replacment, list):
raise ValueError("Replacment should be a list")
self._codeReplacment = replacment
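# Illustrative sketch (not part of the original module): record a block of
# template code that could not be converted and attach a suggested replacement.
#
#     data = IrreversibleData('template.html', 10, 12, 'macro', '{% old_macro %}')
#     data.codeReplacment = ['<!-- rewrite this macro by hand -->']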
|
import socket, re, itertools, ssl
from os import strerror
from multiprocessing import Pool, Lock, active_children
from time import sleep
global lock
lock = Lock()
class BrutePlugins(object):
def __init__(self,plugin):
self.plugin = plugin
def run(self):
self.donothing = 0
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if (secure == 1): self.ssocket = ssl.wrap_socket(self.s)
else: self.ssocket = self.s
self.connmsg = self.ssocket.connect_ex((host,port))
while (self.connmsg != 0):
print("ERROR:\t%s" % strerror(self.connmsg))
sleep(2.1)
self.connmsg = self.ssocket.connect_ex((host,port))
self.ssocket.send("HEAD "+path+"wp-content/plugins/"+self.plugin+"/ HTTP/1.1\r\nHost: "+host+"\r\n\r\n")
self.chunk = self.ssocket.recv(20)
while (len(self.chunk) < 20):
sleep(1)
self.chunk += self.ssocket.recv(20)
self.ssocket.shutdown(socket.SHUT_RDWR)
self.ssocket.close()
if (self.chunk.find("200 OK") > 0):
print("Valid plugin found:\t%s" % self.plugin)
lock.acquire()
f = open(plugfound,"a")
f.write(self.plugin+"\n")
f.close()
lock.release()
elif (self.chunk.find("403 Forb") > 0): # plugins finally locking down directories
print("Valid plugin found:\t%s" % self.plugin)
lock.acquire()
f = open(plugfound,"a")
f.write(self.plugin+"\n")
f.close()
lock.release()
elif (self.chunk.find("500") > 0):
print(str(self.plugin))
print("500 Internal Server Error Seen, you might be sending too fast!")
print("Logged to file as 500 in case of locked directory. If you see lots of these, please slow down scanner")
lock.acquire()
f = open(plugfound,"a")
f.write("500 (possible): "+self.plugin+"\n")
f.close()
lock.release()
return 0
elif (self.chunk.find("404") > 0): self.donothing = 1
# this only for a client who had certain keywords have redirect tags
#elif (self.chunk.find("301") > 0): self.donothing = 1
else:
print("Irregular server response seen.\n%s" % str(self.chunk))
return 1
return 0
def worker(plugins):
for plugin in plugins:
plugin = str(plugin.strip("\n"))
while (BrutePlugins(plugin).run() != 0): sleep(1)
def grouper(iterable,n,fillvalue=None):
it = iter(iterable)
def take():
while 1: yield list(itertools.islice(it,n))
return iter(take().next,[])
def brutePlugin(pluginlist,foundplug,hosti,pathi,porti,securei,psize):
global host
host = hosti
global port
port = porti
global secure
secure = securei
global plugfound
plugfound = foundplug
global path
path = pathi
f = open(plugfound,'w').close()
listsize = (len(pluginlist))
# manage pool
if (psize == 0): psize = 5
    if (listsize <= psize): chunksize = 1
else: chunksize = ((listsize / psize) + (listsize % psize))
print("Plugin list size: %d\tChunk size: %d\tPool size: %d" % ((listsize),chunksize,psize))
print("Plugin bruteforcing started")
pool = Pool(processes=psize)
for chunk in itertools.izip(grouper(pluginlist,chunksize)): pool.map_async(worker,chunk)
pool.close()
try:
while(len(active_children()) > 0): # how many active children do we have
sleep(2)
ignore = active_children()
except KeyboardInterrupt: exit('CTRL^C caught, exiting...\n\n')
print("Plugin bruteforce complete")
|
list1 = ['Tom', 'Jim', 'Mary', 'Tom', 'Jack', 'Rose', 'Jim']
if 'Tom' in list1:
print('call him back')
print(set([x for x in list1 if list1.count(x) > 1]))
student = {'Tom', 'Jim', 'Mary', 'Tom', 'Jack', 'Rose', 'Jim'}
student.add(12)
student.remove('Tom')
print(student)
set1 = set('abracadabra')
set2 = set('alacazam')
print(set1)
print(set2)
print(set1 & set2)
print(set1.intersection(set2))
print(set1 - set2)
print(set1.difference(set2))
print(set1 | set2)
print(set1.union(set2))
print(set1 ^ set2)
print(set1.symmetric_difference(set2))
|
def main():
word=''
count = 0
infile = open('/Users/Python/Desktop/mypython/mypython-4/employees.txt','r')
    for line in infile :
        line = line.rstrip('\n')
        count = count + 1
        if count == 1:
            word = word + line
        else:
            word = word + ':' + line
if (count % 3) == 0 :
word = word.split(':')
print("Name:",word[0])
print("ID:",word[1])
print("Department:",word[2])
count = 0
word =""
    infile.close()
main()
|
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class GameBasicInfo(scrapy.Item):
    # define the fields for your item here like:
appid = scrapy.Field()
title = scrapy.Field()
developer = scrapy.Field()
publisher = scrapy.Field()
tags = scrapy.Field()
genres = scrapy.Field()
early_access = scrapy.Field()
total_reviews = scrapy.Field()
positive_reviews = scrapy.Field()
negative_reviews = scrapy.Field()
english_reviews = scrapy.Field()
mature_content = scrapy.Field()
# reviews from community hub
class GameReview(scrapy.Item):
appid = scrapy.Field()
rank = scrapy.Field()
date = scrapy.Field()
hours = scrapy.Field()
review_text = scrapy.Field()
positive_review = scrapy.Field()
date_duration_days = scrapy.Field()
user_num_of_games = scrapy.Field()
# reviews from community hub
class ReleaseNotes(scrapy.Item):
appid = scrapy.Field()
title = scrapy.Field()
notes_body = scrapy.Field()
rate = scrapy.Field()
comment_count = scrapy.Field()
|
import math
from collections import OrderedDict
from functools import partial
from typing import Any, Callable, List, Optional, Sequence, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from torchvision.models._api import register_model, Weights, WeightsEnum
from torchvision.models._meta import _IMAGENET_CATEGORIES
from torchvision.models._utils import _ovewrite_named_param, handle_legacy_interface
from torchvision.ops.misc import Conv2dNormActivation, SqueezeExcitation
from torchvision.ops.stochastic_depth import StochasticDepth
from torchvision.transforms._presets import ImageClassification, InterpolationMode
from torchvision.utils import _log_api_usage_once
__all__ = [
"MaxVit",
"MaxVit_T_Weights",
"maxvit_t",
]
def _get_conv_output_shape(input_size: Tuple[int, int], kernel_size: int, stride: int, padding: int) -> Tuple[int, int]:
return (
(input_size[0] - kernel_size + 2 * padding) // stride + 1,
(input_size[1] - kernel_size + 2 * padding) // stride + 1,
)
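# Illustrative example (not in torchvision): with the conv output formula above,
# a 224x224 input with kernel_size=3, stride=2, padding=1 maps to
# _get_conv_output_shape((224, 224), 3, 2, 1) == (112, 112).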
def _make_block_input_shapes(input_size: Tuple[int, int], n_blocks: int) -> List[Tuple[int, int]]:
"""Util function to check that the input size is correct for a MaxVit configuration."""
shapes = []
block_input_shape = _get_conv_output_shape(input_size, 3, 2, 1)
for _ in range(n_blocks):
block_input_shape = _get_conv_output_shape(block_input_shape, 3, 2, 1)
shapes.append(block_input_shape)
return shapes
def _get_relative_position_index(height: int, width: int) -> torch.Tensor:
coords = torch.stack(torch.meshgrid([torch.arange(height), torch.arange(width)]))
coords_flat = torch.flatten(coords, 1)
relative_coords = coords_flat[:, :, None] - coords_flat[:, None, :]
relative_coords = relative_coords.permute(1, 2, 0).contiguous()
relative_coords[:, :, 0] += height - 1
relative_coords[:, :, 1] += width - 1
relative_coords[:, :, 0] *= 2 * width - 1
return relative_coords.sum(-1)
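# Illustrative example (not in torchvision): for a 2x2 window the returned index
# matrix has shape (4, 4) with values in [0, 8] ((2*2-1)**2 distinct relative
# offsets), and every diagonal entry is 4 (the zero offset). These values index
# into the relative_position_bias_table used below.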
class MBConv(nn.Module):
"""MBConv: Mobile Inverted Residual Bottleneck.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
expansion_ratio (float): Expansion ratio in the bottleneck.
squeeze_ratio (float): Squeeze ratio in the SE Layer.
stride (int): Stride of the depthwise convolution.
activation_layer (Callable[..., nn.Module]): Activation function.
norm_layer (Callable[..., nn.Module]): Normalization function.
p_stochastic_dropout (float): Probability of stochastic depth.
"""
def __init__(
self,
in_channels: int,
out_channels: int,
expansion_ratio: float,
squeeze_ratio: float,
stride: int,
activation_layer: Callable[..., nn.Module],
norm_layer: Callable[..., nn.Module],
p_stochastic_dropout: float = 0.0,
) -> None:
super().__init__()
proj: Sequence[nn.Module]
self.proj: nn.Module
should_proj = stride != 1 or in_channels != out_channels
if should_proj:
proj = [nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=True)]
if stride == 2:
proj = [nn.AvgPool2d(kernel_size=3, stride=stride, padding=1)] + proj # type: ignore
self.proj = nn.Sequential(*proj)
else:
self.proj = nn.Identity() # type: ignore
mid_channels = int(out_channels * expansion_ratio)
sqz_channels = int(out_channels * squeeze_ratio)
if p_stochastic_dropout:
self.stochastic_depth = StochasticDepth(p_stochastic_dropout, mode="row") # type: ignore
else:
self.stochastic_depth = nn.Identity() # type: ignore
_layers = OrderedDict()
_layers["pre_norm"] = norm_layer(in_channels)
_layers["conv_a"] = Conv2dNormActivation(
in_channels,
mid_channels,
kernel_size=1,
stride=1,
padding=0,
activation_layer=activation_layer,
norm_layer=norm_layer,
inplace=None,
)
_layers["conv_b"] = Conv2dNormActivation(
mid_channels,
mid_channels,
kernel_size=3,
stride=stride,
padding=1,
activation_layer=activation_layer,
norm_layer=norm_layer,
groups=mid_channels,
inplace=None,
)
_layers["squeeze_excitation"] = SqueezeExcitation(mid_channels, sqz_channels, activation=nn.SiLU)
_layers["conv_c"] = nn.Conv2d(in_channels=mid_channels, out_channels=out_channels, kernel_size=1, bias=True)
self.layers = nn.Sequential(_layers)
def forward(self, x: Tensor) -> Tensor:
"""
Args:
x (Tensor): Input tensor with expected layout of [B, C, H, W].
Returns:
Tensor: Output tensor with expected layout of [B, C, H / stride, W / stride].
"""
res = self.proj(x)
x = self.stochastic_depth(self.layers(x))
return res + x
class RelativePositionalMultiHeadAttention(nn.Module):
"""Relative Positional Multi-Head Attention.
Args:
feat_dim (int): Number of input features.
head_dim (int): Number of features per head.
max_seq_len (int): Maximum sequence length.
"""
def __init__(
self,
feat_dim: int,
head_dim: int,
max_seq_len: int,
) -> None:
super().__init__()
if feat_dim % head_dim != 0:
raise ValueError(f"feat_dim: {feat_dim} must be divisible by head_dim: {head_dim}")
self.n_heads = feat_dim // head_dim
self.head_dim = head_dim
self.size = int(math.sqrt(max_seq_len))
self.max_seq_len = max_seq_len
self.to_qkv = nn.Linear(feat_dim, self.n_heads * self.head_dim * 3)
self.scale_factor = feat_dim**-0.5
self.merge = nn.Linear(self.head_dim * self.n_heads, feat_dim)
self.relative_position_bias_table = nn.parameter.Parameter(
torch.empty(((2 * self.size - 1) * (2 * self.size - 1), self.n_heads), dtype=torch.float32),
)
self.register_buffer("relative_position_index", _get_relative_position_index(self.size, self.size))
# initialize with truncated normal the bias
torch.nn.init.trunc_normal_(self.relative_position_bias_table, std=0.02)
def get_relative_positional_bias(self) -> torch.Tensor:
bias_index = self.relative_position_index.view(-1) # type: ignore
relative_bias = self.relative_position_bias_table[bias_index].view(self.max_seq_len, self.max_seq_len, -1) # type: ignore
relative_bias = relative_bias.permute(2, 0, 1).contiguous()
return relative_bias.unsqueeze(0)
def forward(self, x: Tensor) -> Tensor:
"""
Args:
x (Tensor): Input tensor with expected layout of [B, G, P, D].
Returns:
Tensor: Output tensor with expected layout of [B, G, P, D].
"""
B, G, P, D = x.shape
H, DH = self.n_heads, self.head_dim
qkv = self.to_qkv(x)
q, k, v = torch.chunk(qkv, 3, dim=-1)
q = q.reshape(B, G, P, H, DH).permute(0, 1, 3, 2, 4)
k = k.reshape(B, G, P, H, DH).permute(0, 1, 3, 2, 4)
v = v.reshape(B, G, P, H, DH).permute(0, 1, 3, 2, 4)
k = k * self.scale_factor
dot_prod = torch.einsum("B G H I D, B G H J D -> B G H I J", q, k)
pos_bias = self.get_relative_positional_bias()
dot_prod = F.softmax(dot_prod + pos_bias, dim=-1)
out = torch.einsum("B G H I J, B G H J D -> B G H I D", dot_prod, v)
out = out.permute(0, 1, 3, 2, 4).reshape(B, G, P, D)
out = self.merge(out)
return out
class SwapAxes(nn.Module):
"""Permute the axes of a tensor."""
def __init__(self, a: int, b: int) -> None:
super().__init__()
self.a = a
self.b = b
def forward(self, x: torch.Tensor) -> torch.Tensor:
res = torch.swapaxes(x, self.a, self.b)
return res
class WindowPartition(nn.Module):
"""
Partition the input tensor into non-overlapping windows.
"""
def __init__(self) -> None:
super().__init__()
def forward(self, x: Tensor, p: int) -> Tensor:
"""
Args:
x (Tensor): Input tensor with expected layout of [B, C, H, W].
p (int): Number of partitions.
Returns:
Tensor: Output tensor with expected layout of [B, H/P, W/P, P*P, C].
"""
B, C, H, W = x.shape
P = p
# chunk up H and W dimensions
x = x.reshape(B, C, H // P, P, W // P, P)
x = x.permute(0, 2, 4, 3, 5, 1)
        # collapse P * P dimension
x = x.reshape(B, (H // P) * (W // P), P * P, C)
return x
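# Illustrative example (not in torchvision): WindowPartition()(x, p=4) turns a
# tensor of shape [1, 64, 8, 8] into shape [1, 4, 16, 64]
# ((8/4)*(8/4) = 4 windows of 4*4 = 16 positions each, channels last).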
class WindowDepartition(nn.Module):
"""
Departition the input tensor of non-overlapping windows into a feature volume of layout [B, C, H, W].
"""
def __init__(self) -> None:
super().__init__()
def forward(self, x: Tensor, p: int, h_partitions: int, w_partitions: int) -> Tensor:
"""
Args:
x (Tensor): Input tensor with expected layout of [B, (H/P * W/P), P*P, C].
p (int): Number of partitions.
h_partitions (int): Number of vertical partitions.
w_partitions (int): Number of horizontal partitions.
Returns:
Tensor: Output tensor with expected layout of [B, C, H, W].
"""
B, G, PP, C = x.shape
P = p
HP, WP = h_partitions, w_partitions
        # split P * P dimension into 2 P tile dimensions
x = x.reshape(B, HP, WP, P, P, C)
# permute into B, C, HP, P, WP, P
x = x.permute(0, 5, 1, 3, 2, 4)
# reshape into B, C, H, W
x = x.reshape(B, C, HP * P, WP * P)
return x
class PartitionAttentionLayer(nn.Module):
"""
Layer for partitioning the input tensor into non-overlapping windows and applying attention to each window.
Args:
in_channels (int): Number of input channels.
head_dim (int): Dimension of each attention head.
partition_size (int): Size of the partitions.
partition_type (str): Type of partitioning to use. Can be either "grid" or "window".
grid_size (Tuple[int, int]): Size of the grid to partition the input tensor into.
mlp_ratio (int): Ratio of the feature size expansion in the MLP layer.
activation_layer (Callable[..., nn.Module]): Activation function to use.
norm_layer (Callable[..., nn.Module]): Normalization function to use.
attention_dropout (float): Dropout probability for the attention layer.
mlp_dropout (float): Dropout probability for the MLP layer.
p_stochastic_dropout (float): Probability of dropping out a partition.
"""
def __init__(
self,
in_channels: int,
head_dim: int,
# partitioning parameters
partition_size: int,
partition_type: str,
# grid size needs to be known at initialization time
        # because we need to know how many relative offsets there are in the grid
grid_size: Tuple[int, int],
mlp_ratio: int,
activation_layer: Callable[..., nn.Module],
norm_layer: Callable[..., nn.Module],
attention_dropout: float,
mlp_dropout: float,
p_stochastic_dropout: float,
) -> None:
super().__init__()
self.n_heads = in_channels // head_dim
self.head_dim = head_dim
self.n_partitions = grid_size[0] // partition_size
self.partition_type = partition_type
self.grid_size = grid_size
if partition_type not in ["grid", "window"]:
raise ValueError("partition_type must be either 'grid' or 'window'")
if partition_type == "window":
self.p, self.g = partition_size, self.n_partitions
else:
self.p, self.g = self.n_partitions, partition_size
self.partition_op = WindowPartition()
self.departition_op = WindowDepartition()
self.partition_swap = SwapAxes(-2, -3) if partition_type == "grid" else nn.Identity()
self.departition_swap = SwapAxes(-2, -3) if partition_type == "grid" else nn.Identity()
self.attn_layer = nn.Sequential(
norm_layer(in_channels),
# it's always going to be partition_size ** 2 because
# of the axis swap in the case of grid partitioning
RelativePositionalMultiHeadAttention(in_channels, head_dim, partition_size**2),
nn.Dropout(attention_dropout),
)
# pre-normalization similar to transformer layers
self.mlp_layer = nn.Sequential(
nn.LayerNorm(in_channels),
nn.Linear(in_channels, in_channels * mlp_ratio),
activation_layer(),
nn.Linear(in_channels * mlp_ratio, in_channels),
nn.Dropout(mlp_dropout),
)
# layer scale factors
self.stochastic_dropout = StochasticDepth(p_stochastic_dropout, mode="row")
def forward(self, x: Tensor) -> Tensor:
"""
Args:
x (Tensor): Input tensor with expected layout of [B, C, H, W].
Returns:
Tensor: Output tensor with expected layout of [B, C, H, W].
"""
# Undefined behavior if H or W are not divisible by p
# https://github.com/google-research/maxvit/blob/da76cf0d8a6ec668cc31b399c4126186da7da944/maxvit/models/maxvit.py#L766
gh, gw = self.grid_size[0] // self.p, self.grid_size[1] // self.p
torch._assert(
self.grid_size[0] % self.p == 0 and self.grid_size[1] % self.p == 0,
"Grid size must be divisible by partition size. Got grid size of {} and partition size of {}".format(
self.grid_size, self.p
),
)
x = self.partition_op(x, self.p)
x = self.partition_swap(x)
x = x + self.stochastic_dropout(self.attn_layer(x))
x = x + self.stochastic_dropout(self.mlp_layer(x))
x = self.departition_swap(x)
x = self.departition_op(x, self.p, gh, gw)
return x
class MaxVitLayer(nn.Module):
"""
MaxVit layer consisting of a MBConv layer followed by a PartitionAttentionLayer with `window` and a PartitionAttentionLayer with `grid`.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
expansion_ratio (float): Expansion ratio in the bottleneck.
squeeze_ratio (float): Squeeze ratio in the SE Layer.
stride (int): Stride of the depthwise convolution.
activation_layer (Callable[..., nn.Module]): Activation function.
norm_layer (Callable[..., nn.Module]): Normalization function.
head_dim (int): Dimension of the attention heads.
mlp_ratio (int): Ratio of the MLP layer.
mlp_dropout (float): Dropout probability for the MLP layer.
attention_dropout (float): Dropout probability for the attention layer.
p_stochastic_dropout (float): Probability of stochastic depth.
partition_size (int): Size of the partitions.
grid_size (Tuple[int, int]): Size of the input feature grid.
"""
def __init__(
self,
# conv parameters
in_channels: int,
out_channels: int,
squeeze_ratio: float,
expansion_ratio: float,
stride: int,
# conv + transformer parameters
norm_layer: Callable[..., nn.Module],
activation_layer: Callable[..., nn.Module],
# transformer parameters
head_dim: int,
mlp_ratio: int,
mlp_dropout: float,
attention_dropout: float,
p_stochastic_dropout: float,
# partitioning parameters
partition_size: int,
grid_size: Tuple[int, int],
) -> None:
super().__init__()
layers: OrderedDict = OrderedDict()
# convolutional layer
layers["MBconv"] = MBConv(
in_channels=in_channels,
out_channels=out_channels,
expansion_ratio=expansion_ratio,
squeeze_ratio=squeeze_ratio,
stride=stride,
activation_layer=activation_layer,
norm_layer=norm_layer,
p_stochastic_dropout=p_stochastic_dropout,
)
# attention layers, block -> grid
layers["window_attention"] = PartitionAttentionLayer(
in_channels=out_channels,
head_dim=head_dim,
partition_size=partition_size,
partition_type="window",
grid_size=grid_size,
mlp_ratio=mlp_ratio,
activation_layer=activation_layer,
norm_layer=nn.LayerNorm,
attention_dropout=attention_dropout,
mlp_dropout=mlp_dropout,
p_stochastic_dropout=p_stochastic_dropout,
)
layers["grid_attention"] = PartitionAttentionLayer(
in_channels=out_channels,
head_dim=head_dim,
partition_size=partition_size,
partition_type="grid",
grid_size=grid_size,
mlp_ratio=mlp_ratio,
activation_layer=activation_layer,
norm_layer=nn.LayerNorm,
attention_dropout=attention_dropout,
mlp_dropout=mlp_dropout,
p_stochastic_dropout=p_stochastic_dropout,
)
self.layers = nn.Sequential(layers)
def forward(self, x: Tensor) -> Tensor:
"""
Args:
x (Tensor): Input tensor of shape (B, C, H, W).
Returns:
Tensor: Output tensor of shape (B, C, H, W).
"""
x = self.layers(x)
return x
class MaxVitBlock(nn.Module):
"""
A MaxVit block consisting of `n_layers` MaxVit layers.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
expansion_ratio (float): Expansion ratio in the bottleneck.
squeeze_ratio (float): Squeeze ratio in the SE Layer.
activation_layer (Callable[..., nn.Module]): Activation function.
norm_layer (Callable[..., nn.Module]): Normalization function.
head_dim (int): Dimension of the attention heads.
mlp_ratio (int): Ratio of the MLP layer.
mlp_dropout (float): Dropout probability for the MLP layer.
attention_dropout (float): Dropout probability for the attention layer.
p_stochastic_dropout (float): Probability of stochastic depth.
partition_size (int): Size of the partitions.
input_grid_size (Tuple[int, int]): Size of the input feature grid.
n_layers (int): Number of layers in the block.
p_stochastic (List[float]): List of probabilities for stochastic depth for each layer.
"""
def __init__(
self,
# conv parameters
in_channels: int,
out_channels: int,
squeeze_ratio: float,
expansion_ratio: float,
# conv + transformer parameters
norm_layer: Callable[..., nn.Module],
activation_layer: Callable[..., nn.Module],
# transformer parameters
head_dim: int,
mlp_ratio: int,
mlp_dropout: float,
attention_dropout: float,
# partitioning parameters
partition_size: int,
input_grid_size: Tuple[int, int],
# number of layers
n_layers: int,
p_stochastic: List[float],
) -> None:
super().__init__()
if not len(p_stochastic) == n_layers:
raise ValueError(f"p_stochastic must have length n_layers={n_layers}, got p_stochastic={p_stochastic}.")
self.layers = nn.ModuleList()
# account for the first stride of the first layer
self.grid_size = _get_conv_output_shape(input_grid_size, kernel_size=3, stride=2, padding=1)
for idx, p in enumerate(p_stochastic):
stride = 2 if idx == 0 else 1
self.layers += [
MaxVitLayer(
in_channels=in_channels if idx == 0 else out_channels,
out_channels=out_channels,
squeeze_ratio=squeeze_ratio,
expansion_ratio=expansion_ratio,
stride=stride,
norm_layer=norm_layer,
activation_layer=activation_layer,
head_dim=head_dim,
mlp_ratio=mlp_ratio,
mlp_dropout=mlp_dropout,
attention_dropout=attention_dropout,
partition_size=partition_size,
grid_size=self.grid_size,
p_stochastic_dropout=p,
),
]
def forward(self, x: Tensor) -> Tensor:
"""
Args:
x (Tensor): Input tensor of shape (B, C, H, W).
Returns:
Tensor: Output tensor of shape (B, C, H, W).
"""
for layer in self.layers:
x = layer(x)
return x
class MaxVit(nn.Module):
"""
Implements MaxVit Transformer from the `MaxViT: Multi-Axis Vision Transformer <https://arxiv.org/abs/2204.01697>`_ paper.
Args:
input_size (Tuple[int, int]): Size of the input image.
stem_channels (int): Number of channels in the stem.
partition_size (int): Size of the partitions.
block_channels (List[int]): Number of channels in each block.
block_layers (List[int]): Number of layers in each block.
stochastic_depth_prob (float): Probability of stochastic depth. Expands to a list of probabilities for each layer that scales linearly to the specified value.
squeeze_ratio (float): Squeeze ratio in the SE Layer. Default: 0.25.
expansion_ratio (float): Expansion ratio in the MBConv bottleneck. Default: 4.
norm_layer (Callable[..., nn.Module]): Normalization function. Default: None (setting to None will produce a `BatchNorm2d(eps=1e-3, momentum=0.99)`).
activation_layer (Callable[..., nn.Module]): Activation function Default: nn.GELU.
head_dim (int): Dimension of the attention heads.
mlp_ratio (int): Expansion ratio of the MLP layer. Default: 4.
mlp_dropout (float): Dropout probability for the MLP layer. Default: 0.0.
attention_dropout (float): Dropout probability for the attention layer. Default: 0.0.
num_classes (int): Number of classes. Default: 1000.
"""
def __init__(
self,
# input size parameters
input_size: Tuple[int, int],
# stem and task parameters
stem_channels: int,
# partitioning parameters
partition_size: int,
# block parameters
block_channels: List[int],
block_layers: List[int],
# attention head dimensions
head_dim: int,
stochastic_depth_prob: float,
# conv + transformer parameters
# norm_layer is applied only to the conv layers
# activation_layer is applied both to conv and transformer layers
norm_layer: Optional[Callable[..., nn.Module]] = None,
activation_layer: Callable[..., nn.Module] = nn.GELU,
# conv parameters
squeeze_ratio: float = 0.25,
expansion_ratio: float = 4,
# transformer parameters
mlp_ratio: int = 4,
mlp_dropout: float = 0.0,
attention_dropout: float = 0.0,
# task parameters
num_classes: int = 1000,
) -> None:
super().__init__()
_log_api_usage_once(self)
input_channels = 3
# https://github.com/google-research/maxvit/blob/da76cf0d8a6ec668cc31b399c4126186da7da944/maxvit/models/maxvit.py#L1029-L1030
# for the exact parameters used in batchnorm
if norm_layer is None:
norm_layer = partial(nn.BatchNorm2d, eps=1e-3, momentum=0.99)
# Make sure input size will be divisible by the partition size in all blocks
# Undefined behavior if H or W are not divisible by p
# https://github.com/google-research/maxvit/blob/da76cf0d8a6ec668cc31b399c4126186da7da944/maxvit/models/maxvit.py#L766
block_input_sizes = _make_block_input_shapes(input_size, len(block_channels))
for idx, block_input_size in enumerate(block_input_sizes):
if block_input_size[0] % partition_size != 0 or block_input_size[1] % partition_size != 0:
raise ValueError(
f"Input size {block_input_size} of block {idx} is not divisible by partition size {partition_size}. "
f"Consider changing the partition size or the input size.\n"
f"Current configuration yields the following block input sizes: {block_input_sizes}."
)
# stem
self.stem = nn.Sequential(
Conv2dNormActivation(
input_channels,
stem_channels,
3,
stride=2,
norm_layer=norm_layer,
activation_layer=activation_layer,
bias=False,
inplace=None,
),
Conv2dNormActivation(
stem_channels, stem_channels, 3, stride=1, norm_layer=None, activation_layer=None, bias=True
),
)
# account for stem stride
input_size = _get_conv_output_shape(input_size, kernel_size=3, stride=2, padding=1)
self.partition_size = partition_size
# blocks
self.blocks = nn.ModuleList()
in_channels = [stem_channels] + block_channels[:-1]
out_channels = block_channels
        # precompute the stochastic depth probabilities from 0 to stochastic_depth_prob
# since we have N blocks with L layers, we will have N * L probabilities uniformly distributed
# over the range [0, stochastic_depth_prob]
p_stochastic = np.linspace(0, stochastic_depth_prob, sum(block_layers)).tolist()
p_idx = 0
for in_channel, out_channel, num_layers in zip(in_channels, out_channels, block_layers):
self.blocks.append(
MaxVitBlock(
in_channels=in_channel,
out_channels=out_channel,
squeeze_ratio=squeeze_ratio,
expansion_ratio=expansion_ratio,
norm_layer=norm_layer,
activation_layer=activation_layer,
head_dim=head_dim,
mlp_ratio=mlp_ratio,
mlp_dropout=mlp_dropout,
attention_dropout=attention_dropout,
partition_size=partition_size,
input_grid_size=input_size,
n_layers=num_layers,
p_stochastic=p_stochastic[p_idx : p_idx + num_layers],
),
)
input_size = self.blocks[-1].grid_size
p_idx += num_layers
# see https://github.com/google-research/maxvit/blob/da76cf0d8a6ec668cc31b399c4126186da7da944/maxvit/models/maxvit.py#L1137-L1158
# for why there is Linear -> Tanh -> Linear
self.classifier = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Flatten(),
nn.LayerNorm(block_channels[-1]),
nn.Linear(block_channels[-1], block_channels[-1]),
nn.Tanh(),
nn.Linear(block_channels[-1], num_classes, bias=False),
)
self._init_weights()
def forward(self, x: Tensor) -> Tensor:
x = self.stem(x)
for block in self.blocks:
x = block(x)
x = self.classifier(x)
return x
def _init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.zeros_(m.bias)
def _maxvit(
# stem parameters
stem_channels: int,
# block parameters
block_channels: List[int],
block_layers: List[int],
stochastic_depth_prob: float,
# partitioning parameters
partition_size: int,
# transformer parameters
head_dim: int,
# Weights API
weights: Optional[WeightsEnum] = None,
progress: bool = False,
# kwargs,
**kwargs: Any,
) -> MaxVit:
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
assert weights.meta["min_size"][0] == weights.meta["min_size"][1]
_ovewrite_named_param(kwargs, "input_size", weights.meta["min_size"])
input_size = kwargs.pop("input_size", (224, 224))
model = MaxVit(
stem_channels=stem_channels,
block_channels=block_channels,
block_layers=block_layers,
stochastic_depth_prob=stochastic_depth_prob,
head_dim=head_dim,
partition_size=partition_size,
input_size=input_size,
**kwargs,
)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
class MaxVit_T_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/maxvit_t-bc5ab103.pth",
transforms=partial(
ImageClassification, crop_size=224, resize_size=224, interpolation=InterpolationMode.BICUBIC
),
meta={
"categories": _IMAGENET_CATEGORIES,
"num_params": 30919624,
"min_size": (224, 224),
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#maxvit",
"_metrics": {
"ImageNet-1K": {
"acc@1": 83.700,
"acc@5": 96.722,
}
},
"_ops": 5.558,
"_file_size": 118.769,
"_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""",
},
)
DEFAULT = IMAGENET1K_V1
@register_model()
@handle_legacy_interface(weights=("pretrained", MaxVit_T_Weights.IMAGENET1K_V1))
def maxvit_t(*, weights: Optional[MaxVit_T_Weights] = None, progress: bool = True, **kwargs: Any) -> MaxVit:
"""
Constructs a maxvit_t architecture from
`MaxViT: Multi-Axis Vision Transformer <https://arxiv.org/abs/2204.01697>`_.
Args:
weights (:class:`~torchvision.models.MaxVit_T_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.MaxVit_T_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.models.maxvit.MaxVit``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/maxvit.py>`_
for more details about this class.
.. autoclass:: torchvision.models.MaxVit_T_Weights
:members:
"""
weights = MaxVit_T_Weights.verify(weights)
return _maxvit(
stem_channels=64,
block_channels=[64, 128, 256, 512],
block_layers=[2, 2, 5, 2],
head_dim=32,
stochastic_depth_prob=0.2,
partition_size=7,
weights=weights,
progress=progress,
**kwargs,
)
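# Added usage sketch (not part of the original module): it assumes a torchvision
# build exposing maxvit_t / MaxVit_T_Weights is installed, and runs a randomly
# initialised model on a dummy 224x224 batch.
#
# import torch
# from torchvision.models import maxvit_t, MaxVit_T_Weights
#
# model = maxvit_t(weights=None)  # or weights=MaxVit_T_Weights.DEFAULT for pretrained
# model.eval()
# with torch.no_grad():
#     logits = model(torch.randn(1, 3, 224, 224))
# print(logits.shape)  # expected: torch.Size([1, 1000])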
|
# def main(a):
# if a%2 ==0:
# print('even')
# else:
# print('odd')
# main(10)
# def main(a):
# for i in range(2,a):
# if(a % i) == 0:
# print(a,'is not prime')
# break
# else:
# print(a,'is prime')
# break
# main(7)
# import random
# import numpy as np
# COUNT = 1
# def main(u,p):
# global COUNT
# u = input('Enter account: ')
# p = input('Enter password: ')
# y = random.randrange(1000,9999)
# res = int(input('The verification code is: %d' %y))
# np.random.choice(['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z'
# ])
# if res == y:
# if u == 'admin' and p == '123456':
# print('Login successful')
# else:
# print('Wrong verification code, please try again')
# if COUNT !=3:
# COUNT += 1
# main(u,p)
# else:
# print('Too many attempts, please contact customer service at 13838384381')
# main('admin','123456')
# # pyMySQL; to modify an outer variable inside a function body, declare it global
# def login():
# username = input('zhang')
# password = input('zhang')
# if username == '' and password == '':
# def verify():
# def verify():
# pass
# def error():
# pass
# def API():
# pass
# def start():
# login
# Homework 1:
# def getPentagonalNumber(n):
# count=0
# for i in range(1,n+1):
# num = i*(i*3-1)/2
# print('%d' %num,end = ' ')
# count +=1
# if count % 10 ==0:
# print('\n')
# getPentagonalNumber(100)
# Homework 2:
# def sumDigits(n):
# count = 1
# nu = input('Enter an integer: >>')
# # Homework 3:
# def displaySortedNumbers(num1,num2,num3):
# num1,num2,num3 = map(int(input('Enter three integers: >>'))).split(',')
# Homework 4:
# def printChars():
# for i in range(73,91):
# print(chr(i),end=" ")
# if i% 10 == 0:
# print("\n")
# printChars()
# Homework 5:
# def numberOfDaysInAYear(year):
# for count in range(year,year+11):
# if count % 4 == 0 and count % 100 != 0 or count % 400 == 0:
# print("Year {} has 366 days".format(count))
# else:
# print("Year {} has 365 days".format(count))
# numberOfDaysInAYear(2016)
# Homework 6:
# def distance(x1,x2,y1,y2):
# dis =((x2-x1)**2+(y2-y1)**2)**0.5
# print("The distance between the two points is: %f" %dis)
# distance(1,4,4,2)
# Homework 7:
# import time
# def main():
# localtime = time.asctime(time.localtime(time.time()))
# print("Local time is:", localtime)
# main()
# Homework 8:
# import random
# def sz():
# a=random.choice([1,2,3,4,5,6])
# b=random.choice([1,2,3,4,5,6])
# if a+b==2 or a+b==3 or a+b==12:
# print('%d + %d = %d' %(a,b,a+b))
# print('You lose')
# elif a+b==7 or a+b==11:
# print('%d + %d = %d' %(a,b,a+b))
# print('You win')
# else:
# print('%d + %d = %d' %(a,b,a+b))
# c=random.choice([1,2,3,4,5,6])
# d=random.choice([1,2,3,4,5,6])
# if c+d==7:
# print('%d + %d = %d' %(c,d,c+d))
# print('You lose')
# elif c+d==a+b:
# print('%d + %d = %d' %(c,d,c+d))
# print('You win')
# sz()
|
# baseline3.py
# Author: Neha Bhagwat
from gutenberg.acquire import load_etext
from gutenberg.cleanup import strip_headers
from gutenberg.query import get_etexts, get_metadata
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.tree import Tree
from nltk.chunk import ne_chunk
from itertools import chain, groupby
from collections import defaultdict
import urllib2, urllib
import os
import json
from bs4 import BeautifulSoup
import random
from PIL import Image
import itertools
import subprocess
import time
import operator
try:
import wikipedia
except ImportError as e:
print "Library could not be imported. Web page will not be created"
import datetime
try:
import matplotlib.pyplot as plt
except ImportError as e:
print "Matplotlib could not be imported. Graphs will not be plotted"
NOUN_TAGS = ["NN", "NNS", "NNP", "NNPS"]
VERB_TAGS = ["VB", "VBD", "VBG", "VBP", "VBN", "VBP", "VBZ"]
class GoogleImageSearch:
def __init__(self,location):
self.location = location
def download_image(self):
keyword = self.location
print "--------------------------Extracting image for " + str(self.location) + "--------------------------"
image_type = "Image"
keyword = keyword.split()
keyword = '+'.join(keyword)
url = "https://www.google.com/search?q=" + keyword + "&source=lnms&tbm=isch"
# print url
# DIR_PATH represents the path of the directory where the images will be stored
DIR_PATH = "Pictures"
header = {
'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"
}
soup = BeautifulSoup(urllib2.urlopen(urllib2.Request(url, headers=header)), 'html.parser')
ImagesList = []
for a in soup.find_all("div", {"class": "rg_meta"}):
link, type = json.loads(a.text)["ou"], json.loads(a.text)["ity"]
ImagesList.append((link, type))
# print "no of images is: " + str(len(ImagesList))
dir_name = keyword.split('+')
dir_name = '_'.join(dir_name)
if not os.path.exists(DIR_PATH):
os.mkdir(DIR_PATH)
DIR_PATH = os.path.abspath(os.path.join(DIR_PATH, dir_name))
if not os.path.exists(DIR_PATH):
os.mkdir(DIR_PATH)
num = random.randint(0,10)
img, Type = ImagesList[num]
try:
            req = urllib2.Request(img, headers=header)  # header is already the full headers dict
raw_img = urllib2.urlopen(req).read()
cntr = len([i for i in os.listdir(DIR_PATH) if image_type in i]) + 1
if len(Type) == 0:
                f = open(os.path.join(DIR_PATH, image_type + "_" + str(cntr) + ".jpg"), 'wb')
print ("Image can be found at: " + str(os.path.join(DIR_PATH, image_type + "_" + str(cntr) + ".jpg")))
# img_display = Image.open(str((os.path.join(DIR_PATH, image_type + "_" + str(cntr) + ".jpg")).encode('utf-8')))
# img_display.show()
# img_display.close()
else:
f = open(os.path.join(DIR_PATH, image_type + "_" + str(cntr) + "." + Type), 'wb')
print ("Image can be found at: " + str(os.path.join(DIR_PATH, image_type + "_" + str(cntr) + Type)))
# img_display = Image.open(os.path.join(DIR_PATH, image_type + "_" + str(cntr) + Type))
# img_display.show()
# img_display.close()
# print "now writing image"
f.write(raw_img)
f.close()
except Exception as e:
print "could not load: " + img
print e
class DataFromGutenberg:
def __init__(self, id):
self.bookID = id
self.book_text = ""
self.author = ""
self.title = ""
def read_book(self):
text = strip_headers(load_etext(self.bookID)).strip()
self.book_text = text
def extract_metadata(self):
url = "http://www.gutenberg.org/ebooks/" + str(self.bookID)
page = urllib.urlopen(url)
soup = BeautifulSoup(page,'html.parser')
table = soup.find('div', attrs = {'id':'bibrec'})
th_list = table.find_all('th')
td_list = table.find_all('td')
for i in range(0, len(th_list)):
if th_list[i].text == 'Author':
self.author = td_list[i].text
elif th_list[i].text == 'Title':
self.title = td_list[i].text
print self.title
if self.author == "":
self.author = "Author not found"
if self.title == "":
            self.title = "Title not found"
class TagData:
def __init__(self, gutenberg_object):
self.gutenberg_object = gutenberg_object
self.sentences = []
def extract_sentences(self):
self.sentences = sent_tokenize(self.gutenberg_object.book_text)
def extract_names(self):
names = []
for sentence in self.sentences:
text = word_tokenize(sentence)
tags = nltk.pos_tag(text)
# for chunk in ne_chunk(tags):
# if isinstance(chunk, Tree):
# print chunk
for i in list(chain(*[chunk.leaves() for chunk in ne_chunk(tags) if isinstance(chunk, Tree)])):
names.extend(i)
unique_names = list(set(names))
unique_names.remove("NNS")
unique_names.remove("NNP")
unique_names.remove("NNPS")
print "unique names: ", unique_names
return unique_names
def tag_book_text(self):
self.extract_sentences()
self.unique_names = self.extract_names()
for i in range(0, len(self.unique_names)):
self.unique_names[i] = str(self.unique_names[i].encode('utf-8'))
# print self.unique_names
class Interactions:
def __init__(self, tag_object):
self.tag_object = tag_object
def find_interactions(self):
gut_obj = self.tag_object.gutenberg_object
temp_interactions = defaultdict(list)
interaction_count = 0
sentences = nltk.sent_tokenize(gut_obj.book_text)
sentences = [nltk.word_tokenize(sent) for sent in sentences]
sentences = [nltk.pos_tag(sent) for sent in sentences]
grammar = r"""
NP: {<PERSON> <.*>* <V.*> <.*>* <PERSON>}
"""
r_parser = nltk.RegexpParser(grammar)
for sentence in sentences:
tree = r_parser.parse(nltk.ne_chunk(sentence))
for subtree in tree.subtrees():
if subtree.label() == 'NP':
if subtree != None:
# print "here"
interaction_count += 1
# temp_interactions[interaction_count].append([word for word, pos in subtree.pos() if pos == 'PERSON'])
word_list = []
for word, pos in subtree.pos():
if pos == 'PERSON':
act_word, tag = word
word_list.append(act_word.encode('utf-8'))
temp_interactions[interaction_count].append(word_list)
# print temp_interactions
interactions = {}
interactor_list = []
num_interactions = []
for interaction in temp_interactions.itervalues():
for character in interaction[0]:
for oth_character in interaction[0]:
if character != oth_character:
# print character, oth_character
if (character + ", " + oth_character) in interactions.iterkeys():
interactions[character + ", " + oth_character] +=1
elif (oth_character + ", " + character) in interactions.iterkeys():
interactions[oth_character + ", " + character] += 1
else:
interactions[character + ", " + oth_character] = 1
print "-----------------------------------------------------------------------------------"
print "INTERACTIONS:"
print interactions
print "-----------------------------------------------------------------------------------"
sorted_interactions = sorted(interactions.items(), key=operator.itemgetter(1))
for interaction in interactions.iterkeys():
interactor_list.append(interaction)
num_interactions.append(interactions[interaction])
try:
fig, ax = plt.subplots()
plt.title("Plot of characters v/c interactions")
plt.xlabel("Character pairs")
plt.ylabel("Number of interactions")
plt.bar(interactor_list[len(interactor_list)-10:], num_interactions[len(num_interactions)-10:], color="black")
for tick in ax.get_xticklabels():
tick.set_rotation(20)
legend_text = "X axis:1 unit=character pair\nY axis:2 units=1 interaction"
plt.annotate(legend_text, (0.85, 1), xycoords='axes fraction')
plt.savefig("person_person_interactions.jpg")
except Exception as e:
print "Graph could not be generated due to the following error:"
print e
def find_associations(self):
gut_obj = self.tag_object.gutenberg_object
associations = []
association_count = 0
sentences = nltk.sent_tokenize(gut_obj.book_text)
sentences = [nltk.word_tokenize(sent) for sent in sentences]
sentences = [nltk.pos_tag(sent) for sent in sentences]
grammar = r"""
NP: { <.>* <PERSON> <.*>+ <GPE> <.>* }
{ <.>* <PERSON> <GPE> <.*>+ }
{ <.>* <GPE> <.*>+ <PERSON> <.>* }
{ <.>* <GPE> <PERSON> <.*>+ }
{ <.>* <PERSON> <.*>+ <LOCATION> <.>* }
"""
r_parser = nltk.RegexpParser(grammar)
individual_associations = {}
for sentence in sentences:
statement = ""
for ele in sentence:
word, tag = ele
statement += (str(word.encode('utf-8')) + " ")
# print sentence
tree = r_parser.parse(nltk.ne_chunk(sentence))
for subtree in tree.subtrees():
if subtree.label() == 'NP':
if subtree != None:
# print "here"
association_count += 1
# associations[association_count].append([word for word, pos in subtree.pos() if pos == 'PERSON' or 'LOCATION' or 'GPE'])
association = defaultdict(list)
person_list = []
location_list = []
for word, pos in subtree.pos():
if pos == 'PERSON':
act_word, tag = word
person_list.append(act_word)
                            elif pos in ('LOCATION', 'GPE'):
                                act_word, tag = word
                                # print act_word
                                if act_word not in (',', '.', '..', '...'):
location_list.append(act_word)
association[association_count].append({'PERSON':person_list, 'LOCATION':location_list, 'SENTENCE': statement})
associations.append(association)
for character in person_list:
for loc in location_list:
# print character, oth_character
if (character + ", " + loc) in individual_associations.iterkeys():
individual_associations[character + ", " + loc] += 1
elif (loc + ", " + character) in individual_associations.iterkeys():
individual_associations[loc + ", " + character] += 1
else:
individual_associations[character + ", " + loc] = 1
# print association
# print association_count
# print "\n\n\n\n\n\n"
print "-----------------------------------------------------------------------------------"
print "ASSOCIATIONS: "
for association in associations:
print association
print "-----------------------------------------------------------------------------------"
# output_file = open('output_file.txt', 'w')
# output_file.write(associations)
# output_file.close()
assoc_list = []
num_assoc = []
sorted_interactions = sorted(individual_associations.items(), key=operator.itemgetter(1))
for interaction in individual_associations.iterkeys():
assoc_list.append(interaction)
num_assoc.append(individual_associations[interaction])
try:
fig, ax = plt.subplots()
plt.title("Plot of characters v/c location interactions")
plt.xlabel("Character pairs")
plt.ylabel("Number of interactions")
plt.bar(assoc_list[len(assoc_list) - 10:], num_assoc[len(num_assoc) - 10:],
color="black")
for tick in ax.get_xticklabels():
tick.set_rotation(20)
# legend_text = "X axis:1 unit=character-location pair\nY axis:2 units=1 interaction"
# plt.annotate(legend_text, (0.85, 1), xycoords='axes fraction')
plt.savefig("person_location_interactions.jpg")
except Exception as e:
print "Graph could not be generated due to the following error:"
print e
self.associations = associations
class ImageDownload:
def __init__(self, interactions_object):
self.interactions_object = interactions_object
def extract_image(self):
ele_list = random.sample(range(0, len(self.interactions_object.associations)), 10)
for ele in ele_list:
association = self.interactions_object.associations[ele]
# print association
# print ele
sentence = association[ele+1][0]['SENTENCE']
print sentence
os.chdir("jythonMusic")
process_jython = subprocess.Popen([".\jython.bat", "-i", "text_to_music.py", sentence],shell=True)
time.sleep(5)
process_jython.terminate()
os.chdir('..')
locations = association[ele+1][0]['LOCATION']
for location in locations:
location = str(location.encode('utf-8'))
search_object = GoogleImageSearch(location)
search_object.download_image()
class CreateHTMLPage:
def __init__(self, gutenberg_object, tag_object, interactions_object, download_object):
self.gutenberg_object = gutenberg_object
self.tag_object = tag_object
self.interactions_object = interactions_object
self.download_object = download_object
def extract_wiki_summary(self, keyword):
summary = ""
book_url = ""
try:
summary = wikipedia.summary(keyword)
page = wikipedia.page(keyword)
book_url = page.url
except Exception as e:
print e
finally:
if summary == "":
summary = "Summary not found"
if book_url == "":
book_url = "URL not found"
self.summary = summary
self.book_url = book_url
return summary
def extract_date(self, book_url):
url = book_url
page = urllib.urlopen(url)
soup = BeautifulSoup(page, 'html.parser')
table = soup.find('table', attrs={'class': 'infobox vcard'})
th_list = table.find_all('th')
td_list = table.find_all('td')
for i in range(0, len(th_list)):
print th_list[i].text
try:
for i in range(0, len(th_list)):
print th_list[i].text
if th_list[i].text == 'Publication date':
self.pub_date = td_list[i].text
except:
print "Could not extract publication date"
def create_page(self):
out_file = open('BigIdea.html', 'w')
out_file.write("<!DOCTYPE html>\n<html>\n<body>\n<h1 align='center'>\n<b>" + str(self.gutenberg_object.title) + "</b>\n</h1>\
\n<h2 align = 'center'>" + str(self.gutenberg_object.author) + "</h2>")
book_summary = self.extract_wiki_summary(self.gutenberg_object.title)
date = self.extract_date(self.book_url)
author_summary = self.extract_wiki_summary(self.gutenberg_object.author)
out_file.write("\n<h3>\nBook Summary:\n</h3>\n<p>" + str(book_summary.encode('utf-8')) + "</p>\n" )
out_file.write("\n<h3>\nAuthor Summary:\n</h3>\n<p>" + str(author_summary.encode('utf-8')) + "</p>\n")
try:
out_file.write("\n<b>Publication Date: </b>" + str(self.pub_date))
except:
print "Did not find Publication date"
# print book_summary
# print author_summary
out_file.write("\n</body>\n</html>")
out_file.close()
class ImplementNLTK:
def __init__(self, book_ID):
gutenberg_object = DataFromGutenberg(book_ID)
gutenberg_object.read_book()
gutenberg_object.extract_metadata()
tag_object = TagData(gutenberg_object)
tag_object.tag_book_text()
interactions_object = Interactions(tag_object)
interactions_object.find_interactions()
# interactions_object.find_associations()
# download_object = ImageDownload(interactions_object)
# download_object.extract_image()
# page_object = CreateHTMLPage(gutenberg_object, tag_object, interactions_object, download_object)
# try:
# page_object.create_page()
# except Exception as e:
# print "Could not create web page due to following error"
# print e
def main():
# print "Enter the book ID of the book you want to access from the Gutenberg project"
# book_ID = 11 # Alice's Adventures in Wonderland
# book_ID = 8599 # Arabian Nights
# book_ID = 53499 # Martin Luther
# book_ID = 3289
book_ID = 48320
# book_ID = 11212 # Modern India
# book_ID = int(raw_input())
nltk_object = ImplementNLTK(book_ID)
if __name__ == "__main__":
main()
|
from django.contrib import admin
from django.urls import path, include
from rest_framework_swagger.views import get_swagger_view
from django.conf.urls import url
schema_view = get_swagger_view(title='Analytica API')
urlpatterns = [
path('admin/', admin.site.urls),
#path('hho/', include('oauth2_provider.urls')),
url(r'^', include('dataprocessing.urls')),
url(r'^', include('workprogramsapp.urls')),
url(r'^', include('onlinecourse.urls')),
url(r'^', include('records.urls')),
path('auth/', include('djoser.urls')),
path('auth/', include('djoser.urls.authtoken')),
path('auth/', include('djoser.urls.jwt')),
#path('auth/social/itmo/', ItmoOAuth2),
# path('djoser/auth/social/', include('djoser.social.urls')),
# path("api/accounts/", include("accounts.urls")),
path(r'swagger-docs/', schema_view),
#url(r'^auth0/', include('rest_framework_social_oauth2.urls')),
#url(r'^social-docs/', include('social_django.urls')),
#path('hho/', include('oauth2_provider.urls')),
#url(r'social_auth/', include('social_auth.urls')),
# url(r'^login/', include('rest_social_auth.urls_jwt_pair')),
# url(r'^login/', include('rest_social_auth.urls_jwt')),
]
|
import argparse
import math
import os
import random
import time
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import spacy
import torch
import torch.nn as nn
import torch.optim as optim
from torchtext.data import Field, BucketIterator
from torchtext.data.metrics import bleu_score
# from torchtext.datasets import WMT14
from torchtext.datasets import Multi30k
# sec: args
from models import Seq2Seq, Decoder, Encoder, Attention, init_weights
parser = argparse.ArgumentParser()
parser.add_argument('--run_local', default=False, action='store_true')
parser.add_argument('--fixed', default=False, action='store_true')
parser.add_argument('--cosine', default=False, action='store_true')
parser.add_argument('--cuda', type=int, default=0)
parser.add_argument('--runname', type=str)
parser.add_argument('--batch_size', type=int, default=128)
args = parser.parse_args()
# sec: wandb
if not args.run_local:
import wandb
wandb.init(project="Neural_Machine_Translation", name=args.runname, dir='/yoav_stg/gshalev/wandb')
# S funcs
def calculate_bleu(data, src_field, trg_field, model, device, max_len=50):
trgs = []
pred_trgs = []
for datum in data:
src = vars(datum)['src']
trg = vars(datum)['trg']
pred_trg, _ = translate_sentence(src, src_field, trg_field, model, device, max_len)
# cut off <eos> token
pred_trg = pred_trg[:-1]
pred_trgs.append(pred_trg)
trgs.append([trg])
return bleu_score(pred_trgs, trgs)
def tokenize_de(text):
"""
Tokenizes German text from a string into a list of strings
"""
return [tok.text for tok in spacy_de.tokenizer(text)]
def tokenize_en(text):
"""
Tokenizes English text from a string into a list of strings
"""
return [tok.text for tok in spacy_en.tokenizer(text)]
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def train(model, iterator, optimizer, criterion, clip):
model.train()
epoch_loss = 0
for i, batch in enumerate(iterator):
# if i > 3: # TODELETE
# break
start = time.time()
src, src_len = batch.src
trg = batch.trg
optimizer.zero_grad()
output = model(src, src_len, trg)
# trg = [trg len, batch size]
# output = [trg len, batch size, output dim]
output_dim = output.shape[-1]
output = output[1:].view(-1, output_dim)
trg = trg[1:].view(-1)
# trg = [(trg len - 1) * batch size]
# output = [(trg len - 1) * batch size, output dim]
loss = criterion(output, trg)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
optimizer.step()
epoch_loss += loss.item()
if i % print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Batch Time {batch_time:.3f}\t'
'Loss {loss:.4f})\t'
.format(epoch, i, len(iterator),
batch_time=time.time() - start,
loss=loss.item()
))
return epoch_loss / len(iterator)
def evaluate(model, iterator, criterion):
model.eval()
epoch_loss = 0
with torch.no_grad():
for i, batch in enumerate(iterator):
# if i > 3: # TODELETE
# break
start = time.time()
src, src_len = batch.src
trg = batch.trg
output = model(src, src_len, trg, 0) # turn off teacher forcing
# trg = [trg len, batch size]
# output = [trg len, batch size, output dim]
output_dim = output.shape[-1]
output = output[1:].view(-1, output_dim)
trg = trg[1:].view(-1)
# trg = [(trg len - 1) * batch size]
# output = [(trg len - 1) * batch size, output dim]
loss = criterion(output, trg)
epoch_loss += loss.item()
if i % print_freq == 0:
print('Eval: Epoch: [{0}][{1}/{2}]\t'
'Batch Time {batch_time:.3f}\t'
'Loss {loss:.4f})\t'
.format(epoch, i, len(iterator),
batch_time=time.time() - start,
loss=loss.item()
))
return epoch_loss / len(iterator)
def epoch_time(start_time, end_time):
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
def translate_sentence(sentence, src_field, trg_field, model, device, max_len=50):
model.eval()
if isinstance(sentence, str):
nlp = spacy.load('de')
tokens = [token.text.lower() for token in nlp(sentence)]
else:
tokens = [token.lower() for token in sentence]
tokens = [src_field.init_token] + tokens + [src_field.eos_token]
src_indexes = [src_field.vocab.stoi[token] for token in tokens]
src_tensor = torch.LongTensor(src_indexes).unsqueeze(1).to(device)
src_len = torch.LongTensor([len(src_indexes)]).to(device)
with torch.no_grad():
encoder_outputs, hidden = model.encoder(src_tensor, src_len)
mask = model.create_mask(src_tensor)
trg_indexes = [trg_field.vocab.stoi[trg_field.init_token]]
attentions = torch.zeros(max_len, 1, len(src_indexes)).to(device)
for i in range(max_len):
trg_tensor = torch.LongTensor([trg_indexes[-1]]).to(device)
with torch.no_grad():
output, hidden, attention = model.decoder(trg_tensor, hidden, encoder_outputs, mask)
attentions[i] = attention
pred_token = output.argmax(1).item()
trg_indexes.append(pred_token)
if pred_token == trg_field.vocab.stoi[trg_field.eos_token]:
break
trg_tokens = [trg_field.vocab.itos[i] for i in trg_indexes]
return trg_tokens[1:], attentions[:len(trg_tokens) - 1]
def display_attention(sentence, translation, attention):
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111)
attention = attention.squeeze(1).cpu().detach().numpy()
cax = ax.matshow(attention, cmap='bone')
ax.tick_params(labelsize=15)
ax.set_xticklabels([''] + ['<sos>'] + [t.lower() for t in sentence] + ['<eos>'],
rotation=45)
ax.set_yticklabels([''] + translation)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
plt.show()
plt.close()
def get_embeddings(embedding_size, vocab_size):
word2vec_dictionary = dict()
for cls_idx in range(vocab_size):
v = np.random.randint(low=-100, high=100, size=embedding_size)
v = v / np.linalg.norm(v)
word2vec_dictionary[cls_idx] = torch.from_numpy(v).float()
w2v_matrix = torch.stack(list(word2vec_dictionary.values()), dim=1)
return w2v_matrix
# sec: initialization
print_freq = 100
SEED = 1234
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
spacy_de = spacy.load('de_core_news_sm')
spacy_en = spacy.load('en_core_web_sm')
SRC = Field(tokenize=tokenize_de,
init_token='<sos>',
eos_token='<eos>',
lower=True,
include_lengths=True)
TRG = Field(tokenize=tokenize_en,
init_token='<sos>',
eos_token='<eos>',
lower=True)
# sec: data
print('before: train_data, valid_data, test_data')
data_path = '.data' if args.run_local else '/yoav_stg/gshalev/semantic_labeling/Multi30k'
train_data, valid_data, test_data = Multi30k.splits(exts=('.de', '.en'), fields=(SRC, TRG), root=data_path)
print('completed: train_data, valid_data, test_data')
# sec: Build the vocabulary
SRC.build_vocab(train_data, min_freq=2)
TRG.build_vocab(train_data, min_freq=2)
# sec: Define the device
device = torch.device('cuda:{}'.format(args.cuda) if torch.cuda.is_available() else 'cpu')
# sec: Create the iterators
BATCH_SIZE = args.batch_size
print('before: train_iterator, valid_iterator, test_iterator')
train_iterator, valid_iterator, test_iterator = BucketIterator.splits((train_data, valid_data, test_data),
batch_size=BATCH_SIZE,
sort_within_batch=True,
sort_key=lambda x: len(x.src),
device=device)
print('completed: train_iterator, valid_iterator, test_iterator')
# sec: initialization
INPUT_DIM = len(SRC.vocab)
OUTPUT_DIM = len(TRG.vocab)
ENC_EMB_DIM = 256
DEC_EMB_DIM = 256
ENC_HID_DIM = 512
DEC_HID_DIM = 512
ENC_DROPOUT = 0.5
DEC_DROPOUT = 0.5
SRC_PAD_IDX = SRC.vocab.stoi[SRC.pad_token]
# sec: models
attn = Attention(ENC_HID_DIM, DEC_HID_DIM)
enc = Encoder(INPUT_DIM, ENC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, ENC_DROPOUT)
dec = Decoder(OUTPUT_DIM, DEC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, DEC_DROPOUT, attn)
model = Seq2Seq(enc, dec, SRC_PAD_IDX, device).to(device)
model.apply(init_weights)
print(f'The model has {count_parameters(model):,} trainable parameters')
optimizer = optim.Adam(model.parameters())
TRG_PAD_IDX = TRG.vocab.stoi[TRG.pad_token]
criterion = nn.CrossEntropyLoss(ignore_index=TRG_PAD_IDX)
# sec: wandb
if not args.run_local:
wandb.watch(dec)
# sec: mkdir
if args.run_local:
save_dir = args.runname
else:
save_dir = os.path.join('/yoav_stg/gshalev/neural_machine_translation', args.runname)
if not os.path.exists(save_dir):
os.mkdir(save_dir)
print('created dir : {}'.format(save_dir))
# sec: start epoch
N_EPOCHS = 10
CLIP = 1
best_valid_loss = float('inf')
for epoch in range(N_EPOCHS):
start_time = time.time()
print('start train')
train_loss = train(model, train_iterator, optimizer, criterion, CLIP)
print('start val')
valid_loss = evaluate(model, valid_iterator, criterion)
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
torch.save({
'model': model.state_dict(),
'encoder': enc.state_dict()
}, os.path.join(save_dir, 'BEST.pt'))
print(f'Epoch: {epoch + 1:02} | Time: {epoch_mins}m {epoch_secs}s')
print(f'\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}')
print(f'\t Val. Loss: {valid_loss:.3f} | Val. PPL: {math.exp(valid_loss):7.3f}')
if not args.run_local:
wandb.log({"train_loss": train_loss,
"valid_loss": valid_loss})
# --------------------------------
# bleu_score = calculate_bleu(test_data, SRC, TRG, model, device)
#
# example_idx = 12
#
# src = vars(train_data.examples[example_idx])['src']
# trg = vars(train_data.examples[example_idx])['trg']
#
# print(f'src = {src}')
# print(f'trg = {trg}')
#
# translation, attention = translate_sentence(src, SRC, TRG, model, device)
#
# print(f'predicted trg = {translation}')
# display_attention(src, translation, attention)
# original_train.py
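# Added, hedged usage sketch mirroring the commented block above: after training,
# the best checkpoint saved in save_dir can be reloaded and scored with the
# helpers defined in this file (paths and variable names as used above).
#
# checkpoint = torch.load(os.path.join(save_dir, 'BEST.pt'), map_location=device)
# model.load_state_dict(checkpoint['model'])
# test_loss = evaluate(model, test_iterator, criterion)
# print(f'Test Loss: {test_loss:.3f} | Test PPL: {math.exp(test_loss):7.3f}')
# print(f'Test BLEU: {calculate_bleu(test_data, SRC, TRG, model, device):.4f}')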
|
from .directory_creator import *
|
import math
import numpy as np
from matplotlib.pyplot import figure, savefig, show
import full_henon as fh
def norm_vect(vector):
""" Calculate the norm of a vector. """
values = [i*i for i in vector]
return math.sqrt(sum(values))
def inner_vect(vect1, vect2):
""" Calculate the inner product of two vectors. """
values = [vect1[i] * vect2[i] for i in range(len(vect1))]
return sum(values)
def proj_vect(vect1, vect2):
""" Calculate the projection of vector v onto vector u. """
return (inner_vect(vect1, vect2) / inner_vect(vect1, vect1)) * vect1
def basis(dim):
""" Creating the standard basis vectors for n dimensions. """
basisVects = [np.zeros(dim) for i in range(dim)]
for i in range(dim):
basisVects[i][i] += 1
return basisVects
def Gram_Schmidt(vectors):
""" Function that uses the Gram-Schmidt process to orthogonalize a set of n-
dimensional vectors. The normalization of the vectors is not included.
Input: vectors = list containing the vectors; a valid input is for
example:[v1, v2] where v1 = [[x1], [y1]] and v2 =
[[x2], [y2]];
Returns: basis = list containing the orthogonalised set of vectors in
the same format as the input 'vectors'.
"""
    basis = [vectors[0]]
    for v in range(1, len(vectors)):
        # subtract the projections onto all previously orthogonalised vectors,
        # then append the result once per input vector
        new_vect = vectors[v]
        for j in range(v):
            new_vect = new_vect - proj_vect(basis[j], new_vect)
        basis.append(new_vect)
    return basis
def Lyapunov(N, xvalues, A, B):
""" Function that calculates the Lyapunov exponents for the Henon map.
Input: N = number of loops that have to be computed (integer);
basis = the standard basis vectors in n dimensions. The
syntax is for example: [e1, e2] where e1 = [[1], [0]]
and e2 = [[0], [1]] (list);
xvalues = list of x values of the Henon map;
A = value for parameter a for the Henon map;
B = value for parameter b for the Henon map;
Returns:lya = list containing the computed lyapunov exponents.
"""
dim = 2 # Dimension of system
exponents = [0 for i in range(2)] # Array to put the results in
u_nk = basis(dim) # Basis vectors
for n in range(1, N):
J = np.array([[-2*A*xvalues[n], 1], [B, 0]]) # Updating the Jacobian
v_nk = [np.matmul(J, u) for u in u_nk] # Calculating v_nk
u_nk = Gram_Schmidt([v_nk[i] for i in range(dim)]) # Gram-Schmidt
for i in range(dim):
u_i = u_nk[i]
norm_v = norm_vect(u_i) # Norm of the vector
exponents[i] += np.log(norm_v) # Adding the newly obtained value
u_nk[i] = u_i / norm_v # Normalizing
# Calculating the lyapunov exponents
lya = [exponents[i] / N for i in range(dim)]
return lya
def plot_1D(vals, const, nIts, nCut, a=True, plotMin=False, saveFig=None):
""" Plotting the Lyapunov exponents for varying the parameter a or b """
lyaMin, lyaMax = [], []
xs, ys = 0, 0 # Initial conditions
# Initializing the plot
fig = figure(figsize=(15,8))
frame = fig.add_subplot(1,1,1)
for ind, val in enumerate(vals):
if a: # If b is kept constant
x, y = fh.Henon(xs, ys, nIts, val, const)
Lexp = Lyapunov(nIts-nCut, x[nCut:], val, const)
frame.set_xlabel("a", fontsize=20)
else: # If a is kept constant
x, y = fh.Henon(xs, ys, nIts, const, val)
Lexp = Lyapunov(nIts-nCut, x[nCut:], const, val)
frame.set_xlabel("b", fontsize=20)
# Adding the exponents to the lists
lyaMax.append(Lexp[0])
lyaMin.append(Lexp[1])
frame.plot(vals, lyaMax, color="darkblue", lw=0.8)
if plotMin: frame.plot(vals, lyaMin, color="crimson", lw=0.8)
frame.set_ylabel("Lyapunov exponent", fontsize=20)
frame.grid()
if saveFig != None: fig.savefig(str(saveFig))
else: show()
def plot_diff(maxIts, a=1.4, b=0.3, saveFig=None):
""" Function to plot the values of the Lyapunov exponents for different
number of iterations.
"""
x0 = y0 = 0 # Starting point
cut = 100 # Removed points
step = int(maxIts/10) # Number of steps
its = np.linspace(cut, maxIts, step) # Iteration steps
x, y = fh.Henon(x0, y0, maxIts, a, b) # Henon map
lExp = [Lyapunov(int(it), x, a, b) for it in its] # Exponents
# Plotting
fig = figure(figsize=(16,8))
ax1 = fig.add_subplot(1,1,1)
ax2 = ax1.twinx()
for ind, exp in enumerate(lExp):
ax1.scatter(its[ind], exp[0], marker="x", s=10, color="navy")
ax2.scatter(its[ind], exp[1], marker="o", s=10, color="maroon")
ax1.grid()
ax1.set_xlabel("Iterations", fontsize=20)
ax1.set_ylabel("Max exponent", color="navy", fontsize=20)
ax2.set_ylabel("Min exponent", color="maroon", fontsize=20)
if saveFig != None: fig.savefig(str(saveFig))
else: show()
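# Added, hedged usage sketch: driving Lyapunov() for the classic Henon parameters
# a=1.4, b=0.3. It assumes full_henon.Henon(x0, y0, n, a, b) returns the x and y
# orbits, as the plotting helpers above already do.
#
# if __name__ == "__main__":
#     nIts, nCut = 20000, 100
#     x, y = fh.Henon(0, 0, nIts, 1.4, 0.3)
#     print(Lyapunov(nIts - nCut, x[nCut:], 1.4, 0.3))  # largest exponent around 0.42 is expected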
|
# API to interact with database
from .models import Assignment, Student, Course
from datetime import datetime
from .google_cal_api import *
import pytz
"""
add_student(student_name)
get_student_calendar(student_name)
add_course(course_data, itr)
add_all_courses()
get_list_students()
get_list_courses()
get_list_student_courses(student_name)
add_course_to_student(course_data, student_name)
remove_course_from_student(course_key, student_name)
get_list_assignments(student_name)
add_assignment(assignment_data, student_name)
get_assignment(assignment_key, student_name)
delete_assignment(assignment_key)
remove_all_courses()
refresh_calendar(student_name)
remove_all_students()
blacklist_assignment(assignment_key, student_name)
remove_blacklist(assignment_key, student_name)
"""
def add_student(student_name):
stud = Student.objects.filter(stud_name=student_name)
if len(stud) == 0:
new_stud = Student()
new_stud.stud_name = student_name
new_stud.stud_calendar = create_calendar()
new_stud.save()
stud = new_stud
else:
stud = stud[0]
return stud
def get_student_calendar(student_name):
stud = add_student(student_name)
front = "https://calendar.google.com/calendar/embed?src="
back = "&ctz=America%2FNew_York"
return {'Student_Calendar': front+stud.stud_calendar+back}
def add_course(course_data, itr):
new_course = Course()
new_course.course_num = course_data[0]
new_course.course_mn = course_data[1]
new_course.course_num2 = course_data[2]
new_course.course_sec = course_data[3]
new_course.course_title = course_data[4]
new_course.course_key = itr
new_course.save()
#checks if courses are in the database
#if not it reads through Courses.csv and adds them to the database
def add_all_courses():
Course_List = Course.objects.all()
if len(Course_List) < 100:
file = open('Courses.csv').readlines()
file.pop(0)
itr = 0
for line in file:
line = line.split(',')
line = [elem.rstrip() for elem in line]
#process when the title has commas
if len(line) > 5:
title = line[4][1:]
new_line = []
for i in range(len(line)):
if i < 4:
new_line.append(line[i])
if i > 4:
title +=',' + line[i]
title = title[:-1]
new_line.append(title)
line = new_line
add_course(line, itr)
itr += 1
def get_list_students():
Student_List = Student.objects.all()
context = {
'Student_List':Student_List,
}
return context
def get_list_courses():
Course_List = Course.objects.all()
context = {
'Course_List':Course_List,
}
return context
def get_list_student_courses(student_name):
stud = add_student(student_name)
context = {
'Course_List':stud.stud_course.all()
}
return context
# Returns 'failed_to_find_course' if the course doesn't exist
# Returns None otherwise
def add_course_to_student(course_data, student_name):
#Get/create the appropriate student
stud = add_student(student_name)
course = Course.objects.filter(course_num=course_data[0])
if len(course) == 0:
return 'failed_to_find_course'
else:
course = course[0]
stud.stud_course.add(course)
def remove_course_from_student(course_key, student_name):
stud = Student.objects.filter(stud_name = student_name)[0]
course = Course.objects.filter(course_key = course_key)[0]
stud.stud_course.remove(course)
def get_list_assignments(student_name):
stud = add_student(student_name)
Assignment_List1 = []
Assignment_List = []
blacklist = [elem for elem in stud.stud_blacklist.all()]
for c in stud.stud_course.all():
Assignment_List1.extend(Assignment.objects.filter(assi_course=c).all())
for assi in Assignment_List1:
if assi not in blacklist:
Assignment_List.append(assi)
Assignment_List.sort(key=lambda a: a.assi_due_date)
Assignment_Course_List = [(assi, assi.assi_course.all()[0]) for assi in Assignment_List]
context = {
'Assignment_List':Assignment_List,
'Assignment_Course_List': Assignment_Course_List,
}
return context
def add_assignment(assignment_data, student_name):
stud = add_student(student_name)
course = stud.stud_course.filter(course_key=assignment_data[0])
if len(course) > 0:
course = course[0]
else:
return
new_assign = Assignment()
new_assign.assi_title = assignment_data[1]
due_date = assignment_data[2]
due_time = assignment_data[3]
due_date += ' ' + due_time + ' EST'
datetime_object = datetime.strptime(due_date, '%Y-%m-%d %H:%M %Z')
new_assign.assi_due_date = datetime_object
new_assign.assi_description = assignment_data[4]
new_assign.save()
new_assign.assi_course.add(course)
new_assign.assi_event_id = create_event(new_assign, stud)
def get_assignment(assignment_key, student_name):
stud = add_student(student_name)
context = {}
    Assignment_List = get_list_assignments(student_name)
for assignment in Assignment_List["Assignment_List"]:
if int(assignment.pk) == int(assignment_key):
context = {'Assignment' : assignment}
context.update({'Assi_Course': context['Assignment'].assi_course.all()[0]})
return context
def delete_assignment(assignment_key):
assignment = Assignment.objects.filter(pk=assignment_key)
if len(assignment) != 0:
assignment[0].delete()
def remove_all_courses(): #~~tread with caution~~
Course.objects.all().delete()
def refresh_calendar(student_name):
stud = add_student(student_name)
assi_list = get_list_assignments(student_name)['Assignment_List']
event_list = get_event_list(stud)
events_to_add = []
events_to_remove = []
#decide which assignments to add that are not in the calendar but ARE on the student's list
for assi in assi_list:
exists = False
for event in event_list:
if assi.assi_event_id == event['id']:
exists = True
if exists == False:
events_to_add.append(assi)
#decide which assignments to remove from the calendar that aren't in the student's list
for event in event_list:
exists = False
for assi in assi_list:
if assi.assi_event_id == event['id']:
exists = True
if exists == False:
events_to_remove.append(event['id'])
#actually add the events to the calendar
for assi in events_to_add:
create_event(assi, stud)
for event_id in events_to_remove:
delete_event(event_id, stud)
def remove_all_students():
Student.objects.all().delete()
def blacklist_assignment(assignment_key,student_name):
stud = add_student(student_name)
assignment = Assignment.objects.filter(pk = assignment_key)[0]
stud.stud_blacklist.add(assignment)
def remove_blacklist(assignment_key,student_name):
    stud = add_student(student_name)
assignment = Assignment.objects.filter(pk = assignment_key)[0]
stud.stud_blacklist.remove(assignment)
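# Added, hedged usage sketch: the flow a Django view might follow with the helpers
# above. The course number and assignment values are made-up placeholders and a
# configured Django project with these models is assumed.
#
# add_all_courses()                                    # seed courses from Courses.csv once
# add_student('jdoe')                                  # creates the student plus a calendar
# add_course_to_student(['12345'], 'jdoe')             # course_data[0] is the course number
# add_assignment(['0', 'HW1', '2020-01-31', '23:59', 'First homework'], 'jdoe')
# print(get_list_assignments('jdoe')['Assignment_List'])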
|
from api.libs.base import CoreView
from cmdb.models import DataCenter
from django.contrib.auth.models import User
from account.models import UserProfile
from django.db.utils import IntegrityError
class DataCenterView(CoreView):
"""
    Data center view class.
"""
login_required_action = ["get_list", "post_create", "post_delete", "post_change"]
superuser_required_action = ["post_create", "post_delete", "post_change"]
def get_list(self):
per_page = self.parameters("per_page")
if per_page:
datacenter_objs = self.page_split(DataCenter.objects.all())
else:
datacenter_objs = DataCenter.objects.all()
datacenter_list = []
for datacenter_obj in datacenter_objs:
datacenter_list.append(datacenter_obj.get_info())
self.response_data['data'] = datacenter_list
def post_create(self):
try:
name = self.parameters("name")
contact = self.parameters("contact")
memo = self.parameters("memo")
address = self.parameters("address")
admin_id = int(self.parameters("admin"))
admin_obj = UserProfile.objects.filter(id=admin_id).first()
if admin_obj and admin_obj.user:
new_datacenter_obj = DataCenter(name=name, contact=contact, memo=memo, admin=admin_obj.user, address=address)
else:
new_datacenter_obj = DataCenter(name=name, contact=contact, memo=memo, address=address)
new_datacenter_obj.save()
self.response_data['data'] = new_datacenter_obj.get_info()
except IntegrityError:
self.response_data['status'] = False
self.status_code = 416
except Exception:
self.response_data['status'] = False
self.status_code = 500
def post_delete(self):
datacenter_id = self.parameters("id")
try:
datacenter_obj = DataCenter.objects.filter(id=datacenter_id).first()
if datacenter_obj:
datacenter_obj.delete()
else:
self.response_data['status'] = False
self.status_code = 404
except Exception as e:
self.response_data['status'] = False
self.status_code = 500
def post_change(self):
datacenter_id = self.parameters("id")
name = self.parameters("name")
admin_id = self.parameters("admin_id")
contact = self.parameters("contact")
memo = self.parameters("memo")
address = self.parameters("address")
try:
datacenter_obj = DataCenter.objects.filter(id=datacenter_id).first()
if datacenter_obj:
datacenter_obj.name = name
admin_obj = UserProfile.objects.filter(id=admin_id).first()
datacenter_obj.admin = admin_obj.user if admin_obj and hasattr(admin_obj, "user") else None
datacenter_obj.contact = contact
datacenter_obj.memo = memo
datacenter_obj.address = address
datacenter_obj.save()
self.response_data['data'] = datacenter_obj.get_info()
else:
self.response_data['status'] = False
self.status_code = 404
except IntegrityError:
self.response_data['status'] = False
self.status_code = 416
except Exception as e:
self.response_data['status'] = False
self.status_code = 500
|
import math
"""
X The nodes must contain both key and value
X The hash table must be of a reasonable size
X Some collision handling must be included, e.g. chaining (collision lists) or probing
  Use KeyError to signal that a key does not exist
X Write your own hash function
  Must pass testing with hashtest.py above
"""
class Hashtabell:
def __init__(self, antalElement):
self.length = antalElement*2
self.l = [None]*self.length
def get(self, namn):
        # get must be able to follow .next when namn != the stored key name
key = hash(namn,self.length)
get = self.sokning(namn,self.l[key])
if get == None:
raise KeyError
return get
def sokning(self, namn, node):
print(namn)
if node == None:
return None
elif node.value.namn == namn:
return node.value
return self.sokning(namn, node.next)
def put(self, namn, nyAtom):
key = hash(namn,self.length)
if self.l[key] == None:
self.l[key] = Node(key,nyAtom)
else:
self.krock(namn,self.l[key],nyAtom,key)
def krock(self, namn, node, nyAtom, key):
        # Checks if the slot for the key already holds the same name; if it does, we are done.
        # If it does not, check whether it holds a different name;
        # if it holds a different name, the new atom is appended as the next node.
if node.value.namn == namn:
return
elif node.next == None:
node.next = Node(key,nyAtom)
return
self.krock(namn,node.next,nyAtom, key)
class Node:
def __init__(self, key, value = None):
self.value = value
self.key = key
self.next = None
def __str__(self):
return "NodeClass" + str(self.value)
def hash(word, length):
l = len(word)
splitatLista = list(word)
key = 0
for a in range(l):
key += (ord(splitatLista[a])*math.pow(5,a))
key = key%(length)
return int(key)
"""if not (hash("a")) == 97.0:
print("SHIT")
if not (hash("ab")) == 1077.0:
print("SHIT")
if not (hash("ba")) == 1068.0:
print("SHIT")
"""
|
# Generated by Django 3.0.5 on 2020-08-13 10:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('authentication', '0003_auto_20200813_0922'),
]
operations = [
migrations.AddField(
model_name='attendance',
name='numMins',
field=models.TimeField(),
preserve_default=False,
),
]
|
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import sys
def Round(a):
return int(a+.5)
def init():
glClearColor(0.0,1.0,1.0,0.0)
glColor3f(0.0,0.0,1.0)
glPointSize(2.0)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluOrtho2D(0.0,600.0,0.0,600.0)
def setpixel(x,y):
glBegin(GL_POINTS)
glVertex2i(x,y)
glEnd()
glFlush()
def readinput():
    global x1,y1,x2,y2
    # cast to int so the coordinates are numeric under both Python 2 and Python 3 input()
    x1=int(input("Enter x1 : "))
    y1=int(input("Enter Y1 : "))
    x2=int(input("Enter x2 : "))
    y2=int(input("Enter Y2 : "))
def linedda(x1,y1,x2,y2):
delta_x=x2-x1
delta_y=y2-y1
dx=abs(x2-x1)
dy=abs(y2-y1)
x,y=x1,y1
steps=dx if dx>dy else dy
if steps!=0:
change_x=dx/float(steps)
change_y=dy/float(steps)
else:
change_x=0
change_y=0
setpixel(Round(x),Round(y))
for k in range(steps):
if delta_x>0:
x+=change_x
else:
x-=change_x
if delta_y>0:
y+=change_y
else:
y-=change_y
setpixel(Round(x),Round(y))
def display():
glClear(GL_COLOR_BUFFER_BIT)
linedda(x1,y1,x2,y2)
def main():
glutInit(sys.argv)
glutInitDisplayMode(GLUT_SINGLE|GLUT_RGB)
glutInitWindowSize(600,600)
glutInitWindowPosition(100,100)
glutCreateWindow("Simple DDA ")
readinput()
glutDisplayFunc(display)
init()
glutMainLoop()
main()
|
import tensorflow as tf
import numpy as np
np_load_old = np.load
# temporarily patch np.load so the pickled .npy file can be loaded
np.load = lambda *a,**k: np_load_old(*a, allow_pickle=True, **k)
xy = np.load("./data/boston.npy")
np.load = np_load_old  # restore the original loader
x_data = xy[0][0]
y_data = xy[0][1]
x_test = xy[1][0]
y_test = xy[1][1]
print(x_data.shape)
print(x_test.shape)
tf.set_random_seed(777)
y_data = y_data.reshape(-1, 1)
y_test = y_test.reshape(-1, 1)
# print(x_data.shape, y_data.shape)
# x_data = x_data.reshape(x_data.shape[0], x_data.shape[1], 1)
x = tf.placeholder(tf.float32, [None, 13])
y = tf.placeholder(tf.float32, [None, 1])
w = tf.get_variable(name = 'w1', shape = [13, 8], initializer = tf.zeros_initializer())
b = tf.Variable(tf.random_normal([8]))
layer1 = tf.nn.leaky_relu(tf.matmul(x, w) + b)
w = tf.get_variable(name = 'w2', shape = [8, 1], initializer = tf.zeros_initializer())
b = tf.Variable(tf.random_normal([1]))
hypothesis = tf.nn.leaky_relu(tf.matmul(layer1, w) + b)
cost = tf.reduce_mean(tf.square(tf.subtract(hypothesis, y)))
train = tf.train.GradientDescentOptimizer(learning_rate=0.00001).minimize(cost)
session = tf.Session()
# Initializes global variables in the graph.
session.run(tf.global_variables_initializer())
for step in range(10001):
cost_val, hy_val, _ = session.run([cost, hypothesis, train], feed_dict = {x : x_data, y : y_data})
print(step, "Cost : ", cost_val)
predict = session.run([hypothesis], feed_dict = {x : x_test})
predict = np.array(predict)
y_test_reshape = y_test.reshape((-1, ))
predict = predict.reshape((-1, ))
from sklearn.metrics import r2_score
r2_y_predict = r2_score(y_test_reshape, predict)
print("R2: ", r2_y_predict)
|
# radix sort
def countingSort(arr, exp):
n = len(arr)
result = [0 for i in range(n)]
count = [0 for i in range(10)]
for i in range(n):
indx = arr[i] // exp
count[indx % 10] += 1
for i in range(1, 10):
count[i] += count[i - 1]
for i in range(n - 1, -1, -1):
indx = arr[i] // exp
result[count[indx % 10] - 1] = arr[i]
count[indx % 10] -= 1
for i in range(n):
arr[i] = result[i]
def radixSort(arr):
k = max(arr)
exp = 1
while k//exp > 0:
countingSort(arr, exp)
exp *= 10
print('Sorted Array:', arr)
def main():
arr = [170, 45, 75, 90, 802, 24, 2, 66]
radixSort(arr)
main()
|
def lengthOfLongestSubstring(s):
dict1={}
start=0
maxlen=0
len1=0
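# dict1 maps each character to the index of its most recent occurrence; start marks the left edge of the current duplicate-free window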
if s is None or len(s)==0:
return 0
for i in range(len(s)):
if s[i] not in dict1:
dict1[s[i]]=i
elif s[i] in dict1 and dict1[s[i]]>=start:
start=dict1[s[i]]+1
dict1[s[i]]=i
else:
dict1[s[i]]=i
len1=i-start+1
maxlen=max(maxlen,len1)
print (maxlen)
s="aabaab!bb"
lengthOfLongestSubstring(s)
|
# Problem statement :
# Start a knight at a corner square of an otherwise-empty chessboard. Move the knight at random by choosing uniformly from the legal knight moves at each step. What is the mean number of moves until the knight returns to the starting square?
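# Sanity check: for a random walk on an undirected graph the mean return time to a vertex is 2|E|/deg(v);
# the 8x8 knight graph has 168 edges and a corner square has degree 2, so the simulated average should approach 168.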
from random import randint
# Move a knight from (x, y) to a random new position
def newPos(x, y):
while True:
dx, dy = 1, 2
# it takes three bits to determine a random knight move:
# (1, 2) vs (2, 1) and the sign of each
r = randint(0, 7)
if r % 2:
dx, dy = dy, dx
if (r >>1) % 2:
dx = -dx
if (r >> 2) % 2:
dy = -dy
newx, newy = x + dx, y + dy
# If the new position is on the board, take it.
# Otherwise try again
if ( newx >= 0 and newx < 8 and newy >=0 and newy < 8) :
return (newx, newy)
# Count the number of steps in one random tour
def rand_tour():
x, y = x0, y0 = 0, 0
count = 0
while True:
x, y = newPos(x, y)
count += 1
if x == x0 and y == y0:
return count
def main():
# Average the length of many random tours
sum = 0
num_reps = 100000
for i in xrange(num_reps) :
sum += rand_tour()
print sum/ float(num_reps)
if __name__ == "__main__": main()
|
from model.serializer import JSONSerializable
import datetime
from model.dao import DAO
from model.users.users import UserDAO
import uuid
import logging
class Position(JSONSerializable):
def __init__(self):
self.userId = None
self.name = None
self.id = None
"""
def __init__(self, userId, name):
self.userId = userId
self.name = name
self.id = None
"""
@classmethod
def findByUser(cls, con, userIds):
return PositionDAO.findByUser(con, userIds)
class PositionDAO(DAO):
dependencies = [UserDAO]
@classmethod
def _createSchema(cls, con):
super()._createSchema(con)
cur = con.cursor()
try:
sql = """
CREATE SCHEMA IF NOT EXISTS assistance;
create table IF NOT EXISTS assistance.positions (
id varchar primary key,
user_id varchar not null references profile.users (id),
name varchar not null
);
"""
cur.execute(sql)
finally:
cur.close()
@classmethod
def _fromResult(cls, r):
p = Position()
p.id = r['id']
p.userId = r['user_id']
p.name = r['name']
return p
@classmethod
def findByUser(cls, con, userIds):
assert isinstance(userIds, list)
if len(userIds) <= 0:
return []
cur = con.cursor()
try:
logging.info('userIds: %s', tuple(userIds))
cur.execute('select * from assistance.positions where user_id in %s',(tuple(userIds),))
return [ cls._fromResult(r) for r in cur ]
finally:
cur.close()
|
import numpy as np
import pandas as pd
import tensorflow as tf
import sys
import os
print(tf.__version__)
class recommender:
def __init__(self, mode, train_file, outdir, test_file=None,
user_info_file=None, program_info_file=None,
batch_size=32, epochs=500,
learning_rate=1e-3, num_hidden=50,
display_step=5):
self.mode = mode
self.train_file = train_file
self.outdir = outdir
self.test_file = test_file
self.batch_size = batch_size
self.learning_rate = learning_rate
self.num_hidden = num_hidden
self.epochs = epochs
self.display_step = display_step
self.user_info_file = user_info_file
self.program_info_file = program_info_file
def read_data(self):
if self.mode == 'train':
self.train_data = np.load(self.train_file)
self.num_ranks = self.train_data.shape[2]
self.num_programs = self.train_data.shape[1]
self.users = self.train_data.shape[0]
else:
self.train_df = pd.read_csv(self.train_file)
self.test_data = np.load(self.test_file)
self.test_df = pd.DataFrame(self.test_data, columns=['user_id', 'program_id', 'rating'])  # column names inferred from the merge keys and the 'rating' column used in inference()
if self.user_info_file != None:
self.user_info_df = pd.read_csv(self.user_info_file)
if self.program_info_file != None:
self.program_info_df = pd.read_csv(self.program_info_file)
def next_batch(self):
while True:
ix = np.random.choice(np.arange(self.train_data.shape[0]), self.batch_size)
train_X = self.train_data[ix, :, :]
yield train_X
def __network(self):
tf.compat.v1.disable_eager_execution()
self.x = tf.compat.v1.placeholder(tf.float32, [None, self.num_programs, self.num_ranks], name="x")
self.xr = tf.reshape(self.x, [-1, self.num_programs * self.num_ranks], name="xr")
self.W = tf.Variable(tf.compat.v1.random_normal([self.num_programs * self.num_ranks, self.num_hidden], 0.01), name="W")
self.b_h = tf.Variable(tf.zeros([1, self.num_hidden], tf.float32, name="b_h"))
self.b_v = tf.Variable(tf.zeros([1, self.num_programs * self.num_ranks], tf.float32, name="b_v"))
self.k = 2
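# k is the number of Gibbs sampling steps per update (CD-k); _train() bumps it at later epochs,
# though the chain is unrolled here at graph-build time, so only this initial value shapes the graph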
def sample_hidden(probs):
return tf.floor(probs + tf.compat.v1.random_uniform(tf.shape(probs), 0, 1))
def sample_visible(logits):
logits = tf.reshape(logits, [-1, self.num_ranks])
sampled_logits = tf.compat.v1.multinomial(logits, 1)
sampled_logits = tf.one_hot(sampled_logits, depth=5)
logits = tf.reshape(logits, [-1, self.num_programs * self.num_ranks])
print(logits)
return logits
def gibbs_step(x_k):
h_k = sample_hidden(tf.sigmoid(tf.matmul(x_k, self.W) + self.b_h))
x_k = sample_visible(tf.add(tf.matmul(h_k, tf.transpose(self.W)), self.b_v))
return x_k
def gibbs_sample(k, x_k):
for i in range(k):
x_k = gibbs_step(x_k)
return x_k
self.x_s = gibbs_sample(self.k, self.xr)
self.h_s = sample_hidden(tf.sigmoid(tf.matmul(self.x_s, self.W) + self.b_h))
self.h = sample_hidden(tf.sigmoid(tf.matmul(self.xr, self.W) + self.b_h))
self.x_ = sample_visible(tf.matmul(self.h, tf.transpose(self.W)) + self.b_v)
self.W_add = tf.multiply(self.learning_rate / self.batch_size,
tf.subtract(tf.matmul(tf.transpose(self.xr), self.h),
tf.matmul(tf.transpose(self.x_s), self.h_s)))
self.bv_add = tf.multiply(self.learning_rate / self.batch_size,
tf.reduce_sum(tf.subtract(self.xr, self.x_s), 0, True))
self.bh_add = tf.multiply(self.learning_rate / self.batch_size,
tf.reduce_sum(tf.subtract(self.h, self.h_s), 0, True))
self.updt = [self.W.assign_add(self.W_add), self.b_v.assign_add(self.bv_add), self.b_h.assign_add(self.bh_add)]
def _train(self):
self.__network()
with tf.compat.v1.Session() as sess:
self.saver = tf.compat.v1.train.Saver()
init = tf.compat.v1.global_variables_initializer()
sess.run(init)
total_batches = self.train_data.shape[0] // self.batch_size
batch_gen = self.next_batch()
# Start the training
for epoch in range(self.epochs):
if epoch < 150:
self.k = 2
if (epoch > 150) & (epoch < 250):
self.k = 3
if (epoch > 250) & (epoch < 350):
self.k = 5
if (epoch > 350) & (epoch < 500):
self.k = 9
for i in range(total_batches):
self.X_train = next(batch_gen)
_ = sess.run([self.updt], feed_dict={self.x: self.X_train})
if epoch % self.display_step == 0:
print("Epoch:", '%04d' % (epoch + 1))
self.saver.save(sess, os.path.join(self.outdir, 'model'), global_step=epoch)
self.logits_pred = tf.reshape(self.x_, [self.users, self.num_programs, self.num_ranks])
self.probs = tf.nn.softmax(self.logits_pred, axis=2)
out = sess.run(self.probs, feed_dict={self.x: self.train_data})
recs = []
for i in range(self.users):
for j in range(self.num_programs):
rec = [i, j, np.argmax(out[i, j, :]) + 1]
recs.append(rec)
recs = np.array(recs)
df_pred = pd.DataFrame(recs, columns=['user_id', 'program_id', 'predicted_rating'])
df_pred.to_csv(self.outdir + 'pred_all_recs.csv', index=False)
print("RBM training Completed !")
def inference(self):
self.df_result = self.test_df.merge(self.train_df, on=['user_id', 'program_id'])
self.df_result['user_id'] = self.df_result['user_id'] + 1
self.df_result['program_id'] = self.df_result['program_id'] + 1
if self.user_info_file != None:
self.df_result = self.df_result.merge(self.user_info_df, on=['user_id'])
if self.program_info_file != None:
self.df_result = self.df_result.merge(self.program_info_df, on=['program_id'])
self.df_result.to_csv(self.outdir + 'test_results.csv', index=False)
print(f'output written to {self.outdir}test_results.csv')
test_rmse = (np.mean((self.df_result['rating'].values - self.df_result['predicted_rating'].values) ** 2)) ** 0.5
print(f'test RMSE : {test_rmse}')
def main_process(self):
self.read_data()
if self.mode == 'train':
self._train()
else:
self.inference()
|
# Preprocessing time series data
import pandas as pd
import numpy as np
from tsfresh import extract_features
df = pd.read_csv('complete_df_7.csv')
df.drop('Unnamed: 0', axis=1, inplace=True)
df['stock_open'] = df['stock_open'].astype(float)
# Create aggregate of sales down to product level
aggregate = df.groupby(['sku_key', 'tran_date']).agg({'sales':'sum',
'selling_price':'mean',
'avg_discount': 'mean',
'stock_open': 'sum'})
aggregate.reset_index(inplace=True)
# Create categorical to join to aggregates
categorical = df[['sku_key', 'sku_department', 'sku_subdepartment',
'sku_category', 'sku_subcategory', 'sku_label']]
nw_df = pd.DataFrame([], columns=['sku_key', 'sku_department',
'sku_subdepartment', 'sku_category',
'sku_subcategory', 'sku_label'])
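# For each SKU keep the first row's categorical attributes (assumed constant per SKU)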
for i in categorical['sku_key'].unique():
cats = pd.DataFrame(categorical[categorical['sku_key'] == i].iloc[0]).T
nw_df = pd.concat([nw_df, cats])
# Join categoricals and aggregates and write sku labels/joint table to csv
nw_df.reset_index(inplace=True, drop=True)
nw_df.to_csv('sku_labels.csv', index=False)
aggregate['sku_key'] = aggregate['sku_key'].astype(int)
nw_df['sku_key'] = nw_df['sku_key'].astype(int)
aggregate_df = aggregate.merge(nw_df, how='left', on='sku_key')
aggregate_df.to_csv('aggregate_products.csv', index=False)
# Extract features from TS using tsfresh and write
aggregate_df['tran_date'] = pd.to_datetime(aggregate_df['tran_date'])
extracted_features = extract_features(aggregate_df[['sku_key',
'tran_date',
'sales']],
column_id="sku_key",
column_sort="tran_date")
extracted_features.to_csv('extracted_features.csv')
|
# Generated by Django 3.1.5 on 2021-01-21 15:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Contas',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('agencia', models.CharField(max_length=10)),
('conta', models.CharField(max_length=6)),
('saldo', models.DecimalField(decimal_places=2, default=0.0, max_digits=30)),
('ultima_movimentacao', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Deposito',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('valor', models.DecimalField(decimal_places=2, default=0.0, max_digits=10)),
('data_deposito', models.DateTimeField(auto_now_add=True)),
('conta', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contas.contas')),
],
),
]
|
from numpy import *
from numpy.ma import sqrt, cos, sin
from scipy.constants import pi
from pylab import *
import os
class KnnClassifier(object):
def __init__(self, labels, samples):
""" Initialize classifier with training data. """
self.labels = labels # one label for one sample. can be integers or strings because we use dictionary
self.samples = samples
def classify(self, point, k=3):
""" Classify a point against k nearest
in the training data, return label. """
# compute distance to all training points
dist = array([L2dist(point, s) for s in self.samples]) # feel free to use other distance measure
# sort them
ndx = dist.argsort()
# use dictionary to store the k nearest
votes = {}
for i in range(k):
label = self.labels[ndx[i]]
votes.setdefault(label, 0)
votes[label] += 1
return max(votes, key=votes.get)  # return the label with the most votes, not the largest key
def L2dist(p1,p2):
return sqrt( sum( (p1-p2)**2) )
def create_sample_2d_points():
from numpy.random import randn
import pickle
# create sample data of 2D points
n = 200
# two normal distributions
class_1 = 0.6 * randn(n, 2)
class_2 = 1.2 * randn(n, 2) + array([5, 1])
labels = hstack((ones(n), -ones(n)))
# save with Pickle
with open('points_normal_test.pkl', 'wb') as f:
pickle.dump(class_1, f)
pickle.dump(class_2, f)
pickle.dump(labels, f)
# normal distribution and ring around it
class_1 = 0.6 * randn(n, 2)
r = 0.8 * randn(n, 1) + 5
angle = 2 * pi * randn(n, 1)
class_2 = hstack((r * cos(angle), r * sin(angle)))
labels = hstack((ones(n), -ones(n)))
# save with Pickle
with open('points_ring_test.pkl', 'wb') as f:
pickle.dump(class_1, f)
pickle.dump(class_2, f)
pickle.dump(labels, f)
def example_classify_2d():
import pickle
import imtools
def classify_2d(traning_data, test_data):
# load 2D points using Pickle
with open(traning_data, 'rb') as f:
class_1 = pickle.load(f)
class_2 = pickle.load(f)
labels = pickle.load(f)
model = KnnClassifier(labels, vstack((class_1, class_2)))
# load test data using Pickle
with open(test_data, 'rb') as f:
class_1 = pickle.load(f)
class_2 = pickle.load(f)
labels = pickle.load(f)
# define function for plotting
def classify(x, y, model=model):
return array([model.classify([xx, yy]) for (xx, yy) in zip(x, y)])
# plot the classification boundary
imtools.plot_2D_boundary([-6, 6, -6, 6], [class_1, class_2], classify, [1, -1])
classify_2d('points_normal.pkl', 'points_normal_test.pkl')
classify_2d('points_ring.pkl', 'points_ring_test.pkl')
show()
def process_images_to_dsift():
from n7_classifying_image_content import dsift
path1 = 'train'
path2 = 'test'
imlist = [os.path.join(path1, f) for f in os.listdir(path1) if f.endswith('.ppm')]
imlist.extend([os.path.join(path2, f) for f in os.listdir(path2) if f.endswith('.ppm')])
# process images at fixed size (50 ,50)
# otherwise the images will have varying number of descriptors, and therefore varying length of feature vectors
for filename in imlist:
featfile = filename[:-3] + 'dsift'
dsift.process_image_dsift(filename, featfile, 10, 5, resize=(50,50))
def example_classify_images():
from n1_local_image_descriptors import sift
def read_gesture_features_labels(path):
# create list of all files ending in .dsift
featlist = [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.dsift')]
# read the features
features = []
for featfile in featlist:
l, d = sift.read_features_from_file(featfile)
features.append(d.flatten())
features = array(features)
# create labels (the first char of the filename)
labels = [featfile.split('/')[-1][0] for featfile in featlist]
return features, array(labels)
def print_confusion(res, labels, classnames):
n = len(classnames)
# confusion matrix
class_ind = dict([(classnames[i], i) for i in range(n)])
confuse = zeros((n, n))
for i in range(len(test_labels)):
confuse[class_ind[res[i]], class_ind[test_labels[i]]] += 1
print('Confusion matrix for')
print(classnames)
print(confuse)
# column 'A' shows how many times an 'A' sample was classified as each class
features, labels = read_gesture_features_labels('train/')
test_features, test_labels = read_gesture_features_labels('test/')
classnames = unique(labels)
# test kNN
k = 1
knn_classifier = KnnClassifier(labels, features)
res = array([knn_classifier.classify(test_features[i], k) for i in
range(len(test_labels))])
# accuracy
acc = sum(1.0 * (res == test_labels)) / len(test_labels)
print('Accuracy:', acc)
print_confusion(res, test_labels, classnames)
# example_classify_2d()
# example_classify_images()
|
"""Module to run the bot. Executes the work() method of bot that executes the endless loop of reading comments and
submissions and replying to them if the match any response.
"""
from bot.worker import work, logger
from util.logger import setup_logger
__author__ = 'MePsyDuck'
if __name__ == '__main__':
setup_logger()
try:
work()
except (KeyboardInterrupt, SystemExit):
logger.exception("Script stopped")
|
import tkinter as tk
from PIL import Image
from tkinter import filedialog
root=tk.Tk()
canvas1=tk.Canvas(root, width=300,height=250,bg='azure3',relief='raised')
canvas1.pack()
label1 = tk.Label(root,text='Images to PDF converter',bg='azure3')
label1.config(font=('helvetica',20))
canvas1.create_window(150,60,window=label1)
def getfile():
global lst
files= filedialog.askopenfilenames()
lst=list(files)
print(lst)
def converttopdf():
global pdf1,pd
pdf1=[]
pd=[]
for i in lst:
pd.append(Image.open(i))
for i in pd:
pdf1.append(i.convert('RGB'))
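# Convert to RGB first: Pillow's PDF writer generally rejects modes with an alpha channel (e.g. RGBA)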
file= filedialog.asksaveasfilename(defaultextension='.pdf')
pdf2=pdf1[1:]
pdf1[0].save(file,save_all=True,append_images=pdf2)
button1=tk.Button(text=" Insert files ",command=getfile,bg='royalblue',fg='white',font=('helvetica',12,'bold'))
canvas1.create_window(150,130,window=button1)
button2=tk.Button(text='Convert into pdf',command=converttopdf,bg='royalblue',fg='white',font=('helvetica',12,'bold'))
canvas1.create_window(150,180,window=button2)
root.mainloop()
|
from onnx_chainer.functions.activation import convert_ClippedReLU # NOQA
from onnx_chainer.functions.activation import convert_ELU # NOQA
from onnx_chainer.functions.activation import convert_HardSigmoid # NOQA
from onnx_chainer.functions.activation import convert_LeakyReLU # NOQA
from onnx_chainer.functions.activation import convert_LogSoftmax # NOQA
from onnx_chainer.functions.activation import convert_PReLUFunction # NOQA
from onnx_chainer.functions.activation import convert_ReLU # NOQA
from onnx_chainer.functions.activation import convert_Selu # NOQA
from onnx_chainer.functions.activation import convert_Sigmoid # NOQA
from onnx_chainer.functions.activation import convert_Softmax # NOQA
from onnx_chainer.functions.activation import convert_Softplus # NOQA
from onnx_chainer.functions.activation import convert_Tanh # NOQA
from onnx_chainer.functions.array import convert_Cast # NOQA
from onnx_chainer.functions.array import convert_Concat # NOQA
from onnx_chainer.functions.array import convert_Copy # NOQA
from onnx_chainer.functions.array import convert_Depth2Space # NOQA
from onnx_chainer.functions.array import convert_Dstack # NOQA
from onnx_chainer.functions.array import convert_ExpandDims # NOQA
from onnx_chainer.functions.array import convert_GetItem # NOQA
from onnx_chainer.functions.array import convert_Hstack # NOQA
from onnx_chainer.functions.array import convert_Moveaxis # NOQA
from onnx_chainer.functions.array import convert_Pad # NOQA
from onnx_chainer.functions.array import convert_Permutate # NOQA
from onnx_chainer.functions.array import convert_Repeat # NOQA
from onnx_chainer.functions.array import convert_Reshape # NOQA
from onnx_chainer.functions.array import convert_ResizeImages # NOQA
from onnx_chainer.functions.array import convert_Separate # NOQA
from onnx_chainer.functions.array import convert_Shape # NOQA
from onnx_chainer.functions.array import convert_Space2Depth # NOQA
from onnx_chainer.functions.array import convert_SplitAxis # NOQA
from onnx_chainer.functions.array import convert_Squeeze # NOQA
from onnx_chainer.functions.array import convert_Stack # NOQA
from onnx_chainer.functions.array import convert_Swapaxes # NOQA
from onnx_chainer.functions.array import convert_Tile # NOQA
from onnx_chainer.functions.array import convert_Transpose # NOQA
from onnx_chainer.functions.array import convert_Vstack # NOQA
from onnx_chainer.functions.array import convert_Where # NOQA
from onnx_chainer.functions.connection import convert_Convolution2DFunction # NOQA
from onnx_chainer.functions.connection import convert_ConvolutionND # NOQA
from onnx_chainer.functions.connection import convert_Deconvolution2DFunction # NOQA
from onnx_chainer.functions.connection import convert_DeconvolutionND # NOQA
from onnx_chainer.functions.connection import convert_EmbedIDFunction # NOQA
from onnx_chainer.functions.connection import convert_LinearFunction # NOQA
from onnx_chainer.functions.loss import convert_SoftmaxCrossEntropy # NOQA
from onnx_chainer.functions.math import convert_Absolute # NOQA
from onnx_chainer.functions.math import convert_Add # NOQA
from onnx_chainer.functions.math import convert_AddConstant # NOQA
from onnx_chainer.functions.math import convert_Arccos # NOQA
from onnx_chainer.functions.math import convert_Arcsin # NOQA
from onnx_chainer.functions.math import convert_Arctan # NOQA
from onnx_chainer.functions.math import convert_ArgMax # NOQA
from onnx_chainer.functions.math import convert_ArgMin # NOQA
from onnx_chainer.functions.math import convert_BroadcastTo # NOQA
from onnx_chainer.functions.math import convert_Clip # NOQA
from onnx_chainer.functions.math import convert_Cos # NOQA
from onnx_chainer.functions.math import convert_Cosh # NOQA
from onnx_chainer.functions.math import convert_Div # NOQA
from onnx_chainer.functions.math import convert_DivFromConstant # NOQA
from onnx_chainer.functions.math import convert_Exp # NOQA
from onnx_chainer.functions.math import convert_Identity # NOQA
from onnx_chainer.functions.math import convert_LinearInterpolate # NOQA
from onnx_chainer.functions.math import convert_Log # NOQA
from onnx_chainer.functions.math import convert_LogSumExp # NOQA
from onnx_chainer.functions.math import convert_MatMul # NOQA
from onnx_chainer.functions.math import convert_Max # NOQA
from onnx_chainer.functions.math import convert_Maximum # NOQA
from onnx_chainer.functions.math import convert_Mean # NOQA
from onnx_chainer.functions.math import convert_Min # NOQA
from onnx_chainer.functions.math import convert_Minimum # NOQA
from onnx_chainer.functions.math import convert_Mul # NOQA
from onnx_chainer.functions.math import convert_MulConstant # NOQA
from onnx_chainer.functions.math import convert_Neg # NOQA
from onnx_chainer.functions.math import convert_PowConstVar # NOQA
from onnx_chainer.functions.math import convert_PowVarConst # NOQA
from onnx_chainer.functions.math import convert_PowVarVar # NOQA
from onnx_chainer.functions.math import convert_Prod # NOQA
from onnx_chainer.functions.math import convert_RsqrtGPU # NOQA
from onnx_chainer.functions.math import convert_Sin # NOQA
from onnx_chainer.functions.math import convert_Sinh # NOQA
from onnx_chainer.functions.math import convert_Sqrt # NOQA
from onnx_chainer.functions.math import convert_Square # NOQA
from onnx_chainer.functions.math import convert_Sub # NOQA
from onnx_chainer.functions.math import convert_SubFromConstant # NOQA
from onnx_chainer.functions.math import convert_Sum # NOQA
from onnx_chainer.functions.math import convert_Tan # NOQA
from onnx_chainer.functions.noise import convert_Dropout # NOQA
from onnx_chainer.functions.normalization import convert_BatchNormalization # NOQA
from onnx_chainer.functions.normalization import convert_FixedBatchNormalization # NOQA
from onnx_chainer.functions.normalization import convert_GroupNormalization # NOQA
from onnx_chainer.functions.normalization import convert_LocalResponseNormalization # NOQA
from onnx_chainer.functions.normalization import convert_NormalizeL2 # NOQA
from onnx_chainer.functions.pooling import convert_AveragePooling2D # NOQA
from onnx_chainer.functions.pooling import convert_AveragePoolingND # NOQA
from onnx_chainer.functions.pooling import convert_MaxPooling2D # NOQA
from onnx_chainer.functions.pooling import convert_MaxPoolingND # NOQA
from onnx_chainer.functions.pooling import convert_ROIPooling2D # NOQA
from onnx_chainer.functions.pooling import convert_Unpooling2D # NOQA
|
#encoding=utf-8
import cv2
img = cv2.imread("./images/baboon2.jpg")
(B, G, R) = cv2.split(img)
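# cv2.split returns three single-channel arrays, so each window below shows the channel as a grayscale intensity image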
cv2.imshow("blue", B)
cv2.imshow("green", G)
cv2.imshow("red", R)
merge = cv2.merge([B, G, R])
cv2.imshow("merge", merge)
cv2.waitKey(0)
|
#############################################################################
# Copyright (c) Members of the EGEE Collaboration. 2006-2010.
# See http://www.eu-egee.org/partners/ for details on the copyright holders.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Joel Casutt - joel.casutt@switch.ch
#############################################################################
'''
Created on 4/jan/2012
@author: joelcasutt
'''
import urllib2
import httplib
import socket
import sys
from AbstractProbe import PseudonymityAbstractProbe
class HTTPSClientAuthenticationHandler( urllib2.HTTPSHandler ):
"""
key and cert MUST exists
"""
def __init__(self, key, cert, timeout):
urllib2.HTTPSHandler.__init__(self)
self.key = key
self.cert = cert
self.timeout = timeout
socket.setdefaulttimeout(timeout)
def https_open(self, req):
return self.do_open(self.getConnection, req)
'''
There seems to be a change in the API between Python 2.4 and more recent versions:
urllib2's do_open passes a supplementary argument (timeout) to the connection factory
in versions newer than 2.4, so getConnection needs two signatures.
'''
if sys.version_info[1] < 5:
def getConnection(self, host):
return httplib.HTTPSConnection(host, key_file=self.key, cert_file=self.cert)
else:
def getConnection(self, host, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
return httplib.HTTPSConnection(host, key_file=self.key, cert_file=self.cert)
|
# -*- coding: latin1 -*-
"""
/***************************************************************************
ChangementViewer
A QGIS plugin
Temporal evolution viewer for statistical calculations
-------------------
begin : 2012-01-06
copyright : (C) 2012 by Kevin Aubert
email : kevin.aubert@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
# Import the PyQt and QGIS libraries
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from qgis.gui import *
from PyQt4 import uic
from PyQt4 import QtGui
import os, sys,re
import pdb
import time
sys.path.append("~/.qgis/python")
# Initialize Qt resources from file resources.py
import resources
# Import the code for the dialog
from changementviewerdialog import ChangementViewerDialog
import gettings
class ChangementViewer:
def __init__(self, iface):
# Save reference to the QGIS interface
self.iface = iface
self.settingsDialog = None
self.tmpRenderer = None
self.timer = QTimer()
def initGui(self):
# Create action that will start plugin configuration
self.action = QAction(QIcon(":/plugins/changementviewer/icon.png"), \
"Changement Viewer", self.iface.mainWindow())
# connect the action to the run method
QObject.connect(self.action, SIGNAL("triggered()"), self.run)
# Add toolbar button and menu item
self.iface.addToolBarIcon(self.action)
self.iface.addPluginToMenu("Changement Viewer", self.action)
# load the forms and connect actions
path = os.path.dirname( os.path.abspath( __file__ ) )
self.dock = uic.loadUi( os.path.join( path, "ui_changementviewer.ui" ) )
self.iface.addDockWidget( Qt.BottomDockWidgetArea, self.dock )
path = os.path.dirname( os.path.abspath( __file__ ) )
self.settingsDialog = uic.loadUi(os.path.join(path,"settings.ui"))
QObject.connect(self.dock.btnSettings, SIGNAL('clicked()'),self.showSettingsDialog)
QObject.connect(self.dock.timeSlide,SIGNAL('valueChanged(int)'),self.selectedField)
self.settingsDialog.cmbLayers.addItem( "Layers" )
lstLayers = gettings.getLayersNames( "vector" )
self.settingsDialog.cmbLayers.addItems( lstLayers )
QObject.connect(self.dock.pushButtonBack,SIGNAL('clicked()'),self.stepBackward)
QObject.connect(self.dock.pushButtonForward,SIGNAL('clicked()'),self.stepForward)
QObject.connect(self.dock.pushButtonPlay,SIGNAL('clicked()'),self.stepPlay)
QObject.connect(self.dock.pushButtonStop,SIGNAL('clicked()'),self.stepStop)
QObject.connect(self.dock.btnQ,SIGNAL('clicked()'),self.refresh)
def refresh(self):
# Remove the plugin menu item and icon
self.iface.removePluginMenu("&Changement Viewer",self.action)
self.iface.removeToolBarIcon(self.action)
self.iface.removeDockWidget(self.dock)
self.initGui()
def unload(self):
# Remove the plugin menu item and icon
self.iface.removePluginMenu("&Changement Viewer",self.action)
self.iface.removeToolBarIcon(self.action)
self.iface.removeDockWidget(self.dock)
def run(self):
self.dock.show()
def showSettingsDialog(self):
# fill layers combobox
#self.settingsDialog.cmbLayers.clear()
#self.settingsDialog.cmbLayers.addItem( "Layers" )
lstLayers = gettings.getLayersNames( "vector" )
#self.settingsDialog.cmbLayers.addItems( lstLayers )
if len(lstLayers) == 0:
QtGui.QMessageBox.warning(None,'Error','There are no unmanaged vector layers in the project !')
pass
else:
# show the form
self.settingsDialog.show()
#connect
QObject.connect( self.settingsDialog.cmbLayers, SIGNAL( "currentIndexChanged(QString)" ), self.updateFields ) #for tracking layers change
QObject.connect( self.settingsDialog.ltbFields, SIGNAL( 'itemSelectionChanged()' ), self.updateSelectedFields ) # for tracking fields selection
QObject.connect(self.settingsDialog.btnCancel, SIGNAL('clicked()'),self.settingsDialog.close) # close the settings dialog
QObject.connect(self.settingsDialog.btnOk, SIGNAL('clicked()'),self.settingsDialog.hide) # close the settings dialog
QObject.connect(self.settingsDialog.btnOk, SIGNAL('clicked()'),self.selectedField) # load the layer properties dialog
QObject.connect(self.settingsDialog.btnApply, SIGNAL('clicked()'),self.selectedField) # load the layer properties dialog
def updateFields( self ):
layName = unicode( self.settingsDialog.cmbLayers.currentText() )
self.settingsDialog.ltbFields.clear()
if layName != "Layers":
self.showModelist()
vLayer = gettings.getVectorLayerByName( layName )
# RC's modification here
lstFields = vLayer.pendingFields()
#proFields = vLayer.dataProvider().fields()
for i in lstFields:
self.settingsDialog.ltbFields.addItem( unicode( lstFields[i].name() ) )
#Joined fields control for absolute discretization
if len(lstFields.items()) > vLayer.dataProvider().fieldCount():
self.settingsDialog.ccbAbsolu.setVisible(0)
self.settingsDialog.ccbAbsolu.setChecked(0)
else:
self.settingsDialog.ccbAbsolu.setVisible(1)
self.settingsDialog.ccbAbsolu.setChecked(0)
def updateSelectedFields (self ):
# update selected fields
layName = unicode( self.settingsDialog.cmbLayers.currentText() )
vLayer = gettings.getVectorLayerByName( layName )
lstFields = vLayer.pendingFields()
#lstIndex = dict([(field.name(), index) for index, field in lstFields.iteritems()])
#lstFields = vLayer.dataProvider().fields()
myfields = self.settingsDialog.ltbFields
self.settingsDialog.tabSelectedFields.clear()
self.settingsDialog.tabSelectedFields.setRowCount(0)
for i in range(len(myfields)):
#for i in range(len(lstIndex)):
if myfields.item(i).isSelected() == True:
date=re.findall(r'\d+',lstFields[i].name())
if len(date)!=1:
QtGui.QMessageBox.warning(None,'Error','Warning : there is no date information for this attribute !')
for u in range(len(date)):
layerName=lstFields[i].name()
sdate=date[u]
self.addRowToOptionsTable(layerName,sdate)
self.settingsDialog.tabSelectedFields.sortItems(1,order = Qt.AscendingOrder)
n=self.settingsDialog.tabSelectedFields.rowCount()
self.dock.timeSlide.setMinimum(0)
self.dock.timeSlide.setMaximum(n-1)
# Selected fields table headers
item1 = QtGui.QTableWidgetItem()
item1.setText(QtGui.QApplication.translate("settings", "Fields", None, QtGui.QApplication.UnicodeUTF8))
self.settingsDialog.tabSelectedFields.setHorizontalHeaderItem(0, item1)
item2 = QtGui.QTableWidgetItem()
item2.setText(QtGui.QApplication.translate("settings", "Date", None, QtGui.QApplication.UnicodeUTF8))
self.settingsDialog.tabSelectedFields.setHorizontalHeaderItem(1, item2)
def addRowToOptionsTable(self,layerName,sdate):
#insert selected fields in tabSelectedFields
# insert row
row=self.settingsDialog.tabSelectedFields.rowCount()
self.settingsDialog.tabSelectedFields.insertRow(row)
# insert values
layerItem = QTableWidgetItem()
layerItem.setText(layerName)
self.settingsDialog.tabSelectedFields.setItem(row,0,layerItem)
dateItem = QTableWidgetItem()
dateItem.setText(sdate)
self.settingsDialog.tabSelectedFields.setItem(row,1,dateItem)
def showModelist(self):
layName = unicode( self.settingsDialog.cmbLayers.currentText() )
vLayer=gettings.getVectorLayerByName(layName)
self.settingsDialog.cmbMode.clear()
self.settingsDialog.cmbMode.addItem( "Mode" )
if vLayer.isUsingRendererV2():
# new symbology - subclass of QgsFeatureRendererV2 class
lstModes = ["EqualInterval", "Quantiles", "Jenks", "StdDev" ,"Pretty"]
else:
# old symbology - subclass of QgsRenderer class
lstModes = ["EqualInterval", "Quantiles", "Empty"]
#fill the mode combobox
self.settingsDialog.cmbMode.addItems( lstModes )
def selectedField(self):
layName = unicode( self.settingsDialog.cmbLayers.currentText() )
if layName != "Layers":
vLayer = gettings.getVectorLayerByName( layName )
else:
vLayer=self.iface.mapCanvas().currentLayer()
u=self.dock.timeSlide.value()
fieldName=self.settingsDialog.tabSelectedFields.item(u,0)
date=self.settingsDialog.tabSelectedFields.item(u,1)
if self.settingsDialog.tabSelectedFields.rowCount()!=0:
self.dock.labelDate.setText(date.text())
if self.settingsDialog.ccbAbsolu.isChecked():
# absolute discretization
self.absolu(vLayer,fieldName.text())
else:
#relative discretization
self.ApplyClicked(vLayer,fieldName.text())
self.dock.timeSlide.setPageStep(1)
def totalLayer(self,vLayer):
vLayer.updateFieldMap()
# Create temp layer with an attribute that will contain all the selected fields values
geometryTypes = ['POINT', 'LINESTRING', 'POLYGON']
tmpLayer = QgsVectorLayer(geometryTypes[vLayer.geometryType()], "tmpLayer", "memory")
tmpProvider = tmpLayer.dataProvider()
tmpProvider.addAttributes([QgsField("myvalues", QVariant.Double)])
tmpLayer.commitChanges()
# We access to features with the dataProviders, for reading (in vLayer) and writing (in tmpLayer)
vProvider = vLayer.dataProvider()
#lstFields = vLayer.pendingAllAttributesList()
allAttrs = vProvider.attributeIndexes()
vProvider.select(allAttrs)
# We select all the attributes, and will access the ones with need later
# Loop from 0 to the count of selected Fields
for i in range(self.settingsDialog.tabSelectedFields.rowCount()):
tmpLayer.startEditing()
fldName = self.settingsDialog.tabSelectedFields.item(i,0)
fldIndex = vLayer.fieldNameIndex(fldName.text())
# fldIndex is the number of the field we want to access
feat = QgsFeature()
# Creation of a new feature
#joinInfo=vLayer.joinForFieldIndex([int(fldIndex)].toDouble()[0],vProvider.maximumIndex(),int(0))
while vProvider.nextFeature(feat):
#tmpLayer.addJoinedFeatureAttributes(feat, joinInfo, fldName.text(), QVariant(feat.attributeMap()[int(fldIndex)].toDouble()[0]), lstFields, int(0) )
newfeat = QgsFeature()
# We give this feature the same geometry as in the origin layer
newfeat.setGeometry(feat.geometry())
# Adding the value of the selected fieldindex/feature inside the newfeature
newfeat.addAttribute(int(0), QVariant(feat.attributeMap()[int(fldIndex)].toDouble()[0]))
tmpProvider.addFeatures( [newfeat] )
tmpLayer.commitChanges()
tmpLayer.updateExtents()
numberOfClasses =self.settingsDialog.snbClasses.value()
mode=(self.settingsDialog.cmbMode.currentIndex()-1)
sym = QgsSymbolV2.defaultSymbol(vLayer.geometryType())
ramp=QgsVectorGradientColorRampV2(QColor(0,255,0),QColor(255,0,0))
self.tmpRenderer = QgsGraduatedSymbolRendererV2().createRenderer ( tmpLayer, "myvalues", numberOfClasses, mode, sym, ramp )
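# absolu() reuses the ranges computed here over the pooled values, so every selected field/date is classified against the same (absolute) class breaks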
def absolu(self,vLayer,fieldName):
#if self.tmpRenderer == None:
self.totalLayer(vLayer)
modeName = unicode( self.settingsDialog.cmbMode.currentText() )
if modeName != "Mode":
absoluteRenderer = QgsGraduatedSymbolRendererV2(fieldName, self.tmpRenderer.ranges())
absoluteRenderer.setRotationField(fieldName)
vLayer.setRendererV2( absoluteRenderer )
self.iface.mapCanvas().refresh()
self.iface.legendInterface().refreshLayerSymbology(vLayer)
else:
QtGui.QMessageBox.warning(None,'Error','You have to choose a discretization mode')
def ApplyClicked(self,vLayer,fieldName):
# Set the numeric field and the number of classes to be generated
numberOfClasses =self.settingsDialog.snbClasses.value()
# Get the field index based on the field name
fieldIndex = vLayer.fieldNameIndex(fieldName)
# Set the discretization mode
modeName = unicode( self.settingsDialog.cmbMode.currentText() )
if modeName != "Mode":
mode=(self.settingsDialog.cmbMode.currentIndex()-1)
if self.iface.mapCanvas().currentLayer().isUsingRendererV2():
# new symbology - subclass of QgsFeatureRendererV2 class
sym = QgsSymbolV2.defaultSymbol(vLayer.geometryType())
ramp=QgsVectorGradientColorRampV2(QColor(0,255,0),QColor(255,0,0))
rendererV2 = QgsGraduatedSymbolRendererV2.createRenderer ( vLayer, fieldName, numberOfClasses, mode, sym, ramp )
rendererV2.setRotationField(fieldName)
vLayer.setRendererV2( rendererV2 )
else:
# old symbology - subclass of QgsRenderer class
# Create the renderer object to be associated to the layer later
renderer = QgsGraduatedSymbolRenderer( vLayer.geometryType() )
# Here you may choose the renderer mode from EqualInterval/Quantile/Empty
renderer.setMode( mode )
# Define classes (lower and upper value as well as a label for each class)
provider = vLayer.dataProvider()
minimum = provider.minimumValue( fieldIndex ).toDouble()[ 0 ]
maximum = provider.maximumValue( fieldIndex ).toDouble()[ 0 ]
for i in range( numberOfClasses ):
# Switch if attribute is int or double
lower = ('%.*f' % (2, minimum + ( maximum - minimum ) / numberOfClasses * i ) )
upper = ('%.*f' % (2, minimum + ( maximum - minimum ) / numberOfClasses * ( i + 1 ) ) )
label = "%s - %s" % (lower, upper)
color = QColor(255*i/numberOfClasses, 255-255*i/numberOfClasses, 0)
sym = QgsSymbol( vLayer.geometryType(), lower, upper, label, color )
renderer.addSymbol( sym )
# Set the field index to classify and set the created renderer object to the layer
renderer.setClassificationField( fieldIndex )
vLayer.setRenderer( renderer )
#self.iface.showLayerProperties(vLayer)
self.iface.mapCanvas().refresh()
self.iface.legendInterface().refreshLayerSymbology(vLayer)
else:
QtGui.QMessageBox.warning(None,'Error','You have to choose a discretization mode')
def stepForward(self):
u=self.dock.timeSlide.value()
self.dock.timeSlide.setValue(u+1)
n=self.settingsDialog.tabSelectedFields.rowCount()
umax=n-1
if u == umax:
self.timer.stop()
self.dock.timeSlide.setValue(0)
def stepBackward(self):
u=self.dock.timeSlide.value()
self.dock.timeSlide.setValue(u-1)
def stepPlay(self):
self.timer.stop()
self.timer.timeout.connect(self.stepForward)
self.timer.start(self.settingsDialog.snbPlay.value()*1000)
def stepStop(self):
self.timer.stop()
|
import os
class Emp:
def __init__(self,Name,Dept):
self.Name = Name
self.Dept = Dept
def show (self):
print (self.Name, self.Dept)
|
import codecs
import json
import os
import time
from datetime import datetime
from ..utils.matches import is_won_match, get_match_id, is_matchmaking
from ..FaceitApi import get_player_matches
from .elo import get_player_info
DATA_DIRECTORY = os.path.join(os.path.dirname(__file__), '..', '..', 'settings')
DATA_FILE = os.path.join(DATA_DIRECTORY, 'session.json')
def init_session(parent, arguments):
player_info = get_player_info(parent, arguments)
session_data = {
'init': int(time.mktime(datetime.now().timetuple())),
'player_id': player_info['player_id'],
'elo': player_info['elo'],
# TODO: Remove this when FACEIT API gets FIXED
'initial_matches': set_initial_matches_deprecated(parent, arguments['api_key'], player_info['player_id'])
}
with codecs.open(DATA_FILE, encoding='utf-8-sig', mode='w+') as f:
json.dump(session_data, f, encoding='utf-8', ensure_ascii=False)
with codecs.open(DATA_FILE.replace('.json', '.js'), encoding='utf-8-sig', mode='w+') as f:
f.write(
'const settings = {0};'.format(json.dumps(session_data, f, encoding='utf-8', ensure_ascii=False)))
if arguments['overlays_enabled']:
parent.BroadcastWsEvent('EVENT_FCARRASCOSA_FACEIT_SESSION_START', '')
def get_session_initial_data():
with codecs.open(DATA_FILE, encoding='utf-8-sig', mode='r') as f:
session_data = json.load(f, encoding='utf-8-sig')
return session_data
def get_session_analysis(parent, arguments):
api_key = arguments['api_key']
initial_data = get_session_initial_data()
initial_date = initial_data['init']
initial_elo = initial_data['elo']
player_id = initial_data['player_id']
get_all = arguments['include_all_matches']
current_elo = get_player_info(parent, arguments)['elo']
matches = [match for match in get_player_matches(parent, api_key, player_id, matches_from=initial_date)["items"] if
(get_all or is_matchmaking(match))]
won_matches = 0
total_matches = len(matches)
for match in matches:
won_matches += 1 if is_won_match(player_id, match) else 0
result = {
'total_matches': total_matches,
'won_matches': won_matches,
'lost_matches': total_matches - won_matches,
'elo_balance': current_elo - initial_elo,
}
if arguments['overlays_enabled']:
parent.BroadcastWsEvent('EVENT_FCARRASCOSA_FACEIT_SESSION_UPDATE', json.dumps(result))
return result
# TODO: Remove this when FACEIT API gets FIXED
def set_initial_matches_deprecated(parent, api_key, player_id):
matches = get_player_matches(parent, api_key, player_id)["items"]
return map(get_match_id, matches)
# TODO: Remove this when FACEIT API gets FIXED
def get_session_analysis_deprecated(parent, arguments):
api_key = arguments['api_key']
get_all = arguments['include_all_matches']
initial_data = get_session_initial_data()
initial_elo = initial_data['elo']
initial_matches = initial_data['initial_matches']
player_id = initial_data['player_id']
current_elo = get_player_info(parent, arguments)['elo']
matches = get_player_matches(parent, api_key, player_id)["items"]
matches_to_analyze = []
won_matches = 0
for match in matches:
if match['match_id'] not in initial_matches and (get_all or is_matchmaking(match)):
matches_to_analyze.append(match)
total_matches = len(matches_to_analyze)
for match in matches_to_analyze:
won_matches += 1 if is_won_match(player_id, match) else 0
result = {
'total_matches': total_matches,
'won_matches': won_matches,
'lost_matches': total_matches - won_matches,
'elo_balance': current_elo - initial_elo,
}
if arguments['overlays_enabled']:
parent.BroadcastWsEvent('EVENT_FCARRASCOSA_FACEIT_SESSION_UPDATE', json.dumps(result))
return result
|
# At each node, choose whether to rob it or not
# Propagate a (rob, skip) pair from the children to track both options
class Solution:
def rob(self, root: TreeNode) -> int:
def _rob(root):
if not root: return 0, 0 # (rob, skip)
left = _rob(root.left)
right = _rob(root.right)
# Rob the current node: neither child may be robbed
v1 = root.val + left[1] + right[1]
# Skip the current node: take the better option from each child
v2 = max(left) + max(right)
return v1, v2
return max(_rob(root))
|
import os
import json
import matplotlib.pyplot as plt
with open('all_curve.json', 'r') as f:
file = json.load(f)
x1 = file['gamma0.99']['x']
y1 = file['gamma0.99']['y']
x2 = file['gamma1']['x']
y2 = file['gamma1']['y']
x3 = file['gamma0.75']['x']
y3 = file['gamma0.75']['y']
x4 = file['gamma0.50']['x']
y4 = file['gamma0.50']['y']
title = 'different gamma curve'
plt.plot(x1, y1, linewidth=3, label='GAMMA 0.99')
plt.plot(x2, y2, linewidth=3, label='GAMMA 1.00')
plt.plot(x3, y3, linewidth=3, label='GAMMA 0.75')
plt.plot(x4, y4, linewidth=3, label='GAMMA 0.50')
plt.title(title, fontsize=14)
plt.xlabel("Steps", fontsize=10)
plt.ylabel("Avg Reward", fontsize=10)
plt.legend()
plt.savefig('gamma_curve.png')
|
import pickle
import numpy as np
import matplotlib.pyplot as plt
from experiment_setup import loadExperimentFile
# Each entry has the form (minSwitchingProb, q1_required, q1, question_id)
def calcAbsErrors(minSwitchProfile):
absErrors = []
for perModelVector in minSwitchProfile:
for entry in perModelVector:
(minSwitchingProb,q1_required,q1,question_id) = entry
absErrors.append(minSwitchingProb-q1_required)
if (q1_required < q1):
print "Error in response on issue #",question_id
print "Risk compensation response is",q1_required," and starting probability=",q1
return absErrors
def calcRelativeErrors(minSwitchProfile):
relErrors = []
for perModelVector in minSwitchProfile:
for entry in perModelVector:
(minSwitchingProb,q1_required,q1,_) = entry
relErrors.append((minSwitchingProb-q1_required)/(1.0-q1))
return relErrors
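# Absolute error is the raw gap between the model's switching probability and the required one;
# relative error rescales that gap by the headroom left above the starting probability (1 - q1).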
def plotResults(expFileName, inFileName,outFileDir):
#Load experiment data
(perPersonData,individualProspectList,\
groupProspectList,groupToPeopleMap,paramRanges) = loadExperimentFile(expFileName)
print "\n\nLoading all data from %s"%inFileName
pkl_file = open('%s'%inFileName,'rb')
assert(pkl_file != None)
training_sets = pickle.load(pkl_file)
allPeopleFittedModels = pickle.load(pkl_file)
allPeopleNumCorrectChoices = pickle.load(pkl_file)
allPeopleNumCorrectSwitches = pickle.load(pkl_file)
allPeopleMinSwitchingProb = pickle.load(pkl_file)
print "Finished loading"
pkl_file.close()
print "Closed file, now printing results"
print "TRAINING SETS",training_sets
NUM_INDIVIDUALS = len(allPeopleFittedModels)
NUM_REPEATS = len(training_sets)
TRAIN_SZ = len(training_sets[0])
TEST_SZ = 16 - TRAIN_SZ
print "Num individuals: ",NUM_INDIVIDUALS
print "Training size: %i"%TRAIN_SZ
print "Num Repeats: %i"%NUM_REPEATS
#Plot distribution of switching probabilities in three modes, each in absolute and relative format
#per person
#per issue
#aggregate
perPersonSwitchProbDistRel = [[] for _ in range(NUM_INDIVIDUALS)]
perPersonSwitchProbDistAbs = [[] for _ in range(NUM_INDIVIDUALS)]
perIssueSwitchProbDistRel = [[] for _ in range(16)]
perIssueSwitchProbDistAbs = [[] for _ in range(16)]
aggregateSwitchProbDistRel = []
aggregateSwitchProbDistAbs = []
for i,switchProbList in enumerate(allPeopleMinSwitchingProb):
for switchProb in switchProbList:
for (minSwitchingProb,p_required,p_original,question_id) in switchProb:
absDelta = minSwitchingProb - p_required
relDelta = absDelta/(1.0 - p_original )
if (relDelta > 1.0):
print "Relative delta too high",relDelta, "for person ID",(i+1), "on issue ID",(question_id+1)
print "Skipping!"
continue
perPersonSwitchProbDistAbs[i].append(absDelta)
perPersonSwitchProbDistRel[i].append(relDelta)
perIssueSwitchProbDistRel[question_id].append(relDelta)
perIssueSwitchProbDistAbs[question_id].append(absDelta)
aggregateSwitchProbDistRel.append(relDelta)
aggregateSwitchProbDistAbs.append(absDelta)
SKIP_COUNT = 50
#per person
for person_id in range(NUM_INDIVIDUALS):
if (person_id % SKIP_COUNT == 0): #plot
plt.figure()
plt.suptitle("Person %i - Switching Prediction" %(person_id+1))
plt.subplot(121)
plt.title("Abs : %s"%printStats(perPersonSwitchProbDistAbs[person_id]))
plt.hist(perPersonSwitchProbDistAbs[person_id])
plt.subplot(122)
plt.title("Rel : %s"%printStats(perPersonSwitchProbDistRel[person_id]))
plt.hist(perPersonSwitchProbDistRel[person_id])
plt.show(block=False)
#per issue absolute
plt.figure()
plt.suptitle("Per Issue Switching Abs. Error")
for issue_id in range(16):
plt.subplot(4,4,(issue_id+1))
plt.title("Issue %i:%s"%(issue_id,printStats(perIssueSwitchProbDistAbs[issue_id])))
plt.hist(perIssueSwitchProbDistAbs[issue_id])
plt.figure()
plt.suptitle("Per Issue Switching Rel. Error")
for issue_id in range(16):
plt.subplot(4,4,(issue_id+1))
plt.title("Issue %i:%s"%(issue_id,printStats(perIssueSwitchProbDistRel[issue_id])))
plt.hist(perIssueSwitchProbDistRel[issue_id])
plt.figure()
plt.suptitle("Distribution of Errors for Switching Model (over all examples)")
plt.subplot(121)
plt.title("Switching Model Abs Delta :%s"%printStats(aggregateSwitchProbDistAbs))
plt.hist(aggregateSwitchProbDistAbs)
plt.subplot(122)
plt.title("Switching Model Rel Delta :%s"%printStats(aggregateSwitchProbDistRel))
plt.hist(aggregateSwitchProbDistRel)
#Aggregate stats dist
perPersonRelDeltaMean = [ np.mean(np.array(dist)) for dist in perPersonSwitchProbDistRel]
perPersonRelDeltaVar = [ np.var(np.array(dist)) for dist in perPersonSwitchProbDistRel]
plt.figure()
plt.suptitle("Distribution of Relative Delta Statistics (over all people)")
plt.subplot(121)
plt.title("Average Relative Error Distribution : %s"%printStats(perPersonRelDeltaMean))
print perPersonRelDeltaMean
plt.hist(perPersonRelDeltaMean)
plt.subplot(122)
plt.title("Variance of Relative Error Distribution: %s"%printStats(perPersonRelDeltaVar))
plt.hist(perPersonRelDeltaVar)
plt.show(block=False)
#Plot distribution of accuracies
aggNumCorrectChoiceDist = []
aggNumCorrectSwitchDist = []
aggAbsErrorSwitch = []
aggRelErrorSwitch = []
for person_id in range(NUM_INDIVIDUALS):
#Recover individual params
numCorrectChoiceDist = allPeopleNumCorrectChoices[person_id]
print "Num correct choices predicted for person",person_id,"is ",allPeopleNumCorrectChoices[person_id]
numCorrectSwitchDist = allPeopleNumCorrectSwitches[person_id]
#minSwitchingProb (via model), recorded switching prob, compensation probability infimum
minSwitchProfile = allPeopleMinSwitchingProb[person_id]
#Calculate rel and abs error for risk switch
absErrorSwitch = calcAbsErrors(minSwitchProfile)
relErrorSwitch = calcRelativeErrors(minSwitchProfile)
"""
plt.figure()
plt.suptitle("Results for person %i(%s)" % (person_id,outFileDir))
plt.subplot(221)
plt.hist(numCorrectChoiceDist)
plt.title("Correct Choice Dist")
plt.subplot(222)
plt.hist(numCorrectSwitchDist)
plt.title("Correct Switch Dist")
plt.subplot(223)
plt.hist(absErrorSwitch)
plt.title("Abs. Error Switch")
plt.subplot(224)
plt.hist(relErrorSwitch)
plt.title("Rel. Error Switch")
"""
#plt.show()
#extend aggregate lists
aggNumCorrectChoiceDist = numCorrectChoiceDist + aggNumCorrectChoiceDist
aggNumCorrectSwitchDist = numCorrectSwitchDist + aggNumCorrectSwitchDist
aggRelErrorSwitch = aggRelErrorSwitch + relErrorSwitch
aggAbsErrorSwitch = aggAbsErrorSwitch + absErrorSwitch
## PLOT AGGREGATE PARAMS
alphaDist = []
betaDist = []
gammaPDist = []
gammaMDist = []
lambdaDist = []
thetaDist= []
alphaLambdaXY = []
alphaGammaPlusXY = []
alphaGammaMinusXY = []
gammaPlusGammaMinusXY = []
lambdaGammaPlusXY = []
lambdaGammaMinusXY = []
for modelList in allPeopleFittedModels:
for value in modelList:
alphaDist.append(value[0])
betaDist.append(value[1])
lambdaDist.append(value[2])
gammaPDist.append(value[3])
gammaMDist.append(value[4])
thetaDist.append(value[5])
#
alphaLambdaXY.append((value[0],value[2]))
alphaGammaPlusXY.append((value[0],value[3]))
alphaGammaMinusXY.append((value[0],value[4]))
gammaPlusGammaMinusXY.append((value[3],value[4]))
lambdaGammaPlusXY.append((value[2],value[3]))
lambdaGammaMinusXY.append((value[2],value[4]))
plt.figure()
plt.title("alpha lambda plot")
plt.scatter([a for (a,_) in alphaLambdaXY],[l for (_,l) in alphaLambdaXY])
plt.figure()
plt.title("alpha-gamma plus plot")
plt.scatter([a for (a,_) in alphaGammaPlusXY],[g for (_,g) in alphaGammaPlusXY])
plt.figure()
plt.title("alpha-gamma minus plot")
plt.scatter([a for (a,_) in alphaGammaMinusXY],[g for (_,g) in alphaGammaMinusXY])
plt.figure()
plt.title("gamma plus-gamma minus plot")
plt.scatter([a for (a,_) in gammaPlusGammaMinusXY],[g for (_,g) in gammaPlusGammaMinusXY])
plt.figure()
plt.title("lambda -gamma plus plot")
plt.scatter([a for (a,_) in lambdaGammaPlusXY],[g for (_,g) in lambdaGammaPlusXY])
plt.figure()
plt.title("lambda -gamma minus plot")
plt.scatter([a for (a,_) in lambdaGammaMinusXY],[g for (_,g) in lambdaGammaMinusXY])
plt.figure()
plt.subplot(231)
plt.xlabel("alpha")
plt.hist(alphaDist)
plt.subplot(232)
plt.xlabel("beta")
plt.hist(betaDist)
plt.subplot(233)
plt.xlabel("lambda")
plt.hist(lambdaDist)
plt.subplot(234)
plt.xlabel("gamma+")
plt.hist(gammaPDist)
plt.subplot(235)
plt.xlabel("gamma-")
plt.hist(gammaMDist)
plt.subplot(236)
plt.xlabel("theta")
plt.hist(thetaDist)
plt.show(block=False)
## END PLOT AGGREGATE PARAMS
"""
Plot distribution of aggregated accuracies
"""
choiceMean = np.mean(np.array(aggNumCorrectChoiceDist))
choicePercent = (choiceMean/TEST_SZ)
choiceVar = np.var(np.array(aggNumCorrectChoiceDist))
switchCorrectMean = np.mean(np.array(aggNumCorrectSwitchDist))
switchCorrectVar = np.var(np.array(aggNumCorrectSwitchDist))
switchCorrectPercent = (switchCorrectMean/TEST_SZ)
plt.figure(figsize=(22,8))
plt.suptitle("Accuracy Results (aggregating all test examples) (%s)"%inFileName)
plt.subplot(121)
plt.hist(aggNumCorrectChoiceDist)
plt.title("Correct Choice Dist ($\mu$=%.3f,$\sigma^2$=%.2f out of %i (%.2f Percent Accuracy))"%(choiceMean,choiceVar,TEST_SZ,choicePercent))
plt.subplot(122)
plt.hist(aggNumCorrectSwitchDist)
plt.title("Correct Switch Dist ($\mu$=%.3f,$\sigma^2$=%.2f out of %i (%.2f Percent Accuracy))"%(switchCorrectMean,switchCorrectVar,TEST_SZ,switchCorrectPercent))
plt.show(block=False)
#Aggregate per person
perPersonChoiceCorrectMeans = [ np.mean(np.array(dist)) for dist in allPeopleNumCorrectChoices]
perPersonSwitchCorrectMeans = [ np.mean(np.array(dist)) for dist in allPeopleNumCorrectSwitches]
plt.figure(figsize=(22,8))
plt.suptitle("Accuracy Results (distribution of all people) (%s)"%inFileName)
plt.subplot(121)
plt.hist(perPersonChoiceCorrectMeans)
plt.title("Mean number of correct choice predictions:%s"%(printStats(perPersonChoiceCorrectMeans)))
plt.subplot(122)
plt.hist(perPersonSwitchCorrectMeans)
plt.title("Mean number of correct switch predictions:%s"%(printStats(perPersonSwitchCorrectMeans)))
plt.show(block=True)
#PLOT CORRECTNESS PER ISSUE via box plots
#End plot result
def printStats(dist):
s = "($\mu$=%.2f,$\sigma^2$=%.2f)"%(np.mean(np.array(dist)),np.var(np.array(dist)))
return s
|
"""
Ref:
- https://tools.ietf.org/html/rfc2617
- https://en.wikipedia.org/wiki/Basic_access_authentication
- https://en.wikipedia.org/wiki/Digest_access_authentication
- https://github.com/dimagi/python-digest/blob/master/python_digest/utils.py
- https://gist.github.com/dayflower/5828503
"""
from base64 import b64encode
from asgi_webdav.response import DAVResponse
MESSAGE_401 = b"""<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8" />
<title>Error</title>
</head>
<body>
<h1>401 Unauthorized.</h1>
</body>
</html>"""
class HTTPAuthMiddleware:
def __init__(self, app, username: str, password: str):
self.app = app
self.username = bytes(username, encoding="utf8")
self.password = bytes(password, encoding="utf8")
self.realm = "ASGI WebDAV"
self.basic = b64encode(self.username + b":" + self.password)
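# Pre-compute base64(username:password) once, so each request only needs a byte-string comparison against the Basic credential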
async def __call__(self, scope, receive, send) -> None:
authenticated = await self.handle(scope)
if not authenticated:
headers = {
b"WWW-Authenticate": 'Basic realm="{}"'.format(self.realm).encode(
"utf-8"
)
}
await DAVResponse(
status=401, message=MESSAGE_401, headers=headers
).send_in_one_call(send)
return
await self.app(scope, receive, send)
async def handle(self, scope) -> bool:
headers = scope.get("headers")
if headers is None:
# TODO raise
return False
authorization = dict(headers).get(b"authorization")
if authorization is None:
return False
if authorization[:6] == b"Basic ":
if authorization[6:] == self.basic:
return True
else:
print(self.basic)
return False
if authorization[:6] == b"Digest":
# TODO
pass
return False
|
# Script to carry out the 10000 test executions
import tarefas
import escalonador
from random import randint
for i in range(0, 100, 1):
# Running for the n-th time
print("Executing run " + str(i+1) + "!")
# Define the list we will send to the system.
listaTeste = []
# Randomly create the tasks to send to the scheduling algorithm
for j in range(0, 10000, 1):
tarefaAux = tarefas.Tarefa()
tarefaAux.nome = "Tarefa" + str(j)
tarefaAux.inicio = randint(0,10000)
tarefaAux.fim = tarefaAux.inicio + randint(3, 40)
listaTeste.append(tarefaAux)
# Here we schedule the randomly generated tasks
escalonador.escalonar(listaTeste)
print("Finished run " + str(i+1) + "!")
# Clear the list after use
listaTeste.clear()
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# Implement next permutation, which rearranges numbers into the lexicographically next greater permutation of numbers.
# If such arrangement is not possible,
# it must rearrange it as the lowest possible order (ie, sorted in ascending order).
# The replacement must be in-place, do not allocate extra memory.
# Here are some examples. Inputs are in the left-hand column and its corresponding outputs are in the right-hand column.
# 1,2,3 → 1,3,2
# 3,2,1 → 1,2,3
# 1,1,5 → 1,5,1
# Find the pivot --> reverse nums from the pivot to the end -->
# If pivot > 0: find the first number in the suffix greater than nums[pivot - 1] and swap the two values.
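# Worked example: [1, 3, 2] -> pivot is index 1 (since 1 < 3); reverse the suffix -> [1, 2, 3];
# swap nums[pivot - 1] = 1 with the first suffix element greater than it (2) -> [2, 1, 3], the next permutation.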
# 265 / 265 test cases passed.
# Status: Accepted
# Runtime: 59 ms
# Your runtime beats 33.74 % of python submissions.
class Solution(object):
def nextPermutation(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
if not nums:
return
lo, hi = 0, len(nums) - 1
while lo < hi:
if nums[hi - 1] < nums[hi]:
change_index = hi
for i in range(len(nums) - 1, hi - 1, -1):
if nums[hi - 1] < nums[i]:
change_index = i
break
nums[hi - 1], nums[change_index] = nums[change_index], nums[hi - 1]
nums[hi:] = reversed(nums[hi:])
return
hi -= 1
nums.reverse()
# 265 / 265 test cases passed.
# Status: Accepted
# Runtime: 52 ms
# Your runtime beats 60.80 % of python submissions.
class Solution(object):
def nextPermutation(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
if not nums or len(nums) == 1:
return
# O(1)
if nums[-1] > nums[-2]:
nums[-1], nums[-2] = nums[-2], nums[-1]
return
lo, pivot, hi = 0, len(nums) - 1, len(nums) - 1
while lo < pivot and nums[pivot - 1] >= nums[pivot]:
pivot -= 1
reverse_index = pivot
while reverse_index < hi:
nums[reverse_index], nums[hi] = nums[hi], nums[reverse_index]
reverse_index += 1
hi -= 1
swap_index = pivot
if pivot > 0:
while swap_index <= hi and nums[swap_index] <= nums[pivot - 1]:
swap_index += 1
nums[swap_index], nums[pivot - 1] = nums[pivot - 1], nums[swap_index]
print(nums)
if __name__ == '__main__':
print(Solution().nextPermutation([3, 2, 1]))
print(Solution().nextPermutation([1, 1, 5]))
print(Solution().nextPermutation([1, 3, 2]))
print(Solution().nextPermutation([4, 3, 1, 2]))
print(Solution().nextPermutation([1, 3, 2, 4]))
|
import cv2
import numpy as np
import os
import glob
from clize import run
CHECKERBOARD = (6,9)
subpix_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
calibration_flags = cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC + cv2.fisheye.CALIB_CHECK_COND + cv2.fisheye.CALIB_FIX_SKEW
objp = np.zeros((1, CHECKERBOARD[0]*CHECKERBOARD[1], 3), np.float32)
objp[0,:,:2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2)
def calibrate(dirpath, width, height):
_img_shape = None
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
images = glob.glob(os.path.join(dirpath, '*.jpg'))
#print("# found %u files" % len(images))
for fname in images:
img = cv2.imread(fname)
img = cv2.resize(img, (width,height))
if _img_shape == None:
_img_shape = img.shape[:2]
else:
assert _img_shape == img.shape[:2], "All images must share the same size."
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# Find the chess board corners
ret, corners = cv2.findChessboardCorners(gray, CHECKERBOARD, cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FAST_CHECK + cv2.CALIB_CB_NORMALIZE_IMAGE)
# If found, add object points, image points (after refining them)
if ret == True:
objpoints.append(objp)
cv2.cornerSubPix(gray, corners, (3,3), (-1,-1), subpix_criteria)
imgpoints.append(corners)
N_OK = len(objpoints)
#print("# found %u points" % N_OK)
K = np.zeros((3, 3))
D = np.zeros((4, 1))
rvecs = [np.zeros((1, 1, 3), dtype=np.float64) for i in range(N_OK)]
tvecs = [np.zeros((1, 1, 3), dtype=np.float64) for i in range(N_OK)]
try:
rms, _, _, _, _ = cv2.fisheye.calibrate(
objpoints,
imgpoints,
gray.shape[::-1],
K,
D,
rvecs,
tvecs,
calibration_flags,
(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 1e-6)
)
print("if w == %u and h == %u:" % (width, height))
print("\tK = np.array(" + str(K.tolist()) + ")")
print("\tD = np.array(" + str(D.tolist()) + ")")
print("\treturn K, D")
except:
print("# failed for size %u x %u" % (width, height))
def calibrate_all(dirpath):
v_res = [120, 240, 300, 480, 720, 1080, 1440, 1600, 1800, 2000, 2400, 2464, 3072, 4800]
for v in v_res:
h_res = int(round(v / 3.0) * 4)
calibrate(dirpath, h_res, v)
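# A small follow-up sketch (not called by this script): how the printed K and D are typically
# consumed afterwards -- build the fisheye undistortion maps once, then remap each frame.
# Uses only the cv2/np imports already present above.
def undistort(img, K, D):
    h, w = img.shape[:2]
    # identity rectification; reuse K as the new camera matrix
    map1, map2 = cv2.fisheye.initUndistortRectifyMap(
        K, D, np.eye(3), K, (w, h), cv2.CV_16SC2)
    return cv2.remap(img, map1, map2,
                     interpolation=cv2.INTER_LINEAR,
                     borderMode=cv2.BORDER_CONSTANT)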
if __name__ == "__main__":
run(calibrate_all)
|
def max_num(num1, num2, num3):
if num1 >= num2 and num1 >= num3:
return num1
elif num2 >= num1 and num2 >= num3:
return num2
else:
return num3
print("You're number is: " + str(max_num(3,20,5)))
|
import sys
#input parameters
input_string = "mango";
test_string = "goman";
#SOLUTION: This is an O(n) solution on AVERAGE (where 'n' is the length of the LONGER string)
copy_string = 2*test_string;
if ( (input_string in copy_string) and (len(input_string)==len(test_string)) ):
print("ROTATION DETECTED!");
|
from django.db import models
from slugify import slugify
class Tool(models.Model):
name = models.CharField(max_length=500)
slug = models.SlugField(max_length=500)
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.name)
super(Tool, self).save(*args, **kwargs)
def __str__(self):
return self.name
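# A minimal usage sketch (e.g. from `python manage.py shell`, assuming the app is installed
# and migrated); the slug is generated from the name on the first save:
#
#   tool = Tool(name="Data Cleaning Kit")   # hypothetical name
#   tool.save()
#   tool.slug   # -> "data-cleaning-kit"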
|
import math
import torch
from torch.nn import functional as F
import argparse
import numpy as np
import pickle
import neural_network as nn
from neural_network import tf, tint
from replay_buffer import ReplayBuffer
from envs import AtariEnv
from ram_annotations import atari_dict
parser = argparse.ArgumentParser()
parser.add_argument("--learning_rate", default=2.5e-4, help="Learning rate", type=float)
parser.add_argument("--run", default=0, help="run", type=int)
parser.add_argument("--mbsize", default=32, help="Minibatch size", type=int)
parser.add_argument("--buffer_size", default=100000, help="Replay buffer size",type=int)
parser.add_argument("--checkpoint", default='results/41.pkl', help="checkpoint file",type=str)
parser.add_argument("--expert_is_self", default=1, help="is expert params", type=int)
parser.add_argument("--loss_func", default='td', help="target loss")
parser.add_argument("--device", default='cuda', help="device")
def sl1(a, b):
"""Smooth L1 distance"""
d = a - b
u = abs(d)
s = d**2
m = (u < s).float()
return u * m + s * (1 - m)
def make_opt(opt, theta, lr, weight_decay):
if opt == "sgd":
return torch.optim.SGD(theta, lr, weight_decay=weight_decay)
elif opt == "msgd":
return torch.optim.SGD(theta, lr, momentum=0.9, weight_decay=weight_decay)
elif opt == "rmsprop":
return torch.optim.RMSprop(theta, lr, weight_decay=weight_decay)
elif opt == "adam":
return torch.optim.Adam(theta, lr, weight_decay=weight_decay)
else:
raise ValueError(opt)
def load_parameters_from_checkpoint():
data = pickle.load(open(ARGS.checkpoint, 'rb'))
return [tf(data[str(i)]) for i in range(10)]
def fill_buffer_with_expert(replay_buffer, env_name, epsilon=0.01):
mbsize = ARGS.mbsize
envs = [AtariEnv(env_name) for i in range(mbsize)]
num_act = envs[0].num_actions
nhid = 32
_, theta_q, Qf, _ = nn.build(
nn.conv2d(4, nhid, 8, stride=4), # Input is 84x84
nn.conv2d(nhid, nhid * 2, 4, stride=2),
nn.conv2d(nhid * 2, nhid * 2, 3),
nn.flatten(),
nn.hidden(nhid * 2 * 12 * 12, nhid * 16),
nn.linear(nhid * 16, num_act),
)
theta_q_trained = load_parameters_from_checkpoint()
if ARGS.expert_is_self:
theta_expert = theta_q_trained
else:
expert_id = {
'ms_pacman':457, 'asterix':403, 'seaquest':428}[env_name]
with open(f'checkpoints/dqn_model_{expert_id}.pkl',
"rb") as f:
theta_expert = pickle.load(f)
theta_expert = [tf(i) for i in theta_expert]
obs = [i.reset() for i in envs]
trajs = [list() for i in range(mbsize)]
enumbers = list(range(mbsize))
replay_buffer.ram = torch.zeros([replay_buffer.size, 128],
dtype=torch.uint8,
device=replay_buffer.device)
while True:
mbobs = tf(obs) / 255
greedy_actions = Qf(mbobs, theta_expert).argmax(1)
random_actions = np.random.randint(0, num_act, mbsize)
actions = [
j if np.random.random() < epsilon else i
for i, j in zip(greedy_actions, random_actions)
]
for i, (e, a) in enumerate(zip(envs, actions)):
obsp, r, done, _ = e.step(a)
trajs[i].append([obs[i], int(a), float(r), int(done), e.getRAM() + 0])
obs[i] = obsp
if replay_buffer.idx + len(trajs[i]) + 4 >= replay_buffer.size:
# We're done!
return Qf, theta_q_trained
replay_buffer.new_episode(trajs[i][0][0], enumbers[i] % 2)
for s, a, r, d, ram in trajs[i]:
replay_buffer.ram[replay_buffer.idx] = tint(ram)
replay_buffer.add(s, a, r, d, enumbers[i] % 2)
trajs[i] = []
obs[i] = envs[i].reset()
enumbers[i] = max(enumbers) + 1
def main():
gamma = 0.99
hps = pickle.load(open(ARGS.checkpoint, 'rb'))['hps']
env_name = hps["env_name"]
if 'Lambda' in hps:
Lambda = hps['Lambda']
else:
Lambda = 0
device = torch.device(ARGS.device)
nn.set_device(device)
replay_buffer = ReplayBuffer(ARGS.run, ARGS.buffer_size)
Qf, theta_q = fill_buffer_with_expert(replay_buffer, env_name)
for p in theta_q:
p.requires_grad = True
if Lambda > 0:
replay_buffer.compute_episode_boundaries()
replay_buffer.compute_lambda_returns(lambda s: Qf(s, theta_q), Lambda, gamma)
td__ = lambda s, a, r, sp, t, idx, w, tw: sl1(
r + (1 - t.float()) * gamma * Qf(sp, tw).max(1)[0].detach(),
Qf(s, w)[np.arange(len(a)), a.long()],
)
td = lambda s, a, r, sp, t, idx, w, tw: Qf(s, w).max(1)[0]
tdL = lambda s, a, r, sp, t, idx, w, tw: sl1(
Qf(s, w)[:, 0], replay_buffer.LG[idx])
loss_func = {
'td': td, 'tdL': tdL}[ARGS.loss_func]
opt = torch.optim.SGD(theta_q, 1)
def grad_sim(inp, grad):
dot = sum([(p.grad * gp).sum() for p, gp in zip(inp, grad)])
nA = torch.sqrt(sum([(p.grad**2).sum() for p, gp in zip(inp, grad)]))
nB = torch.sqrt(sum([(gp**2).sum() for p, gp in zip(inp, grad)]))
return (dot / (nA * nB)).item()
relevant_features = np.int32(
sorted(list(atari_dict[env_name.replace("_", "")].values())))
sims = []
ram_sims = []
for i in range(2000):
sim = []
*sample, idx = replay_buffer.sample(1)
loss = loss_func(*sample, idx, theta_q, theta_q).mean()
loss.backward()
g0 = [p.grad + 0 for p in theta_q]
for j in range(-30, 31):
opt.zero_grad()
loss = loss_func(*replay_buffer.get(idx + j), theta_q, theta_q).mean()
loss.backward()
sim.append(grad_sim(theta_q, g0))
sims.append(np.float32(sim))
for j in range(200):
opt.zero_grad()
*sample_j, idx_j = replay_buffer.sample(1)
loss = loss_func(*sample_j, idx_j, theta_q, theta_q).mean()
loss.backward()
ram_sims.append(
(grad_sim(theta_q, g0),
abs(replay_buffer.ram[idx[0]][relevant_features].float() -
replay_buffer.ram[idx_j[0]][relevant_features].float()).mean()))
opt.zero_grad()
ram_sims = np.float32(
ram_sims) #np.histogram(np.float32(ram_sim), 100, (-1, 1))
# Compute "True" gradient
grads = [i.detach() * 0 for i in theta_q]
N = 0
for samples in replay_buffer.in_order_iterate(ARGS.mbsize * 8):
loss = loss_func(*samples, theta_q, theta_q).mean()
loss.backward()
N += samples[0].shape[0]
for p, gp in zip(theta_q, grads):
gp.data.add_(p.grad)
opt.zero_grad()
dots = []
i = 0
for sample in replay_buffer.in_order_iterate(1):
loss = loss_func(*sample, theta_q, theta_q).mean()
loss.backward()
dots.append(grad_sim(theta_q, grads))
opt.zero_grad()
i += 1
histo = np.histogram(dots, 100, (-1, 1))
results = {
"grads": [i.cpu().data.numpy() for i in grads],
"sims": np.float32(sims),
"histo": histo,
"ram_sims": ram_sims,
}
path = f'results/grads_{ARGS.checkpoint}.pkl'
with open(path, "wb") as f:
pickle.dump(results, f)
if __name__ == "__main__":
ARGS = parser.parse_args()
main()
|
from django.urls import path
from . import views
from jobs.views import app
urlpatterns = [
path('',views.myblog,name='myblog'),
path('app/',app,name='app'),
path('<int:blog_id>/',views.detail,name='detail')
]
|
import cv2
import numpy as np
import utils
path = "fotos/teste2.jpg"
width = 586
height = 826
widthG = 165
heightG = 805
img = cv2.imread(path) #Reading the image
img = cv2.resize(img, (width, height)) #reducing the image width and height
imgContours = img.copy()
imgBiggestContours = img.copy()
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
imgBlur = cv2.GaussianBlur(imgGray,(5,5),1)
imgCanny = cv2.Canny(imgBlur, 10, 50)
countours, hierarchy = cv2.findContours(imgCanny,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
cv2.drawContours(imgContours, countours,-1,(0,255,0),5)
rectCon = utils.rectContour(countours)
biggestContour = utils.getCornerPoints(rectCon[0])
#print(biggestContour.shape)
if biggestContour.size != 0:
cv2.drawContours(imgBiggestContours, biggestContour, -1, (0,255,0), 10)
biggestContour = utils.reorder(biggestContour)
pt1 = np.float32(biggestContour)
pt2 = np.float32([[0,0],[width,0],[0,height],[width,height]])
matrix = cv2.getPerspectiveTransform(pt1,pt2)
imgWarpColored = cv2.warpPerspective(img,matrix,(width,height))
imgWarpGray = cv2.cvtColor(imgWarpColored,cv2.COLOR_BGR2GRAY)
imgThresh = cv2.threshold(imgWarpGray, 170, 255,cv2.THRESH_BINARY_INV )[1]
#print(imgThresh)
utils.splitBoxes(imgThresh)
imgBlack = np.zeros_like(img)
#[img,imgGray,imgBlur,imgCanny],
imgArray = ([imgContours,imgBiggestContours,imgWarpColored,imgBlack],
[imgBlack,imgBlack,imgBlack,imgBlack])
imgStacked = utils.stackImages(imgArray, 0.5)
cv2.imshow("Stacked",imgStacked)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
'''
Alessia Pizzoccheri - CS 5001 02
Consulted https://stackoverflow.com/questions/22025764/python-check-for-integer-input
to learn how to prevent interpreter from throwing an error message if user
input is not an integer
'''
import hanoi_viz
MIN = 1
MAX = 8
SOURCE = 'Start'
MIDDLE = 'Transfer'
TARGET = 'End'
def check_input():
''' Name: check_input
parameters: none
returns: int
'''
while True:
# ask users how many disks in the tower
user_input = input('How many disks does the Tower of Hanoi have? ')
# check if input is integer and prevent error message
try:
num_disks = int(user_input)
except ValueError:
print('Please, enter a valid input.')
else:
# check int is between 1 and 8 inclusive
if num_disks >= MIN and num_disks <= MAX:
break
# go back to top if int is outside valid range
else:
print('Please, enter a valid input.')
return num_disks
def move_tower(disks,source,target,middle,towers):
'''
Name: move_tower
Input: int, dict, dict, dict, dict
Return: None
'''
# if there's only one disks
if disks == 1:
hanoi_viz.move_disk(source,target,towers)
# if there are multiple disks
else:
move_tower(disks-1,source,middle,target,towers)
hanoi_viz.move_disk(source,target,towers)
move_tower(disks-1,middle,target,source,towers)
def main():
print('Welcome to the Tower of Hanoi.\n'+
'To start, enter an integer between 1 and 8.')
# select number of disks
num_disks = check_input()
# draw the towers
towers = hanoi_viz.initialize_towers(num_disks,SOURCE,TARGET,MIDDLE)
# start the Tower of Hanoi
move_tower(num_disks,SOURCE,TARGET,MIDDLE,towers)
main()
|
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import csv
from django.db import transaction
from django.test import override_settings
from logging import getLogger
from data_aggregator.models import Report, SubaccountActivity
from data_aggregator.utilities import set_gcs_base_path
from data_aggregator.exceptions import TermNotStarted
from restclients_core.util.retry import retry
from restclients_core.exceptions import DataFailureException
from uw_canvas.accounts import Accounts as CanvasAccounts
from uw_canvas.analytics import Analytics as CanvasAnalytics
from uw_canvas.reports import Reports as CanvasReports
from uw_canvas.terms import Terms as CanvasTerms
logger = getLogger(__name__)
RETRY_STATUS_CODES = [0, 408, 500, 502, 503, 504]
RETRY_MAX = 5
RETRY_DELAY = 5
class ReportBuilder():
def __init__(self):
self._accounts = CanvasAccounts(per_page=100)
self._analytics = CanvasAnalytics()
self._reports = CanvasReports()
self._terms = CanvasTerms()
@retry(DataFailureException, status_codes=RETRY_STATUS_CODES,
tries=RETRY_MAX, delay=RETRY_DELAY, logger=logger)
def get_statistics_by_account(self, sis_account_id, sis_term_id):
return self._analytics.get_statistics_by_account(
sis_account_id, sis_term_id)
@retry(DataFailureException, status_codes=RETRY_STATUS_CODES,
tries=RETRY_MAX, delay=RETRY_DELAY, logger=logger)
def get_activity_by_account(self, sis_account_id, sis_term_id):
return self._analytics.get_activity_by_account(
sis_account_id, sis_term_id)
@retry(DataFailureException, status_codes=RETRY_STATUS_CODES,
tries=RETRY_MAX, delay=RETRY_DELAY, logger=logger)
def get_account_activities_data(self, root_account, sis_term_id):
activities = []
accounts = []
accounts.append(root_account)
accounts.extend(
self._accounts.get_all_sub_accounts_by_sis_id(
root_account.sis_account_id))
activities = []
for account in accounts:
sis_account_id = account.sis_account_id
if sis_account_id is None:
continue
activity = SubaccountActivity(term_id=sis_term_id,
subaccount_id=sis_account_id,
subaccount_name=account.name)
data = self.get_statistics_by_account(sis_account_id, sis_term_id)
for key, val in data.items():
if key == "courses":
continue
setattr(activity, key, val)
try:
data = self.get_activity_by_account(sis_account_id,
sis_term_id)
for item in data["by_category"]:
setattr(activity,
"{}_views".format(item["category"]),
item["views"])
except DataFailureException as ex:
if ex.status != 504:
raise
activities.append(activity)
return activities
def get_xlist_courses(self, root_account, sis_term_id):
# create xlist lookup
term = self._terms.get_term_by_sis_id(sis_term_id)
xlist_courses = set()
xlist_prov_report = self._reports.create_xlist_provisioning_report(
root_account.account_id, term.term_id,
params={"include_deleted": True})
xlist_data_file = self._reports.get_report_data(xlist_prov_report)
reader = csv.reader(xlist_data_file)
next(reader, None) # skip the headers
for row in reader:
if not len(row):
continue
sis_course_id = row[6]
if sis_course_id:
xlist_courses.add(sis_course_id)
self._reports.delete_report(xlist_prov_report)
return xlist_courses
def get_course_data(self, root_account, sis_term_id):
# create course totals lookup
term = self._terms.get_term_by_sis_id(sis_term_id)
course_prov_report = self._reports.create_course_provisioning_report(
root_account.account_id, term.term_id,
params={"include_deleted": True})
course_data_file = self._reports.get_report_data(course_prov_report)
course_data = []
reader = csv.reader(course_data_file)
next(reader, None) # skip the headers
for row in reader:
if not len(row):
continue
course_data.append(row)
self._reports.delete_report(course_prov_report)
return course_data
@transaction.atomic
@override_settings(RESTCLIENTS_CANVAS_TIMEOUT=90)
def build_subaccount_activity_report(self, root_account_id,
sis_term_id=None, week_num=None):
try:
report = Report.objects.get_or_create_report(
Report.SUBACCOUNT_ACTIVITY,
sis_term_id=sis_term_id,
week_num=week_num)
except TermNotStarted as ex:
logger.info("Term {} not started".format(ex))
return
set_gcs_base_path(report.term_id, report.term_week)
root_account = self._accounts.get_account_by_sis_id(root_account_id)
account_courses = {}
# save activities and initialize course totals
activity_data = self.get_account_activities_data(root_account,
report.term_id)
for activity in activity_data:
account_courses[activity.subaccount_id] = {
"courses": 0,
"active_courses": 0,
"ind_study_courses": 0,
"active_ind_study_courses": 0,
"xlist_courses": 0,
"xlist_ind_study_courses": 0
}
activity.report = report
activity.save()
# calculate course totals
xlist_courses = self.get_xlist_courses(root_account, report.term_id)
course_data = self.get_course_data(root_account, report.term_id)
for row in course_data:
if not len(row):
continue
sis_course_id = row[1]
sis_account_id = row[6]
if (sis_course_id is None or sis_account_id is None or
sis_account_id not in account_courses):
continue
status = row[9]
ind_study = (len(sis_course_id.split("-")) == 6)
is_xlist = (sis_course_id in xlist_courses)
is_active = (status == "active")
for sis_id in account_courses.keys():
if sis_account_id.find(sis_id) == 0:
account_courses[sis_id]["courses"] += 1
if is_xlist:
account_courses[sis_id]["xlist_courses"] += 1
elif is_active:
account_courses[sis_id]["active_courses"] += 1
if ind_study:
account_courses[sis_id][
"ind_study_courses"] += 1
if is_xlist:
account_courses[sis_id][
"xlist_ind_study_courses"] += 1
elif is_active:
account_courses[sis_id][
"active_ind_study_courses"] += 1
# save course totals
for sis_account_id in account_courses:
try:
totals = account_courses[sis_account_id]
activity = SubaccountActivity.objects.get(
report=report, term_id=report.term_id,
subaccount_id=sis_account_id)
activity.courses = totals["courses"]
activity.active_courses = totals["active_courses"]
activity.ind_study_courses = totals["ind_study_courses"]
activity.active_ind_study_courses = \
totals["active_ind_study_courses"]
activity.xlist_courses = totals["xlist_courses"]
activity.xlist_ind_study_courses = \
totals["xlist_ind_study_courses"]
activity.save()
except SubaccountActivity.DoesNotExist:
continue
report.finished()
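# A minimal call sketch (hypothetical identifiers; the real account and term values come
# from the surrounding deployment):
#
#   builder = ReportBuilder()
#   builder.build_subaccount_activity_report(
#       "uwcourse", sis_term_id="2022-spring", week_num=5)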
|
# -*- coding: utf-8 -*-
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def isSubtree(self, s, t):
if s is None and t is None:
return True
elif s is None and t is not None:
return False
elif s is not None and t is None:
return True
elif self.isSameTree(s, t):
return True
return self.isSubtree(s.left, t) or self.isSubtree(s.right, t)
def isSameTree(self, s, t):
if s is None and t is None:
return True
elif s is None or t is None:
return False
return (
s.val == t.val
and self.isSameTree(s.left, t.left)
and self.isSameTree(s.right, t.right)
)
if __name__ == "__main__":
solution = Solution()
t0_0 = TreeNode(3)
t0_1 = TreeNode(4)
t0_2 = TreeNode(5)
t0_3 = TreeNode(1)
t0_4 = TreeNode(2)
t0_1.right = t0_4
t0_1.left = t0_3
t0_0.right = t0_2
t0_0.left = t0_1
t1_0 = TreeNode(4)
t1_1 = TreeNode(1)
t1_2 = TreeNode(2)
t1_0.right = t1_2
t1_0.left = t1_1
assert solution.isSubtree(t0_0, t1_0)
t2_0 = TreeNode(3)
t2_1 = TreeNode(4)
t2_2 = TreeNode(5)
t2_3 = TreeNode(1)
t2_4 = TreeNode(2)
t2_5 = TreeNode(0)
t2_4.left = t2_5
t2_1.right = t2_4
t2_1.left = t2_3
t2_0.right = t2_2
t2_0.left = t2_1
assert not solution.isSubtree(t2_0, t1_0)
|
import imap_test
import threading
import threadpool
pool = threadpool.ThreadPool(30)
def write_email(submit):
'''
write dict into txt file
eg: write a dict into a.txt
requires the target file with path and the dict to write in
return nothing,just write content into file
'''
# content = json.dumps(content)
file = r'..\res\hotmail\\'+submit['Email_emu']+'.txt'
with open(file,'w') as f:
# content += '\n'
f.write(submit['Email_emu_pwd'])
def multi_test(submit):
flag = imap_test.Email_emu_getlink(submit)
if flag == 1:
write_email(submit)
def main():
with open('Microsoft Mail Access.txt','r') as f:
lines = f.readlines()
print(len(lines))
pwds = ''
for line in lines:
line_split = line.split(':')
if len(line_split) == 0:
print('bad line')
for line_ in line_split:
if '@' in line_ or '.' in line_ or ' ' in line_:
pass
else:
pwds += line_.strip('\n') + '\n'
with open('pwd.txt','w') as f:
f.write(pwds)
# if len(line_split) == 3:
# print(line_split)
# print('---')
# print(line_split[0].split('@')[1])
# if 'imap-mail.outlook.com' in line_split[2]:
# # print('===')
# if 'msn.com' in line_split[0].split('@')[1]:
# # print('----')
# submit = {}
# submit['Email_emu'] = line_split[0]
# submit['Email_emu_pwd'] = line_split[1]
# submits.append(submit)
print(len(pwds))
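    # NOTE: the early return below ends main() here; the threadpool dispatch that follows is
    # currently dead code and expects `submits` to be built by the commented-out block above.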
return
requests = threadpool.makeRequests(multi_test,submits)
[pool.putRequest(req) for req in requests]
pool.wait()
if __name__ == '__main__':
main()
|
import random
from collections import defaultdict
def generated_list(number):
random_generated_list = []
while len(random_generated_list) < number:
random_generated_list.append(random.randint(0, 100))
return random_generated_list
def generated_dictionary(numbers_list):
numbers_occurrences = defaultdict(int)
for i in numbers_list:
numbers_occurrences[i] += 1
return numbers_occurrences
def max_number_occurrence(generated_list):
if generated_list == []:
raise ValueError
dictionary = generated_dictionary(generated_list)
items_list = dictionary.items()
sorted_items_list = sorted(
items_list, key=lambda max_item_value: max_item_value[1]
)
result = sorted_items_list[-1]
return result
if __name__ == '__main__':
print max_number_occurrence(generated_list(10))
|
def any(it):
for x in it:
if x:
return True
return False
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib.parse
import urllib.request
import base64
import json
import time
import os
def draw_hands_point(path, savePath, originfilename,hands,resultfilename,pointsize,pointcolor):
from PIL import Image, ImageDraw
image_origin = Image.open(path+originfilename)
draw =ImageDraw.Draw(image_origin)
for hand in hands:
for hand_part in hand['hand_parts'].values():
#print(hand_part)
draw.ellipse((hand_part['x']-pointsize,hand_part['y']-pointsize,hand_part['x']+pointsize,hand_part['y']+pointsize),fill = pointcolor)
gesture = hand['location']
draw.rectangle((gesture['left'],gesture['top'],gesture['left']+gesture['width'],gesture['top']+gesture['height']),outline = "red")
image_origin.save(savePath+"/images/"+resultfilename, "JPEG")
def hand_analysis(path, savePath, filename,resultfilename,pointsize,pointcolor):
request_url = "https://aip.baidubce.com/rest/2.0/image-classify/v1/hand_analysis"
print(filename)
f = open(path+filename, 'rb')
img = base64.b64encode(f.read())
params = dict()
params['image'] = img
params = urllib.parse.urlencode(params).encode("utf-8")
#params = json.dumps(params).encode('utf-8')
# access_token = get_token()
begin = time.perf_counter()
request_url = request_url + "?access_token=" + '24.93a77997cff7bf42c8ef760bd5d9d32c.2592000.1620200643.282335-23933490'
request = urllib.request.Request(url=request_url, data=params)
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
response = urllib.request.urlopen(request)
content = response.read()
end = time.perf_counter()
print('Time:'+'%.2f'%(end-begin)+'s')
if content:
#print(content)
content=content.decode('utf-8')
#print(content)
data = json.loads(content)
# print('hand_num:',data['hand_num'])
# print('hand_info:',data['hand_info'])
# print('hand_info:',data['hand_info'])
#print(data)
result=data['hand_info']
file = list(filename.split('.'))
file.remove(file[1])
txtName = ''.join(file)
with open(savePath+"/informations/"+txtName+".txt", "w") as fp:
fp.write(json.dumps(result, indent=4))
f.close()
draw_hands_point(path, savePath, filename,result,resultfilename,pointsize,pointcolor)
if __name__ =='__main__':
path = "./dataAndLabel/"
compare = "./keyPointsInfo/images/"
files = os.listdir(path)
compare = os.listdir(compare)
nameSet = set(files).difference(set(compare))
savePath = "./keyPointsInfo/"
for filename in nameSet:
hand_analysis(path, savePath, filename, filename, 20,'#800080')
|
# -*- python -*-
# Make Change
#
# You'll probably remember this one from your morning algorithm sessions,
# but I'll explain it just in case you haven't done it yet.
#
# Write a function that takes an amount of money in cents and returns the fewest number of coins possible for
# the number of cents. Here's an example, given the input 387. Now that you have a few tools at your disposal,
# the output should be a dictionary, as shown below:
#
# Solving this problem may seem relatively simple, and it is, as long as we use only one type of currency.
# Here we are assuming American currency:
# - Dollar: 1
# - Half-Dollar: 0.5 (optional)
# - Quarter: 0.25
# - Dime: 0.1
# - Nickel: 0.05
# - Penny: 0.01
# To help get you started, here's the basic outline for your function:
#
# def change(cents):
# # {'dollars': 3, 'quarters': 3, 'dimes': 1, 'nickels': 0, 'pennies': 2}
# coins = {}
# ...
# return coins
#
# Modifying this algorithm to work with any currency is a very difficult to solve problem. If you want an
# extra hard challenge or to learn about something called dynamic programming, you can give it a shot, but
# don't spend too long on it (no more than 2 hours).
def change( cents ):
money_values = [
('dollars', 100),
('half-dollars', 50),
('quarters', 25),
('dimes', 10),
('nickels', 5),
('pennies', 1)
]
coins = {}
for k,v in money_values:
coins[k] = cents / v
cents %= v
return( coins )
# Testing
print "Money:", 19, "Change:", change( 19 )
print "Money:", 78, "Change:", change( 78 )
print "Money:", 183, "Change:", change( 183 )
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 7 15:24:49 2019
@author: HP
"""
str="hiimnaveen"
str=str[0:3]+str[6:7]
print(str)
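# For reference: str[0:3] is "hii" and str[6:7] is "v", so this prints "hiiv".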
|
import warnings
warnings.filterwarnings("ignore")
from keras.models import Model
from keras.layers import Dense, Activation, Flatten, Input
from keras.layers import Conv2D, Conv2DTranspose
#from keras.layers import UpSampling2D, MaxPooling2D, ZeroPadding2D, AveragePooling2D
from keras.layers import LeakyReLU, Dropout, GaussianNoise
from keras.layers import BatchNormalization
from keras.optimizers import Adam
from keras.initializers import RandomNormal
import sys
class Network:
def __init__(self):
self.Discriminator = None
self.Generator = None
self.AdversarialNetwork = None
self.compiled = False
self.imagesize = (128, 128, 3) # (64, 64, 3)
self.noiseshape = (1, 1, 100)
self.dropoutrate_d = 0.35 ## 0.3
self.leakyreluparam = 0.2
self.batchnormmomentum= 0.8 ## 0.8
self.kernel_initializer = RandomNormal(mean=0.0, stddev=0.02) #glorot_uniform is default
self.adam_d = Adam(lr=0.0002, beta_1=0.5) #0.002 or 0.0002?
self.adam_g = Adam(lr=0.0002, beta_1=0.5) #different learning rate for gan?
self.loss = "binary_crossentropy"
def build_discriminator(self) -> None:
if self.Discriminator:
return
input_tensor = Input(shape=self.imagesize)
'''
^^^ Input Layer Definition
vvv Output Layer Definition
'''
        ### Simple layering definition for repetitive layer stacking
def repeatlayering(layers, filters, shape=(4, 4), batch=True):
x = Conv2D(
filters,
shape,
strides=2,
padding="same",
kernel_initializer=self.kernel_initializer) (layers)
if batch:
x = BatchNormalization(momentum=self.batchnormmomentum) (x)
x = LeakyReLU(alpha=self.leakyreluparam) (x)
x = Dropout(self.dropoutrate_d) (x)
return x
### Layering
l = input_tensor
#128x128
l = GaussianNoise(0.1) (l)
l = repeatlayering(l, 64, batch=False)
#64x64
l = repeatlayering(l, 128)
#32x32
l = repeatlayering(l, 256)
#16x16
l = repeatlayering(l, 512)
#8x8
l = repeatlayering(l, 1024)
#4x4
l = Flatten() (l)
### sigmoid activation output layer for discriminator NN
l = Dense(1, kernel_initializer=self.kernel_initializer) (l)
l = Activation("sigmoid") (l) #hard_sigmoid?
self.Discriminator = Model(inputs=input_tensor, outputs=l)
self.Discriminator.name = "Discriminator"
def build_generator(self) -> None:
if self.Generator:
return
input_tensor = Input(shape=self.noiseshape)
'''
^^^ Input Layer Definition
VVV Output Layer Definition
'''
        ### Simple layering definition for repetitive layer stacking
def repeatlayering(layers, filters, shape=(4, 4), paddingval='same', strides=True):
if strides:
x = Conv2DTranspose(
filters,
shape,
padding=paddingval,
strides=(2, 2),
kernel_initializer=self.kernel_initializer) (layers)
#x = UpSampling2D((2, 2)) (layers)
#x = Conv2D(filters, shape, padding=paddingval) (x)
else:
x = Conv2DTranspose(
filters,
shape,
padding=paddingval,
kernel_initializer=self.kernel_initializer) (layers)
x = BatchNormalization(momentum=self.batchnormmomentum) (x)
x = LeakyReLU(alpha=self.leakyreluparam) (x)
return x
### Layering
l = input_tensor
l = repeatlayering(l, 1024, paddingval="valid", strides=False)
l = repeatlayering(l, 512)
l = repeatlayering(l, 256)
l = repeatlayering(l, 128)
l = repeatlayering(l, 64)
l = Conv2D(64, (3, 3), padding='same', kernel_initializer=self.kernel_initializer) (l)
l = BatchNormalization(momentum=self.batchnormmomentum) (l)
l = LeakyReLU(alpha=self.leakyreluparam) (l)
l = Conv2DTranspose(3, (4, 4), padding='same', strides=(2, 2), kernel_initializer=self.kernel_initializer) (l)
l = Activation("tanh") (l) #tanh
# output should be 128x128 at this point
self.Generator = Model(inputs=input_tensor, outputs=l)
self.Generator.name = "Generator"
def build_and_compile(self):
if self.compiled:
return self.Generator, self.Discriminator, self.AdversarialNetwork
if not self.Discriminator:
self.build_discriminator()
# Compile. . .
self.Discriminator.compile(self.adam_d, loss=self.loss)
self.Discriminator.trainable = False
# Discriminator won't be trained during GAN training
if not self.Generator:
self.build_generator()
noise = Input(shape=self.noiseshape)
gen = self.Generator(noise)
output = self.Discriminator(gen)
self.AdversarialNetwork = Model(inputs=noise, outputs=output)
self.AdversarialNetwork.name = "Generative Adversarial Network"
self.AdversarialNetwork.compile(self.adam_g, loss=self.loss)
### redirecting keras model architecture printing to a file
self.compiled = True
original_stdout = sys.stdout
f = open("datafiles/models/current_GAN_architecture.txt", "w+")
sys.stdout = f
self.Generator.summary()
self.Discriminator.summary()
self.AdversarialNetwork.summary()
sys.stdout = original_stdout
f.close()
###
print(
'''
The current architecture for the GAN has been saved to
.../GAN/datafiles/models/current_GAN_architecture.txt
Caffe prototext type format will be supported later(?)
'''
)
return self.Generator, self.Discriminator, self.AdversarialNetwork
#n = Network()
#n.build_and_compile()
|
from .no_arvore_inteiro import NoArvoreInteiro
class Arvore:
def __init__(self, raiz=None):
self.__raiz = raiz
@property
def raiz(self):
return self.__raiz
def inserir_elemento(self, no):
no.no_direito = None
no.no_esquerdo = None
if self.__raiz is None:
self.__raiz = no
else:
self.__inserir(no, self.raiz)
def __inserir(self, no, referencia):
if (referencia.peso() > no.peso()):
if referencia.no_esquerdo == None:
referencia.no_esquerdo = no
else:
self.__inserir(no, referencia.no_esquerdo)
else:
if referencia.no_direito == None:
referencia.no_direito = no
else:
                self.__inserir(no, referencia.no_direito)
|
import os
from PIL import Image
from matplotlib.widgets import Button
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from img_segmentation import generate_trainig_set, get_neighbours_values
from mlp import MultiLayerPerceptron as MLP
class ImageSegmentation:
def __init__(self, img_path):
self._figure = plt.figure()
self._setup_axes()
self._setup_gui()
self._object_img = None
self._background_img = None
self._area = []
self._cropping = None
self._mlp = MLP((4, 15, 5, 1))
# Load target image in grayscale
self._img = Image.open(img_path).convert('L')
# Plot image
self._img_ax.imshow(self._img, cmap='gray')
# Set event listeners
self._figure.canvas.mpl_connect('button_press_event', self._onclick)
def _setup_axes(self):
grid = gridspec.GridSpec(2, 5)
self._img_ax = self._create_axes('Img. Original', grid[:, :2])
self._object_ax = self._create_axes('Img. Objeto', grid[0:1, 2:3])
self._background_ax = self._create_axes('Img. Fondo', grid[1:2, 2:3])
self._result_ax = self._create_axes('Img. Segmentada', grid[:, 3:])
def _setup_gui(self):
self._object_btn = Button(self._figure.add_axes([0.1, 0.05, 0.2, 0.06]), 'Selec. Objeto')
self._background_btn = Button(self._figure.add_axes([0.3, 0.05, 0.2, 0.06]), 'Selec. Fondo')
self._training_btn = Button(self._figure.add_axes([0.5, 0.05, 0.2, 0.06]), 'Entrenar')
self._test_btn = Button(self._figure.add_axes([0.7, 0.05, 0.2, 0.06]), 'Probar')
self._object_btn.on_clicked(self._on_select_object)
self._background_btn.on_clicked(self._on_select_background)
self._training_btn.on_clicked(self._on_train)
self._test_btn.on_clicked(self._on_test)
def _create_axes(self, title, spec):
axes = self._figure.add_subplot(spec)
axes.set_title(title)
axes.set_axis_off()
return axes
def _on_select_object(self, event):
print 'Seleccionar objeto'
self._cropping = 'object'
def _on_select_background(self, event):
print 'Seleccionar fondo'
self._cropping = 'background'
def _on_train(self, event):
if self._object_img and self._background_img:
images = [self._object_img, self._background_img]
training_set = generate_trainig_set(images)
self._mlp.train(
training_set,
learning_rate=0.05,
max_epochs=200,
min_error=0.003)
self._plot_result()
else:
print 'Debes seleccionar el objeto y el fondo'
def _on_test(self, event):
results_folder_name = 'Results'
test_folder = raw_input('Carpeta de imagenes a probar: ')
test_images = []
for file in os.listdir(test_folder):
filename, ext = file.split('.')
if ext == 'jpg' or ext == 'png' or ext == 'jpeg':
test_images.append(file)
if not os.path.exists(results_folder_name):
os.makedirs(results_folder_name)
#Process all the images to test
for image in test_images:
new_test_image = Image.open(test_folder + '/' + image).convert('L')
pixels = new_test_image.load()
ncols, nrows = new_test_image.size
#Process test image
for col in range(ncols):
for row in range(nrows):
inputs = get_neighbours_values(new_test_image, (row, col))
output, = self._mlp.test(inputs)
if output == 0:
pixels[col, row] = 255
else:
pixels[col, row] = 0
#save segmented image
filename, ext = image.split('.')
new_image_name = filename + '__result__.' + ext
new_test_image.save(results_folder_name + '/' + new_image_name)
print 'Pruebas terminadas'
def _plot_result(self):
result_img = self._img.copy()
pixels = result_img.load()
ncols, nrows = result_img.size
for col in range(ncols):
for row in range(nrows):
inputs = get_neighbours_values(self._img, (row, col))
output, = self._mlp.test(inputs)
if output == 0:
pixels[col, row] = 1
else:
pixels[col, row] = 0
self._result_ax.imshow(result_img, cmap='gray')
def _onclick(self, event):
if (self._cropping
and event.inaxes
and event.inaxes == self._img_ax):
# Get position
x = int(event.xdata)
y = int(event.ydata)
self._area.extend([x, y])
if len(self._area) == 4:
try:
self._crop()
self._exit_cropping_mode()
except:
print 'Error al recortar imagen!'
self._area = []
def _crop(self):
cropped = self._img.crop(self._area)
if self._cropping == 'object':
self._object_img = cropped
self._object_ax.imshow(cropped, cmap='gray')
else:
self._background_img = cropped
self._background_ax.imshow(cropped, cmap='gray')
self._figure.canvas.draw()
def _exit_cropping_mode(self):
self._cropping = None
if __name__ == '__main__':
segmentation = ImageSegmentation('1.jpg')
plt.show()
|
import os
import sys
import mysql.connector
import subprocess
import colors
import requests
from requests.exceptions import HTTPError
import urllib.request
dbName = 'craft_update_test'
dbHost = 'localhost'
dbUser = 'root'
dbPass = 's4mb4lb1J'
adminEmail = "craft@oberon.nl"
adminName = "admin"
adminPass = "secret"
serverUrl = 'localhost:8000'
mydb = mysql.connector.connect(
host=dbHost,
user=dbUser,
passwd=dbPass
)
mycursor = mydb.cursor()
# try:
# colors.green("Removing database %s if it exists" % dbName)
# mycursor.execute("DROP DATABASE %s" % dbName)
# except:
# colors.cyan("Database %s does not yet exist" % dbName)
#
# colors.green("Creating database %s" % dbName)
# try:
# mycursor.execute("CREATE DATABASE %s" % dbName)
# except:
# colors.red("Could not create Database %s" % dbName)
# sys.exit(1)
#
# # setup database config
# setupDbCmd = "php craft setup/db --interactive=0 --driver=mysql --server=\"%s\" --database=\"%s\" --user=\"%s\" --password=\"%s\"" % (dbHost, dbName, dbUser, dbPass)
#
# # install craft
# installCmd = "php craft install --interactive=0 --email=\"%s\" --username=\"%s\" --password=\"%s\" --siteName=craftTest --siteUrl=\"$SITE_URL\" --language=\"en\"" % (adminEmail, adminName, adminPass)
#
# # project sync command
# projectSyncCmd = "php craft project-config/sync"
# clearCacheCmd = "php craft clear-caches/all"
#
# colors.green("Setting up the database in .env")
# os.system(setupDbCmd)
# colors.green("Installing craft")
# os.system(installCmd)
# os.system(projectSyncCmd)
# os.system(clearCacheCmd)
# colors.green("Done installing craft")
# start php server (This should be in a docker container)
colors.green("Starting PHP server")
phpServer = subprocess.Popen(["php", "-S", serverUrl, "-t", "web/"], stdout=subprocess.PIPE)
# perform some url calls, there should be a working installation now
url = 'http://%s/' % serverUrl
# call all existing routes
try:
response = requests.get('%s/actions/route-map/routes/get-all-urls' % url)
# If the response was successful, no Exception will be raised
response.raise_for_status()
except HTTPError as http_err:
colors.red(f'HTTP error occurred: {http_err}')
sys.exit(1)
except Exception as err:
colors.red(f'Other error occurred: {err}') # Python 3.6
sys.exit(1)
else:
directory = 'data/'
allLinks = response.json()
print(response.json())
if not os.path.exists(directory):
os.makedirs(directory)
for link in allLinks:
if link == '/':
link = ''
resp = requests.get(url + link)
f = open(r'%s%s.txt' % (directory, link.replace("/", "_")), "w+")
        f.write(resp.text)
f.close()
# update composer
# colors.green("Performing composer update")
# os.system('composer update')
# phpServer.terminate()
# phpServer.kill()
# colors.green("Dropping database")
# mycursor.execute("DROP DATABASE craft_update_test")
|
# -*- coding: utf-8 -*-
import logging
TRACE = 5
logging.addLevelName(logging.FATAL, "FATAL")
logging.addLevelName(logging.WARN, "WARN")
logging.addLevelName(TRACE, "TRACE")
def _trace(self, msg, *args, **kwargs):
self.log(TRACE, msg, *args, **kwargs)
logging.Logger.trace = _trace
class ColorfulFormatter(logging.Formatter):
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(30, 38)
COLOR_CODE = "\033[1;%dm"
RESET_CODE = "\033[0m"
NAME_CODE = COLOR_CODE % CYAN
LEVEL_COLOR = {
"FATAL": COLOR_CODE % RED,
"ERROR": COLOR_CODE % RED,
"WARN": COLOR_CODE % YELLOW,
"INFO": COLOR_CODE % BLUE,
"DEBUG": COLOR_CODE % MAGENTA,
"TRACE": COLOR_CODE % WHITE
}
def format(self, record):
# record = self.reset_levelname(record)
color = self.LEVEL_COLOR.get(record.levelname)
if color:
record.levelname = color + record.levelname + self.RESET_CODE
record.name = self.NAME_CODE + record.name + self.RESET_CODE
return super().format(record)
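# A minimal wiring sketch (runs only when this module is executed directly): attach the
# formatter to a stream handler and exercise each level, including the custom TRACE level.
if __name__ == "__main__":
    handler = logging.StreamHandler()
    handler.setFormatter(ColorfulFormatter("%(name)s %(levelname)s %(message)s"))
    logger = logging.getLogger("demo")
    logger.addHandler(handler)
    logger.setLevel(TRACE)
    logger.info("info message")
    logger.warning("warn message")
    logger.trace("trace message")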
|
# -*- coding: utf-8 -*-
"""
* @file mtq.py
* @author Gustavo Diaz H.
* @date 22 May 2020
* @brief Simple model of a Magnetorquer actuator
"""
import numpy as np
class MTQ(object):
def __init__(self, R, L, A, N):
self.R = R
self.L = L
self.A = A
self.N = N
self.i = 0
self.m = 0
def main_routine(self, V, dt):
self.rungeKutta(self.i, V, dt)
def dynamics(self, i, V):
di = V/self.L - (self.R/self.L)*i
return di
def rungeKutta(self, i, V, dt):
"""
* Runge-Kutta method to integrate the electric current differential equation
*
* @param i float Actual electric current state
* @param V float Actual input Voltage
* @param dt float integration time step
* @update i float Next electric current state
"""
k1 = self.dynamics(i, V)
k2 = self.dynamics(i + 0.5*dt*k1, V)
k3 = self.dynamics(i + 0.5*dt*k2, V)
k4 = self.dynamics(i + dt*k3, V)
i_next = i + dt*(k1 + 2*(k2+k3)+k4)/6.0
self.i = i_next
self.m = self.N * self.i * self.A
def reset(self):
self.i = 0
self.m = 0
if __name__ == '__main__':
# TEST
import numpy as np
import matplotlib.pyplot as plt
import time
from step import Step
from pwm import PWM
from current_sensor import CurrentSensor
from magnetometer import Magnetometer
# MTQ Model
R = 145.9 #[Ohm]
L = 10.08e-3 #[H]
A = 46.82e-4 #[m2]
l = np.sqrt(A) #[m]
N_coil = 25*10 #[number of turns]
tau = L/R #[s]
mtq = MTQ(R, L, A, N_coil)
# Time parameters for simulation
tf = 1
dt = 0.1*tau
N = int(tf/dt)
t = np.linspace(0, tf, N)
# Voltage Signal parameters
#Step Response
V = 3.3 #[V]
delay = 0.2*tf #[s]
# voltage = Step(0, tf, dt, V, delay).signal
#PWM input
freq = 100 #[Hz]
duty = 30 #[%]
voltage = PWM(0, tf, dt, V, freq, duty).signal
# MTQ Data Storage
i_data = np.zeros(N)
m_data = np.zeros(N)
B_data = np.zeros(N)
m_calc = np.zeros(N)
# Sensors
# Electric current
i_std = 22e-5 #[A]
i_bias = 22e-6 #[A]
current_sensor = CurrentSensor(i_bias, i_std)
# Magnetometer
B_std = 1 #[uT]
B_bias = 1e-1 #[uT]
magnetometer = Magnetometer(B_bias, B_std)
z0 = 16e-3 #[m]
for i in range(0, N):
# Process data
mtq.main_routine(voltage[i], dt)
# Data of interest
i_data[i] = current_sensor.measure(mtq.i)
m_data[i] = mtq.m
B_data[i] = magnetometer.measure(z0, mtq.i, l, N_coil)
m_calc[i] = (5)*B_data[i]*z0**3
# Data Visualization
from monitor import Monitor
v_mon = Monitor([t], [voltage], "MTQ input voltage", "V[V]", "time[s]", sig_name = ["V"])
v_mon.plot()
i_mon = Monitor([t], [i_data], "MTQ electric current", "i[A]", "time[s]", sig_name = ["i"])
i_mon.plot()
B_mon = Monitor([t], [B_data], "MTQ magnetic field", "B[uT]", "time[s]", sig_name = ["B"])
B_mon.plot()
mCalc_mon = Monitor([t], [m_calc], "MTQ calculated magnetic moment", "m[Am2]", "time[s]", sig_name = ["m"])
mCalc_mon.plot()
m_mon = Monitor([t], [m_data], "MTQ magnetic moment", "m[Am2]", "time[s]", sig_name = ["m"])
m_mon.plot()
m_mon.show()
|
from django.db import models
from django.contrib.gis.db import models
# Create your models here.
class Incidence(models.Model):
name = models.CharField(max_length=30)
location = models.PointField(srid=4326)
objects = models.Manager()
def __str__(self):
return self.name
class Meta:
verbose_name_plural = 'Incidences'
class Country(models.Model):
counties = models.CharField(max_length=25)
codes = models.IntegerField()
cty_code = models.CharField(max_length=24)
dis = models.IntegerField()
geom = models.MultiPolygonField(srid=4326)
def __str__(self):
return self.counties
class Meta:
verbose_name_plural = 'Countries'
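# A query sketch (illustrative only, not part of these models): GeoDjango point fields
# support distance lookups, e.g. incidences within 5 km of a reference point:
#
#   from django.contrib.gis.geos import Point
#   from django.contrib.gis.measure import D
#
#   ref = Point(36.8219, -1.2921, srid=4326)   # hypothetical coordinates
#   nearby = Incidence.objects.filter(location__distance_lte=(ref, D(km=5)))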
|
# Test that shows PyObject_GetBuffer does copy when called
from mypybuffer import MyPyBuffer
import os
xtc_file = '/cds/home/m/monarin/lcls2/psana/psana/tests/test_data/dgramedit-test.xtc2'
fd = os.open(xtc_file, os.O_RDONLY)
size = 2643928 # size of dgrampy-test.xtc2
view = bytearray(os.read(fd, size))
offset = 0
cn_dgrams = 0
mpb = MyPyBuffer()
while offset < size:
mpb.get_buffer(view[offset:])
# To demonstrate that copy happens, we obtained the view ptr from the line
# above and here we'll replace the content of that view. You'll see that
# print_dgram() still prints out the old content.
if cn_dgrams == 1:
view[offset:] = config_bytes
# Uncomment below to see that we need to call PyObject_GetBuffer again
# to update view ptr
#mpb.free_buffer()
#mpb.get_buffer(view[offset:])
mpb.print_dgram()
# Save config so that we can use it to replace the second dgram
if offset == 0:
config_bytes = view[: mpb.dgram_size]
offset += mpb.dgram_size
cn_dgrams += 1
mpb.free_buffer()
if cn_dgrams == 2:
break
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Method 1: count instances using a static method ------------------------------------------------
class Spam(object):
numInstances = 0
def __init__(self):
Spam.numInstances += 1
@staticmethod
def printNumInstances():
print('Numbers of instances created: ', Spam.numInstances)
class Sub(Spam):
@staticmethod
def printNumInstances():
print('Extra stuff...')
# super().printNumInstances()
        # The statement above would raise an error, because printNumInstances() in the superclass is a static method
Spam.printNumInstances()
class Other(Spam):
pass
a = Spam()
b = Spam()
c = Spam()
a.printNumInstances()
Spam.printNumInstances()
d = Sub()
e = Sub()
d.printNumInstances()
Sub.printNumInstances()
Spam.printNumInstances()
f = Other()
f.printNumInstances()
Other.printNumInstances()
Spam.printNumInstances()
print('-' * 30, '测试分割线', '-' * 30)
# Method 2: count instances using a class method --------------------------------------------------
class Spam(object):
numInstances = 0
def __init__(self):
Spam.numInstances += 1
@classmethod
def printNumInstances(cls):
print('Numbers of instances created: ', cls.numInstances)
        # Here the class is accessed directly through cls
class Sub(Spam):
@classmethod
def printNumInstances(cls):
print('Extra stuff...')
super().printNumInstances()
        # This does not raise an error, because printNumInstances() in the superclass is a class method and can be called via super()
class Other(Spam):
pass
a = Spam()
b = Spam()
c = Spam()
a.printNumInstances()
Spam.printNumInstances()
d = Sub()
e = Sub()
d.printNumInstances()
Sub.printNumInstances()
Spam.printNumInstances()
f = Other()
f.printNumInstances()
Other.printNumInstances()
Spam.printNumInstances()
|
"""
from autobahn.wamp.exception import ApplicationError
from autobahn.twisted.wamp import ApplicationSession
from twisted.internet.defer import inlineCallbacks
from twisted.python import log
"""
import autobahn
import copy
from model.session.wamp import WampTransport, WampSession
import wamp
class SessionLink:
def __init__(self, realm):
self.realm = realm
@autobahn.wamp.subscribe('wamp.session.on_join')
def on_session_join(self, details):
print('join - {}'.format(self.realm))
session = WampSession.fromDetails(details)
conn = wamp.getConnectionManager()
con = conn.get()
try:
session.create(con)
con.commit()
finally:
conn.put(con)
@autobahn.wamp.subscribe('wamp.session.on_leave')
def on_session_leave(self, sid):
print('leave - {}'.format(self.realm))
sid = str(sid)
conn = wamp.getConnectionManager()
con = conn.get()
try:
sessions = WampSession.findById(con, sid)
for s in sessions:
s.destroy(con)
con.commit()
finally:
conn.put(con)
class WampSessionComponent(wamp.SystemComponentSession):
def getWampComponents(self):
return [SessionLink(self.config.realm)]
|
from flask_classful import FlaskView, route
from flask import Flask
app = Flask(__name__)
class TestView(FlaskView):
def index(self):
return "<h1>Index</h1>"
    @route('/hello/<world>/')
    def basicname(self, world):
        return f"<h1>hello world {world}</h1>"
TestView.register(app, route_base='/')
if __name__ == "__main__":
app.run(debug=True)
|
import torch
from torch import device
import torch.nn as nn
from torch.utils.data.dataloader import DataLoader
import time
import copy
class AugTrainer:
def __init__(self,
model: nn.Module,
loss_fn: nn.Module,
optimizer: nn.Module,
target_loader: DataLoader,
attack_loader: DataLoader,
val_loader: DataLoader,
num_epoch: int = 100
):
self.model = model
self.loss_fn = loss_fn
self.optimizer = optimizer
self.target_loader = target_loader
self.attack_loader = attack_loader
self.val_loader = val_loader
self.num_epoch = num_epoch
def _train_epoch(self, epoch):
print('Epoch {}/{}'.format(epoch + 1, self.num_epoch))
print('-' * 10)
for batch_idx, (targets, attacks) in enumerate(zip(self.target_loader, self.attack_loader)):
target_imgs, target_labels = targets
attack_imgs, attack_labels = attacks
target_imgs, target_labels = target_imgs.cuda(), target_labels.cuda()
attack_imgs, attack_labels = attack_imgs.cuda(), attack_labels.cuda()
            with torch.no_grad():
                # internal representations of the attack and target batches
                sk_attack = get_internal_representation(self.model, attack_imgs)
                sk_target = get_internal_representation(self.model, target_imgs)
            self.optimizer.zero_grad()
            outputs = self.model(target_imgs)
            loss_ce = self.loss_fn(outputs, target_labels)
            dist = torch.dist(sk_target, sk_attack)
            loss_term = d_tar - dist  # d_tar and lamb are hyperparameters supplied by the surrounding project
            loss = loss_ce + lamb * loss_term
            loss.backward()
            self.optimizer.step()
            print(loss_ce.data, loss_term.data)
            print('batch :', batch_idx, 'loss :', loss.data)
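    # Note: get_internal_representation, d_tar and lamb are assumed to be provided by the
    # surrounding project (they are not defined in this file). A typical, hypothetical
    # implementation of get_internal_representation captures an intermediate layer's
    # activations with a forward hook, e.g.:
    #
    #   def get_internal_representation(model, imgs):
    #       feats = {}
    #       handle = model.layer4.register_forward_hook(        # "layer4" is an assumed layer name
    #           lambda module, inp, out: feats.setdefault("z", out.flatten(1)))
    #       model(imgs)
    #       handle.remove()
    #       return feats["z"]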
|
import pandas as pd
from sklearn.preprocessing import StandardScaler, MinMaxScaler, Imputer, LabelEncoder
from sklearn.feature_selection import VarianceThreshold
import numpy as np
def data_preprocess(url,feat_cols, lab_cols):
tt = pd.read_csv(url)
for col in feat_cols:
        if tt[col].dtype.kind in ('f', 'i'):
            imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
            tt[col] = imp.fit_transform(tt[[col]]).ravel()
else:
le = LabelEncoder()
tt[col] = le.fit_transform(tt[col])
feat_scaler = MinMaxScaler()
tt[feat_cols] = feat_scaler.fit_transform(tt[feat_cols])
features = tt[feat_cols].values
labels = tt[lab_cols].values
#Seperating Training and testing data
diff_amount = int(round(len(features) * 0.75))
features_train = features[:diff_amount]
labels_train = labels[:diff_amount]
features_test = features[diff_amount:]
labels_test = labels[diff_amount:]
return features_train, labels_train, features_test, labels_test
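# A hypothetical call sketch (the CSV name and column names are made up for illustration):
#
#   X_train, y_train, X_test, y_test = data_preprocess(
#       "passengers.csv",
#       feat_cols=["age", "fare", "sex"],
#       lab_cols=["survived"])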
|
from sklearn.datasets import *
from sklearn.model_selection import train_test_split, cross_val_score
import numpy as np
import math
def lasso_regression_penalty(l1_lambda, feature_weights):
""" l2_lambda is between 0 and positive infinity """
try:
lasso_penalty = l1_lambda * sum(map(lambda x: abs(x), feature_weights))
except ValueError:
raise ValueError("Lambda should be a value between 0 and positive infinity")
return lasso_penalty
def ridge_regression_penalty(l2_lambda, feature_weights):
""" l2_lambda is between 0 and positive infinity """
try:
ridge_penalty = l2_lambda * sum(map(lambda x: x**2, feature_weights))
except ValueError:
raise ValueError("Lambda should be a value between 0 and positive infinity")
return ridge_penalty
def elasticnet_regression_penalty(lambda1, lambda2, feature_weights):
""" elasticnet regression is just the addition of lasso and ridge regressions
:params lambda1: regularization parameter for LASSO regression penalty
:params lambda2: regularization parameter for Ridge regression penalty
"""
return lasso_regression_penalty(lambda1, feature_weights) + ridge_regression_penalty(lambda2, feature_weights)
def average(X):
""" The average function without the looping and indexing through a list"""
res = 0
for x in X:
res += x
res = res / len(X)
return res
def variance(values):
return sum([(x - average(values)) ** 2 for x in values]) / len(values)
def std_dev(X):
""" standard deviation implementation dependent on the variance calculation """
return math.sqrt(variance(X))
def covariance(X, Y):
""" Covariance is a generatlization of correlation. Correlation describes the relationship between
two groups of numbers, whereas covariance describes it between two or more groups of numbers
return:
sum((x(i) - mean(x)) * (y(i) - mean(y)))
"""
res = 0
for x, y in zip(X, Y):
res += (x - average(X)) * (y - average(Y))
return res
def coeff(X, Y):
""" Estimate the weigtht coefficients of each predictor variable """
return covariance(X, Y) / variance(X)
def intercept(X, Y):
""" calculate the y-intercept of the linear regression function """
return average(Y) - coeff(X, Y) * average(X)
def simple_linear_regression(X_train, X_test, y_train, y_test, random_error=np.random.random(), regularizer=None):
""" Simple Linear Regression function is a univariate regressor"""
    b0, b1 = intercept(X_train, y_train), coeff(X_train, y_train)
    # np.append returns a new array, so collect the predictions and convert once
    y_pred = np.array([b0 + b1 * x_test + random_error for x_test in X_test])
    return y_pred
def root_mean_squared_error(actual, predicted, regularizer=None):
""" Loss function by which we use to evaluate our SLR model """
sum_error = 0
for act, pred in zip(actual, predicted):
prediction_error = act - pred
sum_error += prediction_error ** 2
mean_error = sum_error / len(actual)
error = math.sqrt(mean_error)
# regularization... find somewhere to plug in the lambda values into the function depending on the regularization
if regularizer == "l1":
error = error + lasso_regression_penalty(lambda1, b1)
elif regularizer == "l2":
error = error + ridge_regression_penalty(lambda2, b1)
elif regularizer == "elasticnet":
error = error + elasticnet_regression_penalty(lambda1, lambda2, b1)
return error
def evaluate_SLR(dataset, algorithm):
test_set = list()
for row in dataset:
row_copy = list(row)
row_copy[-1] = None
test_set.append(row_copy)
predicted = algorithm(dataset, test_set)
print(predicted)
actual = [row[-1] for row in dataset]
rmse = root_mean_squared_error(actual, predicted)
return rmse
# test simple linear regression
iris = load_iris()
X = iris.data
y = iris.target
feature_1_x = X[:, 0]
print(feature_1_x)
mse_ridge = lasso_regression_penalty(0.2, feature_1_x)
print("The MSE after the ridge regression is: {}".format(mse_ridge))
# train_dataset = [list(i) for i in zip(dog_heights_train, dog_weights_train)]
# test_dataset = [list(i) for i in zip(dog_heights_test, dog_weights_test)]
# # fitting the SLR to get predictions
# y_pred = simple_linear_regression(
# dog_heights_train,
# dog_heights_test,
# dog_weights_train,
# dog_weights_test,
# random_error=np.random.rand(1),
# )
# print(
# "This is the prediction of dog weights "
# "given new dog height information from the "
# "learned coefficients: {}\n".format(y_pred)
# )
# # evaluating the performance of the SLR
# rmse = root_mean_squared_error(dog_weights_test, y_pred)
# print("RMSE between the predicted and actual dog_weights is : {0:.3f}\n".format(rmse))
|
from resources.employees import Employee
bob = Employee("bob",20000,567)
print(bob.basicSalary)
print(bob.payeTax)
print(bob.nhif)
print(bob.nssf)
print(bob.personal_relief)
print(bob.tax_charged)
|
import argparse
import glob
import os
import random
import sys
import time
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from torch.utils.data import DataLoader
from dataset import CocoDataset, EmbedDataset
from model import CaptionEncoder, ImageEncoder
from utils import collater, sec2str
from vocab import Vocabulary
def main():
args = parse_args()
transform = transforms.Compose(
[
transforms.Resize((args.imsize, args.imsize)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
if args.dataset == "coco":
val_dset = CocoDataset(root=args.root_path, split="val", transform=transform,)
val_loader = DataLoader(
val_dset,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.n_cpu,
collate_fn=collater,
)
vocab = Vocabulary(max_len=args.max_len)
vocab.load_vocab(args.vocab_path)
imenc = ImageEncoder(args.out_size, args.cnn_type)
capenc = CaptionEncoder(len(vocab), args.emb_size, args.out_size, args.rnn_type)
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
imenc = imenc.to(device)
capenc = capenc.to(device)
assert args.checkpoint is not None
print("loading model and optimizer checkpoint from {} ...".format(args.checkpoint), flush=True)
ckpt = torch.load(args.checkpoint, map_location=device)
imenc.load_state_dict(ckpt["encoder_state"])
capenc.load_state_dict(ckpt["decoder_state"])
begin = time.time()
dset = EmbedDataset(val_loader, imenc, capenc, vocab, args)
print("database created | {} ".format(sec2str(time.time() - begin)), flush=True)
savedir = os.path.join("out", args.config_name)
if not os.path.exists(savedir):
os.makedirs(savedir, 0o777)
image = dset.embedded["image"]
caption = dset.embedded["caption"]
n_i = image.shape[0]
n_c = caption.shape[0]
    embeddings = np.concatenate([image, caption], axis=0)
emb_file = os.path.join(savedir, "embedding_{}.npy".format(n_i))
save_file = os.path.join(savedir, "{}.npy".format(args.method))
vis_file = os.path.join(savedir, "{}.png".format(args.method))
    np.save(emb_file, embeddings)
print("saved embeddings to {}".format(emb_file), flush=True)
dimension_reduction(emb_file, save_file, method=args.method)
plot_embeddings(save_file, n_i, vis_file, method=args.method)
def dimension_reduction(numpyfile, dstfile, method="PCA"):
    embeddings = np.load(numpyfile)
    begin = time.time()
    print("conducting {} on data...".format(method), flush=True)
    if method == "T-SNE":
        embeddings = TSNE(n_components=2).fit_transform(embeddings)
    elif method == "PCA":
        embeddings = PCA(n_components=2).fit_transform(embeddings)
    else:
        raise NotImplementedError()
    print("done | {} ".format(sec2str(time.time() - begin)), flush=True)
    np.save(dstfile, embeddings)
    print("saved {} embeddings to {}".format(method, dstfile), flush=True)
def plot_embeddings(numpyfile, n_v, out_file, method="PCA"):
    embeddings = np.load(numpyfile)
    assert embeddings.shape[1] == 2
    fig = plt.figure(clear=True)
    fig.suptitle("visualization of embeddings using {}".format(method))
    plt.scatter(embeddings[:n_v, 0], embeddings[:n_v, 1], s=2, c="red", label="image")
    plt.scatter(embeddings[n_v::5, 0], embeddings[n_v::5, 1], s=2, c="blue", label="caption")
    plt.xticks([])
    plt.yticks([])
    plt.legend()
    plt.savefig(out_file)
    print("saved {} plot to {}".format(method, out_file), flush=True)
def parse_args():
parser = argparse.ArgumentParser()
# configurations of dataset (paths)
parser.add_argument("--dataset", type=str, default="coco")
parser.add_argument("--root_path", type=str, default="/groups1/gaa50131/datasets/MSCOCO")
parser.add_argument("--vocab_path", type=str, default="captions_train2017.txt")
parser.add_argument(
"--method",
type=str,
default="PCA",
help="Name of dimensionality reduction method, should be {T-SNE | PCA}",
)
parser.add_argument(
"--config_name",
type=str,
default="embedding",
help="name of config, filename where to save",
)
# configurations of models
parser.add_argument("--cnn_type", type=str, default="resnet152")
parser.add_argument("--rnn_type", type=str, default="GRU")
# training config
parser.add_argument("--n_cpu", type=int, default=8)
parser.add_argument("--emb_size", type=int, default=300, help="embedding size of vocabulary")
parser.add_argument(
"--out_size", type=int, default=1024, help="embedding size for output vectors"
)
parser.add_argument("--max_len", type=int, default=30)
parser.add_argument("--no_cuda", action="store_true", help="disable gpu training")
# hyperparams
parser.add_argument(
"--imsize_pre", type=int, default=256, help="to what size to crop the image"
)
parser.add_argument("--imsize", type=int, default=224, help="image size to resize on")
parser.add_argument("--batch_size", type=int, default=128, help="batch size. irrelevant")
# retrieval config
parser.add_argument(
"--checkpoint",
type=str,
required=True,
help="Path to checkpoint, will load model from there",
)
args = parser.parse_args()
return args
if __name__ == "__main__":
main()
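# Example invocation (hedged: the script filename and all paths below are placeholders,
# not taken from the original repository):
#   python plot_embeddings.py --checkpoint out/best_model.ckpt \
#       --root_path /path/to/MSCOCO --vocab_path captions_train2017.txt --method T-SNE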
|
# -*- coding: utf-8 -*-
from typing import List
class Solution:
def pivotIndex(self, nums: List[int]) -> int:
left_sum, total_sum = 0, sum(nums)
for i, num in enumerate(nums):
if left_sum == total_sum - left_sum - num:
return i
left_sum += num
return -1
if __name__ == "__main__":
solution = Solution()
assert 3 == solution.pivotIndex([1, 7, 3, 6, 5, 6])
assert -1 == solution.pivotIndex([1, 2, 3])
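    # For reference, a hedged alternative sketch of the same prefix-sum idea using
    # itertools.accumulate (an illustration, not part of the original solution):
    from itertools import accumulate
    def pivot_index_accumulate(nums):
        total = sum(nums)
        # accumulate yields the running prefix sum including nums[i]; the pivot
        # condition compares the sums strictly left and strictly right of i.
        for i, prefix in enumerate(accumulate(nums)):
            if prefix - nums[i] == total - prefix:
                return i
        return -1
    assert 3 == pivot_index_accumulate([1, 7, 3, 6, 5, 6])
    assert -1 == pivot_index_accumulate([1, 2, 3])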
|
# -*- coding: utf-8 -*-
import json
from watson_developer_cloud import VisualRecognitionV3
import argparse
import vr_func
if (__name__ == "__main__"):
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--classifier", type=str, help="id of the classifier to apply (default is 'default')")
parser.add_argument("-t", "--threshold", type=float, help="{float} threshold to use for classifier (defaut 0.5)")
parser.add_argument("images", nargs="+", help="list of images to classify of images")
args = parser.parse_args()
    # the prints below show what was given as arguments; uncomment the second to also list the images
    print("classifier id given as argument: {}".format(args.classifier))
#print("images given as arguments: {}".format(args.images))
# default value for classifier
if args.classifier is None:
_vr_classifier_id = 'default'
else:
_vr_classifier_id = args.classifier
# default value for threshold
if args.threshold is None:
_threshold = 0.5
else:
_threshold = args.threshold
# obtains a connector to Watson V.R.
vr_connector = vr_func.vr_open()
# for each image in the list given as argument of the script
for image in args.images:
# uses function to obtain set of classes
ret_classes = vr_func.classify_image(vr_connector,
image,
_vr_classifier_id,
_threshold)
        # comment out the line below once you no longer need the raw JSON dump
print(json.dumps(ret_classes, indent=2))
# and just print that...
print("image '{}' classes: {}".format(image, vr_func.parse_classes(ret_classes)))
|
# Exercise 8.13 - Book
def letraValida(op):
    op = op.lower()
    while True:
        v = input('Enter a letter: ').lower()
        if v not in op:
            print('Try again!')
            continue
        else:
            break
    return v
letraValida('mf')
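# With the return added above, the validated letter can also be captured, e.g.:
#   choice = letraValida('mf')   # loops until the user types 'm' or 'f'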
|
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_excel("../data/data.xlsx", sheet_name="US_EU_CPI",
index_col=0, skiprows=[0, 1]).dropna()
dc = df.iloc[:, [1]].pct_change(periods=1)*100*12
dc['yoy'] = df.iloc[:, [1]].pct_change(periods=12)*100
dc['target'] = 2.0
dc.dropna(inplace=True)
dc.columns = ['mom (Annualized)', 'yoy', 'target']
de = df.iloc[:, [0]].pct_change(periods=1)*100*12
de['yoy'] = df.iloc[:, [0]].pct_change(periods=12)*100
de['target'] = 2.0
de.dropna(inplace=True)
de.columns = ['mom (Annualized)', 'yoy', 'target']
# charts
def gen_chart(df, title, y_title, date_ini):
""""""
plt.style.use("ggplot")
df_final = df[df.index >= date_ini]
fig = plt.figure()
ax = fig.add_subplot(111)
df_final.iloc[:, 1].plot(ax=ax, style="-o", color='red', linewidth=2, legend=True)
df_final.iloc[:, 0].plot(ax=ax, style="-o", color='orange', linewidth=2, legend=True)
df_final.iloc[:, 2].plot(ax=ax, style="--", color='blue', linewidth=1, legend=True)
    # axis tick labels
for label in ax.xaxis.get_ticklabels():
label.set_fontsize(14)
for label in ax.yaxis.get_ticklabels():
label.set_fontsize(14)
# title
ax.set_title(title, fontsize=24)
ax.title.set_position([.5,1.03])
ax.set_ylabel(y_title)
ax.set_xlabel('')
#margins
ax.margins(0.0, 0.2)
ax.set_xlim(ax.get_xlim()[0]-2, ax.get_xlim()[1]+ 2)
#legend
ax.legend(loc='lower left', fontsize=16)
# label
fig.tight_layout()
return fig
# cpi
date_ini = "2013-01-01"
fig_cpi = gen_chart(dc, "CPI Inflation - US", "%", date_ini)
fig_cpi.savefig("./cpi.png")
fig_cpi_eu = gen_chart(de, "CPI Inflation - EU", "%", date_ini)
fig_cpi_eu.savefig("./cpi_eu.png")
|
__author__ = "Narwhale"
def bubble_sort(alist):
"""冒泡排序"""
n = len(alist)
for j in range(0,n-1):
count = 0
for i in range(0,n-1-j):
if alist[i] > alist[i+1]:
alist[i],alist[i+1] = alist[i+1],alist[i]
count += 1
if count == 0:
return
#j=0,i=(0,n-1)
#j=1,i=(0,n-1-1)
#j=2,i=(0,n-1-2)
#j=3,i=(0,n-1-3)
li = [54,26,93,17,77,31,44,55,20]
bubble_sort(li)
print(li)
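# Sanity check for the early-exit optimization: an already-sorted list triggers no
# swaps on the first pass, so the function returns after a single scan.
already_sorted = [1, 2, 3, 4, 5]
bubble_sort(already_sorted)
print(already_sorted)  # -> [1, 2, 3, 4, 5]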
|