text stringlengths 8 6.05M |
|---|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
::
In [36]: run textgrid.py
E0 C1 C2 C3 C4 C5 C6 C7 C8 F0
E1 A1 . . . . . . B8 F1
E2 . A2 . . . . B7 . F2
E3 . . A3 . . B6 . . F3
E4 . . . A4 B5 . . . F4
E5 . . . B4 A5 . . . F5
E6 . . B3 . . A6 . . F6
E7 . B2 . . . . A7 . F7
E8 B1 . . . . . . A8 F8
E9 D1 D2 D3 D4 D5 D6 D7 D8 F9
"""
import logging
log = logging.getLogger(__name__)
import numpy as np
class T(np.ndarray):
    """2-D object ndarray that renders itself as an aligned text table.

    Formatting attributes (itemfmt/rowjoin/empty) are attached by ``init``
    and propagated through views/slices by ``__array_finalize__``.
    """

    @classmethod
    def init(cls, a, itemfmt="%3s", rowjoin="\n", empty=""):
        """Wrap the 2-D array *a* as a T and attach formatting attributes.

        :param a: 2-D array (typically dtype=object) holding cell values
        :param itemfmt: %-format applied to every cell
        :param rowjoin: separator between formatted rows
        :param empty: text substituted for None cells
        """
        assert len(a.shape) == 2, a
        t = a.view(cls)
        t.itemfmt = itemfmt
        t.rowjoin = rowjoin
        t.empty = empty
        return t

    def __array_finalize__(self, obj):
        # Bug fix: without this, any view/slice of a T (e.g. t[0:1]) lacked
        # itemfmt/rowjoin/empty and repr() raised AttributeError.  Copy the
        # attributes from the source array, falling back to init's defaults.
        if obj is None:
            return
        self.itemfmt = getattr(obj, "itemfmt", "%3s")
        self.rowjoin = getattr(obj, "rowjoin", "\n")
        self.empty = getattr(obj, "empty", "")

    def __repr__(self):
        # format one row: None cells become self.empty, everything else is
        # pushed through itemfmt; cells are joined by a single space
        row_ = lambda r: " ".join(map(lambda _: self.itemfmt % (_ if _ is not None else self.empty), r))
        tab_ = lambda a: self.rowjoin.join(map(row_, a))
        return tab_(self)
class TextGrid(object):
    """A ni x nj grid of optional text labels, printable as an aligned table.

    ``self.a`` is the raw object array to write labels into; ``self.t`` is
    the same memory viewed as a T, which handles the formatting.
    """

    def __init__(self, ni, nj, **kwa):
        """
        :param ni: number of rows
        :param nj: number of columns
        :param kwa: formatting options forwarded to T.init
                    (itemfmt, rowjoin, empty)

        The grid starts as None at every cell (np.empty of dtype object);
        zeros are deliberately avoided as they would render at every spot.
        """
        # bug fix: the np.object alias was removed in NumPy 1.24 --
        # the builtin ``object`` is the supported spelling
        a = np.empty((ni, nj), dtype=object)
        t = T.init(a, **kwa)
        self.a = a
        self.t = t

    def __str__(self):
        return repr(self.t)
if __name__ == '__main__':
    pass
    # demo: reproduce the 10x10 figure from the module docstring --
    # two diagonals (A/B), top/bottom rows (C/D), left/right columns (E/F)
    n = 10
    tg = TextGrid(n,n, itemfmt="%3s", rowjoin="\n\n", empty=".")
    for k in range(n):
        tg.a[k,k] = "A%d"%k # diagonal from top-left to bottom-right
        tg.a[n-1-k,k] = "B%d"%k # diagonal from bottom-left to top right
        tg.a[0,k] = "C%d"%k # top row
        tg.a[-1,k] = "D%d"%k # bottom row
        tg.a[k,0] = "E%d"%k # left column
        tg.a[k,-1] = "F%d"%k # right column
    pass
    print(tg)
|
# Author: Aline Villegas Berdejo
# Computes the average speed of a trip from user-supplied time and distance.
tiempo = int(input("Teclea el tiempo del viaje en horas: "))
distancia = int(input("Teclea la distancia del viaje en kilometros: "))
# bug fix: average speed is distance / time; the original computed
# tiempo / distancia, i.e. hours per kilometre, while printing "km/h"
velocidad = distancia / tiempo
print("La Velocidad Promedio es: ", velocidad, "km/h")
|
import numpy as np
import torch
from network import MultiImageAlexNet
import pickle
from data_utils import FullAppImageDataset
import torch.utils.data as td
# Inference configuration: encode the first `end` dataset items to codes.
end = 10000
image_dir = "./data/images"
json_dir = "./data/scraped"
class_map_file = "./data/class_maps/tags_map.pkl"

dat = FullAppImageDataset(image_dir, json_dir, end=end)
n_examples = len(dat)  # informational only (original typo "n_exmaples" fixed)
# close the pickle file instead of leaking the handle
with open(class_map_file, "rb") as fh:
    class_map = pickle.load(fh)
n_classes = len(class_map)
data_loader = td.DataLoader(dat, num_workers=4, pin_memory=True)

net = MultiImageAlexNet(num_classes=n_classes)
net.load_state_dict(torch.load("checkpoint.saved.pth.tar")['state_dict'])
net.cuda()
# inference mode: disable dropout / batch-norm statistic updates
net.eval()

ids = []
codes = []
n_total = len(data_loader)
n_processed = 0
# no_grad: without it autograd retains a graph for every batch and memory
# grows over the whole dataset
with torch.no_grad():
    for id, imgs in data_loader:
        # the model lives on the GPU (net.cuda() above), so the batch must
        # be moved there too or the forward pass raises a device mismatch
        y = net.encode(imgs.cuda())
        ids.append(id)
        codes.append(y.squeeze(0).data.cpu().numpy())
        n_processed += 1
        if n_processed % 100 == 0:
            print("processed {0}/{1}".format(n_processed, n_total))

ids_arr = np.array(ids)
codes_arr = np.array(codes)
np.save("./data/ids_arr", ids_arr)
np.save("./data/codes_arr", codes_arr)
|
import pygame
import global_variables as gv
# Window-relative placement of the two corner ("big") health bars.
top_big_healthbar_l_ratio = 1 / 20      # vertical offset as a fraction of window length
left_big_healthbar_w_ratio = 1 / 40     # left bar: fraction of window width from the left edge
right_big_healthbar_w_ratio = 22 / 40   # right bar: fraction of window width from the left edge
# Pre-computed pixel placements (derived once from the window size).
top_left_health_border_x_placement = round(gv.WINDOW_W * left_big_healthbar_w_ratio)
top_left_health_border_y_placement = round(gv.WINDOW_L * top_big_healthbar_l_ratio)
top_right_health_border_x_placement = round(gv.WINDOW_W * right_big_healthbar_w_ratio)
top_right_health_border_y_placement = round(gv.WINDOW_L * top_big_healthbar_l_ratio)
def _draw_corner_healthbar(surface, x, y, health):
    """Draw one bordered corner health bar at (x, y): 1px black border,
    full-width red background, green foreground scaled to current health."""
    pygame.draw.rect(surface, gv.BLACK, (round(x) - 1,
                                         round(y) - 1,
                                         round(gv.PLAYER_STARTING_HEALTH / 2) + 2, 20 + 2))
    pygame.draw.rect(surface, gv.RED, (round(x),
                                       round(y),
                                       gv.PLAYER_STARTING_HEALTH / 2, 20))
    pygame.draw.rect(surface, gv.GREEN, (round(x),
                                         round(y),
                                         round(health / 2), 20))


def display_health(surface, vehicle):
    """Draw the small health bar hovering over *vehicle*, plus its big
    corner status bar (top-right for "player2", top-left otherwise).

    The two corner bars were duplicated code in the original; both now go
    through _draw_corner_healthbar.
    """
    # health on top of car: black border, red background, green fill
    pygame.draw.rect(surface, gv.BLACK, (vehicle.x - round(gv.WINDOW_W/120) - gv.WINDOW_W/600, vehicle.y +
                                         round(vehicle.l * 4 / 5) - gv.WINDOW_L/432,
                                         round(gv.WINDOW_W / 24) + 2, 3 + 2))
    pygame.draw.rect(surface, gv.RED, (vehicle.x - round(gv.WINDOW_W/120), vehicle.y + round(vehicle.l * 4 / 5),
                                       round(gv.WINDOW_W / 24), 3))
    # NOTE(review): the green overlay is anchored at x-10 while the red bar
    # uses x-round(WINDOW_W/120); looks inconsistent -- confirm intended.
    pygame.draw.rect(surface, gv.GREEN, (vehicle.x - 10, vehicle.y + round(vehicle.l * 4 / 5),
                                         vehicle.health / 20, 3))
    if vehicle.movement_pattern == "player2":
        # health in top right
        _draw_corner_healthbar(surface, top_right_health_border_x_placement,
                               top_right_health_border_y_placement, vehicle.health)
    else:
        # health in top left
        _draw_corner_healthbar(surface, top_left_health_border_x_placement,
                               top_left_health_border_y_placement, vehicle.health)
|
import FaBo9Axis_MPU9250
import time
import sys
import math
#mpu9250 = FaBo9Axis_MPU9250.MPU9250()
def init():
    """Create and return a FaBo9Axis MPU9250 sensor handle.

    Instantiating MPU9250() opens the underlying I2C device.
    """
    mpu9250 = FaBo9Axis_MPU9250.MPU9250()
    return mpu9250
def angle(mpu9250):
    """Return [x_tilt, y_tilt] in degrees, estimated from the accelerometer.

    The gyro and magnetometer are read as well (preserving the original
    sensor traffic) but only the accelerometer feeds the result.

    :param mpu9250: sensor handle exposing readAccel/readGyro/readMagnet
    :return: two-element list of tilt angles in degrees
    """
    accel = mpu9250.readAccel()
    gyro = mpu9250.readGyro()
    mag = mpu9250.readMagnet()

    ax = accel['x']
    ay = accel['y']
    az = accel['z']
    # same arithmetic (and the same hard-coded pi) as the original
    tilt_x = math.atan(ay / math.sqrt(pow(ax, 2) + pow(az, 2))) * 180 / 3.141592654
    tilt_y = math.atan(-1 * ax / math.sqrt(pow(ay, 2) + pow(az, 2))) * 180 / 3.141592654
    return [tilt_x, tilt_y]
#mpu9250=init()
#angle(mpu9250)
'''
try:
while True:
accel = mpu9250.readAccel()
#print " ax = " , ( accel['x'] )
#print " ay = " , ( accel['y'] )
#print " az = " , ( accel['z'] )
gyro = mpu9250.readGyro()
#print " gx = " , ( gyro['x'] )
#print " gy = " , ( gyro['y'] )
#print " gz = " , ( gyro['z'] )
mag = mpu9250.readMagnet()
#print " mx = " , ( mag['x'] )
#print " my = " , ( mag['y'] )
#print " mz = " , ( mag['z'] )
#print
Acceleration_angle = [0,0]
Acceleration_angle[0] = math.atan((accel['y'])/math.sqrt(pow(accel['x'],2) + pow(accel['z'],2)))*180/3.141592654;
Acceleration_angle[1] = math.atan(-1*accel['x']/math.sqrt(pow(accel['y'],2) + pow(accel['z'],2)))*180/3.141592654;
print Acceleration_angle
time.sleep(0.1)
#except KeyboardInterrupt:
# sys.exit()
'''
|
'''
PDFPage class
'''
from . import word
from sortedcontainers import SortedDict
class PDFPage():
    """One page of a PDF, with its words indexed three ways:
    by text, by id, and by (x, y) position."""

    def __init__(self, folder_location, number_of_pages):
        self.folder = folder_location
        self.number_of_pages = number_of_pages
        self.text_dict = {}                # text -> [word, ...] (every instance)
        self.id_dict = {}                  # id -> word
        self.position_dict = SortedDict()  # x -> SortedDict(y -> word)

    def add_word(self, word):
        """Index *word* into all three dictionaries.

        (The parameter shadows the module-level ``word`` import -- kept for
        caller compatibility.)
        """
        # by text - creates a list with every instance of the word;
        # the original duplicated identical if/else branches here
        self.text_dict.setdefault(word.get_text(), []).append(word)
        # by id
        self.id_dict[word.get_id()] = word
        # by position: setdefault collapses the original's duplicated
        # new-x / existing-x branches, which did the same thing
        self.position_dict.setdefault(word.get_x(), SortedDict())[word.get_y()] = word
        print("X: {} Y: {}".format(word.get_x(), word.get_y()))

    def sort_dictionaries(self):
        # NOTE(review): this method never populates return_value and falls
        # off the end (returns None); it looks unfinished.  Behaviour is
        # preserved as-is -- TODO: decide the intended return structure.
        return_value = SortedDict()
        for key1 in sorted(self.position_dict.keys()):
            x_return_value = SortedDict()
            for key2 in self.position_dict[key1]:
                x_return_value = sorted((self.position_dict[key1]).keys())
|
"""
Python script that returns the maximum
number of paths from the top left to
the bottom right of a grid with only
'Down' and 'Right' movement allowed.
Input: n m
n -> number of rows
m -> number of columns
"""
def check_bound(x, y, n, m):
    """Return True when (x, y) lies inside an n x m grid (0-indexed)."""
    return x < n and y < m
# Direction of neighbor logic goes here.
# We get right and bottom neighbors here.
# If we want to get top and left neighbors
# There needs to be a little more checking
# in the check bound logic function.
def get_neighbors(x, y, n, m):
    """Return the in-bounds down and right neighbours of (x, y).

    When both exist, the right neighbour (x, y+1) comes first -- matching
    the original insert-at-front ordering.
    """
    neighbours = []
    for nx, ny in ((x + 1, y), (x, y + 1)):
        if nx < n and ny < m:
            neighbours.insert(0, (nx, ny))
    return neighbours
def num_paths(n: int, m: int) -> int:
    """Count monotone lattice paths from (0, 0) to (n-1, m-1) with only
    'down' and 'right' moves.

    The original explored every path with an explicit stack, which is
    exponential in n + m; a row-by-row dynamic program returns the same
    count in O(n*m) time and O(m) space.

    :param n: number of rows (>= 1)
    :param m: number of columns (>= 1)
    :return: the number of distinct paths
    """
    # ways[j] = number of paths reaching column j of the current row
    ways = [1] * m                  # first row: a single all-'right' path
    for _ in range(1, n):
        for j in range(1, m):
            ways[j] += ways[j - 1]  # paths from above + paths from the left
    return ways[m - 1]
def main():
    """Demo entry point: print the path count for an 8 x 3 grid."""
    rows, cols = 8, 3
    print(num_paths(rows, cols))
if __name__ == "__main__":
main()
|
# Read an integer and print the sum of its last two digits.
a = int(input(""))
last_digit = a % 10
# bug fix: true division (/10) produced a float, so the printed sum came
# out as e.g. "10.0"; floor division keeps everything integral
second_digit = (a % 100 - last_digit) // 10
print(last_digit + second_digit)
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#########################################################################################
# #
# create_full_focal_plane_data.py: create/update full resolution focal plane data #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Mar 03, 2021 #
# #
#########################################################################################
import sys
import os
import string
import re
import numpy
import getopt
import time
import random
import Chandra.Time
import Ska.engarchive.fetch as fetch
import unittest
#
#--- reading directory list
#
# Each line of dir_list is "<path> : <variable name>"; exec() turns every
# entry into a module-level variable (presumably house_keeping, short_term,
# data_dir, mta_dir, bin_dir used below -- confirm against dir_list).
# SECURITY NOTE: exec on file contents runs arbitrary code; dir_list must
# stay trusted/write-protected.
path = '/data/mta/Script/ACIS/Focal/Script/house_keeping/dir_list'
with open(path, 'r') as f:
    data = [line.strip() for line in f.readlines()]
for ent in data:
    atemp = re.split(':', ent)
    var = atemp[1].strip()
    line = atemp[0].strip()
    exec("%s = %s" %(var, line))
#
#--- append path to a private folder
#
sys.path.append(mta_dir)
sys.path.append(bin_dir)
import mta_common_functions as mcf
#
#--- temp writing file name
#
# randomized scratch-file name under /tmp, unique per run
rtail = int(time.time() * random.random())
zspace = '/tmp/zspace' + str(rtail)
#-------------------------------------------------------------------------------
#-- create_full_focal_plane_data: create/update full resolution focal plane data
#-------------------------------------------------------------------------------
def create_full_focal_plane_data():
    """
    create/update full resolution focal plane data

    input:  none, but read from <short_term>/data_* (short-term focal plane
            dumps) and <house_keeping>/prev_short_files (already-processed list)
    output: <data_dir>/full_focal_plane_data_<year>, appended/created on disk;
            also rewrites <house_keeping>/prev_short_files (backup kept as ~)
    """
    #
    #--- read already processed data file names
    #
    rfile = house_keeping + 'prev_short_files'
    try:
        rlist = mcf.read_data_file(rfile)
        cmd = 'mv ' + rfile + ' ' + rfile + '~'
        os.system(cmd)
    except:
        # NOTE(review): bare except -- first run (no prev_short_files) lands
        # here, but so would any other failure
        rlist = []
    #
    #--- read currently available data file names
    #
    cmd = 'ls ' + short_term + 'data_* > ' + rfile
    os.system(cmd)
    flist = mcf.read_data_file(rfile)
    #
    #--- find un-processed data file names
    #
    flist = list(numpy.setdiff1d(flist, rlist))
    #
    #--- quite often crat and crbt data are not processed on time; so repeat the last
    #--- three data sets to make sure that missing data part is covered
    #
    if len(rlist) > 3:
        repeat_part = rlist[-3:]
    else:
        repeat_part = rlist
    flist = repeat_part + flist
    # schk: the last-entry time is read from the output file only once,
    # for the first file processed
    schk = 0
    for ifile in flist:
        print("INPUT: " + ifile)
        #
        #---- checking whether the year change occues in this file
        #
        [year, chg] = find_year_change(ifile)
        data = mcf.read_data_file(ifile)
        #
        #---- find the last entry time
        #
        outfile = data_dir + 'full_focal_plane_data_' + str(year)
        if schk == 0:
            try:
                ldata = mcf.read_data_file(outfile)
                ltemp = re.split('\s+', ldata[-1])
                start = int(float(ltemp[0]))
            except:
                # no output file yet (or unreadable): keep everything
                start = 0
            schk = 1
        #
        #--- select unique data (dict keyed on the first column de-duplicates)
        #
        tdict = {}
        for ent in data:
            atemp = re.split('\s+', ent)
            if len(atemp) < 4:
                continue
            try:
                tdict[atemp[0]] = [atemp[1], atemp[2]]
            except:
                continue
        temp1 = []
        temp2 = []
        for key in tdict.keys():
            [val1, val2] = tdict[key]
            #
            #--- convert time into Chandra time (time is coming in, e.g.: 142:43394.950000 format)
            #
            try:
                ctime = ptime_to_ctime(year, val1, chg)
            except:
                continue
            # drop entries at or before the last recorded time
            if ctime < start:
                continue
            temp1.append(ctime)
            temp2.append(float(val2))
        if len(temp1) == 0:
            continue
        #
        #--- sorting lists with time order
        #
        temp1, temp2 = zip(*sorted(zip(temp1, temp2)))
        temp1 = list(temp1)
        temp2 = list(temp2)
        #
        #--- keep the last entry to mark the starting point to the next round
        #
        start = temp1[-1]
        #
        #--- find cold plate temperatures
        #
        [crat, crbt] = find_cold_plates(temp1)
        #
        #--- prep for data print out
        #
        sline = ' '
        for k in range(0, len(temp1)):
            #
            #---- if the focal temp is warmer than -20C or colder than -273, something wrong with the data: drop it
            #
            if temp2[k] > -20:
                continue
            elif temp2[k] < -273:
                continue
            elif crat[k] == 999.0:
                # 999.0 is find_cold_plates' no-data sentinel
                continue
            tline = "%d\t%4.3f\t%4.3f\t%4.3f" % (temp1[k], temp2[k], crat[k], crbt[k])
            # NOTE(review): str.replace is not a regex -- this removes the
            # literal substring "\s+", which never occurs, so it is a no-op
            tline = tline.replace('\s+', '')
            # NOTE(review): tline is never '' after the %-format above, and
            # sline starts as ' ', so neither emptiness check below can fire
            if tline == '':
                continue
            else:
                sline = sline + tline + '\n'
        if sline == '':
            continue
        #
        #--- write the data out (append if the yearly file exists; keep a ~ backup)
        #
        if os.path.isfile(outfile):
            wind = 'a'
            cmd = 'cp ' + outfile + ' ' +outfile + '~'
            os.system(cmd)
        else:
            wind = 'w'
        with open(outfile, wind) as fo:
            fo.write(sline)
        cmd = 'chmod 774 ' + outfile
        os.system(cmd)
        cmd = 'chgrp mtagroup ' + outfile
        os.system(cmd)
#-------------------------------------------------------------------------------
#-- find_cold_plates: create cold plate temperature data lists corresponding to the given time list
#-------------------------------------------------------------------------------
def find_cold_plates(t_list):
    """
    create cold plate temperature data lists corresponding to the given time list

    input:  t_list --- a list of time (Chandra seconds), assumed sorted
    output: crat   --- a list of temperature of plate A (deg C; 999.0 if no
                       archive sample falls inside the matching window)
            crbt   --- a list of temperature of plate B (same convention)
    """
    #
    #--- set data time interval
    #
    start = t_list[0]
    stop = t_list[-1]
    #
    #--- cold plate A (1crat) from the Ska engineering archive
    #
    out = fetch.MSID('1crat', start, stop)
    tlist = out.times
    alist = out.vals
    #
    #--- cold plate B (1crbt)
    #
    out = fetch.MSID('1crbt', start, stop)
    blist = out.vals
    #
    #--- make sure that data have the same numbers of entries
    #    NOTE(review): blist.append assumes .vals behaves like a list;
    #    if fetch returns a numpy array this branch would fail -- confirm
    #
    alen = len(alist)
    blen = len(blist)
    if alen < blen:
        blist = blist[:alen]
    elif alen > blen:
        for k in range(blen, alen):
            blist.append(blist[-1])
    #
    #--- find the cold plate temperatures correpond to the given time
    #
    crat = []
    crbt = []
    for tent in t_list:
        #
        #-- match window is actually +/- 30 seconds (the original inline
        #-- comment said 10); values are converted from K to deg C
        #
        begin = tent - 30
        end = tent + 30
        # NOTE(review): m is reset to 0 for every tent, so the m = k - 10
        # bookkeeping below never changes where the scan starts -- each
        # lookup rescans the archive from index 0 (dead optimization)
        m = 0
        chk = 0
        for k in range(m, alen):
            if (tlist[k] >= begin) and (tlist[k] <= end):
                crat.append(float(alist[k]) - 273.15)
                crbt.append(float(blist[k]) - 273.15)
                m = k - 10
                if m < 0:
                    m = 0
                chk = 1
                break
        if chk == 0:
            # no archive sample in the window: sentinel value
            crat.append(999.0)
            crbt.append(999.0)
    return [crat, crbt]
#-------------------------------------------------------------------------------
#-- find_year_change: find year of the data and whether the year change occures in this file
#-------------------------------------------------------------------------------
def find_year_change(ifile):
    """
    find year of the data and whether the year change occures in this file

    input:  ifile --- input file name
            assume that the ifile has a form of : .../Short_term/data_<year>_<day1>_<hhmm>_<day2>_<hhmm>
    output: year  --- year of the data
            chk   --- whether the year change occures in this file; 1: yes / 0: no
                      (flagged when the ending day-of-year is smaller than the start)
    """
    parts = ifile.split('/')[-1].split('_')
    year = int(float(parts[1]))
    start_day = int(float(parts[2]))
    end_day = int(float(parts[4]))
    chk = 1 if end_day < start_day else 0
    return [year, chk]
#-------------------------------------------------------------------------------
#-- ptime_to_ctime: convert focal plate time to chandra time --
#-------------------------------------------------------------------------------
def ptime_to_ctime(year, atime, chg):
    """
    convert focal plate time to chandra time

    input:  year  --- year of the data
            atime --- data time in focal data time format: e.g., 115:1677.850000
                      (<day-of-year>:<seconds of that day>)
            chg   --- indicator that telling that year is changed (if 1)
    output: ctime --- time in seconds from 1998.1.1 (999.0 if the assembled
                      date string cannot be parsed)
    """
    btemp = re.split(':', atime)
    day = int(float(btemp[0]))
    #
    #--- the year changed during this data; so change the year to the next year
    #    NOTE(review): only day == 1 is bumped; if a file spans past day 1
    #    (day 2, 3, ...) those entries keep the old year -- confirm intended
    #
    if chg > 0:
        if day == 1:
            year += 1
    # seconds-of-day -> fraction of a day -> hh:mm:ss
    fday = float(btemp[1])
    fday /= 86400.0
    tmp = fday * 24.0
    hh = int(tmp)
    tmp = (tmp - hh) * 60.0
    mm = int(tmp)
    ss = int((tmp - mm) * 60.0)
    #
    #--- add leading zeros
    #
    day = add_leading_zero(day, 3)
    hh = add_leading_zero(hh, 2)
    mm = add_leading_zero(mm, 2)
    ss = add_leading_zero(ss, 2)
    # assemble "<year>:<ddd>:<hh>:<mm>:<ss>" for Chandra.Time
    yday = str(year) + ':' + str(day) + ':' + str(hh) + ':' + str(mm) + ':' + str(ss)
    try:
        ctime = Chandra.Time.DateTime(yday).secs
    except:
        # sentinel for unparseable times (callers filter on ctime < start)
        ctime = 999.0
    return ctime
#-------------------------------------------------------------------------------
#-- add_leading_zero: add leading zero if needed --
#-------------------------------------------------------------------------------
def add_leading_zero(val, num):
    """
    add leading zero if needed

    input:  val --- value to modified (int or float)
            num --- how many digit we need (counting only the integer part)
    output: sval--- adjust val in string
    """
    # count the digits of the integer part, then pad the string form of the
    # full value; no padding when it is already wide enough
    integer_digits = len(str(int(val)))
    pad = max(num - integer_digits, 0)
    return '0' * pad + str(val)
#-------------------------------------------------------------------------------
#-- TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST -
#-------------------------------------------------------------------------------
class TestFunctions(unittest.TestCase):
    """
    Unit tests for the module's helper functions.

    test_find_cold_plates queries the Ska engineering archive, so it only
    passes in the production environment where that archive is reachable.
    """

    def test_find_cold_plates(self):
        t_list = [638025960, 638243227, 637326500, 638572802]
        testa = [-123.14513244628904, -125.53862609863279, 999.0, -125.53862609863279]
        testb = [-123.14513244628904, -125.53862609863279, 999.0, -123.14513244628904]
        [crat, crbt] = find_cold_plates(t_list)
        # bug fix: assertEquals is a deprecated alias removed in Python 3.12;
        # assertEqual is the supported spelling
        self.assertEqual(testa, crat)
        self.assertEqual(testb, crbt)

    def test_find_year_change(self):
        ifile = '/data/mta/Script/ACIS/Focal/Short_term/data_2017_365_2059_001_0241'
        [year, chg] = find_year_change(ifile)
        self.assertEqual(2017, year)
        self.assertEqual(1, chg)
#-------------------------------------------------------------------------------
if __name__ == "__main__":
create_full_focal_plane_data()
#unittest.main()
|
# Generated by Django 2.0.3 on 2018-04-07 05:03
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional ``promo_video`` file field to the ``basic_info`` model."""

    dependencies = [
        ('web', '0008_auto_20180331_1432'),
    ]

    operations = [
        migrations.AddField(
            model_name='basic_info',
            name='promo_video',
            # blank/null: the video is optional; uploads land under 'Promo Video'
            field=models.FileField(blank=True, null=True, upload_to='Promo Video'),
        ),
    ]
|
from django.contrib import admin
from .models import category, gif
# Register your models here.
# Expose the category and gif models in the Django admin with the default
# ModelAdmin options.
admin.site.register(category)
admin.site.register(gif)
#! /usr/bin/python
# Find data file in switchboard files using basename.
import sys
import subprocess
sys.path.append('/scratch2/nxs113020/cch_plda/sid_dir/../tools/')
import make_cochannel
if __name__=='__main__':
    """
    Reads list of nist sre08 formatted as:
        filename, spkrid, channel
    Generates co-channel wav data for a given SIR for the
    input data list.
    Returns corresponding wav.scp and utt2spk files.
    input:
        1. input audio list in the format described above.
        2. signal-to-interference ratio (dB), sir
    outputs:
        1. data wav.scp
        2. data utt2spk
    """
    input_audio_list = sys.argv[1]
    sir = float(sys.argv[2])
    out_wavscp = sys.argv[3]
    out_utt2spk = sys.argv[4]

    # maps NIST channel labels both ways (digit -> side letter and back);
    # an empty channel field defaults to side 'A'
    channel_map = {'1': 'A', '2': 'B', '': 'A', 'A': '1', 'B': '2'}
    wavscp_list = []
    utt2spk_list = []
    # one wav.scp entry per utterance: pipe the sphere file through sox
    wavscp_format = "%s sox --ignore-length %s -t wav -b 16 - | "
    # bug fix: the input list (and both outputs below) were opened without
    # ever being closed; with-blocks guarantee the handles are released
    with open(input_audio_list) as audio_list:
        for entry in audio_list:
            fields = entry.split(',')
            filepath = fields[0].strip()
            basename = filepath.split('/')[-1].split('.sph')[0]
            spkr_id = fields[1].strip()
            channel = fields[2].strip()
            side = channel_map[channel]
            uttid = spkr_id + '_' + basename + ':' + side
            wavpath = make_cochannel.nistsre2008(filepath, side, sir)
            wavscp_list.append(wavscp_format % (uttid, wavpath) + '\n')
            utt2spk_list.append(uttid + ' ' + spkr_id + '\n')

    # sort and de-duplicate so Kaldi gets stable, unique entries
    wavscp_list = sorted(set(wavscp_list))
    utt2spk_list = sorted(set(utt2spk_list))
    with open(out_wavscp, 'w') as wavscp, open(out_utt2spk, 'w') as utt2spk:
        wavscp.writelines(wavscp_list)
        utt2spk.writelines(utt2spk_list)
|
#*_* coding=utf8 *_*
#!/usr/bin/env python
import eventlet
from eventlet.green import socket
from unreal import backend
httplib = eventlet.import_patched('httplib')
class HTTPConnection(httplib.HTTPConnection):
    """httplib.HTTPConnection that resolves the target host through the
    unreal backend registry before connecting.

    Built on eventlet's green socket, so connect() yields cooperatively.
    """
    def connect(self):
        # green (eventlet-scheduled) socket instead of a blocking one
        self.sock = socket.socket()
        # prefer the mapped backend address; presumably get_backend returns
        # a falsy value when the host is unmapped -- TODO confirm
        backend_address = backend.get_backend(self.host) or self.host
        self.sock.connect((backend_address, self.port))
|
#!/usr/bin/python
def char_to_int(char):
    """Map a letter to its 0-based alphabet index ('a'/'A' -> 0 ... 'z'/'Z' -> 25)."""
    return ord(char.lower()) - ord('a')
def last_occur(pattern):
    """Build the Boyer-Moore last-occurrence table.

    Returns a 26-entry list: for each letter, the highest index at which it
    appears in *pattern* (case-insensitive), or -1 if absent.
    """
    table = [-1] * 26
    for idx, ch in enumerate(pattern):
        table[ord(ch.lower()) - ord('a')] = idx
    return table
def good_suffix(pattern):
    """Build a shift table for the Boyer-Moore good-suffix rule.

    NOTE(review): this implementation looks fragile: ``suffix[i]`` can be
    assigned negative values, ``indexes`` is mutated (remove) while being
    indexed inside the k-loop with a manual ``x`` offset, and most entries
    stay 0.  Verify against a reference good-suffix construction before
    trusting the shifts it produces.
    """
    suffix = [0] * len(pattern)
    # indexes: positions where the last pattern character also occurs
    # (seeded with -1 as a sentinel)
    indexes = [-1]
    last_char = pattern[len(pattern) - 1]
    j = 1
    for i in range(len(pattern) - 1):
        if pattern[i] == last_char:
            indexes.append(i)
    last = True
    # walk the pattern right-to-left, filling shift candidates
    for i in range(len(pattern) - 2, -1, -1):
        if last and pattern[i] != last_char:
            suffix[len(suffix) - 1] = i
            last = False
        x = 0
        for k in range(len(indexes)):
            # x compensates for elements removed from indexes mid-loop
            k -= x
            if indexes[k] - j < 0:
                suffix[i] = indexes[k] - j
            elif pattern[indexes[k] - j] != pattern[i]:
                suffix[i] = indexes[k] - j
                indexes.remove(indexes[k])
                x += 1
        j += 1
    return suffix
def boyer_moore(pattern, text):
    """Boyer-Moore-style substring search.

    Returns the start index of a match of *pattern* in *text*, or -1.

    NOTE(review): unlike textbook Boyer-Moore, the loop gives up as soon as
    the shifted index i runs past the end of *text*; combined with the
    good_suffix table above, matches after large shifts may be missed --
    verify against the expected values in the test cases below.
    """
    L = last_occur(pattern)   # bad-character table
    S = good_suffix(pattern)  # good-suffix table
    i = len(pattern) - 1      # index into text
    j = i                     # index into pattern (scanned right-to-left)
    while i < len(text) and j >= 0:
        if text[i] == pattern[j]:
            i -= 1
            j -= 1
        else:
            # mismatch: shift by the larger of the two heuristics
            # (min of the table values, subtracted from the pattern length)
            i += len(pattern) - 1 - min(L[char_to_int(text[i])], S[j])
            j = len(pattern) - 1
    if j == -1:
        # pattern fully matched; i stopped one position before the match
        return i + 1
    else:
        return -1
# TEST CASES
# NOTE(review): the expected values in the trailing comments are taken on
# trust from the original author -- verify them against the implementation.
print(boyer_moore('pan', 'anpanman')) # 2
print(boyer_moore('odetofood', 'ilikefoodfrommexico')) # -1
print(boyer_moore('ABCDABD', 'ABCABCDABABCDABCDABDE')) # 13
print(boyer_moore('caga', 'gcagagag')) # 1
print(boyer_moore('abacaba', 'abaxyabacabbaababacaba')) # 15
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: Rico Sennrich
# Distributed under MIT license
import argparse
import sys
def main(args):
    """Pick the lowest-scoring (best) hypothesis per sentence from an n-best
    list on stdin and print it to stdout.

    stdin lines look like ``num ||| sentence ||| score1 score2 ...`` where
    *num* is the non-decreasing input-sentence index.  For each sentence the
    summed score is minimised over at most ``args.k`` hypotheses;
    ``args.select_gt`` / ``args.select`` restrict which score columns take
    part.  The winning score goes to stderr, the winning sentence to stdout.
    """
    k = args.k
    if k is None:
        k = float('inf')

    cur = 0
    best_score = float('inf')
    best_sent = ''
    idx = 0
    for line in sys.stdin:
        num, sent, scores = line.split(' ||| ')

        # new input sentence: print best translation of previous sentence,
        # and reset stats
        if int(num) > cur:
            sys.stderr.write('{} {} \n'.format(cur, best_score))
            sys.stdout.write('{}\n'.format(best_sent))
            cur = int(num)
            best_score = float('inf')
            best_sent = ''
            idx = 0

        # only consider k-best hypotheses
        if idx >= k:
            continue

        # bug fix: in Python 3 map() returns a lazy iterator which cannot be
        # sliced (args.select_gt) or indexed (args.select) -- materialise it
        scores = list(map(float, scores.split()))
        if args.select_gt:
            scores = scores[args.select_gt:]
        elif args.select:
            scores = [scores[i] for i in args.select]
        score = sum(scores)

        if score < best_score:
            best_score = score
            best_sent = sent.strip()
        idx += 1

    # end of file; print best translation of last sentence
    sys.stderr.write('{} {} \n'.format(cur, best_score))
    sys.stdout.write('{}\n'.format(best_sent))
# print best_score
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--k", type=int, help="only consider k-best hypotheses")
    parser.add_argument("--select-gt", type=int, help="select scores indexed from i for reranking")
    parser.add_argument("--select", type=int, nargs='+', help="specify indexed scores for ranking")
    args = parser.parse_args()
    # bug fix: the arguments were parsed but main() was never invoked,
    # so running the script did nothing
    main(args)
|
from settings import settings
from office365.runtime.auth.user_credential import UserCredential
from office365.sharepoint.client_context import ClientContext
from office365.sharepoint.search.searchRequest import SearchRequest
from office365.sharepoint.search.searchService import SearchService
# Authenticate against the SharePoint site with plain user credentials.
ctx = ClientContext(settings['url']).with_credentials(UserCredential(settings['user_credentials']['username'],
                                                                    settings['user_credentials']['password']))
search = SearchService(ctx)
# KQL query: only items flagged as documents
request = SearchRequest("IsDocument:1")
result = search.post_query(request)
ctx.execute_query()
relevant_results = result.PrimaryQueryResult.RelevantResults
# NOTE(review): this iterates Rows and then subscripts Rows with each
# yielded element; that only works if Rows is a mapping keyed by row.  If
# Rows is a list (as in current office365 versions), this should be
# ``for row in ...: cells = row['Cells']`` -- confirm against the library
# version in use.
for i in relevant_results['Table']['Rows']:
    cells = relevant_results['Table']['Rows'][i]['Cells']
    # cell 6 presumably holds the document path/title -- verify the schema
    print(cells[6]['Value'])
|
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
# __author__ = caicaizhang
class Solution:
    """LeetCode 26: remove duplicates from a sorted array in place."""

    def removeDuplicates(self, nums):
        """
        Compact *nums* so its leading elements are the unique values in
        order, and return how many there are.

        The original special-cased len 0/1 and started with the no-op
        assignment ``nums[idx] = nums[0]``; a single write pointer covers
        every case.

        :type nums: List[int]  (must be sorted)
        :rtype: int
        """
        if not nums:
            return 0
        # idx points at the last unique element written so far
        idx = 0
        for val in nums:
            if val != nums[idx]:
                idx += 1
                nums[idx] = val
        return idx + 1
import os, sys
# When run directly (not as part of the package), put the parent directory
# on sys.path so the sibling `article_gen` module resolves.
if not __package__:
    path = os.path.join(os.path.dirname(__file__), os.pardir)
    sys.path.insert(0, path)
import article_gen
article_gen.main()
|
# test pygame package
# Smoke test: import the echo submodule and run its built-in self-test.
import pygame.sound.echo
pygame.sound.echo.test_echo()
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import sys
import os.path
from PyQt4 import QtCore, QtGui
QtCore.Signal = QtCore.pyqtSignal
import vtk
from vtk.qt4.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
class VTKFrame(QtGui.QFrame):
    """Qt frame embedding a VTK render window.

    Shows a wireframe sphere clipped by an implicit function sampled from a
    triangulated cube, alongside the cube itself for reference.
    """

    def __init__(self, parent = None):
        super(VTKFrame, self).__init__(parent)
        # embed the VTK render widget and make it fill the frame
        self.vtkWidget = QVTKRenderWindowInteractor(self)
        vl = QtGui.QVBoxLayout(self)
        vl.addWidget(self.vtkWidget)
        vl.setContentsMargins(0, 0, 0, 0)

        self.ren = vtk.vtkRenderer()
        self.vtkWidget.GetRenderWindow().AddRenderer(self.ren)
        self.iren = self.vtkWidget.GetRenderWindow().GetInteractor()

        # high-resolution sphere that will be clipped
        sphere = vtk.vtkSphereSource()
        sphere.SetCenter(1, 1, 1)
        sphere.SetRadius(1)
        sphere.SetThetaResolution(100)
        sphere.SetPhiResolution(100)
        sphere.Update()

        # unit cube centred at the origin
        cube = vtk.vtkCubeSource()
        cube.SetBounds(-1,1,-1,1,-1,1)
        cube.Update()

        # Create 3D cells so vtkImplicitDataSet evaluates inside vs outside correctly
        # (SetInput is the pre-VTK6 pipeline API -- confirm the VTK version)
        tri = vtk.vtkDelaunay3D()
        tri.SetInput(cube.GetOutput())
        tri.BoundingTriangulationOff()

        # vtkImplicitDataSet needs some scalars to interpolate to find inside/outside
        elev = vtk.vtkElevationFilter()
        elev.SetInputConnection(tri.GetOutputPort())

        implicit = vtk.vtkImplicitDataSet()
        implicit.SetDataSet(elev.GetOutput())

        # clip the sphere with the implicit function; InsideOut keeps the
        # portion inside the cube
        clipper = vtk.vtkClipPolyData()
        clipper.SetClipFunction(implicit)
        clipper.SetInputConnection(sphere.GetOutputPort())
        clipper.InsideOutOn()
        clipper.Update()

        # Vis for clipped sphere
        mapper = vtk.vtkPolyDataMapper()
        mapper.SetInputConnection(clipper.GetOutputPort())
        actor = vtk.vtkActor()
        actor.SetMapper(mapper)
        actor.GetProperty().SetRepresentationToWireframe()

        # Vis for cube so can see it in relation to clipped sphere
        mapper2 = vtk.vtkDataSetMapper()
        mapper2.SetInputConnection(elev.GetOutputPort())
        actor2 = vtk.vtkActor()
        actor2.SetMapper(mapper2)
        #actor2.GetProperty().SetRepresentationToWireframe()

        #create renderers and add actors of plane and cube
        self.ren.AddActor(actor)
        self.ren.AddActor(actor2)
        self.ren.SetBackground(0.1, 0.2, 0.4)
        self.ren.ResetCamera()

        # interactor start is deferred until the widget is actually shown
        self._initialized = False

    def showEvent(self, evt):
        # initialise the VTK interactor exactly once, on first show
        if not self._initialized:
            self.iren.Initialize()
            self._initialized = True
class MainPage(QtGui.QMainWindow):
    """Main window hosting a single VTKFrame as its central widget."""

    def __init__(self, parent = None):
        super(MainPage, self).__init__(parent)
        self.setCentralWidget(VTKFrame())
        self.setWindowTitle("Implicit data set example")

    def categories(self):
        """Example-gallery metadata: category labels for this demo."""
        return ['Clipper', 'Implict Data Set', 'Filters']

    def mainClasses(self):
        """Example-gallery metadata: the principal VTK classes demonstrated."""
        return ['vtkSphereSource', 'vtkDelaunay3D', 'vtkElevationFilter', 'vtkImplicitDataSet', 'vtkDataSetMapper', 'vtkClipPolyData']
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
w = MainPage()
w.show()
sys.exit(app.exec_())
|
from common.run_method import RunMethod
import allure
@allure.step("/班贴/班贴列表")
def course_notes_getNoteList_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    POST the class-note list query.

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: whether to return the response parsed as JSON (default True)
    :param header: request headers
    :param kwargs: extra options such as host (target environment)
    :return: JSON response by default; the raw response when return_json=False
    '''
    name = "/班贴/班贴列表"
    url = f"/service-profile/course-notes/getNoteList"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("/班贴/班贴列表导出")
def course_notes_real_exportExcel_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    POST the class-note list Excel export.

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: whether to return the response parsed as JSON (default True)
    :param header: request headers
    :param kwargs: extra options such as host (target environment)
    :return: JSON response by default; the raw response when return_json=False
    '''
    name = "/班贴/班贴列表导出"
    url = f"/service-profile/course-notes/real/exportExcel"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极师通/班帖/组件查询")
def course_notes_component_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    POST the class-note component query.

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: whether to return the response parsed as JSON (default True)
    :param header: request headers
    :param kwargs: extra options such as host (target environment)
    :return: JSON response by default; the raw response when return_json=False
    '''
    name = "极师通/班帖/组件查询"
    url = f"/service-profile/course-notes/component"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极师通/班帖/组件启用修改")
def course_notes_changeEnable_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    POST a change to a component's enabled state.

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: whether to return the response parsed as JSON (default True)
    :param header: request headers
    :param kwargs: extra options such as host (target environment)
    :return: JSON response by default; the raw response when return_json=False
    '''
    name = "极师通/班帖/组件启用修改"
    url = f"/service-profile/course-notes/changeEnable"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极师通/班帖/模板筛选查询")
def course_notes_queryNote_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    POST the template filter query.

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: whether to return the response parsed as JSON (default True)
    :param header: request headers
    :param kwargs: extra options such as host (target environment)
    :return: JSON response by default; the raw response when return_json=False
    '''
    name = "极师通/班帖/模板筛选查询"
    url = f"/service-profile/course-notes/queryNote"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极师通/班帖/创建模板")
def course_notes_createNote_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    POST the template creation request.

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: whether to return the response parsed as JSON (default True)
    :param header: request headers
    :param kwargs: extra options such as host (target environment)
    :return: JSON response by default; the raw response when return_json=False
    '''
    name = "极师通/班帖/创建模板"
    url = f"/service-profile/course-notes/createNote"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极师通/班帖/模板筛选查询")
def course_notes_queryNoteById_get(params=None, header=None, return_json=True, **kwargs):
    '''
    GET a single template by id.

    :param params: query-string parameters appended to the URL
    :param return_json: whether to return the response parsed as JSON (default True)
    :param header: request headers
    :param kwargs: extra options such as host (target environment)
    :return: JSON response by default; the raw response when return_json=False
    '''
    name = "极师通/班帖/模板筛选查询"
    url = f"/service-profile/course-notes/queryNoteById"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极师通/班帖/模板启用停用")
def course_notes_rNoteEnable_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/班帖/模板启用停用"
url = f"/service-profile/course-notes/rNoteEnable"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/班帖/模板编辑")
def course_notes_editNote_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/班帖/模板编辑"
url = f"/service-profile/course-notes/editNote"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/班帖/模板复制")
def course_notes_copyNote_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/班帖/模板复制"
url = f"/service-profile/course-notes/copyNote"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/班帖/模板校区查询")
def course_notes_queryNoteCampus_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/班帖/模板校区查询"
url = f"/service-profile/course-notes/queryNoteCampus"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/班帖/添加模板校区关联")
def course_notes_addNoteCampus_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/班帖/添加模板校区关联"
url = f"/service-profile/course-notes/addNoteCampus"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/班帖/发布新班帖(存草稿)")
def course_notes_courseNoteId_newNote_post(courseNoteId, params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/班帖/发布新班帖(存草稿)"
url = f"/service-profile/course-notes/{courseNoteId}/newNote"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/班帖/查看新班帖详情")
def course_notes_detail_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/班帖/查看新班帖详情"
url = f"/service-profile/course-notes/detail"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/班帖/提醒学生查看班帖(新)")
def course_notes_classStickerId_remindNew_post(classStickerId, params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/班帖/提醒学生查看班帖(新)"
url = f"/service-profile/course-notes/{classStickerId}/remindNew"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/班帖/班帖列表(已发布)")
def course_notes_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/班帖/班帖列表(已发布)"
url = f"/service-profile/course-notes"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/班帖/班帖列表(未发布)")
def course_notes_findOrCreateNote_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/班帖/班帖列表(未发布)"
url = f"/service-profile/course-notes/findOrCreateNote"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("APP/班帖/班帖列表")
def course_notes_stickerList_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "APP/班帖/班帖列表"
url = f"/service-profile/course-notes/stickerList"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("APP/班帖/班级列表")
def course_notes_queryClassList_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "APP/班帖/班级列表"
url = f"/service-profile/course-notes/queryClassList"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/班帖/模板删除")
def course_notes_deleteNote_delete(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/班帖/模板删除"
url = f"/service-profile/course-notes/deleteNote"
res = RunMethod.run_request("DELETE", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/首页感叹号")
def course_notes_status_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/首页感叹号"
url = f"/service-profile/course-notes/status"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/班帖/查看班帖详情")
def course_notes_courseNoteId_detail_get(courseNoteId, params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/班帖/查看班帖详情"
url = f"/service-profile/course-notes/{courseNoteId}/detail"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/班帖/提醒学生查看班帖")
def course_notes_courseNoteId_reminds_post(courseNoteId, params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/班帖/提醒学生查看班帖"
url = f"/service-profile/course-notes/{courseNoteId}/reminds"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/班帖/发布班帖(存草稿)")
def course_notes_courseNoteId_post(courseNoteId, params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/班帖/发布班帖(存草稿)"
url = f"/service-profile/course-notes/{courseNoteId}"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/上课/极客币")
def course_notes_queryJKDeatil_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/上课/极客币"
url = f"/service-profile/course-notes/queryJKDeatil"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/学习报告/极客资料")
def course_notes_getGeekMaterial_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/学习报告/极客资料"
url = f"/service-profile/course-notes/getGeekMaterial"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/学习报告/获取教材所有讲次")
def course_notes_listLectureIndexes_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/学习报告/获取教材所有讲次"
url = f"/service-profile/course-notes/listLectureIndexes"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/学习报告/下讲预习")
def course_notes_prepareLessonMaterials_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/学习报告/下讲预习"
url = f"/service-profile/course-notes/prepareLessonMaterials"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/学习报告/发布极客资料")
def course_notes_publishGeekMaterial_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/学习报告/发布极客资料"
url = f"/service-profile/course-notes/publishGeekMaterial"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/学习报告/保存极客资料")
def course_notes_saveGeekMaterial_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/学习报告/保存极客资料"
url = f"/service-profile/course-notes/saveGeekMaterial"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/学习报告/本讲辅导")
def course_notes_tutorialMaterials_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/学习报告/本讲辅导"
url = f"/service-profile/course-notes/tutorialMaterials"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("获取以上课的课次信息")
def course_notes_getTaughtClassSchedule_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "获取以上课的课次信息"
url = f"/service-profile/course-notes/getTaughtClassSchedule"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通web/学习报告/预存课次信息")
def course_notes_preview_getTaughtClassSchedule_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通web/学习报告/预存课次信息"
url = f"/service-profile/course-notes/preview/getTaughtClassSchedule"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通web/学习报告/预存学习报告详情")
def course_notes_preview_detail_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通web/学习报告/预存学习报告详情"
url = f"/service-profile/course-notes/preview/detail"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通web/学习报告/发送预存学习报告")
def course_notes_courseNoteId_preview_newNote_post(courseNoteId, params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通web/学习报告/发送预存学习报告"
url = f"/service-profile/course-notes/{courseNoteId}/preview/newNote"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, redirect
from django.contrib import messages
from .models import User
import bcrypt
def index(request):
    """Render the landing/login page."""
    template_name = 'login/index.html'
    return render(request, template_name)
def register(request):
    """Validate and create a user from the registration POST.

    Review fix: the bcrypt hash is now decoded to ``str`` before being stored;
    storing the raw ``bytes`` object in a Django CharField persists its
    ``repr`` (``"b'...'"``), which breaks any later password verification.
    Redirects to /success on success, / otherwise.
    """
    if request.method == "POST":
        errors = User.objects.validate(request.POST)
        if errors:
            for error in errors:
                messages.error(request, error)
        else:
            messages.success(request, "Success! Welcome, {}".format(
                request.POST['first_name']))
            hashed_pass = bcrypt.hashpw(
                request.POST['password'].encode(), bcrypt.gensalt()).decode()
            # NOTE(review): pconfirm stores the plain-text confirmation field --
            # confirm this is intentional; persisting it is a security smell.
            User.objects.create(
                first_name=request.POST['first_name'],
                last_name=request.POST['last_name'],
                email=request.POST['email'],
                password=hashed_pass,
                pconfirm=request.POST['pconfirm'])
            return redirect('/success')
    return redirect('/')
def login(request):
    """Authenticate a user by email and bcrypt password check.

    Review fix: the original re-hashed the submitted password and compared
    ``user.password == hashed_pass`` -- a ``str`` vs ``bytes`` comparison that
    is always False on Python 3, so login could never succeed.
    ``bcrypt.checkpw`` performs the comparison correctly (and in constant
    time).  Also fixes the "authenication" typo in the error message.
    """
    if request.method == "POST":
        users = User.objects.filter(email=request.POST['email'])
        if users:
            user = users[0]
            if bcrypt.checkpw(request.POST['password'].encode(),
                              user.password.encode()):
                messages.success(request, "You have successfully logged in!")
                request.session['logged_user'] = user.id
                return redirect('/success')
        messages.error(request, 'Invalid authentication credentials')
    return redirect('/')
def success(request):
    """Render the success page, listing every registered user."""
    return render(request, 'login/success.html', {'my_users': User.objects.all()})
|
# Number-guessing game ("izaki").  Review fixes relative to the original:
#  * typing 'sair' used to crash with ValueError, because the input was passed
#    through int() *before* the (dead) comparison ``num == 'sair'``; the guess
#    is now read as text, checked for 'sair', and only then converted.
#  * any other non-numeric guess is rejected with a message instead of crashing.
#  * the unused ``import time`` and the dangling no-op ``exit`` reference
#    (the builtin was named but never called) were removed.
jogar = '6'
while jogar != '1' and jogar != '2':
    jogar = input('Eu sou o computador e me chamo izaki, vamos jogar ? \n [1] PARA SIM \n [2] PARA NÃO ' )
if jogar == '1':
    import random

    numero = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]  # pool of candidate answers
    numeros = random.choice(numero)  # the number the "computer" thought of
    tentativas = 0  # guesses made so far
    print(' \n TENTE ADVINHAR O NUMERO QUE EU PENSEI.\n ...........................')
    while True:
        palpite = input('Qual numero você acha que eu pensei? \n R: ')
        if palpite == 'sair':
            print('Programa encerrado pelo usuário.')
            break
        try:
            num = int(palpite)
        except ValueError:
            print('Entrada inválida: digite um numero ou "sair".')
            continue
        tentativas = tentativas + 1
        if numeros != num:
            # Both hint strings have the same length, so one banner width fits both.
            aviso = 'Você errou!!! eu pensei em um numero {} que esse.'.format(
                'MAIOR' if numeros > num else 'MENOR')
            largura = len(aviso)
            print('=' * largura)
            print(aviso)
            print('=' * largura)
            opcao = input('[1] TENTAR NOVAMENTE \n [2] RESETAR NUMERO \n [3] SAIR \n R: ')
            if opcao == '2':
                numeros = random.choice(numero)
                print('Certo, pensarei em um outro numero..')
            elif opcao == '3':
                break
        if num == numeros:
            print(f'\n ===FIM DE JOGO VOCÊ GANHOU===\n \n \n AAffz vc me venceu :(, muito bem mas você teve que tentar {tentativas} vezes para me vencer')
            opcao = input('Quer jogo denovo? \n [1] - SIM \n [2] - NÃO \n R: ')
            if opcao == '2':
                break
            if opcao == '1':
                numeros = random.choice(numero)
elif jogar == '2':
    print('que pena que vc não quer jogar comigo :( \n encerrando programa.')
|
# Copyright (c) 2021 kamyu. All rights reserved.
#
# Google Code Jam 2021 Round 1A - Problem B. Prime Time
# https://codingcompetitions.withgoogle.com/codejam/round/000000000043585d/00000000007543d8
#
# Time: O((MAX_P * logX) * (M + logX)), X is the sum of all cards
# Space: O(1)
#
from collections import OrderedDict
def max_card_number_of_group2(X, count):  # Time: O(logX)
    """Count how many cards can be taken (in insertion order -- presumably
    ascending primes, matching the problem input; confirm) before the running
    product exceeds X.  Python 2 code (iterkeys)."""
    taken, running = 0, 1
    for prime in count.iterkeys():  # iterkeys() rather than iteritems() to avoid TLE
        copies = count[prime]
        while copies:
            if running * prime > X:
                return taken
            running *= prime
            taken += 1
            copies -= 1
    return taken
def estimated_max_card_sum_of_group2(X, count):  # Time: O(logX)
    """Upper-bound estimate of the sum of group-2 cards.

    Greedily spends the card budget from max_card_number_of_group2 on the
    last-inserted primes (reversed OrderedDict order).  More accurate than a
    pure ceil(log2) bound but may not be a strict lower bound of the true max.
    """
    total, budget = 0, max_card_number_of_group2(X, count)  # Time: O(logX)
    for prime in reversed(count):
        take = min(budget, count[prime])
        total += prime * take
        budget -= take
        if budget == 0:
            break
    return total
# Verify a candidate split: ``prod`` must factor completely into primes whose
# values sum exactly to ``total``, using each prime at most count[p] times.
def check(prod, total, count):  # Time: O(M + logX)
    """Return True iff prod == p1*...*pk with p1+...+pk == total, within the
    per-prime count limits.  Python 2 code (iterkeys)."""
    for prime in count.iterkeys():  # iterkeys() rather than iteritems() to avoid TLE
        remaining = count[prime]
        while remaining and prod % prime == 0:
            prod //= prime  # divides at most O(logX) times in total
            total -= prime
            remaining -= 1
            if total < 0:  # early exit: the sum side is already exhausted
                return False
    return prod == 1 and total == 0
def prime_time():
    """Solve one test case; return the best group-1 product, or 0 if impossible.

    Python 2 I/O: ``input()`` evaluates the first line as the number of card
    types M; each following line holds a prime P and its multiplicity N.
    """
    M = input()
    count = OrderedDict()
    for _ in xrange(M):
        P, N = map(int, raw_input().strip().split())
        count[P] = N
    # X is the total of all cards; if group 2 sums to i, group 1 must
    # multiply to X-i.
    X = sum(p*n for p, n in count.iteritems())
    for i in xrange(1, estimated_max_card_sum_of_group2(X, count)+1): # prune impossible i
        if check(X-i, i, count):
            return X-i
    return 0
# Python 2 entry point: the first input() gives the number of test cases.
for case in xrange(input()):
    print 'Case #%d: %s' % (case+1, prime_time())
|
from flask import Flask, jsonify, request
from library import Library
app = Flask(__name__)
# Single Library instance shared by every request handler below.
library = Library()
@app.route('/')
def get_all_books():
    """Return every book in the library as a JSON list."""
    serialized = [book.serialize for book in library.get_all_books()]
    return jsonify(books=serialized)
@app.route('/<string:name>')
def get_book_by_name(name):
    """Look a book up by name; return a JSON error payload when absent."""
    book = library.get_book_by_name(name)
    if book is None:
        return jsonify(error="There is no book with name: " + name)
    return jsonify(book=book.serialize)
@app.route('/addBook', methods=['POST'])
def add_book():
    """Create a book from the JSON body's ``name``/``author`` fields.

    Review fix: the request body is parsed once instead of once per field
    (the original called ``request.get_json()`` twice).
    """
    payload = request.get_json()
    name = payload.get("name")
    author = payload.get("author")
    return jsonify(book=library.add_book(name, author).serialize)
@app.route('/updateBook', methods=['PUT'])
def update_book():
    """Update a book from the JSON body's ``id``/``name``/``author`` fields.

    Review fixes: the request body is parsed once instead of three times, and
    the local no longer shadows the ``id`` builtin.
    """
    payload = request.get_json()
    book_id = payload.get("id")
    book = library.update_book(book_id, payload.get("name"), payload.get("author"))
    if book is not None:
        return jsonify(book=book.serialize)
    return jsonify(error="There is no book with id: " + str(book_id))
@app.route('/deleteBook/<int:id>', methods=['DELETE'])
def delete_book(id):
    """Delete a book by id and report whether anything was removed."""
    # Parameter name must stay `id`: it is bound by the route converter.
    if library.delete_book_by_id(id):
        return jsonify(message="Book with id = " + str(id) + " was successfully deleted")
    return jsonify(error="There is no book with id: " + str(id))
if __name__ == '__main__':
    # Flask's built-in development server (not for production use).
    app.run()
|
from rest_framework.mixins import CreateModelMixin, ListModelMixin
from rest_framework.viewsets import GenericViewSet
from ..models import User
from .serializers import UserSignUpSerializer
class UserSignUpViewSet(CreateModelMixin, GenericViewSet):
    """Write-only sign-up endpoint.

    Only ``CreateModelMixin`` is mixed in, so the viewset exposes ``create``
    (POST) and nothing else -- no list/retrieve/update/destroy routes.
    """
    # All users; DRF only uses this for the generic machinery, not for listing.
    queryset = User.objects.all()
    serializer_class = UserSignUpSerializer
|
# Copyright (c) 2017-2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
"""
This module contains helper methods for detecting terminal capabilities.
"""
from __future__ import annotations
from subprocess import DEVNULL, PIPE, Popen
from typing import Optional, Tuple
def termsize() -> "Tuple[Optional[int], Optional[int]]":
    """
    Return the current size of the terminal as ``(rows, cols)``.  If the
    current terminal is not a tty (or ``stty`` is unavailable or prints fewer
    than two fields), ``(None, None)`` is returned.
    """
    output = None
    try:
        with Popen(["stty", "size"], stdout=PIPE, stderr=DEVNULL) as proc:
            if proc.stdout is not None:
                output = proc.stdout.read().decode("utf8")
    except FileNotFoundError:
        # No stty binary on this system.
        output = None
    if output is None:
        return None, None
    fields = output.split()
    if len(fields) < 2:
        return None, None
    return int(fields[0]), int(fields[1])
def print_termcap() -> None:
    """
    Output discovered settings to the terminal.

    Review fix: the original called ``termsize()`` a second time just to print
    the tuple, spawning ``stty`` twice (and the second result could even
    disagree with the first).  The already-fetched values are reused.
    """
    rows, cols = termsize()
    if rows is not None and cols is not None:
        print("The terminal size is:")
        print((rows, cols))
    else:
        print("Not a terminal.")
if __name__ == "__main__":
    # Script mode: report the detected terminal capabilities.
    print_termcap()
|
class Stack(object):
    """LIFO stack backed by a Python list (top of stack = end of list)."""

    def __init__(self):
        # Internal storage; index -1 is the top element.
        self._stack = []

    def push(self, data):
        """Place *data* on top of the stack."""
        self._stack.append(data)

    def pop(self):
        """Remove and return the top element.

        On an empty stack, prints an error message and returns None
        (same best-effort contract as before -- no exception).
        """
        if not self._stack:
            print("error: stack is empty")
            return None
        return self._stack.pop()

    def isEmpty(self):
        """Return True when the stack holds no elements."""
        return len(self._stack) == 0

    def top(self):
        """Peek at the top element without removing it (IndexError if empty)."""
        return self._stack[-1]
|
def solution(roman):
    """Convert a Roman numeral string to its decimal integer value.

    Walks adjacent symbol pairs: a symbol smaller than its successor is
    accumulated as a pending subtraction; otherwise the symbol (minus any
    pending amount) is added.  The final symbol is settled after the loop.
    """
    values = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
    total, pending = 0, 0
    for current, following in zip(roman, roman[1:]):
        if values[current] < values[following]:
            pending += values[current]
        else:
            total += values[current] - pending
            pending = 0
    return total + (values[roman[-1]] - pending)
'''
Create a function that takes a Roman numeral as its argument and returns its value as a
numeric decimal integer. You don't need to validate the form of the Roman numeral.
Modern Roman numerals are written by expressing each decimal digit of the number to be
encoded separately, starting with the leftmost digit and skipping any 0s. So 1990 is
rendered "MCMXC" (1000 = M, 900 = CM, 90 = XC) and 2008 is rendered "MMVIII" (2000 = MM, 8 = VIII).
The Roman numeral for 1666, "MDCLXVI", uses each letter in descending order.
Example:
solution('XXI') # should return 21
Help:
Symbol Value
I 1
V 5
X 10
L 50
C 100
D 500
M 1,000
''' |
from context import imu_tools
from context import imu_base
from context import pressure_base
from context import data_logger
import datetime
# from imu_framework.tests.context import imu_no_thrd_9250
if __name__ == '__main__':
    ######## instantiate IMUs ####################################
    # myIMU_no_thrd_9250 = imu_no_thrd_9250()
    myIMU_base = imu_base()
    # NOTE(review): the three constants look like calibration coefficients
    # (slope/offset plus a third parameter) -- confirm against the
    # pressure_base constructor signature.
    myPressure_base = pressure_base(0.042389869, -0.124235215, 100)
    ######## connect all IMUs #############################################
    # myIMU_no_thrd_9250.connect()
    myIMU_base.connect()
    myPressure_base.setUpChanel(fileName='pressure_base_data_2.csv')
    # TODO: move the setup above into imu_tools so multiple instantiations can be achieved
    ##########################################################################
    myTools = imu_tools(imu=myIMU_base)
    myDataLogger = data_logger()
    i = 0  # completed-sample counter
    print('start')
    # Capture loop: 4501 samples (i = 0..4500), one printed row per sample.
    while i <= 4500:
        rawAccel = myTools.get_raw_scale_data()
        myPressure_base.set_data()
        voltageData = myPressure_base.getAllVoltageData()
        pressureData = myPressure_base.get_pressure()
        time_now = datetime.datetime.now().strftime("%H:%M:%S")
        # One row: 6 raw IMU values, then two (voltage pair, pressure)
        # triples, then a wall-clock timestamp.
        data = [rawAccel[0], rawAccel[1], rawAccel[2],
                rawAccel[3], rawAccel[4], rawAccel[5],
                voltageData[0], voltageData[1], pressureData[0],
                voltageData[2], voltageData[3], pressureData[1],
                time_now]
        print(data)
        # myDataLogger.a3_g3_t2(data, i, 'test_data_logger')
        i = i + 1
    ######## disconnect all IMUs #############################################
    # myIMU_no_thrd_sparton.disconnect()
    print(i)
from django import forms
from django.forms.utils import ErrorList
from django.utils.html import conditional_escape, format_html_join
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
class ImageForm(forms.Form):
    """Single-field form validating an uploaded image file."""
    file = forms.ImageField()
class PastedImageForm(forms.Form):
    """Form validating image content arriving in a ``data`` field (e.g. a paste upload)."""
    data = forms.ImageField()
class FileForm(forms.Form):
    """Single-field form validating an arbitrary uploaded file."""
    file = forms.FileField()
class CruncherErrorList(ErrorList):
    """ErrorList that renders each error as a ``<label class="error-label">``
    element (optionally targeted at a widget via ``for=``) instead of
    Django's default ``<ul>`` markup."""

    def __init__(self, initlist=None, error_class=None, html_id=None):
        # Remember the id of the widget these errors belong to, so the
        # rendered labels can point at it.
        self._html_id = html_id
        super().__init__(initlist=initlist, error_class=error_class)

    def as_flat(self):
        return self.as_ul()

    def as_ul(self):
        if not self.data:
            return ''
        # The for= attribute is identical for every error of this list, so it
        # is computed once outside the join.
        for_attr = 'for={}'.format(self._html_id) if self._html_id else ''
        return format_html_join(
            '{}',
            '<label {} class="error-label">{}</label>',
            ((for_attr, message) for message in self),
        )
class CruncherFormRenderer(forms.Form):
    """Form base class whose HTML output uses ``<label>`` elements for errors
    and help text.

    Installs :class:`CruncherErrorList` as the error class and overrides
    ``_html_output`` accordingly.
    NOTE(review): ``_html_output`` was removed in Django 4.1 -- confirm the
    project's Django version before upgrading.
    """
    def __init__(self, *args, **kwargs):
        # Force every instance to collect field errors into CruncherErrorList.
        kwargs.update(error_class=CruncherErrorList)
        super().__init__(*args, **kwargs)
    def _html_output(
        self,
        normal_row,
        error_row,
        row_ender,
        help_text_html,
        errors_on_separate_row=True,
    ):
        "Output HTML. Used by as_table(), as_ul(), as_p()."
        top_errors = (
            self.non_field_errors()
        )  # Errors that should be displayed above all fields.
        output, hidden_fields = [], []
        for name, field in self.fields.items():
            html_class_attr = ''
            bf = self[name]
            # Wrap the bound field's errors in CruncherErrorList, tagging them
            # with the widget id so the error labels carry for= attributes.
            bf_errors = self.error_class(bf.errors, html_id=bf.id_for_label)
            if bf.is_hidden:
                if bf_errors:
                    top_errors.extend(
                        [
                            _('(Hidden field %(name)s) %(error)s')
                            % {'name': name, 'error': str(e)}
                            for e in bf_errors
                        ]
                    )
                hidden_fields.append(str(bf))
            else:
                # Create a 'class="..."' attribute if the row should have any
                # CSS classes applied.
                css_classes = bf.css_classes()
                if css_classes:
                    html_class_attr = ' class="%s"' % css_classes
                if bf.label:
                    label = conditional_escape(bf.label)
                    label = bf.label_tag(label) or ''
                else:
                    label = ''
                if field.help_text:
                    # Help text is emitted as a help-label element rather than
                    # Django's default helptext span.
                    help_text = mark_safe(
                        '<label for="%s" class="help-label">%s</label>'
                        % (bf.id_for_label, field.help_text)
                    )
                else:
                    help_text = ''
                output.append(
                    normal_row
                    % {
                        'errors': '',
                        'label': label,
                        'field': bf,
                        'help_text': help_text,
                        'html_class_attr': html_class_attr,
                        'css_classes': css_classes,
                        'field_name': bf.html_name,
                    }
                )
                # Field errors are appended after the row (not inline in the
                # 'errors' slot above).
                if bf_errors:
                    output.append(error_row % str(bf_errors))
        if top_errors:
            output.insert(0, error_row % top_errors)
        if hidden_fields:  # Insert any hidden fields in the last row.
            str_hidden = ''.join(hidden_fields)
            if output:
                last_row = output[-1]
                # Chop off the trailing row_ender (e.g. '</td></tr>') and
                # insert the hidden fields.
                if not last_row.endswith(row_ender):
                    # This can happen in the as_p() case (and possibly others
                    # that users write): if there are only top errors, we may
                    # not be able to conscript the last row for our purposes,
                    # so insert a new, empty row.
                    last_row = normal_row % {
                        'errors': '',
                        'label': '',
                        'field': '',
                        'help_text': '',
                        'html_class_attr': html_class_attr,
                        'css_classes': '',
                        'field_name': '',
                    }
                    output.append(last_row)
                output[-1] = last_row[: -len(row_ender)] + str_hidden + row_ender
            else:
                # If there aren't any rows in the output, just append the
                # hidden fields.
                output.append(str_hidden)
        return mark_safe('\n'.join(output))
|
# coding: utf-8
# In[4]:
import urllib.request
response = urllib.request.urlopen("http://www.baidu.com")  # blocking HTTP GET
print(response.read())  # raw response body (bytes)
# Python 2 -> Python 3 migration notes (translated from the original Chinese):
# Python 2 `import urllib2`       -> Python 3 `urllib.request`, `urllib.error`.
# Python 2 `import urllib`        -> Python 3 `urllib.request`, `urllib.error`, `urllib.parse`.
# Python 2 `import urlparse`      -> Python 3 `urllib.parse`.
# Python 2 `urllib.urlopen`       -> Python 3 `urllib.request.urlopen`.
# Python 2 `urllib.urlencode`     -> Python 3 `urllib.parse.urlencode`.
# Python 2 `urllib.quote`         -> Python 3 `urllib.request.quote`.
# Python 2 `cookielib.CookieJar`  -> Python 3 `http.CookieJar`.
# Python 2 `urllib2.Request`      -> Python 3 `urllib.request.Request`.
|
import bs4 as bs
import urllib.request

# Review fixes: the two input files were opened without ever being closed
# (leaked handles); they are now read inside ``with`` blocks.  The bare
# ``except:`` was narrowed to ``except Exception`` so KeyboardInterrupt /
# SystemExit still propagate.
with open("websites.txt") as url_file:
    urls = [line.rstrip('\n') for line in url_file]
with open("tags.txt") as tag_file:
    tags = [line.rstrip('\n') for line in tag_file]

for url in urls:
    try:
        source = urllib.request.urlopen(url).read()
        soup = bs.BeautifulSoup(source, 'lxml')
        print("Title: " + soup.title.get_text() + " url ||" + url)
        # Dump the text of every configured tag found on the page.
        for tag in tags:
            for paragraph in soup.find_all(tag):
                print(paragraph.get_text())
    except Exception:
        # Best-effort scrape: skip sites that fail to fetch or parse.
        pass
|
"""
MAAS Mock API
"""
from __future__ import division, unicode_literals
import json
import collections
from six.moves.urllib.parse import parse_qs
import random
import re
from uuid import uuid4
import attr
from six import text_type
from zope.interface import implementer
from twisted.plugin import IPlugin
from mimic.catalog import Entry
from mimic.catalog import Endpoint
from mimic.rest.auth_api import base_uri_from_request
from mimic.rest.mimicapp import MimicApp
from mimic.imimic import IAPIMock
from mimic.canned_responses.maas_json_home import json_home
from mimic.canned_responses.maas_monitoring_zones import monitoring_zones
from mimic.canned_responses.maas_alarm_examples import alarm_examples
from mimic.model.maas_errors import ObjectDoesNotExist, ParentDoesNotExist
from mimic.model.maas_objects import (Agent,
Alarm,
AlarmState,
Check,
Entity,
MaasStore,
Notification,
NotificationPlan,
Suppression)
from mimic.util.helper import json_from_request
from mimic.util.helper import Matcher, random_hex_generator, random_hipsum
MISSING_REQUIRED_ARGUMENT_REGEX = re.compile(
r'__init__\(\) missing \d+ required positional argument: \'(\w+)\'')
REMOTE_CHECK_TYPE_REGEX = re.compile(r'^remote\.')
@implementer(IAPIMock, IPlugin)
class MaasApi(object):
    """
    Rest endpoints for mocked MAAS Api.
    """
    def __init__(self, regions=None):
        """
        Set regions.

        Review fix: the default used to be the mutable literal ``["ORD"]``,
        created once and shared by every instance built without an argument;
        a ``None`` sentinel keeps the same effective default without that
        pitfall.
        """
        self._regions = ["ORD"] if regions is None else regions

    def catalog_entries(self, tenant_id):
        """
        List catalog entries for the MaaS API: one ``cloudMonitoring`` entry
        with an endpoint per configured region.
        """
        endpoints = [
            Endpoint(tenant_id, region, text_type(uuid4()), "v1.0")
            for region in self._regions
        ]
        return [Entry(tenant_id, "rax:monitor", "cloudMonitoring", endpoints)]

    def resource_for_region(self, region, uri_prefix, session_store):
        """
        Get an :obj:`twisted.web.iweb.IResource` for the given URI prefix;
        implement :obj:`IAPIMock`.
        """
        return MaasMock(self, uri_prefix, session_store, region).app.resource()
class MCache(object):
    """
    M(onitoring) Cache Object to hold dictionaries of all entities, checks and alarms.
    """
    def __init__(self, clock):
        """
        Create the initial structs for cache
        """
        # All timestamps in this store are epoch milliseconds derived from the
        # injected (test) clock.
        current_time_milliseconds = int(1000 * clock.seconds())
        self.entities = collections.OrderedDict()
        # Pre-seeded default notification and plan, mirroring what a real
        # tenant starts with.
        self.notifications = collections.OrderedDict(
            [(u'ntTechnicalContactsEmail',
              Notification(id=u'ntTechnicalContactsEmail',
                           label=u'Email All Technical Contacts',
                           created_at=current_time_milliseconds,
                           updated_at=current_time_milliseconds,
                           type=u'technicalContactsEmail'))])
        self.notification_plans = collections.OrderedDict(
            [(u'npTechnicalContactsEmail',
              NotificationPlan(id=u'npTechnicalContactsEmail',
                               label=u'Technical Contacts - Email',
                               created_at=current_time_milliseconds,
                               updated_at=current_time_milliseconds))])
        # Canned notification-type descriptors.  NOTE: the backslash
        # continuations inside the description strings embed the following
        # line's leading whitespace into the string value -- kept verbatim.
        self.notificationtypes_list = [{'id': 'webhook', 'fields': [{'name': 'url',
                                                                     'optional': False,
                                                                     'description': 'An HTTP or \
                                       HTTPS URL to POST to'}]},
                                       {'id': 'email', 'fields': [{'name': 'address',
                                                                   'optional': False,
                                                                   'description': 'Email \
                                       address to send notifications to'}]},
                                       {'id': 'pagerduty', 'fields': [{'name': 'service_key',
                                                                       'optional': False,
                                                                       'description': 'The PagerDuty \
                                       service key to use.'}]},
                                       {'id': 'sms', 'fields': [{'name': 'phone_number',
                                                                 'optional': False,
                                                                 'description': 'Phone number to send \
                                       the notification to, \
                                       with leading + and country \
                                       code (E.164 format)'}]}]
        self.suppressions = collections.OrderedDict()
        self.audits_list = []
        self.maas_store = MaasStore(clock)
        # Per-alarm canned test responses/errors used by the test-alarm API.
        self.test_alarm_responses = {}
        self.test_alarm_errors = {}
def _only_keys(dict_ins, keys):
"""
Filters out unwanted keys of a dict.
"""
return {k: dict_ins[k] for k in dict_ins if k in keys}
def create_entity(clock, params):
    """
    Build an Entity model from user-supplied parameters.

    Only user-specifiable keys are honored; ``created_at`` and
    ``updated_at`` are stamped from the clock.

    :return: an Entity model, which is described in `the Rackspace Cloud
        Monitoring Developer Guide, section 5.4
        <http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/service-entities.html>`_
    :rtype: ``dict`` mapping ``unicode`` to ``unicode``, ``float``,
        ``bool``, ``dict`` or ``NoneType``.
    """
    now_ms = int(1000 * clock.seconds())
    entity_kwargs = _only_keys(params, Entity.USER_SPECIFIABLE_KEYS)
    entity_kwargs['created_at'] = now_ms
    entity_kwargs['updated_at'] = now_ms
    return Entity(**entity_kwargs)
def create_check(clock, params):
    """
    Build a Check model from user-supplied parameters.

    Only user-specifiable keys are honored; ``created_at`` and
    ``updated_at`` are stamped from the clock.

    :return: a Check model, which is described in `the Rackspace Cloud
        Monitoring Developer Guide, section 5.7
        <http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/service-checks.html>`_
    :rtype: ``dict`` mapping ``unicode`` to ``unicode``, ``float``,
        ``int``, ``bool``, ``dict`` or ``NoneType``.
    """
    now_ms = int(1000 * clock.seconds())
    check_kwargs = _only_keys(params, Check.USER_SPECIFIABLE_KEYS)
    check_kwargs['created_at'] = now_ms
    check_kwargs['updated_at'] = now_ms
    return Check(**check_kwargs)
def create_alarm(clock, entity_id, params):
    """
    Build an Alarm model from user-supplied parameters.

    Only user-specifiable keys are honored; ``entity_id``, ``created_at``
    and ``updated_at`` are filled in by this function.

    :return: an Alarm model, which is described in `the Rackspace Cloud
        Monitoring Developer Guide, section 5.12
        <http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/service-alarms.html>`_
    :rtype: ``dict`` mapping ``unicode`` to ``unicode``, ``float``,
        ``bool``, ``dict``, or ``NoneType``.
    """
    now_ms = int(1000 * clock.seconds())
    alarm_kwargs = _only_keys(params, Alarm.USER_SPECIFIABLE_KEYS)
    alarm_kwargs['entity_id'] = entity_id
    alarm_kwargs['created_at'] = now_ms
    alarm_kwargs['updated_at'] = now_ms
    return Alarm(**alarm_kwargs)
def create_notification_plan(clock, params):
    """
    Build a NotificationPlan model from user-supplied parameters.

    :return: a Notification Plan model, which is described in `the
        Rackspace Cloud Monitoring Developer Guide, section 5.11
        <http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/service-notification-plans.html>`_
    :rtype: ``dict`` mapping ``unicode`` to ``unicode``, ``float``,
        ``dict`` or ``NoneType``.
    """
    now_ms = int(1000 * clock.seconds())
    np_kwargs = _only_keys(params, NotificationPlan.USER_SPECIFIABLE_KEYS)
    np_kwargs['created_at'] = now_ms
    np_kwargs['updated_at'] = now_ms
    return NotificationPlan(**np_kwargs)
def create_notification(clock, params):
    """
    Build a Notification target model from user-supplied parameters.

    :return: a Notification model, which is described in `the Rackspace
        Cloud Monitoring Developer Guide, section 5.10
        <http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/service-notifications.html>`_
    :rtype: ``dict`` mapping ``unicode`` to ``unicode``, ``float``,
        ``dict`` or ``NoneType``.
    """
    now_ms = int(1000 * clock.seconds())
    nt_kwargs = _only_keys(params, Notification.USER_SPECIFIABLE_KEYS)
    nt_kwargs['created_at'] = now_ms
    nt_kwargs['updated_at'] = now_ms
    return Notification(**nt_kwargs)
def create_suppression(clock, params):
    """
    Build a Suppression model from user-supplied parameters.

    :return: a Suppression model, which is described in `the Rackspace
        Cloud Monitoring Developer Guide, section 5.16
        <http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/service-suppressions.html>`_
    :rtype: ``dict`` mapping ``unicode`` to ``unicode`` or ``list``.
    """
    sp_kwargs = _only_keys(params, Suppression.USER_SPECIFIABLE_KEYS)
    now_ms = int(1000 * clock.seconds())
    sp_kwargs['created_at'] = now_ms
    sp_kwargs['updated_at'] = now_ms
    return Suppression(**sp_kwargs)
def _get_object(collection, object_type, object_key, alt_key=None):
"""
Gets the specified object from the collection or throws ObjectDoesNotExist.
The collection should behave like a dict where object_key retrieves
an object from the collection.
"""
try:
return collection[object_key]
except KeyError:
raise ObjectDoesNotExist(object_type=object_type,
key=(alt_key or object_key))
def _delete_object(collection, object_type, object_key, alt_key=None):
"""
Deletes the specified object from the collection or throws ObjectDoesNotExist.
"""
try:
del collection[object_key]
except KeyError:
raise ObjectDoesNotExist(object_type=object_type,
key=(alt_key or object_key))
def _get_entity(entities, entity_id):
    """
    Fetch an Entity by ID, raising ObjectDoesNotExist when absent.
    """
    return _get_object(entities, 'Entity', entity_id)
def _delete_entity(entities, entity_id):
    """
    Remove an Entity by ID, raising ObjectDoesNotExist when absent.
    """
    _delete_object(entities, 'Entity', entity_id)
def _get_parent_entity(entities, entity_id):
"""
Gets the parent entity from the collection, or throws ParentDoesNotExist.
"""
try:
return entities[entity_id]
except KeyError:
raise ParentDoesNotExist(object_type='Entity', key=entity_id)
def _get_check(entities, entity_id, check_id):
    """
    Fetch a check beneath an entity.

    Raises ParentDoesNotExist when the entity is missing and
    ObjectDoesNotExist (keyed as "entity:check") when the check is missing.
    """
    parent = _get_parent_entity(entities, entity_id)
    composite_key = '{0}:{1}'.format(entity_id, check_id)
    return _get_object(parent.checks, 'Check', check_id, composite_key)
def _delete_check(entities, entity_id, check_id):
    """
    Remove a check beneath an entity, cascading to its alarms.

    Raises ParentDoesNotExist when the entity is missing and
    ObjectDoesNotExist (keyed as "entity:check") when the check is missing.
    """
    parent = _get_parent_entity(entities, entity_id)
    _delete_object(parent.checks,
                   'Check',
                   check_id,
                   '{0}:{1}'.format(entity_id, check_id))
    # Cascade: every alarm referencing the deleted check goes away too.
    orphaned_alarm_ids = [aid for aid, alarm in parent.alarms.items()
                          if alarm.check_id == check_id]
    for aid in orphaned_alarm_ids:
        del parent.alarms[aid]
def _delete_alarm(entities, entity_id, alarm_id):
    """
    Remove an alarm beneath an entity.

    Raises ParentDoesNotExist when the entity is missing and
    ObjectDoesNotExist (keyed as "entity:alarm") when the alarm is missing.
    """
    parent = _get_parent_entity(entities, entity_id)
    composite_key = '{0}:{1}'.format(entity_id, alarm_id)
    _delete_object(parent.alarms, 'Alarm', alarm_id, composite_key)
def _get_alarm(entities, entity_id, alarm_id):
    """
    Fetch an alarm beneath an entity.

    Raises ParentDoesNotExist when the entity is missing and
    ObjectDoesNotExist (keyed as "entity:alarm") when the alarm is missing.
    """
    parent = _get_parent_entity(entities, entity_id)
    composite_key = '{0}:{1}'.format(entity_id, alarm_id)
    return _get_object(parent.alarms, 'Alarm', alarm_id, composite_key)
def _get_notification(notifications, nt_id):
    """
    Fetch a Notification by ID, raising ObjectDoesNotExist when absent.
    """
    return _get_object(notifications, 'Notification', nt_id)
def _delete_notification(notifications, nt_id):
    """
    Remove a Notification by ID, raising ObjectDoesNotExist when absent.
    """
    _delete_object(notifications, 'Notification', nt_id)
def _get_notification_plan(notification_plans, np_id):
    """
    Fetch a NotificationPlan by ID, raising ObjectDoesNotExist when absent.
    """
    return _get_object(notification_plans, 'NotificationPlan', np_id)
def _delete_notification_plan(notification_plans, np_id):
    """
    Remove a NotificationPlan by ID, raising ObjectDoesNotExist when absent.
    """
    _delete_object(notification_plans, 'NotificationPlan', np_id)
def _get_suppression(suppressions, sp_id):
    """
    Fetch a Suppression by ID, raising ObjectDoesNotExist when absent.
    """
    return _get_object(suppressions, 'Suppression', sp_id)
def _delete_suppression(suppressions, sp_id):
    """
    Remove a Suppression by ID, raising ObjectDoesNotExist when absent.
    """
    _delete_object(suppressions, 'Suppression', sp_id)
def _map_getter(collection, request, object_type, object_key):
    """
    GET handler for an object held in a dict-like collection.

    Returns the object's JSON on success; on a missing key, sets the
    error's response code on ``request`` and returns the serialized error.
    """
    try:
        found = _get_object(collection, object_type, object_key)
    except ObjectDoesNotExist as e:
        request.setResponseCode(e.code)
        return json.dumps(e.to_json())
    return json.dumps(found.to_json())
def _find_missing_required_key(cls, post_data, additional_keys):
    """
    Identify which required attr field was absent, given that constructing
    ``cls`` raised a TypeError.

    A field counts as missing when it has no default and appears neither in
    the posted data nor in ``additional_keys``.
    """
    specified_keys = set(post_data.keys()) | set(additional_keys)
    missing_keys = [field.name
                    for field in attr.fields(cls)
                    if field.default is attr.NOTHING
                    and field.name not in specified_keys]
    return missing_keys[0]
def _metric_list_for_check(maas_store, entity, check):
    """
    Compute the metrics list for one check.

    Remote checks yield a metric per (monitoring zone, metric type) pair;
    agent checks yield one per metric type. Check types Mimic doesn't know
    about yield an empty list. ``entity`` is unused but kept for signature
    parity with callers.
    """
    if check.type not in maas_store.check_types:
        return []
    metrics = maas_store.check_types[check.type].metrics
    if REMOTE_CHECK_TYPE_REGEX.match(check.type):
        # Remote metrics are namespaced by monitoring zone.
        return [{'name': '{0}.{1}'.format(zone, metric.name),
                 'type': metric.type,
                 'unit': metric.unit}
                for metric in metrics
                for zone in check.monitoring_zones_poll]
    return [{'name': metric.name,
             'type': metric.type,
             'unit': metric.unit}
            for metric in metrics]
def _metric_list_for_entity(maas_store, entity):
    """
    Build the available-metrics listing for a single entity and its checks.
    """
    check_entries = [{'id': check.id,
                      'label': check.label,
                      'type': check.type,
                      'metrics': _metric_list_for_check(maas_store, entity, check)}
                     for check in entity.checks.values()]
    return {'entity_id': entity.id,
            'entity_label': entity.label,
            'checks': check_entries}
def _multiplot_interval(from_date, to_date, points):
"""
Computes the size of the interval between points in a multiplot.
:return: the multiplot interval size.
:rtype: ``float``
"""
if points < 2:
return 0.0
return (to_date - from_date) / (points - 1)
def _compute_multiplot(maas_store, entity_id, check, metric_name, from_date, to_date, points):
    """
    Computes multiplot data for a single (entity, check, metric) group.

    :return: a dict of plot metadata plus ``points`` evenly spaced samples;
        a fallback dict (unknown unit/type, empty data) when the check type
        or metric cannot be resolved.
    """
    fallback = {'entity_id': entity_id,
                'check_id': check.id,
                'metric': metric_name,
                'unit': 'unknown',
                'type': 'unknown',
                'data': []}
    if check.type not in maas_store.check_types:
        return fallback
    interval = _multiplot_interval(from_date, to_date, points)
    base_metric_name = metric_name
    metric_value_kwargs = {'entity_id': entity_id,
                           'check_id': check.id}
    # Reuse the module-level compiled pattern instead of re-compiling the
    # same inline regex (consistent with _metric_list_for_check).
    if REMOTE_CHECK_TYPE_REGEX.match(check.type):
        # Remote metrics are named "<monitoring zone>.<metric>".
        match = re.match(r'^(mz\w+)\.(\w+)$', metric_name)
        if not match:
            return fallback
        metric_value_kwargs['monitoring_zone'] = match.group(1)
        base_metric_name = match.group(2)
    try:
        metric = maas_store.check_types[check.type].get_metric_by_name(base_metric_name)
    except NameError:
        # get_metric_by_name signals an unknown metric with NameError.
        return fallback
    # Hoist the timestamp computation so it is done once per point.
    timestamps = [int(from_date + (i * interval)) for i in range(points)]
    return {'entity_id': entity_id,
            'check_id': check.id,
            'metric': metric_name,
            'unit': metric.unit,
            'type': metric.type,
            'data': [{'numPoints': 4,
                      'timestamp': ts,
                      'average': metric.get_value(timestamp=ts,
                                                  **metric_value_kwargs)}
                     for ts in timestamps]}
def parse_and_flatten_qs(url):
    """
    Parse a querystring, collapsing single-element value lists to scalars.
    """
    return {key: (values[0] if len(values) == 1 else values)
            for key, values in parse_qs(url).items()}
def _mcache_factory(clock):
    """
    Return a zero-argument factory producing a per-region defaultdict whose
    values are fresh MCache objects sharing ``clock``.
    """
    def make_region_caches():
        return collections.defaultdict(lambda: MCache(clock))
    return make_region_caches
class MaasMock(object):
"""
Klein routes for the Monitoring API.
"""
def __init__(self, api_mock, uri_prefix, session_store, name):
"""
Create a maas region with a given URI prefix (used for generating URIs
to servers).
"""
self._api_mock = api_mock
self._session_store = session_store
self._name = name
def _entity_cache_for_tenant(self, tenant_id):
"""
Retrieve the M_cache object containing all objects created so far
"""
clock = self._session_store.clock
return (self._session_store.session_for_tenant_id(tenant_id)
.data_for_api(self._api_mock, _mcache_factory(clock))[self._name]
)
def _audit(self, app, request, tenant_id, status, content=b''):
headers = {k.decode("utf-8"): [vv.decode("utf-8") if isinstance(vv, bytes) else vv for vv in v]
for k, v in request.getAllHeaders().items()
if k != b'x-auth-token'}
record = {
'id': text_type(uuid4()),
'timestamp': int(1000 * self._session_store.clock.seconds()),
'headers': headers,
'url': request.path.decode("utf-8"),
'app': app,
'query': parse_and_flatten_qs(request.uri.decode("utf-8")),
'txnId': text_type(uuid4()),
'payload': content.decode("utf-8"),
'method': request.method.decode("utf-8"),
'account_id': tenant_id,
'who': '',
'why': '',
'statusCode': status
}
self._entity_cache_for_tenant(tenant_id).audits_list.append(record)
app = MimicApp()
@app.route('/v1.0/<string:tenant_id>/mimic/reset', methods=['GET'])
def doreset(self, request, tenant_id):
"""
Reset the session
"""
self._session_store.session_for_tenant_id(tenant_id)._api_objects = {}
return "Session has been reset for tenant_id " + tenant_id
@app.route('/v1.0/<string:tenant_id>/entities', methods=['GET'])
def list_entities(self, request, tenant_id):
"""
Replies the entities list call
"""
entities = list(self._entity_cache_for_tenant(tenant_id).entities.values())
limit = 100
marker = None
next_marker = None
next_href = None
if b'limit' in request.args:
limit = int(request.args[b'limit'][0].strip())
if b'marker' in request.args:
marker = request.args[b'marker'][0].strip().decode("utf-8")
for q in range(len(entities)):
if entities[q].id == marker:
entities = entities[q:]
break
try:
next_marker = entities[limit].id
except Exception:
pass
entities = entities[:limit]
metadata = {'count': len(entities),
'limit': limit,
'marker': marker,
'next_marker': next_marker,
'next_href': next_href}
request.setResponseCode(200)
return json.dumps({'metadata': metadata,
'values': [entity.to_json() for entity in entities]})
@app.route('/v1.0/<string:tenant_id>/entities', methods=['POST'])
def create_entity(self, request, tenant_id):
"""
Creates a new entity
"""
content = request.content.read()
postdata = json.loads(content.decode("utf-8"))
newentity = create_entity(self._session_store.clock, postdata)
self._entity_cache_for_tenant(tenant_id).entities[newentity.id] = newentity
status = 201
request.setResponseCode(status)
request.setHeader(b'location', base_uri_from_request(request).rstrip('/').encode('utf-8') +
request.path + b'/' + newentity.id.encode('utf-8'))
request.setHeader(b'x-object-id', newentity.id.encode('utf-8'))
request.setHeader(b'content-type', b'text/plain')
self._audit('entities', request, tenant_id, status, content)
return b''
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>', methods=['GET'])
def get_entity(self, request, tenant_id, entity_id):
"""
Fetches a specific entity
"""
return _map_getter(self._entity_cache_for_tenant(tenant_id).entities,
request,
"Entity",
entity_id)
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/checks', methods=['GET'])
def get_checks_for_entity(self, request, tenant_id, entity_id):
"""
Returns all the checks for a paricular entity
"""
entities = self._entity_cache_for_tenant(tenant_id).entities
try:
entity = _get_parent_entity(entities, entity_id)
checks = entity.list_checks()
metadata = {'count': len(checks),
'limit': 1000,
'marker': None,
'next_marker': None,
'next_href': None}
request.setResponseCode(200)
return json.dumps({'metadata': metadata, 'values': checks})
except ParentDoesNotExist as e:
request.setResponseCode(e.code)
return json.dumps(e.to_json())
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>', methods=['PUT'])
def update_entity(self, request, tenant_id, entity_id):
"""
Update entity in place.
"""
content = request.content.read()
update = json.loads(content.decode("utf-8"))
update_kwargs = dict(update)
entities = self._entity_cache_for_tenant(tenant_id).entities
try:
entity = _get_entity(entities, entity_id)
entity.update(clock=self._session_store.clock, **update_kwargs)
except ObjectDoesNotExist as e:
request.setResponseCode(e.code)
self._audit('entities', request, tenant_id, e.code, content)
return json.dumps(e.to_json())
status = 204
request.setResponseCode(status)
request.setHeader(b'location', base_uri_from_request(request).rstrip('/').encode('utf-8') +
request.path)
request.setHeader(b'x-object-id', entity_id.encode('utf-8'))
request.setHeader(b'content-type', b'text/plain')
self._audit('entities', request, tenant_id, status, content)
return b''
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>', methods=['DELETE'])
def delete_entity(self, request, tenant_id, entity_id):
"""
Delete an entity, all checks that belong to entity, all alarms that belong to those checks
"""
entities = self._entity_cache_for_tenant(tenant_id).entities
try:
_delete_entity(entities, entity_id)
except ObjectDoesNotExist as e:
request.setResponseCode(e.code)
self._audit('entities', request, tenant_id, e.code)
return json.dumps(e.to_json())
status = 204
request.setResponseCode(status)
request.setHeader(b'content-type', b'text/plain')
self._audit('entities', request, tenant_id, status)
return b''
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/checks', methods=['POST'])
def create_check(self, request, tenant_id, entity_id):
"""
Create a check
"""
content = request.content.read()
postdata = json.loads(content.decode("utf-8"))
entities = self._entity_cache_for_tenant(tenant_id).entities
newcheck = None
try:
newcheck = create_check(self._session_store.clock, postdata)
except TypeError:
missing_key = _find_missing_required_key(Check, postdata, ['created_at', 'updated_at'])
status = 400
request.setResponseCode(status)
self._audit('checks', request, tenant_id, status, content)
return json.dumps({'type': 'badRequest',
'code': status,
'message': 'Validation error for key \'{0}\''.format(missing_key),
'details': 'Missing required key ({0})'.format(missing_key),
'txnId': '.fake.mimic.transaction.id.c-1111111.ts-123444444.v-12344frf'})
try:
entity = _get_entity(entities, entity_id)
entity.checks[newcheck.id] = newcheck
except ObjectDoesNotExist as e:
request.setResponseCode(e.code)
self._audit('checks', request, tenant_id, e.code, content)
return json.dumps(e.to_json())
status = 201
request.setResponseCode(status)
request.setHeader(b'location', base_uri_from_request(request).rstrip('/').encode('utf-8') +
request.path + b'/' + newcheck.id.encode('utf-8'))
request.setHeader(b'x-object-id', newcheck.id.encode('utf-8'))
request.setHeader(b'content-type', b'text/plain')
self._audit('checks', request, tenant_id, status, content)
return b''
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/checks/<string:check_id>',
methods=['GET'])
def get_check(self, request, tenant_id, entity_id, check_id):
"""
Get a specific check that was created before
"""
entities = self._entity_cache_for_tenant(tenant_id).entities
try:
check = _get_check(entities, entity_id, check_id)
return json.dumps(check.to_json())
except (ObjectDoesNotExist, ParentDoesNotExist) as e:
request.setResponseCode(e.code)
return json.dumps(e.to_json())
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/checks/<string:check_id>',
methods=['PUT'])
def update_check(self, request, tenant_id, entity_id, check_id):
"""
Updates a check in place.
"""
content = request.content.read()
update = json.loads(content.decode("utf-8"))
update_kwargs = dict(update)
entities = self._entity_cache_for_tenant(tenant_id).entities
try:
check = _get_check(entities, entity_id, check_id)
check.update(clock=self._session_store.clock, **update_kwargs)
except (ObjectDoesNotExist, ParentDoesNotExist) as e:
request.setResponseCode(e.code)
self._audit('checks', request, tenant_id, e.code, content)
return json.dumps(e.to_json())
status = 204
request.setResponseCode(status)
request.setHeader(b'location', base_uri_from_request(request).rstrip('/').encode('utf-8') +
request.path)
request.setHeader(b'x-object-id', check_id.encode('utf-8'))
request.setHeader(b'content-type', b'text/plain')
self._audit('checks', request, tenant_id, status, content)
return b''
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/checks/<string:check_id>',
methods=['DELETE'])
def delete_check(self, request, tenant_id, entity_id, check_id):
"""
Deletes check and all alarms associated to it
"""
entities = self._entity_cache_for_tenant(tenant_id).entities
try:
_delete_check(entities, entity_id, check_id)
except (ObjectDoesNotExist, ParentDoesNotExist) as e:
request.setResponseCode(e.code)
self._audit('checks', request, tenant_id, e.code)
return json.dumps(e.to_json())
status = 204
request.setResponseCode(status)
request.setHeader(b'content-type', b'text/plain')
self._audit('checks', request, tenant_id, status)
return b''
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/test-check', methods=['POST'])
def test_check(self, request, tenant_id, entity_id):
"""
Tests a check.
If the user has configured overrides using the control API for
test-check using this entity and check type, those will be used.
Otherwise, random values within each metric type will be
generated. For instance, integer metrics generate integers, and
string metrics generate strings. No other guarantees are made.
"""
content = request.content.read()
test_config = json.loads(content.decode("utf-8"))
check_type = test_config['type']
maas_store = self._entity_cache_for_tenant(tenant_id).maas_store
response_code, response_body = maas_store.check_types[check_type].get_test_check_response(
entity_id=entity_id,
monitoring_zones=test_config.get('monitoring_zones_poll'),
timestamp=int(1000 * self._session_store.clock.seconds()))
request.setResponseCode(response_code)
self._audit('checks', request, tenant_id, response_code, content)
return json.dumps(response_body)
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/alarms', methods=['POST'])
def create_alarm(self, request, tenant_id, entity_id):
"""
Creates alarm
"""
content = request.content.read()
postdata = json.loads(content.decode("utf-8"))
entities = self._entity_cache_for_tenant(tenant_id).entities
try:
newalarm = create_alarm(self._session_store.clock, entity_id, postdata)
except TypeError:
missing_key = _find_missing_required_key(Alarm, postdata, ['created_at',
'updated_at',
'entity_id'])
status = 400
request.setResponseCode(status)
self._audit('alarms', request, tenant_id, status, content)
return json.dumps({'type': 'badRequest',
'code': status,
'message': 'Validation error for key \'{0}\''.format(missing_key),
'details': 'Missing required key ({0})'.format(missing_key),
'txnId': '.fake.mimic.transaction.id.c-1111111.ts-123444444.v-12344frf'})
try:
entity = _get_parent_entity(entities, entity_id)
entity.alarms[newalarm.id] = newalarm
except ParentDoesNotExist as e:
request.setResponseCode(e.code)
self._audit('alarms', request, tenant_id, e.code, content)
return json.dumps(e.to_json())
status = 201
request.setResponseCode(status)
request.setHeader(b'location', base_uri_from_request(request).rstrip('/').encode('utf-8') +
request.path + b'/' + newalarm.id.encode('utf-8'))
request.setHeader(b'x-object-id', newalarm.id.encode('utf-8'))
request.setHeader(b'content-type', b'text/plain')
self._audit('alarms', request, tenant_id, status, content)
return b''
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/alarms/<string:alarm_id>',
methods=['GET'])
def get_alarm(self, request, tenant_id, entity_id, alarm_id):
"""
Gets an alarm by ID.
"""
entities = self._entity_cache_for_tenant(tenant_id).entities
try:
alarm = _get_alarm(entities, entity_id, alarm_id)
return json.dumps(alarm.to_json())
except (ObjectDoesNotExist, ParentDoesNotExist) as e:
request.setResponseCode(e.code)
return json.dumps(e.to_json())
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/alarms/<string:alarm_id>',
methods=['PUT'])
def update_alarm(self, request, tenant_id, entity_id, alarm_id):
"""
Updates an alarm in place.
Documentation for this API can be found in the Rackspace Cloud
Monitoring Developer Guide, section 5.12.5, "Update alarm by ID".
The full link is quite long, but you can reach it by browsing
to the following goo.gl URL:
http://goo.gl/NhxgTZ
"""
content = request.content.read()
update = json.loads(content.decode("utf-8"))
update_kwargs = dict(update)
entities = self._entity_cache_for_tenant(tenant_id).entities
try:
alarm = _get_alarm(entities, entity_id, alarm_id)
alarm.update(clock=self._session_store.clock, **update_kwargs)
except (ObjectDoesNotExist, ParentDoesNotExist) as e:
request.setResponseCode(e.code)
self._audit('alarms', request, tenant_id, e.code, content)
return json.dumps(e.to_json())
status = 204
request.setResponseCode(status)
request.setHeader(b'location', base_uri_from_request(request).rstrip('/').encode('utf-8') +
request.path)
request.setHeader(b'x-object-id', alarm_id.encode('utf-8'))
request.setHeader(b'content-type', b'text/plain')
self._audit('alarms', request, tenant_id, status, content)
return b''
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/alarms/<string:alarm_id>',
methods=['DELETE'])
def delete_alarm(self, request, tenant_id, entity_id, alarm_id):
"""
Delete an alarm
"""
entities = self._entity_cache_for_tenant(tenant_id).entities
try:
_delete_alarm(entities, entity_id, alarm_id)
except (ObjectDoesNotExist, ParentDoesNotExist) as e:
request.setResponseCode(e.code)
self._audit('alarms', request, tenant_id, e.code)
return json.dumps(e.to_json())
status = 204
request.setResponseCode(status)
self._audit('alarms', request, tenant_id, status)
request.setHeader(b'content-type', b'text/plain')
return b''
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/test-alarm', methods=['POST'])
def test_alarm(self, request, tenant_id, entity_id):
"""
Test an alarm.
This API can be driven using the control API to set an error
or canned success response. If no error or success response is set,
it will return success with a random state and status. Users should
not expect this API to consistently return either OK, WARNING or
CRITICAL without first setting the response in the control API.
"""
content = request.content.read()
payload = json.loads(content.decode("utf-8"))
n_tests = len(payload['check_data'])
current_time_milliseconds = int(1000 * self._session_store.clock.seconds())
status = 200
response_payload = []
test_responses = self._entity_cache_for_tenant(tenant_id).test_alarm_responses
test_errors = self._entity_cache_for_tenant(tenant_id).test_alarm_errors
if entity_id in test_errors and len(test_errors[entity_id]) > 0:
error_response = test_errors[entity_id].popleft()
status = error_response['code']
response_payload = error_response['response']
elif entity_id in test_responses:
n_responses = len(test_responses[entity_id])
for i in range(n_tests):
test_response = test_responses[entity_id][i % n_responses]
response_payload.append({'state': test_response['state'],
'status': test_response.get(
'status', 'Matched default return statement'),
'timestamp': current_time_milliseconds})
else:
for _ in range(n_tests):
response_payload.append({'state': random.choice(['OK', 'WARNING', 'CRITICAL']),
'status': random_hipsum(12),
'timestamp': current_time_milliseconds})
request.setResponseCode(status)
self._audit('alarms', request, tenant_id, status, content)
return json.dumps(response_payload)
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/alarms', methods=['GET'])
def get_alarms_for_entity(self, request, tenant_id, entity_id):
"""
Get all alarms for the specified entity.
"""
entities = self._entity_cache_for_tenant(tenant_id).entities
try:
entity = _get_parent_entity(entities, entity_id)
alarms = entity.list_alarms()
metadata = {'count': len(alarms),
'limit': 1000,
'marker': None,
'next_marker': None,
'next_href': None}
request.setResponseCode(200)
return json.dumps({'metadata': metadata, 'values': alarms})
except ParentDoesNotExist as e:
request.setResponseCode(e.code)
return json.dumps(e.to_json())
    @app.route('/v1.0/<string:tenant_id>/views/overview', methods=['GET'])
    def overview(self, request, tenant_id):
        """
        Serve the overview call: every entity together with its checks,
        alarms and latest alarm states, paginated via ``marker``/``limit``.

        One or more ``entityId`` query arguments restrict the listing;
        unknown IDs are silently dropped, and an error is returned only
        when none of the requested IDs exist.
        """
        entity_map = self._entity_cache_for_tenant(tenant_id).entities
        all_entities = None
        if b'entityId' in request.args:
            entity_ids = [a.decode("utf-8") for a in request.args[b'entityId']]
            all_entities = [entity_map[entity_id] for entity_id in entity_ids
                            if entity_id in entity_map]
            # Error only when the filter matched nothing at all.
            if len(all_entities) == 0:
                err = ObjectDoesNotExist(object_type='Entity', key=','.join(entity_ids))
                request.setResponseCode(err.code)
                return json.dumps(err.to_json())
        else:
            all_entities = list(entity_map.values())
        maas_store = self._entity_cache_for_tenant(tenant_id).maas_store
        # Page size is capped at 1000.
        page_limit = min(int(request.args.get(b'limit', [100])[0]), 1000)
        offset = 0
        current_marker = request.args.get(b'marker', [None])[0]
        if current_marker is not None:
            current_marker = current_marker.decode("utf-8")
            try:
                # Matcher makes list.index compare entities by id.
                offset = all_entities.index(Matcher(lambda entity: entity.id == current_marker))
            except ValueError:
                # Unknown marker: fall back to the first page.
                offset = 0
        entities = all_entities[offset:offset + page_limit]
        next_marker = None
        if offset + page_limit < len(all_entities):
            next_marker = all_entities[offset + page_limit].id
        metadata = {
            'count': len(entities),
            'marker': current_marker,
            'next_marker': next_marker,
            'limit': page_limit,
            'next_href': None
        }
        values = [{'alarms': entity.list_alarms(),
                   'checks': entity.list_checks(),
                   'entity': entity.to_json(),
                   'latest_alarm_states': [
                       state.brief_json()
                       for state in maas_store.latest_alarm_states_for_entity(entity.id)]}
                  for entity in entities]
        request.setResponseCode(200)
        return json.dumps({'metadata': metadata, 'values': values})
@app.route('/v1.0/<string:tenant_id>/audits', methods=['GET'])
def list_audits(self, request, tenant_id):
"""
Gets the user's audit logs.
"""
ordering = -1 if request.args.get(b'reverse', False) else 1
all_audits = self._entity_cache_for_tenant(tenant_id).audits_list[::ordering]
page_limit = min(int(request.args.get(b'limit', [100])[0]), 1000)
offset = 0
current_marker = request.args.get(b'marker', [None])[0]
if current_marker is not None:
current_marker = current_marker.decode("utf-8")
try:
offset = all_audits.index(Matcher(lambda audit: audit['id'] == current_marker))
except ValueError:
offset = 0
audits = all_audits[offset:offset + page_limit]
next_marker = None
if offset + page_limit < len(all_audits):
next_marker = all_audits[offset + page_limit]['id']
metadata = {
'count': len(audits),
'marker': current_marker,
'next_marker': next_marker,
'limit': page_limit,
'next_href': None
}
request.setResponseCode(200)
return json.dumps({'metadata': metadata, 'values': audits})
@app.route('/v1.0/<string:tenant_id>/__experiments/json_home', methods=['GET'])
def service_json_home(self, request, tenant_id):
"""
jsonhome call. CloudIntellgiences doesn't actually use these URLs directly.
Rather, do some regex on them to figure how to know what permissions the user as
have
TO DO: Regionless api
"""
request.setResponseCode(200)
mockapi_id = re.findall('/mimicking/(.+?)/', request.path.decode("utf-8"))[0]
url = base_uri_from_request(request).rstrip('/') + '/mimicking/' + mockapi_id + '/ORD/v1.0'
return json.dumps(json_home(url))
@app.route('/v1.0/<string:tenant_id>/views/agent_host_info', methods=['GET'])
def view_agent_host_info(self, request, tenant_id):
"""
Mocks the /views/agent_host_info API call.
"""
entities = self._entity_cache_for_tenant(tenant_id).entities
maas_store = self._entity_cache_for_tenant(tenant_id).maas_store
if b'include' not in request.args:
request.setResponseCode(400)
return json.dumps({'type': 'badRequest',
'code': 400,
'message': 'Validation error for key \'include\'',
'details': 'Must include at least one HOST_INFO_TYPE.',
'txnId': ('.fake.mimic.transaction.id.c-1111111'
'.ts-123444444.v-12344frf')})
if b'entityId' not in request.args:
request.setResponseCode(400)
return json.dumps({'type': 'badRequest',
'code': 400,
'message': 'Validation error for key \'agentId, entityId, uri\'',
'details': 'You must specify an agentId, entityId, or an entity URI.',
'mimicNotes': 'But mimic will only accept entityId right now',
'txnId': ('.fake.mimic.transaction.id.c-1111111'
'.ts-123444444.v-12344frf')})
entity_id = request.args[b'entityId'][0].strip().decode("utf-8")
entity = None
try:
entity = _get_entity(entities, entity_id)
except ObjectDoesNotExist as e:
request.setResponseCode(e.code)
return json.dumps(e.to_json())
entity = entities[entity_id]
if entity.agent_id is None:
request.setResponseCode(400)
return json.dumps({'type': 'agentDoesNotExist',
'code': 400,
'message': 'Agent does not exist',
'details': 'Agent null does not exist',
'txnId': ('.fake.mimic.transaction.id.c-1111111.'
'ts-123444444.v-12344frf')})
try:
agent = maas_store.agents[entity.agent_id]
except KeyError:
request.setResponseCode(400)
return json.dumps({'type': 'agentDoesNotExist',
'code': 400,
'message': 'Agent does not exist',
'details': 'Agent {0} does not exist'.format(entity.agent_id),
'txnId': ('.fake.mimic.transaction.id.c-1111111.'
'ts-123444444.v-12344frf')})
request.setResponseCode(200)
return json.dumps({
'values': [{'agent_id': entity.agent_id,
'entity_id': entity_id,
'entity_uri': entity.uri,
'host_info': agent.get_host_info(
maas_store.host_info_types,
[arg.decode('utf-8')
for arg in request.args[b'include']],
entity_id,
self._session_store.clock)}],
'metadata': {'count': 1,
'limit': 100,
'marker': None,
'next_marker': None,
'next_href': None}})
@app.route('/v1.0/<string:tenant_id>/views/connections', methods=['GET'])
def view_connections(self, request, tenant_id):
"""
Lists agent connections.
"""
maas_store = self._entity_cache_for_tenant(tenant_id).maas_store
if b'agentId' not in request.args:
request.setResponseCode(400)
return json.dumps({'type': 'badRequest',
'code': 400,
'message': 'Validation error for key \'agentId\'',
'details': 'You must specify an agentId',
'txnId': '.fake.mimic.transaction.id.c-1111111.ts-123444444.v-12344frf'})
agent_ids = request.args[b'agentId']
decoded_agent_ids = [agent_id.decode("utf-8") for agent_id in agent_ids]
connections = [{'agent_id': agent_id,
'connections': [connection.to_json()
for connection in maas_store.list_connections_for_agent(
agent_id)]}
for agent_id in decoded_agent_ids]
return json.dumps({'values': connections,
'metadata': {'count': len(connections),
'limit': None,
'marker': None,
'next_marker': None,
'next_href': None}})
@app.route('/v1.0/<string:tenant_id>/agent_installers', methods=['POST'])
def agent_installer(self, request, tenant_id):
"""
URL of agent install script
"""
xsil = (b'https://monitoring.api.rackspacecloud.com'
b'/v1.0/00000/agent_installers/c69b2ceafc0444506fb32255af3d9be3.sh')
status = 201
request.setResponseCode(status)
request.setHeader(b'x-shell-installer-location', xsil)
self._audit('agent_installers', request, tenant_id, status, request.content.read())
return b''
@app.route('/v1.0/<string:tenant_id>/notifications', methods=['POST'])
def create_notification(self, request, tenant_id):
"""
Create notification target
"""
content = request.content.read()
new_n = create_notification(self._session_store.clock, json.loads(content.decode("utf-8")))
notifications = self._entity_cache_for_tenant(tenant_id).notifications
notifications[new_n.id] = new_n
status = 201
request.setResponseCode(status)
request.setHeader(b'content-type', b'text/plain')
request.setHeader(b'location', base_uri_from_request(request).rstrip('/').encode('utf-8') +
request.path + b'/' + new_n.id.encode('utf-8'))
request.setHeader(b'x-object-id', new_n.id.encode('utf-8'))
self._audit('notifications', request, tenant_id, status, content)
return b''
@app.route('/v1.0/<string:tenant_id>/notifications', methods=['GET'])
def get_notifications(self, request, tenant_id):
"""
Get notification targets
"""
notifications = self._entity_cache_for_tenant(tenant_id).notifications
metadata = {'count': len(notifications),
'limit': 100,
'marker': None,
'next_marker': None,
'next_href': None}
request.setResponseCode(200)
return json.dumps({'values': [nt.to_json() for nt in notifications.values()],
'metadata': metadata})
@app.route('/v1.0/<string:tenant_id>/notifications/<string:nt_id>', methods=['PUT'])
def update_notifications(self, request, tenant_id, nt_id):
"""
Updates notification targets
"""
content = request.content.read()
postdata = json.loads(content.decode("utf-8"))
update_kwargs = dict(postdata)
notifications = self._entity_cache_for_tenant(tenant_id).notifications
try:
notification = _get_notification(notifications, nt_id)
notification.update(clock=self._session_store.clock, **update_kwargs)
except ObjectDoesNotExist as e:
request.setResponseCode(e.code)
self._audit('notifications', request, tenant_id, e.code, content)
return json.dumps(e.to_json())
status = 204
request.setResponseCode(status)
request.setHeader(b'content-type', b'text/plain')
self._audit('notifications', request, tenant_id, status, content)
return b''
@app.route('/v1.0/<string:tenant_id>/notifications/<string:nt_id>', methods=['DELETE'])
def delete_notification(self, request, tenant_id, nt_id):
"""
Delete a notification
"""
notifications = self._entity_cache_for_tenant(tenant_id).notifications
try:
_delete_notification(notifications, nt_id)
except ObjectDoesNotExist as e:
request.setResponseCode(e.code)
self._audit('notifications', request, tenant_id, e.code)
return json.dumps(e.to_json())
status = 204
request.setResponseCode(status)
self._audit('notifications', request, tenant_id, status)
request.setHeader(b'content-type', b'text/plain')
return b''
@app.route('/v1.0/<string:tenant_id>/notification_plans', methods=['POST'])
def create_notificationplan(self, request, tenant_id):
"""
Creates a new notificationPlans
"""
content = request.content.read()
postdata = json.loads(content.decode("utf-8"))
notification_plans = self._entity_cache_for_tenant(tenant_id).notification_plans
newnp = create_notification_plan(self._session_store.clock, postdata)
notification_plans[newnp.id] = newnp
status = 201
request.setResponseCode(status)
request.setHeader(b'content-type', b'text/plain')
request.setHeader(b'location', base_uri_from_request(request).rstrip('/').encode('utf-8') +
request.path + b'/' + newnp.id.encode('utf-8'))
request.setHeader(b'x-object-id', newnp.id.encode('utf-8'))
self._audit('notification_plans', request, tenant_id, status, content)
return b''
@app.route('/v1.0/<string:tenant_id>/notification_plans', methods=['GET'])
def get_notification_plans(self, request, tenant_id):
"""
Get all notification plans
"""
np_list = self._entity_cache_for_tenant(tenant_id).notification_plans.values()
metadata = {'count': len(np_list),
'limit': 100,
'marker': None,
'next_marker': None,
'next_href': None}
request.setResponseCode(200)
return json.dumps({'values': [np.to_json() for np in np_list], 'metadata': metadata})
@app.route('/v1.0/<string:tenant_id>/notification_plans/<string:np_id>', methods=['GET'])
def get_notification_plan(self, request, tenant_id, np_id):
"""
Get specific notif plan
"""
notification_plans = self._entity_cache_for_tenant(tenant_id).notification_plans
return _map_getter(notification_plans, request, 'NotificationPlan', np_id)
@app.route('/v1.0/<string:tenant_id>/notification_plans/<string:np_id>', methods=['PUT'])
def update_notification_plan(self, request, tenant_id, np_id):
"""
Alter a notification plan
"""
content = request.content.read()
postdata = json.loads(content.decode("utf-8"))
update_kwargs = dict(postdata)
notification_plans = self._entity_cache_for_tenant(tenant_id).notification_plans
try:
notification_plan = _get_notification_plan(notification_plans, np_id)
notification_plan.update(clock=self._session_store.clock, **update_kwargs)
except ObjectDoesNotExist as e:
request.setResponseCode(e.code)
self._audit('notification_plans', request, tenant_id, e.code, content)
return json.dumps(e.to_json())
status = 204
request.setResponseCode(status)
request.setHeader(b'content-type', b'text/plain')
self._audit('notification_plans', request, tenant_id, status, content)
return b''
@app.route('/v1.0/<string:tenant_id>/notification_plans/<string:np_id>', methods=['DELETE'])
def delete_notification_plan(self, request, tenant_id, np_id):
"""
Remove a notification plan
"""
notification_plans = self._entity_cache_for_tenant(tenant_id).notification_plans
entities = self._entity_cache_for_tenant(tenant_id).entities
alarmids_using_np = [alarm.id
for entity in entities.values()
for alarm in entity.alarms.values()
if alarm.notification_plan_id == np_id]
if len(alarmids_using_np):
status = 403
request.setResponseCode(status)
err_message = ('Notification plans cannot be removed while alarms ' +
'are using it: {0}'.format(' '.join(alarmids_using_np)))
self._audit('notification_plans', request, tenant_id, status)
return json.dumps({'type': 'forbiddenError',
'code': status,
'txnId': '.fake.mimic.transaction.id.c-1111111.ts-123444444.v-12344frf',
'message': err_message,
'details': err_message})
try:
_delete_notification_plan(notification_plans, np_id)
except ObjectDoesNotExist as e:
request.setResponseCode(e.code)
self._audit('notification_plans', request, tenant_id, e.code)
return json.dumps(e.to_json())
status = 204
request.setResponseCode(status)
self._audit('notification_plans', request, tenant_id, status)
request.setHeader(b'content-type', b'text/plain')
return b''
@app.route('/v1.0/<string:tenant_id>/suppressions', methods=['GET'])
def get_suppressions(self, request, tenant_id):
"""
Get the list of suppressions for this tenant.
"""
sp_list = self._entity_cache_for_tenant(tenant_id).suppressions.values()
metadata = {
'count': len(sp_list),
'limit': 100,
'marker': None,
'next_marker': None,
'next_href': None
}
request.setResponseCode(200)
return json.dumps({'values': [sp.to_json() for sp in sp_list], 'metadata': metadata})
@app.route('/v1.0/<string:tenant_id>/suppressions/<string:sp_id>', methods=['GET'])
def get_suppression(self, request, tenant_id, sp_id):
"""
Get a suppression by ID.
"""
suppressions = self._entity_cache_for_tenant(tenant_id).suppressions
return _map_getter(suppressions, request, 'Suppression', sp_id)
@app.route('/v1.0/<string:tenant_id>/suppressions', methods=['POST'])
def create_suppression(self, request, tenant_id):
"""
Create a new suppression.
"""
content = request.content.read()
postdata = json.loads(content.decode("utf-8"))
newsp = create_suppression(self._session_store.clock, postdata)
suppressions = self._entity_cache_for_tenant(tenant_id).suppressions
suppressions[newsp.id] = newsp
status = 201
request.setResponseCode(status)
request.setHeader(b'location', base_uri_from_request(request).rstrip('/').encode('utf-8') +
request.path + b'/' + newsp.id.encode('utf-8'))
request.setHeader(b'x-object-id', newsp.id.encode('utf-8'))
request.setHeader(b'content-type', b'text/plain')
self._audit('suppressions', request, tenant_id, status, content)
return b''
@app.route('/v1.0/<string:tenant_id>/suppressions/<string:sp_id>', methods=['PUT'])
def update_suppression(self, request, tenant_id, sp_id):
"""
Update a suppression.
"""
content = request.content.read()
postdata = json.loads(content.decode("utf-8"))
update_kwargs = dict(postdata)
suppressions = self._entity_cache_for_tenant(tenant_id).suppressions
try:
suppression = _get_suppression(suppressions, sp_id)
suppression.update(clock=self._session_store.clock, **update_kwargs)
except ObjectDoesNotExist as e:
request.setResponseCode(e.code)
self._audit('suppressions', request, tenant_id, e.code, content)
return json.dumps(e.to_json())
status = 204
request.setResponseCode(status)
request.setHeader(b'content-type', b'text/plain')
self._audit('suppressions', request, tenant_id, status, content)
return b''
@app.route('/v1.0/<string:tenant_id>/suppressions/<string:sp_id>', methods=['DELETE'])
def delete_suppression(self, request, tenant_id, sp_id):
"""
Delete a suppression.
"""
suppressions = self._entity_cache_for_tenant(tenant_id).suppressions
try:
_delete_suppression(suppressions, sp_id)
except ObjectDoesNotExist as e:
request.setResponseCode(e.code)
self._audit('suppressions', request, tenant_id, e.code)
return json.dumps(e.to_json())
status = 204
request.setResponseCode(status)
self._audit('suppressions', request, tenant_id, status)
request.setHeader(b'content-type', b'text/plain')
return b''
@app.route('/v1.0/<string:tenant_id>/monitoring_zones', methods=['GET'])
def list_monitoring_zones(self, request, tenant_id):
"""
Lists the monitoring zones
"""
mzs = monitoring_zones()
metadata = {
'count': len(mzs),
'limit': 100,
'marker': None,
'next_marker': None,
'next_href': None
}
request.setResponseCode(200)
return json.dumps({'values': mzs, 'metadata': metadata})
@app.route('/v1.0/<string:tenant_id>/alarm_examples', methods=['GET'])
def list_alarm_examples(self, request, tenant_id):
"""
Lists all of the alarm examples.
"""
axs = alarm_examples()
metadata = {
'count': len(axs),
'limit': 100,
'marker': None,
'next_marker': None,
'next_href': None
}
request.setResponseCode(200)
return json.dumps({'values': axs, 'metadata': metadata})
@app.route('/v1.0/<string:tenant_id>/views/alarmCountsPerNp', methods=['GET'])
def alarm_counts_per_np(self, request, tenant_id):
"""
All NotificationPlans a number of alarms pointing to them.
"""
notification_plans = self._entity_cache_for_tenant(tenant_id).notification_plans
entities = self._entity_cache_for_tenant(tenant_id).entities
values = [{'notification_plan_id': np.id,
'alarm_count': len([alarm
for entity in entities.values()
for alarm in entity.alarms.values()
if alarm.notification_plan_id == np.id])}
for np in notification_plans.values()]
metadata = {'limit': 100,
'marker': None,
'next_marker': None,
'next_href': None,
'count': len(values)}
request.setResponseCode(200)
return json.dumps({'values': values, 'metadata': metadata})
@app.route('/v1.0/<string:tenant_id>/views/alarmsByNp/<string:np_id>', methods=['GET'])
def alarms_by_np(self, request, tenant_id, np_id):
"""
List of alarms pointing to a particular NotificationPlan
"""
entities = self._entity_cache_for_tenant(tenant_id).entities
values = [alarm.to_json()
for entity in entities.values()
for alarm in entity.alarms.values()
if alarm.notification_plan_id == np_id]
metadata = {'limit': 100,
'marker': None,
'next_marker': None,
'next_href': None,
'count': len(values)}
request.setResponseCode(200)
return json.dumps({'values': values, 'metadata': metadata})
@app.route('/v1.0/<string:tenant_id>/notification_types', methods=['GET'])
def get_notification_types(self, request, tenant_id):
"""
Get the types of notifications supported: pageduty,email,sms, etc
"""
ntlist = self._entity_cache_for_tenant(tenant_id).notificationtypes_list
metadata = {'count': len(ntlist),
'limit': 100,
'marker': None,
'next_marker': None,
'next_href': None}
request.setResponseCode(200)
return json.dumps({'values': ntlist, 'metadata': metadata})
@app.route('/v1.0/<string:tenant_id>/views/metric_list', methods=['GET'])
def views_metric_list(self, request, tenant_id):
"""
All available metrics.
"""
entities = self._entity_cache_for_tenant(tenant_id).entities
maas_store = self._entity_cache_for_tenant(tenant_id).maas_store
values = [_metric_list_for_entity(maas_store, entity)
for entity in entities.values()]
metadata = {'count': len(values),
'marker': None,
'next_marker': None,
'limit': 1000,
'next_href': None}
request.setResponseCode(200)
return json.dumps({'metadata': metadata, 'values': values})
@app.route('/v1.0/<string:tenant_id>/__experiments/multiplot', methods=['POST'])
def multiplot(self, request, tenant_id):
"""
datapoints for all metrics requested
Right now, only checks of type remote.ping work
"""
entities = self._entity_cache_for_tenant(tenant_id).entities
maas_store = self._entity_cache_for_tenant(tenant_id).maas_store
content = request.content.read()
multiplot_request = json.loads(content.decode("utf-8"))
requested_check_ids = set([metric['check_id'] for metric in multiplot_request['metrics']])
checks_by_id = {check.id: check
for entity in entities.values()
for check in entity.checks.values()
if check.id in requested_check_ids}
for requested_metric in multiplot_request['metrics']:
if requested_metric['check_id'] not in checks_by_id:
status = 400
request.setResponseCode(status)
self._audit('rollups', request, tenant_id, status, content)
return json.dumps({
'type': 'requiredNotFoundError',
'code': status,
'message': 'Required object does not exist',
'details': 'Object "Check" with key "{0},{1}" does not exist'.format(
requested_metric['entity_id'], requested_metric['check_id']),
'txnId': '.fake.mimic.transaction.id.c-1111111.ts-123444444.v-12344frf'})
multiplot_metrics = [_compute_multiplot(maas_store,
metric['entity_id'],
checks_by_id[metric['check_id']],
metric['metric'],
int(request.args[b'from'][0]),
int(request.args[b'to'][0]),
int(request.args[b'points'][0]))
for metric in multiplot_request['metrics']]
status = 200
request.setResponseCode(200)
self._audit('rollups', request, tenant_id, status, content)
return json.dumps({'metrics': multiplot_metrics})
@app.route('/v1.0/<string:tenant_id>/views/latest_alarm_states', methods=['GET'])
def latest_alarm_states(self, request, tenant_id):
"""
Gets entities grouped with their latest alarm states.
"""
entities = self._entity_cache_for_tenant(tenant_id).entities
maas_store = self._entity_cache_for_tenant(tenant_id).maas_store
values = [{'entity_id': entity.id,
'entity_uri': entity.uri,
'entity_label': entity.label,
'latest_alarm_states': [
state.detail_json()
for state in maas_store.latest_alarm_states_for_entity(entity.id)]}
for entity in entities.values()]
metadata = {'count': len(values),
'marker': None,
'next_marker': None,
'limit': 1000,
'next_href': None}
request.setResponseCode(200)
return json.dumps({'values': values, 'metadata': metadata})
@implementer(IAPIMock, IPlugin)
@attr.s
class MaasControlApi(object):
    """
    This class registers the MaaS controller API in the service catalog.
    """
    maas_api = attr.ib()

    def catalog_entries(self, tenant_id):
        """
        List catalog entries for the MaaS API.
        """
        endpoints = [Endpoint(tenant_id, region, text_type(uuid4()), "v1.0")
                     for region in self.maas_api._regions]
        return [Entry(tenant_id, "rax:monitor", "cloudMonitoringControl", endpoints)]

    def resource_for_region(self, region, uri_prefix, session_store):
        """
        Get an :obj:`twisted.web.iweb.IResource` for the given URI prefix;
        implement :obj:`IAPIMock`.
        """
        controller = MaasController(api_mock=self,
                                    session_store=session_store,
                                    region=region)
        return controller.app.resource()
@attr.s
class MaasController(object):
    """
    Klein routes for the MaaS control API: test-only endpoints that let
    callers inject alarm states, canned test-check/test-alarm responses,
    metric overrides, and agents into the mock MaaS session.
    """
    api_mock = attr.ib()
    session_store = attr.ib()
    region = attr.ib()

    def _entity_cache_for_tenant(self, tenant_id):
        """
        Retrieve the M_cache object containing all objects created so far
        """
        clock = self.session_store.clock
        return (self.session_store.session_for_tenant_id(tenant_id)
                .data_for_api(self.api_mock.maas_api, _mcache_factory(clock))[self.region])

    app = MimicApp()

    @app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/alarms/test_response',
               methods=['PUT'])
    def set_test_alarm_response(self, request, tenant_id, entity_id):
        """
        Sets the test-alarm response for a given entity.
        """
        test_responses = self._entity_cache_for_tenant(tenant_id).test_alarm_responses
        dummy_response = json_from_request(request)
        test_responses[entity_id] = []
        for response_block in dummy_response:
            ith_response = {'state': response_block['state']}
            # 'status' is optional in each response block.
            if 'status' in response_block:
                ith_response['status'] = response_block['status']
            test_responses[entity_id].append(ith_response)
        request.setResponseCode(204)
        return b''

    @app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/alarms/test_errors',
               methods=['POST'])
    def push_test_alarm_error(self, request, tenant_id, entity_id):
        """
        Creates a new error response that will be returned from the
        test-alarm API the next time it is called for this entity.
        """
        test_alarm_errors = self._entity_cache_for_tenant(tenant_id).test_alarm_errors
        request_body = json_from_request(request)
        if entity_id not in test_alarm_errors:
            # FIFO queue of pending error responses for this entity.
            test_alarm_errors[entity_id] = collections.deque()
        error_obj = {'id': 'er' + random_hex_generator(4),
                     'code': request_body['code'],
                     'response': request_body['response']}
        test_alarm_errors[entity_id].append(error_obj)
        request.setResponseCode(201)
        request.setHeader(b'x-object-id', error_obj['id'].encode('utf-8'))
        return b''

    @app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/alarms/test_response',
               methods=['DELETE'])
    def clear_test_alarm_response(self, request, tenant_id, entity_id):
        """
        Clears the test-alarm response and restores normal behavior.
        """
        test_responses = self._entity_cache_for_tenant(tenant_id).test_alarm_responses
        del test_responses[entity_id]
        request.setResponseCode(204)
        return b''

    @app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/checks' +
               '/test_responses/<string:check_type>', methods=['PUT'])
    def set_test_check_overrides(self, request, tenant_id, entity_id, check_type):
        """
        Sets overriding behavior on the test-check handler for a given
        entity ID and check type.
        """
        maas_store = self._entity_cache_for_tenant(tenant_id).maas_store
        check_type_ins = maas_store.check_types[check_type]
        overrides = json_from_request(request)
        check_id = '__test_check'
        ench_key = (entity_id, check_id)
        for override in overrides:
            if 'available' in override:
                check_type_ins.test_check_available[ench_key] = override['available']
            if 'status' in override:
                check_type_ins.test_check_status[ench_key] = override['status']
            metrics_dict = override.get('metrics', {})
            for metric_name in metrics_dict:
                test_check_metric = check_type_ins.get_metric_by_name(metric_name)
                # BUG FIX: the override_fn previously closed over the loop
                # variable ``metric_name`` lazily (late binding), so every
                # metric's override returned the data of the *last* metric
                # iterated. Bind the value now via a default argument.
                kwargs = {'entity_id': entity_id,
                          'check_id': check_id,
                          'override_fn': lambda _, _data=metrics_dict[metric_name]['data']: _data}
                if 'monitoring_zone_id' in override:
                    kwargs['monitoring_zone'] = override['monitoring_zone_id']
                test_check_metric.set_override(**kwargs)
        request.setResponseCode(204)
        return b''

    @app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/checks' +
               '/test_responses/<string:check_type>', methods=['DELETE'])
    def clear_test_check_overrides(self, request, tenant_id, entity_id, check_type):
        """
        Clears overriding behavior on a test-check handler.
        """
        maas_store = self._entity_cache_for_tenant(tenant_id).maas_store
        check_type_ins = maas_store.check_types[check_type]
        check_type_ins.clear_overrides()
        request.setResponseCode(204)
        return b''

    @app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/alarms' +
               '/<string:alarm_id>/states', methods=['POST'])
    def create_alarm_state(self, request, tenant_id, entity_id, alarm_id):
        """
        Adds a new alarm state to the collection of alarm states.

        Responds 404-style errors when the entity/alarm is missing, and 400
        when the request body lacks 'state' or 'status'.
        """
        maas_store = self._entity_cache_for_tenant(tenant_id).maas_store
        entities = self._entity_cache_for_tenant(tenant_id).entities
        request_body = json_from_request(request)
        alarm = None
        try:
            alarm = _get_alarm(entities, entity_id, alarm_id)
        except (ObjectDoesNotExist, ParentDoesNotExist) as e:
            request.setResponseCode(e.code)
            return json.dumps(e.to_json())
        # The previous state is the most recent state recorded for this
        # (entity, alarm) pair, or UNKNOWN if none exists yet.
        previous_state = u'UNKNOWN'
        alarm_states_same_entity_and_alarm = [
            state for state in maas_store.alarm_states
            if state.entity_id == entity_id and state.alarm_id == alarm_id]
        if alarm_states_same_entity_and_alarm:
            previous_state = alarm_states_same_entity_and_alarm[-1].state
        monitoring_zone_id = request_body.get('analyzed_by_monitoring_zone_id', u'mzord')
        new_state = None
        try:
            new_state = AlarmState(alarm_id=alarm_id,
                                   entity_id=entity_id,
                                   check_id=alarm.check_id,
                                   alarm_label=alarm.label,
                                   analyzed_by_monitoring_zone_id=monitoring_zone_id,
                                   previous_state=previous_state,
                                   state=request_body['state'],
                                   status=request_body['status'],
                                   timestamp=int(1000 * self.session_store.clock.seconds()))
        except KeyError as e:
            # A required key ('state' or 'status') was absent from the body.
            missing_key = e.args[0]
            status = 400
            request.setResponseCode(status)
            return json.dumps({'type': 'badRequest',
                               'code': status,
                               'message': 'Validation error for key \'{0}\''.format(missing_key),
                               'details': 'Missing required key ({0})'.format(missing_key),
                               'txnId': '.fake.mimic.transaction.id.c-1111111.ts-123444444.v-12344frf'})
        maas_store.alarm_states.append(new_state)
        request.setResponseCode(201)
        request.setHeader(b'x-object-id', new_state.id.encode('utf-8'))
        request.setHeader(b'content-type', b'text/plain')
        return b''

    @app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/checks' +
               '/<string:check_id>/metrics/<string:metric_name>', methods=['PUT'])
    def set_metric_override(self, request, tenant_id, entity_id, check_id, metric_name):
        """
        Sets overrides on a metric.

        Currently only the 'squarewave' override type is supported; any
        other type yields a 400.
        """
        entities = self._entity_cache_for_tenant(tenant_id).entities
        check = None
        try:
            check = _get_check(entities, entity_id, check_id)
        except (ObjectDoesNotExist, ParentDoesNotExist) as e:
            request.setResponseCode(e.code)
            return json.dumps(e.to_json())
        maas_store = self._entity_cache_for_tenant(tenant_id).maas_store
        metric = maas_store.check_types[check.type].get_metric_by_name(metric_name)
        request_body = json_from_request(request)
        monitoring_zones = request_body.get('monitoring_zones', ['__AGENT__'])
        override_type = request_body['type']
        override_options = request_body.get('options', {})
        override_fn = None
        if override_type == 'squarewave':
            # Square wave alternating between fn_min and fn_max with the
            # given period (ms) and phase offset.
            fn_period = int(override_options.get('period', 10 * 60 * 1000))
            half_period = fn_period / 2
            fn_min = override_options.get('min', 20)
            fn_max = override_options.get('max', 80)
            fn_offset = int(override_options.get('offset', 0))
            override_fn = (lambda t: (fn_min
                                      if ((t + fn_offset) % fn_period) < half_period
                                      else fn_max))
        else:
            request.setResponseCode(400)
            return json.dumps({'type': 'badRequest',
                               'code': 400,
                               'message': 'Validation error for key \'type\'',
                               'details': 'Unknown value for "type": "{0}"'.format(override_type)})
        for monitoring_zone in monitoring_zones:
            metric.set_override(
                entity_id=entity_id,
                check_id=check_id,
                monitoring_zone=monitoring_zone,
                override_fn=override_fn)
        request.setResponseCode(204)
        return b''

    @app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/agents', methods=['POST'])
    def create_agent(self, request, tenant_id, entity_id):
        """
        Creates or overwrites an agent on the entity.
        """
        entities = self._entity_cache_for_tenant(tenant_id).entities
        maas_store = self._entity_cache_for_tenant(tenant_id).maas_store
        agent = None
        try:
            entity = _get_entity(entities, entity_id)
            agent = Agent()
            entity.agent_id = agent.id
        except ObjectDoesNotExist as e:
            request.setResponseCode(e.code)
            return json.dumps(e.to_json())
        maas_store.agents[agent.id] = agent
        request.setResponseCode(201)
        request.setHeader(b'x-object-id', agent.id.encode('utf-8'))
        request.setHeader(b'content-type', b'text/plain')
        return b''
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 21 21:39:46 2019
@author: Suyash

KNN classification on the Pima diabetes dataset, scored with
10-fold cross-validation.
"""
##### KNN using Cross-Validation##############
import pandas as pd

df = pd.read_csv("diabetes.csv")

# Fix: positional `axis` argument to drop() was deprecated and removed in
# pandas 2.0 — use the explicit columns= keyword instead.
X = df.drop(columns=["diabetes"])
y = df["diabetes"]

from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=5)

from sklearn.model_selection import cross_val_score
# Fix: the mean CV accuracy was computed and silently discarded when run as
# a script (REPL-style bare expression) — print it so the result is visible.
print(cross_val_score(knn, X, y, cv=10).mean())
|
from random import randint

# Pick a secret number, read the player's guess, and report the outcome.
secret = randint(0, 5)
guess = int(input('Digite um nº de 0 a 5: '))
print('O nº gerado foi: {}'.format(secret))
result = 'Se é o bixão mesmo, heim doido!' if guess == secret else 'kkk se liga!'
print(result)
|
# Generated by Django 3.1.3 on 2020-11-22 10:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """
    Auto-generated migration: adds two nullable book-tracking columns to
    the ``profile`` model of the ``library`` app.

    NOTE(review): ``Number_of_copies`` breaks the snake_case field naming
    convention; renaming it would require a follow-up migration.
    """

    dependencies = [
        ('library', '0013_auto_20201120_1455'),
    ]

    operations = [
        # Both fields are nullable so existing rows need no default value.
        migrations.AddField(
            model_name='profile',
            name='Number_of_copies',
            field=models.IntegerField(null=True),
        ),
        migrations.AddField(
            model_name='profile',
            name='book_name',
            field=models.CharField(max_length=50, null=True),
        ),
    ]
|
"""MNIST Data.
Custom MNIST Dataset utility since torch can't easily be installed on the Pi.
This is basically a copy of the torchvision version, but without the torch
components. Also has significantly reduced functionality, as it is only meant to
retrieve images from the pickle file.
Author:
Yvan Satyawan <y_satyawan@hotmail.com>
Created on:
April 10, 2020
"""
import os
import pickle
from PIL import Image
class MNIST:
    # Download sources and their md5 checksums (kept for reference; this
    # class only reads the already-processed pickle files).
    resources = [
        ("http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz",
         "f68b3c2dcbeaaa9fbdd348bbdeb94873"),
        ("http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz",
         "d53e105ee54ea40749a09fcbcd1e9432"),
        ("http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz",
         "9fb629c4189551a2d022fa330f9573f3"),
        ("http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz",
         "ec29112dd5afa0611ce80d1b7f02629c")
    ]
    training_file = 'training.pkl'
    test_file = 'test.pkl'
    classes = ['0 - zero', '1 - one', '2 - two', '3 - three', '4 - four',
               '5 - five', '6 - six', '7 - seven', '8 - eight', '9 - nine']

    def __init__(self, root, train=True):
        """`MNIST <http://yann.lecun.com/exdb/mnist/>`_ Dataset.

        Args:
            root (str): Root directory of dataset where
                ``MNIST/processed/training.pt`` and ``MNIST/processed/test.pt``
                exist.
            train (bool): If True, creates dataset from ``training.pt``,
                otherwise from ``test.pt``.
        """
        self.root = root
        self.train = train  # training set or test set
        if not self._check_exists():
            raise RuntimeError('Dataset not found.')
        chosen_file = self.training_file if self.train else self.test_file
        with open(os.path.join(self.processed_folder, chosen_file), 'rb') as fp:
            self.data, self.targets = pickle.load(fp)

    def __getitem__(self, index):
        """Return ``(image, target)`` where target is the class index.

        Args:
            index (int): Index
        """
        raw, label = self.data[index], int(self.targets[index])
        # Return a PIL Image for consistency with all other datasets.
        return Image.fromarray(raw, mode='L'), label

    def __len__(self):
        return len(self.data)

    @property
    def raw_folder(self):
        return os.path.join(self.root, type(self).__name__, 'raw')

    @property
    def processed_folder(self):
        return os.path.join(self.root, type(self).__name__, 'processed')

    @property
    def class_to_idx(self):
        return {name: idx for idx, name in enumerate(self.classes)}

    def _check_exists(self):
        processed = self.processed_folder
        return all(os.path.exists(os.path.join(processed, filename))
                   for filename in (self.training_file, self.test_file))

    def extra_repr(self):
        return "Split: {}".format("Train" if self.train is True else "Test")
# This problem was asked by Jane Street.
# cons(a, b) constructs a pair, and car(pair)
# and cdr(pair) return the first and last elements of that pair.
# For example, car(cons(3, 4)) returns 3,
# and cdr(cons(3, 4)) returns 4.
def cons(a, b):
    """Construct a Church-encoded pair: a function that feeds (a, b) to f."""
    return lambda f: f(a, b)
def car(f):
    """Return the first element of a Church-encoded pair."""
    return f(lambda first, _second: first)
def cdr(f):
    """Return the last element of a Church-encoded pair."""
    return f(lambda _first, second: second)
# Demonstrate the pair accessors on sample pairs.
first_element = car(cons("hello", 4))
print(first_element)
last_element = cdr(cons(213, 232))
print(last_element)
#!python
# encoding: utf-8
# Created by djg-m at 09.01.2021
import argparse
from models import Message, User
def messages_list(user):
    """Print every message addressed to the given user.

    :param user: User object whose inbox should be listed
    :type user: User
    :return: None
    :rtype: None
    """
    inbox = Message().load_by_receiver(user.id)
    for item in inbox:
        author = User()
        author.load_by_id(item.from_id)
        print(f"{item.creation_date} | {author.username} | "
              f"{item.message}")
def send_message(args, user):
    """Send a new message from the logged-in user.

    :param args: parsed CLI arguments (uses ``args.to`` and ``args.send``)
    :type args: argparse.Namespace
    :param user: User object of the logged-in sender
    :type user: User
    :return: None
    :rtype: None
    """
    recipient = User()
    recipient.load_by_username(args.to)
    Message().new_message(user.id, recipient.id, args.send)
# CLI definition (help strings are user-facing Polish and intentionally unchanged).
app_args = argparse.ArgumentParser(description="Aplikacja do zarządzania "
                                               "wiadomościami")
app_args.add_argument('-p', '--password', type=str, help="Hasło użytkownika",
                      action='store', required=True)
app_args.add_argument('-t', '--to', help="Nazwa odbiorcy wiadomości",
                      action='store', type=str)
app_args.add_argument('-s', '--send', help="Treść wiadomości",
                      action='store', type=str)
app_args.add_argument('-u', '--username', help="Nazwa użytkownika",
                      action='store', type=str)
app_args.add_argument('-l', '--list', help="Lista użytkowników",
                      action='store_true', default=False)
args = app_args.parse_args()
try:
    user = User()
    user.load_by_username(args.username)
    user.authorize(args.password)
    if user.is_authorized:
        if args.list:
            messages_list(user)
        elif args.to and args.send:
            send_message(args, user)
except ValueError as err:
    # Bug fix: the original `except ValueError: pass` silently swallowed
    # bad-username / bad-password failures, so the program exited with no
    # feedback at all. Report the error instead.
    print(f"Błąd: {err}")
|
# Resource: https://nathanrooy.github.io/posts/2016-08-17/simple-particle-swarm-optimization-with-python/
import pandas as pd
import numpy as np
import random
import pickle
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, RobustScaler
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.linear_model import Lasso, ElasticNet, LogisticRegression
from sklearn.kernel_ridge import KernelRidge
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
import xgboost as xgb
#import lightgbm as lgb
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.feature_selection import VarianceThreshold, RFE
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import random
import math
def func1(x):
    """Benchmark objective: sum of |x_i**2 - 7*x_i + 12| over all components.

    The minimum value 0 is reached when every component is a root of the
    quadratic (3 or 4). Rewritten from an index loop to a direct sum over
    values (idiomatic, same result).
    """
    return sum(abs(v ** 2 - 7 * v + 12) for v in x)
def randomizeBits(size):
    """Return a length-*size* numpy array of uniformly random 0/1 ints."""
    bits = np.random.randint(2, size=size)
    return bits
def getCardinality(threshold, accumulation):
    """Return the index of the first accumulated value exceeding *threshold*.

    Implements roulette-wheel selection over a cumulative-sum array; falls
    back to the last index when no entry exceeds the threshold (e.g. when the
    threshold equals the total). Cleanup: removed stray C-style semicolons and
    the manual index loop.
    """
    for i, acc in enumerate(accumulation):
        if acc > threshold:
            return i
    return len(accumulation) - 1
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
lasso = make_pipeline(RobustScaler(), Lasso(alpha =10, random_state=1, max_iter = 7000))
ENet = make_pipeline(RobustScaler(), ElasticNet(alpha=1, l1_ratio=.9, random_state=3))
KRR = KernelRidge(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5)
GBoost = GradientBoostingRegressor(n_estimators=3000, learning_rate=0.05,
max_depth=4, max_features='sqrt',
min_samples_leaf=15, min_samples_split=10,
loss='huber', random_state =5)
model_xgb = xgb.XGBRegressor(colsample_bytree=0.4603, gamma=0.0468,
learning_rate=0.05, max_depth=3,
min_child_weight=1.7817, n_estimators=2200,
reg_alpha=0.4640, reg_lambda=0.8571,
subsample=0.5213, silent=1,
random_state =7, nthread = -1)
'''model_lgb = lgb.LGBMRegressor(objective='regression',num_leaves=5,
learning_rate=0.05, n_estimators=720,
max_bin = 55, bagging_fraction = 0.8,
bagging_freq = 5, feature_fraction = 0.2319,
feature_fraction_seed=9, bagging_seed=9,
min_data_in_leaf =6, min_sum_hessian_in_leaf = 11)'''
forest_model = RandomForestRegressor()
#models = [lasso, ENet, KRR, GBoost, model_xgb, model_lgb, forest_model]
models = [lasso, ENet, KRR, GBoost, model_xgb, forest_model]
class MetaModel(BaseEstimator, RegressorMixin, TransformerMixin):
    """Stacking ensemble: out-of-fold base-model predictions train a meta-model.

    Parameters
    ----------
    base_models : list of estimators, cloned and fit per CV fold
    meta_model  : estimator trained on the out-of-fold predictions
    n_folds     : number of CV folds. Fix: this was previously read from a
                  module-level global defined *after* this class; it is now an
                  explicit parameter with the same default (backward compatible).
    """

    def __init__(self, base_models, meta_model, n_folds=5):
        self.base_models = base_models
        self.meta_model = meta_model
        self.n_folds = n_folds

    def fit(self, X, y):
        """Fit clones of each base model out-of-fold, then fit the meta-model."""
        self.base_models_ = [list() for _ in self.base_models]
        self.meta_model_ = clone(self.meta_model)
        kfold = KFold(n_splits=self.n_folds, shuffle=True, random_state=156)
        # Out-of-fold predictions become the meta-model's training features,
        # so the meta-model never sees predictions on data a clone trained on.
        out_of_fold_predictions = np.zeros((X.shape[0], len(self.base_models)))
        for i, model in enumerate(self.base_models):
            for train_index, holdout_index in kfold.split(X, y):
                instance = clone(model)
                self.base_models_[i].append(instance)
                instance.fit(X[train_index], y[train_index])
                y_pred = instance.predict(X[holdout_index])
                out_of_fold_predictions[holdout_index, i] = y_pred
        self.meta_model_.fit(out_of_fold_predictions, y)
        return self

    def predict(self, X):
        """Average each base model's fold-clones, then let the meta-model predict."""
        meta_features = np.column_stack([
            np.column_stack([model.predict(X) for model in base_models]).mean(axis=1)
            for base_models in self.base_models_ ])
        return self.meta_model_.predict(meta_features)
n_folds = 5

def rmsle_cv(model):
    """Cross-validated RMSLE of *model* on the module-level train/train_result.

    Bug fix: the original called ``KFold(...).get_n_splits(train)``, which
    returns a plain int, silently discarding the shuffled, seeded splitter —
    ``cross_val_score`` then used an unshuffled default split. Pass the KFold
    object itself so ``shuffle=True`` and ``random_state=42`` actually apply.
    """
    kf = KFold(n_folds, shuffle=True, random_state=42)
    rmse = np.sqrt(-cross_val_score(model, train, train_result.reshape(-1),
                                    scoring="neg_mean_squared_log_error", cv=kf))
    return rmse
class Particle:
    """One PSO particle: a bit vector selecting which base models join the ensemble."""

    def __init__(self, models):
        self.num_dimensions = len(models)
        self.position = randomizeBits(len(models))  # current model-selection bits
        # Row 0: cardinality velocity; row 1: per-model selection likelihood.
        self.velocity = np.zeros((2, self.num_dimensions))
        self.pbest_pos = []       # personal best position
        self.pbest_error = -1     # -1 marks "not evaluated yet"
        self.curr_error = -1
        self.models = models
        for i in range(0, self.num_dimensions):
            self.velocity[0, i] = random.uniform(-1, 1)

    def calculate_cost(self):
        """Evaluate the current position by fitting a stacked MetaModel.

        Skips evaluation entirely when no model is selected (all-zero position).
        Updates the personal best when the new error improves on it.
        """
        # Nothing selected -> nothing to fit.
        if not any(self.position[i] != 0 for i in range(0, self.num_dimensions)):
            return
        # Bug fix: use the model pool handed to this particle (self.models)
        # rather than the module-level global of the same name.
        instance_meta_model = self.models[0]
        instance_base_models = [self.models[i]
                                for i in range(0, self.num_dimensions)
                                if self.position[i] == 1]
        meta_model = MetaModel(base_models = instance_base_models, meta_model = instance_meta_model)
        meta_model.fit(train, train_result.reshape(-1))
        score = rmsle_cv(meta_model)
        self.curr_error = score.mean()
        print("Error: " + str(self.curr_error))
        print("Position: " + str(self.position))
        if self.curr_error < self.pbest_error or self.pbest_error == -1:
            self.pbest_pos = self.position
            self.pbest_error = self.curr_error

    def updateVelocity(self, inertia, cog_const, soc_const, gbest_pos):
        """Update the 2-row velocity from personal-best and global-best positions.

        NOTE(review): the whole body below is executed once per dimension
        although everything except r1/r2 is loop-invariant, so the velocity
        update is applied num_dimensions times per call. Preserved as-is to
        avoid changing the optimizer's dynamics — confirm intent.
        """
        for i in range(0, self.num_dimensions):
            r1 = random.random()
            r2 = random.random()
            card_count_soc = card_count_cog = card_count_self = 0
            # Count selected models (cardinality) in each reference position.
            for j in self.pbest_pos:
                card_count_cog += int(j)
            for j in gbest_pos:
                card_count_soc += int(j)
            for j in self.position:
                card_count_self += int(j)
            # One-hot encode each cardinality in row 0.
            L_cog = np.zeros((2, self.num_dimensions))
            L_soc = np.zeros((2, self.num_dimensions))
            L_self = np.zeros((2, self.num_dimensions))
            L_cog[0, card_count_cog - 1] = 1
            L_soc[0, card_count_soc - 1] = 1
            L_self[0, card_count_self - 1] = 1
            # Row 1: mark models the best positions select but this one doesn't.
            for a in range(0, self.num_dimensions):
                if self.pbest_pos[a] == 1 and self.position[a] == 0:
                    L_cog[1, a] = 1
                if gbest_pos[a] == 1 and self.position[a] == 0:
                    L_soc[1, a] = 1
            vel_cognitive = cog_const * r1 * L_cog
            vel_social = soc_const * r2 * L_soc
            vel_self = 0.2 * L_self
            self.velocity = inertia * self.velocity + vel_cognitive + vel_social + vel_self

    def updatePosition(self):
        """Sample a new position: roulette-wheel cardinality, then top likelihoods."""
        # Cumulative sum of the cardinality velocities (row 0).
        accumulation = np.zeros((self.num_dimensions))
        for i in range(0, self.num_dimensions):
            if i == 0:
                accumulation[i] = self.velocity[0, i]
            else:
                accumulation[i] = self.velocity[0, i] + accumulation[i-1]
        # Random threshold scaled by the total mass.
        random_thresh = random.random() * accumulation[-1]
        # How many models the new position selects.
        cardinality = getCardinality(random_thresh, accumulation)
        # Select the `cardinality` models with the highest selection likelihood.
        temp_selection = np.copy(self.velocity[1, :])
        print("Velocity: " + str(self.velocity))
        updated_position = np.zeros((self.num_dimensions))
        for i in range(cardinality):
            max_index = np.argmax(temp_selection)
            updated_position[max_index] = 1
            temp_selection[max_index] = float('-inf')
        self.position = updated_position
class PSO:
    """Binary particle-swarm optimizer over subsets of the global model pool."""

    def __init__(self, num_particles, epoch, continue_flag = False, pso=""):
        self.num_dimensions = len(models)
        self.gbest_pos = []       # best position seen by any particle
        self.gbest_error = -1     # -1 marks "not evaluated yet"
        self.inertia = 0.8
        self.cog_const = 1        # cognitive (personal-best) weight
        self.soc_const = 2        # social (global-best) weight
        self.epoch = epoch
        self.num_particles = num_particles
        self.epsilon = 1e6
        # Bug fix: the original referenced the undefined name `epislon`,
        # raising NameError on every construction.
        self.meta_weights = np.full((1, self.num_dimensions), self.epsilon)
        if continue_flag:
            # Bug fix: `self = pickle.loads(pso)` only rebound the local name
            # and discarded the loaded state; copy the unpickled attributes
            # onto this instance instead.
            self.__dict__.update(pickle.loads(pso).__dict__)
        else:
            self.swarm = [Particle(models) for _ in range(num_particles)]

    def run(self):
        """Run the swarm for `epoch` timesteps, tracking the global best."""
        print("")
        for timestep in range(self.epoch):
            print("Timestep: %d" % timestep)
            for i in range(0, self.num_particles):
                print(self.swarm[i].position)
                self.swarm[i].calculate_cost()
                if self.swarm[i].curr_error < self.gbest_error or self.gbest_error == -1:
                    self.gbest_pos = list(self.swarm[i].position)
                    self.gbest_error = float(self.swarm[i].curr_error)
                self.swarm[i].updateVelocity(self.inertia, self.cog_const, self.soc_const, self.gbest_pos)
                self.swarm[i].updatePosition()
        print("---------------------------------")
        print("Final:")
        print("Gbest Position: " + str(self.gbest_pos))
        print("Gbest Error: " + str(self.gbest_error))
if __name__ == "__PSO__":
main()
pso = PSO(num_particles = 10, epoch = 2)
|
import json, os
import torch
from modeling import AlbertForCloth
from transformers.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from transformers import AlbertTokenizer
DATA_DIR = '/home/engine210/LE/dataset/ELE/test'
MODEL_DIR = '../model'
BERT_MODEL = 'albert-xxlarge-v2'
EVAL_BATCH_SIZE = 100
CACHE_SIZE = 256
def getFileList(data_dir):
    """Recursively collect all file paths under *data_dir*.

    Files are returned in os.walk order, sorted by name within each directory.
    """
    collected = []
    for root, _dir_names, file_names in os.walk(data_dir):
        collected.extend(os.path.join(root, name) for name in sorted(file_names))
    return collected
# from data_utility
class ClothSample(object):
    """One cloze passage: tokenized article, blank positions, and 4-way options."""

    def __init__(self):
        self.article = None  # token list; becomes a Tensor of token ids
        self.ph = []         # positions of the '[MASK]' blanks in the article
        self.ops = []        # per-blank list of 4 tokenized candidate answers
        self.high = 0        # 1 for high-school-level passages

    def convert_tokens_to_ids(self, tokenizer):
        """Convert all stored tokens to id Tensors in place using *tokenizer*."""
        self.article = torch.Tensor(tokenizer.convert_tokens_to_ids(self.article))
        for candidate_set in self.ops:
            for k in range(4):
                ids = tokenizer.convert_tokens_to_ids(candidate_set[k])
                candidate_set[k] = torch.Tensor(ids)
        self.ph = torch.Tensor(self.ph)
def tokenize_ops(ops, tokenizer):
    """Tokenize the four answer options with *tokenizer* and return them as a list."""
    return [tokenizer.tokenize(ops[i]) for i in range(4)]
def createSample(tokenizer, data):
    """Build ClothSample(s) from one raw passage dict.

    Tokenizes data['article'], replaces each '_' blank with '[MASK]' and records
    its position plus its tokenized options. Articles longer than 512 tokens are
    split into two overlapping windows (the first 512 and the last 512 tokens),
    each returned as its own sample.
    """
    # pdb.set_trace()
    cnt = 0  # index into data['options'], one entry per blank in reading order
    article = tokenizer.tokenize(data['article'])
    if (len(article) <= 512):
        # Short article: single sample holds everything.
        sample = ClothSample()
        sample.article = article
        sample.high = data['high']
        for p in range(len(article)):
            if ('_' in article[p]):
                sample.article[p] = '[MASK]'
                sample.ph.append(p)
                ops = tokenize_ops(data['options'][cnt], tokenizer)
                sample.ops.append(ops)
                cnt += 1
        return [sample]
    else:
        # Long article: first window = tokens [0, 512), second = last 512 tokens.
        first_sample = ClothSample()
        second_sample = ClothSample()
        first_sample.high = data['high']
        second_sample.high = data['high']
        second_s = len(article) - 512  # offset of the second window's start
        for p in range(len(article)):
            if ('_' in article[p]):
                article[p] = '[MASK]'
                ops = tokenize_ops(data['options'][cnt], tokenizer)
                # NOTE(review): blanks falling in the overlap [second_s, 512)
                # are assigned only to the first window — confirm intended.
                if (p < 512):
                    first_sample.ph.append(p)
                    first_sample.ops.append(ops)
                else:
                    second_sample.ph.append(p - second_s)
                    second_sample.ops.append(ops)
                cnt += 1
        first_sample.article = article[:512]
        second_sample.article = article[-512:]
        if (len(second_sample.ops) == 0):
            # No blanks beyond token 511: the second window carries no questions.
            return [first_sample]
        else:
            return [first_sample, second_sample]
def to_device(L, device):
    """Recursively move a tensor, or an arbitrarily nested list of tensors, to *device*.

    Cleanup: ``type(L) != list`` replaced with the idiomatic ``isinstance``
    check, and the manual append loop with a comprehension. Non-list values
    are assumed to expose ``.to(device)`` (torch Tensors here).
    """
    if not isinstance(L, list):
        return L.to(device)
    return [to_device(item, device) for item in L]
def preprocessor(tokenizer, file_name):
    """Load one cloze JSON file and return its list of id-converted ClothSamples.

    Bug fix: the original leaked the file handle via ``open(...).read()``;
    a context manager now guarantees the file is closed.
    """
    with open(file_name, 'r') as fh:
        data = json.load(fh)
    data['high'] = 0  # this test set is treated as non-high-school level
    data_tensor = createSample(tokenizer, data)
    for sample in data_tensor:
        sample.convert_tokens_to_ids(tokenizer)
    return data_tensor
class Loader(object):
    """Batches ClothSamples into padded tensors, grouping similar lengths.

    Samples are taken in caches of `cache_size`, sorted by article length so
    each batch pads to a similar maximum, then yielded in `batch_size` chunks.
    """

    def __init__(self, data, cache_size, batch_size, device='cpu'):
        self.data = data              # list of ClothSample (already id-converted)
        self.cache_size = cache_size  # samples sorted together per cache
        self.batch_size = batch_size
        self.data_num = len(self.data)
        self.device = device

    def _batchify(self, data_set, data_batch):
        """Pad the samples indexed by *data_batch* into one batch of tensors."""
        max_article_length = 0
        max_option_length = 0
        max_ops_num = 0
        bsz = len(data_batch)
        # First pass: find the padding sizes for this batch.
        for idx in data_batch:
            data = data_set[idx]
            max_article_length = max(max_article_length, data.article.size(0))
            for ops in data.ops:
                for op in ops:
                    max_option_length = max(max_option_length, op.size(0))
            max_ops_num = max(max_ops_num, len(data.ops))
        # Zero-padded id tensors with matching 1/0 validity masks.
        articles = torch.zeros(bsz, max_article_length).long()
        articles_mask = torch.ones(articles.size())
        options = torch.zeros(bsz, max_ops_num, 4, max_option_length).long()
        options_mask = torch.ones(options.size())
        answers = torch.zeros(bsz, max_ops_num).long()
        mask = torch.zeros(answers.size())
        question_pos = torch.zeros(answers.size()).long()
        # Second pass: copy each sample into the padded tensors.
        for i, idx in enumerate(data_batch):
            data = data_set[idx]
            articles[i, :data.article.size(0)] = data.article
            articles_mask[i, data.article.size(0):] = 0
            for q, ops in enumerate(data.ops):
                for k, op in enumerate(ops):
                    options[i,q,k,:op.size(0)] = op
                    options_mask[i,q,k, op.size(0):] = 0
            for q, pos in enumerate(data.ph):
                question_pos[i,q] = pos
        inp = [articles, articles_mask, options, options_mask, question_pos]
        return inp

    def data_iter(self):
        """Yield batches, sorted by descending article length within each cache."""
        seqlen = torch.zeros(self.data_num)
        for i in range(self.data_num):
            seqlen[i] = self.data[i].article.size(0)
        cache_start = 0
        while (cache_start < self.data_num):
            cache_end = min(cache_start + self.cache_size, self.data_num)
            cache_data = self.data[cache_start:cache_end]
            seql = seqlen[cache_start:cache_end]
            # Longest-first so early batches pad the least relative to content.
            _, indices = torch.sort(seql, descending=True)
            batch_start = 0
            while (batch_start + cache_start < cache_end):
                batch_end = min(batch_start + self.batch_size, cache_end - cache_start)
                data_batch = indices[batch_start:batch_end]
                inp = self._batchify(cache_data, data_batch)
                inp = to_device(inp, self.device)
                yield inp
                batch_start += self.batch_size
            cache_start += self.cache_size
def main():
    """Run the ALBERT cloze model over every test file and dump answers to out1.json."""
    device = torch.device("cuda:0")
    model = AlbertForCloth.from_pretrained(MODEL_DIR, cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(-1))
    model.to(device)
    model.eval()
    tokenizer = AlbertTokenizer.from_pretrained(BERT_MODEL)
    file_list = getFileList(DATA_DIR)
    # Map the model's argmax indices onto answer letters.
    ans_dic = {0:"A", 1:"B", 2:"C", 3:"D"}
    answers = {}
    for file_path in file_list:
        file_name = file_path.split("/")[-1].replace('.json', '')
        print(file_name)
        data_tensor = preprocessor(tokenizer, file_path)
        valid_data = Loader(data_tensor, CACHE_SIZE, EVAL_BATCH_SIZE, device)
        for inp in valid_data.data_iter():
            with torch.no_grad():
                out = model(inp)
            out = out.cpu().numpy()
            answer = list(map(lambda x: ans_dic[x], out))
            print(answer)
            # NOTE(review): multiple batches per file would overwrite this
            # entry; assumes one batch per file — confirm.
            answers[file_name] = answer
    jsonstr = json.dumps(answers)
    # Hand-rolled re-formatting of the JSON dump (one file per line, no spaces).
    jsonstr = jsonstr.replace(": ", ",").replace("], ", "],\n").replace("{", "{\n").replace("}", "\n}").replace(' ', '')
    with open('out1.json', 'w') as f:
        f.write(jsonstr)
if __name__ == "__main__":
main() |
# HOW TO CALL THE SNAKEMAKE FILE:
# snakemake -s proc10xG.snakemake.py -j 999 --cluster-config templates/cluster.json --cluster "sbatch -p {cluster.partition} -n {cluster.n} -t {cluster.time}"
## ucdbioinfo_supernova_pipeline proc10xG.slurm
## runs the process_10xReads.py script from the proc10xG repo
## https://github.com/ucdavis-bioinformatics/proc10xG
## Assumes only a single pair of fastq (R1/R2) files under the fastqs folder
import os
import json
args = {}         # resolved paths/settings shared by all rules below
sbatch_args = {}
configfile: "templates/keith.json"
#sbatchfile: "templates/cluster.json"
###########################################################################
# INPUT PARAMETERS
###########################################################################
args['kmers'] = config['kat_reads']['kmers']
args['pipeline'] = config['pipeline']['basepath']
args['basename'] = config['project']['basename']
args['id'] = config['project']['id']
args['fastqs'] = args['basename'] + '/' + config['project']['fastqs']
# Locate the single R1/R2 fastq pair assumed to live in the fastqs folder.
files = os.listdir(args['fastqs'])
for file in files:
    if "R1_001.fastq.gz" in file:
        args['fastq1'] = args['fastqs'] + '/' + file
    if "R2_001.fastq.gz" in file:
        args['fastq2'] = args['fastqs'] + '/' + file
###########################################################################
# OUTPUT PARAMETERS
###########################################################################
# TODO: SHOULD proc10xg be coming from the job name
# PROC10XG
args['proc10xg_out'] = args['basename'] + '/01-%s-%s_reads' % (args['id'], 'proc10xG')
args['proc10xg_outprefix'] = args['proc10xg_out'] + '/%s-%s' % (args['id'], 'proc10xG')
args['fastq1_proc10xg_out'] = args['proc10xg_outprefix'] + '_R1_001.fastq.gz'
args['fastq2_proc10xg_out'] = args['proc10xg_outprefix'] + '_R2_001.fastq.gz'
args['log_out'] = args['proc10xg_outprefix'] + '.log'
args['proc10xPath'] = args['pipeline'] + '/%s' % ('proc10xG')
# KAT READS
args['kat_reads_out'] = args['basename'] + '/02-%s-%s' % (args['id'], 'kat_reads')
args['kat_reads_outprefix'] = args['kat_reads_out'] + '/%s-%s' % (args['id'], 'kat_reads')
###########################################################################
# MODULE LOADS
###########################################################################
import socket
print (socket.gethostname())
# Every rule's shell commands run with these environment modules loaded.
shell.prefix("module load kat; module load anaconda2;")
print(json.dumps(args, indent=1))
###########################################################################
# KAT READS SBATCH
###########################################################################
args['cluster_time'] = config['kat_reads_sbatch']['main']['time']
args['cluster_account'] = config['kat_reads_sbatch']['main']['account']
args['cluster_partition'] = config['kat_reads_sbatch']['main']['partition']
args['cluster_nodes'] = config['kat_reads_sbatch']['main']['n']
print(json.dumps(args, indent=1))
# Submits one kat_reads sbatch job per configured k-mer size, each writing
# to its own output directory. Depends on proc10xG's outputs.
rule kat_reads:
    input:
        proc10xg_out = args['log_out'],
        fastq1 = args['fastq1_proc10xg_out'],
        fastq2 = args['fastq2_proc10xg_out']
    params:
        proc10xg_outprefix = args['proc10xg_outprefix'],
        proc10xg_out = args['proc10xg_out'],
        proc10xg_path = args['proc10xPath'],
        kat_reads_out = args['kat_reads_out'],
        kat_reads_outprefix = args['kat_reads_outprefix'],
        log_out = args['log_out'],
        kmers = args['kmers'],
        outputs = expand(args['kat_reads_outprefix'] + '-' + '{kmer}', kmer = args['kmers'])
    run:
        import subprocess
        shell("module list")
        # One sbatch submission per k-mer; the job script filters reads and runs kat.
        for kmer, output in zip(params.kmers, params.outputs):
            command = "sbatch -p %s -n %s -t %s ./kat_reads_call.sh %s %s %s %s/filter_10xReads.py %s %s" %(args['cluster_partition'], args['cluster_nodes'], args['cluster_time'], output, kmer, 48, params.proc10xg_path, args['fastq1_proc10xg_out'], args['fastq2_proc10xg_out'])
            print (command)
            os.system("mkdir %s" %(output))
            shell(command)
# Runs process_10xReads.py on the raw R1/R2 pair; its stderr becomes the
# log file that kat_reads uses as its input dependency.
rule proc10xG:
    input:
        fastq1 = args['fastq1'],
        fastq2 = args['fastq2']
    params:
        proc10xg_outprefix = args['proc10xg_outprefix'],
        proc10xg_out = args['proc10xg_out'],
        proc10xg_path = args['proc10xPath'],
        log_out = args['log_out']
    output:
        log_out = args['log_out'],
        #fastq1_out = args['fastq1_proc10xg_out'],
        #fastq2_out = args['fastq2_proc10xg_out']
    shell:
        "python {params.proc10xg_path}/process_10xReads.py -1 {input.fastq1} -2 {input.fastq2} -o {params.proc10xg_outprefix} -a 2> {output}"
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 9 13:03:16 2020
@author: pablo
"""
from mongoengine import Document,EmbeddedDocument
from mongoengine import StringField,IntField,ListField,BooleanField,URLField,ReferenceField,EmbeddedDocumentField
# Document classes created for MongoDB (via mongoengine)
class Visual_Feature(Document):
    """A named visual feature (keyed by name) referenced by Visual_Task."""
    name = StringField(required=True, primary_key=True)
    type = StringField(required=True)
class Visual_Task(EmbeddedDocument):
    """One computer-vision task described by a paper (embedded in Paper)."""
    name = StringField(required=True)
    viticultural_objects = ListField(StringField(max_length=50))
    data_capture_condition = StringField(required=True)
    data_capture_details = StringField()
    electromagnetic_spectrum = StringField(required=True)
    dataset_format = StringField(required=True)
    camera_types = StringField(required=True)
    camera_details = StringField()
    benchmarking_dataset = BooleanField(required=True)
    dataset_link = StringField()
    visual_features = ListField(ReferenceField(Visual_Feature))
    algorithms = ListField(StringField(max_length=50))
    viticultural_variable = ListField(StringField(max_length=50))
    viticultural_variable_details = StringField()
    monitoring = BooleanField(required=True)
    variety = ListField(StringField())          # grape varieties studied
    driving_systems = ListField(StringField())  # vine training/trellis systems
class Institution(Document):
    """An author's affiliation, keyed by institution name."""
    name = StringField(required=True, primary_key=True) # from affiliation[]
    #country = StringField(required=True)
class Finantial_Institution(Document):
    """A funding body, keyed by name.

    NOTE(review): "Finantial" is a misspelling of "Financial", but the class
    name is referenced elsewhere (Paper.finantial_institutions) so it is kept.
    """
    doi = StringField() # from ['funder']['DOI']
    name = StringField(required=True, primary_key=True)
    #country = StringField()
class Author(Document):
    """A paper author, keyed by full name."""
    orcid = StringField()
    authenticated_orcid = BooleanField()
    name = StringField(required=True, primary_key=True) # concatenated full name — presumably firstName + familyName; confirm
    familyName = StringField()
    firstName = StringField()
    #scopusID = StringField() # needs to be removed
class Author_Affiliation(EmbeddedDocument):
    """Links an Author to an Institution for one paper (embedded in Paper)."""
    institution = ReferenceField(Institution)
    author = ReferenceField(Author)
    sequence = StringField(max_length=15) # 'first' or 'additional'
# class Inclusion(EmbeddedDocument):
# inclusion = BooleanField(required=True)
# user = StringField(required=True)
# criteria = ListField(StringField())
class Paper(Document):
    """A reviewed paper, keyed by DOI, with two-pass inclusion screening fields."""
    title = StringField(required=True)
    abstract = StringField(required=True)
    doi = StringField(required=True, primary_key=True)
    on_revision = StringField()
    # First screening pass: decision, reviewer, criteria, free-text comments.
    inclusion1 = BooleanField()
    user_inclusion1 = StringField()
    criteria_inclusion1 = ListField(StringField())
    comments1 = StringField()
    # Second screening pass (same structure).
    inclusion2 = BooleanField()
    user_inclusion2 = StringField()
    criteria_inclusion2 = ListField(StringField())
    comments2 = StringField()
    keywords = ListField(StringField(max_length=50))
    publication_month = IntField(min_value=1, max_value=12)
    publication_year = IntField(min_value=1950, max_value=2020)
    visual_tasks = ListField(EmbeddedDocumentField(Visual_Task))
    finantial_institutions = ListField(ReferenceField(Finantial_Institution))
    author_affiliations = ListField(EmbeddedDocumentField(Author_Affiliation))
    viticultural_aspects = StringField(max_length=50)
    research_goal = ListField(StringField())
    practical_contibution = StringField()
    # Citation-graph bookkeeping flags.
    isOnlyReference = BooleanField()
    citationsSearched = BooleanField()
    citedBy = ListField(StringField())
    #isLoadedBefore = BooleanField()
    bibliographyIsLoaded = BooleanField()
    references = ListField(StringField())
    #research_goal = ListField(StringField()) VARIETALS (grape varieties)
    #research_goal = ListField(StringField()) TRAINING/TRELLIS SYSTEMS
import requests
# Fetch the GitHub API root (a JSON index of available endpoints).
response = requests.get('https://api.github.com')
#print(response.content) # the raw payload as bytes
response.encoding = 'utf-8' # force a specific encoding before decoding .text
print(response.text) # the payload decoded as a UTF-8 string
print(response.json()) # the payload parsed into Python objects
|
#
# Copyright © 2021 Uncharted Software Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from typing import Dict, Any, List
import pandas as pd
from d3m import container, utils
from d3m.metadata import base as metadata_base, hyperparams, params
from d3m.primitive_interfaces import base
from d3m.primitive_interfaces.base import CallResult
from distil.modeling.metrics import classification_metrics
from distil.modeling.text_classification import TextClassifierCV
from distil.utils import CYTHON_DEP
import version
__all__ = ("TextClassifierPrimitive",)
logger = logging.getLogger(__name__)
class Hyperparams(hyperparams.Hyperparams):
    """Hyperparameters for TextClassifierPrimitive."""
    # Scoring metric used by the underlying TextClassifierCV's model search.
    metric = hyperparams.Enumeration[str](
        values=classification_metrics,
        default="f1",
        semantic_types=[
            "https://metadata.datadrivendiscovery.org/types/ControlParameter"
        ],
    )
class Params(params.Params):
    """Serializable fitted state of TextClassifierPrimitive."""
    model: TextClassifierCV        # the fitted CV-wrapped classifier
    label_map: Dict[int, str]      # factorized int -> original label (binary case only)
    target_col_names: List[str]    # original target column names, reused on predictions
class TextClassifierPrimitive(
    base.PrimitiveBase[container.DataFrame, container.DataFrame, Params, Hyperparams]
):
    """
    This primitive takes a dataframe containing input texts, performs TFIDF on this text, and then builds a classifier using
    these features.
    """

    metadata = metadata_base.PrimitiveMetadata(
        {
            "id": "24f51246-7487-454e-8d69-7cdf289994d1",
            "version": version.__version__,
            "name": "Text Classifier",
            "python_path": "d3m.primitives.classification.text_classifier.DistilTextClassifier",
            "source": {
                "name": "Distil",
                "contact": "mailto:cbethune@uncharted.software",
                "uris": [
                    "https://github.com/uncharted-distil/distil-primitives/blob/main/distil/primitives/text_classifier.py",
                    "https://github.com/uncharted-distil/distil-primitives",
                ],
            },
            "installation": [
                CYTHON_DEP,
                {
                    "type": metadata_base.PrimitiveInstallationType.PIP,
                    "package_uri": "git+https://github.com/uncharted-distil/distil-primitives.git@{git_commit}#egg=distil-primitives".format(
                        git_commit=utils.current_git_commit(os.path.dirname(__file__)),
                    ),
                },
            ],
            "algorithm_types": [
                metadata_base.PrimitiveAlgorithmType.RANDOM_FOREST,
            ],
            "primitive_family": metadata_base.PrimitiveFamily.CLASSIFICATION,
        },
    )

    def __init__(self, *, hyperparams: Hyperparams, random_seed: int = 0) -> None:
        super().__init__(hyperparams=hyperparams, random_seed=random_seed)
        self._model = TextClassifierCV(
            self.hyperparams["metric"], random_seed=random_seed
        )
        # Populated in set_training_data when the target looks binary.
        self._label_map: Dict[int, str] = {}

    def set_training_data(
        self, *, inputs: container.DataFrame, outputs: container.DataFrame
    ) -> None:
        """Store training inputs/targets, factorizing binary labels to ints.

        TODO: `TextReaderPrimitive` has a weird output format from `read_file_uri`
        to remain consistent with common primitives base `FileReaderPrimitive`.
        """
        self._inputs = inputs
        self._target_col_names = list(outputs.columns)
        # Map labels instead of trying to force them to int; only done for
        # targets with at most two distinct values (binary case).
        col = outputs.columns[0]
        if len(pd.factorize(outputs[col])[1]) <= 2:
            factor = pd.factorize(outputs[col])
            outputs = pd.DataFrame(factor[0], columns=[col])
            # Remember the int -> original-label mapping to undo in produce().
            self._label_map = {k: v for k, v in enumerate(factor[1])}
        self._outputs = outputs

    def _format_text(self, inputs):
        # NOTE(review): reads the 'filename' column — the upstream reader
        # appears to place the text content there; confirm against the pipeline.
        return inputs["filename"].values

    def _format_output(self, outputs):
        # Flatten the target frame to a 1-D array as the model expects.
        return outputs.values.ravel(order="C")

    def fit(self, *, timeout: float = None, iterations: int = None) -> CallResult[None]:
        """Fit the underlying TextClassifierCV on the stored training data."""
        logger.debug(f"Fitting {__name__}")
        self._model.fit(
            self._format_text(self._inputs), self._format_output(self._outputs)
        )
        return CallResult(None)

    def produce(
        self,
        *,
        inputs: container.DataFrame,
        timeout: float = None,
        iterations: int = None,
    ) -> CallResult[container.DataFrame]:
        """Predict labels for *inputs* and return them as a D3M DataFrame."""
        logger.debug(f"Producing {__name__}")
        # create dataframe to hold d3mIndex and result
        result = self._model.predict(self._format_text(inputs))
        df = pd.DataFrame(result)
        # pipline run saving is now getting fussy about the prediction names matching the original target column
        # name
        df.columns = self._target_col_names
        # if we mapped values earlier map them back.
        if self._label_map:
            df.replace(self._label_map, inplace=True)
        result_df = container.DataFrame(df, generate_metadata=True)
        # mark the semantic types on the dataframe
        result_df.metadata = result_df.metadata.add_semantic_type(
            (metadata_base.ALL_ELEMENTS, 0),
            "https://metadata.datadrivendiscovery.org/types/PredictedTarget",
        )
        return base.CallResult(result_df)

    def get_params(self) -> Params:
        """Export the fitted model and label bookkeeping for serialization."""
        return Params(
            model=self._model,
            label_map=self._label_map,
            target_col_names=self._target_col_names,
        )

    def set_params(self, *, params: Params) -> None:
        """Restore state previously exported by get_params."""
        self._model = params["model"]
        self._label_map = params["label_map"]
        self._target_col_names = params["target_col_names"]
        return
|
'''
Tried to solve this with a plain sort, but got Time Limit Exceeded..
(translated from Korean)
'''
import sys
r = sys.stdin.readline
def swap(a, b) :
    """Swap rows a and b of the module-level list P, element-wise.

    Element-wise swapping (not reference swapping) preserves the original's
    in-place mutation of the inner [score1, score2] lists. Cleanup: replaced
    the two temp variables with idiomatic tuple assignment.
    """
    P[a][0], P[b][0] = P[b][0], P[a][0]
    P[a][1], P[b][1] = P[b][1], P[a][1]
def setPivot(first, mid, last) :
    """Median-of-three: order P[first], P[mid], P[last] by their [0] key so the
    median value ends up at *mid* (used as the quicksort pivot)."""
    if P[first][0] > P[mid][0] :
        swap(first, mid)
    if P[mid][0] > P[last][0] :
        swap(mid, last)
    if P[first][0] > P[mid][0] :
        swap(first, mid)
def sort(first, last) :
    """In-place quicksort of P[first..last] by the [0] key.

    Uses median-of-three pivot selection; spans of size <= 3 are fully ordered
    by setPivot alone. The pivot is parked at last-1 during partitioning.
    """
    mid = (first + last) // 2
    setPivot(first, mid, last)
    if last - first > 2 :
        i = first + 1
        j = last - 1
        pivot = last - 1
        # Park the median (pivot value) at position last-1.
        swap(mid, j)
        while i < j :
            # Advance i past elements <= pivot, retreat j past elements >= pivot.
            while i < last and P[i][0] <= P[pivot][0] :
                i += 1
            while j > first and P[j][0] >= P[pivot][0] :
                j -= 1
            if i < j :
                swap(i, j)
        if i > pivot :
            # NOTE(review): swapping with `mid` here (not `i`) looks unusual for
            # a standard Hoare/Lomuto scheme — confirm this edge case is intended.
            swap(mid, pivot)
            sort(first, i - 1)
            return
        # Restore the pivot between the two partitions, then recurse on both.
        swap(i, pivot)
        sort(first, i - 1)
        sort(i + 1, last)
# Read N applicants as [rank_a, rank_b] pairs (presumably document/interview
# ranks, as in the classic two-score hiring problem — confirm).
N = int(r())
P = []
result = 0  # number of applicants selected
for i in range(N) :
    temp = list(map(int, r().split()))
    P.append(temp)
# Sort by the first rank so each applicant only competes with those before it.
sort(0, N - 1)
for i in range(N) :
    cnt = 0
    d = P[i][0]
    c = P[i][1]
    # Count earlier applicants that do NOT dominate applicant i:
    # ties on the first rank, or worse (larger) second rank.
    for j in range(i) :
        if P[j][0] == d :
            cnt += 1
        if P[j][1] > c :
            cnt += 1
    # Selected only if no earlier applicant beats i on both ranks.
    if cnt == i :
        result += 1
    else :
        continue
print(result)
|
#!/usr/bin/python
import random

# Counting-sort demo: stably sorts 100k random ints in [0, maxNum).
maxNum = 100000

# The array that is to be sorted
arr = [int(maxNum * random.random()) for _ in range(100000)]

# After the two passes below, count[v] holds the number of elements
# strictly less than v (note the +1 shift in the tally pass).
count = [0] * (maxNum + 1)
for value in arr:
    count[value + 1] += 1
for v in range(1, maxNum + 1):
    count[v] += count[v - 1]

# Stable placement: each element lands right after all smaller elements
# and any equal elements already placed.
arrB = arr.copy()
for value in arrB:
    arr[count[value]] = value
    count[value] += 1
print(arr)
|
"""
ゼロから学ぶスパイキングニューラルネットワーク
- Spiking Neural Networks from Scratch
Copyright (c) 2020 HiroshiARAKI. All Rights Reserved.
"""
import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
    time = 300  # simulation length [ms]
    dt = 0.5    # time step [ms]
    # Build an arbitrary spike trace
    spikes = np.zeros(int(time/dt))
    # Raise 5 spikes at random time steps
    for _ in range(5):
        spikes[np.random.randint(0, int(time/dt))] = 1
    # Build the firing trace
    firing = []
    fire = 0
    tc = 20  # decay time constant (in steps)
    for t in range(int(time/dt)):
        if spikes[t]:  # on a spike, reset the trace to 1
            fire = 1
        else:  # otherwise let the trace decay over time
            fire -= fire / tc
        firing.append(fire)
    # Plot the raw spikes above their smoothed firing trace.
    t = np.arange(0, time, dt)
    plt.subplot(2, 1, 1)
    plt.plot(t, spikes, label='Spike Trace')
    plt.ylabel('Spike Trace')
    plt.subplot(2, 1, 2)
    plt.plot(t, firing)
    plt.ylabel('Firing Trace')
    plt.xlabel('time [ms]')
    plt.show()
|
# 给定一个非负索引 k,其中 k ≤ 33,返回杨辉三角的第 k 行。
#
#
#
# 在杨辉三角中,每个数是它左上方和右上方的数的和。
#
# 示例:
#
# 输入: 3
# 输出: [1,3,3,1]
#
#
# 进阶:
#
# 你可以优化你的算法到 O(k) 空间复杂度吗?
# Related Topics 数组
# 👍 197 👎 0
from typing import List
# leetcode submit region begin(Prohibit modification and deletion)
class Solution:
    def getRow(self, rowIndex: int) -> List[int]:
        """Return row *rowIndex* (0-indexed) of Pascal's triangle.

        Bug fix: the original special-cased rowIndex == 0 to return [],
        but row 0 of Pascal's triangle is [1] (the spec guarantees every
        k <= 33 has a row). Dropping the special case fixes this: the loop
        never runs for rowIndex 0 and [1] is returned directly.

        Uses O(k) extra space — only the previous row is kept.
        """
        row = [1]
        while len(row) <= rowIndex:
            # Next row = pairwise sums of the current row padded with zeros.
            row = [a + b for a, b in zip(row + [0], [0] + row)]
        return row
# leetcode submit region end(Prohibit modification and deletion)
|
# Print a centred Pascal's triangle with n-1 rows (row i shows C(i-1, j)).
n = int(input("Enter a number: "))
pas = 1  # running binomial coefficient, updated multiplicatively
for i in range(1,n):
    # Leading spaces centre the row.
    for j in range(1,n-i+1):
        print(" ",end='')
    for j in range(0,i):
        if(j==0 or i==0):
            pas = 1
        else:
            # C(i-1, j) = C(i-1, j-1) * (i-j) // j
            pas = pas*(i-j)//j
        print(pas,end=' ')
    print()
# -*- coding: utf-8 -*-
"""
pyxdevkit.console
~~~~~~~~~~~~~
module that implements xdevkit's methods.
"""
import socket
from debugger import Debugger
from pyxdevkit.exceptions import ConnectionError,NotConnectedError
class Console(object):
"""object that contains the functions that implement xdevkit"""
    def __init__(self, ip_address):
        """Remember the console's IP and start in a disconnected state."""
        # Since the SDK is not necessarily installed we use the IP address,
        # not the console name, to connect.
        self.ip_address = ip_address
        self.debugger = None        # Debugger attachment; not created here
        self.is_connected = False   # set to True by a successful connect()
def connect(self):
"""Connects to the console"""
try:
# Set up the socket and connect
HOST, PORT = self.ip_address, 730
sock = socket.create_connection(
(HOST, PORT), timeout=.2)
if sock.recv(1024) == '201- connected\r\n':
self.is_connected = True
sock.close()
else:
sock.close()
raise ConnectionError(self.ip_address)
except:
raise ConnectionError(self.ip_address)
def get_name(self):
"""Gets the name of the connected console"""
if not self.is_connected:
raise NotConnectedError()
# Set up the socket and connect
HOST, PORT = self.ip_address, 730
sock = socket.create_connection(
(HOST, PORT), timeout=.2)
# This is the cmd that we send to the console
sock.send("DBGNAME\r\n")
# First recv just says that we are connected
sock.recv(1024)
name = sock.recv(1024)
sock.close()
return name[5:-2]
def get_mem(self, addr, length):
"""Returns the length amount of memory from addr"""
if not self.is_connected:
raise NotConnectedError()
# Set up the socket and connect
HOST, PORT = self.ip_address, 730
sock = socket.create_connection(
(HOST, PORT), timeout=.2)
# This is the cmd that we send to the console
sock.send("GETMEMEX ADDR=0x%x LENGTH=0x%x\r\n" % (addr, length))
# The first response is always 201-connected
# This is weird because when I do it using C# and xdevkit this doesn't
# happen
sock.recv(1024)
# The first thing returned will be 203- binary response follows\r\n, + the data.
# If the length is small it will be returned all in one recv.
# If it is larger it will take multiple.
# Note: The first two bytes of the binary response are not part of the
# memory
received = sock.recv(4096 + length)
received = received.replace('203- binary response follows\r\n', '')[2:]
while len(received) < length:
try:
data = sock.recv(1026)
received += data[2:]
except:
sock.close()
return received
sock.close()
return received
def set_mem(self, addr, data):
""" Sets the memory at addr to data
The value in data has to be a string of hexadecimal characters
so for example data = 'DEADBEEF' is fine
"""
if not self.is_connected:
raise NotConnectedError()
# Set up the socket and connect
HOST, PORT = self.ip_address, 730
sock = socket.create_connection(
(HOST, PORT), timeout=.2)
# This is the cmd that we send to the consoles
sock.send("SETMEM ADDR=0x%x DATA=%s\r\n" % (addr, data))
sock.close()
def connect_as_debugger(self, name):
""" Connects as a debugger so you can do things such as setting breakpoints.
There is also a flags parameter in the C# dll
but it does not seem to change the request when I monitor the requests.
So it has been ommited
"""
if not self.is_connected:
raise NotConnectedError()
# Set up the socket and connect
HOST, PORT = self.ip_address, 730
sock = socket.create_connection(
(HOST, PORT), timeout=.2)
# Connecting as a debugger actually takes two requests.
# First you have to specify a reconnect port
# Then you actually have to connect
sock.send("NOTIFY RECONNECTPORT=51523 reverse\r\n")
# Create the consoles debugger
# Now we are able to do things such as set breakpoints
self.debugger = Debugger(self.ip_address, sock)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, PORT))
sock.send(
'DEBUGGER CONNECT PORT=0x0000C901 override user=WINCTRL-TQMC306 name="%s"\r\n' % name)
def reboot(self):
""" Reboots the console. """
if not self.is_connected:
raise NotConnectedError()
HOST, PORT = self.ip_address, 730
sock = socket.create_connection(
(HOST, PORT), timeout=.2)
sock.send("magicboot COLD\r\n")
|
import dash_bootstrap_components as dbc
from dash import Input, Output, State, html
# Radio-button group that lets the user choose which edge of the screen
# the offcanvas panel slides in from.
placement_selector = html.Div(
    [
        dbc.Label("Placement:"),
        dbc.RadioItems(
            id="offcanvas-placement-selector",
            options=[
                {"label": "start (Default)", "value": "start"},
                {"label": "end", "value": "end"},
                {"label": "top", "value": "top"},
                {"label": "bottom", "value": "bottom"},
            ],
            value="start",  # default placement
            inline=True,
        ),
    ],
    className="mb-2",
)
# Layout fragment: the placement selector, a button that opens the panel,
# and the offcanvas component itself (closed until toggled).
offcanvas = html.Div(
    [
        placement_selector,
        dbc.Button(
            "Open Offcanvas", id="open-offcanvas-placement", n_clicks=0
        ),
        dbc.Offcanvas(
            html.P("Some offcanvas content..."),
            id="offcanvas-placement",
            title="Placement",
            is_open=False,  # start closed
        ),
    ]
)
@app.callback(
    Output("offcanvas-placement", "is_open"),
    Input("open-offcanvas-placement", "n_clicks"),
    [State("offcanvas-placement", "is_open")],
)
def toggle_offcanvas(n1, is_open):
    """Flip the offcanvas open state whenever the button has been clicked."""
    # n1 is 0 (falsy) before any click, so the initial state is preserved.
    return (not is_open) if n1 else is_open
@app.callback(
    Output("offcanvas-placement", "placement"),
    Input("offcanvas-placement-selector", "value"),
)
def toggle_placement(placement):
    """Forward the selected radio value to the offcanvas placement prop."""
    return placement
|
from flask import Flask, Response, request
import jsonpickle
app = Flask(__name__)


@app.route('/', methods=["POST"])
def home():
    """Echo the posted `name` and `info` form fields back as JSON."""
    # request.form[...] lets Flask handle missing fields itself.
    payload = {
        'name': request.form["name"],
        'info': request.form["info"],
        'success': "yes"
    }
    encoded = jsonpickle.encode(payload)
    return Response(response=encoded, status=200, mimetype="application/json")


# Development server, bound to localhost only.
app.run(host="127.0.0.1", port=5000)
import pyproj
import numpy
import math
import time
import simplejson
import os
import scandir
import my_utils
from my_utils import UserGPS
from my_utils import TransitData
class Runner(object):
    """
    A collection of utilities to process the SHED10 data. The approach taken
    here was to download the required tables into CSV files then post-process
    them, rather than to perform complicated SQL queries. Generate lots of
    intermediate files for plotting (e.g., before aggregation, etc)

    NOTE(review): Python 2 code throughout (print statements, iteritems,
    xrange, has_key); run under a Python 2 interpreter.
    """

    def __init__(self):
        # EPSG:32613 = UTM zone 13N; used for lat/lon <-> metre conversions.
        self._myproj = pyproj.Proj("+init=EPSG:32613")
        self._data_dir = 'data/shed10'

    def list_gps_users(self):
        """Tally GPS points per user from gps_utm.csv and print the counts."""
        line_count = 0
        f = open("%s/gps_utm.csv" % self._data_dir)
        user_dict = {}
        line = ''
        try:
            for line in f:
                line_count += 1
                if line_count == 1: continue  # skip the CSV header row
                parts = line.split(",")
                user_id = int(parts[0].strip())
                count = user_dict.get(user_id, 0)
                count += 1
                user_dict[user_id] = count
        except Exception as err:
            print "Exception: %s" % err
            print "Line: %s" % line
        finally:
            f.close()
        for user_id, count in user_dict.iteritems():
            print "User ID: %d GPS points: %d" %(user_id, count)

    def find_points_near_stops_2(self, user_id=None, radius=100):
        """
        Just loops through a users GPS points and detects when
        (unimplemented stub -- the body was never written)
        """

    def load_user_gps(self, user_id):
        """Load one user's (x, y, seconds) GPS points from their CSV file."""
        line_count = 0
        file_name = "%s/user_gps/user_gps_%d.csv" % (self._data_dir, user_id)
        f = open(file_name)
        points = []
        for line in f:
            line_count += 1
            if line_count == 1: continue  # skip header
            parts = line.split(",")
            x = float(parts[0].strip())
            y = float(parts[1].strip())
            sec = int(parts[2].strip())
            points.append((x, y, sec))
        f.close()
        print "Read %s (%d points)" % (file_name, len(points))
        return points

    def get_point_velocity(self, start, stop):
        """Average speed in km/h between two (x, y, sec) points.

        NOTE(review): raises ZeroDivisionError if both points share a
        timestamp -- assumes strictly increasing times; confirm with data.
        """
        dist = self.get_dist((start[0], start[1]), (stop[0], stop[1]))
        duration = stop[2] - start[2]
        velocity_ms = float(dist)/float(duration)
        velocity_kph = 3600.0 * velocity_ms / 1000.0
        return velocity_kph

    def process_stop_points(self, stop_data):
        """Filter a dwell near a stop and map it if it looks like a pickup."""
        point_list = stop_data.get('points')
        if len(point_list) < 10:
            # print "IGNORE less than 5 points"
            return
        start_point = point_list[0]
        end_point = point_list[-1]
        time_at_stop = end_point[2] - start_point[2]
        if time_at_stop > 600:
            # print "IGNORE... AT STOP TOO LONG"
            return
        if time_at_stop < 30:
            # print "IGNORE...TOO SHORT"
            return
        print "------- Time at stop: %d" % time_at_stop
        velocity = self.get_point_velocity(start_point, end_point)
        #print "velocity through stop ====> %.2f" % velocity
        # Entry/exit speeds over the first and last five points.
        velocity_in = self.get_point_velocity(point_list[0], point_list[5])
        velocity_out = self.get_point_velocity(point_list[-5], point_list[-1])
        if abs(velocity_in - velocity_out) > 10.0:
            print "VELO IN %.2f -- %.2f %d" % (velocity_in, velocity_out, stop_data.get('image_index'))
            self.make_stop_map(stop_data)

    def find_points_near_stops(self, user_id=None, radius=None):
        """Detect entries/exits of a user's GPS track near each bus stop."""
        stop_point_dict = {}
        f = open('data/transit/bus_stops.json', 'r')
        bus_stops = simplejson.load(f)
        f.close()
        user_points = self.load_user_gps(user_id)
        image_index = 0
        # NOTE(review): this handle is opened but never read or closed.
        f = open("%s/user_gps/user_gps_%d.csv" % (self._data_dir, user_id))
        for stop_index, d in bus_stops.iteritems():
            stop_x = d.get('x')
            stop_y = d.get('y')
            near_stop = False
            point_list = []
            stop_data = {}
            for point in user_points:
                x = point[0]
                y = point[1]
                sec = point[2]
                dist = self.get_dist((x, y), (stop_x, stop_y))
                if dist < radius:
                    if near_stop == False:
                        # First point inside the radius: start a new dwell.
                        print "Enter Stop: %s" % d.get('name')
                        point_list = []
                        stop_data['x'] = stop_x
                        stop_data['y'] = stop_y
                        stop_data['stop_index'] = stop_index
                        stop_data['user_id'] = user_id
                        stop_data['image_index'] = image_index
                        stop_data['radius'] = radius
                        near_stop = True
                    point_list.append(point)
                else:
                    if near_stop:
                        # Just left the radius: process the collected dwell.
                        print "Exit Stop: %s" % d.get('name')
                        stop_data['points'] = point_list
                        self.process_stop_points(stop_data)
                        image_index += 1
                        near_stop = False
                        point_list = []
                        stop_data = {}
        # key = "%d (%s)" % (int(stop_index), d.get('name'))
        #
        # stop_data = stop_point_dict.get(key, {})
        # stop_points = stop_data.get('points', [])
        # stop_points.append((x, y, sec))
        # stop_data['points'] = stop_points
        # stop_data['name'] = d.get('name')
        #
        # stop_point_dict[key] = stop_data
        # file_name = "data/user_radius_%d_%d.json" % (user_id, radius)
        # f = open(file_name, 'w')
        # simplejson.dump(stop_point_dict, f, indent=4)
        # f.close()

    def check_user_stops(self, user_id=None, radius=None):
        """Print the point count per stop from a saved user_radius JSON file."""
        file_name = "data/user_radius_%d_%d.json" % (user_id, radius)
        f = open(file_name, 'r')
        stop_data_points = simplejson.load(f)
        f.close()
        for key, value in stop_data_points.iteritems():
            stop_points = value.get('points')
            print "Stop: %s points: %d" % (key, len(stop_points))

    def make_stop_map(self, stop_data):
        """Write a Google-Maps HTML page showing a stop and a dwell's points."""
        from map_html import TOP as MAP_TOP
        from map_html import BOTTOM as MAP_BOTTOM
        from map_html import CIRCLE1, CIRCLE2
        # print type(user_id)
        # file_name = "%s/user_radius_%d_%d_stop_%d_%d.json" % \
        #     (self._data_dir, user_id, radius, stop_index, image_index)
        #
        # f = open(file_name, 'r')
        # stop_data_points = simplejson.load(f)
        # f.close()
        stop_points = None
        point_list = stop_data.get('points')
        stop_index = int(stop_data.get('stop_index'))
        user_id = stop_data.get('user_id')
        radius = stop_data.get('radius')
        image_index = stop_data.get('image_index')
        # for key, value in stop_data_points.iteritems():
        #     stop_index_this = int(value.get('index'))
        #     if stop_index_this == stop_index:
        #         stop_points = value.get('points')
        #         break
        if point_list is None:
            # NOTE(review): no return here -- the enumerate() below would
            # raise on a None point_list.
            print "No GPS points detected"
        map_name = "%s/maps/map_user_radius_stop_%d_%d_%d_%d.html" % \
            (self._data_dir, user_id, radius, stop_index, image_index)
        f = open(map_name, "w")
        f.write(MAP_TOP)
        # circle1: the bus stop itself.
        f.write("var circle1 = {\n")
        stop_x = stop_data.get('x')
        stop_y = stop_data.get('y')
        lon, lat = self._myproj(stop_x, stop_y, inverse=True)
        f.write("%d: {center:{lat: %f, lng: %f},},\n" % (0, lat, lon))
        f.write("};\n")
        f.write(CIRCLE1)
        # circle2: one marker per GPS point in the dwell.
        f.write("var circle2 = {\n")
        for i, point in enumerate(point_list):
            lon, lat = self._myproj(point[0], point[1], inverse=True)
            f.write("%d: {center:{lat: %f, lng: %f},},\n" % (i, lat, lon))
        f.write("};\n")
        f.write(CIRCLE2)
        f.write(MAP_BOTTOM)
        f.close()

    def make_bus_stops(self):
        """Convert TransitStops.csv (lat/lon) into a UTM bus_stops.json file."""
        result = {}
        index = 0
        line_count = 0
        f = open("data/TransitStops.csv")
        for line in f:
            line_count += 1
            if line_count == 1: continue  # skip header
            parts = line.split(",")
            name = parts[3].strip()
            lat = float(parts[5].strip())
            lon = float(parts[6].strip())
            print lat, lon, name
            x, y = self._myproj(lon, lat)
            stop_data = {
                'lat' : lat,
                'lon' : lon,
                'x' : x,
                'y' : y,
                'name' : name
            }
            result[index] = stop_data
            index += 1
        f.close()
        f = open('data/bus_stops.json', 'w')
        simplejson.dump(result, f, indent=4)
        f.close()

    def make_bus_files(self):
        """
        Read in all the battery records and count according to user ID.
        Compute percentage of total returned by each user, and make a
        list of those above a threshold and those below a threshold.

        NOTE(review): the summary above looks copied from another method;
        this one actually reads TransitShapes.csv, drops duplicate route
        shapes, densifies each route and writes bus_routes.json.
        """
        route_dict = {}
        line_count = 0
        f = open("data/TransitShapes.csv", "r")
        for line in f:
            line_count += 1
            if line_count == 1: continue  # skip header
            line = line.strip()
            parts = line.split(",")
            route_id = int(parts[1].strip())
            lat = float(parts[2].strip())
            lon = float(parts[3].strip())
            seq = int(parts[4].strip())
            dist = float(parts[5].strip())
            x, y = self._myproj(lon, lat)
            # test_lon, test_lat = self._myproj(x, y, inverse=True)
            # print lon, test_lon, lat, test_lat
            route_data = route_dict.get(route_id, [])
            route_data.append((seq, x, y, dist))
            route_dict[route_id] = route_data
        f.close()
        # Duplicate shapes are detected via a (point count, sum of x)
        # fingerprint; only the first route with each fingerprint is kept.
        repeat_dict = {}
        new_dict = {}
        for route_id, route_data in route_dict.iteritems():
            sum_x = 0
            for item in route_data:
                sum_x += item[1]
            key = "%d-%d" % (len(route_data), int(sum_x))
            if repeat_dict.has_key(key):
                print "DUPLICATE ROUTE DETECTED!!"
                continue
            new_dict[route_id] = route_data
            repeat_dict[key] = True
            print "id: %d points: %d sum_x: %f" % (route_id, len(route_data), sum_x)
        route_dict = new_dict
        new_dict = {}
        route_count = 0
        # NOTE(review): route_count is never incremented; it prints 0 always.
        for route_id, route_data in route_dict.iteritems():
            print "count: %d id: %d points: %d" % (route_count, route_id, len(route_data))
            # self.make_route_file(route_id, route_data)
            route_data = self.trip_fill(route_data)
            new_dict[route_id] = route_data
            self.make_route_file(route_id, route_data)
        f = open('data/bus_routes.json', 'w')
        simplejson.dump(new_dict, f, indent=4)
        f.close()

    def trip_fill(self, route_data):
        """Densify a route by bisecting segments until none exceeds 25 m."""
        # Sort the data. Sequence is first item
        sort_list = [(item[0], (item[1], item[2])) for item in route_data]
        s = sorted(sort_list)
        # This results is a list of GPS points for the route
        route_data = [item[1] for item in s]
        while True:
            point2 = None
            points_added = 0
            filled_route = []
            points = len(route_data)
            for i in xrange(points - 1):
                point1 = route_data[i]
                point2 = route_data[i+1]
                dist = self.get_dist(point1, point2)
                # print point1, point2, dist
                # Add first point to route
                filled_route.append(point1)
                if dist > 25:
                    new_point = self.insert_point(point1, point2)
                    filled_route.append(new_point)
                    points_added += 1
            # Must add last point when we break out of loop
            filled_route.append(point2)
            route_data = filled_route
            print "points", len(route_data), "Added", points_added
            if points_added == 0:
                break
        return route_data

    def insert_point(self, point1, point2):
        """Return the midpoint of the segment point1 -> point2."""
        x1 = point1[0]
        y1 = point1[1]
        x2 = point2[0]
        y2 = point2[1]
        new_x = x1 + (x2 - x1)/2.0
        new_y = y1 + (y2 - y1)/2.0
        return (new_x, new_y)

    def get_dist(self, point1, point2):
        """Euclidean distance between two (x, y) points.

        Returns 0.0 when either point, or either x coordinate, is None,
        which simplifies first-iteration handling in the callers.
        """
        if point1 is None or point2 is None: return 0.0
        x1 = point1[0]
        y1 = point1[1]
        x2 = point2[0]
        y2 = point2[1]
        if x1 is None or x2 is None: return 0.0
        return math.sqrt(math.pow((x1 - x2), 2) + math.pow((y1 - y2),2))

    def make_route_file(self, route_id, route_data):
        """Write one route's points (with inter-point spacing) to CSV."""
        file_name = "data/route_points_%06d.csv" % route_id
        f = open(file_name, "w")
        f.write("index, utm_x, utm_y, dist\n")
        point_count = 0
        point_prev = None
        for point in route_data:
            point_count += 1
            sep = self.get_dist(point, point_prev)  # 0.0 for the first point
            point_prev = point
            f.write("%d,%f,%f,%f\n" % (point_count, point[0], point[1], sep))
        f.close()

    def get_trip_points(self, file_name):
        """Load a trip CSV; return (points, speed_kph, duration, distance)."""
        f = open(file_name, "r")
        line_count = 0
        total_distance = 0
        time_sec = []
        prev_x = None
        prev_y = None
        points_list = []
        for line in f:
            line_count += 1
            if line_count == 1: continue  # skip header
            parts = line.split(",")
            x = float(parts[0].strip())
            y = float(parts[1].strip())
            sec = int(parts[2].strip())
            time_sec.append(sec)
            points_list.append((x, y))
            # get_dist() returns 0.0 while prev_x/prev_y are still None.
            distance = self.get_dist((prev_x, prev_y), (x, y))
            prev_x = x
            prev_y = y
            total_distance += distance
            # print "dist, total_dist", distance, total_distance
        f.close()
        duration = max(time_sec) - min(time_sec)
        speed_ms = float(total_distance)/float(duration)
        print "speed m/s", speed_ms
        speed_kph = 3600.0 * speed_ms / 1000.0
        print "speed_kph", speed_kph
        return points_list, speed_kph, duration, total_distance

    def compare_one_trip(self, file_name, bus_routes):
        """Score one user trip against every bus route by average distance."""
        trip_points, speed_kph, duration, trip_dist = self.get_trip_points(file_name)
        print "number of points", len(trip_points)
        if trip_dist < 1000:
            print "ignore trip dist < 1000m"
            return
        # NOTE(review): compares the *list* to 20 rather than its length;
        # under Python 2 this is always False, so the check never fires.
        # It presumably should read len(trip_points) < 20.
        if trip_points < 20:
            print "ignoring trip with less than 20 points"
            return
        if duration < 600:
            print "ignoring trip with duration less than 600 seconds"
            return
        if speed_kph < 10.0:
            print "ignoring trip with speed less than 10 kph"
            return
        count = 0
        result = []
        for route_id, route_data in bus_routes.iteritems():
            # For each trip point, find the distance to the nearest point
            # on this route, then average over the whole trip.
            closest_list = numpy.zeros((len(trip_points,)), dtype=float)
            for i, point in enumerate(trip_points):
                closest_dist = 99999999999
                for route_point in route_data:
                    dist = self.get_dist(point, route_point)
                    if dist < closest_dist:
                        closest_dist = dist
                closest_list[i] = closest_dist
            ave_dist = numpy.average(closest_list, axis=0)
            # print closest_list
            print "Compare trip %s to route %s dist %f" %(file_name, repr(route_id), ave_dist)
            if ave_dist > 30.0:
                #print "discard route %s wth ave dist %f > 30m" % (repr(route_id), ave_dist)
                continue
            print "FOUND POTENTIAL BUS TRIP!!!"
            result.append((ave_dist, route_id))
        for item in result:
            print "Route ID: %s Ave Dist: %s" % (repr(item[1]), repr(item[0]))
            print "Speed (km/h): %f Duration: %d" % (speed_kph, duration)
        if len(result) == 0:
            print "No potential bus trips detected"
            return
        # Append the candidate matches to a running CSV log.
        f = open("%s/closest_routes.csv" % self._data_dir, "a")
        for item in result:
            route_id = item[1]
            ave_dist = item[0]
            f.write("%s, %s, %0.2f ave dist(m), %0.2f trip dist (m), %0.2f km/h, %d sec\n" % \
                (file_name, repr(route_id), ave_dist, trip_dist, speed_kph, duration))
        f.close()

    def compare_trips_to_routes(self):
        """Compare every saved user trip file against all bus routes."""
        f = open('data/transit/bus_routes.json', 'r')
        bus_routes = simplejson.load(f)
        f.close()
        for route_id in bus_routes.iterkeys():
            print "route_id", route_id
        for item in scandir.scandir('%s/user_trips' % self._data_dir):
            if item.is_file():
                if item.name.startswith('user_trip_'):
                    self.compare_one_trip(item.path, bus_routes)

    def correlate_test(self):
        """Experimental: score one user's day of GPS against one bus route."""
        # print my_utils.seconds_to_string(1487863933)
        # return
        # for 1346
        start_sec = my_utils.string_to_seconds("2017-02-13")
        stop_sec = my_utils.string_to_seconds("2017-02-14")
        # NOTE(review): the window above is immediately overwritten here.
        start_sec = my_utils.string_to_seconds("2017-02-23")
        stop_sec = my_utils.string_to_seconds("2017-02-24")
        dist_list = []
        # user_gps = UserGPS(10, 1346)
        user_gps = UserGPS(10, 555)
        user_points = user_gps.load(start_sec=start_sec, stop_sec=stop_sec)
        # print "Got %d user points" % len(user_points)
        transit_gps = TransitData()
        # transit_points = transit_gps.load_route(49989)
        transit_points = transit_gps.load_route(50347)
        prev_pos = None
        for user_point in user_points:
            #print user_point
            user_pos = (user_point[0], user_point[1])
            user_time = user_point[2]
            min_dist = None
            for route_point in transit_points:
                dist = self.get_dist(user_pos, route_point)
                if min_dist is None or dist < min_dist:
                    min_dist = dist
            # Score: closeness to the route, weighted by movement since the
            # previous point, so stationary points score near zero.
            score = 100.0 - min_dist
            if prev_pos:
                dist_to_prev = self.get_dist(prev_pos, user_pos)
                score = score * dist_to_prev
            prev_pos = user_pos
            if score < 0: score = 0
            dist_list.append((user_time, score))
        # Sum the scores in a sliding 40-minute window centred on each
        # minute of the day (averaging is currently commented out).
        for minute in xrange(24*60):
            bin_middle = start_sec + (minute * 60)
            bin_start = bin_middle - (20 * 60)
            bin_end = bin_middle + (20 * 60)
            ave_score = 0
            count = 0
            for item in dist_list:
                if item[0] < bin_start or item[0] > bin_end: continue
                ave_score += item[1]
                count += 1
            # if count == 0:
            #     ave_score = 0
            # else:
            #     ave_score = float(ave_score)/float(count)
            print "%f,%f" % (float(minute), float(ave_score))

    def make_google_maps_file(self, route=None, user_id=None, trip=None, data_dir=None):
        # route=50410, user_id=559, trip=15, data_dir='data/shed9'
        """
        Write an HTML map overlaying a user trip (coloured markers) on a
        bus route polyline. Example marker-dict entries:
        1: { center: {lat: 52.878, lng: -106.629},},
        2: { center: {lat: 52.714, lng: -106.7005},},
        3: { center: {lat: 52.052, lng: -106.7243},},
        4: { center: {lat: 52.25, lng: -106.71},}
        """
        from map_html import ROUTE_BOTTOM, ROUTE_TOP, ROUTE_MIDDLE
        # Load the route polyline, converting UTM back to lat/lon.
        route_file = "data/transit/route_points_%06d.csv" % route
        f = open(route_file, "r")
        line_count = 0
        path = []
        for line in f:
            line_count += 1
            if line_count == 1: continue
            parts = line.split(",")
            utm_x = float(parts[1].strip())
            utm_y = float(parts[2].strip())
            lon, lat = self._myproj(utm_x, utm_y, inverse=True)
            path.append((lat, lon))
        f.close()
        #############################
        # Load the user trip points.
        trip_file = "%s/user_trips/user_trip_%d_%d.csv" % (data_dir, user_id, trip)
        f = open(trip_file, "r")
        line_count = 0
        trip_list = []
        for line in f:
            line_count += 1
            if line_count == 1: continue
            parts = line.split(",")
            utm_x = float(parts[0].strip())
            utm_y = float(parts[1].strip())
            print "trip parts", parts
            lon, lat = self._myproj(utm_x, utm_y, inverse=True)
            trip_list.append((lat, lon))
        f.close()
        ##################################
        f = open("%s/map_trip_%d_%d_%d.html" % (data_dir, route, user_id, trip), "w")
        f.write("%s\n" % ROUTE_TOP)
        for item in path:
            f.write("{lat: %f, lng: %f},\n" % (item[0], item[1]))
        # 1: { center: {lat: 52.878, lng: -106.629},},
        f.write("%s\n" % ROUTE_MIDDLE)
        print len(trip_list)
        for i, item in enumerate(trip_list):
            # Colour fades from red (trip start) to green (trip end).
            g = int(250.0 * float(i)/float(len(trip_list)))
            r = 256 - g
            # NOTE(review): r is 256 when g == 0, making "%02x" emit three
            # hex digits ("100") -- an invalid colour for the first marker.
            c = "#%02x%02x%02x" % (r,g,0)
            print c
            f.write('%d: {center:{lat: %f, lng: %f},color:"%s"},\n' % (i, item[0], item[1], c))
        f.write("%s\n" % ROUTE_BOTTOM)
        # NOTE(review): the output file handle is never closed here.
if __name__ == "__main__":
    # Ad-hoc driver: earlier experiments are kept commented out for
    # reference; uncomment the one you want to run.
    runner = Runner()
    # runner.list_gps_users()
    # raise ValueError('done')
    # runner.make_bus_stops()
    # runner.make_bus_files()
    #runner.find_points_near_stops(user_id=555, radius=100)
    #runner.check_user_stops(user_id=555, radius=100)
    # runner.make_stop_map(user_id=555, radius=100, stop_index=660)
    #runner.find_points_near_stops(user_id=513, radius=100)
    #runner.check_user_stops(user_id=513, radius=100)
    #runner.make_stop_map(user_id=513, radius=100, stop_index=1069)
    #runner.find_points_near_stops(user_id=1111, radius=50)
    #runner.check_user_stops(user_id=1301, radius=100)
    #runner.make_stop_map(user_id=1301, radius=100, stop_index=34)
    #runner.compare_trips_to_routes()
    runner.correlate_test()
    #runner.make_google_maps_file(route=49989, user_id=1346, trip=34, data_dir='data/shed10')
    #runner.make_google_maps_file(route=50402, user_id=559, trip=137, data_dir='data/shed9')
|
from urllib.request import urlopen
from bs4 import BeautifulSoup as bs
import datetime
import random
import json
import re
random.seed(datetime.datetime.now())
def getCountry(ipaddr):
    """Look up the country name for an IP address via the ipstack API."""
    url = "http://api.ipstack.com/"+ipaddr+"?access_key=db42b142218bddc287a5eaaef13deebc"
    payload = json.loads(urlopen(url).read().decode("utf-8"))
    return payload.get("country_name")
def getLinks(url):
    """Return all internal /wiki/ article links from a Wikipedia page body."""
    page = bs(urlopen("http://www.wikipedia.org/"+url), "html.parser")
    body = page.find("div", {"id":"bodyContent"})
    # Article links only: start with /wiki/ and contain no namespace colon.
    return body.findAll("a", href=re.compile("^(/wiki/)((?!:).)*$"))
def getIP(pageUrl):
    """Collect anonymous editors' IP addresses from a page's edit history."""
    title = pageUrl.replace("/wiki/", "")
    historyUrl = "http://en.wikipedia.org/w/index.php?title="+title+"&action=history"
    soup = bs(urlopen(historyUrl), "lxml")
    # Links with class "mw-anonuserlink" carry IP addresses instead of
    # usernames; a set removes duplicates across revisions.
    anon_links = soup.findAll("a", {"class":"mw-anonuserlink"})
    return {link.get_text() for link in anon_links}
# Random walk over Wikipedia: dump anonymous editors' countries for every
# link on the current page, then hop to a random link and repeat.
links = getLinks("/wiki/Python_(programming_language)")
while len(links) > 0:
    for link in links:
        print("-------------------")
        for historyIP in getIP(link.attrs["href"]):
            print(historyIP, getCountry(historyIP))
    links = getLinks(links[random.randint(0, len(links)-1)].attrs["href"])
|
# Payment menu: discounts for cash/one-shot card, surcharge for installments.
valor = input('[1]NO DINHEIRO A VISTA \n [2] NO CARTÃO DE CREDITO A VISTA \n [3] EM 2 VEZES NO CARTÃO \n [4] EM 3 X OU MAIS NO CARTÃO.')
valorf = float(input('Digite o valor do produto: R$ '))

desconto1 = valorf * 10 / 100   # 10% discount: cash up front
descontof = valorf - desconto1
cartao1 = valorf * 5 / 100      # 5% discount: card, single payment
cartaof = valorf - cartao1
cartao3x = valorf * 20 / 100    # 20% surcharge: 3+ installments
cartao3xf = valorf + cartao3x

print('*' * 50)
print('TABELA DE PRECOS DE ACORDO COM AS NOSSAS CONDIÇÕES: ')
print('*' * 50)
if valor == '1':
    print(f'O valor do produto sera de {descontof} reais. desconto de {desconto1} reais ')
    print('MUITO OBRIGADO POR COMPRAR NA NOSSA LOJA, VOLTE SEMPRE <3 ')
elif valor == '2':
    print(f'O valor do produto sera de {cartaof} reais. desconto de {cartao1} reais.')
    print('MUITO OBRIGADO POR COMPRAR NA NOSSA LOJA, VOLTE SEMPRE <3 ')
elif valor == '3':
    # Two installments: no interest, no discount.
    cartaosemjuros = valorf / 2
    print(f' voce pagara em 2x de {cartaosemjuros} reais. desconto de 0 reais. ')
    print('MUITO OBRIGADO POR COMPRAR NA NOSSA LOJA, VOLTE SEMPRE <3 ')
elif valor == '4':
    valorff = int(input('Quantas parcelas serão?'))
    # cartao3x / cartao3xf already hold the surcharged totals from above.
    cartao4x = cartao3xf / valorff
    juros = cartao3x / valorff
    print(f'voce pagara {valorff}x parcelas de {cartao4x} reais com juros total de {cartao3x} \n cada parcela paga {juros} de juros em cada parcela sobre o valor total do produto {valorf}')
    print('MUITO OBRIGADO POR COMPRAR NA NOSSA LOJA, VOLTE SEMPRE <3 ')
else:
    print('ERROR, você digitou algo de errado, tente novamente.')
|
"""测试计算器 减法部件"""
import unittest
from count import Count
class MySubTest(unittest.TestCase):
    """Unit tests for the calculator's subtraction component."""

    def test_sub1(self):
        """Integer subtraction: 3 - 1 == 2."""
        # exercise
        count = Count()
        result = count.sub(3, 1)
        # actual result
        actual_result = result
        # expected result
        expect_result = 2
        # assertion
        self.assertEqual(actual_result, expect_result)

    def test_sub2(self):
        """Float subtraction: 3.1 - 2.1 is approximately 1."""
        # exercise
        count = Count()
        result = count.sub(3.1, 2.1)
        # Fix: 3.1 - 2.1 is 0.9999999999999998 in binary floating point,
        # so the original assertEqual(result, 1) always failed; compare
        # approximately instead (default 7 decimal places).
        self.assertAlmostEqual(result, 1)
import subprocess
import shutil
import os
def run_cmd(cmd_list):
    """Run *cmd_list*, returning (decoded stdout, exit code)."""
    proc = subprocess.Popen(cmd_list, stdout=subprocess.PIPE)
    captured = proc.communicate()[0]
    return captured.decode(), proc.returncode
def check(binary, randomize):
    """
    Check if the corrected source code is still usable

    Runs the binary against a fixed list of input/expected-output checks
    and returns True only if all of them pass.
    NOTE(review): `randomize` is accepted but unused -- presumably kept
    for checker-interface compatibility; confirm against the harness.
    """
    current_path = os.path.dirname(os.path.realpath(__file__))
    # Copy the challenge key next to this script and restrict ownership.
    shutil.copyfile('/srv/ctf_go/challs/data_exposure.dir/key', '{}/key'.format(current_path))
    os.system('chown root:data_exposure ' + '{}/key'.format(current_path))
    # Each check: argv tail, required substring in output, failure message.
    checks = [
        {
            'params': ['odtokjupfpenmtyo'],
            'response': 'here is the encrypted text',
            'message': 'Encryption API is broken. Expecting : here is the encrypted text\n\n Your code output \n\n{}',
        },
        {
            'params': [''],
            'response': 'here is the encrypted text',
            'message': 'Encryption API is broken. Without user input, return exemple found in secret.\n Expecting : here is the encrypted text\n\n Your code output \n\n{}',
        },
        {
            'params': ['tooshort'],
            'response': 'encryption problem!',
            'message': 'Encryption API is broken. Expecting : Encryption problem!\n\n Your code output \n\n{}',
        },
    ]
    for _check in checks:
        _check['params'].insert(0, binary)  # argv[0] is the binary under test
        streamdata, return_code = run_cmd(_check['params'])
        # Fail on non-zero exit, or when the expected text is missing
        # (the output comparison is case-insensitive).
        if return_code != 0 or _check['response'] not in streamdata.lower():
            print(_check['message'].format(streamdata))
            return False
    return True
|
#coding:utf-8
import cv2 as cv
import numpy as np
def access_pixels(image):
    """Print and return the basic geometry of an image array.

    Parameters:
        image: array with at least three dimensions, indexed as
            (height, width, channels) -- the layout cv.imread produces.

    Returns:
        (height, width, channels). The original computed these values and
        discarded them; returning them is backward-compatible because the
        only call site ignores the result.
    """
    print(image.shape)
    height = image.shape[0]
    width = image.shape[1]
    channels = image.shape[2]
    return height, width, channels
'''
#三通道图像
img = np.zeros([400,400,3],np.uint8)
img[:,:,0] = np.ones([400,400])*255
#img[:,:,1] = np.ones([400, 400])*255
#img[:,:,2] = np.ones([400, 400])*255
'''
'''
img = np.zeros([400,400,1],np.uint8)
img[:,:,0] = np.ones([400,400])*127
cv.imshow("create_iamge", img)
'''
# Create a small demo array
m1 = np.ones([3,3],np.uint8)
# Fill it with a constant value
m1.fill(122)
print(m1)
# Reshape the 2-D array into a 1-D row
m2 = m1.reshape([1,9])
print(m2)
print("-----Hi,Python!------")
src = cv.imread("D:/python_file/Opencv3_study_file/images/!face.png")
cv.namedWindow("NO.1 image",cv.WINDOW_AUTOSIZE)
cv.imshow("NO.1 image",src)
# Tick count before the (currently disabled) pixel pass
t1 = cv.getTickCount()
#access_pixels(src)
# Tick count after
t2 = cv.getTickCount()
# Elapsed time in milliseconds (measures ~0 while the call is commented out)
t3=(t2-t1)/cv.getTickFrequency()*1000
print(f"time:{t3}")
cv.waitKey(0)
cv.destroyAllWindows()
from magellan.models import BaseExtractor
from django.utils.html import strip_tags
from django.utils.encoding import force_unicode
class moinmoinwiki(BaseExtractor):
    """Extractor for MoinMoin wiki pages: plain text with HTML removed."""

    def get_content(self):
        """Return the page content as tag-stripped unicode text."""
        return strip_tags(force_unicode(self.content))
|
from chain_table import ChainTable
class ChainTableDemos(ChainTable):
    """Classic linked-list exercises built on the project's ChainTable.

    NOTE(review): nodes are accessed via `.data` / `._next`; the exact
    node layout (and whether `head` is a sentinel) is defined in
    chain_table.py -- confirm there.
    """

    def reverse(self):
        """Reverse the list in place."""
        if self.isEmpty():
            return
        if not self.head._next:
            return  # single node: nothing to reverse
        prev = None
        node = None
        next = self.head  # NOTE(review): shadows the `next` builtin
        # Walk the chain, re-pointing each node at its predecessor.
        while next._next:
            prev = node
            node = next
            next = next._next
            node._next = prev
        # Link the final pair and move the head pointer.
        next._next = node
        self.head = next

    def isLoop(self):
        """Detect a cycle using Floyd's fast/slow two-pointer method."""
        isLoopExist = False
        fast = self.head
        slow = self.head
        while fast and fast._next:
            slow = slow._next
            fast = fast._next._next
            if slow == fast:
                isLoopExist = True
                break
        return isLoopExist

    def removeNthFromEnd(self, n):
        """Remove the n-th node counted from the end of the list."""
        if n <= 0:
            # NOTE(review): prints an error but does not return; execution
            # continues with the invalid n.
            print("error: n is wrong")
        end = self.head
        before = self.head
        j = 0
        # Advance `end` n nodes ahead of `before`.
        while j < n:
            end = end._next
            j += 1
        # NOTE(review): `j < self.length` is constant here (j is no longer
        # incremented); the `end._next` test is what ends the loop.
        while end._next and j < self.length:
            end = end._next
            before = before._next
        # `before` now precedes the target node; unlink it.
        before._next = before._next._next

    def getMiddleNode(self):
        """Return the middle node (right-biased for even lengths)."""
        if self.isEmpty():
            # NOTE(review): prints but falls through; the head access below
            # may then fail on an empty list.
            print("error: chain table is empty")
        if not self.head._next:
            return self.head
        fast = self.head
        slow = self.head
        # Fast pointer moves two steps per slow step; when it reaches the
        # end, slow is at the middle.
        while fast and fast._next:
            fast = fast._next._next
            slow = slow._next
        return slow
def mergeTwoChainTables(l1, l2):
    """Merge two sorted chain tables into one sorted node chain.

    NOTE(review): returns the first *node* of the merged chain (callers
    walk it via `.data`/`._next`), not a ChainTable instance. The merge
    starts at `l1.head`/`l2.head` directly -- if head is a sentinel node
    the sentinels are included; confirm against the ChainTable class.
    """
    if not l1:
        return l2
    if not l2:
        return l1
    node1 = l1.head
    node2 = l2.head
    # Pick the smaller head as the start of the merged chain.
    if node1.data < node2.data:
        node = node1
        node1 = node1._next
    else:
        node = node2
        node2 = node2._next
    chain_table = node
    # Standard two-pointer merge: always append the smaller current node.
    while node1 and node2:
        if node1.data < node2.data:
            node._next = node1
            node = node._next
            node1 = node1._next
        else:
            node._next = node2
            node = node._next
            node2 = node2._next
    # Append whatever remains of the longer chain.
    if node1:
        node._next = node1
    if node2:
        node._next = node2
    return chain_table
import unittest
class ChainTableDemosTest(unittest.TestCase):
    """Tests for ChainTableDemos; each test starts from a list of 0..99."""

    def setUp(self):
        self.chain_table = ChainTableDemos()
        for i in range(100):
            self.chain_table.append(i)

    def test_reverse(self):
        end = self.chain_table.reverse()
        # After reversal, index i holds 99 - i.
        for i in range(0, 100):
            self.assertEqual(self.chain_table[i], 99-i)

    def test_middle_node(self):
        node = self.chain_table.getMiddleNode()
        self.assertEqual(node.data, 50)  # 100 nodes: right-biased middle
        self.chain_table.append("a")
        node = self.chain_table.getMiddleNode()
        self.assertEqual(node.data, 50)  # 101 nodes: exact middle
        self.chain_table.append("b")
        node = self.chain_table.getMiddleNode()
        self.assertEqual(node.data, 51)  # 102 nodes: right-biased again

    def test_has_loop(self):
        is_loop = self.chain_table.isLoop()
        self.assertEqual(is_loop, False)
        # Manufacture a cycle by pointing a later node back into the list.
        node = self.chain_table.head._next._next._next
        for i in range(30):
            node = node._next
        node._next = self.chain_table.head._next._next._next._next._next._next
        is_loop = self.chain_table.isLoop()
        self.assertEqual(is_loop, True)

    def test_remove_from_end(self):
        self.chain_table.removeNthFromEnd(1)
        self.assertEqual(self.chain_table[98], 98)
        self.chain_table.removeNthFromEnd(2)
        self.assertEqual(self.chain_table[97], 98)
        self.assertEqual(self.chain_table[96], 96)
        self.chain_table.removeNthFromEnd(3)
        self.assertEqual(self.chain_table[96], 98)
        self.assertEqual(self.chain_table[95], 96)
        self.assertEqual(self.chain_table[94], 94)

    def test_merge_chain_table(self):
        # Evens in one list, odds in the other; both are already sorted.
        chain_table1 = ChainTable()
        chain_table2 = ChainTable()
        for i in range(0, 100):
            if i % 2 == 0:
                chain_table1.append(i)
            else:
                chain_table2.append(i)
        chain_table = mergeTwoChainTables(chain_table1, chain_table2)
        # Walks the return value as a node chain (`.data`/`._next`),
        # matching mergeTwoChainTables's return type.
        for i in range(0, 100):
            self.assertEqual(chain_table.data, i)
            chain_table = chain_table._next
if __name__ == '__main__':
    # Run the linked-list unit tests when executed directly.
    unittest.main()
|
# Generated by Django 4.1.7 on 2023-03-10 16:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Election.requires_voter_id: which law or pilot mandates voter ID."""

    dependencies = [
        ("elections", "0066_alter_explanation_explanation"),
    ]

    operations = [
        migrations.AddField(
            model_name="election",
            name="requires_voter_id",
            field=models.CharField(
                choices=[
                    ("EFA-2002", "Electoral Fraud (Northern Ireland) Act 2002"),
                    ("EA-2022", "Elections Act 2022"),
                    ("pilot-2018", "2018 voter ID pilot scheme"),
                    # Fix: the pilot-2019 label was a copy-paste of the 2018
                    # one. Choices labels aren't stored in the database, so
                    # this is safe post-application; keep the model's field
                    # definition in sync with this label.
                    ("pilot-2019", "2019 voter ID pilot scheme"),
                ],
                max_length=100,
                null=True,
            ),
        ),
    ]
|
import os
import subprocess
# Build command. The *.cpp glob must be expanded by the shell, so
# shell=True is required here; the command is a fixed constant, not
# user input, so this is safe.
x = "g++ -std=c++11 *.cpp"
# os.system("echo " + x)
# Fix: reuse the command string instead of duplicating the literal.
subprocess.call(x, shell=True)
#!/usr/bin/env python
from rpyc.utils.zerodeploy import DeployedServer
from plumbum import SshMachine
from monitoring import *
# replace items in <> with your specific values
def main():
    """Tunnel to a remote device over SSH, deploy an rpyc server there, and
    run a watchdog Monitor against it (Python 2 script; placeholders in <>
    must be replaced with real values).

    NOTE(review): Monitor/Service/ProcessInfo/Time/endMonitoring come from
    ``from monitoring import *`` — semantics assumed from usage; verify in
    that module.
    """
    # Create Ssh tunnel to device
    print 'Building ssh tunnel to device...'
    mach = SshMachine("<xxx.xxx.xxx.xxx>", "<username>")
    print 'Connected'
    # Deploy rpyc server to device
    print 'Deploying rpyc server to device...'
    with DeployedServer(mach) as server:
        conn = server.classic_connect()
        print 'Deployed'
        i = Monitor(
            Service(
                name="remote-service-watchdog",
                monitor=(
                    # Monitor <sshd> process
                    ProcessInfo(command='<sshd>',
                        freq=Time.ms(1000),
                        fail=[
                            endMonitoring()
                        ],
                        conn=conn
                    )
                )
            )
        ).run()
        print i
# Entry point when run as a script.
if __name__ == '__main__':
    main()
|
import os, stat, sys, textwrap
import host
from DPjudge import *
from DPjudge.Game import Time
class Check(Status):
    # ----------------------------------------------------------------------
    """
    This class is invoked by the cron job to check deadlines on all games,
    and handle each when and as necessary.
    The following flags can be added:
    -t To check the timestamp of the last run, and not run if too much
    time has elapsed (more than an hour).
    This could indicate that the server went down. If this happens, the
    judgekeeper should first make sure that all games are ok and extend the
    deadline of at least all active games. He can then run the check script
    manually once without any flags, or remove the timestamp file in the log
    directory ($JDG/log/check.tim). This will reactivate the cron job.
    Other flags:
    -r To send only reminders for inactive games
    -a To check only on active games (opposite of -r)
    If neither -a or -r is specified, the program will check on active games
    and once a day, at the midnight hour, send reminders for inactive games.
    """
    # ----------------------------------------------------------------------
    def __init__(self, argv = None):
        """Load the status table and check/handle every game's deadline."""
        Status.__init__(self)
        if argv is None: argv = sys.argv
        now = Time('GMT')
        tsf = host.logDir + '/check.tim'
        tsf2 = host.logDir + '/check2.tim'
        # With -t, bail out when more than an hour has elapsed since the
        # previous run (suspected server outage) and warn the judgekeeper.
        if '-t' in argv and os.path.exists(tsf):
            last = os.path.getmtime(tsf)
            curr = now.seconds()
            if last + 3600 < curr:
                # Use a second timestamp file to keep track of subsequent
                # server outages.
                last2, again = 0, False
                if os.path.exists(tsf2):
                    last2 = os.path.getmtime(tsf2)
                    if last2 > last and last2 + 3600 < curr:
                        last, again = last2, True
                hours = int((curr - last) / 3600)
                days, hours = hours / 24, hours % 24
                # Compose a human-readable outage report (word-wrapped).
                msg = '\n'.join(map(lambda x: '\n'.join(textwrap.wrap(x, 75)),
                    ('Attention: ' + ['More than ', 'Once again '][again] +
                    (days > 1 and ('%d days ' % days) or
                    days == 1 and 'a day ' or '') +
                    (days > 0 and hours > 0 and 'and ' or '') +
                    (hours > 1 and ('%d hours ' % hours) or hours == 1 and
                    'an hour ' or '') + (days + hours > 1 and 'have ' or
                    'has ') + 'passed since the last check. ' +
                    ['This could be due to a server outage or an exception ' +
                    'raised during the execution of the check script. ' +
                    'As a precaution automatic deadline checking has been ' +
                    'disabled. ',
                    'This is probably caused by another server outage. ' +
                    'Automatic deadline checking is still disabled. '][again] +
                    '\n\nInvestigate, extend deadlines if necessary, ' +
                    'and only then run check once more without the ' +
                    '-t option to restart the process.').split('\n')))
                print(msg)
                # Warn the judgekeeper.
                if not last2 or again:
                    mail = Mail(host.judgekeeper,
                        '%s server outage' % host.dpjudgeID)
                    mail.write(msg)
                    mail.close()
                open(tsf2, 'w').close()
                raise ServerOutageSuspected
        print 'Checking deadlines at %s GMT' % now.cformat()
        # Split argv into option flags and an optional explicit game list.
        flags = [x for x in argv[1:] if x.startswith('-')]
        gameList = [x for x in argv[1:] if not x.startswith('-')]
        for gameName, data in self.dict.items():
            if 'completed' in data or 'held' in data or 'terminated' in data:
                continue
            if gameList and gameName not in gameList: continue
            #print('Checking %s' % gameName)
            try: game = self.load(gameName)
            except:
                print gameName, 'DOES NOT EXIST!'
                continue
            if not game.master or len(game.master) != 3:
                print game.name, 'HAS NO MASTER!'
                continue
            # -----------------------------------------------------------
            # At the midnight hour or if the -r flag is set, remind a
            # Master of any errors or any forming, waiting, or unprepared
            # games he has.
            # -----------------------------------------------------------
            if 'NO_DEADLINE' in game.rules: line = None
            else:
                line = game.deadline
                if 'active' in data and not line: game.error += ['NO DEADLINE']
            if '-r' not in flags and (
                '-a' in flags or now[-4:] >= '0020'): pass
            elif game.error:
                print game.name, 'has ERRORS ... notifying the Master'
                for addr in game.master[1].split(',') + [
                    host.judgekeeper] * (not game.tester):
                    mail = Mail(addr,
                        'Diplomacy ERRORS (%s)' % game.name)
                    mail.write("The game '%s' on %s has the following "
                        'errors in its status file:\n\n%s\n\nLog in at\n'
                        ' %s?game=%s\nto correct the errors!\n\n'
                        'Thank you,\nThe DPjudge\n' %
                        (game.name, host.dpjudgeID, '\n'.join(game.error),
                        host.dpjudgeURL, game.name))
                    mail.close()
            elif 'active' in data:
                if line and game.deadlineExpired('1W'):
                    critical = game.deadlineExpired('4W')
                    # NOTE(review): this loops over addr but always mails
                    # host.judgekeeper; addr is only used for the
                    # salutation below. Possibly intended Mail(addr, ...) —
                    # verify before changing.
                    for addr in game.master[1].split(',') + [
                        host.judgekeeper] * (not game.tester and critical):
                        mail = Mail(host.judgekeeper,
                            'Diplomacy game alert (%s)' % game.name)
                        mail.write("%s:\n\nThe %s game '%s' on %s is "
                            'past its deadline for more than %s.\n\n'
                            'Visit the game at\n'
                            ' %s?game=%s\nfor more information.\n\n'
                            'Thank you,\nThe DPjudge\n' %
                            (addr == host.judgekeeper and 'JudgeKeeper'
                            or 'Master', game.private and 'private' or 'public',
                            game.name, host.dpjudgeID, critical and '4 weeks'
                            or '1 week', host.dpjudgeURL, game.name))
                        mail.close()
            elif 'terminated' not in data:
                reason = ''
                if 'waiting' in data:
                    state = 'waiting'
                    if game.avail:
                        reason = ' Need to replace %s.' % ', '.join([
                            game.anglify(x[:x.find('-')]) + x[x.find('-'):]
                            for x in game.avail])
                    if line and game.deadlineExpired('8W'):
                        mail = Mail(host.judgekeeper,
                            'Diplomacy game alert (%s)' % game.name)
                        mail.write("JudgeKeeper:\n\nThe %s game '%s' on %s is "
                            'in the %s state for more than 8 weeks.%s\n\n'
                            'Visit the game at\n'
                            ' %s?game=%s\nfor more information.\n\n'
                            'Thank you,\nThe DPjudge\n' %
                            (game.private and 'private' or 'public',
                            game.name, host.dpjudgeID, state, reason,
                            host.dpjudgeURL, game.name))
                        mail.close()
                elif 'forming' in data:
                    state = 'forming'
                    spots = game.avail and int(game.avail[0]) or (
                        len(game.map.powers) - len(game.map.dummies))
                    reason = ' %d position%s remain%s.' % (
                        spots, 's'[spots == 1:], 's'[spots != 1:])
                else: state = data[1]
                print game.name, 'is in the %s state' % state,
                print '... reminding the Master'
                for addr in game.master[1].split(','):
                    mail = Mail(addr,
                        'Diplomacy game reminder (%s)' % game.name)
                    mail.write("GameMaster:\n\nThe game '%s' on %s is "
                        'still in the %s state.%s\n\nVisit the game at\n'
                        ' %s?game=%s\nfor more information.\n\n'
                        'Thank you,\nThe DPjudge\n' %
                        (game.name, host.dpjudgeID, state, reason,
                        host.dpjudgeURL, game.name))
                    mail.close()
            if game.error or 'active' not in data or (
                '-r' in flags and '-a' not in flags): continue
            # ---------------------------------------------------
            # Check for expired grace periods (auto-CD or RESIGN)
            # ---------------------------------------------------
            graceOver = game.graceExpired() and not game.avail
            if graceOver and 'CIVIL_PREVIEW' not in game.rules: game.delay = 0
            if game.delay:
                game.delay -= 1
                print game.name, 'is delayed, now delay is', game.delay
            elif graceOver: # and game.latePowers()
                if 'CIVIL_DISORDER' in game.rules:
                    print game.name, 'will process using CIVIL_DISORDER'
                elif 'CIVIL_PREVIEW' in game.rules:
                    game.changeStatus('waiting')
                    game.delay, game.preview = 72, 1
                    game.save()
                    print game.name, 'will preview using CIVIL_PREVIEW'
                else:
                    print game.name, 'will RESIGN its late player(s)'
                    game.lateNotice()
                    game.changeStatus('waiting')
                    game.save()
                    continue
                game.process(now = 1)
            elif game.ready() and not game.await:
                game.preview = 'PREVIEW' in game.rules
                print (game.name + ' is ready and will be pr%sed now' %
                    ('ocess', 'eview')[game.preview])
                if game.preview:
                    game.delay = 72
                    game.save()
                try: game.process(now = 1)
                except: pass
                # If the game is still AWAIT after processing, adjudication
                # failed; alert the judgekeeper.
                if game.await:
                    mail = Mail(host.judgekeeper,
                        'Diplomacy adjudication error! (%s)' % game.name)
                    mail.write('JudgeKeeper:\n\nThe game %s on %s\n'
                        'encountered an error during adjudication\n'
                        'and is still in the AWAIT state.\n' %
                        (game.name, host.dpjudgeID))
                    mail.close()
            elif line and game.deadlineExpired():
                print game.name, 'is not ready but is past deadline'
                game.lateNotice()
                # ---------------------------------------
                # Reschedule to check game in eight hours
                # ---------------------------------------
                game.delay = 24
            elif line:
                print game.name, 'is not to deadline yet',
                hey, when = game.latePowers(), game.timing.get('WARN', '4H')
                for warn in when.split(','):
                    if warn[:-1] != '0' and hey:
                        hit = line.offset('-' + warn)
                        # ------------------------------------------
                        # Note that we don't need to change the time
                        # zone for "now", since internally the Time
                        # class compares the timestamps
                        # ------------------------------------------
                        if hit <= now < hit.offset('20M'):
                            print '... sending reminders',
                            game.lateNotice()
                            break
                print
                continue
            if not game.preview: game.save()
        # Successful run: clear the outage marker and refresh the timestamp.
        if os.path.exists(tsf2):
            try: os.unlink(tsf2)
            except: pass
        open(tsf, 'w').close()
    # ----------------------------------------------------------------------
|
raise ImportError, 'the nn_tools.Caffe.net is no longer used, please use nn_tools.Caffe.caffe_net'
|
import pygame
# Minimal pygame demo: paint a background image and draw a red dot that
# follows the mouse until the window is closed.
pygame.init()
clock = pygame.time.Clock()
# colors
FON = [0, 0, 0]
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
# Set the height and width of the screen
SIZE = [1000, 900]
# screen = pygame.display.set_mode(SIZE)
pygame.display.set_caption("my game")
# add fon photo, need add animation on winter -> clouds move
DISPLAYSURF = pygame.display.set_mode(SIZE, 0, 0)
# NOTE(review): 'crane.jpg' must exist in the working directory or load() raises.
cran = pygame.image.load('crane.jpg').convert()
# position of red dot
i = 10
j = 10
done = False
while not done:
    for event in pygame.event.get(): # User did something
        if event.type == pygame.QUIT: # If user clicked close
            done = True # Flag that we are done so we exit this loop
        # keyboard control
        # if event.type == pygame.KEYDOWN:
        # if event.key == pygame.K_LEFT:
        # i -= 15
        # elif event.key == pygame.K_RIGHT:
        # i += 15
        # if event.type == pygame.KEYUP:
        # if event.key == pygame.K_DOWN:
        # j += 15
        # if event.key == pygame.K_UP:
        # j -= 15
    player_position = pygame.mouse.get_pos()
    x = player_position[0]
    y = player_position[1]
    # fon of game
    DISPLAYSURF.fill(FON)
    DISPLAYSURF.blit(cran, [0, 0])
    # NOTE(review): update() shows the frame before the circle is drawn; the
    # circle only becomes visible at the flip() below — confirm intended.
    pygame.display.update()
    pygame.draw.circle(DISPLAYSURF, RED, [x, y], 5)
    pygame.display.flip()
    clock.tick(20)
pygame.quit()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, HttpResponse, redirect
from django.core.urlresolvers import reverse
from .forms import Register, Login
from django.contrib import messages
from .models import User
import bcrypt
# Create your views here.
def index(request):
    """Render the landing page with blank registration and login forms."""
    context = {
        "registration_form": Register(),
        "login_form": Login(),
    }
    return render(request, "login_reg/index.html", context)
def success(request, id, session_type):
    """Show the success page for the given user id and session type."""
    user = User.objects.get(id=id)
    return render(
        request,
        "login_reg/success.html",
        {"user": user, "session_type": session_type},
    )
def process(request):
    """Handle a registration POST.

    Validates the submission, creates the User (password bcrypt-hashed),
    logs the new user in via the session, and redirects to the success page.
    On validation errors, flashes messages and redirects back to the index.

    Bug fix: non-POST requests and POSTs with an invalid form previously
    fell off the end and returned None, which makes Django raise a 500;
    they now redirect back to the index page.
    """
    if request.method == "POST":
        errors = User.objects.registration_validate(request.POST)
        if len(errors):
            # Flash each validation error, then bounce back to the form.
            for tab, error in errors.iteritems():
                messages.error(request, error)
            return redirect(reverse("logreg:logreg_index"))
        else:
            register = Register(request.POST)
            if register.is_valid():
                user = User()
                user.first_name = register.cleaned_data["first_name"].capitalize()
                user.last_name = register.cleaned_data["last_name"].capitalize()
                user.email = register.cleaned_data["email"]
                user.birthday = register.cleaned_data["birthday"]
                # Never store the plaintext password.
                user.password = bcrypt.hashpw(register.cleaned_data["password"].encode(), bcrypt.gensalt())
                user.save()
                curr_user = User.objects.get(email=user.email)
                try:
                    request.session["user_logged"]
                except KeyError:
                    # Not logged in yet: establish the session.
                    request.session["user_logged"] = True
                    request.session["user_id"] = curr_user.id
                    request.session["session_type"] = "registration"
                return redirect(reverse("logreg:logreg_success", kwargs={'id':request.session["user_id"],'session_type':request.session["session_type"]}))
    # Fallback for GETs and invalid forms (previously returned None).
    return redirect(reverse("logreg:logreg_index"))
def login(request):
    """Handle a login POST.

    Validates credentials, establishes the session, and redirects to the
    success page; on errors, flashes messages and redirects to the index.

    Bug fix: non-POST requests and POSTs with an invalid form previously
    fell off the end and returned None (a 500 in Django); they now
    redirect back to the index page.
    """
    if request.method == "POST":
        errors = User.objects.login_validate(request.POST)
        if len(errors):
            for tab, error in errors.iteritems():
                messages.error(request, error)
            return redirect(reverse("logreg:logreg_index"))
        else:
            login = Login(request.POST)
            if login.is_valid():
                # Credentials already validated by login_validate above.
                curr_user = User.objects.get(email=request.POST["email"])
                try:
                    request.session["user_logged"]
                except KeyError:
                    request.session["user_logged"] = True
                    request.session["user_id"] = curr_user.id
                    request.session["session_type"] = "login"
                return redirect(reverse("logreg:logreg_success", kwargs={'id':request.session["user_id"],'session_type':request.session["session_type"]}))
    # Fallback for GETs and invalid forms (previously returned None).
    return redirect(reverse("logreg:logreg_index"))
def logout(request):
    """Log the user out and return to the index page.

    Bug fix: uses session.pop() instead of del — del raised KeyError when
    the key was absent (logout clicked twice, or an expired session).
    """
    request.session.pop("user_logged", None)
    return redirect(reverse("logreg:logreg_index"))
# Read the three candidate segment lengths.
lado_a = float(input('Digite o primeiro segmento:'))
lado_b = float(input('Digite o segundo segmento:'))
lado_c = float(input('Digite o terceiro segmento:'))
# Triangle inequality: each side must be shorter than the sum of the others.
forma_triangulo = (lado_a < lado_b + lado_c
                   and lado_b < lado_a + lado_c
                   and lado_c < lado_a + lado_b)
if forma_triangulo:
    print('Esses segmentos podem formar um triângulo')
else:
    print('Esses segmentos não podem formar um triângulo')
################################################################################
### Dictionaries
# A dictionary is similar to a list, but you access values by looking up a key instead of an index.
# A key can be any string or number. Dictionaries are enclosed in curly braces.
d = {'key1' : 1, 'key2' : 2, 'key3' : 3}
print d # {'key3': 3, 'key2': 2, 'key1': 1}
residents = {'Puffin' : 104, 'Sloth' : 105, 'Burmese Python' : 106}
print residents['Puffin'] # 104
print residents['Sloth'] # 105
print residents["Burmese Python"] # 106
# Add element:
# dict_name[new_key] = new_value
menu = {} # Empty dictionary
menu['Chicken Alfredo'] = 14.50 # Adding new key-value pair
menu["kampot"] = 4.5
# The length len() of a dictionary is the number of key-value pairs it has.
# Each pair counts only once, even if the value is a list.
# (That's right: you can put lists inside dictionaries!)
print len(menu)
print menu
# Items can be removed from a dictionary with the del command:
# del dict_name[key_name]
zoo_animals = {
'Unicorn' : 'Cotton Candy House',
'Sloth' : 'Rainforest Exhibit',
'Bengal Tiger' : 'Jungle House',
'Atlantic Puffin' : 'Arctic Exhibit',
'Rockhopper Penguin' : 'Arctic Exhibit'
}
del zoo_animals['Bengal Tiger']
menus = {
Breakfast Menu: ['Spam n Eggs', 'Spam n Jam', 'Spam n Ham'],
Lunch Menu: ['SLT (Spam-Lettuce-Tomato)', 'PB&S (PB&Spam)'],
Dinner Menu: ['Spalad', 'Spamghetti', 'Spam noodle soup']
}
################################################################################
webster = {
"Aardvark" : "A star of a popular children's cartoon show.",
"Baa" : "The sound a goat makes.",
"Carpet": "Goes on the floor.",
"Dab": "A small amount."
}
for key in webster:
print webster[key]
|
# Read travel time and average speed, then report fuel used at 12 km/l.
tempo = int(input())
velocidade = int(input())
consumo = velocidade * tempo / 12
print('{:.3f}'.format(consumo))
import numpy as np
from ..base_transforms import DenseTransform
from ..decorators import multiple
from ..transform import Transform
__all__ = ['onehot', 'Onehot']
@Transform.register()
class Onehot(DenseTransform):
    """Transform object wrapping the :func:`onehot` function.

    NOTE(review): ``self.collect(locals())`` appears to record the
    constructor arguments (here ``depth``) on the instance — renaming
    locals would change what gets collected, so names are left untouched.
    """
    def __init__(self, depth=None):
        super().__init__()
        self.collect(locals())
    def __call__(self, *x):
        # Delegate to the functional form using the stored depth.
        return onehot(*x, depth=self.depth)
@multiple()
def onehot(label, depth=None):
    """Get the one-hot like label of nodes.

    Parameters
    ----------
    label : array-like, 1D integer labels.
    depth : int, optional. Number of classes; inferred as ``label.max() + 1``
        when not given.

    Returns
    -------
    np.ndarray of shape ``(len(label), depth)``.

    Raises
    ------
    ValueError if ``label`` is not one-dimensional.
    """
    label = np.asarray(label, dtype=np.int32)
    # Guard clause first so the error is raised before any depth inference.
    if label.ndim != 1:
        raise ValueError(f"label must be a 1D array, but got {label.ndim}D array.")
    # `is None` rather than `or`: an explicitly passed depth of 0 must not
    # be silently replaced by the inferred depth.
    if depth is None:
        depth = int(label.max()) + 1
    return np.eye(depth, dtype=label.dtype)[label]
|
# coding: utf-8
import logging
import threading
from queue import Queue
# Logging
# Route DEBUG-and-above records to stderr with a timestamped format.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(message)s')
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
# defined the main data structures to be used
fibo_dict = {}
shared_queue = Queue()
input_list = [3, 10, 5, 7] # simulates user input
# define an object from the threading module called `Condition`.
# This object aims to synchronize the access to resources according to
# a specific condition.
queue_condition = threading.Condition()
def fibonacci_task(condition):
    """Consumer: wait until the shared queue has data, then compute the
    fibonacci number for one queued value and store it in fibo_dict.

    NOTE(review): the while/else means each thread processes exactly ONE
    item after being notified (the else runs once when the while condition
    becomes false). With 4 threads and 4 inputs this covers every item.
    """
    with condition:
        while shared_queue.empty():
            logger.info("[%s] - waiting for elements in queue..." %
                        threading.current_thread().name)
            # Releases the condition's lock while sleeping; reacquires on wake.
            condition.wait()
        else:
            value = shared_queue.get()
            fibo_dict[value] = fib(value)
            shared_queue.task_done()
            logger.debug("[%s] fibonacci of key [%d] with result [%d]" %
                         (threading.current_thread().name, value, fibo_dict[value]))
def fib(n):
    """Return the nth Fibonacci number (fib(0) == 0, fib(1) == 1)."""
    pair = (0, 1)
    for _ in range(n):
        # Slide the (F(k), F(k+1)) window forward one step.
        pair = (pair[1], pair[0] + pair[1])
    return pair[0]
def queue_task(condition):
    """Producer: push every input value onto the shared queue, then wake
    all waiting fibonacci_task threads."""
    logging.debug('Starting queue_task...')
    with condition:
        for item in input_list:
            shared_queue.put(item)
        logging.debug(
            "Notifying fibonacci_task threads that the queue is ready to consume..")
        # notify_all() is the PEP 8 name; notifyAll() is a deprecated alias
        # (removed in recent Python 3 releases). Same behavior.
        condition.notify_all() # Notify condition that `shared_queue` is updated
# Daemon consumers: each handles one queued value once notified.
threads = [threading.Thread(
    daemon=True, target=fibonacci_task, args=(queue_condition,)) for i in range(4)] # Creation of Fibonacci threads
[thread.start() for thread in threads] # Start Fibonacci threads
# Creation of queue update thread
prod = threading.Thread(name='queue_task_thread', daemon=True,
                        target=queue_task, args=(queue_condition,))
prod.start() # Start queue update thread
# make the main thread wait for the execution of `threads` ends.
[thread.join() for thread in threads]
logger.info("[%s] - Result: %s" % (threading.current_thread().name, fibo_dict))
|
# Prompt for the user's name and print a greeting.
name = input('please enter your name')
print('hello world ' + name)
|
# Copyright 2017 datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Forge CLI.
"""
from .tasks import (
setup,
task,
TaskError
)
setup()
import click, os
from dotenv import find_dotenv, load_dotenv
import util
from . import __version__
from .core import Forge
from .kubernetes import Kubernetes
from .sops import edit_secret, view_secret
from collections import OrderedDict
# Load a .env file found from the current working directory upward, if any.
ENV = find_dotenv(usecwd=True)
if ENV: load_dotenv(ENV)
@click.group()
@click.version_option(__version__, message="%(prog)s %(version)s")
@click.option('-v', '--verbose', count=True)
@click.option('--config', envvar='FORGE_CONFIG', type=click.Path(exists=True))
@click.option('--profile', envvar='FORGE_PROFILE')
@click.option('--branch', envvar='FORGE_BRANCH')
@click.option('--no-scan-base', is_flag=True, help="Do not scan for services in directory containing forge.yaml")
@click.pass_context
def forge(context, verbose, config, profile, branch, no_scan_base):
    # Root CLI group: build the Forge engine once and stash it on the click
    # context so subcommands can receive it via @click.pass_obj.
    context.obj = Forge(verbose=verbose, config=config,
                        profile=None if profile is None else str(profile),
                        branch=None if branch is None else str(branch),
                        scan_base=not no_scan_base)
@forge.command()
@click.pass_obj
@click.argument('script', nargs=1, type=click.Path(exists=True))
@click.argument('args', nargs=-1)
@task()
def invoke(forge, script, args):
    """
    Invoke a python script using the forge runtime.
    Forge uses a portable self contained python runtime with a well
    defined set of packages in order to behave consistently across
    environments. The invoke command allows arbitrary python code to
    be executed using the forge runtime.
    The code is executed much as a normal python script, but with a
    few exceptions. The "forge" global variable is set to an instance
    of the forge object. Use forge.args to access any arguments
    supplied to the script.
    """
    forge.args = args
    # execfile is the Python 2 builtin (removed in py3); this CLI targets py2.
    execfile(script, {"forge": forge, "__file__": os.path.abspath(script)})
@forge.command()
@click.pass_obj
def setup(forge):
    """
    Help with first time setup of forge.
    Forge needs access to a container registry and a kubernetes
    cluster in order to deploy code. This command helps setup and
    validate the configuration necessary to access these resources.
    """
    # Delegates entirely to the Forge engine; returns its result to click.
    return forge.setup()
@forge.group(invoke_without_command=True)
@click.pass_context
@click.option('-n', '--namespace', envvar='K8S_NAMESPACE', type=click.STRING)
@click.option('--dry-run', is_flag=True)
def build(ctx, namespace, dry_run):
    """Build deployment artifacts for a service.
    Deployment artifacts for a service consist of the docker
    containers and kubernetes manifests necessary to run your
    service. Forge automates the process of building your containers
    from source and producing the manifests necessary to run those
    newly built containers in kubernetes. Use `forge build
    [containers|manifests]` to build just containers, just manifests,
    or (the default) all of the above.
    How forge builds containers:
    By default every `Dockerfile` in your project is built and tagged
    with a version computed from the input sources. You can customize
    how containers are built using service.yaml. The `containers`
    property of `service.yaml` lets you specify an array.
    \b
        name: my-service
        ...
        container:
        - dockerfile: path/to/Dockerfile
          context: context/path
          args:
            MY_ARG: foo
            MY_OTHER_ARG: bar
    How forge builds deployment manifests:
    The source for your deployment manifests are kept as jinja
    templates in the k8s directory of your project. The final
    deployment templates are produced by rendering these templates
    with access to relevant service and build related metadata.
    You can use the `forge build metadata` command to view all the
    metadata available to these templates. See the `forge metadata`
    help for more info.
    """
    forge = ctx.obj
    forge.namespace = namespace
    forge.dry_run = dry_run
    # Bare `forge build` (no subcommand) runs the full build pipeline.
    if ctx.invoked_subcommand is None:
        forge.execute(forge.build)
@build.command()
@click.pass_obj
def metadata(forge):
    """
    Display build metadata.
    This command outputs all the build metadata available to manifests.
    """
    forge.metadata()
@build.command()
@click.pass_obj
def containers(forge):
    """
    Build containers for a service.
    See `forge build --help` for details on how containers are built.
    """
    # "bake" is the container-build step of the pipeline.
    forge.execute(forge.bake)
@build.command()
@click.pass_obj
def manifests(forge):
    """
    Build manifests for a service.
    See `forge build --help` for details on how manifests are built.
    """
    # "manifest" is the template-rendering step of the pipeline.
    forge.execute(forge.manifest)
@forge.command()
@click.argument("file_path", required=True, type=click.Path())
@click.option('-c', '--create', is_flag=True, help="Create an empty file if it does not exist.")
def edit(file_path, create):
    """
    Edit a secret file.
    """
    # sops-backed editing; see .sops module.
    edit_secret(file_path, create)
@forge.command()
@click.argument("file_path", required=True, type=click.Path(exists=True))
def view(file_path):
    """
    View a secret file.
    """
    # sops-backed decryption/printing; see .sops module.
    view_secret(file_path)
@forge.command()
@click.pass_obj
@click.option('-n', '--namespace', envvar='K8S_NAMESPACE', type=click.STRING)
@click.option('--dry-run', is_flag=True, help="Run through the deploy steps without making changes.")
@click.option('--prune', is_flag=True, help="Prune any resources not in the manifests.")
def deploy(forge, namespace, dry_run, prune):
    """
    Build and deploy a service.
    The deploy command performs a `forge build` and then applies the
    resulting deployment manifests using `kubectl apply`.
    """
    forge.namespace = namespace
    forge.dry_run = dry_run
    # build(svc) returns the artifacts, which are splatted into deploy().
    forge.execute(lambda svc: forge.deploy(*forge.build(svc), prune=prune))
@forge.command()
@click.pass_obj
def pull(forge):
    """
    Do a git pull on all services.
    """
    # XXX: should have a better way to track this, but this is quick
    # `pulled` memoizes repos already pulled so shared repos aren't pulled twice.
    pulled = {}
    forge.execute(lambda svc: forge.pull(svc, pulled))
@forge.command()
@click.pass_obj
def clean(forge):
    """
    Clean up intermediate containers used for building.
    """
    forge.execute(forge.clean)
@forge.group()
def schema_docs():
    """
    Generate schema documentation.
    """
    # Group container only; subcommands do the work.
    pass
@schema_docs.command()
def forge_yaml():
    """
    Output schema documentation for forge.yaml
    """
    # Imported lazily so schema modules only load for this command.
    import config
    config.CONFIG.render_all()
@schema_docs.command()
def service_yaml():
    """
    Output schema documentation for service.yaml
    """
    # Imported lazily so schema modules only load for this command.
    import service_info
    service_info.SERVICE.render_all()
def primary_version(resources):
    """Return the most frequent "version" value among *resources*.

    Counts occurrences in first-seen order; the sort is stable, so among
    equally-common versions the last of the most-common group wins (same
    tie-break as the original cmp-based sort).
    """
    counts = OrderedDict()
    for r in resources:
        v = r["version"]
        if v not in counts:
            counts[v] = 0
        counts[v] += 1
    # `cmp=` is py2-only (removed in Python 3); `key=` is equivalent here
    # and works on both 2.7 and 3.x.
    return sorted(counts.items(), key=lambda item: item[1])[-1][0]
def unfurl(repos):
    """Flatten the nested repo->service->profile mapping, yielding
    (repo, service, profile, resources) tuples in sorted key order."""
    for repo_name, service_map in sorted(repos.items()):
        for service_name, profile_map in sorted(service_map.items()):
            for profile_name, resource_list in sorted(profile_map.items()):
                yield repo_name, service_name, profile_name, resource_list
from fnmatch import fnmatch
def match(name, pattern):
    """Return True when *pattern* is empty/None, otherwise whether *name*
    matches it shell-style."""
    return True if not pattern else fnmatch(name, pattern)
@forge.command()
@click.pass_obj
@click.argument("service_pattern", required=False)
@click.argument("profile_pattern", required=False)
@task()
def list(forge, service_pattern, profile_pattern):
    """
    List deployed forge services.
    The list command will query all k8s resources in all namespaces
    within a cluster and display a summary of useful information about
    those services. This includes the source repo where the service
    originates, the descriptor within the repo, and the status of any
    deployed k8s resources.
    You can use shell-style pattern matching for either the service or
    the profile in order to filter what is printed.
    """
    bold = forge.terminal.bold
    red = forge.terminal.bold_red
    kube = Kubernetes()
    repos = kube.list()
    first = True
    for repo, service, profile, resources in unfurl(repos):
        # Skip entries filtered out by the optional shell-style patterns.
        if not (match(service, service_pattern) and match(profile, profile_pattern)):
            continue
        descriptor = resources[0]["descriptor"]
        version = primary_version(resources)
        # Blank line between entries (but not before the first).
        if first:
            first = False
        else:
            print
        header = "{0}[{1}]: {2} | {3} | {4}".format(bold(service), bold(profile), repo or "(none)", descriptor,
                                                    version)
        print header
        for resource in sorted(resources):
            ver = resource["version"]
            # Highlight resources whose version diverges from the primary.
            if ver != version:
                red_ver = red(ver)
                print " {kind} {namespace}.{name} {0}:\n {status}".format(red_ver, **resource)
            else:
                print " {kind} {namespace}.{name}:\n {status}".format(**resource)
@forge.command()
@click.pass_obj
@click.argument("service", required=False)
@click.argument("profile", required=False)
@click.option("--all", is_flag=True, help="Delete all services.")
@task()
def delete(forge, service, profile, all):
    """
    Delete (undeploy) k8s resources associated with a given profile or service.
    The delete command removes all forge deployed kubernetes
    resources for the specified service. If the profile is supplied
    then only the resources for that profile are removed. If the
    `--all` option is supplied then all forge deployed resources are
    removed from the entire cluster.
    """
    # --all is mutually exclusive with naming a service/profile.
    if all and (service or profile):
        raise TaskError("cannot specify an argument with the --all option")
    if not all and not service:
        raise TaskError("either supply a service or the --all option")
    labels = {
        "forge.service": service
    }
    kube = Kubernetes()
    # Validate that the named service/profile actually has resources
    # before issuing any deletes.
    if not all:
        repos = kube.list()
        services = set()
        profiles = set()
        for r, svc, prof, _ in unfurl(repos):
            services.add(svc)
            profiles.add((svc, prof))
        if service not in services:
            raise TaskError("service has no resources: %s" % service)
        if profile:
            if (service, profile) not in profiles:
                raise TaskError("profile has no resources: %s" % profile)
    if profile:
        labels["forge.profile"] = profile
    with task.verbose(True):
        kube.delete(labels)
def call_main():
    """CLI entry point: set up YAML helpers, run the forge group, and exit
    with its result (or with the error/interrupt that stopped it)."""
    util.setup_yaml()
    try:
        exit(forge())
    # `except X as e` replaces the py2-only comma form `except X, e`;
    # it is valid on Python 2.6+ and required on Python 3.
    except TaskError as e:
        exit(e)
    except KeyboardInterrupt as e:
        exit(e)

if __name__ == "__main__":
    call_main()
|
from owl import Deck
from collections import Counter
def test_deck():
    """Two independently shuffled decks hold the same cards, drawn in a
    different order (order equality is astronomically unlikely)."""
    first_deck, second_deck = Deck(), Deck()
    drawn_a = [first_deck.draw() for _ in range(50)]
    drawn_b = [second_deck.draw() for _ in range(50)]
    # Different order...
    assert drawn_a != drawn_b
    # ...but the same multiset of cards.
    assert Counter(drawn_a) == Counter(drawn_b)
|
import turtle
def draw_square(animal, size):
    """
    Draw a square by repeating forward-then-turn four times.
    :param animal: the turtle doing the drawing
    :param size: length of each side of the square
    :return: nothing
    """
    for _ in range(4):
        animal.forward(size)
        animal.left(90)
def calculate_surface_of_square(side_length):
    """Print and return the area of a square with the given side length."""
    area = side_length ** 2
    print(area)
    return area
# Draw 16 nested squares of growing size, rotating slightly between each,
# printing the area of each square; close the window on click.
screen =turtle.Screen()
nick =turtle.Turtle()
for index in range(16):
    draw_square(nick, index*5)
    nick.left(6)
    result=calculate_surface_of_square(index*5)
    print(result)
screen.exitonclick()
from django.urls import path
from todoapp import views
# URL routes for the todo app; route names are used by {% url %} / reverse().
urlpatterns = [
    path('',views.home,name = 'home'),
    path('login/',views.login,name='login'),
    path('signup/',views.signup,name = 'signup'),
    path('logout/',views.signout,name = 'logout'),
    path('addTask/',views.addTask,name = 'addTask'),
    path('deleteTodo/<int:id>',views.deleteTodo,name ='deleteTodo' ),
    path('change-status/<int:id>/<str:status>',views.changestatus,name ='changestatus' ),
]
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-14 09:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Give Alternative an explicit `order` field (and order by it), and
    widen/clarify Configuration.target's help text."""

    dependencies = [
        ('chloroform', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='alternative',
            options={'ordering': ['order']},
        ),
        migrations.AddField(
            model_name='alternative',
            name='order',
            field=models.PositiveIntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='configuration',
            name='target',
            field=models.CharField(blank=True, help_text='An email or a list of emails separated by ;', max_length=2000, verbose_name='Recipient of mails sent with this configuration'),
        ),
    ]
|
# import dependencies
import os
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
from flask import Flask, jsonify, json, render_template, request, Response
from flask_cors import CORS
from flask_sqlalchemy import SQLAlchemy
from dotenv import load_dotenv
load_dotenv()
# create the flask app
app = Flask(__name__)
# get the heroku database url from environment
db_uri = os.environ['DATABASE_URL']
# app configuration
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = db_uri
# db setup
db = SQLAlchemy(app)
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(db.engine, reflect=True)
# save references to the table
# NOTE(review): assumes an `info` table already exists in the database.
Info = Base.classes.info
# cors config
cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
# create the routes
@app.route('/api/update', methods=['POST', 'OPTIONS'])
def update():
    """Persist a contact-form submission (name/email/message) to the DB.

    Returns the submitted JSON on success, an empty JSON body for CORS
    preflight OPTIONS, and 400 for POSTs without a JSON body.

    Bug fix: a POST without a JSON body previously fell through and
    returned None, which makes Flask raise a TypeError (500).
    """
    if request.method == 'OPTIONS':
        # CORS preflight: an empty JSON body is sufficient.
        return json.dumps({}), 200, {'ContentType': 'application/json'}
    if request.is_json:
        data = request.get_json()
        print(data)
        name = data['name']
        email = data['email']
        message = data['message']
        new_info = Info(name=name, email=email, message=message)
        db.session.add(new_info)
        db.session.commit()
        # Echo the stored payload back to the client.
        return json.dumps(data), 200, {'ContentType': 'application/json'}
    return json.dumps({'error': 'expected application/json'}), 400, {'ContentType': 'application/json'}
# Run the development server when executed directly (debug mode only).
if __name__ =="__main__":
    app.run(debug=True)
|
class Solution(object):
    def longestValidParentheses(self, s):
        """
        :type s: str
        :rtype: int

        Length of the longest valid (well-formed) parentheses substring,
        using a stack of indices with a -1 sentinel marking the position
        just before the current valid run.
        """
        best = 0
        anchors = [-1]
        for idx, ch in enumerate(s):
            if ch == '(':
                anchors.append(idx)
            else:
                anchors.pop()
                if anchors:
                    best = max(best, idx - anchors[-1])
                else:
                    # Unmatched ')': it becomes the new boundary.
                    anchors.append(idx)
        return best
class RaCRohaCentral:
    # Club office bearers for the current term.
    president = "Rtr. Akash Rumade"
    secretary = "Rtr. Satyen Deshpande"
    treasurer = "Rtr. Yash Shinde"
class Avenue(RaCRohaCentral):
    # Avenue chairs; office bearers are inherited from RaCRohaCentral.
    CSD = "Rtr. Rohit"
    PDD = "Rtr. Srushti"
    CMD = "Rtr. Sanket"
    ISD = "Rtr. Mitesh"
class display(Avenue):
    # Demonstrates multi-level inheritance: CSD comes from Avenue,
    # treasurer from RaCRohaCentral.
    def displayy(self):
        print(f"CSD is {self.CSD}, Treasurer is {self.treasurer}")
a = display()
a.displayy()
|
#!/usr/bin/env python
"""
cfg.py
=======

Minimal wrapper around ConfigParser that loads ini-style text and exposes
its sections as ordered dictionaries.

NB configparser changed a lot between py2.7 and py3; the guarded imports
below select the right modules at runtime so this works on both.
"""
# BUG FIX: StringIO was imported unconditionally from the py2-only module,
# so the script crashed on py3 even though ConfigParser was already guarded.
try:
    from StringIO import StringIO          # Python 2
except ImportError:
    from io import StringIO                # Python 3
import textwrap
from collections import OrderedDict as odict
try:
    from ConfigParser import ConfigParser  # Python 2
except ImportError:
    from configparser import ConfigParser  # Python 3


class Cfg(object):
    """Parse ini-style text, keeping both the parser and a dict snapshot."""

    def __init__(self, txt):
        sfp = StringIO(txt)
        c = ConfigParser()
        # read_file replaced the deprecated readfp (removed in py3.12);
        # fall back to readfp for py2.
        if hasattr(c, 'read_file'):
            c.read_file(sfp)
        else:
            c.readfp(sfp)
        self.c = c
        self.d = self.full(c)

    def full(self, c):
        """Return {section: {option: value}} for every section of *c*."""
        d = odict()
        for s in c.sections():
            d[s] = dict(c.items(s))
        return d

    def sections(self):
        """Return the list of section names."""
        return self.c.sections()

    def sect(self, s):
        """Return the options of section *s* as a plain dict."""
        return dict(self.c.items(s))

    def __repr__(self):
        return "\n".join(["%s\n%s" % (k, repr(v)) for k, v in self.d.items()])


if __name__ == '__main__':
    txt = textwrap.dedent("""
    [red]
    a=1
    b:2
    c=3
    [green]
    aa=2
    bb=3
    cc:4
    """)
    c = Cfg(txt)
    print(c.sections())
    for s in c.sections():
        print("%s " % s)
        d = c.sect(s)
        print(repr(d))
        for k, v in d.items():
            print("%10s : %s " % (k, v))
    print(c)
|
from django.conf.urls import url
from .views import (
    image_list,
    image_upload,
)
# URL table for the image app: the index lists images,
# /upload/ accepts new ones.
urlpatterns = [
    url(r'^$', image_list),            # list view at the app root
    url(r'^upload/$', image_upload),   # upload view
]
|
# coding: utf-8
# Standard Python libraries
# http://www.numpy.org/
import numpy as np
# https://pandas.pydata.org/
import pandas as pd
from ..database.IprPyDatabase import IprPyDatabase
def get_isolated_atom_energies(database: IprPyDatabase,
                               verbose: bool = False
                               ) -> pd.DataFrame:
    """
    Builds a table listing the identified isolated atom energies for each
    LAMMPS potential and symbol model, which can then be used to convert
    measured potential energies into cohesive energies. This uses finished
    results from both isolated_atom and diatom_scan calculations.

    Details:

    - The isolated_atom calculation directly computes the energy of
      non-interacting particles for each symbol.
    - For the diatom_scan calculations, the tabulated values for r > 2.0 are
      searched for a common cutoff energy value, which may not be found if the
      max r measured is less than the cutoff.
    - If energies are found by both methods and are different, the diatom_scan
      values are used. This bypasses an issue with versions 1+2 of SNAP
      potentials where the energy above the cutoff is correct for a while
      before erroneously dropping to zero at some higher cutoff.

    Parameters
    ----------
    database : IprPyDatabase
        The database to search for isolated_atom and diatom_scan results.
    verbose : bool, optional
        Setting this to True will print informative messages.

    Returns
    -------
    pandas.DataFrame
        The table of isolated atom energies for each LAMMPS potential and
        symbol model.
    """
    # Get isolated atom results
    records = database.get_records('calculation_isolated_atom', status='finished')
    if len(records) == 0:
        raise ValueError('No finished isolated atom results found!')
    if verbose:
        print(len(records), 'finished isolated atom results loaded')
    results1_df = []
    for record in records:
        # Extract values by symbol
        for symbol, energy in record.isolated_atom_energy.items():
            data = {}
            data['potential_LAMMPS_id'] = record.potential.potential_LAMMPS_id
            data['potential_LAMMPS_key'] = record.potential.potential_LAMMPS_key
            data['potential_id'] = record.potential.potential_id
            data['potential_key'] = record.potential.potential_key
            data['symbol'] = symbol
            data['isolated_atom_energy'] = energy
            results1_df.append(data)
    results1_df = pd.DataFrame(results1_df)
    # Get diatom_scan results
    records = database.get_records('calculation_diatom_scan', status='finished')
    if len(records) == 0:
        raise ValueError('No finished diatom scan results found!')
    if verbose:
        print(len(records), 'finished diatom scan results loaded')
    results2_df = []
    for record in records:
        # Skip cross interaction results
        if record.symbols[0] != record.symbols[1]:
            continue
        data = {}
        data['potential_LAMMPS_id'] = record.potential.potential_LAMMPS_id
        data['potential_LAMMPS_key'] = record.potential.potential_LAMMPS_key
        data['potential_id'] = record.potential.potential_id
        data['potential_key'] = record.potential.potential_key
        data['symbol'] = record.symbols[0]
        # Search for the first pair of adjacent identical energies beyond
        # r > 2.0: that common value marks the cutoff energy.
        try:
            e = record.energy_values[record.r_values > 2.0]
            ii = np.where(e[1:] - e[:-1] == 0)[0][0]
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and hid genuine data errors.
        # IndexError: no adjacent equal energies (cutoff not reached);
        # TypeError: missing/non-array energy or r values.
        except (IndexError, TypeError):
            data['diatom_cutoff_energy'] = np.nan
        else:
            # Halve the dimer energy to get a per-atom value.
            data['diatom_cutoff_energy'] = e[ii] / 2
        results2_df.append(data)
    results2_df = pd.DataFrame(results2_df)
    # Merge into a single dataframe
    mergekeys = ['potential_LAMMPS_key', 'potential_key',
                 'potential_LAMMPS_id', 'potential_id', 'symbol']
    results_df = pd.merge(results1_df, results2_df, on=mergekeys)
    # Replace values where the diatom cutoff is reached and is different
    usediatom = (~pd.isna(results_df.diatom_cutoff_energy) &
                 ~np.isclose(results_df.isolated_atom_energy,
                             results_df.diatom_cutoff_energy,
                             atol=0.0, rtol=1e-8))
    if verbose:
        print('different energies found for:')
        for potential_LAMMPS_id in results_df[usediatom].potential_LAMMPS_id:
            print(potential_LAMMPS_id)
        print('using diatom_cutoff values for those implementations')
    results_df.loc[usediatom, 'isolated_atom_energy'] = results_df.loc[usediatom, 'diatom_cutoff_energy']
    del results_df['diatom_cutoff_energy']
    return results_df.sort_values(['potential_LAMMPS_id', 'symbol']).reset_index(drop=True)
# str.title() returns a copy of the string with the first letter of every
# word converted to upper case.
a="Khamzayev Jamshid is wonderfull Python programmer!!!"
titled = a.title()
print(titled)
"""Static operators for xml parsing."""
import os
def clear_destination(file_path):
    """Remove *file_path* if it exists; do nothing if it does not.

    Uses EAFP (try/remove) instead of the original exists()-then-remove,
    which was racy: the file could disappear between the check and the
    removal.  Note the EAFP form also removes a dangling symlink, which
    os.path.exists() would have reported as absent.
    """
    try:
        os.remove(file_path)
    except FileNotFoundError:
        pass
|
# Name: Barbara Payne
# Assignment 11.1
# Purpose: Cash register to display total items and total price from user's input
import locale
# Use US conventions for currency formatting below.
# NOTE(review): raises locale.Error on systems without the 'en_US' locale.
locale.setlocale(locale.LC_ALL, 'en_US')
class CashRegister:
    """Track the number of items rung up and their running total."""

    def __init__(self):
        """Start an empty register and greet the cashier."""
        self.count = 0
        self.total = 0
        self.price = 0
        print('Welcome to Cash Register!')

    def addItem(self, price):
        """Ring up one item: remember its price and update the totals."""
        self.price = price
        self.count += 1
        self.total += price

    def getTotal(self):
        """Print the running total, formatted as currency."""
        print("The total price is:", locale.currency(self.total,'$'))

    def getCount(self):
        """Print how many items have been rung up."""
        print("You have entered", self.count, "items into the cash register.")
register = CashRegister()
# Keep prompting for item prices until the user types 'q'.
while True:
    # NOTE(review): addItem returns None, so priceAnswer is always None;
    # the assignment is redundant but harmless.
    priceAnswer = register.addItem(float(input('Enter price of the item ')))
    response = input('Want to add another price? Type any key to continue or type q to quit. ')
    if response == 'q':
        print("\n")
        break
# Report totals once the user is done.
register.getCount()
register.getTotal()
|
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QTableWidget
from PyQt5.Qt import QTableWidgetItem, QAbstractItemView
class AppDemo(QWidget):
    """Demo window: a multi-selection table with a few rows pre-selected."""

    def __init__(self):
        super().__init__()
        self.initGUI()
        self.createTable()
        preselected = [0, 3, 4]
        self.selectRows(preselected)

    def initGUI(self):
        """Size the window and define the table contents."""
        self.resize(600,600)
        self.rows = [['a1','b1','c1'],
                     ['a2','b2','c2'],
                     ['a3','b3','c3'],
                     ['a4','b4','c4'],
                     ['a5','b5','c5']]

    def createTable(self):
        """Build the table widget and fill it from self.rows."""
        self.tableWidget = QTableWidget(self)
        self.tableWidget.resize(self.width(), self.height())
        self.tableWidget.setRowCount(len(self.rows))
        self.tableWidget.setColumnCount(len(self.rows[0]))
        self.tableWidget.setSelectionMode(QAbstractItemView.MultiSelection)
        for r, values in enumerate(self.rows):
            for c, text in enumerate(values):
                cell = QTableWidgetItem()
                cell.setText(text)
                self.tableWidget.setItem(r, c, cell)

    def selectRows(self, selection: list):
        """Add each listed row index to the current selection."""
        for row_index in selection:
            self.tableWidget.selectRow(row_index)
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the demo window,
    # and hand control to the Qt event loop until the window closes.
    app = QApplication(sys.argv)
    demo = AppDemo()
    demo.show()
    sys.exit(app.exec_())
def head_resource(uri):
    '''Get the metadata on a resource.

    ARGS:
    @uri -- The uri for locating the resource in relation to the
            resources directory. Ex networks/config.

    RETURNS:
    @metadata -- The metadata associated with the uri given.
    '''
    from find_resource import find_resource
    path = find_resource(uri)
    # BUG FIX: the original final line was bare prose ("return the meta data
    # when we figure out how this will be structured"), a SyntaxError that
    # made the whole module unimportable.  Until the metadata structure is
    # decided, fail loudly instead.
    raise NotImplementedError(
        'metadata structure for resource at %r is not yet defined' % path)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
    # Initial migration: creates the Location and User tables.

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Location',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
                # Audit timestamps: auto_now updates on save, auto_now_add on create.
                ('last_edited', models.DateTimeField(auto_now=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('latitude', models.FloatField()),
                ('longitude', models.FloatField()),
                ('country_name', models.CharField(max_length=50)),
                ('locality', models.CharField(max_length=100)),
                ('postal_code', models.CharField(max_length=10)),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('password', models.CharField(verbose_name='password', max_length=128)),
                ('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
                ('last_edited', models.DateTimeField(auto_now=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                # The phone number is the primary key, so no auto id column.
                ('phone', models.CharField(serialize=False, max_length=15, primary_key=True)),
                ('first_name', models.CharField(max_length=100)),
                ('last_name', models.CharField(max_length=100)),
                ('age', models.IntegerField()),
                ('profile_picture', models.ImageField(upload_to='profile_pics')),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
    ]
|
from Move import *
class Pokemon():
    """A Pokemon species/instance: base stats, derived stats and moves.

    The class-level attributes below act as documented defaults; every one
    of them is shadowed by an instance attribute in __init__.
    """
    Number = 0
    Name = ""
    Type1 = Type.none
    Type2 = Type.none
    Level = 0
    BaseHP = 0
    BaseATK = 0
    BaseDEF = 0
    BaseSPATK = 0
    BaseSPDEF = 0
    BaseSPEED = 0
    PossibleMoves = AllMoves.Moveset
    CurrentHP = 0
    HP = 0
    Attack = 0
    Defense = 0
    SpAttack = 0
    SpDefense = 0
    Speed = 0
    Moves = [None, None, None, None]
    #Ability
    #Nature
    #HeldItem
    #IVs
    #EVs

    def __init__(self, name, number, type1, type2, hp, atk, dff, spatk, spdef, spd, lvl = 5):
        """Create a Pokemon from its base stats and derive its live stats."""
        self.Name = name
        self.Number = number
        self.Type1 = type1
        self.Type2 = type2
        self.Level = lvl
        self.BaseHP = hp
        self.BaseATK = atk
        self.BaseDEF = dff
        self.BaseSPATK = spatk
        self.BaseSPDEF = spdef
        self.BaseSPEED = spd
        self.PossibleMoves = AllMoves.Moveset
        self.Moves = [None, None, None, None]
        self.__updateStats()

    def SetPossibleMoves(self, moves):
        """Restrict the learnable move set to *moves* (names or Move objects)."""
        self.PossibleMoves = {}
        for move in moves:
            if isinstance(move, str):
                move = AllMoves.Get(move)
            self.PossibleMoves[move.Name] = move

    def New(self, level):
        """Return a fresh instance of this species at the given level."""
        newPokemon = Pokemon(self.Name, self.Number, self.Type1, self.Type2,
                             self.BaseHP, self.BaseATK, self.BaseDEF,
                             self.BaseSPATK, self.BaseSPDEF, self.BaseSPEED,
                             level)
        newPokemon.PossibleMoves = self.PossibleMoves
        return newPokemon

    def AddMove(self, move, ndx):
        """Place *move* (name or Move object) into move slot *ndx* (0-3)."""
        if isinstance(move, str):
            move = AllMoves.Get(move)
        if move and isinstance(ndx, int) and ndx < 4 and ndx >= 0:
            if not move.Name in self.PossibleMoves:
                raise ValueError("Move " + move.Name + " was not found in possible moves")
            self.Moves[ndx] = move
            return
        # BUG FIX: the original message was garbled
        # ("Move was not found passed properly").
        raise ValueError("Move or slot index was not passed properly")

    def SetMoves(self, moves):
        """Replace the move slots, in order, with the given moves."""
        for ndx, move in enumerate(moves):
            if isinstance(move, str):
                move = AllMoves.Get(move)
            if not move.Name in self.PossibleMoves:
                raise ValueError("Move " + move.Name + " was not found in possible moves")
            self.Moves[ndx] = move

    def __updateStats(self):
        # Recompute all level-dependent stats and fully heal.
        self.__calcHP()
        self.Attack = self.__calcStat(self.BaseATK)
        self.Defense = self.__calcStat(self.BaseDEF)
        self.SpAttack = self.__calcStat(self.BaseSPATK)
        self.SpDefense = self.__calcStat(self.BaseSPDEF)
        self.Speed = self.__calcStat(self.BaseSPEED)
        self.CurrentHP = self.HP

    def __calcHP(self):
        # Simplified main-series HP formula (no IVs/EVs).
        self.HP = int((2 * self.BaseHP * self.Level) / 100) + self.Level + 10
        return self.HP

    def __calcStat(self, base):
        # Simplified main-series stat formula (no IVs/EVs/nature).
        return int((2 * base * self.Level) / 100) + 5

    def __str__(self):
        output = ""
        output += self.Name + ": #" + str(self.Number) + "\n"
        output += "\tType: " + str(self.Type1.name) + " / " + str(self.Type2.name) + "\n"
        output += "\tLevel: " + str(self.Level) + "\n"
        output += "\tHP: " + str(self.CurrentHP) + " / " + str(self.HP) + "\n"
        output += "\tAttack : " + str(self.Attack) + "\n"
        output += "\tDefense : " + str(self.Defense) + "\n"
        output += "\tSpecial Attack : " + str(self.SpAttack) + "\n"
        output += "\tSpecial Defense : " + str(self.SpDefense) + "\n"
        output += "\tSpeed : " + str(self.Speed) + "\n"
        output += "\tMoves: \n"
        for move in self.Moves:
            if move:
                output += "\t\t" + str(move.Name) + "\n"
        return output

    def Print(self):
        """Print the formatted summary from __str__."""
        print(self)
class PokemonList():
    """Registry of Pokemon species, addressable by name or Pokedex number."""
    Pokemonset = {}
    Pokedex = {}

    def __init__(self):
        # name -> species template, number -> name
        self.Pokemonset = {}
        self.Pokedex = {}

    def AddSpecies(self, name, number, type1, type2, hp, atk, dff, spatk, spdef, spd):
        """Register a species template under both its name and its number."""
        species = Pokemon(name, number, type1, type2, hp, atk, dff, spatk, spdef, spd)
        self.Pokemonset[name] = species
        self.Pokedex[number] = name

    def SetSpeciesMoves(self, name, moves):
        """Assign the learnable move set for a species (by name or number)."""
        if isinstance(name, int):
            name = self.Pokedex[name]
        self.Pokemonset[name].SetPossibleMoves(moves)

    def MakeNewPokemon(self, name, level):
        """Instantiate a fresh Pokemon of the given species at *level*."""
        if isinstance(name, int):
            name = self.Pokedex[name]
        return self.Pokemonset[name].New(level)
# Module-level registry of available species and their learnable moves.
# NOTE(review): stat values presumably mirror the games' base stats — verify.
AllPokemon = PokemonList()
AllPokemon.AddSpecies("Bulbasaur", 1, Type.grass, Type.poison, 45, 49, 49, 65, 65, 45)
AllPokemon.SetSpeciesMoves("Bulbasaur", ["Tackle", "Vine Whip"])
AllPokemon.AddSpecies("Charmander", 4, Type.fire, Type.none, 39, 52, 43, 60, 50, 65)
AllPokemon.SetSpeciesMoves("Charmander", ["Tackle", "Ember"])
AllPokemon.AddSpecies("Squirtle", 6, Type.water, Type.none, 44, 48, 65, 50, 64, 43)
AllPokemon.SetSpeciesMoves("Squirtle", ["Tackle", "Water Gun"])
AllPokemon.AddSpecies("Pikachu", 25, Type.electric, Type.none, 35, 55, 40, 50, 50, 90)
AllPokemon.SetSpeciesMoves("Pikachu", ["Tackle", "Thunder Shock"])
if __name__ == "__main__":
    # Demo: build one level-50 Pokemon per registered species
    # (by Pokedex number), give it moves, and print its summary.
    pika = AllPokemon.MakeNewPokemon(25, 50)
    pika.SetMoves(["Tackle", "Thunder Shock"])
    bulba = AllPokemon.MakeNewPokemon(1, 50)
    bulba.SetMoves(["Tackle", "Vine Whip"])
    char = AllPokemon.MakeNewPokemon(4, 50)
    char.SetMoves(["Tackle", "Ember"])
    squirt = AllPokemon.MakeNewPokemon(6, 50)
    squirt.SetMoves(["Tackle", "Water Gun"])
    print(bulba)
    print(char)
    print(squirt)
    print(pika)
|
from pathlib import Path
from random import shuffle
def main():
    """For every dataset task file, print a random permutation guess.

    Walks ../Datasets/<dataset>/<task>.txt (skipping out.txt) and prints the
    task path followed by a shuffled 1..N sequence, where N is the number of
    characters in the file.
    """
    path_to_datasets = Path(__file__).absolute().parent.parent / "Datasets"
    for path_to_dataset in path_to_datasets.iterdir():
        if not path_to_dataset.is_dir():
            continue
        for task in path_to_dataset.iterdir():
            if not task.name.endswith(".txt") or task.name == "out.txt":
                continue
            with open(task, "r") as file:
                guess = list(range(1, len(file.read()) + 1))
                shuffle(guess)
                print(str(task) + " " + ' '.join(str(pos) for pos in guess), flush=True)


if __name__ == "__main__":
    main()
|
from collections import Counter
def self_descriptive(num):
    """Return True if *num* is self-descriptive: the digit at position i
    equals how many times the digit i occurs in the number."""
    digits = str(num)
    return all(digits.count(str(pos)) == int(ch)
               for pos, ch in enumerate(digits))
|
import numpy as np
import os
import argparse
import torch
from PIL import Image
from torch.autograd import Variable
from torchvision import transforms
from config import sbu_testing_root
from misc import check_mkdir, crf_refine
from model import BDRAR
# Run all CUDA work on device 0.
torch.cuda.set_device(0)
a = argparse.ArgumentParser()
a.add_argument("--pathIn", type=str, help="input path")
a.add_argument("--pathOut", type=str, help="output path")
a.add_argument("--intype", type=str, default='.jpg', help="input image type")
a.add_argument("--rootpath", type=str, default='/data/add_disk0/shilinhu', help="root path")
a.add_argument("--expname", type=str, default='test_bdrar', help="example name")
a.add_argument("--snapshot", type=str, default='3000.pth', help="checkpoint path")
a.add_argument("--scale", type=int, default=416, help="image scale")
a.add_argument("--crf", type=int, default=1, help="whether use crf or not, 1 is use")
args = a.parse_args()
#ckpt_path = './ckpt'
#exp_name = 'BDRAR'
#args = {
#    'snapshot': '3000',
#    'scale': 416
#}
# Resize + ImageNet-style normalization applied to each input image.
img_transform = transforms.Compose([
    transforms.Resize(args.scale),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# Map from a display name to the directory of images to test.
to_test = {args.pathIn: os.path.join(args.rootpath, args.expname, args.pathIn)}
to_pil = transforms.ToPILImage()
def main():
    """Run BDRAR shadow-detection inference over every image in to_test.

    Loads the checkpoint named by --snapshot, predicts a mask per image,
    optionally refines it with a CRF, and writes PNGs to --pathOut.
    """
    net = BDRAR().cuda()
    if len(args.snapshot) > 0:
        print('load snapshot \'%s\' for testing' % args.snapshot)
        net.load_state_dict(torch.load(os.path.join(args.rootpath, args.expname, args.snapshot)))
    net.eval()
    with torch.no_grad():
        for name, root in to_test.items():
            # Collect base names (without extension) of all matching images.
            img_list = [os.path.splitext(f)[0] for f in os.listdir(root) if f.endswith(args.intype)]
            #img_list = [img_name for img_name in os.listdir(root) if img_name.endswith('.jpg')]
            for idx, img_name in enumerate(img_list):
                print('predicting for %s: %d / %d' % (name, idx + 1, len(img_list)))
                check_mkdir(
                    os.path.join(args.pathOut))
                img = Image.open(os.path.join(root, img_name + args.intype))
                w, h = img.size
                img_var = Variable(img_transform(img).unsqueeze(0)).cuda()
                res = net(img_var)
                # Resize the predicted mask back to the original image size.
                prediction = np.array(transforms.Resize((h, w))(to_pil(res.data.squeeze(0).cpu())))
                if args.crf == 1:
                    # Optional CRF refinement using the RGB image as guidance.
                    prediction = crf_refine(np.array(img.convert('RGB')), prediction)
                Image.fromarray(prediction).save(
                    os.path.join(args.pathOut, img_name + '.png'), "PNG")


if __name__ == '__main__':
    main()
|
# Note: the file's own on-disk encoding and the manual encode/decode done
# at write/read time are two separate things.
with open("1.txt",mode="wb") as writer:
    text = "xianqian嘿嘿"
    writer.write(text.encode("utf-8"))

with open("1.txt", mode="rb") as reader:
    payload = reader.read()
    # payload = reader.read().decode("utf-8")
    print(payload)
|
from flask import Flask
from flask_restful import Api
from flask_cors import CORS
import logging
import os
from resources.api.v1.TelegramWebHook import TelegramWebHook
from credentials.telegram import BOT_TOKEN
from resources.HelloWorld import HelloWorld
from core.RequestHandler import RequestHandler
# Shared log line format for both the dev server and a WSGI host.
logging_format = "[%(asctime)s] [%(process)d] [%(threadName)s] [%(levelname)s] - %(message)s"
if __name__ == '__main__':
    # Running directly: configure the root logger from scratch.
    logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"), format=logging_format)
else:
    # Imported by a WSGI server: reuse its handlers but apply our format.
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    if logger.hasHandlers():
        for handler in logger.handlers:
            handler.setFormatter(
                logging.Formatter(logging_format)
            )
    else:
        logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"), format=logging_format)

app = Flask(__name__, static_url_path='')
CORS(app)
api = Api(app)
request_handler = RequestHandler()
# The webhook route is the bot token itself, so only Telegram (which knows
# the token) can reach it.
api.add_resource(TelegramWebHook, f'/{BOT_TOKEN}', resource_class_kwargs={'request_handler': request_handler})
api.add_resource(HelloWorld, '/')
if __name__ == '__main__':
    app.run(debug=True)  # ssl_context=('cert.pem', 'private_key.pem'))
|
import pandas as pd
from sklearn.model_selection import train_test_split,GridSearchCV
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import accuracy_score
import numpy as np
# prepare the data: drop the empty trailing column, the id and the label
pf=pd.read_csv("../../Datasets/Cancer.csv")
X=pf.drop(['Unnamed: 32',"id","diagnosis"],axis=1)
# one-hot the diagnosis and keep a single binary column as the target
Y=np.array(pd.get_dummies(pf['diagnosis'], drop_first=True)).reshape(X.shape[0])
# train/test split
X_train,X_test,y_train,y_test=train_test_split(X,Y,test_size=0.21,random_state=42)
# build the model
Gbm=GradientBoostingClassifier()
Gbm.fit(X_train,y_train)
# predict with the model
pred=Gbm.predict(X_test)
# baseline (untuned) accuracy
print(f"İlkel başarı değeri : {accuracy_score(y_test,pred)}")
# hyperparameter grid
hiperparams={'max_depth': np.arange(2,10,2),
             'learning_rate': [0.0001,0.001,0.01,0.1,1],
             'n_estimators': np.arange(200,1000,200)}
# 10-fold cross-validated grid search over all parameter combinations
model_cv=GridSearchCV(Gbm,hiperparams,cv=10,n_jobs=-1,verbose=2).fit(X_train,y_train)
print(model_cv.best_params_)
# model tuning: refit with the best parameters found above
model_tunned=GradientBoostingClassifier(learning_rate=model_cv.best_params_['learning_rate'],
                                        n_estimators=model_cv.best_params_['n_estimators'],
                                        max_depth=model_cv.best_params_['max_depth']).fit(X_train,y_train)
pred_tunned=model_tunned.predict(X_test)
# print baseline accuracy again for side-by-side comparison, then tuned
print(f"İlkel başarı değeri : {accuracy_score(y_test,pred)}")
print(f"Tunned edilmiş başarı değeri : {accuracy_score(y_test,pred_tunned)}")
import os
import numpy as np
import pandas as pd
from util import DATA_DIR, DOWNLOAD_DIR, load_data, reduce_dataset, keystrokes2events, events2keystrokes

# Source URLs for the three raw keystroke datasets downloaded by preprocess().
GREYC_NISLAB_DATASET_URL = 'http://www.epaymentbiometrics.ensicaen.fr/wp-content/uploads/2015/04/greyc-nislab-keystroke-benchmark-dataset.xls'
CITEFA_DATASET_URL = 'http://www.cse.chalmers.se/~bello/publications/kprofiler-20100716-1442.tar.gz'
VILLANI_DATASET_URL = 'https://bitbucket.org/vmonaco/dataset-villani-keystroke/raw/f451aa1b1ee40e86ef58d8eab2b8f829fcc23405/data/keystroke.csv'

# Canonical column order shared by all preprocessed datasets.
COLS = ['age', 'gender', 'handedness', 'timepress', 'timerelease', 'keyname']
def make_sessions(x, mean, std, skip_chars=20):
    """Split one (user, session) sample into fixed-size pseudo-sessions.

    Consecutive runs of ~N(mean, std) keystrokes each get a new session id
    derived from the original one (original id * 10000, incrementing);
    skip_chars keystrokes are discarded between runs, and a trailing run
    shorter than the drawn length is discarded entirely.

    Expects *x* indexed by (user, session); returns the same shape of frame
    with the new session ids in the index.
    """
    x = x.reset_index(level=1)
    # A single original session id is expected here; scale it up to leave
    # room for the incrementing pseudo-session ids.
    base_session = x['session'].unique().squeeze() * 10000
    # BUG FIX: the original used chained indexed assignment
    # (x['session'][a:b] = v) which raises SettingWithCopyWarning and
    # silently stops working under pandas copy-on-write; write through
    # positional .iloc instead.
    session_col = x.columns.get_loc('session')
    end = 0
    session = base_session
    while end < len(x):
        if std > 0:
            new_end = end + int(np.random.normal(mean, std))
        else:
            new_end = end + int(mean)
        if new_end > len(x):
            # Not enough keystrokes left for a full session: drop the tail.
            x.iloc[end:new_end, session_col] = -1
            break
        else:
            x.iloc[end:new_end, session_col] = session
            # Discard skip_chars keystrokes between consecutive sessions.
            x.iloc[new_end:new_end + skip_chars, session_col] = -1
            session += 1
            end = new_end + skip_chars
    x = x[x['session'] >= 0]
    x = x.set_index('session', append=True)
    return x
def preprocess_greyc_nislab(in_file, out_file):
    """
    Preprocess the raw GREYC NISLAB dataset.

    Reads the five sheets of the Excel benchmark, expands each row's packed
    timing vector into per-keystroke press/release times, and writes a CSV
    indexed by (user, session) with the canonical COLS columns.
    """
    # BUG FIX: read_excel's keyword is sheet_name; the old sheetname
    # spelling was removed in pandas 1.0.
    df = pd.concat([pd.read_excel(in_file, sheet_name=0),
                    pd.read_excel(in_file, sheet_name=1),
                    pd.read_excel(in_file, sheet_name=2),
                    pd.read_excel(in_file, sheet_name=3),
                    pd.read_excel(in_file, sheet_name=4)])
    df = df[df['Class'] == 2]
    df['age'] = (df['Age'] < 30).map({True: '<30', False: '>=30'})
    df['gender'] = df['Gender'].map({'F': 'female', 'M': 'male'})
    df['handedness'] = df['Handedness'].map({'L': 'left', 'R': 'right'})
    df['session'] = np.arange(len(df))
    df['password'] = df['Password'].map({
        'leonardo dicaprio': 1,
        'the rolling stones': 2,
        'michael schumacher': 3,
        'red hot chilli peppers': 4,
        'united states of america': 5,
    })

    def preprocess_row(idx_row):
        # Expand one benchmark row into a per-keystroke DataFrame.
        idx, row = idx_row
        keyname = list(map(lambda x: 'space' if x == ' ' else x, list(row['Password'])))
        v = np.array(row['Keystroke Template Vector'].strip().split()).astype(int) // 10000
        s = len(keyname) - 1
        # The packed vector holds four interleaved timing series:
        # press-press, release-release, press-release, release-press.
        pp, rr, pr, rp = [v[s * i:s * (i + 1)] for i in range(4)]
        timepress = np.r_[0, pp].cumsum()
        # Offset the first release time by the duration of the first key
        timerelease = np.r_[rp[0] - rr[0], rr].cumsum()
        # There are ~180 rows where timerelease == timepress.
        # Fix these by assuming at least the minimum standard clock resolution
        timerelease[timerelease == timepress] += 16
        # BUG FIX: pd.DataFrame.from_items was removed in pandas 1.0;
        # a dict constructor preserves insertion order and broadcasts
        # the scalar fields across the per-keystroke arrays.
        sample = pd.DataFrame({
            'user': row['User_ID'],
            'session': row['session'],
            'password': row['password'],
            'age': row['age'],
            'gender': row['gender'],
            'handedness': row['handedness'],
            'timepress': timepress,
            'timerelease': timerelease,
            'keyname': keyname
        })
        return sample

    df = pd.concat(map(preprocess_row, df.iterrows()))
    df = df.set_index(['user', 'session'])[COLS]
    df = remove_repeated_keys(df)
    df.to_csv(out_file)
    return
def preprocess_citefa(in_file, out_file):
    """
    Preprocess the raw CITEFA dataset.

    Extracts the tarball, pairs key-down/key-up events into keystrokes,
    keeps only the sentence-copy tasks, and writes a CSV indexed by
    (user, session) with 10 samples per user.
    """
    import tempfile
    import shutil
    import tarfile
    from glob import glob
    from operator import itemgetter
    from keycode import lookup_key, detect_agent

    tdir = tempfile.mkdtemp()
    tfile = tarfile.open(in_file, 'r:gz')
    tfile.extractall(tdir)
    dfs = []
    for fname in glob(os.path.join(tdir, '*', '*')):
        with open(fname) as f:
            lines = f.readlines()
        header = lines[0]
        agent = detect_agent(header)
        fields = header.split(';')
        age = '<30' if int(fields[7]) < 30 else '>=30'
        gender = 'male' if fields[8] == 'Male' else 'female'
        handedness = 'right' if fields[9] == 'right-handed' else 'left'
        # rows contain the keypress/keyrelease actions, need to convert to keystrokes
        key_actions = [row.strip().split() for row in lines if ('dn' in row) or ('up' in row)]
        # parse the ints
        key_actions = [(i1, int(i2), i3, int(i4)) for i1, i2, i3, i4 in key_actions]
        key_actions = sorted(key_actions, key=itemgetter(1))
        keystrokes = []
        keys_down = {}
        for task, time, action, keycode in key_actions:
            if action == 'dn':
                if keycode in keys_down.keys():
                    print('Warning: key pressed twice without release (probably auto repeated while held down)')
                    continue
                keys_down[keycode] = time
            elif action == 'up':
                if keycode not in keys_down.keys():
                    print('Warning: key released without first being pressed', time, keycode)
                    continue
                keystrokes.append((task, keys_down[keycode], time, lookup_key(keycode, agent)))
                del keys_down[keycode]
            else:
                raise Exception('Unknown action')
        task, timepress, timerelease, keyname = zip(*keystrokes)
        # BUG FIX: pd.DataFrame.from_items was removed in pandas 1.0; the
        # dict constructor preserves column order and broadcasts scalars.
        dfs.append(pd.DataFrame({
            'user': fields[4],
            'session': int(fields[2]),
            'age': age,
            'gender': gender,
            'handedness': handedness,
            'task': task,
            'timepress': timepress,
            'timerelease': timerelease,
            'keyname': keyname
        }))
    shutil.rmtree(tdir)
    df = pd.concat(dfs)
    # Keep only the sentence copy tasks. See Bello 2010
    # BUG FIX: a missing comma between 'ks_11' and 'ks_12' concatenated them
    # into the single (never-matching) string 'ks_11ks_12', silently dropping
    # both tasks from the dataset.
    df = df[df['task'].isin(
        {'ks_00', 'ks_01', 'ks_02', 'ks_03', 'ks_04', 'ks_05',
         'ks_06', 'ks_07', 'ks_08', 'ks_09', 'ks_10',
         'ks_11', 'ks_12', 'ks_13', 'ks_14'})]
    df['session'] = df['session'] * 100 + df['task'].str[3:].astype(int)
    df = df.set_index(['user', 'session'])
    df = remove_repeated_keys(df)
    df = reduce_dataset(df, min_samples=10, max_samples=10)
    df.to_csv(out_file)
    return
def preprocess_villani(in_file, out_file, long_fixed_out_file):
    """
    Preprocess the raw Villani dataset and extend the long fixed dataset.

    Writes the Villani free-text samples to *out_file* and appends the
    Villani fixed-text samples (re-sessioned to match the long_fixed session
    lengths) to the existing long_fixed dataset at *long_fixed_out_file*.
    """
    df = pd.read_csv(in_file, index_col=[0, 1])
    # Make age a binary target, <30 and >=30
    df['age'] = df['agegroup'].map({
        'under20': '<30',
        '20-29': '<30',
        '30-39': '>=30',
        '40-49': '>=30',
        '50-59': '>=30',
        'over60': '>=30'}
    )
    # Ignore missing data
    df = df.dropna()
    df = remove_repeated_keys(df)
    # combine the villani fixed text with citefa dataset fixed text
    long_fixed = load_data('long_fixed')
    # Per-(user, session) sample sizes; their mean/std drive make_sessions.
    slf = long_fixed.groupby(level=[0, 1]).size()
    villani_fixed = df[df['inputtype'] == 'fixed']
    villani_fixed = villani_fixed.groupby(level=[0, 1]).apply(lambda x: make_sessions(x, slf.mean(), slf.std()))
    villani_fixed = villani_fixed.reset_index(level=[0, 1], drop=True)
    villani_fixed = reduce_dataset(villani_fixed, min_samples=10, max_samples=10)
    long_fixed = pd.concat([long_fixed, villani_fixed])
    long_fixed = long_fixed[COLS]
    long_fixed.to_csv(long_fixed_out_file)
    # Free-text input only
    villani_free = df[df['inputtype'] == 'free']
    villani_free = villani_free.groupby(level=[0, 1]).apply(lambda x: make_sessions(x, slf.mean(), slf.std()))
    villani_free = villani_free.reset_index(level=[0, 1], drop=True)
    villani_free = reduce_dataset(villani_free, min_samples=10, max_samples=10)
    villani_free = villani_free[COLS]
    villani_free.to_csv(out_file)
    return
def remove_repeated_keys(df):
    """Drop keystrokes whose press time is not after the previous release
    of the same key within each (user, session) sample (e.g. auto-repeat)."""
    def process_sample(x):
        kept = []
        release_time = {}
        for _, row in x.iterrows():
            key = row['keyname']
            # A press at or before the key's last release is a repeat: skip it.
            if key in release_time and row['timepress'] <= release_time[key]:
                continue
            release_time[key] = row['timerelease']
            kept.append(row)
        rebuilt = pd.concat(kept, axis=1).T
        rebuilt.index.names = ['user', 'session']
        return rebuilt

    df = df.groupby(level=[0, 1]).apply(process_sample).reset_index(level=[2, 3], drop=True)
    return df
def preprocess():
    """
    Download and preprocess datasets for the experiments.

    Fetches the raw GREYC NISLAB, CITEFA, and Villani datasets into
    DOWNLOAD_DIR (skipping files already present) and writes the processed
    long_fixed / long_free CSVs into DATA_DIR.
    """
    import urllib.request
    import urllib.error

    def download_dataset(name, local_name, url):
        # Fetch url into DOWNLOAD_DIR/local_name unless it already exists;
        # a failed download is a warning, not a fatal error.
        if os.path.exists(os.path.join(DOWNLOAD_DIR, local_name)):
            print('Already downloaded %s' % name)
            return
        try:
            print('Downloading %s' % name)
            urllib.request.urlretrieve(url, os.path.join(DOWNLOAD_DIR, local_name))
        except urllib.error.HTTPError as e:
            print('WARNING: Unable to download %s from URL:\n%s' % (name, url))
            print('Check that the URL is correct and you have permissions to download the file.')

    # Download both datasets
    download_dataset('GREYC NISLAB Dataset', 'greyc_nislab.xls', GREYC_NISLAB_DATASET_URL)
    download_dataset('CITAFA Dataset', 'citefa.tar.gz', CITEFA_DATASET_URL)
    download_dataset('Villani Dataset', 'villani.csv', VILLANI_DATASET_URL)
    # This creates the short fixed dataset
    # preprocess_greyc_nislab(os.path.join(DOWNLOAD_DIR, 'greyc_nislab.xls'),
    #                         os.path.join(DATA_DIR, 'short_fixed.csv'))
    # This creates the long fixed dataset
    preprocess_citefa(os.path.join(DOWNLOAD_DIR, 'citefa.tar.gz'),
                      os.path.join(DATA_DIR, 'long_fixed.csv'))
    # This creates the long free dataset and extends the previous long fixed dataset
    preprocess_villani(os.path.join(DOWNLOAD_DIR, 'villani.csv'),
                       os.path.join(DATA_DIR, 'long_free.csv'),
                       os.path.join(DATA_DIR, 'long_fixed.csv'))
    return
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.