import importlib
import game.bitboard as bitop
import game.util as util
def run(module_name_1, module_name_2, N=1000):
m1 = importlib.import_module(module_name_1)
m2 = importlib.import_module(module_name_2)
modules = [m1, m2]
wins = [0, 0]
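    # wins[k] accumulates points for modules[k]: 1 per win, 0.5 per draw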
for i in range(N):
arr = util.initial_setup()
p1, p2, obs = bitop.array_to_bits(arr)
first = modules[i % 2]
second = modules[1 - i % 2]
turn = 0
while not bitop.is_terminated(p1, p2, obs):
if turn == 0:
if bitop.generate_moves(p1, p2, obs):
move_idx = first.run(p1, p2, obs)
assert bitop.generate_moves(p1, p2, obs) & (1 << move_idx)
p1, p2 = bitop.resolve_move(p1, p2, move_idx)
else:
if bitop.generate_moves(p2, p1, obs):
move_idx = second.run(p2, p1, obs)
assert bitop.generate_moves(p2, p1, obs) & (1 << move_idx)
p2, p1 = bitop.resolve_move(p2, p1, move_idx)
turn ^= 1
res = bitop.evaluate(p1, p2, obs)
if res > 0:
wins[i % 2] += 1
elif res == 0:
wins[0] += 0.5
wins[1] += 0.5
else:
wins[1 - i % 2] += 1
print(wins)
return wins
if __name__ == '__main__':
wins = run('engines.mcts', 'neural.nn', 100)
print(wins)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from sklearn import naive_bayes
from models.binary_classifier import BinaryClassifier
class GaussianNB(BinaryClassifier, naive_bayes.GaussianNB):
pass
|
#!/usr/bin/env python3
# Write a Shannon entropy calculator: H = -sum(pi * log(pi))
# Use fileinput to get the data from nucleotides.txt
# Make sure that the values are probabilities
# Make sure that the distribution sums to 1
# Report with 3 decimal figures
"""
python3 entropy.py nucleotides.txt
1.846
"""
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from pyspark import SparkContext, SparkConf
def create_mutual_friends(line):
person = line[0].strip()
friends = line[1]
if(person != ''):
person = int(person)
final_friend_values = []
for friend in friends:
friend = friend.strip()
if(friend != ''):
friend = int(friend)
if(int(friend) < int(person)):
val = (str(friend)+","+str(person),set(friends))
else:
val = (str(person)+","+str(friend),set(friends))
final_friend_values.append(val)
return(final_friend_values)
def cal_length(line):
key = line[0]
value = len(line[1])
return(key,value)
def split_user(line):
friend_pair = line[0]
no_of_friends = line[1]
friend_pair = friend_pair.split(",")
return(friend_pair[0],(friend_pair[1],no_of_friends))
def userdata_format(line):
line = line.split(",")
return(line[0],(line[1],line[2],line[3]))
def user_data_format1(line):
user1 = line[0]
user2 = line[1][0][0]
no_of_friends = line[1][0][1]
user1_data = line[1][1]
return( user2,((user1,no_of_friends),user1_data) )
def final_map(line):
no_of_friends = str(line[1][0][0][1])
user1_data = line[1][0][1]
user2_data = line[1][1]
user1_firstname = user1_data[0]
user1_lastname = user1_data[1]
user1_address = user1_data[2]
user2_firstname = user2_data[0]
user2_lastname = user2_data[1]
user2_address = user2_data[2]
return("{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}".format(no_of_friends,user1_firstname,user1_lastname,user1_address,user2_firstname,user2_lastname,user2_address))
if __name__ == "__main__":
config = SparkConf().setAppName("mutualfriends").setMaster("local[2]")
sc = SparkContext(conf = config)
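    # Pipeline: emit a canonical "minId,maxId" key for every friend pair along
    # with each user's friend set, intersect the sets to count mutual friends,
    # keep the 10 pairs with the most mutual friends, then join in both users'
    # details from userdata.txt and write a tab-separated report.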
mutual_friends = sc.textFile("soc-LiveJournal1Adj.txt")
lines_split = mutual_friends.map(lambda x : x.split("\t")).filter(lambda x : len(x) == 2).map(lambda x: [x[0],x[1].split(",")])
mutual_friends1 = lines_split.flatMap(create_mutual_friends)
reducerdd = mutual_friends1.reduceByKey(lambda x,y: x.intersection(y))
lengthrdd = reducerdd.map(cal_length)
sortedmap = lengthrdd.takeOrdered(10, key = lambda x: -x[1])
sortedmapper = sc.parallelize(sortedmap)
friends1 = sortedmapper.map(split_user)
userdata = sc.textFile("userdata.txt")
formatedddata = userdata.map(userdata_format)
joinrdd = friends1.join(formatedddata)
joinrdd_format = joinrdd.map(user_data_format1)
joinrdd1 = joinrdd_format.join(formatedddata)
finalrdd = joinrdd1.map(final_map)
finalrdd.coalesce(1).saveAsTextFile("q2_output.txt")
|
# Generated by Django 2.2.1 on 2019-05-07 17:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('portalapp', '0007_loggedissue'),
]
operations = [
migrations.AddField(
model_name='loggedissue',
name='url',
field=models.URLField(default='https://github.com', unique=True),
),
migrations.AlterField(
model_name='loggedissue',
name='commit_id',
field=models.CharField(max_length=40, unique=True),
),
]
|
import base64
base64_message = input("Enter base64 String: ")
base64_bytes = base64_message.encode('ascii')
message_bytes = base64.b64decode(base64_bytes)
message = message_bytes.decode('ascii')
print("Decoded Message: " + message)
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class timeSheetEntry(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
start = models.DateTimeField()
end = models.DateTimeField()
time = models.IntegerField()
comment = models.TextField()
completed = models.BooleanField()
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 23 19:24:29 2020
@author: user
"""
import torch.nn as nn
class Encoder(nn.Module):
def __init__(self, isize, nz, nc, ndf, ngpu, n_extra_layers=0, add_final_conv=True):
super(Encoder,self).__init__()
self.ngpu = ngpu
        assert isize % 16 == 0, "isize has to be a multiple of 16"
main = nn.Sequential()
main.add_module('initial-conv-{0}-{1}'.format(nc, ndf),
nn.Conv2d(nc, ndf, 4, 2, 1, bias=False))
main.add_module('initial-relu-{0}'.format(ndf),
nn.LeakyReLU(0.2, inplace=True))
csize, cndf = isize / 2, ndf
for t in range(n_extra_layers):
            main.add_module('extra-layers-{0}-{1}-conv'.format(t, cndf),
                            nn.Conv2d(cndf, cndf, 3, 1, 1, bias=False))
main.add_module('extra-layers-{0}-{1}-batchnorm'.format(t, cndf),
nn.BatchNorm2d(cndf))
main.add_module('extra-layers-{0}-{1}-relu'.format(t, cndf),
nn.LeakyReLU(0.2, inplace=True))
while csize > 4 :
in_feat = cndf
out_feat = cndf*2
main.add_module('pyramid-{0}-{1}-conv'.format(in_feat,out_feat),
nn.Conv2d(in_feat,out_feat,4,2,1,bias = False))
main.add_module('pyramid-{0}-batchnorm'.format(out_feat),
nn.BatchNorm2d(out_feat))
main.add_module('pyramid-{0}-relu'.format(out_feat),
nn.LeakyReLU(0.2, inplace=True))
cndf = cndf * 2
csize = csize / 2
if add_final_conv:
main.add_module('final-{0}-{1}-conv'.format(cndf, 1),
nn.Conv2d(cndf, nz, 4, 1, 0, bias=False))
self.main = main
def forward(self, input):
if self.ngpu > 1:
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
return output
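# Note: with add_final_conv=True the Encoder maps (N, nc, isize, isize) to a
# latent code of shape (N, nz, 1, 1), halving the spatial size at each pyramid
# level until it reaches 4 and then applying the final 4x4 convolution.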
class Decoder(nn.Module):
def __init__ (self, isize, nz, nc, ngf, ngpu, n_extra_layers=0):
|
#[LeetCode] 130. Surrounded Regions_Medium tag: DFS/BFS
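# A hedged sketch of the standard DFS approach to LeetCode 130 (an assumed
# solution, not code from the original note): every 'O' region that touches
# the border survives, so mark those cells first, then flip the rest to 'X'.
class Solution:
    def solve(self, board):
        if not board or not board[0]:
            return
        rows, cols = len(board), len(board[0])

        def dfs(r, c):
            # mark border-connected 'O' cells with a sentinel '#'
            if 0 <= r < rows and 0 <= c < cols and board[r][c] == 'O':
                board[r][c] = '#'
                dfs(r + 1, c)
                dfs(r - 1, c)
                dfs(r, c + 1)
                dfs(r, c - 1)

        for r in range(rows):
            dfs(r, 0)
            dfs(r, cols - 1)
        for c in range(cols):
            dfs(0, c)
            dfs(rows - 1, c)
        # unmarked 'O's are surrounded and captured; '#' cells are restored
        for r in range(rows):
            for c in range(cols):
                board[r][c] = 'O' if board[r][c] == '#' else 'X'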
|
_ID_LIST = {'Server' :
(({'name':'Server', 'id':int()}), ({'name':'channel', 'id':int()})),
### ^ Dictionary is set up in this format, actual servers omitted for privacy purposes
}
_SERVER = 'Server'
default_guild = _ID_LIST[_SERVER][0]
default_channel = _ID_LIST[_SERVER][1]
limit = 100
|
# Generated by Django 3.0.7 on 2020-06-19 09:45
from django.db import migrations
def fill_new_admin_field(apps, schema_editor):
Restaurant = apps.get_model("foodcartapp", "Restaurant")
for restaurant in Restaurant.objects.all():
restaurant.new_admin = restaurant.admin.user
restaurant.save()
class Migration(migrations.Migration):
dependencies = [
("foodcartapp", "0016_restaurant_new_admin"),
]
operations = [
migrations.RunPython(fill_new_admin_field),
]
|
import pdfx
import os
import sys
from os import walk
import csv
import git
# [filter(lambda item: "docker" in item or "github" in item or "pdf" in item, link_list) for link_list in l]
def main():
#path = "/mnt/c/Users/Fjona/Desktop/2018-2019/UROP/icse/2018/pdfs/ICSE2018-7hDWfdAOTaSxSTuYmZ7C9S/73vOGjBkf23NHodSgD3DR2/"
path = "/mnt/c/Users/Fjona/Desktop/2018-2019/UROP/icse/2018/pdfs/ICSE2018-7hDWfdAOTaSxSTuYmZ7C9S/"
#path = "/mnt/c/Users/Fjona/Desktop/2018-2019/UROP"
#https://weakdh.org/imperfect-forward-secrecy.pdf"
# papers = ["1OWvuIr0ODMO3SiUdEDKEV.pdf"]
# links = extractURLsFromPDFs(path, papers)
list_of_pdfs = getRecursiveFilenames(path, '.pdf')
#print(list_of_pdfs)
pdf_links = extractURLsFromPDFs(list_of_pdfs)
writeLinksCSV("paper_links.csv", pdf_links)
def extractURLsFromPDFs(papers):
links_in_papers = {}
i = 1
for paper in papers:
sys.stderr.write(str(i) + " extracted: " + paper + "\n")
i+= 1
try:
pdf = pdfx.PDFx(paper)
set_of_urls = pdf.get_references()
list_of_urls = []
for e in set_of_urls:
list_of_urls.append(e.ref)
links_in_papers[paper] = list_of_urls
        except UnicodeDecodeError:
            sys.stderr.write("This file has a UnicodeDecodeError: " + paper + "\n")
return links_in_papers
def writeLinksCSV(filename, pdfLinks):
    # download_dir = filename  # where you want the file to be downloaded to
    csv_file = open(filename, "w")  # renamed so the csv module is not shadowed
    # "w" indicates that you're writing strings to the file
    # columnTitleRow = "name, email\n"
    # csv_file.write(filename)
    i = 1
    for paper, links in pdfLinks.items():
        row = paper + "," + ",".join(links) + ",\n"
        try:
            csv_file.write(row)
            sys.stderr.write(str(i) + " written: " + paper + "\n")
            i += 1
        except UnicodeError:
            sys.stderr.write("This file has a Unicode error: " + paper + "\n")
    csv_file.close()
# Sample call: getRecursiveFilenames(path, ".pdf")
# Return: List of PDF files with complete path
def getRecursiveFilenames(path, suffix):
pdf_files = []
for (dirpath, dirnames, files) in walk(path):
for name in files:
if name.lower().endswith(suffix):
pdf_files.append(os.path.join(dirpath, name))
return pdf_files
def get_GitHub_links(links):
"""Gets the github links with username and repo"""
general_links = [filter(lambda item: "docker" in item or "github" in item or "pdf" in item, link_list) for link_list in links]
gh_links = [filter(lambda item: "http://github.com/" in item, link) for link in general_links]
return gh_links
def get_paper_titles(links):
    # body missing in the source; left as a stub
    pass
def attempt_build():
    # metadata = pdf.get_metadata()
    # references_list =
    # references_dict = pdf.get_references_as_dict()
    pass
def build_docker(links):
for link in links:
repo = git.Repo.clone_from(link,os.path.join(rw_dir,'repo'),branch="master")
docker_paths = repo.get_paths('Dockerfile')
for dockerfile in docker_paths:
os.chdir(dockerfile)
            # remainder of the loop body is truncated in the source
if __name__ == '__main__':
    main()
|
"""
Most codes from https://github.com/carpedm20/DCGAN-tensorflow
"""
from __future__ import division
import math
import random
import pprint
import scipy.misc
import numpy as np
from time import gmtime, strftime
from six.moves import xrange
import os, gzip
import csv
from PIL import Image
from random import randint
import cv2
from skimage.transform import rotate
import tensorflow as tf
import tensorflow.contrib.slim as slim
def load_mnist(dataset_name):
data_dir = os.path.join("./data", dataset_name)
def extract_data(filename, num_data, head_size, data_size):
with gzip.open(filename) as bytestream:
bytestream.read(head_size)
buf = bytestream.read(data_size * num_data)
data = np.frombuffer(buf, dtype=np.uint8).astype(np.float)
return data
data = extract_data(data_dir + '/train-images-idx3-ubyte.gz', 60000, 16, 28 * 28)
trX = data.reshape((60000, 28, 28, 1))
data = extract_data(data_dir + '/train-labels-idx1-ubyte.gz', 60000, 8, 1)
trY = data.reshape((60000))
data = extract_data(data_dir + '/t10k-images-idx3-ubyte.gz', 10000, 16, 28 * 28)
teX = data.reshape((10000, 28, 28, 1))
data = extract_data(data_dir + '/t10k-labels-idx1-ubyte.gz', 10000, 8, 1)
teY = data.reshape((10000))
trY = np.asarray(trY)
teY = np.asarray(teY)
X = np.concatenate((trX, teX), axis=0)
y = np.concatenate((trY, teY), axis=0).astype(np.int)
seed = 547
np.random.seed(seed)
np.random.shuffle(X)
np.random.seed(seed)
np.random.shuffle(y)
y_vec = np.zeros((len(y), 10), dtype=np.float)
    for i, label in enumerate(y):
        y_vec[i, label] = 1.0
return X / 255., y_vec
def check_folder(log_dir):
if not os.path.exists(log_dir):
os.makedirs(log_dir)
return log_dir
def show_all_variables():
model_vars = tf.trainable_variables()
slim.model_analyzer.analyze_vars(model_vars, print_info=True)
# refer to M-net. img must width=height
def transform_to_Polar(img):
polar_img = cv2.linearPolar(img, (img.shape[0] / 2, img.shape[1] / 2),
img.shape[0] / 2, cv2.WARP_FILL_OUTLIERS)
rotated = rotate(polar_img,-90)
return rotated
# refer to M-net. img must width=height
def transform_from_Polar(polared_img):
origin_img = cv2.linearPolar(rotate(polared_img, 90), (polared_img.shape[0] / 2, polared_img.shape[1] / 2),
polared_img.shape[0] / 2, cv2.WARP_FILL_OUTLIERS + cv2.WARP_INVERSE_MAP)
return origin_img
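# Round trip: transform_from_Polar(transform_to_Polar(img)) approximately
# recovers a square img, up to interpolation loss from the polar remapping.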
def get_image(image_path, grayscale=False,polar_transform=False):
image = imread(image_path, grayscale)
if polar_transform:
#Remaps an image to polar coordinates space.
image = transform_to_Polar(image)
return np.array(image)/127.5 - 1.
#save sample_num images into one image
def save_images(images, size, image_path):
return imsave(inverse_transform(images), size, image_path)
def imread(path, grayscale = False):
if (grayscale):
return scipy.misc.imread(path, flatten = True).astype(np.float)
else:
return scipy.misc.imread(path).astype(np.float)
def merge_images(images, size):
return inverse_transform(images)
def merge(images, size):
h, w = images.shape[1], images.shape[2]
if (images.shape[3] in (3,4)):
c = images.shape[3]
img = np.zeros((h * size[0], w * size[1], c))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w, :] = image
return img
elif images.shape[3]==1:
img = np.zeros((h * size[0], w * size[1]))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w] = image[:,:,0]
return img
else:
raise ValueError('in merge(images,size) images parameter ''must have dimensions: HxW or HxWx3 or HxWx4')
def imsave(images, size, path):
image = np.squeeze(merge(images, size))
return scipy.misc.imsave(path, image)
def center_crop(x, crop_h, crop_w, resize_h=64, resize_w=64):
if crop_w is None:
crop_w = crop_h
h, w = x.shape[:2]
j = int(round((h - crop_h)/2.))
i = int(round((w - crop_w)/2.))
return scipy.misc.imresize(x[j:j+crop_h, i:i+crop_w], [resize_h, resize_w])
def transform(image, input_height, input_width, resize_height=64, resize_width=64, crop=True):
if crop:
cropped_image = center_crop(image, input_height, input_width, resize_height, resize_width)
else:
cropped_image = scipy.misc.imresize(image, [resize_height, resize_width])
return np.array(cropped_image)/127.5 - 1.
def inverse_transform(images):
return (images+1.)/2.
""" Drawing Tools """
# borrowed from https://github.com/ykwon0407/variational_autoencoder/blob/master/variational_bayes.ipynb
# def save_scattered_image(z, id, z_range_x, z_range_y, name='scattered_image.jpg'):
# N = 10
# plt.figure(figsize=(8, 6))
# plt.scatter(z[:, 0], z[:, 1], c=np.argmax(id, 1), marker='o', edgecolor='none', cmap=discrete_cmap(N, 'jet'))
# plt.colorbar(ticks=range(N))
# axes = plt.gca()
# axes.set_xlim([-z_range_x, z_range_x])
# axes.set_ylim([-z_range_y, z_range_y])
# plt.grid(True)
# plt.savefig(name)
# borrowed from https://gist.github.com/jakevdp/91077b0cae40f8f8244a
# def discrete_cmap(N, base_cmap=None):
# """Create an N-bin discrete colormap from the specified input map"""
#
# # Note that if base_cmap is a string or None, you can simply do
# # return plt.cm.get_cmap(base_cmap, N)
# # The following works for string, None, or a colormap instance:
#
# base = plt.cm.get_cmap(base_cmap)
# color_list = base(np.linspace(0, 1, N))
# cmap_name = base.name + str(N)
# return base.from_list(cmap_name, color_list, N)
###
#the rest codes are created by langyuan mo
###
def conv_out_size_same(size, stride):
"""calculate the output size with SAME padding(conv)"""
return int(math.ceil(float(size) / float(stride)))
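# e.g. conv_out_size_same(28, 2) == 14 and conv_out_size_same(7, 2) == 4,
# mirroring TensorFlow's SAME-padding rule for stride-2 convolutions.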
def resize_images_from_folder(folder_path,size,save_path):
"""
resize images from a folder and save resized images to another folder.
:param folder_path: string
:param size: (h,w)
:param save_path: string
:return: None
"""
if os.path.exists(folder_path):
images_path = os.listdir(folder_path)
else:
        print('folder path does not exist.')
return
if not os.path.exists(save_path):
os.mkdir(save_path)
count = 0
for image in images_path:
count+=1
if count % 10 == 0:
            print('processed %d images' % count)
image_path = os.path.join(folder_path,image)
output_path = os.path.join(save_path,image)
try:
resize_and_save_image(image_path,size,output_path)
except Exception:
print(image_path+' failed')
def resize_and_save_image(image_path, size, save_path):
"""resize an image and save it"""
img = scipy.misc.imread(image_path)
resize_img = scipy.misc.imresize(img,size)
scipy.misc.imsave(save_path,resize_img)
# def crop_images_from_folder(folder_path,save_path_disk,save_path_cup,info_path,disk_size,cup_size):
def crop_images_from_folder(folder_path, save_path_disk, info_path, disk_size):
"""
    crop the dr images and extract the optic disc region (cup extraction is
    currently commented out).
    :param folder_path: origin images folder
    :param save_path_disk: saved folder for disk images
    :param info_path: information about the disc and cup regions of each image
    :param disk_size: resize target for the disk crops
:return: None
"""
if os.path.exists(folder_path):
images_path = os.listdir(folder_path)
else:
        print('folder path does not exist.')
return
if not os.path.exists(save_path_disk):
os.mkdir(save_path_disk)
# if not os.path.exists(save_path_cup):
# os.mkdir(save_path_cup)
with open(info_path,'r') as f:
content = csv.reader(f)
crop_dict = {}
for row in content:
image_name = row[0]
crop_regions = [[int(row[1]),int(row[2]),int(row[3]),int(row[4])],[int(row[5]),int(row[6]),int(row[7]),int(row[8])]] #disk,cup
#extend regions
crop_regions = adjust_disk_size(crop_regions)
disk_score = float(row[9])
cup_score = float(row[10])
if disk_score == 1.0 and cup_score == 1.0:
crop_dict[image_name] = crop_regions
count = 0
for image in images_path:
count+=1
if count % 10 == 0:
            print('processed %d images' % count)
image_path = os.path.join(folder_path,image)
output_path_disk = os.path.join(save_path_disk,image)
# output_path_cup = os.path.join(save_path_cup, image)
if image in crop_dict:
crop_regions = crop_dict[image]
else:
continue
try:
img = Image.open(image_path)
disk_img = img.crop(crop_regions[0])
#disk_img = img.crop(crop_regions[0]).resize(disk_size)
# cup_img = img.crop(crop_regions[1]).resize(cup_size)
disk_img.save(output_path_disk)
# cup_img.save(output_path_cup)
except Exception as e:
print(e)
print(image_path+' failed')
def adjust_disk_size(crop_regions):
"""extend optic disc regions, make them contain the surrounding space"""
extend_min = 100
extend_max = 150
x1 = crop_regions[0][0] - randint(extend_min,extend_max)
y1 = crop_regions[0][1] - randint(extend_min,extend_max)
x2 = crop_regions[0][2] + randint(extend_min,extend_max)
y2 = crop_regions[0][3] + randint(extend_min,extend_max)
#make size W==H
if x2 - x1 == y2 - y1:
pass
elif x2 - x1 > y2 - y1:
add = (x2 - x1) - (y2 - y1)
if add % 2 == 0:
            add_size = add // 2  # integer half so the coordinates stay ints
y1 = y1 - add_size
y2 = y2 + add_size
else:
add_size1 = add//2
add_size2 = add//2 + 1
y1 = y1 - add_size1
y2 = y2 + add_size2
else:
add = (y2 - y1) - (x2 - x1)
if add % 2 == 0:
            add_size = add // 2  # integer half so the coordinates stay ints
x1 = x1 - add_size
x2 = x2 + add_size
else:
add_size1 = add//2
add_size2 = add//2 + 1
x1 = x1 - add_size1
x2 = x2 + add_size2
crop_regions[0][0] = x1
crop_regions[0][1] = y1
crop_regions[0][2] = x2
crop_regions[0][3] = y2
return crop_regions
#
#load data directly. (abandoned now)
#
def load_dr_images(dataset):
""" load the dataset 'Diabetic Retinopathy' """
if dataset == 'dr-disc-256':
pass
if dataset == 'dr-disk':
folder_path = '/home/molangyuan/Dataset/dr_data/dr_disk'
elif dataset == 'dr-128':
folder_path = '/home/molangyuan/Dataset/dr_data/128_dr_images'
elif dataset == 'dr-256':
folder_path = '/home/molangyuan/Dataset/dr_data/256_dr_images'
elif dataset == 'dr-512':
folder_path = '/home/molangyuan/Dataset/dr_data/512_dr_images'
elif dataset == 'dr-1024':
folder_path = '/home/molangyuan/Dataset/dr_data/1024_dr_images'
else:
folder_path = '/home/molangyuan/Dataset/dr_data/256_dr_images'
label_path = './trainLabels.csv'
if os.path.exists(folder_path):
images_path = os.listdir(folder_path)
else:
        print('dr dataset folder path does not exist.')
return
#data x
images = np.array([scipy.misc.imread(os.path.join(folder_path,filename)) for filename in images_path])
#normalization
images = images/127.5 - 1.
with open(label_path,'r') as f:
content = csv.reader(f)
head_row = next(content) #remove header
label_dict = {}
for row in content:
image_name, label = row[0], row[1]
_label = np.zeros((5, )) #one-hot vector
_label[int(label)] = 1.0
label_dict[image_name] = _label
#label y
labels = np.array([label_dict[filename.split('.')[0]] for filename in images_path])
return (images, labels)
def get_dr_labels(label_path,x_list):
"""load dr images' level labels(0,1,2,3,4) from path"""
with open(label_path, 'r') as f:
content = csv.reader(f)
head_row = next(content) # remove header
label_dict = {}
for row in content:
image_name, label = row[0], row[1]
_label = np.zeros((5,)) # one-hot vector
_label[int(label)] = 1.0
label_dict[image_name] = _label
# label y
labels = np.array([label_dict[filename.split('.jp')[0].split('/')[-1]] for filename in x_list])
return labels
|
from __future__ import division
from __future__ import print_function
import sys
import os
import torch
from torch.autograd import Variable
from collections import OrderedDict
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
import torch.utils.model_zoo as model_zoo
from torchvision import datasets, transforms
from loadModelMNIST import *
from ProjectImageHandler import *
classes = [0,1,2,3,4,5,6,7,8,9]
def get_test_acc(model,test_loader):
correct = 0
total = 0
model.eval()
for data in test_loader:
images, labels = data
images, labels = Variable(images).cuda(), Variable(labels).cuda()
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels.data).sum()
return correct / total
def predictImageLabel(model,image):
#Note: image = Variable(tensorImage.cuda())
output = model(image)
_,idx = torch.max(output,1)
label = classes[idx.data[0]]
return label
if __name__ == "__main__":
BATCH_SIZE = 32
model = loadRandom().cuda()
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]);
testset = torchvision.datasets.MNIST(root='./data', train=False,
download=True, transform=transform)
test_loader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE,
shuffle=False, num_workers=2)
test_acc = get_test_acc(model,test_loader)
print('test_acc %.5f' % (test_acc))
print("\n")
for i in range(0,10):
tensorImage,label = testset[i]
print("Testing image with label " + str(label))
image = Variable(tensorImage.cuda())
pred = predictImageLabel(model,image)
print("Prediction: " + str(pred))
print("\n\nRandomly rotating images prior to predictions:")
ih = ProjectImageHandler()
for i in range(10,20):
tensorImage,label = testset[i]
print("Testing image with label " + str(label))
tensorImage = ih.unnormalizeTensor(tensorImage)
tensorImage = ih.randomRotateTensor(tensorImage)
tensorImage = ih.normalizeTensor(tensorImage)
image = Variable(tensorImage.cuda())
pred = predictImageLabel(model,image)
print("Prediction: " + str(pred))
|
# coding=UTF-8
import math
import operator
lista = [2, 6, 4, 7, -2]
if all(i % 2 == 0 for i in lista):
    print("All elements are even.")
else:
    print("Some element is odd.")
print("")
if not any(i <= 0 for i in lista):
    print("All elements are positive.")
else:
    print("Some element is not positive.")
print("")
lista1 = [1, 4, 9, 16, 25]
lista2 = map(math.sqrt, lista1)
print(list(lista2))
print("")
print(any(map(operator.eq, [1, 2, 3, 4, 5], [1, 4, 2, 3, 5])))
|
import cv2
import numpy as np
img = cv2.imread("./Desktop/OpenCV/Basics/hand.jpg")
# img.shape returns a tuple of three values: the height (number of rows),
# the width (number of columns), and the number of colour channels (3 for BGR)
print(img.shape)
print("Height of Image is:", int(img.shape[0]), "pixels")
print("Width of Image is:", int(img.shape[1]), "pixels")
|
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, HttpResponseRedirect, HttpResponse
from .models import UserProfile
from .forms import UserForm
from neighborhood.models import Neighborhood, House
from feed.forms import AnnouncementForm
def user_login(request):
"""
---USER_LOGIN---
:param request:
"""
if request.method == 'POST':
# get username and password from input text
username = request.POST['username_text']
password = request.POST['password_text']
# authenticate the credentials
user = authenticate(username=username, password=password)
# if we have a user the authentication was successful
if user:
# Is the account active? It could have been disabled.
if user.is_active:
# If the account is valid and active, we can log the user in.
# We'll send the user back to the homepage.
login(request, user)
return HttpResponseRedirect('/neighborhood/home/')
else:
# An inactive account was used - no logging in!
return HttpResponse("Your Neighbors account is disabled.")
else:
# Bad login details were provided. So we can't log the user in.
return HttpResponse("The username and password you have entered do not match any account.")
# The request is not a HTTP POST, so display the login form.
# This scenario would most likely be an HTTP GET.
else:
# No context variables to pass to the template system.
return render(request, 'accounts/login.html', {})
def demo_login(request):
    user = authenticate(username="demouser", password="demopass")
    if user:
        # Is the account active? It could have been disabled.
        if user.is_active:
            # If the account is valid and active, we can log the user in.
            # We'll send the user back to the homepage.
            login(request, user)
            return HttpResponseRedirect('/neighborhood/home/')
        else:
            # An inactive account was used - no logging in!
            return HttpResponse("Your Neighbors account is disabled.")
    # Without a valid demo account the view must still return a response.
    return HttpResponse("The demo account is not available.")
@login_required
def user_logout(request):
"""
---USER LOGOUT---
:param request:
"""
logout(request)
return HttpResponseRedirect('/')
def register_user(request):
"""
---REGISTER USER---
:param request:
"""
registered = False
if request.method == 'POST':
# Using forms to collect new user data
user_form = UserForm(request.POST)
if user_form.is_valid():
neighborhood = Neighborhood.objects.get(division_title=request.POST['neighborhood_text'])
            # .first() yields a House instance (or None); a QuerySet cannot be
            # assigned to the ForeignKey on UserProfile below
            house = House.objects.filter(neighborhood=neighborhood,
                                         permission_code=request.POST['perm_code_text']).first()
            if house:
# We have checked that the forms are valid now save the user
user = user_form.save()
# Now we hash the password with the set_password method.
# Once hashed, we can update the user object.
user.set_password(user.password)
user.save()
user_profile = UserProfile(user=user, house=house)
user_profile.save()
return HttpResponseRedirect('/')
else:
# no house object was returned, invalid info provided
return HttpResponse("The neighborhood and permission code you have entered do not match any existing neighborhood.")
# Not a HTTP POST, so we render our form using two ModelForm instances.
# These forms will be blank, ready for user input.
else:
user_form = UserForm()
# Render the template depending on the context.
return render(request, 'accounts/register.html', {'user_form': user_form,
'registered': registered})
"""
---USER PROFILE---
"""
@login_required
def user_profile(request):
user_prof = request.user.userprofile
board_permissions = user_prof.is_board_member()
announcement_form = AnnouncementForm()
return render(request, 'accounts/profile.html', {'house': user_prof,
'board_permissions': board_permissions,
'announcement_form': announcement_form})
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-04-05 06:02
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('water_watch_api', '0007_auto_20180404_1952'),
]
operations = [
migrations.CreateModel(
name='SensorMaintenanceHistory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('dateTime', models.DateTimeField()),
('status', models.CharField(choices=[('ACTIVE', 'active'), ('INACTIVE', 'inactive'), ('MAINTENANCE', 'maintenance')], default='ACTIVE', max_length=15)),
('sensor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='water_watch_api.Sensor')),
],
options={
'db_table': 'sensor_maintenance_history',
'verbose_name_plural': 'sensor maintenance history',
'ordering': ['id', 'dateTime'],
},
),
migrations.AddField(
model_name='station',
name='current_status',
field=models.CharField(choices=[('ACTIVE', 'active'), ('INACTIVE', 'inactive'), ('MAINTENANCE', 'maintenance')], default='ACTIVE', max_length=15),
),
]
|
# Read one of the digits A~F as input
num = input()
num = int(num, 16)
for i in range(1, 16):
print('%X' % num, '*%X' % i, '=%X' % (num * i), sep='')
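# e.g. entering "A" prints A*1=A, A*2=14, ..., A*F=96 (all values in hex)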
|
__author__ = 'YY20'
|
# Exercise 01: Write a program that computes the sum and the product of the elements of a list.
my_list = [0, 1, 2, 3, 8, 5, 6, 7, 8, 9]
tong = 0   # running sum
tich = 1   # running product
for i in range(len(my_list)):
    tong += my_list[i]
    tich *= my_list[i]
print(tong)
print(tich)
|
def display(hash_table):
for i in range(len(hash_table)):
print(i, end = " ")
for j in hash_table[i]:
print("->", end = " ")
print(j, end = " ")
print()
def hash(key):
return key % len(hash_table)
def insert(hash_table, key, value):
hash_key = hash(key)
hash_table[hash_key].append(value)
def find(hash_table, key):
hash_key = hash(key)
print(hash_table[hash_key])
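# Demo of separate chaining: 50, 30 and 10 all hash to bucket 0 (key % 10),
# so that bucket chains Apple -> Banana -> Raspberry.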
hash_table = [[] for _ in range(10)]
insert(hash_table, 50, 'Apple')
insert(hash_table, 25, 'Orange')
insert(hash_table, 10, 'Banana')
insert(hash_table, 4, 'Kiwi')
insert(hash_table, 30, 'Raspberry')
insert(hash_table, 38, 'Blueberry')
display(hash_table)
find(hash_table, 10)
|
# TODO: Authenticate with username and password to access an account
# TODO: Keep the balance up to date in a JSON file
# TODO: Keep a history of deposits and withdrawals and allow generating a detailed statement
def menu_deposito():
    return float(input('Enter the deposit amount: R$ '))
def menu_saque():
    return float(input('Enter the withdrawal amount: R$ '))
def iniciar_caixa_eletronico():
    saldo = 0
    # FIXME: Check whether the user wants to exit the system
    while True:
        print('Available options:')
        print('1 - Deposit')
        print('2 - Withdrawal')
        opcao = int(input('Enter the desired option: '))
        if opcao == 1:
            saldo = fazer_deposito(saldo)
        elif opcao == 2:
            saldo = fazer_saque(saldo)
        print('Your balance is: {}'.format(saldo))
        print('------------------------')
def fazer_saque(saldo):
    # TODO: Return the number of bills of each denomination to the user
    # TODO: Track inside the program how many bills of each denomination are available
    valor_sacado = menu_saque()
    if valor_sacado <= saldo:
        saldo -= valor_sacado
        print('R$ {} withdrawn!'.format(valor_sacado))
    else:
        print('Amount unavailable for withdrawal!')
    return saldo
def fazer_deposito(saldo):
    valor_depositado = menu_deposito()
    saldo += valor_depositado
    print('R$ {} deposited!'.format(valor_depositado))
    return saldo
if __name__ == '__main__':
    print('+-----------------------------+')
    print('|        Program: ATM         |')
    print('+-----------------------------+')
    iniciar_caixa_eletronico()
|
import jsonlines
import sys
import os
from shutil import copyfile
import argparse
import pdb
import pickle
import numpy as np
from nltk.tokenize import sent_tokenize
from tqdm import tqdm
sys.path.append('/data/rsg/nlp/darsh/aggregator/crawl_websites/NUT/')
from gather_annotations import _perform_tagging, _read_entity_file
from get_baseline_performances import _get_entities, read_embeddings,\
_compare_causes, _compare_contains
def get_phrase_embeddings(string, embeddings):
    representation = np.zeros(50)  # 50-dim accumulator (a plain list would be extended, not summed)
word_list = []
if ' ' in string:
word_list = string.split()
else:
word_list = [string]
for word in word_list:
if word in embeddings:
representation += embeddings[word]
norm = np.linalg.norm(representation)
if norm != 0.0:
representation /= norm
return np.array(representation)
def produce_re_outputs(input_file_path,\
model="t3_causes"):
copyfile(input_file_path,"T4_causes.jsonl")
os.chdir("/data/rsg/nlp/darsh/pytorch-pretrained-BERT")
os.system("python examples/run_re.py --task_name "\
"re_task --do_eval --do_lower_case --data_dir /data/rsg/nlp/darsh"\
"/aggregator/crawl_websites/NUT/ --bert_model bert-base-uncased "\
"--max_seq_length 128 --train_batch_size 32 --learning_rate 5e-5"\
" --num_train_epochs 3.0 --output_dir "\
+ model + " --eval_batch_size 32 --output_preds")
os.chdir("/data/rsg/nlp/darsh/aggregator/crawl_websites/NUT")
results_file = jsonlines.open("/data/rsg/nlp/darsh/"\
"pytorch-pretrained-BERT/"+model+"/preds.jsonl","r")
results = []
for r in results_file:
results.append(r)
return results
def produce_causes(input_file_path, model="T4_redo_causes2"):
copyfile(input_file_path,"T4_dev_causes.jsonl")
os.chdir("/data/rsg/nlp/darsh/pytorch-pretrained-BERT")
os.system("python examples/run_causes.py --task_name "\
"re_task --do_eval --do_lower_case --data_dir /data/rsg/nlp/darsh"\
"/aggregator/crawl_websites/NUT/ --bert_model bert-base-uncased "\
"--max_seq_length 128 --train_batch_size 32 --learning_rate 5e-5"\
" --num_train_epochs 3.0 --output_dir "\
+ model + " --eval_batch_size 32 --output_preds")
os.chdir("/data/rsg/nlp/darsh/aggregator/crawl_websites/NUT")
results_file = jsonlines.open("/data/rsg/nlp/darsh/"\
"pytorch-pretrained-BERT/"+model+"/preds.jsonl","r")
results = []
for r in results_file:
results.append(r)
return results
def create_entity_annotations_file_ind(sentences, index, file_path):
output_file = open(file_path, "w")
for sentence in sent_tokenize(sentences[index]):
words = sentence.split()
for word in words:
output_file.write(word + " O\n")
output_file.write("\n")
output_file.close()
def create_entity_annotations_file(sentences, file_path):
output_file = open(file_path, "w")
for sentence in sentences:
words = sentence.split()
for word in words:
output_file.write(word + " O\n")
output_file.write("\n")
output_file.close()
def get_sentence_entities(sentences, labels):
sentence_entities = {}
for sentence,label_str in zip(sentences,labels):
sentence_entities[sentence] = {}
for entity_type in ['Food', 'Condition', 'Nutrition']:
sentence_entities[sentence][entity_type] \
= _get_entities(sentence.split(),label_str.split(),entity_type)
return sentence_entities
def create_relation_annotations_file(sentence_entities, file_substr):
f_causes = jsonlines.open(file_substr + "_causes.jsonl","w")
f_contains = jsonlines.open(file_substr + "_contains.jsonl","w")
contains_dictionary = []
causes_dictionary = []
for sentence in tqdm(sentence_entities):
considered = set()
for x in sentence_entities[sentence]['Food']\
+ sentence_entities[sentence]['Nutrition']:
for y in sentence_entities[sentence]['Nutrition']:
if tuple([x,y]) in considered:
continue
considered.add(tuple([x,y]))
if x == y:
continue
if x not in sentence or y not in sentence:
continue
assert x in sentence
assert y in sentence
modified_text = sentence.replace(x, ' <X> ' + x + ' </X> ')
modified_text = modified_text.replace(y, ' <Y> ' + y + ' </Y> ')
if x not in modified_text or y not in modified_text:
continue
x_index = modified_text.index(x)
y_index = modified_text.index(y)
min_index= min(x_index,y_index)
max_index= max(x_index,y_index)
#if modified_text[min_index:max_index].count(" ") > 45:
#print(modified_text[min_index:max_index].count(" "))
# continue
#modified_text = modified_text[min_index-30:max_index+30]
dict = {'sentence':modified_text, 'gold_label':'None', 'uid':0,\
'original_sentence':sentence}
contains_dictionary.append(dict)
f_contains.write(dict)
for x in sentence_entities[sentence]['Food']\
+ sentence_entities[sentence]['Nutrition']\
+ sentence_entities[sentence]['Condition']:
for y in sentence_entities[sentence]['Condition']:
if tuple([x,y]) in considered:
continue
considered.add(tuple([x,y]))
if x == y:
continue
if x not in sentence or y not in sentence:
continue
modified_text = sentence.replace(x, ' <X> ' + x + ' </X> ')
modified_text = modified_text.replace(y, ' <Y> ' + y + ' </Y> ')
if x not in modified_text or y not in modified_text:
continue
x_index = modified_text.index(x)
y_index = modified_text.index(y)
#if modified_text[min_index:max_index].count(" ") > 45:
#print(modified_text[min_index:max_index].count(" "))
# continue
#min_index= min(x_index,y_index)
#max_index= max(x_index,y_index)
#if modified_text[min_index:max_index].count(" ") > 45:
# continue
#modified_text = modified_text[min_index-30:max_index+30]
dict = {'sentence':modified_text, 'gold_label':'None', 'uid':0,\
'original_sentence':sentence}
causes_dictionary.append(dict)
f_causes.write(dict)
f_contains.close()
f_causes.close()
return causes_dictionary, contains_dictionary
def get_predicted_relations(input_list, output_list):
sentence_relations = {}
for input,output in zip(input_list,output_list):
original_sentence = input['original_sentence']
relation_sentence = input['sentence']
x_string = relation_sentence[relation_sentence.find("<X>")+3:\
relation_sentence.find("</X>")].strip()
y_string = relation_sentence[relation_sentence.find("<Y>")+3:\
relation_sentence.find("</Y>")].strip()
pred_label = output['pred_label']
if pred_label != 'None':
sentence_relations[original_sentence] = \
sentence_relations.setdefault(original_sentence,[]) +\
[[x_string,y_string,pred_label]]
return sentence_relations
def evaluate_relations(gold_sentences, machine_sentences, gold_all_rels,\
machine_all_rels, embeddings):
recalls = []
precisions = []
f1s = []
missing_list = []
for gold,machine in zip(gold_sentences,machine_sentences):
missing_dict = {}
missing_dict['sentence'] = gold
missing_dict['output'] = machine
missing_dict['missing_rels'] = []
missing_dict['matching_rels']= []
gold_relations = gold_all_rels.get(gold,[])
machine_relations = machine_all_rels.get(machine,[])
matching_relations = [_compare_causes(g_r,machine_relations,embeddings,"")\
for g_r in gold_relations]
num_matching = sum(matching_relations)
considered_rels= set()
for matched,g_r in zip(matching_relations,gold_relations):
if tuple(g_r[:2]) in considered_rels:
continue
else:
considered_rels.add(tuple(g_r[:2]))
if not matched:
missing_dict['missing_rels'].append(g_r)
else:
missing_dict['matching_rels'].append(g_r)
if len(gold_relations) > 0:
recalls.append(num_matching/len(gold_relations))
#recalls.append(num_matching/len(considered_rels))
else:
continue
if len(machine_relations) > 0:
precisions.append(num_matching/len(machine_relations))
else:
assert len(gold_relations) > 0
precisions.append(0)
den = 1 if precisions[-1]+recalls[-1] == 0 else\
precisions[-1] + recalls[-1]
num = 2 * precisions[-1] * recalls[-1]
f1s.append(num/den)
missing_list.append(missing_dict)
return recalls, precisions, f1s, missing_list
def evaluate_entities(all_output_entities, all_target_entities, embeddings):
accuracies = []
for output_entities,target_entities in zip(all_output_entities,\
all_target_entities):
new_output_entities = []
for output_entity in output_entities:
if output_entity.strip()[-1].isalnum():
new_output_entities.append(output_entity.strip())
else:
new_output_entities.append(output_entity.strip()[:-1])
output_entities = new_output_entities
if len(output_entities) == 0 or len(target_entities) == 0:
accuracies.append(0)
continue
output_representations = np.stack([get_phrase_embeddings(output_entity,\
embeddings) for output_entity in output_entities],axis=0)
target_representations = np.stack([get_phrase_embeddings(target_entity,\
embeddings) for target_entity in target_entities],axis=0)
max_products = np.max(np.dot(output_representations,\
target_representations.transpose()),axis=1)
#for output_entity,max_product in zip(output_entities,max_products):
#if max_product <= 0.7:
# pdb.set_trace()
accuracies.append(sum([x>0.7 for x in max_products])/\
len(output_entities))
return accuracies
def compare_results(args):
file_names = []
gold_sentences = []
machine_sentences = []
if args.file_path is not None:
input_reader = jsonlines.open(args.file_path, "r")
input_instances = []
gold_string = 'gold'
output_string='output'
for r in input_reader:
gold_sentences.append(r[gold_string].replace('\n',' '))
machine_sentences.append(r[output_string].replace('\n',' '))
input_instances.append(r)
else:
gold_lines = open(args.gold, "r").readlines()
output_lines = open(args.output, "r").readlines()
for out,gold in zip(output_lines,gold_lines):
machine_sentences.append(out.strip())
gold_sentences.append(gold.strip())
machine_r_sentences = []
machine_labels = []
gold_r_sentences = []
gold_labels = []
#for i in range(len(input_instances)):
# create_entity_annotations_file_ind(machine_sentences, i,\
# "/data/rsg/nlp/darsh/aggregator/crawl_websites/NUT/model_sentences.txt")
# create_entity_annotations_file_ind(gold_sentences, i,\
# "/data/rsg/nlp/darsh/aggregator/crawl_websites/NUT/gold_sentences.txt")
# _perform_tagging("demo.model_evaluation_decode.config")
# _perform_tagging("demo.gold_evaluation_decode.config")
# machine_r_sentence, machine_label = _read_entity_file(\
# "/data/rsg/nlp/darsh/aggregator/crawl_websites"\
# "/NUT/model_sentences_out.txt")
# gold_r_sentence, gold_label = _read_entity_file(\
# "/data/rsg/nlp/darsh/aggregator/crawl_websites"\
# "/NUT/gold_sentences_out.txt")
# machine_r_sentences.append(" ".join(machine_r_sentence).strip())
# machine_labels.append(" ".join(machine_label).strip())
# gold_r_sentences.append(" ".join(gold_r_sentence).strip())
# gold_labels.append(" ".join(gold_label).strip())
create_entity_annotations_file(machine_sentences,\
"/data/rsg/nlp/darsh/aggregator/crawl_websites/NUT/model_sentences.txt")
create_entity_annotations_file(gold_sentences,\
"/data/rsg/nlp/darsh/aggregator/crawl_websites/NUT/gold_sentences.txt")
_perform_tagging("demo.model_evaluation_decode.config")
_perform_tagging("demo.gold_evaluation_decode.config")
machine_r_sentences, machine_labels = _read_entity_file(\
"/data/rsg/nlp/darsh/aggregator/crawl_websites"\
"/NUT/model_sentences_out.txt")
gold_r_sentences, gold_labels = _read_entity_file(\
"/data/rsg/nlp/darsh/aggregator/crawl_websites"\
"/NUT/gold_sentences_out.txt")
machine_sentence_entities = get_sentence_entities(machine_r_sentences, \
machine_labels)
machine_nutrition_entities= []
machine_condition_entities= []
file_name_m_conditions = {}
file_name_m_nutritions = {}
file_name_condition_counts= {}
file_name_nutrition_counts= {}
for m_s, m_l, f_n in zip(machine_r_sentences, machine_labels, file_names):
n_e = _get_entities(m_s.split(),m_l.split(),'Nutrition')
c_e = _get_entities(m_s.split(),m_l.split(),'Condition')
file_name_m_conditions[f_n] = file_name_m_conditions.setdefault(f_n,[]\
) + c_e
file_name_m_nutritions[f_n] = file_name_m_nutritions.setdefault(f_n,[]\
) + n_e
file_name_condition_counts[f_n] = len(set(file_name_m_conditions[f_n]))
file_name_nutrition_counts[f_n] = len(set(file_name_m_nutritions[f_n]))
machine_nutrition_entities.append(n_e)
machine_condition_entities.append(c_e)
gold_sentence_entities = get_sentence_entities(gold_r_sentences,\
gold_labels)
machine_causes_input, machine_contains_input = \
create_relation_annotations_file(machine_sentence_entities,"model")
gold_causes_input, gold_contains_input = \
create_relation_annotations_file(gold_sentence_entities,"gold")
machine_causes_output = produce_re_outputs("model_causes.jsonl",\
"t3_causes")
machine_contains_output = produce_re_outputs("model_contains.jsonl",\
"t3_contains")
gold_causes_output = produce_re_outputs("gold_causes.jsonl",\
"t3_causes")
gold_contains_output = produce_re_outputs("gold_contains.jsonl",\
"t3_contains")
machine_causes = get_predicted_relations(machine_causes_input,\
machine_causes_output)
machine_contains = get_predicted_relations(machine_contains_input,\
machine_contains_output)
gold_causes = get_predicted_relations(gold_causes_input,\
gold_causes_output)
gold_contains = get_predicted_relations(gold_contains_input,\
gold_contains_output)
embeddings = read_embeddings()
recalls, precision, f1s, missing_list = \
evaluate_relations(gold_sentences, machine_sentences,\
gold_causes, machine_causes, embeddings)
analysis_file = jsonlines.open(args.analysis_output,'w')
for missing_dict in missing_list:
analysis_file.write(missing_dict)
if len(recalls) > 0:
print(sum(recalls)/len(recalls))
else:
print(0)
recalls, precision, f1s, missing_list = \
evaluate_relations(machine_sentences, gold_sentences,\
machine_causes, gold_causes, embeddings)
if len(recalls) > 0:
print(sum(recalls)/len(recalls))
else:
print(0)
analysis_file.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run sentiment evaluation')
parser.add_argument('--file_path',default=None)
parser.add_argument('--output',default='a.txt')
parser.add_argument('--gold',default='b.txt')
parser.add_argument('--analysis_output',default='kg_analysis.jsonl')
args = parser.parse_args()
    compare_results(args)
|
# -*- coding: utf-8 -*-
import docutils.core
# All information about reStructuredText is here
# http://docutils.sourceforge.net/docs/user/rst/quickref.html
rest = '''
=======
Heading
=======
SubHeading
----------
This is just a simple
little subsection. Now,
we'll show a bulleted list:
- item one
- item two
- item three
'''
html = docutils.core.publish_string(source=rest, writer_name='html').decode('utf-8')
print(html[html.find('<body>') + 6:html.find('</body>')])
|
from test.core.derivatives.implementation.base import DerivativesImplementation
import torch
from backpack.hessianfree.hvp import hessian_vector_product
from backpack.hessianfree.lop import transposed_jacobian_vector_product
from backpack.hessianfree.rop import jacobian_vector_product
class AutogradDerivatives(DerivativesImplementation):
"""Derivative implementations with autograd."""
def jac_vec_prod(self, vec):
input, output, _ = self.problem.forward_pass(input_requires_grad=True)
return jacobian_vector_product(output, input, vec)[0]
def jac_mat_prod(self, mat):
V = mat.shape[0]
vecs = [mat[v] for v in range(V)]
jac_vec_prods = [self.jac_vec_prod(vec) for vec in vecs]
return torch.stack(jac_vec_prods)
def jac_t_vec_prod(self, vec):
input, output, _ = self.problem.forward_pass(input_requires_grad=True)
return transposed_jacobian_vector_product(output, input, vec)[0]
def jac_t_mat_prod(self, mat):
V = mat.shape[0]
vecs = [mat[v] for v in range(V)]
jac_t_vec_prods = [self.jac_t_vec_prod(vec) for vec in vecs]
return torch.stack(jac_t_vec_prods)
def weight_jac_t_mat_prod(self, mat, sum_batch):
return self.param_jac_t_mat_prod("weight", mat, sum_batch)
def bias_jac_t_mat_prod(self, mat, sum_batch):
return self.param_jac_t_mat_prod("bias", mat, sum_batch)
def param_jac_t_vec_prod(self, name, vec, sum_batch):
input, output, named_params = self.problem.forward_pass()
param = named_params[name]
if sum_batch:
return transposed_jacobian_vector_product(output, param, vec)[0]
else:
N = input.shape[0]
sample_outputs = [output[n] for n in range(N)]
sample_vecs = [vec[n] for n in range(N)]
jac_t_sample_prods = [
transposed_jacobian_vector_product(n_out, param, n_vec)[0]
for n_out, n_vec in zip(sample_outputs, sample_vecs)
]
return torch.stack(jac_t_sample_prods)
def param_jac_t_mat_prod(self, name, mat, sum_batch):
V = mat.shape[0]
vecs = [mat[v] for v in range(V)]
jac_t_vec_prods = [
self.param_jac_t_vec_prod(name, vec, sum_batch) for vec in vecs
]
return torch.stack(jac_t_vec_prods)
def weight_jac_mat_prod(self, mat):
return self.param_jac_mat_prod("weight", mat)
def bias_jac_mat_prod(self, mat):
return self.param_jac_mat_prod("bias", mat)
def param_jac_vec_prod(self, name, vec):
input, output, named_params = self.problem.forward_pass()
param = named_params[name]
return jacobian_vector_product(output, param, vec)[0]
def param_jac_mat_prod(self, name, mat):
V = mat.shape[0]
vecs = [mat[v] for v in range(V)]
jac_vec_prods = [self.param_jac_vec_prod(name, vec) for vec in vecs]
return torch.stack(jac_vec_prods)
def ea_jac_t_mat_jac_prod(self, mat):
def sample_jac_t_mat_jac_prod(sample_idx, mat):
assert len(mat.shape) == 2
def sample_jac_t_mat_prod(sample_idx, mat):
sample, output, _ = self.problem.forward_pass(
input_requires_grad=True, sample_idx=sample_idx
)
result = torch.zeros(sample.numel(), mat.size(1), device=sample.device)
for col in range(mat.size(1)):
column = mat[:, col].reshape(output.shape)
result[:, col] = transposed_jacobian_vector_product(
[output], [sample], [column], retain_graph=True
)[0].reshape(-1)
return result
jac_t_mat = sample_jac_t_mat_prod(sample_idx, mat)
mat_t_jac = jac_t_mat.t()
jac_t_mat_t_jac = sample_jac_t_mat_prod(sample_idx, mat_t_jac)
jac_t_mat_jac = jac_t_mat_t_jac.t()
return jac_t_mat_jac
N = self.problem.input.shape[0]
input_features = self.problem.input.shape.numel() // N
result = torch.zeros(input_features, input_features).to(self.problem.device)
for n in range(N):
result += sample_jac_t_mat_jac_prod(n, mat)
return result / N
def hessian(self, loss, x):
"""Return the Hessian matrix of a scalar `loss` w.r.t. a tensor `x`.
Arguments:
loss (torch.Tensor): A scalar-valued tensor.
x (torch.Tensor): Tensor used in the computation graph of `loss`.
Shapes:
loss: `[1,]`
x: `[A, B, C, ...]`
Returns:
torch.Tensor: Hessian tensor of `loss` w.r.t. `x`. The Hessian has shape
`[A, B, C, ..., A, B, C, ...]`.
"""
assert loss.numel() == 1
vectorized_shape = (x.numel(), x.numel())
final_shape = (*x.shape, *x.shape)
hessian_vec_x = torch.zeros(vectorized_shape).to(loss.device)
num_cols = hessian_vec_x.shape[1]
for column_idx in range(num_cols):
unit = torch.zeros(num_cols).to(loss.device)
unit[column_idx] = 1.0
unit = unit.view_as(x)
column = hessian_vector_product(loss, [x], [unit])[0].reshape(-1)
hessian_vec_x[:, column_idx] = column
return hessian_vec_x.reshape(final_shape)
def elementwise_hessian(self, tensor, x):
"""Yield the Hessian of each element in `tensor` w.r.t `x`.
Hessians are returned in the order of elements in the flattened tensor.
"""
for t in tensor.flatten():
yield self.hessian(t, x)
def tensor_hessian(self, tensor, x):
"""Return the Hessian of a tensor `tensor` w.r.t. a tensor `x`.
Given a `tensor` of shape `[A, B, C]` and another tensor `x` with shape `[D, E]`
used in the computation of `tensor`, the generalized Hessian has shape
[A, B, C, D, E, D, E]. Let `hessian` denote this generalized Hessian. Then,
`hessian[a, b, c]` contains the Hessian of the scalar entry `tensor[a, b, c]`
        w.r.t. `x`.
Arguments:
tensor (torch.Tensor): An arbitrary tensor.
x (torch.Tensor): Tensor used in the computation graph of `tensor`.
Returns:
torch.Tensor: Generalized Hessian of `tensor` w.r.t. `x`.
"""
shape = (*tensor.shape, *x.shape, *x.shape)
return torch.cat(list(self.elementwise_hessian(tensor, x))).reshape(shape)
def hessian_is_zero(self):
"""Return whether the input-output Hessian is zero.
Returns:
bool: `True`, if Hessian is zero, else `False`.
"""
input, output, _ = self.problem.forward_pass(input_requires_grad=True)
zero = None
for hessian in self.elementwise_hessian(output, input):
if zero is None:
zero = torch.zeros_like(hessian)
if not torch.allclose(hessian, zero):
return False
return True
def input_hessian(self):
"""Compute the Hessian of the module output w.r.t. the input."""
input, output, _ = self.problem.forward_pass(input_requires_grad=True)
return self.hessian(output, input)
def sum_hessian(self):
"""Compute the Hessian of a loss module w.r.t. its input."""
hessian = self.input_hessian()
return self._sum_hessian_blocks(hessian)
def _sum_hessian_blocks(self, hessian):
"""Sum second derivatives over the batch dimension.
Assert second derivative w.r.t. different samples is zero.
"""
input = self.problem.input
num_axes = len(input.shape)
if num_axes != 2:
raise ValueError("Only 2D inputs are currently supported.")
N = input.shape[0]
num_features = input.numel() // N
sum_hessian = torch.zeros(num_features, num_features, device=input.device)
hessian_different_samples = torch.zeros(
num_features, num_features, device=input.device
)
for n_1 in range(N):
for n_2 in range(N):
block = hessian[n_1, :, n_2, :]
if n_1 == n_2:
sum_hessian += block
else:
assert torch.allclose(block, hessian_different_samples)
return sum_hessian
|
#!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
#import _init_paths
from fast_rcnn.config import cfg, cfg_from_file
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
from contextlib import contextmanager
import shutil
from subprocess import Popen, PIPE
import shlex
import tempfile
import re
import time
import fcntl
from timeit import default_timer as timer
from osgeo import gdal
CLASSES = ('__background__',
'ship',
'fast_ship')
NETS = {'vgg16': ('VGG16',
'VGG16_faster_rcnn_final.caffemodel'),
'vgg': ('VGG_CNN_M_1024',
'VGG_faster_rcnn_final.caffemodel'),
'zf': ('ZF',
'ZF_faster_rcnn_final.caffemodel')}
def fileno(file_or_fd):
fd = getattr(file_or_fd, 'fileno', lambda: file_or_fd)()
if not isinstance(fd, int):
raise ValueError("Expected a file (`.fileno()`) or a file descriptor")
return fd
def vis_detections(im, class_name, dets, thresh=0.5):
"""Draw detected bounding boxes."""
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
return
im = im[:, :, (2, 1, 0)]
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im, aspect='equal')
for i in inds:
bbox = dets[i, :4]
score = dets[i, -1]
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=3.5)
)
ax.text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
ax.set_title(('{} detections with '
'p({} | box) >= {:.1f}').format(class_name, class_name,
thresh),
fontsize=14)
plt.axis('off')
plt.tight_layout()
plt.draw()
def demo(net, im_file):
"""Detect object classes in an image using pre-computed object proposals."""
# compute the file offset from the name
iterEx = re.compile(".*?_(\d+)_(\d+)\.jpg$")
itIter = iterEx.findall(im_file)
if len(itIter) > 0:
xoff = int(itIter[0][0])
yoff = int(itIter[0][1])
else:
print("Bad Filename " + im_file + ". No offsets! Skipping file")
return []
# Load the demo image as gray scale
gim = cv2.imread(im_file, flags= cv2.CV_LOAD_IMAGE_GRAYSCALE)
# convert to rgb repeated in each channel
im = cv2.cvtColor(gim, cv2.COLOR_GRAY2BGR)
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
scores, boxes = im_detect(net, im)
timer.toc()
print ('Detection took {:.3f}s for '
'{:d} object proposals').format(timer.total_time, boxes.shape[0])
# Visualize detections for each class
CONF_THRESH = 0.6
NMS_THRESH = 0.3
res = []
for cls_ind, cls in enumerate(CLASSES[1:]):
cls_ind += 1 # because we skipped background
cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets, NMS_THRESH)
dets = dets[keep, :]
inds = np.where(dets[:,-1] >= CONF_THRESH)[0]
for i in inds:
bbox = dets[i, :4]
score = dets[i, -1]
res.append(cls + " {0} {1} {2} {3}".format(xoff + bbox[0], yoff + bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]))
return res
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='Faster R-CNN demo')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--cpu', dest='cpu_mode',
help='Use CPU mode (overrides --gpu)',
action='store_true')
parser.add_argument('--net', dest='demo_net', help='Network to use [zf]',
choices=NETS.keys(), default='vgg')
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default=None, type=str)
parser.add_argument('--model', dest='model_file',
help='caffe model file',
default=None, type=str)
parser.add_argument('--proto', dest='proto_file',
help='caffe prototext file',
default=None, type=str)
parser.add_argument('--split', dest='split_size',
help='width && height for split up images',
action='store', type=int)
parser.add_argument('--tiles', dest='tile_path',
help='image tile output path',
default=None, type=str)
parser.add_argument('file', help="Image file or dir to process",
type=str)
args = parser.parse_args()
return args
def split_up_file(fname, tempDir, splitSize, maxCnt):
dset = gdal.Open(fname)
width = dset.RasterXSize
height = dset.RasterYSize
baseName = os.path.basename(fname)
tName = os.path.join(tempDir, baseName)
fileList = []
cnt = 1
nname, ext = os.path.splitext(fname)
#Here we assume tif files are 8 bit and ntf are 16 bit
if ext.lower() == '.tif':
bitSize = 8
else:
bitSize = 16
for i in range(0, width, splitSize):
for j in range(0, height, splitSize):
if maxCnt > 0 and cnt > maxCnt:
return fileList
cnt += 1
w = min(i+splitSize, width) - i
h = min(j+splitSize, height) - j
xoff = i
yoff = j
if w < splitSize:
xoff = i - (splitSize - w)
if xoff < 0:
xoff = 0
if h < splitSize:
yoff = j - (splitSize - h)
if yoff < 0:
yoff = 0
tempName = tName + "_" + str(i) + "_" + str(j) + ".jpg"
print("spliting up " + tempName)
with timeout(6):
if bitSize == 16:
transStr = "/home/trbatcha/tools/bin/gdal_translate -of JPEG -ot Byte -scale 64 1024 0 255 -b 1 -srcwin " + str(xoff) + " " + str(yoff) + \
" " + str(splitSize) + " " + str(splitSize) + " " + fname + " " + tempName
else:
transStr = "/home/trbatcha/tools/bin/gdal_translate -of JPEG -ot Byte -b 1 -srcwin " + str(xoff) + " " + str(yoff) + \
" " + str(splitSize) + " " + str(splitSize) + " " + fname + " " + tempName
#result = subprocess.check_output([transStr], shell=True)
args = shlex.split(transStr)
p = Popen(args, stdout=PIPE, stderr=PIPE)
try:
print("calling gdal_translate")
stdout, stderr = p.communicate()
print("gdal_translate complete")
fileList.append(tempName)
print (stderr)
print (stdout)
sys.stdout.flush()
except IOError, e:
if e.errno != errno.EINTR:
raise e
print("Timeout: gdal_translate for image " + \
tempName + " w {0} h {1}".
format(width, height))
#get rid of xml file gdal_translate creates
xmlfile = tempName + ".aux.xml"
if os.path.exists(xmlfile):
os.remove(tempName + ".aux.xml")
return fileList
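# Worked example (sketch) of the edge clamping above: width=1000, splitSize=300
# yields column starts i = 0, 300, 600, 900. At i=900 only w=100 pixels remain,
# so xoff = 900 - (300 - 100) = 700 and the tile still spans a full 300 pixels,
# ending exactly at the right image edge.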
def doWriteToHDFS(dirname, fname) :
basename = os.path.basename(fname)
hname = os.path.join(dirname, basename)
put = Popen(["hdfs", "dfs", "-put", fname, hname],
stdout=PIPE, stderr = PIPE)
stdout, stderr = put.communicate()
print stderr
return hname
def mergeTiles(src, dst):
img1 = mpimg.imread(src)
img2 = mpimg.imread(dst)
img = np.maximum(img1, img2)
mpimg.imsave(dst, img)
def moveTiles(src, dst):
files = os.listdir(src)
if not os.path.exists(dst):
os.makedirs(dst)
# chmod of dirs
for p,d,f in os.walk(dst):
os.chmod(p, 0o777)
for f in files:
sname = os.path.join(src, f)
dname = os.path.join(dst, f)
if os.path.isdir(sname):
moveTiles(sname, dname)
else:
fname, ext = os.path.splitext(dname)
# Currently only moving the tiles since the
# tilemapresource.xml is not being used by leaflet.
# TODO: merge the tilemapresource.xml files by
# reading the xml and updating the bounding box, and
# x,y of the tiles.
if os.path.exists(dname) == True and ext == '.png':
mergeTiles(sname, dname)
#i = 0;
#dname2 = dname + str(i)
#while os.path.exists(dname2) == True:
# i += 1
# dname2 = dname + str(i)
#shutil.move(sname, dname2)
#os.chmod(dname, 0o666)
pass
elif ext == '.png':
shutil.move(sname, dname)
os.chmod(dname, 0o666)
def parseRectStr(rectStr):
items = rectStr.split(' ')
# the first item is the class which we will ignore
x = int(round(float(items[1])))
y = int(round(float(items[2])))
w = int(round(float(items[3])))
h = int(round(float(items[4])))
print("pared rect {0},{1},{2},{3}".format(x,y,w,h))
return x,y,w,h
import signal, errno
from contextlib import contextmanager
@contextmanager
def timeout(seconds):
def timeout_handler(signum, frame):
pass
orig_handler = signal.signal(signal.SIGALRM, timeout_handler)
try:
signal.alarm(seconds)
yield
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, orig_handler)
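# Usage sketch for the timeout() context manager above (illustrative only).
# The SIGALRM handler is a no-op, so the alarm merely interrupts a blocking
# syscall with EINTR; callers catch that explicitly, as done elsewhere in
# this file:
#
#   with timeout(5):
#       try:
#           stdout, stderr = proc.communicate()
#       except IOError, e:
#           if e.errno != errno.EINTR:
#               raise e
#           print("communicate() timed out")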
#
# We create the tile in a temp directory and then move it to its final
# destination.
#
def writeTilesFromDetects(tileDir, detects, origFile):
if len(detects) == 0:
return
tempTileDir = tempfile.mkdtemp(dir='/home/trbatcha/tempDir')
outputDir = os.path.join(tempTileDir, "output")
if not os.path.exists(tempTileDir):
os.makedirs(tempTileDir)
if not os.path.exists(outputDir):
os.makedirs(outputDir)
vname = os.path.basename(origFile)
vPath = os.path.join(tempTileDir, vname + ".vrt")
listPath = os.path.join(tempTileDir, "fileList.txt")
listFile = open(listPath, "w")
nname, ext = os.path.splitext(origFile)
#Here we assume tif files are 8 bit and ntf are 16 bit
if ext.lower() == '.tif':
bitSize = 8
else:
bitSize = 16
for detect in detects:
rectStr = detect[0]
imgName = detect[1]
basename = os.path.basename(imgName)
x, y, w, h = parseRectStr(rectStr)
print("detect = {0},{1},{2},{3}".format(x, y,w , h))
tName = basename + "_" + str(x) + "_" + str(y) + "_" + \
str(w) + "_" + str(h) + ".tif"
t2Name = basename + "_" + str(x) + "_" + str(y) + "_" + \
str(w) + "_" + str(h) + "_w" + ".tif"
tPath = os.path.join(tempTileDir, tName)
t2Path = os.path.join(tempTileDir, t2Name)
if os.path.exists(tPath) == True:
os.remove(tPath)
if os.path.exists(t2Path) == True:
os.remove(t2Path)
        # Get the image clip
if bitSize == 16:
transStr = "/home/trbatcha/tools/bin/gdal_translate -of GTiff " +\
"-ot Byte -scale 64 1024 0 255 -b 1 -srcwin " \
+ str(x) + " " + str(y) + " " + str(w) + " " + str(h) + " " \
+ imgName + " " + tPath
else:
transStr = "/home/trbatcha/tools/bin/gdal_translate -of GTiff " +\
"-ot Byte -b 1 -srcwin " \
+ str(x) + " " + str(y) + " " + str(w) + " " + str(h) + " " \
+ imgName + " " + tPath
args = shlex.split(transStr)
print("running translate")
p = Popen(args, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
print stderr
print("translate complete")
#get rid of xml file gdal_translate creates
xmlfile = tPath + ".aux.xml"
if os.path.exists(xmlfile):
os.remove(tPath + ".aux.xml")
print (stdout)
warpStr = "/home/trbatcha/tools/bin/gdalwarp -of GTiff -t_srs " + \
"EPSG:3857 -overwrite " + tPath + " " + t2Path
args = shlex.split(warpStr)
print("running warp")
p = Popen(args, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
print (stderr)
print (stdout)
print("warp complete")
listFile.write(t2Path + '\n')
listFile.close()
vrtStr = "/home/trbatcha/tools/bin/gdalbuildvrt -srcnodata 0 -addalpha " \
+ "-vrtnodata 0 -overwrite -input_file_list " + listPath + \
" " + vPath
args = shlex.split(vrtStr)
print("running vrt")
p = Popen(args, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
print (stderr)
print (stdout)
print("virt complete")
# Generate tiles for all the image chips
import gdal2tiles
tileStr = "-v -p mercator --zoom 13 -w none " + vPath + " " + outputDir
    #debug: tried gdal2tiles as a separate process; it did not fix my problem so commented out
#my_env = os.environ.copy()
#tileStr = "/home/trbatcha/tools/bin/python gdal2tiles.py -v -p mercator -z 13 -w none " + vPath + " " + outputDir
args = shlex.split(tileStr)
#p = Popen(args, env=my_env, stdout=PIPE, stderr=PIPE)
#stdout, stderr = p.communicate()
#print (stderr)
#print (stdout)
print("gen tiles")
tileGenFailed = False
with timeout(10):
try:
# By default gdal turns exceptions off
gdal.UseExceptions()
targs = gdal.GeneralCmdLineProcessor(args)
gtiles = gdal2tiles.GDAL2Tiles(targs)
gtiles.process()
        except Exception, err:
            # not every exception carries errno; treat a missing errno as a real failure
            if getattr(err, 'errno', None) != errno.EINTR:
                print("gdal2tiles FAILED!!!")
print(err)
sys.stdout.flush()
shutil.rmtree(tempTileDir, ignore_errors=True)
return
print("TileGeneration TIMED OUT!! for file " + origFile)
tileGenFailed = True
print("gen tiles complete")
# before we move tiles lets check lockfile and wait if not avail
if tileGenFailed == False:
with timeout(3):
lockFile = os.path.join(tileDir,"tileLock")
lock = open(lockFile, 'w')
try:
fcntl.flock(lock, fcntl.LOCK_EX)
except IOError, e:
if e.errno != errno.EINTR:
raise e
print("Tile filelock timeout")
lock.close()
shutil.rmtree(tempTileDir, ignore_errors=True)
return
moveTiles(outputDir, tileDir)
fcntl.flock(lock, fcntl.LOCK_UN)
lock.close()
# remove the non-tiles we created
shutil.rmtree(tempTileDir, ignore_errors=True)
if __name__ == '__main__':
#debug profiling
import cProfile
save_stdout = sys.stdout
sys.stdout = sys.stderr
    #debug force stdout to flush output
sys.stdout = os.fdopen(sys.stdout.fileno(), "w", 0)
#debug profiling
#profile = cProfile.Profile()
#profile.enable()
cfg.TEST.HAS_RPN = True # Use RPN for proposals
args = parse_args()
if args.cfg_file is not None:
print("using config " + args.cfg_file)
cfg_from_file(args.cfg_file)
if cfg.TRAIN.IS_COLOR == True:
print("We are configured for color")
else:
print("We are configured for b/w")
if args.split_size:
print("We are to split up image by {0}".format(args.split_size))
else:
print("No split applied.")
tiledir = args.tile_path
ifile = args.file
prototxt = os.path.join(cfg.ROOT_DIR, 'models', 'VGG_CNN_M_1024',
'faster_rcnn_end2end', 'test_ships.prototxt')
#caffemodel = os.path.join(cfg.ROOT_DIR, 'output', 'faster_rcnn_end2end',
# 'ak47_train', 'zf_faster_rcnn_iter_70000.caffemodel')
caffemodel = os.path.join(cfg.ROOT_DIR, 'output', 'faster_rcnn_end2end',
'ships_train', 'vgg_cnn_m_1024_faster_rcnn_iter_500000.caffemodel')
if args.model_file is not None:
caffemodel = args.model_file
if args.proto_file is not None:
prototxt = args.proto_file
if not os.path.isfile(caffemodel):
raise IOError(('{:s} not found.\nDid you train it?'
).format(caffemodel))
if args.cpu_mode:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
cfg.GPU_ID = args.gpu_id
# Need to redirect stdout since we only want to return the results
tempDir = tempfile.mkdtemp(dir = "/dev/shm")
os.chmod(tempDir, 0o777)
#debug
doDetect = True
detects = []
if args.split_size != None:
fileList = split_up_file(ifile, tempDir, args.split_size, -1)
##debug only do the first one
#fileList = split_up_file(ifile, tempDir, args.split_size, 90)
else:
fileList = [ifile]
if doDetect == True:
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
print '\n\nLoaded network {:s}'.format(caffemodel)
# Warmup on a dummy image
im = 128 * np.ones((300, 500, 1), dtype=np.uint8)
#im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
for i in xrange(2):
_, _= im_detect(net, im)
for nextf in fileList:
print('detection for ' + nextf)
res = demo(net, nextf)
if res != None and len(res) > 0:
print("we have detection results!")
for d in res:
#We have to use ifile instead of nextf since
#all the geo data is gone from nextf
detects.append((d, ifile))
else:
pass
#For testing
#            print("appending dummy detection")
# detects.append(("ship 0.0 0.0 300.0 300.0", ifile))
# dset = gdal.Open(ifile)
# dxsize = 300
# dysize = 300
# width = dset.RasterXSize
# height = dset.RasterYSize
# cx = width /2
# cy = height /2
# if height < dysize:
# dysize = height
# if width < dxsize:
# dxsize = width
# if dxsize <= 0 or dysize <= 0:
# continue
# if cx + dxsize >= width or cy + dysize >= height:
# detects.append(("ship {0} {1} {2} {3}".format( cx, cy, width - cx -1, height - cy -1), ifile))
# else:
# detects.append(("ship {0} {1} {2} {3}".format( cx, cy, dxsize -1, dysize -1), ifile))
#detects.append(
# ("ship 0.0 0.0 {0} {1}".format(dxsize, dysize), ifile))
#detects.append(("ship {0} {1} {2} {3}".format(
# width - dxsize-10, height - dysize-10, dxsize, dysize),
# ifile))
#detects.append(("ship {0} {1} {2} {3}".format(
# width - dxsize, 0.0, dxsize, dysize),
# ifile))
#detects.append(("ship {0} {1} {2} {3}".format(
# 0.0, height - dysize, dxsize, dysize),
# ifile))
#debug show all detects
print("Printing detects for {0} detections".format(len(detects)))
for d in detects:
print(d[0])
writeTilesFromDetects(tiledir, detects, ifile)
#shutil.rmtree(tempDir)
#debug profiling
#profile.disable()
#profile.print_stats(sort='time')
#putting stdout back so we can output the results
sys.stdout.flush()
sys.stdout = save_stdout
# Write out the result to stdout
for d in detects:
print(d[0])
sys.exit(0)
|
import enum
import itertools
import string
import typing
from .number_set import NumberSet
class AreaType(enum.Enum):
ROW = 0
COLUMN = 1
BLOCK = 2
def __init__(self, index):
self._index = index
@property
def index(self):
return self._index
def orthogonal_type(self) -> 'AreaType':
if self is self.ROW:
return self.COLUMN
elif self is self.COLUMN:
return self.ROW
else:
raise ValueError(f'{self} has no orthogonal type')
def __str__(self):
self.name: str
return self.name.lower()
class Cell:
def __init__(self, row: int, col: int, board: 'Board'):
assert 0 <= row < board.size, f'cell row {row} out of range'
assert 0 <= col < board.size, f'cell col {col} out of range'
self._row = row
self._col = col
self._index = board.size * row + col
row_in_block = row % board.block_height
col_in_block = col % board.block_width
block_index = board.block_width * row_in_block + col_in_block
# Cell index inside surrounding area.
self._index_of_areas = {
AreaType.ROW: col,
AreaType.COLUMN: row,
AreaType.BLOCK: block_index,
}
self._areas: typing.Dict[AreaType, 'Area'] = {}
@property
def row(self) -> int:
return self._row
@property
def col(self) -> int:
return self._col
@property
def index(self) -> int:
return self._index
def index_of_area(self, area_or_type: typing.Union[AreaType, 'Area']) -> int:
if isinstance(area_or_type, Area):
return self._index_of_areas[area_or_type.area_type]
elif isinstance(area_or_type, AreaType):
return self._index_of_areas[area_or_type]
else:
raise TypeError(f'Unsupported type: "{type(area_or_type)}"')
def get_area(self, area_type: AreaType) -> 'Area':
return self._areas[area_type]
def iter_areas(self) -> typing.Iterator['Area']:
return self._areas.values()
def __repr__(self):
return f'({self._row + 1},{self._col + 1})'
class Area:
def __init__(self, area_type: AreaType, index: int, board: 'Board'):
assert 0 <= index < board.size, f'area index {index} out of range'
self._board = board
self._area_type = area_type
self._index = index
self._cells: typing.List[Cell] = []
@property
def area_type(self) -> AreaType:
return self._area_type
@property
def index(self) -> int:
return self._index
@property
def first_row(self) -> int:
return self._cells[0].row
@property
def first_col(self) -> int:
return self._cells[0].col
@property
def last_row(self) -> int:
return self._cells[-1].row
@property
def last_col(self) -> int:
return self._cells[-1].col
def get_cell(self, index_of_area: int) -> Cell:
return self._cells[index_of_area]
def iter_cells(self, excludes: typing.Iterable = None) -> typing.Iterator[Cell]:
positions = NumberSet(0)
for item in excludes or []:
if isinstance(item, Cell):
positions.add(item.index_of_area(self._area_type))
elif isinstance(item, Area):
positions.add(item.index)
elif isinstance(item, int):
positions.add(item)
else:
raise TypeError(f'Unsupported item type of excludes "{type(item)}"')
for cell in self._cells:
if cell.index_of_area(self._area_type) not in positions:
yield cell
def __repr__(self):
return f'{self._area_type.name.lower()} {self._index + 1}'
def __str__(self):
if self._area_type == AreaType.BLOCK:
r, c = divmod(self._index, self._board.blocks_per_row)
return f'({r + 1},{c + 1})'
else:
return f'{self._index + 1}'
class Board:
def __init__(self, block_width: int = 3, block_height: int = 3):
self._block_width = block_width
self._block_height = block_height
self._size = block_width * block_height
self._mapping: str = f'123456789{string.ascii_uppercase}'
self._cells = [Cell(r, c, self) for r, c in itertools.product(range(self._size), repeat=2)]
# self._areas = [Area(t, i, self) for t, i in itertools.product(AreaType, range(self._size))]
self._areas: typing.Dict[AreaType, typing.List[Area]] = {}
for t in AreaType:
self._areas[t] = [Area(t, i, self) for i in range(self._size)]
self._connect_area_and_cell()
@property
def block_width(self) -> int:
return self._block_width
@property
def block_height(self) -> int:
return self._block_height
    @property
    def blocks_per_row(self) -> int:
        # size / block_width == block_height blocks fit across a row
        return self._block_height
    @property
    def blocks_per_col(self) -> int:
        # size / block_height == block_width blocks fit down a column
        return self._block_width
@property
def size(self) -> int:
return self._size
@property
def mapping(self) -> str:
return self._mapping
@mapping.setter
def mapping(self, value):
self._mapping = value
def mark(self, number: int) -> str:
return self._mapping[number]
def lookup(self, mark: str) -> int:
number = self._mapping.find(mark)
return number if number >= 0 else None
def iter_cells(self) -> typing.Iterator[Cell]:
return iter(self._cells)
def get_area(self, area_type: AreaType, index: int) -> Area:
return self._areas[area_type][index]
def iter_areas(self, area_type: AreaType = None) -> typing.Iterator[Area]:
if area_type is None:
return itertools.chain(*self._areas.values())
else:
return iter(self._areas[area_type])
def get_common_area(self, cells: typing.Iterable[Cell], area_type: AreaType) -> Area:
cell_iter = iter(cells)
cell = next(cell_iter, None)
if cell is None:
return None
area = cell.get_area(area_type)
for cell in cell_iter:
if cell.get_area(area_type) != area:
return None
return area
def iter_common_areas(self, cells: typing.Iterable[Cell]) -> typing.Iterator[Area]:
for area_type in AreaType:
area = self.get_common_area(cells, area_type)
if area is not None:
yield area
def iter_numbers(self) -> typing.Iterator[int]:
return range(self._size)
def _connect_area_and_cell(self):
cell: Cell
for cell in self._cells:
# Each cell belongs to 3 areas, one for each type.
area_index_mapping = {
AreaType.ROW: cell.row,
AreaType.COLUMN: cell.col,
AreaType.BLOCK: self.blocks_per_row * (cell.row // self._block_height) + (cell.col // self._block_width),
}
for area_type in AreaType:
area_index = area_index_mapping[area_type]
area = self._areas[area_type][area_index]
area._cells.append(cell)
cell._areas[area_type] = area
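# Minimal usage sketch (not part of the original module; it only exercises the
# public API defined above on a default 9x9 board):
#
#   board = Board()                              # 3x3 blocks -> size 9
#   row0 = board.get_area(AreaType.ROW, 0)
#   [cell.col for cell in row0.iter_cells()]     # -> [0, 1, ..., 8]
#   corner = row0.get_cell(0)                    # Cell (1,1)
#   corner.get_area(AreaType.BLOCK)              # the block printed as '(1,1)'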
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
""" AvroSerializer class
Serialize / Deserialize record to avro schema
Note:
In schemas folder your avro file must have the *avsc.yaml* extension
Todo:
* Remove workaround in constructor (os.path ...)
"""
import json
import os
import re
from io import BytesIO
from logging import Logger, getLogger
from typing import Dict, Any, Union, Type
from avro.datafile import DataFileWriter, DataFileReader
from avro.io import DatumWriter, DatumReader, AvroTypeException
from avro.schema import NamedSchema, Parse
from yaml import (FullLoader, load_all) # type: ignore
from tonga.models.handlers.base import BaseHandler
from tonga.models.records.base import BaseRecord
from tonga.models.store.base import BaseStoreRecordHandler
from tonga.models.store.store_record import StoreRecord
from tonga.services.serializer.errors import (AvroEncodeError, AvroDecodeError, AvroAlreadyRegister,
NotMatchedName, MissingEventClass, MissingHandlerClass)
from .base import BaseSerializer
__all__ = [
'AvroSerializer',
]
AVRO_SCHEMA_FILE_EXTENSION: str = 'avsc.yaml'
class AvroSerializer(BaseSerializer):
"""Class serializer Avro schema to class instance
Serialize / Deserialize (BaseRecord & StoreRecord) to avro schema
"""
logger: Logger
schemas_folder: str
_schemas: Dict[str, NamedSchema]
_events: Dict[object, Union[Type[BaseRecord], Type[StoreRecord]]]
_handlers: Dict[object, Union[BaseHandler, BaseStoreRecordHandler]]
def __init__(self, schemas_folder: str):
""" AvroSerializer constructor
Args:
            schemas_folder (str): Folder where the project's Avro schemas are stored
(example: *os.path.join(os.path.dirname(os.path.abspath(__file__)),
'examples/coffee_bar/avro_schemas')*)
Returns:
None
"""
super().__init__()
self.schemas_folder = schemas_folder
self.schemas_folder_lib = os.path.dirname(os.path.abspath(__file__)) + '/../../models/avro_schema'
self.logger = getLogger('tonga')
self._schemas = dict()
self._events = dict()
self._handlers = dict()
self._scan_schema_folder(self.schemas_folder)
self._scan_schema_folder(self.schemas_folder_lib)
def _scan_schema_folder(self, schemas_folder: str) -> None:
""" AvroSerializer internal function, he was call by class constructor
Args:
            schemas_folder (str): Folder where the project's Avro schemas are stored
Returns:
None
"""
with os.scandir(schemas_folder) as files:
for file in files:
if not file.is_file():
continue
if file.name.startswith('.'):
continue
if not file.name.endswith(f'.{AVRO_SCHEMA_FILE_EXTENSION}'):
continue
self._load_schema_from_file(file.path)
def _load_schema_from_file(self, file_path: str) -> None:
""" AvroSerializer internal function, he was call by _scan_schema_folder for load schema file
Args:
file_path: Path to schema
Raises:
            AvroAlreadyRegister: Raised when the Avro schema is already registered
Returns:
None
"""
with open(file_path, 'r') as fd:
for s in load_all(fd, Loader=FullLoader):
avro_schema_data = json.dumps(s)
avro_schema = Parse(avro_schema_data)
schema_name = avro_schema.namespace + '.' + avro_schema.name
if schema_name in self._schemas:
raise AvroAlreadyRegister
self._schemas[schema_name] = avro_schema
def register_event_handler_store_record(self, store_record_event: Type[StoreRecord],
store_record_handler: BaseStoreRecordHandler) -> None:
""" Register project event & handler in AvroSerializer
Args:
            store_record_event (Type[BaseStoreRecord]): Store record event; BaseStoreRecord works without
                                subclassing, but for more flexibility you can create your own class
            store_record_handler (BaseStoreRecordHandler): Store record handler; Tonga's StoreRecordHandler
                                covers simple usage, but for more flexibility you can create your
                                own class, which must inherit from BaseStoreRecordHandler
Returns:
None
"""
event_name_regex = re.compile(store_record_event.event_name())
self._events[event_name_regex] = store_record_event
self._handlers[event_name_regex] = store_record_handler
def register_class(self, event_name: str, event_class: Type[BaseRecord], handler_class: BaseHandler = None) -> None:
"""Register project event & handler in AvroSerializer
Args:
event_name (str): Event name, Avro schema *namespace + name*
            event_class (Type[BaseModel]): Event class, must inherit from *BaseEvent / BaseCommand / BaseResult*
            handler_class (BaseHandler): Handler class, must inherit from *BaseHandlerEvent / BaseHandlerCommand
                                / BaseHandlerResult*
Raises:
            NotMatchedName: Raised when no registered schema name matches event_name
Returns:
None
"""
event_name_regex = re.compile(event_name)
matched: bool = False
for schema_name in self._schemas:
if event_name_regex.match(schema_name):
matched = True
break
if not matched:
raise NotMatchedName
self._events[event_name_regex] = event_class
self._handlers[event_name_regex] = handler_class
def get_schemas(self) -> Dict[str, NamedSchema]:
""" Return _schemas class attributes
Returns:
Dict[str, NamedSchema]: _schemas class attributes
"""
return self._schemas
def get_events(self) -> Dict[object, Union[Type[BaseRecord], Type[StoreRecord]]]:
""" Return _events class attributes
Returns:
Dict[object, Union[Type[BaseModel], Type[BaseStoreRecord]]]: _events class attributes
"""
return self._events
def get_handlers(self) -> Dict[object, Union[BaseHandler, BaseStoreRecordHandler]]:
""" Return _handlers class attributes
Returns:
Dict[object, Union[BaseHandler, BaseStoreRecordHandler]]: _handlers class attributes
"""
return self._handlers
def encode(self, obj: BaseRecord) -> bytes:
""" Encode *BaseHandlerEvent / BaseHandlerCommand / BaseHandlerResult* to bytes format
This function is used by kafka-python
Args:
obj (BaseModel): *BaseHandlerEvent / BaseHandlerCommand / BaseHandlerResult*
Raises:
MissingEventClass: can’t find BaseModel in own registered BaseModel list (self._schema)
AvroEncodeError: fail to encode BaseModel to bytes
Returns:
bytes: BaseModel in bytes
"""
try:
schema = self._schemas[obj.event_name()]
except KeyError as err:
self.logger.exception('%s', err.__str__())
raise MissingEventClass
try:
output = BytesIO()
writer = DataFileWriter(output, DatumWriter(), schema)
writer.append(obj.to_dict())
writer.flush()
encoded_event = output.getvalue()
writer.close()
except AvroTypeException as err:
self.logger.exception('%s', err.__str__())
raise AvroEncodeError
return encoded_event
def decode(self, encoded_obj: Any) -> Dict[str, Union[BaseRecord, StoreRecord,
BaseHandler, BaseStoreRecordHandler]]:
""" Decode bytes format to BaseModel and return dict which contains decoded *BaseModel / BaseStoreRecord*
This function is used by kafka-python / internal call
Args:
encoded_obj (Any): Bytes encode BaseModel / BaseStoreRecord
Raises:
AvroDecodeError: fail to decode bytes in BaseModel
MissingEventClass: can’t find BaseModel in own registered BaseModel list (self._schema)
MissingHandlerClass: can’t find BaseHandlerModel in own registered BaseHandlerModel list (self._handler)
Returns:
Dict[str, Union[BaseModel, BaseStoreRecord, BaseHandler, BaseStoreRecordHandler]]:
example: {'event_class': ..., 'handler_class': ...}
"""
try:
reader = DataFileReader(BytesIO(encoded_obj), DatumReader())
schema = json.loads(reader.meta.get('avro.schema').decode('utf-8'))
schema_name = schema['namespace'] + '.' + schema['name']
dict_data = next(reader)
except AvroTypeException as err:
self.logger.exception('%s', err.__str__())
raise AvroDecodeError
# Finds a matching event name
for e_name, event in self._events.items():
if e_name.match(schema_name): # type: ignore
record_class = event
break
else:
raise MissingEventClass
# Finds a matching handler name
for e_name, handler in self._handlers.items():
if e_name.match(schema_name): # type: ignore
handler_class = handler
break
else:
raise MissingHandlerClass
return {'record_class': record_class.from_dict(dict_data=dict_data), 'handler_class': handler_class}
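# Usage sketch (illustrative; CoffeeOrdered and CoffeeOrderedHandler are
# hypothetical classes inheriting from BaseRecord and BaseHandler, and the
# schemas folder is assumed to hold a matching *.avsc.yaml schema):
#
#   serializer = AvroSerializer('examples/coffee_bar/avro_schemas')
#   serializer.register_class('tonga.coffee.CoffeeOrdered',
#                             CoffeeOrdered, CoffeeOrderedHandler)
#   payload = serializer.encode(coffee_ordered_event)   # -> bytes
#   decoded = serializer.decode(payload)
#   # -> {'record_class': <CoffeeOrdered>, 'handler_class': <handler>}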
|
#!/usr/bin/env python
# Level Script
# Use scene.when to schedule events.
# Yield when you want to wait until the next event.
# This is a generator. Using a busy loop will halt the game.
from math import tau, pi
from game.constants import GREEN
from game.scripts.level import Level
class Level2(Level):
number = 2
name = "The Rise of Butterflies"
ground = GREEN
music = "butterfly.ogg"
def __call__(self):
self.scene.cloudy()
self.scene.rocks()
yield from super().__call__()
self.spawn(0, 0)
yield from self.rotating_circle(2, 30, 0.5)
yield self.medium_pause()
self.spawn(0, 0)
yield from self.rotating_circle(3, 30)
yield self.big_pause()
yield from self.v_shape(5)
yield self.big_pause()
yield from self.rotating_v_shape(4)
yield self.big_pause()
for i in range(3):
self.spawn(0, 0)
yield from self.rotating_circle(5, -40)
yield self.big_pause()
yield from self.slow_type("They plan to swarm us.")
self.spawn_powerup("M", 0, 0)
yield from self.slow_type("Take this Machine Gun!", color="green")
self.terminal.write_center("Press shift to change guns.", 15)
t = "X / Y on controller"
self.terminal.write_center(t, 17)
self.terminal.write_center("X", 17, color="blue", length=len(t))
self.terminal.write_center(
"Y", 17, color="yellow", length=len(t), char_offset=(4, 0)
)
yield self.bigg_pause()
self.terminal.clear(5)
self.terminal.clear(15)
self.terminal.clear(17)
self.wall(8, 4, 0.2, 0.1)
yield self.bigg_pause()
self.spawn_powerup("M")
        yield self.medium_pause()
self.wall(8, 4, 0.2, 0.1)
yield self.big_pause()
yield from self.combine(
self.rotating_v_shape(5, angular_mult=0.5),
self.rotating_v_shape(5, pi / 3, angular_mult=0.5),
self.rotating_v_shape(5, tau / 3, angular_mult=0.5),
)
self.spawn_powerup("M")
yield self.bigg_pause()
yield from self.rotating_circle(5, 10)
yield from self.rotating_circle(7, 20)
yield self.small_pause()
yield from self.rotating_circle(11, 30)
yield self.small_pause()
yield from self.rotating_circle(11, 40)
yield self.big_pause()
# TODO: Check for level clear ?
yield self.huge_pause()
yield from self.slow_type("Well done!", 5, "green", clear=True)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This custom key-value mapping follows the database design document
light_color = {
1 : 'white',
2 : 'blue',
3 : 'yellow',
}
class PolicyInstance:
    """
    Policy instance type
    """
    #: Instance ID
    instance_id = -1
    #: Policy ID
    policy_id = -1
    #: Plant ID
    plant_id = -1
    #: Room ID
    room_id = -1
    #: Instance execution start time
    start_time = -1
class AbsoluteTime:
    """
    Absolute time type
    Corresponds to the tb_absolute_time table in the database
    """
    #: Step (rule) ID
    rule_id = -1
    #: Instance ID
    instance_id = -1
    #: Time of change
    change_time = ''
class TableSensor:
sensor_id = -1
sensor_name = ''
room_id = -1
position = []
state = 1
class TablePlant:
plant_id = -1
plant_name = ''
class TableRoom:
room_id = -1
room_description = ''
|
from django.contrib import admin
from .models import Bootcamp, User, Course, Review
# Register your models here.
admin.site.register(User)
admin.site.register(Bootcamp)
admin.site.register(Course)
admin.site.register(Review)
|
""" gps_util.py - parsing of GPS messages """
import datetime,traceback,logging,time,sys,math
from common import UTC,excTraceback,hexDump,parseTimeString
from tl_logger import TLLog,logOptions
log = TLLog.getLogger( 'gps' )
logGPSD = TLLog.getLogger( 'GPSD' )
logGpsData = TLLog.getLogger( 'gpsdata' )
class Satellite(object):
    _lstValidGPS = range(1,65)        # 1 .. 64
    _lstValidGLONASS = range(65,86)   # 65 .. 85
@staticmethod
def isGPS(prn):
return prn in Satellite._lstValidGPS
@staticmethod
def isGLONASS(prn):
return prn in Satellite._lstValidGLONASS
def __init__(self, prn, ele, azi, snr, used=None):
self.prn = self._int(prn)
self.ele = self._int(ele)
self.azi = self._int(azi)
self.snr = self._int(snr)
if self.snr == None:
self.snr = 0
self.used = used
def _int(self,val):
try:
return int(val)
except:
return None
def desc(self):
s = '%4s %4s %4s %4s' % (self.prn, self.ele, self.azi, self.snr)
if self.used:
s += ' **'
return s
def __str__(self):
return 'prn:%s ele:%s azi:%s snr:%s used:%s' % (self.prn, self.ele, self.azi, self.snr, self.used )
class GPS(object):
""" base class for all GPS messages """
def __init__(self, msg):
lst = msg.split('*')
assert len(lst) == 2
self.msg = lst[0]
self.checksum = int(lst[1], 16)
#
chk = self._checksum()
if self.checksum != chk:
log.warn('GPS message checksum FAIL')
print('**** GPS message checksum FAIL ****')
#
self.lst = self.msg.split(',')
def _checksum(self):
val = 0
for ch in self.msg[1:]:
val ^= ord(ch)
return val
def getMsgType(self):
return self.lst[0][1:]
def __str__(self):
return '%5s - %s' % ('GPS', ','.join(self.lst))
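# Checksum sketch: the NMEA checksum is the XOR of every byte between '$' and
# '*', compared against the two hex digits after '*'. For the hypothetical
# sentence '$GPXYZ,1*51':
#
#   0x47 ^ 0x50 ^ 0x58 ^ 0x59 ^ 0x5A ^ 0x2C ^ 0x31 == 0x51   # bytes of 'GPXYZ,1'
#
# so GPS('$GPXYZ,1*51') parses cleanly; a mismatch only logs a warning.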
class GSV(GPS):
""" Abstract
=== GSV - Satellites in view ===
    These sentences describe the sky position of a GPS satellite in view.
Typically they're shipped in a group of 2 or 3.
------------------------------------------------------------------------------
1 2 3 4 5 6 7 n
| | | | | | | |
$--GSV,x,x,x,x,x,x,x,...*hh<CR><LF>
------------------------------------------------------------------------------
Field Number:
1. total number of GSV messages to be transmitted in this group
2. 1-origin number of this GSV message within current group
3. total number of satellites in view (leading zeros sent)
4. satellite PRN number (leading zeros sent)
5. elevation in degrees (00-90) (leading zeros sent)
6. azimuth in degrees to true north (000-359) (leading zeros sent)
7. SNR in dB (00-99) (leading zeros sent)
more satellite info quadruples like 4-7
n) checksum
Example:
$GPGSV,3,1,11,03,03,111,00, 04,15,270,00, 06,01,010,00, 13,06,292,00*74
$GPGSV,3,2,11,14,25,170,00,16,57,208,39,18,67,296,40,19,40,246,00*74
$GPGSV,3,3,11,22,42,067,42,24,14,311,43,27,05,244,00,,,,*4D
Some GPS receivers may emit more than 12 quadruples (more than three
    GPGSV sentences), even though NMEA-0183 doesn't allow this. (The
extras might be WAAS satellites, for example.) Receivers may also
report quads for satellites they aren't tracking, in which case the
SNR field will be null; we don't know whether this is formally allowed
or not.
"""
def __init__(self, msg):
GPS.__init__(self, msg)
self.totGSV = int(self.lst[1])
self.curGSV = int(self.lst[2])
self.totSatView = int(self.lst[3])
self.lstSats = []
for i in range( 4, len(self.lst), 4):
satPRN = self.lst[i]
satEle = self.lst[i+1]
satAzi = self.lst[i+2]
satSNR = self.lst[i+3]
sat = Satellite(satPRN,satEle,satAzi,satSNR)
log.debug('GSV sat:%s' % sat)
self.lstSats.append( sat )
def __str__(self):
return '%5s - %s' % ('--GSV', ','.join(self.lst))
class GPGSV(GSV):
""" === GSV - GPS Satellites in view ===
"""
lstSatellites = []
_lstTempSats = []
@staticmethod
def getSatellite(PRN):
""" return satellite from lstSatellites with matching PRN """
lst = [sat for sat in GPGSV.lstSatellites if sat.prn == PRN]
if len(lst) == 1:
return lst[0]
if len(lst) == 0:
return None
if len(lst) > 1:
print 'getSatellite() ERROR - PRN exists for 2 satellites ... return 1st only'
return lst[0]
def __init__(self, msg):
GSV.__init__(self, msg)
self.process()
def process(self):
if self.totSatView == 0:
GPGSV.lstSatellites = []
GPGSV._lstTempSats = []
else:
GPGSV._lstTempSats.extend( self.lstSats )
if self.curGSV == self.totGSV:
GPGSV.lstSatellites = [sat for sat in GPGSV._lstTempSats]
GPGSV._lstTempSats = []
log.debug( 'totGSV:%s curGSV:%s' % (self.totGSV,self.curGSV))
log.debug( 'lstSatellites:%d' % len(GPGSV.lstSatellites))
log.debug( '_lstTempSats:%d' % len(GPGSV._lstTempSats))
def __str__(self):
return '%5s - %s' % ('GPGSV', ','.join(self.lst))
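# Parsing sketch, using the second example sentence from the GSV docstring:
#
#   gsv = GPGSV('$GPGSV,3,2,11,14,25,170,00,16,57,208,39,18,67,296,40,19,40,246,00*74')
#   (gsv.totGSV, gsv.curGSV, gsv.totSatView)   # -> (3, 2, 11)
#   [s.prn for s in gsv.lstSats]               # -> [14, 16, 18, 19]
#   [s.snr for s in gsv.lstSats]               # -> [0, 39, 40, 0]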
class GLGSV(GSV):
""" === GSV - GPS Satellites in view ===
"""
lstGLONASSSats = []
_lstTempGLONASSSats = []
@staticmethod
def getSatellite(PRN):
""" return satellite from lstSatellites with matching PRN """
lst = [sat for sat in GLGSV.lstGLONASSSats if sat.prn == PRN]
if len(lst) == 1:
return lst[0]
if len(lst) == 0:
return None
if len(lst) > 1:
print 'getSatellite() ERROR - PRN exists for 2 satellites ... return 1st only'
return lst[0]
def __init__(self, msg):
GSV.__init__(self, msg)
self.process()
def process(self):
if self.totSatView == 0:
GLGSV.lstGLONASSSats = []
GLGSV._lstTempGLONASSSats= []
else:
GLGSV._lstTempGLONASSSats.extend( self.lstSats )
if self.curGSV == self.totGSV:
GLGSV.lstGLONASSSats = [sat for sat in GLGSV._lstTempGLONASSSats]
GLGSV._lstTempGLONASSSats = []
log.debug( 'totGSV:%s curGSV:%s' % (self.totGSV,self.curGSV))
log.debug( 'lstGLONASSSats:%d' % len(GLGSV.lstGLONASSSats))
log.debug( '_lstTempGLONASSSats:%d' % len(GLGSV._lstTempGLONASSSats))
def __str__(self):
return '%5s - %s' % ('GLGSV', ','.join(self.lst))
class GSA(GPS):
""" Abstract class
=== GSA - GPS DOP and active satellites ===
------------------------------------------------------------------------------
1 2 3 14 15 16 17 18
| | | | | | | |
$--GSA,a,a,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x.x,x.x,x.x*hh<CR><LF>
------------------------------------------------------------------------------
Field Number:
1. Selection mode: M=Manual, forced to operate in 2D or 3D, A=Automatic, 3D/2D
2. Mode (1 = no fix, 2 = 2D fix, 3 = 3D fix)
3. ID of 1st satellite used for fix
4. ID of 2nd satellite used for fix
5. ID of 3rd satellite used for fix
6. ID of 4th satellite used for fix
7. ID of 5th satellite used for fix
8. ID of 6th satellite used for fix
9. ID of 7th satellite used for fix
10. ID of 8th satellite used for fix
11. ID of 9th satellite used for fix
12. ID of 10th satellite used for fix
13. ID of 11th satellite used for fix
14. ID of 12th satellite used for fix
15. PDOP
16. HDOP
17. VDOP
18. Checksum
"""
def __init__(self, msg):
GPS.__init__(self, msg)
self.selMode = self.lst[1]
self.mode = self.lst[2]
self.lstSatIDs = []
for val in self.lst[3:15]:
try:
prn = int(val)
self.lstSatIDs.append(prn)
except:
pass
log.debug('GSA - lstSatIDs:%s' % self.lstSatIDs)
self.PDOP = self.lst[15]
self.HDOP = self.lst[16]
self.VDOP = self.lst[17]
def __str__(self):
return '%5s - %s' % ('--GSA', ','.join(self.lst))
class GPGSA(GSA):
""" GPS satellites used only """
def __init__(self, msg):
GSA.__init__(self, msg)
def __str__(self):
return '%5s - %s' % ('GPGSA', ','.join(self.lst))
class GLGSA(GSA):
""" GLONASS satellites used only """
def __init__(self, msg):
GSA.__init__(self, msg)
def __str__(self):
return '%5s - %s' % ('GLGSA', ','.join(self.lst))
class GNGSA(GSA):
""" GPS/GLONASS active satellites
When both GPS and GLONASS satellite are used together in the position solution,
the talker identifier will be GN. In the third case, the receiver creates two
GSA sentences for every epoch. The first GNGSA sentence is used for GPS satellites
while the second one is for the GLONASS satellites. In the GSA message, the satellite
ID number of the GLONASS satellite is 64+ satellite slot number.
"""
    # Keep references to the most recent GPS and GLONASS GNGSA records
_GPS = None
_GLONASS = None
def __init__(self, msg):
GSA.__init__(self, msg)
# GNGSA will send 2 records; GPS followed by GLONASS
if self._isGPS():
self.gpsType = 'GPS'
GNGSA._GPS = self
else:
self.gpsType = 'GLONASS'
GNGSA._GLONASS = self
def _isGPS(self):
for satId in self.lstSatIDs:
if Satellite.isGPS(satId):
return True
if Satellite.isGLONASS(satId):
return False
# should not get here
s = 'GNGSA _isGPS() fail - could not determine GPS or GLONASS message type from Sat IDs *****'
log.error(s)
        print '***** Error ' + s
def __str__(self):
return '%5s - %7s - %s' % ('GNGSA', self.gpsType, ','.join(self.lst))
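# Worked example of the ID convention above: GLONASS slot 1 is reported as
# satellite ID 64 + 1 = 65, which is why Satellite._lstValidGLONASS spans
# 65..85 while GPS PRNs occupy 1..64.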
class GPGGA(GPS):
""" === GSA - Global positioning system fix data ===
Time, Position and fix related data for a GPS receiver.
------------------------------------------------------------------------------
11
1 2 3 4 5 6 7 8 9 10 | 12 13 14 15
| | | | | | | | | | | | | | |
$--GGA,hhmmss.ss,llll.ll,a,yyyyy.yy,a,x,xx,x.x,x.x,M,x.x,M,x.x,xxxx*hh<CR><LF>
------------------------------------------------------------------------------
Field Number:
1. Universal Time Coordinated (UTC)
2. Latitude
3. N or S (North or South)
4. Longitude
5. E or W (East or West)
6. GPS Quality Indicator,
- 0 - fix not available,
- 1 - GPS fix,
- 2 - Differential GPS fix
(values above 2 are 2.3 features)
- 3 = PPS fix
- 4 = Real Time Kinematic
- 5 = Float RTK
- 6 = estimated (dead reckoning)
- 7 = Manual input mode
- 8 = Simulation mode
7. Number of satellites in view, 00 - 12
8. Horizontal Dilution of precision (meters)
9. Antenna Altitude above/below mean-sea-level (geoid) (in meters)
10. Units of antenna altitude, meters
11. Geoidal separation, the difference between the WGS-84 earth
ellipsoid and mean-sea-level (geoid), "-" means mean-sea-level
below ellipsoid
12. Units of geoidal separation, meters
13. Age of differential GPS data, time in seconds since last SC104
type 1 or 9 update, null field when DGPS is not used
14. Differential reference station ID, 0000-1023
15. Checksum
"""
def __init__(self, msg):
GPS.__init__(self, msg)
self.UTC = self.lst[1]
self.latitude = self.lst[2]
self.NorS = self.lst[3]
self.longitude = self.lst[4]
self.EorW = self.lst[5]
self.GPSQual = self.lst[6]
self.numSatsView = int(self.lst[7])
#if self.numSatsView > 12:
#log.warn( 'GPGGA() -- numSatsView = %d -- Max is 12, setting to Max' % self.numSatsView)
#self.numSatsView = 12
def __str__(self):
return '%5s - %s' % ('GPGGA', ','.join(self.lst))
class RMC(GPS):
""" Abstract class
=== RMC - Recommended Minimum Navigation Information ===
------------------------------------------------------------------------------
12
1 2 3 4 5 6 7 8 9 10 11| 13
| | | | | | | | | | | | |
$--RMC,hhmmss.ss,A,llll.ll,a,yyyyy.yy,a,x.x,x.x,xxxx,x.x,a,m,*hh<CR><LF>
------------------------------------------------------------------------------
Field Number:
1. UTC Time
2. Status, V=Navigation receiver warning A=Valid
3. Latitude
4. N or S
5. Longitude
6. E or W
7. Speed over ground, knots
8. Track made good, degrees true
9. Date, ddmmyy
10. Magnetic Variation, degrees
11. E or W
12. FAA mode indicator (NMEA 2.3 and later)
13. Checksum
A status of V means the GPS has a valid fix that is below an internal
quality threshold, e.g. because the dilution of precision is too high
or an elevation mask test failed.
"""
def __init__(self, msg):
GPS.__init__(self, msg)
self.UTC = self.lst[1]
self.status = self.lst[2]
self.latitude = self.lst[3]
self.NorS = self.lst[4]
self.longitude = self.lst[5]
self.EorW = self.lst[6]
self.speed = self.lst[7]
self.track = self.lst[8]
self.date = self.lst[9]
def getUTC(self):
""" return the UTC time as a python datetime. """
# get date/time
try:
year = int(self.date[4:6]) + 2000
month = int(self.date[2:4])
day = int(self.date[0:2])
hour = int(self.UTC[0:2])
minute = int(self.UTC[2:4])
second = int(self.UTC[4:6])
return datetime.datetime(year=year,month=month,day=day,hour=hour,minute=minute,second=second, tzinfo=UTC() )
except Exception,err:
log.error( 'getUTC() fail - %s' % err)
return None
def __str__(self):
return '%5s - %s' % ('--RMC', ','.join(self.lst))
#return '%5s - date:%s time:%s UTC:%s' % ('GPRMC', self.date, self.UTC, self.getUTC())
class GPRMC(RMC):
""" GPS RMS message """
def __init__(self, msg):
RMC.__init__(self, msg)
def __str__(self):
return '%5s - %s' % ('GPRMC', ','.join(self.lst))
#return '%5s - date:%s time:%s UTC:%s' % ('GPRMC', self.date, self.UTC, self.getUTC())
class GNRMC(RMC):
""" GPS/GLONASS RMS message """
def __init__(self, msg):
RMC.__init__(self, msg)
def __str__(self):
return '%5s - %s' % ('GNRMC', ','.join(self.lst))
#return '%5s - date:%s time:%s UTC:%s' % ('GPRMC', self.date, self.UTC, self.getUTC())
class GLL(GPS):
""" Abstract base class for GLL - Geographic Position - Latitude/Longitude ===
------------------------------------------------------------------------------
1 2 3 4 5 6 7 8
| | | | | | | |
$--GLL,llll.ll,a,yyyyy.yy,a,hhmmss.ss,a,m,*hh<CR><LF>
------------------------------------------------------------------------------
Field Number:
1. Latitude
2. N or S (North or South)
3. Longitude
4. E or W (East or West)
5. Universal Time Coordinated (UTC)
6. Status A - Data Valid, V - Data Invalid
7. FAA mode indicator (NMEA 2.3 and later)
8. Checksum
"""
def __init__(self, msg):
GPS.__init__(self, msg)
self.latitude = self.lst[1]
self.NorS = self.lst[2]
self.longitude = self.lst[3]
self.EorW = self.lst[4]
self.UTC = self.lst[5]
def __str__(self):
return '%5s - %s' % ('--GLL', ','.join(self.lst))
class GPGLL(GLL):
""" GPS Geographic Position - Latitude/Longitude """
def __init__(self, msg):
GLL.__init__(self, msg)
def __str__(self):
return '%5s - %s' % ('GPGLL', ','.join(self.lst))
class GNGLL(GLL):
""" GPS/GLONASS Geographic Position - Latitude/Longitude """
def __init__(self, msg):
GLL.__init__(self, msg)
def __str__(self):
return '%5s - %s' % ('GNGLL', ','.join(self.lst))
_dctNMEA = {
'GPGSV' : GPGSV, # GPS messages
'GPGSA' : GPGSA,
'GPGGA' : GPGGA,
'GPRMC' : GPRMC,
'GPGLL' : GPGLL,
'GNGLL' : GNGLL, # GPS/GLONASS messages
'GNGSA' : GNGSA,
'GNRMC' : GNRMC,
'GLGSA' : GLGSA, # GLONASS messages
'GLGSV' : GLGSV,
}
def parseGPSMessages( lstMsgs ):
lstGPSRecs = []
for msg in lstMsgs:
try:
lst = msg.split(',')
if len(lst) > 1 and len(lst[0]) > 1 and lst[0][0] == '$':
nmea = lst[0][1:]
if nmea in _dctNMEA:
rec = _dctNMEA[nmea](msg)
else:
rec = GPS( msg )
lstGPSRecs.append( rec )
except Exception,err:
log.error( 'GPS parse fail - %s' % err)
log.error( 'msg:%s' % msg )
return lstGPSRecs
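# Usage sketch (illustrative; sentences must carry their real '*hh' checksums,
# otherwise construction fails and the line is logged and skipped):
#
#   for rec in parseGPSMessages(open('gps_input.txt').read().splitlines()):
#       print '%s -> %s' % (rec.getMsgType(), rec)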
class GPSData(object):
"""
Create GPS data object from gpsdata file. Expected format:
GGA:2014.01.07-14:11:40,223900.000,3717.1137,N,12156.6299,W,2
DOP:3,1.84,1.02,1.52,3.825,3.825,8.74,8.74
SAT:^\31,56,137,46
SAT:^\32,51,314,51
SAT:^\11,48,262,51
SAT:^\01,45,303,46
SAT:^\14,42,045,48
SAT:^\46,39,143,44
SAT:^\22,26,100,33
SAT:^\20,16,299,45
SAT:^\19,15,228,49
SAT:^\25,06,077,
SAT:^\23,05,244,40
"""
_dctGGAFix = { 0:'invalid', 1:'GPS', 2:'DGPS' }
_dctDOPMode = { 1:'invalid', 2:'2-D', 3:'3-D' }
def __init__(self, lstData):
self.lstData = lstData
self.lstSats = []
for line in self.lstData:
# remove the ^\
line = line.replace( '\x1c','')
logGpsData.debug( line )
if line[0:4] == 'SAT:':
lst = line[4:].split(',')
if lst:
prn = lst[0]
ele = None
azi = None
snr = None
if len(lst) > 1: ele = lst[1]
if len(lst) > 2: azi = lst[2]
if len(lst) > 3: snr = lst[3]
if prn:
sat = Satellite( prn, ele, azi, snr)
self.lstSats.append( sat )
elif line[0:4] == 'GGA:':
lst = line[4:].split(',')
if len(lst) in [6,7,8]:
curTime = lst[0].replace('-','_')
curTime = curTime.replace('.','-')
self.curTime = parseTimeString( curTime )
                    # Field order per the class docstring:
                    # timestamp, UTC, latitude, N/S, longitude, E/W, fix
                    self.fix = self._dctGGAFix.get( int(lst[6]), lst[6])
                    self.longitude = None
                    self.latitude = None
                    self.NorS = None
                    self.EorW = None
                    if lst[2] and lst[4] and self.fix:
                        self.latitude = self._convToDeg(float(lst[2]) / 100)
                        self.NorS = lst[3]
                        self.longitude = self._convToDeg(float(lst[4]) / 100)
                        self.EorW = lst[5]
elif line[0:4] == 'DOP:':
self.dctDOP = {}
lst = line[4:].split(',')
if len(lst) == 8:
mode = int(lst[0])
self.dctDOP['mode'] = self._dctDOPMode.get( mode, lst[0])
if mode > 1:
self.dctDOP['PDOP'] = float(lst[1])
self.dctDOP['HDOP'] = float(lst[2])
self.dctDOP['VDOP'] = float(lst[3])
self.dctDOP['epx'] = float(lst[4])
self.dctDOP['epy'] = float(lst[5])
self.dctDOP['epv'] = float(lst[6])
self.dctDOP['epe'] = float(lst[7])
    def _convToDeg(self, value):
        """ The fractional part holds minutes/100; convert to decimal degrees """
        frac,num = math.modf(value)
        # frac is minutes/100 (e.g. 0.171137 -> 17.1137 minutes), so scale by 100 before /60
        return num + frac * 100.0 / 60.0
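# Worked example for _convToDeg above: GGA latitude '3717.1137' means 37 deg
# 17.1137 min; 3717.1137 / 100 = 37.171137, math.modf -> (0.171137, 37.0),
# and 37 + 0.171137 * 100 / 60 = 37.28523 decimal degrees.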
def processGPSData(lst):
    """ process the lines from reading the gpsdata file on DUT
        return a GPSData object
    """
    return GPSData(lst)
import socket
class TelnetConn(object):
_TIMEOUT = 0.5
_HOST = 'localhost'
_PORT = 23
_PROMPT = 'quantenna #'
def __init__(self, dct):
self.host = dct.get( 'host', TelnetConn._HOST)
self.port = int(dct.get( 'port', TelnetConn._PORT))
self.timeout = float(dct.get( 'timeout', TelnetConn._TIMEOUT))
self.prompt = dct.get( 'prompt', TelnetConn._PROMPT)
self.sock = None
self._resp = ''
def connect(self):
""" Perform all setup operations for communicating with the simulator. """
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print('Connecting to %s:%d' % (self.host, self.port))
self.sock.settimeout( self.timeout )
self.sock.connect((self.host, self.port))
def close(self):
""" Disconnect from the simulator. """
if self.sock:
self.sock.close()
self.sock = None
def recv(self):
""" Receive data from socket. """
try:
recv = self.sock.recv(8*1024)
return recv
except socket.timeout, err:
return None
def send(self, binData):
""" Write data to the socket. """
self.sock.send(binData)
def readUntil(self, timeout=5.0):
""" read until prompt is returned """
# make a timeout
dtTimeout = datetime.datetime.now() + datetime.timedelta( seconds=timeout)
while True:
resp = self.recv()
if resp is not None:
self._resp += resp
index = self._resp.find( self.prompt )
if index != -1:
index += len(self.prompt)
resp = self._resp[0:index]
self._resp = self._resp[index+1:]
return resp
if datetime.datetime.now() > dtTimeout:
print 'readUntil() fail -- timeout after %s seconds' % timeout
return ''
import serial
class SerialConn(object):
""" Uses a serial port for all communication """
_BAUDRATE = 115200
_TIMEOUT = 0.5
_PORT = 6
_PROMPT = 'quantenna #'
def __init__(self, dct ):
        self.port = int(dct.get( 'port', SerialConn._PORT))
        self.timeout = float(dct.get( 'timeout', SerialConn._TIMEOUT))
        self.baudrate = int(dct.get( 'baudrate', SerialConn._BAUDRATE))
        self.prompt = dct.get( 'prompt', SerialConn._PROMPT)  # used by readUntil()
        self.ser = None
self._resp = ''
def connect(self):
""" Perform all setup operations for communicating with the simulator. """
print('Connecting to COM%d' % (self.port))
self.ser = serial.Serial(self.port-1)
self.ser.timeout = self.timeout
self.ser.baudrate = self.baudrate
def close(self):
""" Disconnect from the simulator. """
if self.ser:
self.ser.close()
self.ser = None
def send(self, cmd):
self.ser.write(cmd)
def recv(self):
""" Read data from the serial port """
dtTimeout = datetime.datetime.now() + datetime.timedelta(seconds=self.ser.timeout)
while True:
if self.ser.inWaiting() > 0:
recv = self.ser.read(self.ser.inWaiting())
return recv
if datetime.datetime.now() > dtTimeout:
return None
time.sleep( 0.05 )
def readUntil(self, timeout=5.0):
""" read until prompt is returned """
# make a timeout
dtTimeout = datetime.datetime.now() + datetime.timedelta( seconds=timeout)
while True:
resp = self.recv()
if resp is not None:
self._resp += resp
index = self._resp.find( self.prompt )
if index != -1:
index += len(self.prompt)
resp = self._resp[0:index]
self._resp = self._resp[index+1:]
return resp
if datetime.datetime.now() > dtTimeout:
print 'readUntil() fail -- timeout after %s seconds' % timeout
return ''
class FileConn(object):
_FILENAME = 'gps_input.txt'
def __init__(self, dct):
self.filename = dct.get( 'filename', FileConn._FILENAME)
self._fp = None
def connect(self):
""" Perform all setup operations for communicating with the simulator. """
print('Opening file %s' % (self.filename))
self._fp = open(self.filename)
def close(self):
""" Disconnect from the simulator. """
if self._fp:
self._fp.close()
self._fp = None
def recv(self):
""" Receive data from socket. """
recv = self._fp.readline()
if recv == '':
raise Exception('No more lines in file %s' % self.filename)
log.debug('recv:%s' % recv)
return recv
def send(self, binData):
""" Write data to the socket. """
raise Exception( 'WE SHOULD NOT BE WRITING HERE' )
class GPSD(object):
""" base class for all GPSD JSON messages """
def __init__(self, json):
self.json = json
def getMsgType(self):
return self.json['class']
def __str__(self):
return 'GPSD %s' % (self.getMsgType())
class GPSD_SKY(GPSD):
""" SKY messages """
def __init__(self, json):
GPSD.__init__(self, json)
self.lstSats = []
for key,value in self.json.items():
if key == 'satellites':
self.lstSats = [Satellite( dct['PRN'], dct['el'], dct['az'], dct['ss'], dct['used']) for dct in value]
else:
setattr( self, key, value )
def __str__(self):
return 'SKY - sats:%d' % (len(self.lstSats))
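# Example of the SKY JSON consumed above (sketch; any key other than
# 'satellites' simply becomes an attribute on the instance):
#
#   {"class": "SKY", "hdop": 1.02,
#    "satellites": [{"PRN": 31, "el": 56, "az": 137, "ss": 46, "used": true}]}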
class GPSD_TPV(GPSD):
""" SKY messages """
def __init__(self, json):
GPSD.__init__(self, json)
for key,value in self.json.items():
if key == 'time':
# Expect format 2013-09-01T00:34:02.000Z
year = value[0:4]
month = value[5:7]
day = value[8:10]
hour = value[11:13]
minute = value[14:16]
second = value[17:19]
self.time = datetime.datetime(year=int(year),month=int(month),day=int(day),
hour=int(hour),minute=int(minute),second=int(second), tzinfo=UTC() )
else:
setattr( self, key, value )
def __str__(self):
return 'TPV - time:%s' % getattr( self, 'time', None)
import json
class GPSDConn(object):
""" Use a socket to communicate with gpsd process """
_TIMEOUT = 0.5
_HOST = '192.168.1.100'
_PORT = 2947
def __init__(self, dct):
self.host = dct.get( 'host', GPSDConn._HOST)
self.port = int(dct.get( 'port', GPSDConn._PORT))
self.timeout = float(dct.get( 'timeout', GPSDConn._TIMEOUT))
self.sock = None
self._resp = ''
def connect(self):
""" Perform all setup operations for communicating with the simulator. """
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
logGPSD.info( 'Connecting to %s:%d' % (self.host, self.port))
self.sock.settimeout( self.timeout )
self.sock.connect((self.host, self.port))
def close(self):
""" Disconnect from the simulator. """
if self.sock:
self.sock.close()
self.sock = None
def recv(self):
""" Receive data from socket. """
try:
recv = self.sock.recv(8*1024)
hexDump( recv, msg='recv', logFunc=logGPSD.debug )
return recv
except socket.timeout, err:
return None
def send(self, binData):
""" Write data to the socket. """
hexDump( binData, msg='send', logFunc=logGPSD.debug )
self.sock.send(binData)
def enableJSONData(self):
# send command to start receiving the data
self.send( '?WATCH={"enable":true,"json":true}\n' )
def disableJSONData(self):
# send command to start receiving the data
self.send( '?WATCH={"enable":false,"json":true}\n' )
def readMsgs(self, timeout=5.0):
""" read and parse into messages """
# make a timeout
dtTimeout = datetime.datetime.now() + datetime.timedelta( seconds=timeout)
while True:
resp = self.recv()
if resp is not None:
#self._resp += resp
#lst = self._resp.split( '\r\n' )
lst = resp.split( '\r\n' )
if lst:
for n,msg in enumerate(lst):
#print 'Msg %d : length=%d' % (n,len(msg))
hexDump( msg, msg='msg %d' % n, logFunc=logGPSD.debug )
# remove 0 length messages
lst = [msg for msg in lst if len(msg) > 0]
lstJS = []
for msg in lst:
try:
dct = json.loads( msg )
js = self._parseGPSJson( dct )
lstJS.append( js )
except Exception,err:
#print '\nEXCEPTION : %s' % err
logGPSD.error( 'readMsgs() fail - %s' % err )
return lstJS
if datetime.datetime.now() > dtTimeout:
logGPSD.warn('readMsgs() fail -- timeout after %s seconds .. return empty list' % timeout)
return []
def _parseGPSJson( self, js ):
rec = None
try:
if js['class'] == 'SKY':
rec = GPSD_SKY( js )
elif js['class'] == 'TPV':
rec = GPSD_TPV( js )
else:
rec = GPSD( js )
except Exception,err:
logGPSD.error( '_parseGPSJson() fail - %s' % err)
return rec
def dumpResp( lst, decode=True ):
print( '\nlst:%s' % lst )
for gpsdMsg in lst:
print( 'gpsdMsg:%s' % gpsdMsg )
if gpsdMsg.getMsgType() == 'SKY':
for n,sat in enumerate(gpsdMsg.lstSats):
print(' %2d : %s' % (n+1, sat))
elif gpsdMsg.getMsgType() == 'TPV':
for key,value in gpsdMsg.json.items():
print ' %-15s : %s' % (key,value)
def testGPSDComm(host=None, port=None):
""" testing GPSDComm """
dct = {}
if host is not None:
dct['host'] = host
if port is not None:
dct['port'] = port
gpsd = None
try:
gpsd = GPSDConn( dct )
gpsd.connect()
print
lst = gpsd.readMsgs()
dumpResp( lst )
print
# send command to start receiving the data
gpsd.enableJSONData()
lst = gpsd.readMsgs()
dumpResp( lst )
while True:
lst = gpsd.readMsgs()
if lst:
dumpResp( lst )
else:
time.sleep( 0.5 )
finally:
if gpsd:
gpsd.disableJSONData()
gpsd.close()
def getStatus(conn2):
# Now send the status
cmd = 'gpsdoctl2 ST'
#print '\nsending %s to conn2' % cmd
conn2.send( '%s\n' % cmd )
time.sleep( 0.1 )
recv = conn2.readUntil()
#print 'recv: %s' % recv
if recv == '':
print '***** Error -- no data in response'
return None
# parse to get last byte
lst = str(recv).split('\n')
lst = [line.strip() for line in lst]
#print 'lst:%s' % lst
if lst[2] == 'quantenna #' and lst[0] == cmd:
lst2 = lst[1].split()
if len(lst2) != 7:
print '***** Error parsing the %s response -- length != 7' % cmd
return None
return lst2
else:
print '***** Error parsing the %s response' % cmd
return None
def dumpResults(csv2File, gga, gsa, los, los2, lstGpsSNR, lstGlonassSNR, lstUsedSats ):
""" dump results to CSV file and stdout """
# calc averages
snrGPS = 0.0
snrGLONASS = 0.0
snrAvg = 0.0
if lstGpsSNR:
snrGPS = sum(lstGpsSNR) / len(lstGpsSNR)
if lstGlonassSNR:
snrGLONASS = sum(lstGlonassSNR) / len(lstGlonassSNR)
lstSNR = lstGpsSNR + lstGlonassSNR
if lstSNR:
snrAvg = sum(lstSNR) / len(lstSNR)
# build CSV output
line2 = '%s,%s,%s,%s' % (gga.UTC, gsa.mode, los, los2)
line2 += ',%s' % (len(GPGSV.lstSatellites) + len(GLGSV.lstGLONASSSats)) # total sats in sky
line2 += ',%s' % (len(lstSNR)) # total sats used
line2 += ',%.1f' % (snrAvg) # SNR for all sats used
line2 += ',%s' % (len(lstGpsSNR)) # total GPS sats used
line2 += ',%.1f' % (snrGPS) # SNR for GPS sats used
line2 += ',%s' % (len(lstGlonassSNR)) # total GLONASS sats used
line2 += ',%.1f' % (snrGLONASS) # SNR for GLONASS sats used
for sat in lstUsedSats:
line2 += ',%s,%s' % (sat.prn, sat.snr)
csv2File.write( line2 + '\n' )
# dump to stdout
print ' los:%s los2:%s' % (los,los2)
print ' GPS sats %d' % len(GPGSV.lstSatellites)
for sat in GPGSV.lstSatellites:
print ' %-7s : %s' % ('GPS',sat.desc())
print ' GLONASS sats %d' % len(GLGSV.lstGLONASSSats)
for sat in GLGSV.lstGLONASSSats:
print ' %-7s : %s' % ('GLONASS',sat.desc())
print
print ' Sats %-7s : %5d (%2d)' % ('GPS',len(GPGSV.lstSatellites), len([sat for sat in GPGSV.lstSatellites if sat.used]))
print ' Sats %-7s : %5d (%2d)' % ('GLONASS',len(GLGSV.lstGLONASSSats), len([sat for sat in GLGSV.lstGLONASSSats if sat.used]))
print ' SNR %-7s : %.2f (%2d) ' % ('GPS', snrGPS, len(lstGpsSNR))
print ' SNR %-7s : %.2f (%2d) ' % ('GLONASS', snrGLONASS, len(lstGlonassSNR))
print ' SNR %-7s : %.2f (%2d) ' % ('All', snrAvg, len(lstSNR))
def processGPSMessages(dctRecs, conn2, gpsFile, csvFile, csv2File):
""" """
try:
gga = dctRecs['GPGGA']
gsa = dctRecs['GPGSA']
except KeyError,err:
print 'Missing GPGGA or GPGSA. Skipping ...'
return
lstStatus = []
los = 'x'
if conn2:
# Now send the status
lstStatus = getStatus( conn2 )
if lstStatus:
los = lstStatus[6][-1]
if gpsFile:
gpsFile.write( 'PICStatus : %s\n' % ','.join( lstStatus) )
print ' PROCESS GPS only'
s = 'UTC:%s mode:%s los:%s numsats:%s' % (gga.UTC, gsa.mode, los, len(GPGSV.lstSatellites))
line = '%s,%s,%s,%s' % (gga.UTC, gsa.mode, los, len(GPGSV.lstSatellites))
for sat in GPGSV.lstSatellites:
s += ' [%s,%s]' % (sat.prn, sat.snr)
line += ',%s,%s' % (sat.prn, sat.snr)
#print ' ', s
#print ' ', line
if csvFile:
csvFile.write( line + '\n' )
# for 2nd CSV output file
if csv2File:
if los != 'x' and int( los, 16) > 7:
los2 = 1
else:
los2 = 0
lstUsedSats = []
lstSNR = []
for n in range(gga.numSatsView):
PRN = gsa.lstSatIDs[n]
sat = GPGSV.getSatellite( PRN )
sat.used = True
lstUsedSats.append( sat )
if sat.snr is not None:
lstSNR.append( float(sat.snr))
else:
logError( 'sat has no SNR - skipping for calculation - %s' % sat)
#
dumpResults(csv2File, gga, gsa, los, los2, lstSNR, [], lstUsedSats )
#snrAvg = sum(lstSNR) / len(lstSNR)
#line2 = '%s,%s,%s,%s,%s,%s,%.1f' % (gga.UTC, gsa.mode, los, los2, len(GPGSV.lstSatellites), gga.numSatsView, snrAvg)
#for sat in lstUsedSats:
#line2 += ',%s,%s' % (sat.prn, sat.snr)
#csv2File.write( line2 + '\n' )
def logError( msg ):
log.error(msg)
print '***** ERROR - ' + msg
def processGPSGLONASSMessages(dctRecs, conn2, gpsFile, csvFile, csv2File):
""" Process GPS/GLONASS combined messages """
sat = None
gga = None
gsa = None
if 0:
assert isinstance(gga, GPGGA)
assert isinstance(gsa, GNGSA)
assert isinstance(sat, Satellite)
try:
gga = dctRecs['GPGGA']
gsa = dctRecs['GNGSA']
except KeyError,err:
logError( 'Process GPS/GLONASS fail -- Missing GPGGA or GNGSA -- Skipping ...')
return
lstStatus = []
los = 'x'
if conn2:
# Now send the status
lstStatus = getStatus( conn2 )
if lstStatus:
los = lstStatus[6][-1]
if gpsFile:
gpsFile.write( 'PICStatus : %s\n' % ','.join( lstStatus) )
print '\n PROCESSING GPS/GLONASS...'
s = 'UTC:%s mode:%s los:%s numsats:%s' % (gga.UTC, gsa.mode, los, len(GPGSV.lstSatellites)+len(GLGSV.lstGLONASSSats))
line = '%s,%s,%s,%s' % (gga.UTC, gsa.mode, los, len(GPGSV.lstSatellites)+len(GLGSV.lstGLONASSSats))
# GPS sats
for sat in GPGSV.lstSatellites:
s += ' [%s,%s]' % (sat.prn, sat.snr)
line += ',%s,%s' % (sat.prn, sat.snr)
# add the GLONASS sats
for sat in GLGSV.lstGLONASSSats:
s += ' [%s,%s]' % (sat.prn, sat.snr)
line += ',%s,%s' % (sat.prn, sat.snr)
#print ' ', s
#print ' ', line
if csvFile:
csvFile.write( line + '\n' )
# for 2nd CSV output file
if csv2File:
if los != 'x' and int( los, 16) > 7:
los2 = 1
else:
los2 = 0
lstUsedSats = []
lstGpsSNR = []
lstGlonassSNR = []
# GPS sats
for PRN in gsa._GPS.lstSatIDs:
sat = GPGSV.getSatellite( PRN )
sat.used = True
lstUsedSats.append( sat )
if sat.snr is not None:
lstGpsSNR.append( float(sat.snr))
else:
logError( 'sat has no SNR - skipping for calculation - %s' % sat)
# GLONASS sats
for PRN in gsa._GLONASS.lstSatIDs:
sat = GLGSV.getSatellite( PRN )
sat.used = True
lstUsedSats.append( sat )
if sat.snr is not None:
lstGlonassSNR.append( float(sat.snr))
else:
logError( 'sat has no SNR - skipping for SNR calculation - %s' % sat)
#
dumpResults(csv2File, gga, gsa, los, los2, lstGpsSNR, lstGlonassSNR, lstUsedSats )
if __name__ == '__main__':
TLLog.config( 'gps_test.log', defLogLevel=logging.INFO )
import time
from optparse import OptionParser
DEF_PORT = 1
DEF_OUTPUT_CSV_FILE = 'output.txt'
DEF_OUTPUT2_CSV_FILE = 'output2.txt'
DEF_OUTPUT_GPS_FILE = 'gps.txt'
DEF_BAUDRATE = 9600
DEF_TIMEOUT = 0.5
DEF_IPADDR = '192.168.1.100'
DEFAULT_LOG_ENABLE = 'gps'
# build the command line arguments
parser = OptionParser()
parser.add_option( "-p", "--port", dest="port", default=DEF_PORT,
help="set the com port to read GPS messages. Default is %d" % DEF_PORT)
parser.add_option( "-f", "--outputCSVFile", dest="outputCSVFile", default=DEF_OUTPUT_CSV_FILE,
help='Set the output CSV file. Default is %s' % DEF_OUTPUT_CSV_FILE )
parser.add_option( '', "--output2CSVFile", dest="output2CSVFile", default=DEF_OUTPUT2_CSV_FILE,
help='Set the 2nd output CSV file. Default is %s' % DEF_OUTPUT2_CSV_FILE )
parser.add_option( "-b", "--baudrate", dest="baudrate", default=DEF_BAUDRATE,
help='Set the serial port baudrate. Default is %d' % DEF_BAUDRATE)
parser.add_option( "-t", "--timeout", dest="timeout", default=DEF_TIMEOUT,
help='Set the serial port timeout. Default is %s sec' % DEF_TIMEOUT)
parser.add_option( "-g", "--outputGPSFile", dest="outputGPSFile", default=DEF_OUTPUT_GPS_FILE,
help='Set the output GPS file. Default is %s' % DEF_OUTPUT_GPS_FILE )
parser.add_option( "-a", "--ipaddr", dest="ipaddr", default=DEF_IPADDR,
help='Set the IP address for the telnet connection. Default is %s' % DEF_IPADDR)
parser.add_option( "", "--gpsdTest", action="store_true", dest="gpsdTest", default=False,
help='Perform testing using the gpsd daemon on board. Default is False' )
parser.add_option( "-m", "--logEnable", dest="lstLogEnable", default=DEFAULT_LOG_ENABLE,
help='Comma separated list of log modules to enable, * for all. Default is "%s"' % DEFAULT_LOG_ENABLE)
parser.add_option( "", "--gpsTestFile", dest="gpsTestFile", default=None,
help='Set a file to input test GPS NMEA messages.' )
# parse the command line and set values
(options, args) = parser.parse_args()
# makes Control-break behave the same as Control-C on windows
import signal
signal.signal( signal.SIGBREAK, signal.default_int_handler )
port = int(options.port)
outputCSVFile = options.outputCSVFile
output2CSVFile = options.output2CSVFile
outputGPSFile = options.outputGPSFile
baudrate = int(options.baudrate)
timeout = float(options.timeout)
ipaddr = options.ipaddr
ser = None
csvFile = None
gsvFile = None
csv2File = None
gga = None
gsa = None
conn = None
conn2 = None
sat = None
gpsFile = None
parseMessages = True
# for wing IDE object lookup, code does not need to be run
if 0:
assert isinstance(gga, GPGGA)
assert isinstance(gsa, GPGSA)
assert isinstance(sat, Satellite)
try:
log.info( '*********************************************' )
log.info( 'GPS test starting .....' )
# update log options
logOptions(options.lstLogEnable)
if options.gpsdTest:
log.info( 'GPSD testing only .....' )
testGPSDComm()
if options.gpsTestFile:
log.info( 'Using GPS input file %s ...' % options.gpsTestFile )
dct = { 'filename' : options.gpsTestFile }
conn = FileConn( dct )
conn.connect()
else:
# use telnet
dct = { 'host' : ipaddr }
print 'Connecting telnet to %s for GPS ...' % ipaddr
conn = TelnetConn( dct )
conn.connect()
time.sleep(0.2)
resp = conn.recv()
print 'resp:%s' % resp
conn.send( 'root\n' )
time.sleep(0.2)
resp = conn.readUntil()
#resp = conn.recv()
print 'resp:%s' % resp
print 'Connecting telnet to %s for status ...' % ipaddr
conn2 = TelnetConn( dct )
conn2.connect()
time.sleep(0.2)
resp = conn2.recv()
print 'resp:%s' % resp
conn2.send( 'root\n' )
time.sleep(0.2)
resp = conn2.readUntil()
#resp = conn2.recv()
print 'resp:%s' % resp
# start the messages
conn.send( 'cat /dev/ttyS1\n' )
# open files append
csvFile = open( outputCSVFile, 'a')
gpsFile = open( outputGPSFile, 'a' )
csv2File = open( output2CSVFile, 'a')
#conn = SerialConn( port=port, baudrate=baudrate, timeout=timeout)
respData = ''
dctRecs = {}
while True:
recv = conn.recv()
if recv is not None:
try:
#print 'recv:%s' % recv
#print
if gpsFile:
gpsFile.write( recv )
# process the data
respData += recv
lst = respData.split('\n')
#print 'lst:%s' % lst
#print
respData = lst[-1]
lstRecs = parseGPSMessages( lst[:-1] )
for rec in lstRecs:
print '%s - %s' % (datetime.datetime.now() , rec)
if parseMessages:
dctRecs[ rec.getMsgType() ] = rec
if rec.getMsgType() == 'GPGLL':
processGPSMessages(dctRecs, conn2, gpsFile, csvFile, csv2File)
elif rec.getMsgType() == 'GNGLL':
processGPSGLONASSMessages(dctRecs, conn2, gpsFile, csvFile, csv2File)
except Exception,err:
excTraceback( err, log, raiseErr=False)
#s = '%s: %s' % (err.__class__.__name__, err)
#log.error( s )
#print s
#print '-- traceback --'
#traceback.print_exc()
#print
print
time.sleep( 0.05 )
except Exception, err:
excTraceback( err, log, raiseErr=False)
#s = '%s: %s' % (err.__class__.__name__, err)
#log.error( s )
#print s
#print '-- traceback --'
#traceback.print_exc()
#print
finally:
if ser:
ser.close()
if gpsFile:
gpsFile.close()
if csvFile:
csvFile.close()
if csv2File:
csv2File.close()
log.info( 'gps_test exiting' )
|
# -*- coding: utf-8 -*-
"""Utils."""
from zope.component import getUtility
from plone.behavior.interfaces import IBehavior
# from plone.memoize.ram import cache
from dexterity.localrolesfield.interfaces import IBaseLocalRoleField
def cache_key(fun, fti):
return fti
# @cache(cache_key) -- a test with profilehooks.timecall gives 0.000s for this method with ten fields and some behaviors
def get_localrole_fields(fti):
"""Get all local role(s) fields for given fti.
Lookup local role(s) fields on content from its schema and its behaviors.
Return field name and field object for each found field.
"""
fti_schema = fti.lookupSchema()
fields = [(n, f) for n, f in fti_schema.namesAndDescriptions(all=True)
if IBaseLocalRoleField.providedBy(f)]
# also lookup behaviors
for behavior_id in fti.behaviors:
behavior = getUtility(IBehavior, behavior_id).interface
fields.extend(
[(n, f) for n, f in behavior.namesAndDescriptions(all=True)
if IBaseLocalRoleField.providedBy(f)])
return fields
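# Hypothetical usage sketch (the FTI id 'my.content.type' is illustrative;
# look up a real Dexterity type from the portal_types tool):
#
#   fti = getToolByName(portal, 'portal_types')['my.content.type']
#   for name, field in get_localrole_fields(fti):
#       print(name, field)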
|
from django.conf.urls import url, include
#from django.views.generic import TemplateView
from . import views
urlpatterns = [
# url(r'^shipping-address/$', TemplateView.as_view(
# template_name='users/user_shipping_address_view.html')),
url(r'^shipping-address/$',
views.ShippingAddressView.as_view(),
name='user_shipping_address_view'),
]
|
#!python
# Fetch one or more book's fpgen source from fadedpage,
# rerun fpgen, and send the resulting mobi
# output back to fadedpage.
#
# scp should be setup to work without a password
#
# Args is a list of fadedpage book ids
# Old .mobi file is left in 20######.save.mobi
# New .mobi file is left in 20######.mobi
import sys
import os
def fatal(line):
sys.stderr.write("ERROR " + line)
exit(1)
def repair(id):
# Retrieve the .mobi file for backup purposes
if not os.path.exists(id + ".save.mobi"):
os.system("scp fadedpage@ssh.fadedpage.com:books/{0}/{0}.mobi {0}.save.mobi".format(id))
# Retrieve the source file, and all images
os.system("rm -rf images")
os.system("scp fadedpage@ssh.fadedpage.com:books/{0}/{0}-src.txt {0}-src.txt".format(id))
os.system(f"scp -r fadedpage@ssh.fadedpage.com:books/{id}/images images")
# Rerun fpgen just for mobi output
exit = os.system(f"fpgen -f k {id}-src.txt")
if exit != 0:
fatal(f"{0}: fpgen failed with exit code {exit}")
# Send the new mobi file back to the server
os.system("scp {0}.mobi fadedpage@ssh.fadedpage.com:books/{0}/{0}.mobi".format(id))
if len(sys.argv) < 2:
fatal("Usage: rerun-mobi book-id ...")
for src in sys.argv[1:]:
repair(src)
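# Example invocation (book ids are illustrative):
#   rerun-mobi 20180101 20180102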
|
import os.path as osp
import pandas as pd
from .manager import BaseManager
from utils import seed_everything, make_datapath_list
from dataset import TrainDataset, Anno_xml2list, DataTransform, od_collate_fn, get_dataloader
from models import ObjectDetectionModel
class Train(BaseManager):
def __call__(self):
print("Training")
if self.get("train_flag"):
for seed in self.seeds:
self.train_by_seed(seed)
def train_by_seed(self, seed):
seed_everything(seed)
train_img_list, train_anno_list, val_img_list, val_anno_list = make_datapath_list(self.data_path)
if self.debug:
train_img_list = train_img_list[:2]
train_anno_list = train_anno_list[:2]
val_img_list = val_img_list[:2]
val_anno_list = val_anno_list[:2]
self.params["epochs"] = 1
train_dataset = TrainDataset(
train_img_list,
train_anno_list,
phase="train",
transform=DataTransform(self.image_size, self.get("tr_transform_params")),
transform_anno=Anno_xml2list(self.voc_classes)
)
val_dataset = TrainDataset(
val_img_list,
val_anno_list,
phase="val",
transform=DataTransform(self.image_size, self.get("val_transform_params")),
transform_anno=Anno_xml2list(self.voc_classes)
)
trainloader = get_dataloader(
dataset=train_dataset,
batch_size=self.get("batch_size"),
num_workers=self.get("num_workers"),
shuffle=True,
collate_fn=od_collate_fn,
drop_last=False
)
validloader = get_dataloader(
dataset=val_dataset,
batch_size=self.get("batch_size"),
num_workers=self.get("num_workers"),
shuffle=False,
collate_fn=od_collate_fn,
drop_last=False
)
self.params["seed"] = seed
self.params["phase"] = "train"
model = ObjectDetectionModel(self.params)
model.fit(trainloader, validloader)
# valid predict, no detect; we could also run detect at the end instead
val_preds = model.val_preds
try:
print(val_preds[0].shape)
print(val_preds[1].shape)
print(val_preds[2].shape)
except Exception:
pass
#val_preds = pd.DataFrame(val_preds, columns=[f"pred_{n}" for n in range(self.params["output_size"])])
#val_preds.to_csv(osp.join(self.val_preds_path, f"preds_{seed}_{fold}.csv"), index=False)
|
#!/usr/bin/env python3
import sys
def main(inputfile):
with open(inputfile, 'r') as rd:
print("Resulting frequency:", find_rep_freq(0, rd))
def find_rep_freq(start, rd):
current = start
counter = 0
mutations = [int(x.strip()) for x in rd.readlines()]
max_mut = len(mutations)
found_freqs = set()
while True:
mut = mutations[counter % max_mut]
counter += 1
current += mut
if current in found_freqs:
break
found_freqs.add(current)
return current
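# Example: with mutations [+1, -2, +3, +1] the running frequency goes
# 0, 1, -1, 2, 3, 4, 2 -- and 2 is the first value reached twice, so
# find_rep_freq(0, rd) returns 2.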
def calculate_freq(start, rd):
mutations = [int(x.strip()) for x in rd.readlines()]
for mut in mutations:
start += mut
return start
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Usage: %s inputfile", sys.argv[0])
sys.exit(0)
main(sys.argv[1])
|
#!/usr/bin/python
import requests,pprint,bs4
page=requests.get('https://dataquestio.github.io/web-scraping-pages/simple.html')
print(page)
print(page.status_code) #print status code
#print(page.content) #print the content of the page
#from bs4 import BeautifulSoup
#soup=BeautifulSoup(page.content,'html.parser')
#print(soup.prettify())
#print(list(soup.children))
#print([type(item) for item in list(soup.children)])
#html=list(soup.children)[2]
#print(list(html.children))
# second example
#page2=requests.get('http://dataquestio.github.io/web-scraping-pages/ids_and_classes.html')
#print(page2.status_code)
#from bs4 import BeautifulSoup
#soup1=BeautifulSoup(page2.content,'html.parser')
#print(list(soup1.children))
#print('find all ')
#print(soup1.find_all('p',class_='outer-text'))
#txt=[soup1.get_text() for e in soup1]
#print (txt[0])
#third
print('SCMS Site')
page3=requests.get('http://scmsgroup.org/sstm/objectives')
print(page3.status_code)
from bs4 import BeautifulSoup
soup2=BeautifulSoup(page3.content,'html.parser')
print(soup2.find_all('p', class_='head-h3'))
txt=[soup2.get_text() for e in soup2]
print(txt[0])
|
# Worked -- generates a list of ffmpeg commands that convert gd691108 .shn tracks to .flac
prefix = "ffmpeg -i gd691108d"
d_v = 1
t_v = 1
mids = ".shn gd69-11-08-"
nt_v = 1
file = open("slate2.txt", "w")
def tracks():
global t_v
global nt_v
file.write("\n" + prefix + str(d_v) + "t0" + str(t_v) + mids + "0" + str(nt_v)+".flac"+"\n")
t_v += 1
nt_v += 1
def trackx():
global t_v
global nt_v
file.write("\n" + prefix + str(d_v) + "t0" + str(t_v) + mids + str(nt_v) +".flac"+"\n")
t_v +=1
nt_v +=1
while t_v < 8:
tracks()
d_v = 2
t_v = 1
file.write("\n" + prefix + str(d_v) + "t0" + str(t_v) + mids + "0" + str(nt_v)+".flac"+"\n")
t_v += 1
nt_v += 1
file.write("\n" + prefix + str(d_v) + "t0" + str(t_v) + mids + "0" + str(nt_v)+".flac"+"\n")
t_v += 1
nt_v += 1
while t_v < 9:
trackx()
file.close()
|
import json
import datetime
import h5py
import numpy as np
from pelops.datasets.chip import ChipDataset, Chip
class FeatureDataset(ChipDataset):
def __init__(self, filename):
super().__init__(filename)
self.chip_index_lookup, self.chips, self.feats = self.load(filename)
self.filename_lookup = {}
for chip_key, chip in self.chips.items():
self.filename_lookup[chip.filepath] = chip_key
def get_feats_for_chip(self, chip):
chip_key = self.filename_lookup[chip.filepath]
return self.feats[self.chip_index_lookup[chip_key]]
@staticmethod
def load(filename):
with h5py.File(filename) as fIn:
feats = np.array(fIn['feats'])
num_items = fIn['feats'].shape[0]
# Hack to deal with performance of extracting single items
local_hdf5 = {}
local_hdf5['chip_keys'] = np.array(fIn['chip_keys'])
local_hdf5['filepath'] = np.array(fIn['filepath'])
local_hdf5['car_id'] = np.array(fIn['car_id'])
local_hdf5['cam_id'] = np.array(fIn['cam_id'])
local_hdf5['time'] = np.array(fIn['time'])
local_hdf5['misc'] = np.array(fIn['misc'])
chips = {}
chip_index_lookup = {}
for i in range(num_items):
filepath = local_hdf5['filepath'][i].decode('utf-8')
car_id = local_hdf5['car_id'][i]
cam_id = local_hdf5['cam_id'][i]
timestamp = local_hdf5['time'][i]
if isinstance(timestamp, str) or isinstance(timestamp, bytes):
# Catch the case where we have encoded time as a string timestamp
timestamp = datetime.datetime.fromtimestamp(float(timestamp))
misc = json.loads(local_hdf5['misc'][i].decode('utf-8'))
chip_key = local_hdf5['chip_keys'][i]
if isinstance(chip_key, bytes):
chip_key = chip_key.decode('utf-8')
chip_index_lookup[chip_key] = i
chips[chip_key] = Chip(filepath, car_id, cam_id, timestamp, misc)
return chip_index_lookup, chips, feats
@staticmethod
def _save_field(fOut, field_example, field_name, value_array):
if isinstance(field_example, datetime.datetime):
# Encode time as a string seconds since epoch
times = np.array([str(val.timestamp()).encode('ascii', 'ignore') for val in value_array])
fOut.create_dataset(field_name,
data=times,
dtype=h5py.special_dtype(vlen=bytes))
elif isinstance(field_example, str):
output_vals = [val.encode('ascii', 'ignore') for val in value_array]
fOut.create_dataset(field_name,
data=output_vals,
dtype=h5py.special_dtype(vlen=bytes))
elif isinstance(field_example, dict):
output_vals = [json.dumps(val).encode('ascii', 'ignore') for val in value_array]
fOut.create_dataset(field_name,
data=output_vals,
dtype=h5py.special_dtype(vlen=bytes))
else:
fOut.create_dataset(field_name, data=value_array)
@staticmethod
def save(filename, chip_keys, chips, features):
""" Save a feature dataset
"""
with h5py.File(filename, 'w') as fOut:
fOut.create_dataset('feats', data=features)
FeatureDataset._save_field(fOut,
chip_keys[0],
'chip_keys',
chip_keys)
first_chip = chips[0]
fields = first_chip._fields
for field in fields:
field_example = getattr(first_chip, field)
output_data = [getattr(chip, field) for chip in chips]
FeatureDataset._save_field(fOut, field_example, field, output_data)
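# Hypothetical round-trip sketch (paths, ids and the feature matrix are
# illustrative; the Chip fields follow the constructor used in load() above):
#
#   chips = [Chip('img/0001.jpg', 42, 3, datetime.datetime.now(), {})]
#   feats = np.random.rand(1, 128)
#   FeatureDataset.save('feats.hdf5', ['chip_0'], chips, feats)
#   ds = FeatureDataset('feats.hdf5')
#   vec = ds.get_feats_for_chip(ds.chips['chip_0'])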
|
import time
from inc import *
import os
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
def CheckTable(tablename):
conn, cursor = Mysql()
try:
cursor.execute("select count(1) from `%s` limit 1" % (tablename))
except:
print "%s not exist" % (tablename)
else:
print "%s exist" % (tablename)
conn.commit()
CloseMysql(conn, cursor)
CheckTable("avg_cloudtchhome-service-provider_20171101")
|
import splitter
import unittest
class TestSplitFunction(unittest.TestCase):
"""docstring for TestSplitFunction"""
# def __init__(self, arg):
# super(TestSplitFunction, self).__init__()
# self.arg = arg
def setUp(self):
pass
def tearDown(self):
pass
def testSimpleString(self):
r = splitter.split('Goog 100 490.50')
self.assertEqual(r,['Goog','100','490.50'])
def testTypeConvert(self):
r = splitter.split('Goog 100 490.50',[str,int,float])
self.assertEqual(r,['Goog',100,490.50])
def testDelimiter(self):
pass
if __name__ == '__main__':
unittest.main()
|
from rest_framework import permissions
from rest_framework.permissions import IsAdminUser
class IsOwner(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it or see it.
"""
def has_object_permission(self, request, view, obj):
# Read and write permissions are both restricted to the owner, so
# ownership is required for every method (GET, HEAD and OPTIONS included).
return obj.user == request.user
class CustomObjectPermissions(permissions.DjangoObjectPermissions):
"""
Similar to `DjangoObjectPermissions`, but adding 'view' permissions.
"""
perms_map = {
'GET': ['%(app_label)s.view_%(model_name)s'],
'OPTIONS': ['%(app_label)s.view_%(model_name)s'],
'HEAD': ['%(app_label)s.view_%(model_name)s'],
'POST': ['%(app_label)s.add_%(model_name)s'],
'PUT': ['%(app_label)s.change_%(model_name)s'],
'PATCH': ['%(app_label)s.change_%(model_name)s'],
'DELETE': ['%(app_label)s.delete_%(model_name)s'],
}
class IsAdminOrSelf(IsAdminUser):
"""
Allow access to admin users or the user himself.
"""
def has_object_permission(self, request, view, obj):
if request.user and request.user.is_staff:
return True
elif (request.user and type(obj) == type(request.user) and
obj == request.user):
return True
return False
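# Hypothetical wiring sketch (viewset, model and serializer names are illustrative):
#
#   class SnippetViewSet(viewsets.ModelViewSet):
#       queryset = Snippet.objects.all()
#       serializer_class = SnippetSerializer
#       permission_classes = [IsOwner]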
|
import torch
import transformers
import turbo_transformers
from turbo_transformers.layers.utils import convert2tt_tensor, try_convert, convert_returns_as_type, ReturnType
import time
model = transformers.BertModel.from_pretrained('bert-base-uncased')
model.eval()
torch.set_grad_enabled(False)
bertlayer = model.encoder.layer[0]
qbertlayer = turbo_transformers.QBertLayer.from_torch(bertlayer)
torchqbertlayer = torch.quantization.quantize_dynamic(bertlayer)
lens = [10,20,40,60,80,100,200,300]
loops = 100
for l in lens:
input_tensor = torch.rand((1, l, 768))
attention_mask = torch.ones((1, l))
attention_mask = attention_mask[:, None, None, :]
attention_mask = (1.0 - attention_mask) * -10000.0
print("seq length =", l)
start = time.time()
for i in range(loops):
res = bertlayer(input_tensor, attention_mask, output_attentions=True)
end = time.time()
print("torch fp32 layer QPS =", loops/(end-start))
start = time.time()
for i in range(loops):
res2 = qbertlayer(input_tensor, attention_mask, output_attentions=True)
end = time.time()
print("turbo fp32+int8 layer QPS =", loops/(end-start))
start = time.time()
for i in range(loops):
res3 = torchqbertlayer(input_tensor, attention_mask, output_attentions=True)
end = time.time()
print("torch int8 layer QPS =", loops/(end-start))
print("max error against torch fp32 =", max(
torch.max(torch.abs(res[0]-res2[0])),
torch.max(torch.abs(res[1]-res2[1]))))
print("max error against torch int8 =", max(
torch.max(torch.abs(res3[0]-res2[0])),
torch.max(torch.abs(res3[1]-res2[1]))))
print("max error between torch int8 and torch fp32 =", max(
torch.max(torch.abs(res3[0]-res[0])),
torch.max(torch.abs(res3[1]-res[1]))))
|
from django.http import Http404, HttpResponse
from rest_framework import generics
from rest_framework.generics import get_object_or_404
from clasificador.models import ClassifierModel
from documentos.models import DocumentGroup
from gerente.datatxt_helpers import Datatxt
from pruebas.models import BaseTestResult, DocumentTestResult
from pruebas.serializers import BaseTestResultSerializer, \
DocumentTestResultSerializer, DocumentAnnotationSerializer, \
DocumentTestResultSmallSerializer
from pruebas.tasks import test_model, test_document_set
import json
def model_test(request, datatxt_id):
try:
model = ClassifierModel.objects.get(datatxt_id=datatxt_id)
except ClassifierModel.DoesNotExist:
raise Http404
#create a new classifier on datatxt
dt = Datatxt()
req = dt.create_model(model.json_model)
res = req.json()
model_id = res.get('id')
#launch a celery task with this model
task = test_model.delay(model_id, model)
model.testing_task_id = task
model.save()
return HttpResponse(
json.dumps({'task': task.id}), 'application/json'
)
def model_document_group(request, dg_pk, datatxt_id):
try:
model = ClassifierModel.objects.get(datatxt_id=datatxt_id)
except ClassifierModel.DoesNotExist:
raise Http404
try:
dg = DocumentGroup.objects.get(pk=dg_pk)
except DocumentGroup.DoesNotExist:
raise Http404
try:
threshold = float(request.GET.get('threshold'))
except (TypeError, ValueError):
threshold = 0.25
#launch a celery task with this model
task = test_document_set.delay(model, dg, threshold)
dg.testing_task_id = task
dg.save()
return HttpResponse(
json.dumps({'task': task.id}), 'application/json'
)
class ClassifierModelList(generics.ListAPIView):
serializer_class = BaseTestResultSerializer
def get_queryset(self):
datatxt_id = self.kwargs['datatxt_id']
return BaseTestResult.objects.filter(
model_version__datatxt_id=datatxt_id).order_by('created')
def check_permissions(self, request):
return True
def perform_authentication(self, request):
pass
class BaseDocumentTestDetails(generics.RetrieveAPIView):
serializer_class = DocumentTestResultSerializer
queryset = DocumentTestResult.objects.all()
def check_permissions(self, request):
return True
def perform_authentication(self, request):
pass
class BaseDocumentTestList(generics.ListAPIView):
serializer_class = DocumentTestResultSmallSerializer
def get_queryset(self):
"""
This view should return a list of all the purchases for
the user as determined by the username portion of the URL.
"""
doc_group = self.kwargs['doc_pk']
return DocumentTestResult.objects.filter(document_group__pk=doc_group).order_by('-created')
def check_permissions(self, request):
return True
def perform_authentication(self, request):
pass
class DocumentAnnotationDetails(generics.RetrieveAPIView):
serializer_class = DocumentAnnotationSerializer
def get_object(self):
test_results = DocumentTestResult.objects.get(
pk=self.kwargs['test_pk'])
queryset = test_results.documentannotation_set.filter()
filter = {
'document__pk': self.kwargs['doc_pk']
}
return get_object_or_404(queryset, **filter)
def check_permissions(self, request):
return True
def perform_authentication(self, request):
pass
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
def fetcher(obj, index):
"""该函数用于索引"""
return obj[index]
x = [1, 2]
try:
fetcher(x, 4)
except IndexError:
print('got exception!')
print('continuing...')
|
from django.contrib import admin
from .models import StaffMember, Department
@admin.register(StaffMember)
class StaffMemberAdmin(admin.ModelAdmin):
list_display = (
'id',
'name',
'work_department',
'phone_num',
'grade',
'school_department',
'major',
'personal_signature',
'brief_introduction',
'start_entry',
'end_quit',
'is_incumbent',
'is_first_generation',
'is_man',
'is_delisting',
)
@admin.register(Department)
class DepartmentAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'brief_introduction', 'is_delete')
|
try:
from kfp.components import InputPath
from kfp.components import OutputPath
except ImportError:
def InputPath(c):
return c
def OutputPath(c):
return c
metrics = "Metrics"
def wikiqa_test(
dataset_path: InputPath(str),
wikiqa_path: InputPath(str),
prev_model_path: InputPath(str),
shared_path,
run_id,
sent_size_th,
ques_size_th,
num_epochs,
num_steps,
eval_period,
save_period,
device,
device_type,
num_gpus,
mlpipeline_metrics_path: OutputPath(metrics),
model_path: OutputPath(str),
):
input_dir = wikiqa_path + "/wikiqa-class"
output_dir = model_path + "/out/wikiqa"
import shutil
src = prev_model_path + "/out/wikiqa"
dst = model_path + "/out/wikiqa"
shutil.copytree(src, dst)
full_shared_path = dataset_path + shared_path
import os
import tensorflow as tf
flags = tf.app.flags
# Names and directories
flags.DEFINE_string("model_name", "basic-class", "Model name [basic | basic-class]")
flags.DEFINE_string("data_dir", input_dir, "Data dir [data/squad]")
flags.DEFINE_string("run_id", run_id, "Run ID [0]")
flags.DEFINE_string("out_base_dir", output_dir, "out base dir [out]")
flags.DEFINE_string("forward_name", "single", "Forward name [single]")
flags.DEFINE_string("answer_path", "", "Answer path []")
flags.DEFINE_string("eval_path", "", "Eval path []")
flags.DEFINE_string("load_path", "", "Load path []")
flags.DEFINE_string("shared_path", full_shared_path, "Shared path []")
# Device placement
flags.DEFINE_string(
"device", device, "default device for summing gradients. [/cpu:0]"
)
flags.DEFINE_string(
"device_type",
device_type,
"device for computing gradients (parallelization). cpu | gpu [gpu]",
)
flags.DEFINE_integer(
"num_gpus", int(num_gpus), "num of gpus or cpus for computing gradients [1]"
)
# Essential training and test options
flags.DEFINE_string("mode", "test", "train | test | forward [test]")
flags.DEFINE_boolean("load", True, "load saved data? [True]")
flags.DEFINE_bool("single", False, "supervise only the answer sentence? [False]")
flags.DEFINE_boolean("debug", False, "Debugging mode? [False]")
flags.DEFINE_bool(
"load_ema", True, "load exponential average of variables when testing? [True]"
)
flags.DEFINE_bool("eval", True, "eval? [True]")
flags.DEFINE_bool("train_only_output", False, "Train only output module?")
flags.DEFINE_bool("load_trained_model", False, "Load SQUAD trained model")
flags.DEFINE_bool("freeze_phrase_layer", False, "Freeze phrase layer")
flags.DEFINE_bool("freeze_att_layer", False, "Freeze att layer")
flags.DEFINE_bool(
"freeze_span_modelling_layer", False, "Freeze modelling layer for span"
)
flags.DEFINE_bool("using_shared", False, "using pre-created shared.json")
flags.DEFINE_bool("load_shared", False, "load shared.json for each batch")
flags.DEFINE_string("dev_name", "test", "using dev or test?")
flags.DEFINE_string("test_name", "test", "using test or dev?")
# Training / test parameters
flags.DEFINE_integer("batch_size", 60, "Batch size [60]")
flags.DEFINE_integer("val_num_batches", 100, "validation num batches [100]")
flags.DEFINE_integer("test_num_batches", 0, "test num batches [0]")
flags.DEFINE_integer(
"num_epochs", int(num_epochs), "Total number of epochs for training [12]"
)
flags.DEFINE_integer("num_steps", int(num_steps), "Number of steps [20000]")
flags.DEFINE_integer("load_step", 0, "load step [0]")
flags.DEFINE_float("init_lr", 0.5, "Initial learning rate [0.5]")
flags.DEFINE_float(
"input_keep_prob", 0.8, "Input keep prob for the dropout of LSTM weights [0.8]"
)
flags.DEFINE_float(
"keep_prob", 0.8, "Keep prob for the dropout of Char-CNN weights [0.8]"
)
flags.DEFINE_float("wd", 0.0, "L2 weight decay for regularization [0.0]")
flags.DEFINE_integer("hidden_size", 100, "Hidden size [100]")
flags.DEFINE_integer("char_out_size", 100, "char-level word embedding size [100]")
flags.DEFINE_integer("char_emb_size", 8, "Char emb size [8]")
flags.DEFINE_string(
"out_channel_dims",
"100",
"Out channel dims of Char-CNN, separated by commas [100]",
)
flags.DEFINE_string(
"filter_heights", "5", "Filter heights of Char-CNN, separated by commas [5]"
)
flags.DEFINE_bool("finetune", False, "Finetune word embeddings? [False]")
flags.DEFINE_bool("highway", True, "Use highway? [True]")
flags.DEFINE_integer("highway_num_layers", 2, "highway num layers [2]")
flags.DEFINE_bool("share_cnn_weights", True, "Share Char-CNN weights [True]")
flags.DEFINE_bool(
"share_lstm_weights",
True,
"Share pre-processing (phrase-level) LSTM weights [True]",
)
flags.DEFINE_float(
"var_decay", 0.999, "Exponential moving average decay for variables [0.999]"
)
flags.DEFINE_string("classifier", "maxpool", "[maxpool, sumpool, default]")
# Optimizations
flags.DEFINE_bool("cluster", True, "Cluster data for faster training [False]")
flags.DEFINE_bool("len_opt", True, "Length optimization? [False]")
flags.DEFINE_bool(
"cpu_opt", False, "CPU optimization? GPU computation can be slower [False]"
)
# Logging and saving options
flags.DEFINE_boolean("progress", True, "Show progress? [True]")
flags.DEFINE_integer("log_period", 100, "Log period [100]")
flags.DEFINE_integer("eval_period", int(eval_period), "Eval period [1000]")
flags.DEFINE_integer("save_period", int(save_period), "Save Period [1000]")
flags.DEFINE_integer("max_to_keep", 20, "Max recent saves to keep [20]")
flags.DEFINE_bool("dump_eval", True, "dump eval? [True]")
flags.DEFINE_bool("dump_answer", False, "dump answer? [True]")
flags.DEFINE_bool("vis", False, "output visualization numbers? [False]")
flags.DEFINE_bool("dump_pickle", True, "Dump pickle instead of json? [True]")
flags.DEFINE_float(
"decay", 0.9, "Exponential moving average decay for logging values [0.9]"
)
# Thresholds for speed and less memory usage
flags.DEFINE_integer("word_count_th", 10, "word count th [100]")
flags.DEFINE_integer("char_count_th", 50, "char count th [500]")
flags.DEFINE_integer("sent_size_th", int(sent_size_th), "sent size th [64]")
flags.DEFINE_integer("num_sents_th", 1, "num sents th [8]")
flags.DEFINE_integer("ques_size_th", int(ques_size_th), "ques size th [32]")
flags.DEFINE_integer("word_size_th", 16, "word size th [16]")
flags.DEFINE_integer("para_size_th", 256, "para size th [256]")
# Advanced training options
flags.DEFINE_bool("lower_word", True, "lower word [True]")
flags.DEFINE_bool("squash", False, "squash the sentences into one? [False]")
flags.DEFINE_bool("swap_memory", True, "swap memory? [True]")
flags.DEFINE_string("data_filter", "max", "max | valid | semi [max]")
flags.DEFINE_bool("use_glove_for_unk", True, "use glove for unk [False]")
flags.DEFINE_bool(
"known_if_glove", True, "consider as known if present in glove [False]"
)
flags.DEFINE_string("logit_func", "tri_linear", "logit func [tri_linear]")
flags.DEFINE_string("answer_func", "linear", "answer logit func [linear]")
flags.DEFINE_string("sh_logit_func", "tri_linear", "sh logit func [tri_linear]")
# Ablation options
flags.DEFINE_bool("use_char_emb", True, "use char emb? [True]")
flags.DEFINE_bool("use_word_emb", True, "use word embedding? [True]")
flags.DEFINE_bool("q2c_att", True, "question-to-context attention? [True]")
flags.DEFINE_bool("c2q_att", True, "context-to-question attention? [True]")
flags.DEFINE_bool("dynamic_att", False, "Dynamic attention [False]")
def main(_):
from basic.main import main as m
config = flags.FLAGS
config.model_name = "basic-class"
config.out_dir = os.path.join(
config.out_base_dir, config.model_name, str(config.run_id).zfill(2)
)
print(config.out_dir)
evaluator = m(config)
"""Generating metrics for the squad model"""
metrics = {
"metrics": [
{
"name": "accuracy-score",
"numberValue": str(evaluator.acc),
"format": "RAW",
},
{
"name": "loss",
"numberValue": str(evaluator.loss),
"format": "RAW",
},
]
}
import json
with open(mlpipeline_metrics_path, "w") as f:
json.dump(metrics, f)
tf.app.run(main)
|
import glob
import os
import re
import cv2
import face_recognition as fr
# predictor_path = 'dlib_data/shape_predictor_5_face_landmarks.dat'
# face_rec_model_path = 'dlib_data/dlib_face_recognition_resnet_model_v1.dat'
known_people_folder = 'images'
cam = cv2.VideoCapture(0)
color_green = (0, 255, 0)
line_width = 2
tolerance = 0.6
def main():
# face_image = fr.load_image_file('{}/img_20181030_172355.jpg'.format(faces_folder_path))
# known_face = fr.face_encodings(face_image)
known_people = scan_known_people(known_people_folder)
process_this_frame = True
while True:
# Capture frame-by-frame
ret_val, img = cam.read()
rgb_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
face_locations = fr.face_locations(rgb_image, number_of_times_to_upsample=1, model='hog')
# face_recognition expects RGB input; reuse the detected locations
unknown_face_encodings = fr.face_encodings(rgb_image, face_locations)
# Draw rectangles in detected faces
# for face_location in face_locations:
# cv2.rectangle(img, (face_location[3], face_location[0]), (face_location[1], face_location[2]), color_green, line_width)
# Rec face
face_names = []
for unknown_encoding in unknown_face_encodings:
name = "Unknown"
for person in known_people.items():
matches = fr.compare_faces(person[1], unknown_encoding)
# If a match was found in known_face_encodings, just use the first one.
if True in matches:
name = person[0]
break
# always append so face_names stays aligned with face_locations below
face_names.append(name)
# distances = fr.face_distance(person[1], unknown_encoding)
# result = list(distances <= tolerance)
# if True in result:
# face_names.append(person[0])
# # print("Recognized: " + person[0])
# break
process_this_frame = not process_this_frame
# Display the results
print(face_names)
for (top, right, bottom, left), name in zip(face_locations, face_names):
# Draw a box around the face
# cv2.rectangle(img, (left, top), (right, bottom), color_green, line_width)
cv2.rectangle(img, (left, top), (right, bottom), color_green,
line_width)
# Draw a label with a name below the face
cv2.rectangle(img, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(img, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
# Draw captured image to screen
cv2.imshow('my webcam', img)
if cv2.waitKey(1) == 27:
break # esc to quit
cam.release()
cv2.destroyAllWindows()
def image_files_in_folder(folder):
return [os.path.join(folder, f) for f in os.listdir(folder) if re.match(r'.*\.(jpg|jpeg|png)', f, flags=re.I)]
def scan_known_people(known_people_folder):
"""
:param image folder which store subfolder using person's name or identity:
:return: dict of key = person names, value = list of face encodings for that person
"""
known_person_names = [d for d in os.listdir(known_people_folder)]
result = {}
for name in known_person_names:
files = [os.path.abspath(f) for f in glob.glob('{}/{}/*.jpg'.format(known_people_folder, name))]
known_face_encodings = []
for file in files:
img = fr.load_image_file(file)
encodings = fr.face_encodings(img)
if len(encodings) > 1:
print("WARNING: More than one face found in {}. Only considering the first face.".format(file))
if len(encodings) == 0:
print("WARNING: No faces found in {}. Ignoring file.".format(file))
else:
known_face_encodings.append(encodings[0])
result[name] = known_face_encodings
return result
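# Expected on-disk layout for known_people_folder (names are illustrative):
#
#   images/
#       alice/
#           alice_1.jpg
#           alice_2.jpg
#       bob/
#           bob_1.jpg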
if __name__ == '__main__':
main()
|
import sys
if len(sys.argv) != 3:
print("Usage: python convert_shp_to_json.py SHAPE_FILE OUT_FILE")
sys.exit(2)
print("Importing modules...")
import geopandas as gpd
print("Reading file...")
gdf = gpd.read_file(sys.argv[1])
print("Writing to file...")
jsonfile = gdf.to_json()
with open(sys.argv[2], "w+") as f:
f.write(jsonfile)
print("Done !")
|
"""
This app creates an animated sidebar using the dbc.Nav component and some local
CSS. Each menu item has an icon, when the sidebar is collapsed the labels
disappear and only the icons remain. Visit www.fontawesome.com to find
alternative icons to suit your needs!
dcc.Location is used to track the current location, a callback uses the current
location to render the appropriate page content. The active prop of each
NavLink is set automatically according to the current pathname. To use this
feature you must install dash-bootstrap-components >= 0.11.0.
For more details on building multi-page Dash applications, check out the Dash
documentation: https://dash.plot.ly/urls
"""
import dash
import dash_bootstrap_components as dbc
from dash import Input, Output, dcc, html
PLOTLY_LOGO = "https://images.plot.ly/logo/new-branding/plotly-logomark.png"
app = dash.Dash(
external_stylesheets=[dbc.themes.BOOTSTRAP, dbc.icons.FONT_AWESOME]
)
sidebar = html.Div(
[
html.Div(
[
# width: 3rem ensures the logo is the exact width of the
# collapsed sidebar (accounting for padding)
html.Img(src=PLOTLY_LOGO, style={"width": "3rem"}),
html.H2("Sidebar"),
],
className="sidebar-header",
),
html.Hr(),
dbc.Nav(
[
dbc.NavLink(
[html.I(className="fas fa-home me-2"), html.Span("Home")],
href="/",
active="exact",
),
dbc.NavLink(
[
html.I(className="fas fa-calendar-alt me-2"),
html.Span("Calendar"),
],
href="/calendar",
active="exact",
),
dbc.NavLink(
[
html.I(className="fas fa-envelope-open-text me-2"),
html.Span("Messages"),
],
href="/messages",
active="exact",
),
],
vertical=True,
pills=True,
),
],
className="sidebar",
)
content = html.Div(id="page-content", className="content")
app.layout = html.Div([dcc.Location(id="url"), sidebar, content])
# set the content according to the current pathname
@app.callback(Output("page-content", "children"), Input("url", "pathname"))
def render_page_content(pathname):
if pathname == "/":
return html.P("This is the home page!")
elif pathname == "/calendar":
return html.P("This is your calendar... not much in the diary...")
elif pathname == "/messages":
return html.P("Here are all your messages")
# If the user tries to reach a different page, return a 404 message
return html.Div(
[
html.H1("404: Not found", className="text-danger"),
html.Hr(),
html.P(f"The pathname {pathname} was not recognised..."),
],
className="p-3 bg-light rounded-3",
)
if __name__ == "__main__":
app.run_server(debug=True)
|
from hue import HueControlUtil as hue
from wemo import WemoControlUtil as wemo
from alexa import AlexaControlUtil as alexa
from googleApis.googleSheetController import GoogleSheetController
from googleApis.googleDriveController import GoogleDriveController
from googleApis.gmailController import GmailController
import time, os, sys, argparse, datetime
import shutil
import uuid
datetimeFormat = "%Y-%m-%d %H-%M-%S.%f"
#turn on hue lights when wemo switch is turned on
def testWemoHueRecipe(argv):
parser = argparse.ArgumentParser()
parser.add_argument("resultFile")
parser.add_argument("-iterNum", default = 5, type = int)
parser.add_argument("-lightId", default = 2, type = int)
parser.add_argument("-interval", default = 1, type = float)
parser.add_argument("-wemoport", type = int, default = 10085)
options = parser.parse_args(argv)
resultFile = options.resultFile
hueController = hue.HueController()
bind = "0.0.0.0:{}".format(options.wemoport)
switchName = "WeMo Switch"
lightId = options.lightId
wemoController = wemo.WemoController(bind = bind)
switch = wemoController.discoverSwitch(switchName)
if switch is None:
print("error to locate the switch")
sys.exit(1)
else:
print("switch discoverred")
#test recipe: when wemo switch is truned on, turn on lights in living room
time.sleep(3)
resultStatList = []
resultFd = open(resultFile, "a")
preState = wemoController.getState(switchName)
for index in range(options.iterNum):
print("start test iteration {}".format(index))
#monitor trigger event
while (True):
currState = wemoController.getState(switchName)
if currState != preState and currState == 1:
triggerObservedTime = datetime.datetime.now()
preState = currState
break
preState = currState
time.sleep(options.interval)
hueController.turnonLight(lightId)
print("light turned one")
endTime = datetime.datetime.now()
timeDiff = endTime - triggerObservedTime
timeCost = timeDiff.seconds + timeDiff.microseconds / float(1000000)
testUuid = uuid.uuid4()
statStr = "executionUuid: {}->triggerObservedTime: {}->wemo_hue".format(testUuid, triggerObservedTime.strftime(datetimeFormat))
print(statStr)
resultStatList.append(statStr)
resultFd.write(statStr + "\n")
resultFd.flush()
statStr = "testUuid: {}->actionExecutionTime: {}->wemo_hue".format(testUuid, endTime.strftime(datetimeFormat))
print(statStr)
resultStatList.append(statStr)
resultFd.write(statStr + "\n")
resultFd.flush()
statStr = "testUuid: {}->time cost for wemo_hue iter {} is {} seconds".format(testUuid, index, timeCost)
print(statStr)
resultStatList.append(statStr)
resultFd.write(statStr + "\n")
resultFd.flush()
time.sleep(5)
resultFd.close()
#when any new email arrives in gmail, blink lights.
#https://ifttt.com/applets/93876p-when-any-new-email-arrives-in-gmail-blink-lights
def testGmailHueRecipe(argv):
'''
test the following recipe:
If any new email arrives in Gmail, then blink lights in Living room
'''
parser = argparse.ArgumentParser()
parser.add_argument("resultFile")
parser.add_argument("-iterNum", default = 5, type = int)
#parser.add_argument("-gmailSenderName", default = "senmuxing", type = str)
parser.add_argument("-gmailName", default = "xianghangmi", type = str)
parser.add_argument("-lightId", default = 2, type = int)
parser.add_argument("-interval", default = 0.1, type = float)
options = parser.parse_args(argv)
hueController = hue.HueController()
lightId = options.lightId
gmailController = GmailController(options.gmailName)
resultStatList = []
resultFd = open(options.resultFile, "a")
#test recipe: when any new email arrives in gmail, blink lights in living room
preMsgList = gmailController.listMessagesMatchingQuery()
for index in range(options.iterNum):
#check new emails
#hueController.clearAlert(lightId)
nowDate = datetime.datetime.now()
nowStr = nowDate.strftime("%Y-%m-%d %H-%M-%S")
#detect reception of the email
while True:
latestMsgList = gmailController.listMessagesMatchingQuery()
latestMsgId = latestMsgList[0]["id"]
preMsgId = preMsgList[0]["id"]
if latestMsgId == preMsgId:
time.sleep(options.interval)
continue
else:
preMsgList = latestMsgList
triggerObservedTime = datetime.datetime.now()
break
print("receive email at ", triggerObservedTime.strftime("%Y-%m-%d %H-%M-%S"))
hueController.alertLight(lightId)
endTime = datetime.datetime.now()
timeDiff = endTime - triggerObservedTime
timeCost = timeDiff.seconds + timeDiff.microseconds / float(1000000)
testUuid = uuid.uuid4()
statStr = "testUuid: {}->triggerObservedTime: {}->gmail_hue".format(testUuid, triggerObservedTime.strftime(datetimeFormat))
print(statStr)
resultStatList.append(statStr)
resultFd.write(statStr + "\n")
resultFd.flush()
statStr = "testUuid: {}->actionExecutionTime: {}->gmail_hue".format(testUuid, endTime.strftime(datetimeFormat))
print(statStr)
resultStatList.append(statStr)
resultFd.write(statStr + "\n")
resultFd.flush()
statStr = "testUuid: {}->time cost for iter {} is {} seconds".format(testUuid, index, timeCost)
print(statStr)
resultStatList.append(statStr)
resultFd.write(statStr + "\n")
resultFd.flush()
resultFd.close()
open(options.resultFile, "w").write("\n".join(resultStatList) + "\n")
#if my wemo switch is activated, add line to spreadsheet
#https://ifttt.com/applets/67307p-if-my-wemo-switch-is-activated-add-line-to-spreadsheet
def testWemoGoogleSheetRecipe(argv):
'''
test the following recipe:
If my WeMo switch is activated, then add a line to the spreadsheet
'''
parser = argparse.ArgumentParser()
parser.add_argument("resultFile")
parser.add_argument("-iterNum", default = 5, type = int)
parser.add_argument("-interval", default = 1, type = float)
parser.add_argument("-wemoport", type = int, default = 10085)
spreadsheetId = "1TwPsEXIQ0tZPnFABwKqePshDw3x0kFozaP69Nsw95ug"
sheetName = "Sheet1"
options = parser.parse_args(argv)
bind = "0.0.0.0:{}".format(options.wemoport)
switchName = "WeMo Switch"
wemoController = wemo.WemoController(bind = bind)
switch = wemoController.discoverSwitch(switchName)
if switch is None:
print("error to locate the switch")
sys.exit(1)
else:
print("switch discoverred")
sheetController = GoogleSheetController()
spreadsheet = sheetController.getSpreadSheet(spreadsheetId)
print("got spreadsheet: ", sheetController.getSpreadSheet(spreadsheetId))
retrievedSheetName = spreadsheet["sheets"][0]["properties"]["title"]
print("title of first sheet is ", spreadsheet["sheets"][0]["properties"]["title"])
if retrievedSheetName != sheetName:
print("sheet name doesn't match, use retrieved one: preconfigured one: {}, retrieved one {}".format(sheetName, retrievedSheetName))
sheetName = retrievedSheetName
resultStatList = []
resultFd = open(options.resultFile, "a")
#test recipe: when wemo switch is turned on, write a log to the google spreadsheet
preSwitchState = wemoController.getState(switchName)
for index in range(options.iterNum):
while True:
currSwitchState = wemoController.getState(switchName)
if currSwitchState == 1 and currSwitchState != preSwitchState:
preSwitchState = currSwitchState
triggerObservedTime = datetime.datetime.now()
break
preSwitchState = currSwitchState
time.sleep(options.interval)
print("switch turned on is observed")
nowDate = datetime.datetime.now()
nowStr = nowDate.strftime("%Y-%m-%d %H-%M-%S")
values = [
["switch turned on", nowStr],
]
responseAppend = sheetController.appendFile(spreadsheetId, range = "Sheet1", valueList = values)
endTime = datetime.datetime.now()
timeDiff = endTime - triggerObservedTime
timeCost = timeDiff.seconds + timeDiff.microseconds / float(1000000)
testUuid = uuid.uuid4()
statStr = "testUuid: {}->triggerObservedTime: {}->wemo_sheet".format(testUuid, triggerObservedTime.strftime(datetimeFormat))
print(statStr)
resultStatList.append(statStr)
resultFd.write(statStr + "\n")
resultFd.flush()
statStr = "testUuid: {}->actionExecutionTime: {}->wemo_sheet".format(testUuid, endTime.strftime(datetimeFormat))
print(statStr)
resultStatList.append(statStr)
resultFd.write(statStr + "\n")
resultFd.flush()
statStr = "testUuid: {}->time cost for iter {} is {} seconds".format(testUuid, index, timeCost)
print(statStr)
resultStatList.append(statStr)
resultFd.write(statStr + "\n")
resultFd.flush()
#generate trigger event: speak to alexa: change a music
resultFd.close()
print(resultStatList)
#automatically save new email attachments in gmail to google drive
#https://ifttt.com/applets/99068p-automatically-save-new-email-attachments-in-gmail-to-google-drive
def testGmailDrive(argv):
parser = argparse.ArgumentParser()
parser.add_argument("resultFile")
parser.add_argument("-iterNum", default = 5, type = int)
parser.add_argument("-interval", default = 0.1, type = float)
parser.add_argument("-gmailName", default = "xianghangmi", type = str)
parentId = "0B2Edbo2pC3d4MzEzSzRBN1BnSVU"
options = parser.parse_args(argv)
gmailController = GmailController(options.gmailName)
driveController = GoogleDriveController()
resultStatList = []
resultFd = open(options.resultFile, "a")
for index in range(options.iterNum):
preMsgList = gmailController.listMessagesMatchingQuery()
nowDate = datetime.datetime.now()
nowStr = nowDate.strftime("%Y-%m-%d %H-%M-%S")
#send email from senmuxing@gmail.com to xianghangmi@gmail.com
subject = "ifttt test at {}".format(nowStr)
tempFile = "ifttTest_engine_{}.txt".format(nowStr)
open(tempFile, "w").write(subject)
#detect reception of the email
while True:
latestMsgList = gmailController.listMessagesMatchingQuery()
latestMsgId = latestMsgList[0]["id"]
preMsgId = preMsgList[0]["id"]
if latestMsgId == preMsgId:
time.sleep(options.interval)
continue
else:
preMsgList = latestMsgList
triggerObservedTime = datetime.datetime.now()
break
driveController.uploadFile(tempFile, tempFile, dstParentList = [ parentId ])
endTime = datetime.datetime.now()
timeDiff = endTime - triggerObservedTime
timeCost = timeDiff.seconds + timeDiff.microseconds / float(1000000)
testUuid = uuid.uuid4()
statStr = "testUuid: {}->triggerObservedTime: {}->gmail_drive".format(testUuid, triggerObservedTime.strftime(datetimeFormat))
print(statStr)
resultStatList.append(statStr)
resultFd.write(statStr + "\n")
resultFd.flush()
statStr = "testUuid: {}->actionExecutionTime: {}->gmail_drive".format(testUuid, endTime.strftime(datetimeFormat))
print(statStr)
resultStatList.append(statStr)
resultFd.write(statStr + "\n")
resultFd.flush()
statStr = "testUuid: {}->time cost for iter {} is {} seconds".format(testUuid, index, timeCost)
print(statStr)
resultStatList.append(statStr)
resultFd.write(statStr + "\n")
resultFd.flush()
os.remove(tempFile)
time.sleep(10)
print(resultStatList)
resultFd.close()
recipeTypeDict = {
#"alexa_wemo" : testAlexaWemoRecipe,
#"alexa_hue" : testAlexaHueRecipe,
"wemo_hue" : testWemoHueRecipe,
#"alexa_sheet" : testAlexaGoogleSheetRecipe,
"gmail_hue" : testGmailHueRecipe,
"wemo_sheet" : testWemoGoogleSheetRecipe,
"gmail_drive" : testGmailDrive,
}
if __name__ == "__main__":
recipeName = sys.argv[1]
print(recipeName)
if recipeName not in recipeTypeDict:
print("please provide recipeType from this list: ", recipeTypeDict.keys())
sys.exit(1)
recipeFunc = recipeTypeDict[recipeName]
recipeFunc(sys.argv[2:])
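# Example invocation (script name is illustrative; the first argument is a key
# from recipeTypeDict, the rest are passed through to that recipe's parser):
#   python recipeTest.py wemo_hue results.txt -iterNum 3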
|
# Generated by Django 2.2 on 2019-04-26 08:18
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='card',
name='idm',
field=models.CharField(max_length=16, unique=True, validators=[django.core.validators.MinLengthValidator(16), django.core.validators.RegexValidator(message='IDm must be 16-digit hexadecimal number', regex='^[0-9A-F]{16}$')], verbose_name='FeliCa ID'),
),
migrations.AlterField(
model_name='temporary',
name='idm',
field=models.CharField(max_length=16, unique=True, validators=[django.core.validators.MinLengthValidator(16), django.core.validators.RegexValidator(message='IDm must be 16-digit hexadecimal number', regex='^[0-9A-F]{16}$')], verbose_name='FeliCa ID'),
),
]
|
from rest_framework import serializers
from watchlist_app.models import WatchList, StreamPlatform, Review
class ReviewSerializer(serializers.ModelSerializer):
review_user = serializers.StringRelatedField(read_only=True)
class Meta:
model = Review
exclude = ('watchlist',)
# fields = "__all__"
class WatchListSerializer(serializers.ModelSerializer):
# reviews = ReviewSerializer(many=True, read_only=True)
platform = serializers.CharField(source='platform.name')
class Meta:
model = WatchList
fields = "__all__"
class StreamPlatformSerializer(serializers.ModelSerializer):
watchlist = WatchListSerializer(many=True, read_only=True)
class Meta:
model = StreamPlatform
fields = "__all__"
|
# Service monitor - updates the service status every configured interval
# Service configurations: see configs/user_configs/nagios_config.cfg
from pprint import pprint
import urllib2
import json
import base64
import time
import ConfigParser
import os
import sys
from monitors.baseMonitor import BaseMonitor
import ha_engine.ha_infra as infra
# ========================== Configurable Parameters ======================
# IP address based service filter
# If IP_FILTER is empty all services are selected; otherwise only IPs from IP_FILTER are considered for display
# =========================================================================
class NagiosMonitor(BaseMonitor):
headers = ['host_name', 'description', 'time_critical',
'time_ok', 'time_indeterminate_nodata']
polling_interval = 20
ip_filter = []
url = "http://%s:8080/state"
# path = '/Users/pradeech/HA1/Cloud_HA_Testframework_v1/configs/user_configs/nagios_config.cfg'
path = os.getcwd() + os.sep + 'configs/user_configs/nagios_config.cfg'
def stop(self):
pass
def report(self):
pass
def stable(self):
pass
def is_module_exeution_completed(self, finish_exection):
pass
def start(self, sync=None, finish_execution=None):
format_string = "%s %s %s %s %s "
i = 0
inputs = sys.argv[1:]
for arg in inputs:
if arg == 'ip':
self.ip_filter = inputs[i + 1].split(',')
elif arg == 'fre':
self.polling_interval = float(inputs[i + 1])
i = i + 1
ip_address = self.get_config('nagios_ip')
# Execution starts here
entries = 0
#while (not finish_execution):
reportList = {}
run = True
run_count = 0
while(run):
data = self.getdata(self.url, ip_address)
ret = []
for ip in data:
hostService = data[ip]["services"]
for key in hostService:
result = {}
result["ip"] = ip
result["description"] = key
if data[ip]["services"][key]["current_state"] == '0':
result["status"] = "OK"
result["output"] = data[ip]["services"][key][
"plugin_output"]
else:
result["status"] = self.get_severity_color("CRITICAL",
"CRITICAL")
result["output"] = self.get_severity_color(
"CRITICAL",
data[ip]["services"][key]["plugin_output"])
#self.validate_critical_data(ip,key,reportList,result["output"])
# if self.validate_critical_data(ip,key,critical_data):
#critical_data.append({ip+key:{'timestamp':int(time.time()),'status':'CRITICAL'}})
ret.append(result)
#print critical_data
# os.system("clear")
if sync:
infra.notify_all_waiters(sync)
print self.get_severity_color('INFO', format_string % (
'IP address'.ljust(20), "Service Description".ljust(45),
"Status".ljust(7), "Status Information", ""))
print "Polling Interval %s " % (self.polling_interval)
print "IP Filter : %s " % self.ip_filter
for item in ret:
# tuncating the description to 30 char for better visibilty ljust()
if self.ip_filter != []:
if item.get("ip") in self.ip_filter:
print format_string % (item.get("ip").ljust(20),
item.get(self.headers[1])[:40].ljust(
45),
item.get("status").ljust(10),
item.get("output"), "")
entries = entries + 1
else:
print format_string % (item.get("ip").ljust(20),
item.get(self.headers[1])[:40].ljust(45),
item.get("status").ljust(7),
item.get("output"), "")
entries = entries + 1
if run_count < 10:
run_count+=1
time.sleep(2)
else:
run = False
infra.set_execution_completed(finish_execution)
#time.sleep(20)
@staticmethod
def getdata(url, ipaddress):
try:
request = urllib2.Request(url % (ipaddress))
result = urllib2.urlopen(request)
except urllib2.HTTPError as e:
print "%s" % e.reason
raise
json_data = result.read()
data = json.loads(json_data)
return data["content"] # ["services"]
@staticmethod
def get_severity_color(severity, text):
# print severity, text
if severity == 'CRITICAL':
return "\033[" + '31' + "m" + text + "\033[0m"
elif severity == 'WARNING':
return "\033[" + '33' + "m" + text + "\033[0m"
elif severity == 'INFO':
return "\033[" + '32' + "m" + text + "\033[0m"
@staticmethod
def do_calc(starttime, endtime, value):
delta = float((endtime - starttime) / 1000)
return round((float(value) / delta) * 100, 2)
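# Example (assuming start/end times are in milliseconds):
# do_calc(0, 100000, 50) gives delta = 100.0 s and returns 50.0, i.e. a percentage.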
def filter_objs(self, objs):
filter_objs = []
for obj in objs:
if obj.get('host_name') in self.ip_filter:
filter_objs.append(obj)
return filter_objs
def get_config(self, key, section='Default'):
config = ConfigParser.ConfigParser()
config.readfp(open(self.path))
return config.get(section, key)
@staticmethod
def validate_critical_data(ip, desc, reportList, status):
# work in progress: track status transitions per (ip, service) pair
key = ip + desc
if reportList.has_key(key):
ipDescStatusList = reportList.get(key)
timeStampStatusTup = ipDescStatusList[0]
if timeStampStatusTup[1] != status:
ipDescStatusList.insert(0, (time.time(), status))
elif status != 'OK':
ipDescStatusList = [(time.time(), status)]
else:
return
reportList[key] = ipDescStatusList
|
from charm.adapters.ibenc_adapt_hybrid import HybridIBEnc
from charm.adapters.ibenc_adapt_identityhash import HashIDAdapter
from charm.schemes.ibenc.ibenc_bb03 import IBE_BB04
from charm.schemes.ibenc.ibenc_bf01 import IBE_BonehFranklin
from charm.schemes.ibenc.ibenc_ckrs09 import IBE_CKRS
from charm.schemes.ibenc.ibenc_lsw08 import IBE_Revoke
from charm.schemes.ibenc.ibenc_sw05 import IBE_SW05_LUC
from charm.schemes.ibenc.ibenc_waters05 import IBE_N04
from charm.schemes.ibenc.ibenc_waters09 import DSE09
from charm.toolbox.pairinggroup import PairingGroup,ZR,GT
from charm.toolbox.hash_module import Waters
import unittest
debug = False
class HybridIBEncTest(unittest.TestCase):
def testHybridIBEnc(self):
groupObj = PairingGroup('SS512')
ibe = IBE_BB04(groupObj)
hashID = HashIDAdapter(ibe, groupObj)
hyb_ibe = HybridIBEnc(hashID, groupObj)
(pk, mk) = hyb_ibe.setup()
kID = 'waldoayo@gmail.com'
sk = hyb_ibe.extract(mk, kID)
msg = b"This is a test message."
ct = hyb_ibe.encrypt(pk, kID, msg)
if debug:
print("Ciphertext")
print("c1 =>", ct['c1'])
print("c2 =>", ct['c2'])
decrypted_msg = hyb_ibe.decrypt(pk, sk, ct)
if debug: print("Result =>", decrypted_msg)
assert decrypted_msg == msg
del groupObj
class HashIDAdapterTest(unittest.TestCase):
def testHashIDAdapter(self):
group = PairingGroup('SS512')
ibe = IBE_BB04(group)
hashID = HashIDAdapter(ibe, group)
(pk, mk) = hashID.setup()
kID = 'waldoayo@email.com'
sk = hashID.extract(mk, kID)
if debug: print("Keygen for %s" % kID)
if debug: print(sk)
m = group.random(GT)
ct = hashID.encrypt(pk, kID, m)
orig_m = hashID.decrypt(pk, sk, ct)
assert m == orig_m
if debug: print("Successful Decryption!!!")
if debug: print("Result =>", orig_m)
class IBE_BB04Test(unittest.TestCase):
def testIBE_BB04(self):
# initialize the element object so that object references have global scope
groupObj = PairingGroup('MNT224')
ibe = IBE_BB04(groupObj)
(params, mk) = ibe.setup()
# represents public identity
kID = groupObj.random(ZR)
key = ibe.extract(mk, kID)
M = groupObj.random(GT)
cipher = ibe.encrypt(params, kID, M)
m = ibe.decrypt(params, key, cipher)
assert m == M, "FAILED Decryption!"
if debug: print("Successful Decryption!! M => '%s'" % m)
class IBE_BonehFranklinTest(unittest.TestCase):
def testIBE_BonehFranklin(self):
groupObj = PairingGroup('MNT224', secparam=1024)
ibe = IBE_BonehFranklin(groupObj)
(pk, sk) = ibe.setup()
id = 'user@email.com'
key = ibe.extract(sk, id)
m = "hello world!!!!!"
ciphertext = ibe.encrypt(pk, id, m)
msg = ibe.decrypt(pk, key, ciphertext)
assert msg == m, "failed decrypt: \n%s\n%s" % (msg, m)
if debug: print("Successful Decryption!!!")
class IBE_CKRSTest(unittest.TestCase):
def testIBE_CKRS(self):
groupObj = PairingGroup('SS512')
ibe = IBE_CKRS(groupObj)
(mpk, msk) = ibe.setup()
# represents public identity
ID = "bob@mail.com"
sk = ibe.extract(mpk, msk, ID)
M = groupObj.random(GT)
ct = ibe.encrypt(mpk, ID, M)
m = ibe.decrypt(mpk, sk, ct)
if debug: print('m =>', m)
assert m == M, "FAILED Decryption!"
if debug: print("Successful Decryption!!! m => '%s'" % m)
class IBE_RevokeTest(unittest.TestCase):
def testIBE_Revoke(self):
        # scheme designed for symmetric bilinear groups
grp = PairingGroup('SS512')
n = 5 # total # of users
ibe = IBE_Revoke(grp)
ID = "user2@email.com"
S = ["user1@email.com", "user3@email.com", "user4@email.com"]
(mpk, msk) = ibe.setup(n)
sk = ibe.keygen(mpk, msk, ID)
if debug: print("Keygen...\nsk :=", sk)
M = grp.random(GT)
ct = ibe.encrypt(mpk, M, S)
if debug: print("Ciphertext...\nct :=", ct)
m = ibe.decrypt(S, ct, sk)
assert M == m, "Decryption FAILED!"
if debug: print("Successful Decryption!!!")
class IBE_SW05_LUCTest(unittest.TestCase):
def testIBE_SW05_LUC(self):
# initialize the element object so that object references have global scope
groupObj = PairingGroup('SS512')
n = 6; d = 4
ibe = IBE_SW05_LUC(groupObj)
(pk, mk) = ibe.setup(n, d)
if debug:
print("Parameter Setup...")
print("pk =>", pk)
print("mk =>", mk)
w = ['insurance', 'id=2345', 'oncology', 'doctor', 'nurse', 'JHU'] #private identity
wPrime = ['insurance', 'id=2345', 'doctor', 'oncology', 'JHU', 'billing', 'misc'] #public identity for encrypt
(w_hashed, sk) = ibe.extract(mk, w, pk, d, n)
M = groupObj.random(GT)
cipher = ibe.encrypt(pk, wPrime, M, n)
m = ibe.decrypt(pk, sk, cipher, w_hashed, d)
assert m == M, "FAILED Decryption: \nrecovered m = %s and original m = %s" % (m, M)
if debug: print("Successful Decryption!! M => '%s'" % m)
class IBE_N04Test(unittest.TestCase):
def testIBE_N04(self):
# initialize the element object so that object references have global scope
groupObj = PairingGroup('SS512')
waters = Waters(groupObj)
ibe = IBE_N04(groupObj)
(pk, mk) = ibe.setup()
# represents public identity
ID = "bob@mail.com"
kID = waters.hash(ID)
#if debug: print("Bob's key =>", kID)
key = ibe.extract(mk, kID)
M = groupObj.random(GT)
cipher = ibe.encrypt(pk, kID, M)
m = ibe.decrypt(pk, key, cipher)
#print('m =>', m)
assert m == M, "FAILED Decryption!"
if debug: print("Successful Decryption!!! m => '%s'" % m)
del groupObj
class DSE09Test(unittest.TestCase):
def testDSE09(self):
grp = PairingGroup('SS512')
ibe = DSE09(grp)
ID = "user2@email.com"
(mpk, msk) = ibe.setup()
sk = ibe.keygen(mpk, msk, ID)
if debug: print("Keygen...\nsk :=", sk)
M = grp.random(GT)
ct = ibe.encrypt(mpk, M, ID)
if debug: print("Ciphertext...\nct :=", ct)
m = ibe.decrypt(ct, sk)
assert M == m, "Decryption FAILED!"
if debug: print("Successful Decryption!!!")
if __name__ == "__main__":
unittest.main()
|
from tkinter import *
from PIL import ImageTk,Image
root = Tk()
root.title('Dropdown Menu')
root.iconbitmap('images/diablo.ico')
root.geometry("200x200")
def show():
    Label(root, text=clicked.get()).pack()
options = [
'Monday',
'Tuesday',
'Wednesday',
'Thursday',
'Friday',
'Saturday',
]
clicked = StringVar()
clicked.set(options[0])
drop = OptionMenu(root, clicked, *options)
drop.pack()
btn = Button(root, text='Show Selection', command=show)
btn.pack()
Button(root, text='Exit', command=root.quit).pack()
root.mainloop()
|
# Generated by Django 2.2.7 on 2020-01-15 09:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('statics', '0013_auto_20200114_2217'),
]
operations = [
migrations.AddField(
model_name='review',
name='index',
field=models.PositiveIntegerField(default=1, verbose_name='순서'),
),
]
|
import os
import time
source = ['/Users/YanYu/Desktop/']
target_dir = '/Users/YanYu/Desktop/'
target = target_dir+time.strftime('%Y%m%d%H%M%S')+'.zip'
zip_command = "zip -qr '%s' %s"%(target, ''.join(source))
if os.system(zip_command) == 0:
print 'Successful backup to', target
else:
print 'Backup failed'
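# A safer alternative (a sketch, not part of the original script): pass the
# arguments as a list instead of interpolating into a shell string, which
# avoids quoting problems in file names.
#   import subprocess
#   subprocess.check_call(['zip', '-qr', target] + source)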
|
from collections import OrderedDict
favorite_languages=OrderedDict()
favorite_languages['a']='python'
favorite_languages['b']='c++'
favorite_languages['c']='c'
favorite_languages['d']='ruby'
for name,language in favorite_languages.items():
print(name.title()+" 's favorite_language is "+language.title()+" .")
|
import numpy as np
from qiskit import (
#IBMQ,
QuantumCircuit,
QuantumRegister,
ClassicalRegister,
execute,
Aer,
)
from math import pi
from qiskit.visualization import plot_histogram
from qiskit.tools.visualization import circuit_drawer
from rpy2 import robjects as robjects
def dec2bin(n):
a = 1
list = []
while a > 0:
a, b = divmod(n, 2)
list.append(str(b))
n = a
s = ""
for i in range(len(list) - 1, -1, -1):
s += str(list[i])
    s = s.zfill(10)  # pad to the 10-bit input width
return s
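# Worked example: dec2bin(5) collects remainders ['1', '0', '1'], reverses them
# to '101', and zero-pads to the 10-bit register width -> '0000000101'.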
def inverse(s):
s_list = list(s)
for i in range(len(s_list)):
if s_list[i] == '0':
s_list[i] = '1'
else:
s_list[i] ='0'
s = "".join(s_list)
return s
def swap_registers(circuit, n):
for qubit in range(n//2):
circuit.swap(qubit, n-qubit-1)
return circuit
def qft_rotations(circuit, qubit, p):
"""Performs qft on the first n qubits in circuit (without swaps)"""
# if n == 0:
# return circuit
# n -= 1
# circuit.h(9-n)
# for qubit in range(n):
# circuit.cu1(pi/2**(n-qubit), qubit, n)
# # At the end of our function, we call the same function again on
# # the next qubits (we reduced n by one earlier in the function)
# qft_rotations(circuit, n)
# for qubit in range(n):
# circuit.h(qubit)
# p = 1
# for j in range(qubit + 1, 10):
# circuit.cu1(pi/2**(p), qubit, j)
# p += 1
# circuit.barrier()
# return circuit
# for qubit in range(n):
# circuit.h(qubit)
# p = 1
for j in range(qubit + 1, 10):
circuit.cu1(pi/2**(p), qubit, j)
p += 1
circuit.barrier()
return circuit
def IQFT(input, count_times):
simulator = Aer.get_backend('qasm_simulator')
q = QuantumRegister(10)
c = ClassicalRegister(10)
qc = QuantumCircuit(q,c)
input_string = dec2bin(input)
for i in range(10):
if input_string[9-i] == '1':
qc.x(q[i])
qc.barrier()
swap_registers(qc,10)
#qft_rotations(qc,10)
for qubit in range(10):
qc.h(qubit)
p = 1
qft_rotations(qc,qubit,p)
qc.barrier()
qc.measure(q,c)
#circuit_drawer(qc, filename='./IQFT_circuit')
job = execute(qc, simulator, shots=count_times * 100)
result = job.result()
counts = result.get_counts(qc)
return counts
def IQFT_M1(input, count_times):
simulator = Aer.get_backend('qasm_simulator')
q = QuantumRegister(10)
c = ClassicalRegister(10)
qc = QuantumCircuit(q,c)
input_string = dec2bin(input)
for i in range(10):
if input_string[9-i] == '1':
qc.x(q[i])
qc.barrier()
qc.ch(q[3],q[4])#M1
swap_registers(qc,10)
#qft_rotations(qc,10)
for qubit in range(10):
qc.h(qubit)
qft_rotations(qc,qubit,1)
qc.barrier()
qc.measure(q,c)
#circuit_drawer(qc, filename='./IQFT_M1_circuit')
job = execute(qc, simulator, shots=count_times * 100)
result = job.result()
counts = result.get_counts(qc)
return counts
def IQFT_M2(input, count_times):
simulator = Aer.get_backend('qasm_simulator')
q = QuantumRegister(10)
c = ClassicalRegister(10)
qc = QuantumCircuit(q,c)
input_string = dec2bin(input)
for i in range(10):
if input_string[9-i] == '1':
qc.x(q[i])
qc.barrier()
swap_registers(qc,10)
for qubit in range(3):
qc.h(qubit)
p = 1
qft_rotations(qc,qubit,p)
qc.ch(q[4],q[2])#M2
for qubit in range(3,10):
qc.h(qubit)
p = 1
qft_rotations(qc,qubit,p)
qc.barrier()
qc.measure(q,c)
#circuit_drawer(qc, filename='./IQFT_M2_circuit')
job = execute(qc, simulator, shots=count_times * 100)
result = job.result()
counts = result.get_counts(qc)
return counts
def IQFT_M3(input, count_times):
simulator = Aer.get_backend('qasm_simulator')
q = QuantumRegister(10)
c = ClassicalRegister(10)
qc = QuantumCircuit(q, c)
input_string = dec2bin(input)
for i in range(10):
if input_string[9 - i] == '1':
qc.x(q[i])
qc.barrier()
swap_registers(qc, 10)
for qubit in range(3):
qc.h(qubit)
p = 1
qft_rotations(qc, qubit, p)
qc.ch(q[4],q[3])#M3
qft_rotations(qc,3,1)
for qubit in range(4,10):
qc.h(qubit)
p = 1
qft_rotations(qc,qubit,p)
qc.barrier()
qc.measure(q, c)
#circuit_drawer(qc, filename='./IQFT_M3_circuit')
job = execute(qc, simulator, shots=count_times * 100)
result = job.result()
counts = result.get_counts(qc)
return counts
def IQFT_M4(input, count_times):
simulator = Aer.get_backend('qasm_simulator')
q = QuantumRegister(10)
c = ClassicalRegister(10)
qc = QuantumCircuit(q, c)
input_string = dec2bin(input)
for i in range(10):
if input_string[9 - i] == '1':
qc.x(q[i])
qc.barrier()
qc.ch(q[2], q[4])#M4
swap_registers(qc, 10)
for qubit in range(10):
qc.h(qubit)
p = 1
qft_rotations(qc, qubit, p)
qc.barrier()
qc.measure(q, c)
#circuit_drawer(qc, filename='./IQFT_M4_circuit')
job = execute(qc, simulator, shots=count_times * 100)
result = job.result()
counts = result.get_counts(qc)
return counts
def IQFT_M5(input, count_times):
simulator = Aer.get_backend('qasm_simulator')
q = QuantumRegister(10)
c = ClassicalRegister(10)
qc = QuantumCircuit(q, c)
input_string = dec2bin(input)
for i in range(10):
if input_string[9 - i] == '1':
qc.x(q[i])
qc.barrier()
swap_registers(qc, 10)
for qubit in range(6):
qc.h(qubit)
p = 1
qft_rotations(qc, qubit, p)
qc.ch(q[8],q[3])#M5
for qubit in range(6,10):
qc.h(qubit)
p = 1
qft_rotations(qc, qubit, p)
qc.barrier()
qc.measure(q, c)
#circuit_drawer(qc, filename='./IQFT_M5_circuit')
job = execute(qc, simulator, shots=count_times * 100)
result = job.result()
counts = result.get_counts(qc)
return counts
def IQFT_specification(input):
simulator = Aer.get_backend('statevector_simulator')
q = QuantumRegister(10)
c = ClassicalRegister(10)
qc = QuantumCircuit(q,c)
input_string = dec2bin(input)
for i in range(10):
if input_string[9-i] == '1':
qc.x(q[i])
qc.barrier()
swap_registers(qc,10)
#qft_rotations(qc,10)
for qubit in range(10):
qc.h(qubit)
p = 1
qft_rotations(qc,qubit,p)
qc.barrier()
vector = execute(qc, simulator).result().get_statevector()
return vector
def probabilityComputing(input):
pt = []
t = IQFT_specification(input)
for i in range(1024):
pt.append(abs(t[i])**2)
return pt
if __name__ == '__main__':
# print(probabilityComputing(16))
# print(probabilityComputing_M2(16))
# print(probabilityComputing_M3(16))
f = open("iqft_M3_test.txt","a")
for i in range(1023):
f.write('--------------------')
f.write('\n')
f.write(str(i))
        f.write(str(probabilityComputing(i)))  # NOTE: probabilityComputing_M3 is not defined in this file; falling back to the unmutated specification
f.write('\n')
f.close()
# print(QRAM(0,2))
# print(QRAM_M1(0, 2))
# print(QRAM_M2(0, 2))
# print(QRAM_M3(0, 2))
# print(QRAM_M4(0, 2))
# print(QRAM_M5(0, 2))
#print(probabilityComputing(104))
#IQFT(0,1024)
#print(IQFT_M(8,1024))
# print(IQFT(0,1024))
# print(IQFT_M1(0,1024))
# print(IQFT_M2(0, 1024))
#print(IQFT_M3(0, 1024))
# print(IQFT_M4(0, 1024))
# print(IQFT_M5(0, 1024))
# temp = IQFT_M1(200,1024)
# print(temp)
# fre = []
# p = probabilityComputing(8)
# print(p)
# for i in range(1024):
# j_s = dec2bin(i)
# if j_s in temp:
# fre.append(temp[j_s])
# else:
# fre.append(0)
# fre = np.array(fre)
# p = np.array(p)
# fre = robjects.FloatVector(fre)
# p = robjects.FloatVector(p)
# print(fre)
# print(p)
# robjects.r('''
# chitest<-function(observed,theoretical){
# test_result <- chisq.test(x = observed,p = theoretical)
# pvalue = test_result$p.value
# return (pvalue)
# }
# ''')
# t = robjects.r['chitest'](fre,p)
# pvalue = t[0]
# print(pvalue)
# a = probabilityComputing(8)
# for i in range(1024):
# print(a[i])
# print(sum(a))
# AmplitudeAmplification(3,1)
# AmplitudeAmplification_M1(3,1)
# AmplitudeAmplification_M2(3,1)
# AmplitudeAmplification_M3(3,1)
# AmplitudeAmplification_M4(3,1)
# AmplitudeAmplification_M5(3,1)
# AmplitudeAmplification_M6(3,1)
# # # f = open('./specification.txt','a')
# f1 = open('./counts.txt','a')
# a = probabilityComputing(400)
# # for i in range(len(a)):
# # f.write(str(a[i]))
# # f.write('\n')
# temp = AmplitudeAmplification(400,1)
# for i in range(len(a)):
# i_s = dec2bin(i)
# if i_s not in temp:
# fre.append(0)
# else:
# fre.append(temp[i_s])
# f1.write(str(fre[i]))
# f1.write('\n')
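    # The commented-out rpy2 bridge above only runs a chi-square goodness-of-fit
    # test; an equivalent sketch in pure Python (assuming scipy is available):
    #   from scipy.stats import chisquare
    #   chi2, pvalue = chisquare(f_obs=fre, f_exp=np.array(p) * np.sum(fre))
    # where `fre` are the observed shot counts and `p` the theoretical probabilities.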
|
import random
import numpy as np
import matplotlib.pyplot as plt
def rand_seed(m, b, num=2):
# create empty list
x_coor = []
y_coor = []
label = []
# positive and negtive point number
pos_num = int(num / 2)
neg_num = num - pos_num
# random create point
for i in range(pos_num):
x = random.randint(0, 30)
r = random.randint(1, 30)
y = m * x + b - r
# save the coordinate of x and y
x_coor.append(x)
y_coor.append(y)
# save label, right=1, left=0
label.append(1 if m >= 0 else -1)
for i in range(neg_num):
x = random.randint(0, 30)
r = random.randint(1, 30)
y = m * x + b + r
x_coor.append(x)
y_coor.append(y)
label.append(-1 if m >= 0 else 1)
return x_coor, y_coor, label
if __name__ == '__main__':
# set value of m and b
m, b = 1, 2
# plot the function curve
x = np.arange(30) # x = [0, 1,..., 29]
y = m * x + b
plt.plot(x, y)
# plot the random point
# blue for positive and red for negative
x_coor, y_coor, label = rand_seed(m, b, num=30)
plt.plot(x_coor[:15], y_coor[:15], 'o', color='blue')
plt.plot(x_coor[15:], y_coor[15:], 'o', color='red')
plt.show()
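    # Sanity check (a small added sketch): with m >= 0, rand_seed labels points
    # below the line +1 and points above it -1, so every sample should pass:
    for px, py, pl in zip(x_coor, y_coor, label):
        assert pl == (1 if py < m * px + b else -1), (px, py, pl)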
|
"""
Edanur Demir
EENet models
"""
from torch import nn
from flops_counter import get_model_complexity_info
from resnet import ResNet, ResNet6n2
__all__ = ['EENet',
'eenet18', 'eenet34', 'eenet50', 'eenet101', 'eenet152',
'eenet20', 'eenet32', 'eenet44', 'eenet56', 'eenet110',]
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
"""Basic Block defition.
Basic 3X3 convolution blocks for use on ResNets with layers <= 34.
Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
"""
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
"""Bottleneck Block defition.
Bottleneck architecture for > 34 layer ResNets.
Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
"""
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ExitBlock(nn.Module):
"""Exit Block defition.
This allows the model to terminate early when it is confident for classification.
"""
def __init__(self, inplanes, num_classes, input_shape, exit_type):
super(ExitBlock, self).__init__()
_, width, height = input_shape
self.expansion = width * height if exit_type == 'plain' else 1
self.layers = []
if exit_type == 'bnpool':
self.layers.append(nn.BatchNorm2d(inplanes))
if exit_type != 'plain':
self.layers.append(nn.AdaptiveAvgPool2d(1))
self.confidence = nn.Sequential(
nn.Linear(inplanes * self.expansion, 1),
nn.Sigmoid(),
)
self.classifier = nn.Sequential(
nn.Linear(inplanes * self.expansion, num_classes),
nn.Softmax(dim=1),
)
def forward(self, x):
for layer in self.layers:
x = layer(x)
x = x.view(x.size(0), -1)
conf = self.confidence(x)
pred = self.classifier(x)
return pred, conf
class EENet(nn.Module):
"""Builds a EENet like architecture.
Arguments are
* is_6n2model: Whether the architecture of the model is 6n+2 layered ResNet.
* block: Block function of the architecture either 'BasicBlock' or 'Bottleneck'.
* total_layers: The total number of layers.
* repetitions: Number of repetitions of various block units.
* num_ee: The number of early exit blocks.
* distribution: Distribution method of the early exit blocks.
* num_classes: The number of classes in the dataset.
* zero_init_residual: Zero-initialize the last BN in each residual branch,
so that the residual branch starts with zeros,
and each residual block behaves like an identity. This improves the model
by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
* input_shape: Input shape of the model according to dataset.
Returns:
The nn.Module.
"""
def __init__(self, is_6n2model, block, total_layers, num_ee, distribution, num_classes,
input_shape, exit_type, loss_func, repetitions=None, zero_init_residual=False,
**kwargs):
super(EENet, self).__init__()
if is_6n2model:
self.inplanes = 16
repetitions = [(total_layers-2) // 6]*3
counterpart_model = ResNet6n2(block, total_layers, num_classes, input_shape)
else:
self.inplanes = 64
counterpart_model = ResNet(block, repetitions, num_classes, input_shape)
self.stages = nn.ModuleList()
self.exits = nn.ModuleList()
self.cost = []
self.complexity = []
self.layers = nn.ModuleList()
self.stage_id = 0
self.num_ee = num_ee
self.total_layers = total_layers
self.exit_type = exit_type
self.distribution = distribution
self.num_classes = num_classes
self.input_shape = input_shape
self.exit_threshold = 0.5
if loss_func == 'v3':
self.exit_threshold = 1./self.num_ee
channel, _, _ = input_shape
total_flops, total_params = self.get_complexity(counterpart_model)
self.set_thresholds(distribution, total_flops)
if is_6n2model:
self.layers.append(nn.Sequential(
nn.Conv2d(channel, 16, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(16),
nn.ReLU(inplace=True),
))
else:
self.layers.append(nn.Sequential(
nn.Conv2d(channel, 64, kernel_size=7, stride=2, padding=3, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
))
planes = self.inplanes
stride = 1
for repetition in repetitions:
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
self.layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
if self.is_suitable_for_exit():
self.add_exit_block(exit_type, total_flops)
for _ in range(1, repetition):
self.layers.append(block(self.inplanes, planes))
if self.is_suitable_for_exit():
self.add_exit_block(exit_type, total_flops)
planes *= 2
stride = 2
assert len(self.exits) == num_ee, \
'The desired number of exit blocks is too much for the model capacity.'
planes = 64 if is_6n2model else 512
self.layers.append(nn.AdaptiveAvgPool2d(1))
#self.fully_connected = nn.Linear(planes * block.expansion, num_classes)
self.classifier = nn.Sequential(
nn.Linear(planes * block.expansion, num_classes),
nn.Softmax(dim=1),
)
self.confidence = nn.Sequential(
nn.Linear(planes * block.expansion, 1),
nn.Sigmoid(),
)
self.stages.append(nn.Sequential(*self.layers))
self.softmax = nn.Softmax(dim=1)
self.complexity.append((total_flops, total_params))
self.parameter_initializer(zero_init_residual)
def get_complexity(self, model):
"""get model complexity in terms of FLOPs and the number of parameters"""
flops, params = get_model_complexity_info(model, self.input_shape,\
print_per_layer_stat=False, as_strings=False)
return flops, params
def add_exit_block(self, exit_type, total_flops):
"""add early-exit blocks to the model
Argument is
* total_flops: the total FLOPs of the counterpart model.
        This adds an exit block at a suitable intermediate position in the model
        and calculates the FLOPs and parameters up to that exit block.
        These complexity values are saved in self.cost and self.complexity.
"""
self.stages.append(nn.Sequential(*self.layers))
self.exits.append(ExitBlock(self.inplanes, self.num_classes, self.input_shape, exit_type))
intermediate_model = nn.Sequential(*(list(self.stages)+list(self.exits)[-1:]))
flops, params = self.get_complexity(intermediate_model)
self.cost.append(flops / total_flops)
self.complexity.append((flops, params))
self.layers = nn.ModuleList()
self.stage_id += 1
def set_thresholds(self, distribution, total_flops):
"""set thresholds
Arguments are
* distribution: distribution method of the early-exit blocks.
* total_flops: the total FLOPs of the counterpart model.
        This sets a FLOPs threshold for each early-exit block according to the distribution method.
"""
gold_rate = 1.61803398875
flop_margin = 1.0 / (self.num_ee+1)
self.threshold = []
for i in range(self.num_ee):
if distribution == 'pareto':
self.threshold.append(total_flops * (1 - (0.8**(i+1))))
elif distribution == 'fine':
self.threshold.append(total_flops * (1 - (0.95**(i+1))))
elif distribution == 'linear':
self.threshold.append(total_flops * flop_margin * (i+1))
else:
self.threshold.append(total_flops * (gold_rate**(i - self.num_ee)))
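        # Worked example (illustrative): with num_ee = 3 and 'pareto', the
        # thresholds become total_flops * [0.2, 0.36, 0.488], i.e. 1 - 0.8**(i+1).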
def is_suitable_for_exit(self):
"""is the position suitable to locate an early-exit block"""
intermediate_model = nn.Sequential(*(list(self.stages)+list(self.layers)))
flops, _ = self.get_complexity(intermediate_model)
return self.stage_id < self.num_ee and flops >= self.threshold[self.stage_id]
def parameter_initializer(self, zero_init_residual):
"""
Zero-initialize the last BN in each residual branch,
so that the residual branch starts with zeros,
and each residual block behaves like an identity.
This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
"""
for module in self.modules():
if isinstance(module, nn.Conv2d):
nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(module, nn.BatchNorm2d):
nn.init.constant_(module.weight, 1)
nn.init.constant_(module.bias, 0)
if zero_init_residual:
for module in self.modules():
if isinstance(module, Bottleneck):
nn.init.constant_(module.bn3.weight, 0)
elif isinstance(module, BasicBlock):
nn.init.constant_(module.bn2.weight, 0)
def set_multiple_gpus(self):
for idx, stage in enumerate(self.stages):
self.stages[idx] = nn.DataParallel(stage)
for idx, exitblock in enumerate(self.exits):
self.exits[idx] = nn.DataParallel(exitblock)
def forward(self, x):
preds, confs = [], []
for idx, exitblock in enumerate(self.exits):
x = self.stages[idx](x)
pred, conf = exitblock(x)
if not self.training and conf.item() > self.exit_threshold:
return pred, idx, self.cost[idx]
preds.append(pred)
confs.append(conf)
x = self.stages[-1](x)
x = x.view(x.size(0), -1)
pred = self.classifier(x)
conf = self.confidence(x)
if not self.training:
return pred, len(self.exits), 1.0
preds.append(pred)
confs.append(conf)
return preds, confs, self.cost
def eenet18(**kwargs):
"""EENet-18 model"""
model = EENet(is_6n2model=False, block=BasicBlock, total_layers=18,
repetitions=[2, 2, 2, 2], **kwargs)
return model
def eenet34(**kwargs):
"""EENet-34 model"""
model = EENet(is_6n2model=False, block=BasicBlock, total_layers=34,
repetitions=[3, 4, 6, 3], **kwargs)
return model
def eenet50(**kwargs):
"""EENet-50 model"""
model = EENet(is_6n2model=False, block=Bottleneck, total_layers=50,
repetitions=[3, 4, 6, 3], **kwargs)
return model
def eenet101(**kwargs):
"""EENet-101 model"""
model = EENet(is_6n2model=False, block=Bottleneck, total_layers=101,
repetitions=[3, 4, 23, 3], **kwargs)
return model
def eenet152(**kwargs):
"""EENet-152 model"""
model = EENet(is_6n2model=False, block=Bottleneck, total_layers=152,
repetitions=[3, 8, 36, 3], **kwargs)
return model
def eenet20(**kwargs):
"""EENet-20 model"""
model = EENet(True, BasicBlock, 20, **kwargs)
return model
def eenet32(**kwargs):
"""EENet-32 model"""
model = EENet(True, BasicBlock, 32, **kwargs)
return model
def eenet44(**kwargs):
"""EENet-44 model"""
model = EENet(True, BasicBlock, 44, **kwargs)
return model
def eenet56(**kwargs):
"""EENet-56 model"""
model = EENet(True, BasicBlock, 56, **kwargs)
return model
def eenet110(**kwargs):
"""EENet-110 model"""
model = EENet(True, BasicBlock, 110, **kwargs)
return model
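# A minimal inference sketch (hypothetical arguments; num_ee, distribution,
# num_classes, input_shape, exit_type and loss_func come from the training
# configuration, and resnet/flops_counter must be importable):
#   model = eenet20(num_ee=2, distribution='pareto', num_classes=10,
#                   input_shape=(3, 32, 32), exit_type='bnpool', loss_func='v1')
#   model.eval()
#   pred, exit_idx, cost = model(x)  # x is a single-sample batch; inference
#   # returns early as soon as an exit's confidence passes exit_threshold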
|
# Greg Elgin, Connor Hamilton
# CS 205: Warm up project
# Last Updated: 02/10/20
# Parsing system to take a string input and return token values
# Calls query function with tokens as parameters
import shlex
region_list = ["Great Lakes", "Harrisburg Scranton", "Hartford Springfield", "Houston", "Indianapolis", "Jacksonville",
"Las Vegas", "Los Angeles", "Louisville", "Miami Ft Lauderdale", "Nashville", "New Orleans Mobile",
"New York", "Northeast", "Northern New England"]
# Initialize search to allow for user to quit without searching
search = False
# Initialize tokens
tokens = list()
def get_info():
print()
print("You can search for avocado average price, total volume, or best month (of sales)")
print("Begin your search by specifying one of these fields followed by 'region' + region name")
print("Enter 'region list' for a list of regions")
print("Enter q to quit")
def get_user_input():
usr_input = str(input(""))
return usr_input
def get_region_list():
return region_list
def parse():
# Initialize valid to enter while loop
valid = False
global search
while not valid:
try:
# get user input, turn into list of words, initialize empty token list
user_input = get_user_input()
user_input = user_input.lower()
input_list = shlex.split(user_input)
tokens.clear()
# Save length of user input
length = len(input_list)
# If user types "region list" display a list of regions
if length >= 2 and input_list[0] == "region" and input_list[1] == "list":
print("Region list: ")
for item in region_list:
print(item)
print()
            # First word needs to be "average price" or "total volume" or "best month"
if length >= 2 and (input_list[0] == "average" and input_list[1] == "price" or
input_list[0] == "total" and input_list[1] == "volume" or
input_list[0] == "best" and input_list[1] == "month"):
if input_list[0] == "average" and input_list[1] == "price":
tokens.append("AveragePrice")
elif input_list[0] == "total" and input_list[1] == "volume":
tokens.append("TotalVolume")
elif input_list[0] == "best" and input_list[1] == "month":
tokens.append("BestMonth")
# Next word should be 'region' followed by a valid region name
if length >= 4 and input_list[2] == "region":
region_list_lower = [x.lower() for x in region_list]
if input_list[3] in region_list_lower:
tokens.append(input_list[3])
# Next item should be 'month' followed by an int 1-12
# This field is optional for AvgPrice, mandatory for TotalVol, and empty for BestMonth
if length >= 6 and input_list[4] == "month":
if input_list[0] == "best":
print("When searching for Best Month you do not need to input a month")
valid = False
elif input_list[0] == "average":
if (input_list[5] == "1" or input_list[5] == "2" or input_list[5] == "3" or
input_list[5] == "4" or input_list[5] == "5" or input_list[5] == "6" or
input_list[5] == "7" or input_list[5] == "8" or input_list[5] == "9" or
input_list[5] == "10" or input_list[5] == "11" or input_list[5] == "12"):
tokens.append(int(input_list[5]))
else:
print("you must specify a month between 1 and 12")
valid = False
elif input_list[0] == "total":
try:
if int(input_list[5]) in range(1, 13):
tokens.append(int(input_list[5]))
else:
print("you must specify a month between 1 and 12")
valid = False
except ValueError:
print("you must specify a month between 1 and 12")
# Last item should be 'type' followed by 'conventional' or 'organic'
if length >= 8 and input_list[6] == "type":
if input_list[7] == 'conventional' or input_list[7] == "organic":
tokens.append(input_list[7])
if length == 8:
valid = True
search = True
else:
print("You input too much information")
else:
print("type must be 'organic' or 'conventional'")
valid = False
else:
print("You must specify a type after month")
valid = False
elif length >= 5 and input_list[4] != "month" and (input_list[0] == "best" or
input_list[0] == 'average'):
tokens.append("")
# Last item should be 'type' followed by 'conventional' or 'organic'
if length >= 6 and input_list[4] == "type":
if input_list[5] == 'conventional' or input_list[5] == "organic":
tokens.append(input_list[5])
if length == 6:
valid = True
search = True
else:
print("You input too much information")
else:
print("type must be 'organic' or 'conventional'")
valid = False
else:
print("You must specify a type after region")
valid = False
elif input_list[0] != "best":
print("You must enter month followed by a number 1-12 after you specify region")
valid = False
else:
print("You must specify a type after region")
valid = False
else:
print("That is not a valid region")
print("Input 'region list' to see valid regions")
print("If region is multiple words input it in quotes")
valid = False
else:
print("Please input 'region' followed by a region name after you specify average price, "
"total volume, or best month")
valid = False
# exit program if input is "q"
elif length >= 1 and input_list[0] == "q":
valid = True
search = False
else:
print("Please begin your search with 'average price', 'total volume', or 'best month'")
print("Or input 'q' to quit")
valid = False
except ValueError:
print("Invalid input")
print("Did you forget to close your quotes?")
valid = False
# FOR TESTING PURPOSES
def query(tokens_list):
print(tokens_list)
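# A minimal driver sketch (an assumed wiring of the functions above, not part
# of the original module): prompt, parse, then run the query unless the user quit.
if __name__ == "__main__":
    get_info()
    parse()
    if search:
        query(tokens)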
|
"""SCT URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from app0.views import *
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^$', home, name='home'),
url(r'^search/$', search, name = 'search'),
url(r'^insert/$', insert, name = 'insert'),
url(r'^update/$', update, name = 'update'),
    url(r'^delete/$', delete, name = 'delete'),
url(r'^search_query/$', search_query, name='search_query'),
url(r'^insert_query/$', insert_query, name='insert_query'),
url(r'^update_query/$', update_query, name = 'update_query'),
]
|
############################################################################
# LOGISTIC REGRESSION #
# Note: NJUST Machine Learning Assignment. #
# Task: Binary Classification, Multi-class Classification. #
# Optimization: Gradient Descent (GD), Stochastic Gradient Descent (SGD). #
# Author: Edmund Sowah #
############################################################################
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
class softmax_regression():
def __init__(self, data, label, num_class):
self.label = label
self.num_class = num_class
self.num_data = len(data)
data = self.preprocess(data)
bias = np.ones(len(data),)
self.data = np.vstack((data.T, bias)).T
self.num_feature = self.data.shape[1]
self.theta = np.random.randn(self.num_feature, self.num_class)
if num_class == 2:
self.lr = 1
elif num_class == 3:
self.lr = 1.5
self.color_list = ['r', 'g', 'b', 'y']
def preprocess(self, data):
data = data - np.mean(data, axis=0, keepdims=True)
data = data / (np.max(data, axis=0, keepdims=True) -
np.min(data, axis=0, keepdims=True))
return data
def softmax(self, data):
''' Calculate Softmax Prediction Vector. '''
# `Feature` is an n by num_class matrix
feature = np.dot(data, self.theta)
# Prevent overflow by subtracting the maximum vector
feature -= np.max(feature, axis=1, keepdims=True)
exp_feature = np.exp(feature)
sum_exp_feature = np.sum(exp_feature, axis=1, keepdims=True)
return exp_feature / sum_exp_feature
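    # Numerical note (worked example): raw scores [1000, 1001] overflow np.exp,
    # but after subtracting the row max they become [-1, 0], and
    # softmax([-1, 0]) = [1/(1+e), e/(1+e)] ~= [0.269, 0.731] -- identical to
    # what the unshifted formula would give in exact arithmetic.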
def loss(self):
''' Use Log-likelihood estimation to compute loss function. '''
score = np.dot(self.data, self.theta)
score -= np.max(score, axis=1, keepdims=True)
sum_exp_score = np.sum(np.exp(score), axis=1)
loss = np.log(sum_exp_score)
# Remove redundant terms: the correct term
loss -= score[np.arange(self.num_data), self.label]
loss = (1. / self.num_data) * np.sum(loss)
return loss
def update_parameter(self, stochastic=0):
        ''' Calculate the gradient and take one (stochastic) descent step. '''
if stochastic != 0:
rand_i = np.random.randint(0, self.num_data, stochastic)
if stochastic == 1:
self.lr = 10
x = self.data[rand_i]
y = self.label[rand_i]
else:
self.lr = 5
x = self.data[rand_i]
y = self.label[rand_i]
else:
x = self.data
y = self.label
softmax = self.softmax(x)
softmax[np.arange(len(x)), y] -= 1.
gred = (1. / self.num_data) * np.dot(x.T, softmax)
self.theta -= self.lr * gred
print('theta:\n', self.theta)
if __name__ == '__main__':
    cl = input('Input Classification number: ')
    opt = input("Input Optimization strategy's number: ")
if cl == '2':
# Import Binary data
        exam_data = np.loadtxt('Exam/exam_x.dat')
        exam_label = np.loadtxt('Exam/exam_y.dat', dtype=int)
data = exam_data
label = exam_label
elif cl == '3':
# Import Multi-class data
        iris_data = np.loadtxt('Iris/iris_x.dat')
        iris_label = np.loadtxt('Iris/iris_y.dat', dtype=int)
data = iris_data
label = iris_label
    else:
        raise SystemExit('no {}-class task available!'.format(cl))
softmax_reg = softmax_regression(data, label, int(cl))
print('Initiated theta value is:\n', softmax_reg.theta)
loss_list = []
step_list = []
acc_list = []
plt.ion()
fig, ax = plt.subplots(1, 4, figsize=(16, 5))
for steps in range(300):
step_list.append(steps)
pred = softmax_reg.softmax(softmax_reg.data)
classification = np.argmax(pred, 1)
loss = softmax_reg.loss()
print('Current Loss is:\n', loss)
loss_list.append(loss)
plt.subplot(1, 4, 1)
plt.title('Ground Truth')
for i in range(int(cl)):
data_x = np.array(data.T[0][label == i])
data_y = np.array(data.T[1][label == i])
plt.scatter(data_x, data_y, c=softmax_reg.color_list[i])
plt.subplot(1, 4, 2)
plt.title('Classification Plot')
for i in range(int(cl)):
data_x = np.array(data.T[0][classification == i])
data_y = np.array(data.T[1][classification == i])
if len(data_x) == 0:
continue
plt.scatter(data_x, data_y, c=softmax_reg.color_list[i])
ax[1].cla()
plt.subplot(1, 4, 3)
plt.title('Loss')
ax[2].cla()
plt.plot(step_list, loss_list, c='b', ls='-', marker='o')
plt.subplot(1, 4, 4)
acc = sum(label == classification) / softmax_reg.num_data
acc_list.append(acc)
plt.plot(step_list, acc_list, c='g', ls='-', marker='*')
plt.title('Accuracy')
plt.pause(0.1)
        if opt == '0':
softmax_reg.update_parameter()
else:
softmax_reg.update_parameter(int(opt))
|
#coding:utf8
#https://www.jianshu.com/p/c307d04eee56
#1. Goodness-of-fit analysis
# A scientist predicts that when a die is thrown, every face is equally likely. To verify
# the theory, the scientist threw the die 600 times and observed: one 102 times, two 102
# times, three 96 times, four 105 times, five 95 times, six 100 times.
# Clearly this does not match the theoretical expectation exactly -- so is the theory
# wrong? Let's verify with Python.
from scipy import stats
import numpy as np
obs = [102, 102, 96, 105, 95, 100]
exp = [100, 100, 100, 100, 100, 100]
stats.chisquare(obs, f_exp = exp)
#p > 0.95: an excellent fit -- the theory is not wrong
#2. Independence analysis
#An "independence test" checks whether paired observations drawn from two variables are
#independent (e.g. each time draw one person from country A and one from country B, and
#ask whether their responses are unrelated to nationality).
#https://www.cnblogs.com/Yuanjing-Liu/p/9252844.html#_label1_1
#https://blog.csdn.net/QimaoRyan/article/details/72824766
#e.g. kill counts for three pesticides
#   effect     A     B     C
#   killed     37    49    23
#   survived   150   100   57
# question: is the killing effect related to the pesticide type?
import numpy as np
from scipy.stats import chi2_contingency
d = np.array([[37, 49, 23], [150, 100, 57]])
print(chi2_contingency(d))
#p = 0.02 < 0.05: reject the null hypothesis -- the killing effect is related to the
#pesticide type, i.e. the two are not independent
#return values: the chi-square statistic, the p-value, the degrees of freedom, and the
#expected frequencies (an array with the same shape as the input data)
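# A short added sketch of where the fourth return value comes from: under
# independence, expected[i][j] = row_total[i] * col_total[j] / grand_total.
row_totals = d.sum(axis=1, keepdims=True)
col_totals = d.sum(axis=0, keepdims=True)
expected = row_totals * col_totals / float(d.sum())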
|
from flask import render_template, redirect
import helper
def create_link():
return redirect('/')
def get_links():
return render_template(
'index.html',
dar = helper.dar(),
user = 'teste'
)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'winter2021_updated_nocheckboxes.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(400, 250)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.centralwidget)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.verticalLayout_3 = QtWidgets.QVBoxLayout()
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.imageLabel = QtWidgets.QLabel(self.centralwidget)
self.imageLabel.setAlignment(QtCore.Qt.AlignCenter)
self.imageLabel.setObjectName("imageLabel")
self.verticalLayout_3.addWidget(self.imageLabel)
self.verticalLayout_5 = QtWidgets.QVBoxLayout()
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.recordbutton = QtWidgets.QPushButton(self.centralwidget)
self.recordbutton.setMinimumSize(QtCore.QSize(0, 0))
self.recordbutton.setMaximumSize(QtCore.QSize(88, 21))
self.recordbutton.setObjectName("recordbutton")
self.gridLayout.addWidget(self.recordbutton, 2, 0, 1, 1)
self.sendbutton = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.sendbutton.sizePolicy().hasHeightForWidth())
self.sendbutton.setSizePolicy(sizePolicy)
self.sendbutton.setMaximumSize(QtCore.QSize(88, 21))
self.sendbutton.setObjectName("sendbutton")
self.gridLayout.addWidget(self.sendbutton, 2, 1, 1, 1)
# self.serialCloseButton = QtWidgets.QPushButton(self.centralwidget)
# self.serialCloseButton.setEnabled(True)
# sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
# sizePolicy.setHorizontalStretch(0)
# sizePolicy.setVerticalStretch(0)
# sizePolicy.setHeightForWidth(self.serialCloseButton.sizePolicy().hasHeightForWidth())
# self.serialCloseButton.setSizePolicy(sizePolicy)
# self.serialCloseButton.setMaximumSize(QtCore.QSize(88, 21))
# self.serialCloseButton.setObjectName("serialCloseButton")
# self.gridLayout.addWidget(self.serialCloseButton, 0, 1, 1, 1)
# self.clearbutton = QtWidgets.QPushButton(self.centralwidget)
# self.clearbutton.setMinimumSize(QtCore.QSize(0, 0))
# self.clearbutton.setMaximumSize(QtCore.QSize(88, 21))
# self.clearbutton.setObjectName("clearbutton")
# self.gridLayout.addWidget(self.clearbutton, 2, 0, 1, 1)
self.plotbutton = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.plotbutton.sizePolicy().hasHeightForWidth())
self.plotbutton.setSizePolicy(sizePolicy)
self.plotbutton.setMaximumSize(QtCore.QSize(88, 21))
self.plotbutton.setObjectName("plotbutton")
self.gridLayout.addWidget(self.plotbutton, 3, 0, 1, 1)
self.savebutton = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.savebutton.sizePolicy().hasHeightForWidth())
self.savebutton.setSizePolicy(sizePolicy)
self.savebutton.setMaximumSize(QtCore.QSize(88, 21))
self.savebutton.setObjectName("savebutton")
self.gridLayout.addWidget(self.savebutton, 3, 1, 1, 1)
self.com_port = QtWidgets.QComboBox()
self.gridLayout.addWidget(self.com_port, 0, 0, 1, 1)
self.serialOpenButton = QtWidgets.QPushButton(self.centralwidget)
self.serialOpenButton.setMinimumSize(QtCore.QSize(0, 0))
self.serialOpenButton.setMaximumSize(QtCore.QSize(88, 21))
self.serialOpenButton.setObjectName("serialOpenButton")
self.gridLayout.addWidget(self.serialOpenButton, 1, 0, 1, 1)
self.settingsButton = QtWidgets.QPushButton(self.centralwidget)
self.settingsButton.setMinimumSize(QtCore.QSize(0, 0))
self.settingsButton.setMaximumSize(QtCore.QSize(88, 21))
self.settingsButton.setObjectName("settingsButton")
self.gridLayout.addWidget(self.settingsButton, 1, 1, 1, 1)
# self.settings = QtWidgets.QPushButton(self.centralwidget)
# sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
# sizePolicy.setHorizontalStretch(0)
# sizePolicy.setVerticalStretch(0)
# sizePolicy.setHeightForWidth(self.settings.sizePolicy().hasHeightForWidth())
# self.settings.setSizePolicy(sizePolicy)
# self.settings.setMinimumSize(QtCore.QSize(0, 0))
# self.settings.setMaximumSize(QtCore.QSize(300, 21))
# self.settings.setObjectName("settings")
# self.gridLayout.addWidget(self.settings, 3, 0, 1, 2)
self.verticalLayout_5.addLayout(self.gridLayout)
self.verticalLayout_3.addLayout(self.verticalLayout_5)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_3.addItem(spacerItem)
self.horizontalLayout_2.addLayout(self.verticalLayout_3)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
# self.graphWidgetOutput = PlotWidget(self.centralwidget)
# self.graphWidgetOutput.setObjectName("graphWidgetOutput")
# self.verticalLayout_2.addWidget(self.graphWidgetOutput)
# self.graphWidgetInput = PlotWidget(self.centralwidget)
# self.graphWidgetInput.setObjectName("graphWidgetInput")
# self.verticalLayout_2.addWidget(self.graphWidgetInput)
self.horizontalLayout_2.addLayout(self.verticalLayout_2)
MainWindow.setCentralWidget(self.centralwidget)
# self.menubar = QtWidgets.QMenuBar(MainWindow)
# self.menubar.setGeometry(QtCore.QRect(0, 0, 1000, 20))
# self.menubar.setObjectName("menubar")
# self.menuLayout = QtWidgets.QMenu(self.menubar)
# self.menuLayout.setObjectName("menuLayout")
# MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
# self.actionStatics = QtWidgets.QAction(MainWindow)
# self.actionStatics.setObjectName("actionStatics")
# self.actionBeam = QtWidgets.QAction(MainWindow)
# self.actionBeam.setObjectName("actionBeam")
# self.actionSound = QtWidgets.QAction(MainWindow)
# self.actionSound.setObjectName("actionSound")
# self.menuLayout.addAction(self.actionStatics)
# self.menuLayout.addAction(self.actionBeam)
# self.menuLayout.addAction(self.actionSound)
# self.menubar.addAction(self.menuLayout.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "CU@Home DAQ"))
self.imageLabel.setText(_translate("MainWindow", "Image Label (DON\'T CHANGE)"))
self.recordbutton.setText(_translate("MainWindow", "Record Data"))
self.sendbutton.setText(_translate("MainWindow", "Send Data"))
# self.serialCloseButton.setText(_translate("MainWindow", "Close Serial"))
# self.clearbutton.setText(_translate("MainWindow", "Clear Plot"))
self.plotbutton.setText(_translate("MainWindow", "Plot Data"))
self.savebutton.setText(_translate("MainWindow", "Save Data"))
self.serialOpenButton.setText(_translate("MainWindow", "Open Serial"))
self.settingsButton.setText(_translate("MainWindow", "Settings"))
# self.settings.setText(_translate("MainWindow", "Settings"))
# self.menuLayout.setTitle(_translate("MainWindow", "Course"))
# self.actionStatics.setText(_translate("MainWindow", "Statics"))
# self.actionBeam.setText(_translate("MainWindow", "Experimentation Beam"))
# self.actionSound.setText(_translate("MainWindow", "Experimentation Speed of Sound"))
class Ui_Dialog(object):
def setupUi(self, Dialog, COM, fs, N):
Dialog.setObjectName("Dialog")
Dialog.resize(250, 300)
Dialog.setMinimumSize(QtCore.QSize(250, 300))
Dialog.setMaximumSize(QtCore.QSize(250, 300))
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(Dialog)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.topGroupBox = QtWidgets.QGroupBox(Dialog)
self.topGroupBox.setMinimumSize(QtCore.QSize(230, 122))
self.topGroupBox.setMaximumSize(QtCore.QSize(230, 122))
self.topGroupBox.setObjectName("topGroupBox")
self.gridLayoutWidget = QtWidgets.QWidget(self.topGroupBox)
self.gridLayoutWidget.setGeometry(QtCore.QRect(9, 20, 211, 91))
self.gridLayoutWidget.setObjectName("gridLayoutWidget")
self.gridLayout_3 = QtWidgets.QGridLayout(self.gridLayoutWidget)
self.gridLayout_3.setContentsMargins(0, 0, 0, 0)
self.gridLayout_3.setObjectName("gridLayout_3")
# self.timeoutLabel = QtWidgets.QLabel(self.gridLayoutWidget)
# self.timeoutLabel.setObjectName("timeoutLabel")
# self.gridLayout_3.addWidget(self.timeoutLabel, 2, 0, 1, 1)
# self.baudrateLabel = QtWidgets.QLabel(self.gridLayoutWidget)
# self.baudrateLabel.setObjectName("baudrateLabel")
# self.gridLayout_3.addWidget(self.baudrateLabel, 1, 0, 1, 1)
self.portsLabel = QtWidgets.QLabel(self.gridLayoutWidget)
self.portsLabel.setObjectName("portsLabel")
self.gridLayout_3.addWidget(self.portsLabel, 0, 0, 1, 1)
self.timeout = QtWidgets.QLineEdit(self.gridLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
# sizePolicy.setHeightForWidth(self.timeout.sizePolicy().hasHeightForWidth())
# self.timeout.setSizePolicy(sizePolicy)
# self.timeout.setAlignment(QtCore.Qt.AlignCenter)
# self.timeout.setObjectName("timeout")
# self.gridLayout_3.addWidget(self.timeout, 2, 1, 1, 1)
# self.baudrate = QtWidgets.QComboBox(self.gridLayoutWidget)
# self.baudrate.setObjectName("baudrate")
# self.baudrate.addItem("")
# self.baudrate.addItem("")
# self.baudrate.addItem("")
# self.baudrate.addItem("")
# self.gridLayout_3.addWidget(self.baudrate, 1, 1, 1, 1)
self.ports = QtWidgets.QComboBox(self.gridLayoutWidget)
self.ports.setObjectName("ports")
self.gridLayout_3.addWidget(self.ports, 0, 1, 1, 1)
self.verticalLayout.addWidget(self.topGroupBox)
self.bottomGroupBox = QtWidgets.QGroupBox(Dialog)
self.bottomGroupBox.setMinimumSize(QtCore.QSize(230, 121))
self.bottomGroupBox.setMaximumSize(QtCore.QSize(230, 121))
self.bottomGroupBox.setObjectName("bottomGroupBox")
self.gridLayoutWidget_2 = QtWidgets.QWidget(self.bottomGroupBox)
self.gridLayoutWidget_2.setGeometry(QtCore.QRect(10, 20, 211, 91))
self.gridLayoutWidget_2.setObjectName("gridLayoutWidget_2")
self.gridLayout_4 = QtWidgets.QGridLayout(self.gridLayoutWidget_2)
self.gridLayout_4.setContentsMargins(0, 0, 0, 0)
self.gridLayout_4.setObjectName("gridLayout_4")
self.samplingrate = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.samplingrate.sizePolicy().hasHeightForWidth())
self.samplingrate.setSizePolicy(sizePolicy)
self.samplingrate.setAlignment(QtCore.Qt.AlignCenter)
self.samplingrate.setObjectName("samplingrate")
self.gridLayout_4.addWidget(self.samplingrate, 2, 1, 1, 1)
self.datawindowsizeLabel = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.datawindowsizeLabel.setObjectName("datawindowsizeLabel")
self.gridLayout_4.addWidget(self.datawindowsizeLabel, 0, 0, 1, 1)
self.datawindowsize = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.datawindowsize.sizePolicy().hasHeightForWidth())
self.datawindowsize.setSizePolicy(sizePolicy)
self.datawindowsize.setAlignment(QtCore.Qt.AlignCenter)
self.datawindowsize.setObjectName("datawindowsize")
self.gridLayout_4.addWidget(self.datawindowsize, 0, 1, 1, 1)
self.sampletime = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.sampletime.sizePolicy().hasHeightForWidth())
self.sampletime.setSizePolicy(sizePolicy)
self.sampletime.setAlignment(QtCore.Qt.AlignCenter)
self.sampletime.setObjectName("sampletime")
self.gridLayout_4.addWidget(self.sampletime, 1, 1, 1, 1)
self.sampletimeLabel = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.sampletimeLabel.setObjectName("sampletimeLabel")
self.gridLayout_4.addWidget(self.sampletimeLabel, 1, 0, 1, 1)
self.samplingrateLabel = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.samplingrateLabel.setObjectName("samplingrateLabel")
self.gridLayout_4.addWidget(self.samplingrateLabel, 2, 0, 1, 1)
self.verticalLayout.addWidget(self.bottomGroupBox)
self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setCenterButtons(True)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayout.addWidget(self.buttonBox)
self.horizontalLayout_2.addLayout(self.verticalLayout)
self.retranslateUi(Dialog, COM, fs, N)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog, COM, fs, N):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Settings"))
self.topGroupBox.setTitle(_translate("Dialog", "Serial Communication"))
# self.timeoutLabel.setText(_translate("Dialog", "Timeout (s)"))
# self.baudrateLabel.setText(_translate("Dialog", "Baud Rate"))
self.portsLabel.setText(_translate("Dialog", "Ports"))
# self.timeout.setText(_translate("Dialog", "0.1"))
# self.baudrate.setItemText(0, _translate("Dialog", "9600"))
# self.baudrate.setItemText(1, _translate("Dialog", "500000"))
# self.baudrate.setItemText(2, _translate("Dialog", "1000000"))
# self.baudrate.setItemText(3, _translate("Dialog", "2000000"))
self.bottomGroupBox.setTitle(_translate("Dialog", "Data"))
self.samplingrateLabel.setText(_translate("Dialog", "Sampling Rate (fs)"))
self.samplingrate.setText(_translate("Dialog", str(fs)))
self.datawindowsizeLabel.setText(_translate("Dialog", "Number of Samples (N)"))
self.datawindowsize.setText(_translate("Dialog", str(N)))
# self.sampletime.setText(_translate("Dialog", "100"))
# self.sampletimeLabel.setText(_translate("Dialog", "Sample Time (s)"))
# self.samplingrateLabel.setText(_translate("Dialog", "Sampling Time (s)"))
from pyqtgraph import PlotWidget
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
import errno
import os
import pandas as pd
import math
import json
import time
from project_thesis.visualization.visualizer import (
visualize_solution,
visualize_test_instance,
)
class Instance:
def __init__(
self,
scooters: pd.DataFrame,
delivery_nodes: pd.DataFrame,
depot: tuple,
service_vehicles: tuple,
optimal_state: list,
number_of_sections: int,
number_of_zones: int,
T_max: int,
is_percent_t_max: bool,
computational_limit: int,
bound: tuple,
model_class,
**kwargs,
):
"""
Wrapper class for a Model class. Contains both raw input data and model input data
:param scooters: dataframe with lat lon and battery in %
:param delivery_nodes: dataframe with lat lon of delivery nodes
:param depot: (lat, lon)
        :param service_vehicles: tuple - (number of service vehicles, scooter capacity, battery capacity)
:param optimal_state: list of optimal state for each zone of the problem
        :param number_of_sections: int - number of sections (zones per axis) the area is divided into
:param T_max: int - time limit for vehicles
:param computational_limit: int - max solution time for model
:param bound: tuple that defines the bound of the geographical area in play
"""
# Save raw data
self.scooters = scooters
self.delivery_nodes = delivery_nodes
self.depot = depot
self.service_vehicles = service_vehicles
self.T_max = T_max
self.is_percent_T_max = is_percent_t_max
self.computational_limit = computational_limit
self.optimal_state = optimal_state
self.number_of_sections = number_of_sections
self.number_of_zones = number_of_zones
self.seed = kwargs.get("seed", None)
# Context
self.bound = bound
# Model
self.model_input = model_class.get_input_class()(
scooters,
delivery_nodes,
depot,
service_vehicles,
optimal_state,
T_max,
is_percent_t_max,
self.number_of_zones,
theta=kwargs.get("theta", 0.05),
beta=kwargs.get("beta", 0.8),
)
self.model = model_class(
self.model_input,
time_limit=computational_limit,
valid_inequalities=kwargs.get("valid_inequalities", None),
symmetry=kwargs.get("symmetry", None),
subsets=kwargs.get("subsets", None),
)
def run(self):
"""
Runs the model of the instance
"""
self.model.optimize_model()
def visualize_solution(
self, save=False, edge_plot=False, time_stamp=time.strftime("%d-%m %H.%M")
):
"""
See documentation of visualize_solution function from visualization
"""
visualize_solution(self, save, edge_plot, time_stamp)
def visualize_raw_data_map(
self, model_name="", save=False, time_stamp=time.strftime("%d-%m %H.%M")
):
"""
See documentation of visualize_solution function from visualization
"""
visualize_test_instance(
self.scooters,
self.delivery_nodes,
self.bound,
self.number_of_sections,
model_name,
save,
time_stamp,
)
def print_instance(self):
print("\n-------------- INSTANCE --------------\n")
print(self.get_model_name(True))
print("\n--------------------------------------\n")
def get_runtime(self):
"""
        :return: the elapsed time in seconds Gurobi spent reaching the optimal solution
"""
return self.model.m.Runtime
def get_number_of_nodes(self):
return len(self.scooters) + len(self.delivery_nodes) + 1
def save_model_and_instance(self, time_stamp):
"""
        Saves the Gurobi model; the file name encodes: zones per axis, nodes per zone, T_max, number of vehicles, computational limit
"""
path = "saved_models/" + time_stamp
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
file_path_solution = f"{path}/{self.get_model_name()}.json"
self.model.m.write(file_path_solution)
with open(file_path_solution, "r") as jsonFile:
data = json.load(jsonFile)
visits = [self.model.y[key].x for key in self.model.y if 0 < key[0]]
visit_percentage = sum(visits) / (len(self.model_input.locations) - 1)
data["Visit Percentage"] = visit_percentage
data["Deviation Before"] = self.deviation_before()
deviation_after, deviation_after_squared = self.deviation_from_optimal_state()
data["Deviation After"] = deviation_after
data["Deviation After Squared"] = deviation_after_squared
data["Instance"] = self.instance_to_dict()
data["Variables"] = self.get_model_variables()
data["Seed"] = self.seed
if self.model.valid_inequalities:
data["Valid Inequalities"] = self.model.valid_inequalities
else:
data["Valid Inequalities"] = "-"
if self.model.symmetry:
data["Symmetry constraint"] = self.model.symmetry
else:
data["Symmetry constraint"] = "-"
with open(file_path_solution, "w") as jsonFile:
json.dump(data, jsonFile)
def instance_to_dict(self):
return {
"scooters": self.scooters.to_dict(),
"delivery_nodes": self.delivery_nodes.to_dict(),
"depot": self.depot,
"service_vehicles": self.service_vehicles,
"optimal_state": self.optimal_state,
"number_of_sections": self.number_of_sections,
"T_max": self.model_input.shift_duration,
"is_percent_T_max": (self.is_percent_T_max, self.T_max),
"computational_limit": self.computational_limit,
"bound": self.bound,
"model_class": self.model.to_string(False),
"theta": self.model_input.theta if self.model.to_string() == "A" else "x",
"beta": self.model_input.beta if self.model.to_string() == "A" else "x",
}
def get_model_variables(self):
variables = {}
for var in self.model.m.getVars():
variables[var.VarName] = var.X
return variables
def get_model_name(self, print_mode=False):
num_of_service_vehicles, scooter_cap, battery_cap = self.service_vehicles
scooters_per_section = int(len(self.scooters) / (self.number_of_sections * 2))
if not self.model.symmetry:
symmetry = "None"
else:
symmetry = self.model.symmetry
if not self.model.valid_inequalities:
valid_inequalities = "None"
else:
valid_inequalities = self.model.valid_inequalities
if self.is_percent_T_max:
percent = (
f"T max calculated from TSP - {self.is_percent_T_max}\n"
+ f"Shift duration as percent of TSP - {int(self.T_max*100)}%\n"
)
else:
percent = f"T max calculated from TSP - {self.is_percent_T_max}\n"
return (
f"Number of sections - {self.number_of_sections}\n"
+ f"Scooters per zone - {scooters_per_section}\n"
+ f"Number of service vehicles - {num_of_service_vehicles}\n"
+ percent
+ f"Shift duration - {int(self.model_input.shift_duration)}\n"
+ f"Computational limit - {self.computational_limit}\n"
+ f"Model type - {self.model.to_string(False)}\n"
+ f"Symmetry constraint - {' '.join(['%-2s,' % (x,) for x in symmetry])}\n"
+ f"Valid Inequalities - {' '.join(['%-2s,' % (x,) for x in valid_inequalities])}\n"
+ f"Seed - {self.seed}"
if print_mode
else f"model_{self.number_of_sections}_{scooters_per_section}_{num_of_service_vehicles}_"
+ f"{int(self.model_input.shift_duration)}_{self.computational_limit}_"
+ f"{self.model.to_string()}_{round(self.model_input.theta,4)}_{round(self.model_input.beta,3)}_"
f"{symmetry}_{valid_inequalities}"
)
def is_feasible(self):
return self.model.m.MIPGap != math.inf
def deviation_from_optimal_state(self):
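        # Per-zone accounting after the operation: a scooter picked up by a
        # vehicle (p = 1) no longer counts toward its zone, a scooter whose
        # battery was swapped (y = 1) counts as 1 (full), and an untouched
        # scooter keeps its current battery level.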
optimal_state = self.calculate_optimal_state()
deviation = 0
deviation_squared = 0
for z in self.model_input.zones:
battery_in_zone = 0
for s in self.model_input.zone_scooters[z]:
battery = 0
visited = False
for v in self.model_input.service_vehicles:
if s in self.model_input.scooters and self.model.p[(s, v)].x == 1:
visited = True
elif self.model.y[(s, v)].x == 1:
visited = True
battery += 1
if not visited and s in self.model_input.scooters:
battery += self.model_input.battery_level[s]
battery_in_zone += battery
deviation += abs(optimal_state - battery_in_zone)
deviation_squared += (optimal_state - battery_in_zone) ** 2
return deviation, deviation_squared
def deviation_before(self):
optimal_state = self.calculate_optimal_state()
deviation = 0
for z in self.model_input.zones:
battery_in_zone = sum(
[
self.model_input.battery_level[s]
for s in self.model_input.zone_scooters[z]
if s in self.model_input.scooters
]
)
deviation += abs(optimal_state - battery_in_zone)
return deviation
def calculate_optimal_state(self):
return self.model_input.num_scooters / (self.number_of_sections ** 2)
|
import time
from django.core.exceptions import ValidationError
from rest_framework import serializers
def hh_mm_to_minutes(str_hh_mm):
"""Перевести строку формата 'HH:MM' во время."""
minutes = time.strptime(str_hh_mm, '%H:%M')
return minutes.tm_hour * 60 + minutes.tm_min
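# Example (illustrative): interval_validator('09:00-18:30') returns (540, 1110),
# i.e. the start and end of the interval in minutes from midnight.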
def interval_validator(value):
"""Проверить является ли строка интервалом времени в формате 'HH:MM-HH:MM'
и вернуть времена начала и конца периода."""
try:
value = value.split('-')
begin = hh_mm_to_minutes(value[0])
end = hh_mm_to_minutes(value[1])
except ValueError:
raise serializers.ValidationError(
'Значение не является интервалом времени в формате "HH:MM-HH:MM"'
)
return begin, end
def interval_list_validator(interval_list):
"""Проверить список интервалов и вернуть список кортежей с валидным
интервалом и временами начала и конца интервала."""
new_list = []
for interval in interval_list:
begin, end = interval_validator(interval)
new_list.append((interval, begin, end))
return new_list
def weight_validator(value):
"""Проверить, что вес соответствует нормам сервиса."""
if not (0 < value <= 50):
raise ValidationError('Недопустимый вес заказа')
def check_unknown_fields(fields, data):
"""Проверить, что в сериализуемый контент не содержит необъявленных
полей."""
if data is not serializers.empty:
unknown_fields = set(data) - set(fields)
if unknown_fields:
errors = [f for f in unknown_fields]
raise serializers.ValidationError({
'unknown_fields': errors,
})
|
import urllib.request
from bs4 import BeautifulSoup
import csv
from time import sleep
import pandas as pd
import json
import os
from PIL import Image
import yaml
import requests
import sys
import argparse
import Levenshtein
vol = 6
curation_uri = "https://raw.githubusercontent.com/nakamura196/genji_curation/master/docs/iiif/kuronet/"+str(vol).zfill(2)+".json"
curation_data = requests.get(curation_uri).json()
page_text_map = {}
with open('data/'+str(vol)+".csv", 'r') as f:
reader = csv.reader(f)
    header = next(reader)  # skip the header row
index = 0
for row in reader:
if row[1] != "":
page_text_map[index] = int(row[1])
index += 1
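# Rebuild the curation members, keeping only canvases that have a page number
# in the CSV and labelling each one with its page in the printed edition.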
members2 = []
members = curation_data["selections"][0]["members"]
for index in page_text_map:
member = members[index]
p = int(page_text_map[index])
member2 = {
"@id": member["@id"],
"@type": "sc:Canvas",
"description": "",
"label": "新編日本古典文学全集 p."+str(p),
"metadata": [
{
"label": "p",
"value": p
}
]
}
members2.append(member2)
curation_data["selections"][0]["members"] = members2
with open("../../docs/iiif/saga/"+str(vol).zfill(2)+".json", 'w') as f2:
    json.dump(curation_data, f2, ensure_ascii=False, indent=4,
              sort_keys=True, separators=(',', ': '))
|
# https://wikidocs.net/42528
'''
Q1. Write a function (is_odd) that determines whether a given natural number is odd or even.
'''
def is_odd(num):
    return 'odd' if num % 2 == 1 else 'even'
print(is_odd(2))
'''
Q2. Write a function that computes the average of all the numbers given as input. (The number of inputs is not fixed.)
'''
numbers = input('Input the numbers: ')
toString = filter(lambda x: x, numbers.split(' '))
toInt = list(map(int, toString))
print(sum(toInt) / len(toInt))
'''
Q3. The following program reads two numbers, adds them, and prints the result.
input1 = input("첫번째 숫자를 입력하세요:")
input2 = input("두번째 숫자를 입력하세요:")
total = input1 + input2
print("두 수의 합은 %s 입니다" % total)
Run this program:
첫번째 숫자를 입력하세요:3
두번째 숫자를 입력하세요:6
두 수의 합은 36 입니다
Entering 3 and 6 produced 36 instead of 9. Fix the bug in this program.
※ Try using the int function.
'''
input1 = int(input("첫번째 숫자를 입력하세요:"))
input2 = int(input("두번째 숫자를 입력하세요:"))
total = input1 + input2
print("두 수의 합은 %s 입니다" % total)
'''
Q4. Pick the one statement below whose output differs from the others.
print("you" "need" "python")
print("you"+"need"+"python")
print("you", "need", "python")
print("".join(["you", "need", "python"]))
'''
print("you", "need", "python")
'''
Q5. The following program writes the string "Life is too short" to a file "test.txt", then reads the file back and prints it.
f1 = open("test.txt", 'w')
f1.write("Life is too short")
f2 = open("test.txt", 'r')
print(f2.read())
This program does not print the expected "Life is too short". Fix it so that it prints the expected value.
'''
f1 = open("test.txt", 'w')
f1.write("Life is too short\n")
f1.close()
f2 = open("test.txt", 'r')
print(f2.read())
'''
Q6. Write a program that saves user input to a file (test.txt). (When the program is run again, the existing content must be kept and the new input appended.)
'''
f = open('test.txt', 'a')
f.write(input('Enter text to append: ') + '\n')
f.close()
f1 = open('test.txt', 'r')
print(f1.readlines())
'''
Q7. There is a file test.txt with the content below. Replace every occurrence of the string "java" in the file with "python" and save it.
Life is too short
you need java
'''
f = open('test.txt', 'r')
lines = f.readlines()
f.close()
f = open('test.txt', 'w')
for line in lines:
    f.write(line.replace('java', 'python'))
f.close()
|
""" Script that streams the emotion detected in the camera feed to a local web-application.
Faces are detected in frames; for each face detected in a frame, a rectangle is drawn around the face. Once this is
done, the emotion displayed in the face is written to the rectangle.
There are a few errors in readings due to lighting and face angles.
"""
from flask import Flask, render_template, Response
import cv2
import imutils
app = Flask(__name__)
@app.route('/')
def index():
"""Video streaming ."""
return render_template('index.html')
def gen():
# Initialise video capture with OpenCV
cap = cv2.VideoCapture(0)
# Use pretrained face detection cascade classifier available with OpenCV
faceCascade = cv2.CascadeClassifier("/Users/yenji/opencv/data/haarcascades/haarcascade_frontalface_default.xml")
# Use fisher_face face detector that has been trained to detect emotions.
fisher_face = cv2.face.FisherFaceRecognizer_create()
fisher_face.read('/Users/yenji/Desktop/Emotion-Detection/emotion_detection_model_Haar(fisher).xml')
emotions = ["neutral", "anger", "disgust", "fear", "happy", "sadness", "surprise"] # Removed Contempt
while (True):
# Capture frame-by-frame
ret, frame = cap.read()
frame = imutils.resize(frame, width=600)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
face = faceCascade.detectMultiScale(gray)
print (len(face))
        # TODO: handle multiple faces per frame; with several detections it is ambiguous which prediction belongs to which face
if len(face) == 1: #Comment out to detect more than one face.
# Draw rectangle around face
for (x, y, w, h) in face: # get coordinates and size of rectangle containing face
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
gray = gray[y:y + h, x:x + w] # Cut rectangle to face size
gray = cv2.resize(gray, (350, 350))
label, confidence = fisher_face.predict(gray) # Get current emotion in face
cv2.putText(frame, emotions[label], (x, y),
cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 255, 0),
1) # Put emotion found in face on rectangle containing face
# Display the resulting frame
cv2.imwrite('pic.jpg', frame)
        # emit the JPEG as one part of a multipart/x-mixed-replace (MJPEG) stream
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + open('pic.jpg', 'rb').read() + b'\r\n')
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
@app.route('/video_feed')
def video_feed():
"""Video streaming route. Put this in the src attribute of an img tag."""
return Response(gen(),
mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=80, debug=True, threaded=True)
|
from app import create_app
from config import Config
import os
import logging
import pytest
from pandas import DataFrame, read_csv
from flask_jwt_extended import create_access_token
TEST_ROOT: str = os.path.dirname(os.path.abspath(__file__))
TEST_USER: dict = {"id": 1, "username": "test", "password": "password"}
CSV_TESTING_FILENAME: str = "destinations_testing_data.csv"
CSV_TESTING_FILEPATH: str = os.path.join(TEST_ROOT, CSV_TESTING_FILENAME)
class TestConfig(Config):
TESTING: bool = True
SQLALCHEMY_DATABASE_URI: str = "sqlite://"
@pytest.fixture()
def client():
yield create_app(TestConfig).test_client()
@pytest.fixture()
def auth_header():
app = create_app(TestConfig)
with app.app_context():
token: str = create_access_token(TEST_USER)
headers: dict = {"Authorization": "Bearer {}".format(token)}
return headers
@pytest.fixture()
def df():
df: DataFrame = read_csv(CSV_TESTING_FILEPATH)
logging.debug(f"filepath: {CSV_TESTING_FILEPATH} size: {df.shape}")
return df
@pytest.fixture()
def data(df):
DATA: list = [
{"latitude": df.latitude.iloc[i], "longitude": df.longitude.iloc[i],}
for i in range(len(df))
]
return DATA
|
class Solution:
# @param A : string
# @param B : integer
# @return a list of integers
def findPerm(self, A, B):
"""
Just walk through an array of size B increasing or decreasing numbers as necessary,
keeping track of the current largest and smallest numbers, and then make a pass
over the array adding (1 - smallest) to each number so they'll all be positive. I
would have gotten more points and a faster time except that InterviewBit kept
giving me a "Internal Error. We are working on fixing this issue ASAP" when I
would click Submit.
"""
smallest = largest = 1
results = [1] * B
for i, c in enumerate(A):
if c == "I":
results[i + 1] = largest + 1
largest += 1
else:
results[i + 1] = smallest - 1
smallest -= 1
return [num + (1 - smallest) for num in results]
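
# A quick sanity check (my own example, not from InterviewBit): for A = "IDID"
# and B = 5 the walk above yields [3, 4, 2, 5, 1], which respects the
# increase/decrease pattern.
if __name__ == "__main__":
    perm = Solution().findPerm("IDID", 5)
    assert sorted(perm) == [1, 2, 3, 4, 5]
    assert all((a < b) == (c == "I") for a, b, c in zip(perm, perm[1:], "IDID"))
    print(perm)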
|
import socket

size = 1024
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('', 8000))  # Leaving the host part blank means 'localhost'
print(client.getsockname())
print('%')
name = ''
while 1:
    # read from keyboard
    while not name:
        name = input("Name:")
    client.send(name.encode())
    data = input("Data:")
    if data:
        client.send(data.encode())
    data = client.recv(size).decode()
    if data:
        print(data, '%')
client.close()
|
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from datetime import datetime
import time
class Team(models.Model):
"""
Team model class.
"""
scoreboard = models.ForeignKey('Scoreboard', related_name='teams', related_query_name='team')
team_name = models.CharField(max_length=60, unique=True)
points = models.IntegerField(default=0)
correct_flags = models.IntegerField(default=0)
wrong_flags = models.IntegerField(default=0)
user = models.OneToOneField(User, related_name='team', related_query_name='team')
solved = models.ManyToManyField('Challenge', blank=True, related_name='solved', through='ChallengeTimestamp')
last_timestamp = models.DateTimeField(default=datetime.fromtimestamp(0))
created = models.DateTimeField(auto_now_add=True)
class Meta:
verbose_name_plural = 'Teams'
def __unicode__(self):
return '{}'.format(self.team_name)
def solves(self):
challenge_timestamps = []
team_challenge_timestamps = self.challenge_timestamps.all()
for timestamp in team_challenge_timestamps:
_time = int(time.mktime(timestamp.created.timetuple()))
_id = timestamp.challenge.id
challenge_timestamps.append((_id, _time))
return challenge_timestamps
def lasttimestamp(self):
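        # note: '%s' (seconds since the epoch) is a platform-specific strftime
        # directive; it works on Linux/glibc but is not portable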
return int(self.last_timestamp.strftime('%s'))
def team(self):
"""
Alias for teamname.
Created for ctftime api.
"""
return self.team_name
def score(self):
"""
Alias for points.
Created for ctftime api.
"""
return self.points
|
def insertionSort(arr):
    # classic insertion sort: grow a sorted prefix, inserting each new key
    i = 1
    while i < len(arr):
        key = arr[i]
        j = i - 1
        # shift elements greater than key one slot to the right
        while j >= 0 and key < arr[j]:
            arr[j + 1] = arr[j]
            j -= 1
        arr[j + 1] = key
        i += 1
arr = [12, 11, 13, 5, 6]
insertionSort(arr)
print ("Sorted array is:")
for i in range(len(arr)):
print ("%d" %arr[i])
|
# -*- coding: utf-8 -*-
from typing import List
class Solution:
def eraseOverlapIntervals(self, intervals: List[List[int]]) -> int:
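        # Greedy interval scheduling: sort by right endpoint, keep every
        # interval that starts at or after the last kept end, and count the
        # rest as overlaps to erase.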
intervals.sort(key=lambda interval: interval[1])
current_end, result = float("-inf"), 0
for interval in intervals:
if interval[0] >= current_end:
current_end = interval[1]
else:
result += 1
return result
if __name__ == "__main__":
solution = Solution()
assert 1 == solution.eraseOverlapIntervals([[1, 2], [2, 3], [3, 4], [1, 3]])
assert 2 == solution.eraseOverlapIntervals([[1, 2], [1, 2], [1, 2]])
assert 0 == solution.eraseOverlapIntervals([[1, 2], [2, 3]])
|
print("LETTER K HAS BEEN SUCCESSFULLY EXECUTED")
|
# -*- coding: utf-8 -*-
class Parser:
def __init__(self, tokens, tabelaDeSimbolos):
        self.tokens = tokens  # list of all tokens read from the file
        self.tokenAtual = ""  # token currently being analyzed
        self.posicaoAtual = 0  # position of the current token in the "tokens" list
        self.errosAtuais = []  # list used to store the errors found
        self.tabelaDeSimbolos = tabelaDeSimbolos  # symbol table
        self.paraExcluir = []  # symbol-table indices to be deleted
    # helper method that checks whether the expected token matches the token under
    # analysis; returns True if so, False otherwise
def match(self, tokenEsperado):
if (self.tokenAtual == tokenEsperado):
return True
return False
    # helper method that reads the next token
def proximoToken(self):
if ((self.posicaoAtual >= len(self.tokens)) or self.posicaoAtual == -1):
self.posicaoAtual = -1
return False
else:
if ((self.tokens[self.posicaoAtual].tipo == "identificador") or (self.tokens[self.posicaoAtual].tipo == "constNumerica")):
self.tokenAtual = self.tabelaDeSimbolos[self.tokens[self.posicaoAtual].valor - 1][1]
else:
self.tokenAtual = self.tokens[self.posicaoAtual].valor
self.posicaoAtual += 1
    # helper method that appends the errors found to self.errosAtuais
def erro(self):
if ((self.tokens[self.posicaoAtual - 1].tipo == "identificador") or
(self.tokens[self.posicaoAtual - 1].tipo == "constNumerica")):
self.errosAtuais.append("Erro sintático na linha " + str(self.tokens[self.posicaoAtual - 1].linha)
+ " e coluna " + str(self.tokens[self.posicaoAtual - 1].coluna) + " : "
+ str(self.tabelaDeSimbolos[int(self.tokens[self.posicaoAtual - 1].valor) - 1][1]))
else:
self.errosAtuais.append("Erro sintático na linha " + str(self.tokens[self.posicaoAtual - 1].linha)
+ " e coluna " + str(self.tokens[self.posicaoAtual - 1].coluna) + " : "
+ str(self.tokens[self.posicaoAtual - 1].valor))
    # helper method that removes duplicate errors from self.errosAtuais
    def removeErrosRepetidos(self):
        self.errosAtuais = list(set(self.errosAtuais))  # remove duplicate entries
    # helper method that checks whether the exponent a number is raised to is
    # well-formed; returns True if so, False otherwise
def verificaExpoente(self, indiceExpoente, expoente):
indice = self.posicaoAtual
self.proximoToken()
indiceExpoente[0] = int(self.tokens[self.posicaoAtual - 1].valor - 1)
numero = self.tokenAtual
expoente[0] = numero
self.tokenAtual = numero[0]
if ((self.match("+")) or (self.match("-")) or (self.digito())):
i = 1
if not(self.digito()):
self.tokenAtual = numero[1]
i += 1
if (self.digito()):
aux = True
while (i < len(numero)):
self.tokenAtual = numero[i]
if not(self.digito()):
                        # NOTE: this branch should be unreachable
aux = False
i += 1
return aux
else:
self.voltaPosicao(indice)
return False
    # helper method that checks whether a number with a decimal point ('.' in our
    # representation) is well-formed; returns True if so, False otherwise
def verificaPontoFlutuante(self, indiceMantissa, mantissa):
self.proximoToken()
indiceMantissa[0] = int(self.tokens[self.posicaoAtual - 1].valor - 1)
i = 1
numero = self.tokenAtual
mantissa[0] = numero
self.tokenAtual = numero[0]
if (self.digito()):
aux = True
while (i < len(numero)):
self.tokenAtual = numero[i]
if not(self.digito()):
                    # NOTE: this branch should be unreachable
aux = False
i += 1
indice = self.posicaoAtual
self.proximoToken()
if (self.match("E")):
expoente = [0]
indiceExpoente = [0]
if (self.verificaExpoente(indiceExpoente, expoente)):
mantissa[0] = mantissa[0] + "E" + expoente[0]
self.paraExcluir.append(indiceExpoente[0])
return True
return False
else:
self.voltaPosicao(indice)
return aux
    # helper method used to return to a position in the token list; this happens
    # while trying alternative productions to find one that fits the token sequence
def voltaPosicao(self, indice):
self.posicaoAtual = indice - 1
self.proximoToken()
def programa(self):
print()
self.proximoToken()
if (self.declaracaoLista()):
print("\n\nO código do arquivo não contém erros sintáticos\n")
self.paraExcluir.sort(key = int, reverse = True)
for i in range(len(self.paraExcluir)):
del self.tabelaDeSimbolos[self.paraExcluir[i]]
else:
print("\n\nO código do arquivo contém erros sintáticos\n")
# self.removeErrosRepetidos()
# for i in range(len(self.errosAtuais)):
# print(self.errosAtuais[i])
def declaracaoLista(self):
continuaDeclaracao = self.declaracao()
if (True):
self.proximoToken()
aux = True
while (self.posicaoAtual != -1):
continuaDeclaracao = self.declaracao()
if not(continuaDeclaracao):
if ((self.tokens[self.posicaoAtual - 1].tipo == "identificador")
or (self.tokens[self.posicaoAtual - 1].tipo == "constNumerica")):
print("Erro sintático na linha " + str(self.tokens[self.posicaoAtual - 1].linha)
+ " e coluna " + str(self.tokens[self.posicaoAtual - 1].coluna) + " : "
+ str(self.tabelaDeSimbolos[int(self.tokens[self.posicaoAtual - 1].valor) - 1][1]))
else:
print("Erro sintático na linha " + str(self.tokens[self.posicaoAtual - 1].linha)
+ " e coluna " + str(self.tokens[self.posicaoAtual - 1].coluna) + " : "
+ str(self.tokens[self.posicaoAtual - 1].valor))
aux = False
self.proximoToken()
while (((self.tokenAtual != "{") and (self.tokenAtual != ";") and (self.tokenAtual != "}"))
and (self.posicaoAtual != -1)):
self.proximoToken()
continuaDeclaracao = True
self.proximoToken()
return aux
else:
return False
def declaracao(self):
indice = self.posicaoAtual
if (self.var_declaracao()):
return True
self.voltaPosicao(indice)
if (self.fun_declaracao()):
return True
return False
def var_declaracao(self):
indice = self.posicaoAtual
if (self.tipo_especificador()):
self.proximoToken()
if (self.ident()):
self.proximoToken()
if (self.match(";")):
return True
elif (self.abre_colchete()):
self.proximoToken()
if (self.num_int()):
self.proximoToken()
if (self.fecha_colchete()):
self.proximoToken()
if (self.match(";")):
return True
while (self.abre_colchete()):
self.proximoToken()
if (self.num_int()):
self.proximoToken()
if (self.fecha_colchete()):
self.proximoToken()
return self.match(";")
else:
self.erro()
return False
else:
self.erro()
return False
else:
self.erro()
return False
else:
self.erro()
return False
else:
self.erro()
return False
else:
self.erro()
return False
else:
self.erro()
return False
else:
self.erro()
return False
def tipo_especificador(self):
if ((self.match("int")) or (self.match("float")) or (self.match("char")) or (self.match("void"))):
return True
elif (self.match("struct")):
self.proximoToken()
if (self.ident()):
self.proximoToken()
if (self.abre_chave()):
self.proximoToken()
if (self.atributos_declaracao()):
if (self.fecha_chave()):
return True
else:
self.erro()
return False
else:
self.erro()
return False
else:
self.erro()
return False
else:
self.erro()
return False
else:
self.erro()
return False
def atributos_declaracao(self):
retorno = self.var_declaracao()
self.proximoToken()
while (retorno):
if (self.tokenAtual == "}"):
return True
retorno = self.var_declaracao()
self.proximoToken()
return retorno
def fun_declaracao(self):
if (self.tipo_especificador()):
self.proximoToken()
if (self.ident()):
self.proximoToken()
if (self.match("(")):
self.proximoToken()
if (self.params()):
if (self.match(")")):
self.proximoToken()
if (self.composto_decl()):
return True
else:
self.erro()
return False
else:
self.erro()
return False
else:
self.erro()
return False
else:
self.erro()
return False
else:
self.erro()
return False
else:
self.erro()
return False
def params(self):
indice = self.posicaoAtual
if (self.param_lista()):
return True
self.posicaoAtual = indice - 1
self.proximoToken()
if (self.match("void")):
self.proximoToken()
return True
self.erro()
return False
def param_lista(self):
if (self.param()):
while (self.match(",")):
self.proximoToken()
if not(self.param()):
self.erro()
return False
return True
else:
self.erro()
return False
def param(self):
if (self.tipo_especificador()):
self.proximoToken()
if (self.ident()):
self.proximoToken()
if (self.abre_colchete()):
self.proximoToken()
if (self.fecha_colchete()):
self.proximoToken()
return True
else:
self.erro()
return False
else:
return True
else:
self.erro()
return False
else:
self.erro()
return False
def composto_decl(self):
if (self.abre_chave()):
self.proximoToken()
if (self.local_declaracoes()):
if (self.comando_lista()):
self.proximoToken()
if (self.fecha_chave()):
return True
else:
self.erro()
return False
else:
self.erro()
return False
else:
self.erro()
return False
else:
self.erro()
return False
def local_declaracoes(self):
continua = self.var_declaracao()
while (continua):
indice = self.posicaoAtual
self.proximoToken()
continua = self.var_declaracao()
if not(continua):
self.posicaoAtual = indice - 1
self.proximoToken()
return True
def comando_lista(self):
continua = self.comando()
while (continua):
indice = self.posicaoAtual
self.proximoToken()
continua = self.comando()
if not(continua):
self.posicaoAtual = indice - 1
self.proximoToken()
return True
def comando(self):
indice = self.posicaoAtual
if (self.expressao_decl()):
return True
self.voltaPosicao(indice)
if (self.composto_decl()):
return True
self.voltaPosicao(indice)
if (self.selecao_decl()):
return True
self.voltaPosicao(indice)
if (self.iteracao_decl()):
return True
self.voltaPosicao(indice)
if (self.retorno_decl()):
return True
self.erro()
return False
def expressao_decl(self):
indice = self.posicaoAtual
if (self.expressao()):
if (self.match(";")):
return True
else:
self.erro()
return False
self.voltaPosicao(indice)
if (self.match(";")):
return True
self.erro()
return False
def selecao_decl(self):
if (self.match("if")):
self.proximoToken()
if (self.match("(")):
self.proximoToken()
if (self.expressao()):
if (self.match(")")):
self.proximoToken()
if (self.comando()):
indice = self.posicaoAtual
self.proximoToken()
if (self.match("else")):
self.proximoToken()
if (self.comando()):
return True
else:
self.erro()
return False
else:
self.voltaPosicao(indice)
return True
else:
self.erro()
return False
else:
self.erro()
return False
else:
self.erro()
return False
else:
self.erro()
return False
else:
self.erro()
return False
def iteracao_decl(self):
if (self.match("while")):
self.proximoToken()
if (self.match("(")):
self.proximoToken()
if (self.expressao()):
if (self.match(")")):
self.proximoToken()
if (self.comando()):
return True
else:
self.erro()
return False
else:
self.erro()
return False
else:
self.erro()
return False
else:
self.erro()
return False
else:
self.erro()
return False
def retorno_decl(self):
if (self.match("return")):
self.proximoToken()
if (self.match(";")):
return True
elif (self.expressao()):
if (self.match(";")):
return True
else:
self.erro()
return False
else:
self.erro()
return False
else:
self.erro()
return False
def expressao(self):
indice = self.posicaoAtual
if (self.var()):
self.proximoToken()
if (self.match("=")):
self.proximoToken()
if (self.expressao()):
return True
else:
self.erro()
return False
else:
self.erro()
self.voltaPosicao(indice)
if (self.expressao_simples()):
return True
else:
self.erro()
return False
def var(self):
if (self.ident()):
indice = self.posicaoAtual
self.proximoToken()
if (self.abre_colchete()):
self.proximoToken()
if (self.expressao()):
if (self.fecha_colchete()):
indice = self.posicaoAtual
self.proximoToken()
while (self.abre_colchete()):
self.proximoToken()
if (self.expressao()):
self.proximoToken()
if not(self.fecha_colchete()):
return False
else:
return False
self.voltaPosicao(indice)
return True
else:
self.erro()
return False
else:
self.erro()
return False
else:
self.voltaPosicao(indice)
return True
else:
self.erro()
return False
def expressao_simples(self):
if (self.expressao_soma()):
indice = self.posicaoAtual
if (self.relacional()):
self.proximoToken()
if (self.expressao_soma()):
return True
else:
self.erro()
return False
else:
return True
else:
self.erro()
return False
def relacional(self):
if ((self.match("<=")) or (self.match("<")) or (self.match(">"))
or (self.match(">=")) or (self.match("==")) or (self.match("!="))):
return True
else:
self.erro()
return False
def expressao_soma(self):
if (self.termo()):
self.proximoToken()
continua = self.soma()
if (continua):
while (continua):
self.proximoToken()
if not(self.termo()):
self.erro()
return False
self.proximoToken()
continua = self.soma()
return True
else:
return True
else:
self.erro()
return False
def soma(self):
if ((self.match("+")) or (self.match("-"))):
return True
else:
self.erro()
return False
def termo(self):
if (self.fator()):
indice = self.posicaoAtual
self.proximoToken()
continua = self.mult()
if (continua):
indice1 = self.posicaoAtual
while (continua):
indice = self.posicaoAtual
self.proximoToken()
if not(self.fator()):
self.erro()
return False
self.proximoToken()
continua = self.mult()
self.voltaPosicao(indice + 1)
return True
else:
self.voltaPosicao(indice)
return True
else:
self.erro()
return False
def mult(self):
if ((self.match("*")) or (self.match("/"))):
return True
else:
self.erro()
return False
def fator(self):
indice = self.posicaoAtual
if (self.match("(")):
self.proximoToken()
if (self.expressao()):
if (self.match(")")):
return True
else:
self.erro()
return False
else:
self.erro()
return False
if (self.ativacao()):
return True
self.voltaPosicao(indice)
if (self.var()):
return True
self.voltaPosicao(indice)
if (self.num()):
return True
self.voltaPosicao(indice)
if (self.num_int()):
return True
self.erro()
return False
def ativacao(self):
if (self.ident()):
self.proximoToken()
if (self.match("(")):
self.proximoToken()
if (self.match(")")):
return True
elif (self.args()):
if (self.match(")")):
return True
else:
self.erro()
return False
else:
self.erro()
return False
else:
self.erro()
return False
else:
self.erro()
return False
def args(self):
return self.arg_lista()
def arg_lista(self):
if (self.expressao()):
while (self.match(",")):
self.proximoToken()
if not(self.expressao()):
self.erro()
return False
return True
else:
self.erro()
return False
def num(self):
numero = self.tokenAtual
self.tokenAtual = numero[0]
if ((self.match("+")) or (self.match("-")) or (self.digito())):
i = 1
if not(self.digito()):
self.tokenAtual = numero[1]
i += 1
indiceTabela = int(self.tokens[self.posicaoAtual - 1].valor - 1)
if (self.digito()):
aux = True
while (i < len(numero)):
self.tokenAtual = numero[i]
if not(self.digito()):
                        # NOTE: this branch should be unreachable
aux = False
i += 1
indice = self.posicaoAtual
self.proximoToken()
if (self.match(".")):
mantissa = [0]
indiceMantissa = [0]
if (self.verificaPontoFlutuante(indiceMantissa, mantissa)):
self.tabelaDeSimbolos[indiceTabela][1] = numero + "." + mantissa[0]
self.paraExcluir.append(indiceMantissa[0])
return True
self.erro()
return False
elif (self.match("E")):
expoente = [0]
indiceExpoente = [0]
if (self.verificaExpoente(indiceExpoente, expoente)):
self.tabelaDeSimbolos[indiceTabela][1] = numero + "E" + expoente[0]
self.paraExcluir.append(indiceExpoente[0])
return True
self.erro()
return False
else:
self.voltaPosicao(indice)
return aux
else:
self.erro()
return False
else:
self.erro()
return False
def num_int(self):
i = 1
numero = self.tokenAtual
self.tokenAtual = numero[0]
if (self.digito()):
aux = True
while (i < len(numero)):
self.tokenAtual = numero[i]
if not(self.digito()):
aux = False
i += 1
return aux
else:
self.erro()
return False
    def digito(self):
        # single decimal digit 0-9
        if (self.tokenAtual in ("0", "1", "2", "3", "4", "5", "6", "7", "8", "9")):
            return True
        else:
            self.erro()
            return False
def ident(self):
i = 1
palavra = self.tokenAtual
palavra = str(palavra)
self.tokenAtual = palavra[0]
if (self.letra()):
aux = True
while (i < len(palavra)):
self.tokenAtual = palavra[i]
if not((self.letra()) or (self.digito())):
aux = False
i += 1
return aux
else:
self.erro()
return False
    def letra(self):
        # single lowercase letter a-z
        if (self.tokenAtual in tuple("abcdefghijklmnopqrstuvwxyz")):
            return True
        else:
            self.erro()
            return False
def abre_chave(self):
if not(self.match("{")):
self.erro()
return False
return True
def fecha_chave(self):
if not(self.match("}")):
self.erro()
return False
return True
def abre_colchete(self):
if not(self.match("[")):
self.erro()
return False
return True
def fecha_colchete(self):
if not(self.match("]")):
self.erro()
return False
return True
|
from operator import itemgetter
from collections import OrderedDict
from matplotlib import font_manager, rc, style
import requests, io, json
import matplotlib.pyplot as plt
import numpy as np
links = []
sourcePath = "/home/hanjung/intern/files/"
f = open("links_cpp.txt", "r")
# get the links that were used by hashing.py
for i in f:
translation_table = dict.fromkeys(map(ord, '\n'), None)
i = i.translate(translation_table)
links.append(i)
f.close()
result = [] # results of info from iotcuber
opensource = []
hcnt=0
f_1 = open("IOTCUBE_1.txt", "r")
f_2 = open("IOTCUBE_2.txt", "r")
odd = 0
for i in links:
odd += 1
if (odd <= 300):
source = sourcePath + i.split('/')[4]
name = i.split('/')[4]
opensource.append({'name':name})
for r in f_1:
r = r.replace('\'', '\"')
result.append(json.loads(r))
odd = 0
for r in f_2:
r = r.replace('\'', '\"')
result.append(json.loads(r))
f_1.close()
f_2.close()
cwe_list = {}
cve_list = {}
cve_high = []
cve_middle = []
cve_low = []
index = 0
for js in result: # processing the result
count = js[0]['total_cve']
opensource[index]["high"] = {}
opensource[index]["middle"] = {}
opensource[index]["low"] = {}
num_cve = 0
for j in range(1,count+1):
cve = js[j]['cveid']
cwe = js[j]['cwe']
cvss = float(js[j]['cvss'])
if cvss < 4:
vul = "low"
cve_low.append(cve)
cve = "l_" + cve
elif cvss < 7:
vul = "middle"
cve_middle.append(cve)
cve = "m_" + cve
else:
vul = "high"
cve_high.append(cve)
cve = "h_" + cve
if cve not in opensource[index][vul]:
opensource[index][vul][cve] = 1
num_cve += 1
else:
opensource[index][vul][cve] += 1
if cwe not in cwe_list:
cwe_list[cwe] = [cve]
else:
if cve not in cwe_list[cwe]:
cwe_list[cwe].append(cve)
if cve not in cve_list:
cve_list[cve] = [1, opensource[index]['name']]
else:
if opensource[index]['name'] not in cve_list[cve]:
cve_list[cve][0] += 1
cve_list[cve].append(opensource[index]['name'])
cve_high = list(dict.fromkeys(cve_high))
cve_middle = list(dict.fromkeys(cve_middle))
cve_low = list(dict.fromkeys(cve_low))
opensource[index]['total_cve'] = num_cve
index += 1
# make the result file
opensource = sorted(opensource, key=itemgetter("total_cve"), reverse = True)
o_text = ''
for element in opensource:
o_text += "open source: " + element['name'] + "\n" + "\t" + "detected CVEs : " + str(element['total_cve']) + "\n"
for vul in element:
if vul in ('name', 'total_cve'):
continue
o_text += "\t[" + vul + "]\n"
for cve in element[vul]:
o_text += "\t\t" + cve + " : " + str(element[vul][cve]) + "\n"
o_text += "\n"
f = open('opensource_cpp.txt', 'w')
f.write(o_text)
f.close()
cve_list = dict(OrderedDict(sorted(cve_list.items(), key=lambda kv: kv[1][0], reverse = True)))
c_text = ''
for element in cve_list:
c_text += element + "\n"
for source in cve_list[element]:
if type(source) == int:
temp = "\t ->" + "appearance : " + str(source) + "\n"
else:
c_text += "\t" + source + "\n"
c_text += temp
c_text += "\n"
f = open('cve_list_cpp.txt', 'w')
f.write(c_text)
f.close()
|
# Generated by Django 2.2.4 on 2019-08-31 01:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0002_auto_20190831_0039'),
]
operations = [
migrations.AddField(
model_name='video',
name='filterpath',
field=models.FileField(blank=True, upload_to=''),
),
migrations.AlterField(
model_name='video',
name='pub_date',
field=models.DateTimeField(auto_now_add=True),
),
]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import requests
import time
from datetime import datetime
BITCOIN_PRICE_THRESHOLD = 10000
BITCOIN_API_URL = 'https://api.coinmarketcap.com/v1/ticker/bitcoin/'
IFTTT_WEBHOOKS_URL = 'https://maker.ifttt.com/trigger/{}/with/key/cMCIlvY3aJFHAY6C1bllqDBbPev0cIPfg5QXEWIuXr5'
def get_latest_bitcoin_price():
response = requests.get(BITCOIN_API_URL)
response_json = response.json()
    return float(response_json[0]['price_usd'])  # convert the price to a float
def post_ifttt_webhook(event, value):
data = {'value1': value}
    ifttt_event_url = IFTTT_WEBHOOKS_URL.format(event)  # insert the desired event name
    requests.post(ifttt_event_url, json=data)  # send an HTTP POST request to the webhook URL
def format_bitcoin_history(bitcoin_history):
rows = []
for bitcoin_price in bitcoin_history:
        date = bitcoin_price['date'].strftime('%d.%m.%Y %H:%M')  # format the date as a string: '24.02.2018 15:09'
price = bitcoin_price['price']
        # the <b> tag makes the text bold
        row = '{}: $<b>{}</b>'.format(date, price)  # 24.02.2018 15:09: $<b>10123.4</b>
rows.append(row)
    # use the <br> tag to start a new line
return '<br>'.join(rows)
def main():
bitcoin_history = []
while True:
price = get_latest_bitcoin_price()
date = datetime.now()
bitcoin_history.append({'date': date, 'price': price})
        # send an urgent price notification
if price > BITCOIN_PRICE_THRESHOLD:
post_ifttt_webhook('bitcoin_price_emergency', price)
        # send a notification to Telegram
        if len(bitcoin_history) == 5:  # once bitcoin_history holds 5 entries, send an update
post_ifttt_webhook('bitcoin_price_update', format_bitcoin_history(bitcoin_history))
            # reset the history
bitcoin_history = []
        time.sleep(3600)  # sleep for one hour (use a smaller value for testing)
if __name__ == '__main__':
main()
|
from django.db import models
from django.template.defaultfilters import slugify
from django.utils.html import strip_tags
from tinymce import HTMLField
from filebrowser.fields import FileBrowseField
# Create your models here.
class Page(models.Model):
is_published = models.BooleanField(default=False, help_text='Check the box to publish page.')
meta_title = models.CharField(max_length=100, unique=True, blank=False, help_text='Title that shows up in Google search.')
header_title = models.CharField(max_length=100, unique=True, blank=True, help_text='Title that shows on page. Should typically match meta title.')
meta_description = models.CharField(blank=True, max_length=250, help_text='Brief description that shows up in Google search. Approx. 160 characters.')
featured_image = FileBrowseField('Featured image', max_length=500, extensions=['.jpg',
'.jpeg',
'.gif',
'.png',
'.tif',
'.tiff'], blank=True, help_text='Image featured on page. Must be at least 1,000px X 1,000px')
show_cta = models.BooleanField(default=False, help_text='Check the box to show CTA.')
page_content = HTMLField('Content', blank=True)
page_slug = models.SlugField(max_length=255, blank=True, unique=True, help_text='Text that shows in URL. Will automatically populate when object is saved.')
custom_js = models.TextField(blank=True)
custom_css = models.TextField(blank=True)
def save(self, *args, **kwargs):
if not self.page_slug:
self.page_slug = slugify(self.meta_title)
if not self.meta_description:
self.meta_description = strip_tags(self.page_content[:230]) + '...'
if not self.header_title:
self.header_title = self.meta_title
super(Page, self).save(*args, **kwargs)
def __str__(self):
return self.meta_title
|
### importing modules
####################################################################################
import sys
import math
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.text import TextPath
from matplotlib.patches import PathPatch
from matplotlib.font_manager import FontProperties
### Functions used
####################################################################################
def aa_info(aa, x, y, yscale=1, ax=None):
text = aas[aa]
t = mpl.transforms.Affine2D().scale(1*globalscale, yscale*globalscale) + \
mpl.transforms.Affine2D().translate(x,y) + ax.transData
p = PathPatch(text, lw=0, fc=aa_clr[aa], transform=t)
if ax != None:
ax.add_artist(p)
return p
def log2(p):
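    # convention: treat 0 * log2(0) as 0 so absent residues contribute no
    # information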
if p == 0:
log_aa = 0
else:
log_aa = math.log2(p)
return(log_aa)
file_f = sys.argv[1]
file_r = open(file_f, "r")
fast_f = file_r.readlines()
seq_list = []
for line in fast_f:
if line[0] != ">":
seq_list.append(line.strip())
def score_order(score):
lst = len(score)
for i in range(0, lst):
for j in range(0, lst-i-1):
if (score[j][1] > score[j + 1][1]):
temp = score[j]
score[j]= score[j + 1]
score[j + 1]= temp
return score
### Fasta file handling and bit_score calculation
####################################################################################
rm_ls = ['','\n','\t']
for item in rm_ls:
while item in seq_list:
seq_list.remove(item)
aa_letters = "ACDEFGHIKLMNPQRSTWYVOU"
aa_scores = []
for i in range(0, len(seq_list[0])):
    # count each amino acid in column i of the alignment
    counts = {aa: 0 for aa in aa_letters}
    for data in seq_list:
        if data[i] in counts:
            counts[data[i]] += 1
    tot = sum(counts.values())
    probs = {aa: counts[aa] / tot for aa in aa_letters}
    # information content of the column in bits; the original formula uses
    # log2(4.0) (a DNA-alphabet constant) and that behavior is kept here
    info = math.log2(4.0) + sum(p * log2(p) for p in probs.values())
    score = [(aa, probs[aa] * info) for aa in aa_letters]
    aa_scores.append(score_order(score))
### plotting the bit_score for amino acids
####################################################################################
font_p = FontProperties(weight="bold")
globalscale = 1.35
aas = {"G" : TextPath((-0.3, 0), "G", size=1, prop=font_p),
"S" : TextPath((-0.3, 0), "S", size=1, prop=font_p),
"T" : TextPath((-0.3, 0), "T", size=1, prop=font_p),
"Y" : TextPath((-0.3, 0), "Y", size=1, prop=font_p),
"C" : TextPath((-0.3, 0), "C", size=1, prop=font_p),
"Q" : TextPath((-0.3, 0), "Q", size=1, prop=font_p),
"N" : TextPath((-0.3, 0), "N", size=1, prop=font_p),
"K" : TextPath((-0.3, 0), "K", size=1, prop=font_p),
"R" : TextPath((-0.3, 0), "R", size=1, prop=font_p),
"H" : TextPath((-0.3, 0), "H", size=1, prop=font_p),
"D" : TextPath((-0.3, 0), "D", size=1, prop=font_p),
"E" : TextPath((-0.3, 0), "E", size=1, prop=font_p),
"A" : TextPath((-0.3, 0), "A", size=1, prop=font_p),
"V" : TextPath((-0.3, 0), "V", size=1, prop=font_p),
"L" : TextPath((-0.3, 0), "L", size=1, prop=font_p),
"I" : TextPath((-0.3, 0), "I", size=1, prop=font_p),
"P" : TextPath((-0.3, 0), "P", size=1, prop=font_p),
"W" : TextPath((-0.3, 0), "W", size=1, prop=font_p),
"F" : TextPath((-0.3, 0), "F", size=1, prop=font_p),
"M" : TextPath((-0.3, 0), "M", size=1, prop=font_p),
"U" : TextPath((-0.3, 0), "U", size=1, prop=font_p),
"O" : TextPath((-0.3, 0), "O", size=1, prop=font_p)}
aa_clr = {'C': 'limegreen', 'G': 'limegreen','S': 'limegreen','T': 'limegreen','Y': 'limegreen',
'Q': 'purple','N': 'purple',
'K': 'blue','R': 'blue','H': 'blue',
'D': 'red','E': 'red',
'A':'black','V':'black','L':'black','I':'black','P':'black','W':'black','F':'black','M':'black',
'O':'yellow','U':'yellow'}
fig, ax = plt.subplots(figsize=(10,3))
x = 1
maxi = 0
for scores in aa_scores:
y = 0
for aa, score in scores:
aa_info(aa, x,y, score, ax)
y += score
x += 1
maxi = max(maxi, y)
plt.xticks(range(1,x))
plt.xlim((0, x))
plt.ylim((0, maxi))
plt.tight_layout()
plt.show()
|
#Map the column back to Gene
#Written by Ruiqi Zhong April 8, 2017
import pickle
import pandas
import glob
import csv
paths = glob.glob("KEGG_6k/*.pkl")
print(paths)
for path in paths:
gene_name = []
data = pickle.load(open(path,"rb"))
data = data[data.columns[[1]]]
for i in range(1,data.shape[0]):
gene_name.append(data[1][i])
out = csv.writer(open("./name_maps_kegg/"+path.split('/')[1].split('.')[0]+"_name_map.csv","w"), delimiter=',',quoting=csv.QUOTE_ALL)
out.writerow(gene_name)
#print(data.to_csv(sep=' ',index=False))
|
import torch
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import argparse
import os
import sys

from models import *
from utils import load_pretrained_net, fetch_target, fetch_nearest_poison_bases, fetch_poison_bases
from trainer import make_convex_polytope_poisons, train_network_with_poison
class Logger(object):
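    # Tee everything written to stdout into a log file while still echoing it
    # to the terminal.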
def __init__(self, path):
self.terminal = sys.stdout
self.log = open(path, "a+")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
# this flush method is needed for python 3 compatibility.
# this handles the flush command by doing nothing.
# you might want to specify some extra behavior here.
pass
if __name__ == '__main__':
# ======== arg parser =================================================
parser = argparse.ArgumentParser(description='PyTorch Poison Attack')
parser.add_argument('--gpu', default='0', type=str)
# The substitute models and the victim models
    parser.add_argument('--end2end', default=False, choices=[True, False],
                        type=lambda v: str(v).lower() in ('true', '1', 'yes'),
                        help="Whether to consider an end-to-end victim")
parser.add_argument('--substitute-nets', default=['ResNet50', 'ResNet18'], nargs="+", required=False)
parser.add_argument('--target-net', default=["DenseNet121"], nargs="+", type=str)
parser.add_argument('--model-resume-path', default='models-chks-release', type=str,
help="Path to the pre-trained models")
parser.add_argument('--net-repeat', default=1, type=int)
parser.add_argument("--subs-chk-name", default=['ckpt-%s-4800.t7'], nargs="+", type=str)
parser.add_argument("--test-chk-name", default='ckpt-%s-4800.t7', type=str)
parser.add_argument('--subs-dp', default=[0], nargs="+", type=float,
help='Dropout for the substitute nets, will be turned on for both training and testing')
# Parameters for poisons
parser.add_argument('--target-dset', default='cifar10', choices=['cifar10', '102flowers'])
parser.add_argument('--target-label', default=6, type=int)
parser.add_argument('--target-index', default=1, type=int,
help='index of the target sample')
parser.add_argument('--poison-label', '-plabel', default=8, type=int,
help='label of the poisons, or the target label we want to classify into')
parser.add_argument('--poison-num', default=5, type=int,
help='number of poisons')
parser.add_argument('--poison-lr', '-plr', default=4e-2, type=float,
help='learning rate for making poison')
parser.add_argument('--poison-momentum', '-pm', default=0.9, type=float,
help='momentum for making poison')
parser.add_argument('--poison-ites', default=4000, type=int,
help='iterations for making poison')
parser.add_argument('--poison-decay-ites', type=int, metavar='int', nargs="+", default=[])
parser.add_argument('--poison-decay-ratio', default=0.1, type=float)
parser.add_argument('--poison-epsilon', '-peps', default=0.1, type=float,
help='maximum deviation for each pixel')
parser.add_argument('--poison-opt', default='adam', type=str)
parser.add_argument('--nearest', default=False, action='store_true',
help="Whether to use the nearest images for crafting the poison")
parser.add_argument('--subset-group', default=0, type=int)
    parser.add_argument('--original-grad', default=True, choices=[True, False],
                        type=lambda v: str(v).lower() in ('true', '1', 'yes'))
parser.add_argument('--tol', default=1e-6, type=float)
# Parameters for re-training
parser.add_argument('--retrain-lr', '-rlr', default=0.1, type=float,
help='learning rate for retraining the model on poisoned dataset')
parser.add_argument('--retrain-opt', default='adam', type=str,
help='optimizer for retraining the attacked model')
parser.add_argument('--retrain-momentum', '-rm', default=0.9, type=float,
help='momentum for retraining the attacked model')
parser.add_argument('--lr-decay-epoch', default=[30, 45], nargs="+",
help='lr decay epoch for re-training')
parser.add_argument('--retrain-epochs', default=60, type=int)
parser.add_argument('--retrain-bsize', default=64, type=int)
parser.add_argument('--retrain-wd', default=0, type=float)
parser.add_argument('--num-per-class', default=50, type=int,
help='num of samples per class for re-training, or the poison dataset')
# Checkpoints and resuming
parser.add_argument('--chk-path', default='chk-black', type=str)
parser.add_argument('--chk-subdir', default='poisons', type=str)
parser.add_argument('--eval-poison-path', default='', type=str,
help="Path to the poison checkpoint you want to test")
parser.add_argument('--resume-poison-ite', default=0, type=int,
help="Will automatically match the poison checkpoint corresponding to this iteration "
"and resume training")
parser.add_argument('--train-data-path', default='datasets/CIFAR10_TRAIN_Split.pth', type=str,
help='path to the official datasets')
parser.add_argument('--dset-path', default='datasets', type=str,
help='path to the official datasets')
    parser.add_argument('--mode', default='convex', type=str,
                        help="if 'convex', run the convex polytope attack proposed by the paper; "
                             "otherwise run plain feature-mean shifting")
parser.add_argument('--device', default='cuda', type=str)
args = parser.parse_args()
# Set visible CUDA devices
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
cudnn.benchmark = True
# load the pre-trained models
sub_net_list = []
for n_chk, chk_name in enumerate(args.subs_chk_name):
for snet in args.substitute_nets:
if args.subs_dp[n_chk] > 0.0:
net = load_pretrained_net(snet, chk_name, model_chk_path=args.model_resume_path,
test_dp=args.subs_dp[n_chk])
elif args.subs_dp[n_chk] == 0.0:
net = load_pretrained_net(snet, chk_name, model_chk_path=args.model_resume_path)
else:
assert False
sub_net_list.append(net)
print("subs nets, effective num: {}".format(len(sub_net_list)))
print("Loading the victims networks")
targets_net = []
for tnet in args.target_net:
target_net = load_pretrained_net(tnet, args.test_chk_name, model_chk_path=args.model_resume_path)
targets_net.append(target_net)
cifar_mean = (0.4914, 0.4822, 0.4465)
cifar_std = (0.2023, 0.1994, 0.2010)
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(cifar_mean, cifar_std),
])
# Get the target image
if args.target_dset == 'cifar10':
target = fetch_target(args.target_label, args.target_index, 50, subset='others',
path=args.train_data_path, transforms=transform_test)
elif args.target_dset == '102flowers':
from utils import fetch_target_102flower_dset
target = fetch_target_102flower_dset(args.target_index, transforms)
if args.mode == 'mean':
chk_path = os.path.join(args.chk_path, 'mean')
else:
chk_path = os.path.join(args.chk_path, args.mode)
if args.net_repeat > 1:
chk_path = '{}-{}Repeat'.format(chk_path, args.net_repeat)
chk_path = os.path.join(chk_path, str(args.poison_ites))
chk_path = os.path.join(chk_path, str(args.target_index))
if not os.path.exists(chk_path):
os.makedirs(chk_path)
    sys.stdout = Logger('{}/log.txt'.format(chk_path))
# Load or craft the poison!
if args.eval_poison_path != "":
state_dict = torch.load(args.eval_poison_path)
poison_tuple_list, base_idx_list = state_dict['poison'], state_dict['idx']
print("=" * 100)
print("=" * 100)
print("Poisons loaded")
print("Now evaluating on the target nets")
t = 0
tt = 0
else:
print(args)
print("Path: {}".format(chk_path))
# Otherwise, we craft new poisons
if args.nearest:
base_tensor_list, base_idx_list = fetch_nearest_poison_bases(sub_net_list, target, args.poison_num,
args.poison_label, args.num_per_class,
'others',
args.train_data_path, transform_test)
else:
# just fetch the first poison_num samples
base_tensor_list, base_idx_list = fetch_poison_bases(args.poison_label, args.poison_num, subset='others',
path=args.train_data_path, transforms=transform_test)
base_tensor_list = [bt.to('cuda') for bt in base_tensor_list]
print("Selected base image indices: {}".format(base_idx_list))
if args.resume_poison_ite > 0:
state_dict = torch.load(os.path.join(chk_path, "poison_%05d.pth" % args.resume_poison_ite))
poison_tuple_list, base_idx_list = state_dict['poison'], state_dict['idx']
poison_init = [pt.to('cuda') for pt, _ in poison_tuple_list]
# re-direct the results to the resumed dir...
chk_path += '-resume'
if not os.path.exists(chk_path):
os.makedirs(chk_path)
else:
poison_init = base_tensor_list
import time
t = time.time()
poison_tuple_list = make_convex_polytope_poisons(sub_net_list, target_net, base_tensor_list,
target, device='cuda', opt_method=args.poison_opt,
lr=args.poison_lr, momentum=args.poison_momentum,
iterations=args.poison_ites, epsilon=args.poison_epsilon,
decay_ites=args.poison_decay_ites,
decay_ratio=args.poison_decay_ratio,
mean=torch.Tensor(cifar_mean).reshape(1, 3, 1, 1),
std=torch.Tensor(cifar_std).reshape(1, 3, 1, 1),
chk_path=chk_path, poison_idxes=base_idx_list,
poison_label=args.poison_label,
tol=args.tol,
end2end=args.end2end,
start_ite=args.resume_poison_ite,
poison_init=poison_init,
mode=args.mode,
net_repeat=args.net_repeat)
tt = time.time()
res = []
print("Evaluating against victims networks")
for tnet, tnet_name in zip(targets_net, args.target_net):
print(tnet_name)
pred = train_network_with_poison(tnet, target, poison_tuple_list, base_idx_list, chk_path, args,
save_state=False)
res.append(pred)
print("--------")
print("------SUMMARY------")
print("TIME ELAPSED (mins): {}".format(int((tt - t) / 60)))
print("TARGET INDEX: {}".format(args.target_index))
for tnet_name, r in zip(args.target_net, res):
print(tnet_name, int(r == args.poison_label))
|
import os
import getopt
from dataclasses import dataclass
from config import Config
@dataclass
class Arguments():
verbose: bool = False
help: bool = False
target_dir: str = None
branch: str = None
commit_range: str = None
config: Config = None
@staticmethod
def helptext():
return "USAGE: python main.py --target-dir=/path/to/your/code [--verbose] [--commit-range=rev1..rev2]"
@staticmethod
def parse(args):
config_filename = None
result = Arguments()
try:
opts, args = getopt.getopt(args,
"hv=",
["help",
"verbose=",
"conf-file=",
"target-dir=",
"commit-range="])
except getopt.GetoptError as e:
print(e)
return None
for o, a in opts:
if o in ("-v", "--verbose"):
result.verbose = (a != "no")
elif o in ("-h", "--help"):
result.help = True
elif o in ("--conf-file"):
config_filename = a
elif o in ("--target-dir"):
result.target_dir = a
elif o in ("--commit-range"):
result.commit_range = a if a else None
if result.target_dir:
if not config_filename:
config_filename = ".devsecops-ci"
conf_file = os.path.join(result.target_dir, config_filename)
result.config = Config.load(conf_file)
if result.config is None:
result.config = Config()
return result
|
n = int(input())
c, r, s = 0, 0, 0
for i in range(0, n):
q, e = input().split(' ')
if e == 'C':
c += int(q)
elif e == 'R':
r += int(q)
elif e == 'S':
s += int(q)
print('Total: {} cobaias'.format(c + r + s))
print('Total de coelhos: {}'.format(c))
print('Total de ratos: {}'.format(r))
print('Total de sapos: {}'.format(s))
print('Percentual de coelhos: {:.2f} %'.format(c / (c + r + s) * 100))
print('Percentual de ratos: {:.2f} %'.format(r / (c + r + s) * 100))
print('Percentual de sapos: {:.2f} %'.format(s / (c + r + s) * 100))
|
import numpy as np
import re
regex = 'position=<[ ]*(?P<x>\-*\d+),[ ]*(?P<y>\-*\d+)> velocity=<[ ]*(?P<vx>\-*\d+),[ ]*(?P<vy>\-*\d+)>'
p = re.compile(regex)
data = []
n_points = 0
with open('input.txt') as f:
for l in f:
match = p.match(l.strip()).groupdict()
data.append(
list(map(int, [match['x'], match['y'], match['vx'], match['vy']])))
n_points += 1
points = np.zeros((n_points, 2))
velocity = np.zeros((n_points, 2))
for i, [x, y, vx, vy] in enumerate(data):
points[i][0] = x
points[i][1] = y
velocity[i][0] = vx
velocity[i][1] = vy
def get_size(points):
min_ = points.min(0)
max_ = points.max(0)
return (max_ - min_).prod()
def draw(points):
    min_ = points.min(0)
    max_ = points.max(0)
    points -= min_
    diff = max_ - min_ + 1
    grid = [['_' for _ in range(int(diff[0]))] for _ in range(int(diff[1]))]
    for i in range(n_points):
        grid[int(points[i][1])][int(points[i][0])] = '#'
    print('\n'.join([''.join(row) for row in grid]))
min_size = get_size(points)
min_points = points.copy()
for i in range(20000):
points += velocity
s = get_size(points)
if s < min_size:
min_size = s
min_points = points.copy()
print(i, min_size)
draw(min_points)
|
import itertools
import random
import sys
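# Toivonen's algorithm for frequent itemsets:
#   1. sample_pass: mine a ~50% sample of the baskets at a lowered support
#      threshold (0.9 * s * sample_fraction) and also collect the negative
#      border (itemsets just outside the frequent ones in the sample).
#   2. whole_pass: count both collections over the full file; if any
#      negative-border itemset turns out frequent there, the sample was
#      unlucky and the whole run restarts with a fresh sample.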
def frequentsets(k,prev,s,f):
if k==1:
dsingles={}
f=open("new_toivonen.txt")
for line in f:
line=line.rstrip()
items=line.split(',')
items.sort()
for item in items:
if item in dsingles:
dsingles[item]=dsingles[item]+1
else:
dsingles[item]=1
fitemsets1=[]
negative_border1=[]
for item in dsingles:
if dsingles[item]>=s:
fitemsets1.append(item)
else:
negative_border1.append(item)
negative_border1.sort()
fitemsets1.sort()
candidatepairs=[]
i=0
while i<(len(fitemsets1)-1):
j=i+1
while j<len(fitemsets1):
candidatepairs.append([fitemsets1[i],fitemsets1[j]])
j=j+1
i=i+1
negative_border=[]
pairs={i:0 for i in range(0,len(candidatepairs))}
f=open("new_toivonen.txt")
for line in f:
line=line.rstrip()
items=line.split(',')
items.sort()
p=itertools.combinations(items,2)
for item in p:
item=list(item)
count=0
for j in item:
if j in fitemsets1:
count=count+1
if count==2:
if item not in negative_border:
negative_border.append(item)
for index,k in enumerate(candidatepairs):
if item==k:
pairs[index]=pairs[index]+1
break
negative_border.sort()
fpairs=[]
for item in pairs:
if pairs[item]>=s:
fpairs.append(candidatepairs[item])
fpairs.sort()
neg_bor2=[]
for item in negative_border:
if item not in fpairs:
neg_bor2.append(item)
return fitemsets1,fpairs, negative_border1, neg_bor2
candidateitems=[]
f=open("new_toivonen.txt")
for line in f:
line=line.rstrip()
items=line.split(',')
items.sort()
trip=itertools.combinations(items,k)
for o in trip:
o=list(o)
pai=itertools.combinations(o,k-1)
count=0
for u in pai:
u=list(u)
if u in prev:
count=count+1
if count==k:
if o not in candidateitems:
candidateitems.append(o)
ditemsets={i:0 for i in range(len(candidateitems))}
f=open("new_toivonen.txt")
for line in f:
line=line.rstrip()
items=line.split(',')
items.sort()
trip=itertools.combinations(items,k)
for o in trip:
o=list(o)
for index,i in enumerate(candidateitems):
if o==i:
ditemsets[index]=ditemsets[index]+1
break
fitems=[]
ng_br=[]
for item in ditemsets:
if ditemsets[item]>=s:
fitems.append(candidateitems[item])
else:
ng_br.append(candidateitems[item])
fitems.sort()
ng_br.sort()
return fitems,ng_br
def sample_pass(s):
prev=[]
k=1
s=float(s)
size=0.5
f=open(sys.argv[1])
file1=open("new_toivonen.txt",'w')
countlines=0
d={}
for line in f:
d[countlines]=line
countlines=countlines+1
aman_items=[i for i in range(countlines)]
random.shuffle(aman_items)
for i in range(int(countlines*size)):
file1.write(d[aman_items[i]])
file1.close()
ori_items={}
neg_items={}
if k==1 and prev==[]:
res=frequentsets(k,prev,int(s*size*0.9),f)
prev=res[1]
singles=res[0]
if prev==[] and singles==[]:
return None
neg1=res[2]
neg2=res[3]
ori_items[1]=singles
neg_items[1]=neg1
ori_items[2]=prev
neg_items[2]=neg2
k=3
while prev!=[]:
res=frequentsets(k,prev,int(s*size*0.9),f)
prev=res[0]
ori_items[k]=prev
neg_items[k]=res[1]
k=k+1
return ori_items,neg_items
def whole_pass(dicts,s):
fitems=dicts[0]
neg_bord=dicts[1]
fitems_list=[]
neg_bord_list=[]
for item in fitems:
fitems_list.extend(fitems[item])
for item in neg_bord:
neg_bord_list.extend(neg_bord[item])
fitems_dict={i:0 for i in range(len(fitems_list))}
fneg_dict={i:0 for i in range(len(neg_bord_list))}
f=open(sys.argv[1])
for line in f:
line=line.rstrip()
items=line.split(',')
items.sort()
for k in fitems:
trip=itertools.combinations(items,k)
for o in trip:
o=list(o)
for index,q in enumerate(fitems_list):
if o==list(q):
fitems_dict[index]=fitems_dict[index]+1
break
trip=itertools.combinations(items,k)
for o in trip:
o=list(o)
for index,q in enumerate(neg_bord_list):
if o==list(q):
fneg_dict[index]=fneg_dict[index]+1
break
freq_itemsets=[]
for item in fitems_dict:
if fitems_dict[item]>=s:
freq_itemsets.append(fitems_list[item])
for item in fneg_dict:
if fneg_dict[item]>=s:
a=[]
return 0,a
return 1,freq_itemsets
iterations=0
def toivonen():
s=int(sys.argv[2])
size_perc=0.5
global iterations
iterations=iterations+1
dicts=sample_pass(s)
    if dicts is None:
return toivonen()
a,freq=whole_pass(dicts,s)
if a==1:
sys.stdout.write(str(iterations)+"\n")
sys.stdout.write(str(size_perc)+"\n")
max_len = max(map(len, freq))
res = {i:[] for i in range(1, max_len + 1)}
for item in freq:
if len(item) == 1:
res[len(item)].append([item])
else:
res[len(item)].append(item)
for item in res:
sys.stdout.write(str(res[item])+"\n")
# sys.stdout.write(str(freq)+"\n")
return
    else:
        return toivonen()
toivonen()
|
# encoding: utf-8
'''
@Version: V1.0
@Author: JE2Se
@Contact: admin@je2se.com
@Website: https://www.je2se.com
@Github: https://github.com/JE2Se/
@Time: 2020/6/10 12:39
@File: StrutsScan.py
@Desc:
'''
from lib.ModelLoad import ONLoad
from lib import *
import os
import logging
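# Collects every exploit module under ./exphub/struts2/, then dynamically
# imports and invokes each one against the target URL.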
dlist = []
# File traversal
def StrutsScan(url):
    for file in os.listdir("./exphub/struts2/"):
        name, ext = os.path.splitext(file)
        if ext == '.py' and file not in ("__init__.py", "StrutsScan.py"):
            dlist.append(name)
ONLoad(dlist)
try:
for defclass in dlist:
            print(Vcolors.OKGREEN + "[?] Running the " + defclass + " detection script.......\r" + Vcolors.ENDC)
exec("from exphub.struts2.{0} import {1}".format(defclass, defclass))
defclass += "(url)"
exec(defclass)
    except Exception:
        logging.exception("A StrutsScan exploit script raised an exception")
|
#!/bin/python3
# Example trace for [2, 3, 4, 1, 5] (3 swaps):
# 23415
# 13425 ++
# 12435 ++
# 12345 ++
# Complete the minimumSwaps function below.
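# Strategy: temp[val] stores the current index of val, so every misplaced
# element can be swapped straight into its final slot; each swap fixes at
# least one position, which yields the minimum number of swaps in O(n).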
def minimumSwaps(arr):
    temp = [0] * (len(arr) + 1)
    for pos, val in enumerate(arr):
        temp[val] = pos
    count = 0
    for i in range(len(arr)):
        if arr[i] != i+1:
            count += 1
            t = arr[i]  # t is the value sitting in the wrong position
            arr[i] = i+1  # place the correct value at the current position
            arr[temp[i+1]] = t  # move the wrong value to where the correct one was: a plain swap (e.g. 21 -> 12)
            temp[t] = temp[i+1]  # record the misplaced value's new index, i.e. the correct value's old slot
    return count
if __name__ == '__main__':
arr = [4, 3, 1, 2]
res = minimumSwaps(arr)
print(res)
|
import flask
app = flask.Flask(__name__)
app.config["DEBUG"] = True
SITES = [18, 43, 8, 12, 73, 76, 92]
IFRAME = ("<iframe src='https://airtw.epa.gov.tw/AirQuality_APIs/WebWidget.aspx?site={}' "
          "width='320px' height='380px' scrolling='yes'></iframe>")
@app.route('/', methods=['GET'])
def home():
    return ''.join(IFRAME.format(site) for site in SITES)
app.run()
|
import time
from selenium import webdriver
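# Note: the positional driver path and the find_element_by_* helpers used
# below are the Selenium 3.x API; recent Selenium 4 releases removed them
# in favour of Service objects and find_element(By.ID, ...).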
# Open the browser
browser = webdriver.Edge("./msedgedriver.exe")
# Navigate to Naver
browser.get("http://naver.com")
# Click the login button
elem = browser.find_element_by_class_name("link_login")
elem.click()
time.sleep(3)
# Log in (placeholder credentials)
browser.find_element_by_id("id").send_keys("naverID")
browser.find_element_by_id("pw").send_keys("password")
browser.find_element_by_id("log.login").click()
time.sleep(1)
browser.find_element_by_id("id").clear()
browser.find_element_by_id("id").send_keys("another_Id")
print(browser.page_source)
time.sleep(2)
browser.quit()
|
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
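        """Sliding window: grow the window rightward and, when s[r] repeats,
        shrink it from the left until the earlier occurrence is dropped."""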
        result = 0
        l = 0
        seen = set()
        for r in range(len(s)):
            if s[r] in seen:
                while l < r:
                    if s[l] == s[r]:
                        l += 1
                        break
                    seen.remove(s[l])
                    l += 1
            seen.add(s[r])
            result = max(result, r - l + 1)
        return result
|
from django import forms
class DateInput(forms.DateInput):
input_type = 'date'
class SearchForm(forms.Form):
    start_date = forms.DateField(
        label='Statistics from', widget=DateInput())
    end_date = forms.DateField(
        label='Statistics to', widget=DateInput())
|