text stringlengths 8 6.05M |
|---|
import cv2
import sys
import math
import numpy as np
# Number of grey levels (histogram bins) per window.
grey_levels = 256

# Load the image as greyscale (flag 0) and shrink to 70% in both axes.
orig = cv2.imread('vliegtuig1.jpg', 0)
orig = cv2.resize(orig, (0, 0), fx=0.7, fy=0.7)
# All-zero border widths make this an explicit copy of the image.
uni = cv2.copyMakeBorder(orig, 0, 0, 0, 0, cv2.BORDER_REPLICATE)

# Define the window size (16x16px)
windowsize_r = 16
windowsize_c = 16

# Crop out each window and calculate its histogram.
# BUG FIX: "+ 1" keeps the final full window; the original bound
# range(0, dim - size, size) silently dropped the last tile when the
# image dimension was an exact multiple of the window size.
histograms = {}
for r in range(0, uni.shape[0] - windowsize_r + 1, windowsize_r):
    for c in range(0, uni.shape[1] - windowsize_c + 1, windowsize_c):
        window = uni[r:r + windowsize_r, c:c + windowsize_c]
        hist = np.histogram(window, bins=grey_levels)
        # Keep the result instead of discarding it on every iteration.
        histograms[(r, c)] = hist
|
#!/usr/bin/python2
#-*- coding: utf-8 -*-
from __future__ import unicode_literals
import MySQLdb
import codecs
PARTICIPANTS_CSV = "logins-passwords.csv"
def execute_cursor(cursor, query, *args):
    """Format `query` with the positional `args` and execute it on `cursor`.

    NOTE(review): values are spliced into the SQL text with str.format,
    which is open to SQL injection; prefer cursor.execute(query, params)
    with driver-side placeholders for untrusted input.
    """
    # Renamed the misleading `*kwargs` (these are positional varargs, not
    # keyword arguments) to the conventional `*args`.
    formatted_query = query.format(*args)
    cursor.execute(formatted_query)
def register_in_mysql(userlist):
    """Insert each user's credentials and contest registrations into the ejudge DB.

    Only logins whose numeric suffix (the part after '-') lies in 1..18 are
    processed; all others are skipped. Python 2 only (uses xrange).
    """
    db = MySQLdb.connect(host="localhost",
                         user="ejudge",
                         passwd="password",  # NOTE(review): hard-coded credential
                         db="ejudge",
                         charset="utf8",
                         use_unicode=True)
    db.set_character_set('utf8')
    cur = db.cursor()
    for user in userlist:
        # Skip logins outside groups 1..18 (login format "<prefix>-<number>").
        if int(user['login'].split('-')[1]) not in xrange(1, 19):
            continue
        execute_cursor(cur, u'INSERT INTO logins (login, password) VALUES ("{}", "{}");',
                       user['login'], user['password'])
        user_id = cur.lastrowid
        # Contest 5: plain registration with no flags.
        execute_cursor(cur, u'INSERT INTO cntsregs (user_id, contest_id) VALUES ({}, {});', user_id, 5)
        # Contests 6 and 7: `locked` is flipped depending on whether the login
        # belongs to the "f9-" group (locked in exactly one of the two).
        execute_cursor(cur, u'INSERT INTO cntsregs (user_id, contest_id, incomplete, locked) VALUES ({}, {}, {}, {});',
                       user_id, 6, 1, 0 if user['login'].startswith('f9-') else 1)
        execute_cursor(cur, u'INSERT INTO cntsregs (user_id, contest_id, incomplete, locked) VALUES ({}, {}, {}, {});',
                       user_id, 7, 1, 1 if user['login'].startswith('f9-') else 0)
    db.commit()
def load_user_csv():
    """Parse PARTICIPANTS_CSV into a list of {'login': ..., 'password': ...} dicts.

    Reading stops at the first line with fewer than two comma-separated
    fields (typically a trailing blank line), matching the original
    early-exit behaviour.
    """
    userlist = []
    # BUG FIX: the original never closed the file; `with` guarantees it.
    # Iterating the handle also avoids materialising readlines().
    with codecs.open(PARTICIPANTS_CSV, "r", encoding="utf-8") as f:
        for line in f:
            data = line.strip().split(",")
            if len(data) <= 1:
                break
            userlist.append({'login': data[0], 'password': data[1]})
    return userlist
if __name__ == "__main__":
    # Guarded entry point: only hit the database when run as a script,
    # not when this module is imported.
    users = load_user_csv()
    register_in_mysql(users)
|
# --- Santatron email configuration ------------------------------------------
# Credentials for the account that sends santatron emails (gmail works well).
ORIGIN_ADDRESS = "santa@gmail.com"
ORIGIN_PASSWORD = "cookies"

# In test mode every email goes to this address instead of the configured
# participant addresses (for dry runs).
TEST_EMAIL = "santa@northpole.com"

# Subject line of the invitation email.
INVITATION_SUBJECT = "SECRET SANTA INVITATION :) 01110011010101"

# Pieces interpolated into the invitation body below.
INVITATION_DATE_STRING = "DECEMBER 25TH, 1 A.D."
INVITATION_TIME_STRING = "12:00 A.M."
INVITATION_ADDRESS = "NORTH POLE"


def generate_msg(giver, receiver):
    """Build the invitation email body for `giver`, revealing their `receiver`."""
    # Return the rendered template directly instead of binding it to a local.
    return f"""
THIS MESSAGE IS AUTOMATICALLY GENERATED AND SENT BY SANTATRON 9002. PLEASE DO NOT REPLY :)
WELCOME TO YOU, HUMAN "{giver}". YOU ARE INVITED TO {INVITATION_ADDRESS} FOR A SECRET SANTA CHRISTMAS PARTY.
DATE: {INVITATION_DATE_STRING}
TIME: {INVITATION_TIME_STRING}
YOU WILL ACT AS "SECRET SANTA" FOR FELLOW HUMAN "{receiver}".
IF YOU FEEL THE NEED TO TRADE GIFTEES, PLEASE DO SO WITH CAUTION TO ENSURE NO ONE IS LEFT OUT IN THE 'COLD'. (HA! HA!)
SANTATRON IS ALWAYS WATCHING YOU. ;)
"""
from TvProgramBot.db.connection import getDb
from TvProgramBot.db.connectionMongo import getMongo
from TvProgramBot.db.connectionMemcache import getCache
from pymongo.objectid import ObjectId
def test_wiki():
    """Return the cached live-recommend wiki ids, refilling from Mongo on a miss.

    Returns the id list on success, False when nothing is available.
    """
    mc = getCache()
    wikis = mc.get("wiki_liverecommend")
    if not wikis:
        # Cache miss: rebuild the id list from the first 100 mongo documents.
        wikis = []
        mongo_db = getMongo().epg
        for doc in mongo_db.wiki_liverecommend.find().limit(100):
            wikis.append(doc['wiki_id'])
    if wikis:
        mc.set("wiki_liverecommend", wikis)
        return wikis
    else:
        return False
if __name__ == "__main__":
print "start"
main() |
#!/usr/bin/env python3
from internals import station_control
if __name__ == "__main__":
    # Entry point: delegate everything to the station_control module.
    station_control.run()
|
import logging
from pylons import request, response, session, tmpl_context as c
from pylons.controllers.util import abort, redirect_to
from gwhiz.lib.base import BaseController, render
from authkit.authorize.pylons_adaptors import authorize
from authkit.permissions import RemoteUser, ValidAuthKitUser, UserIn
from pylons import h
# Module-level logger named after this module.
log = logging.getLogger(__name__)
class AuthController(BaseController):
    """Pylons controller demonstrating session counting and AuthKit authorization."""

    def index(self):
        # Return a rendered template
        #return render('/auth.mako')
        # or, return a response
        # Count visits in the session under the 'GWHIZ' key.
        if 'GWHIZ' in session:
            session['GWHIZ'] += 1
            session.save()
            # NOTE(review): this returns a tuple, not a string; most WSGI
            # stacks expect a string response body here -- confirm intent.
            return ('gwhizfound', session['GWHIZ'])
        else:
            session['GWHIZ'] = 1
            session.save()
            # Joins the session's keys with '<p>' separators.
            return '<p>'.join(session)

    @authorize(ValidAuthKitUser())
    #@authorize(UserIn(["visitor"]))
    def private(self):
        # Only reachable for a valid AuthKit user (decorator above).
        return "You are authenticated! " + h.link_to('signout','/auth/signout')

    def private_manual(self):
        # Manual check of the WSGI REMOTE_USER variable instead of a decorator.
        if request.environ.get("REMOTE_USER"):
            return "You are authenticated!"
        else:
            response.status = "401 Not authenticated"
            return "You are not authenticated"

    def signout(self):
        # Message shown after signing out.
        return "Successfully signed out!"
|
from threading import Timer
from termcolor import colored
from builder import read_compose
# Pre-colored status labels for console output.
RUNNING = colored('RUNNING', 'green')
FAILED = colored('FAILED', 'red')

# Module list loaded from the compose file at import time.
modules = read_compose('/app/cyberhead-compose.yml')['modules']
def start_module(start, callback_time):
    """Run `start` once and reschedule it after the interval it returns.

    NOTE(review): the `callback_time` parameter is immediately overwritten by
    the value returned from start(), so the passed-in value is never used --
    confirm that is intended.
    """
    message, callback_time = start()
    # Re-run this function after `callback_time` seconds on a timer thread.
    timer = Timer(callback_time, start_module, [start, callback_time])
    timer.start()
    print(message, callback_time, start)
def start_modules():
    """Import and kick off every module listed in the compose file."""
    for module in modules:
        # NOTE(review): exec on a string built from config -- safe only if
        # the compose file is trusted. Rebinds `start` in module globals.
        exec('from cyberhead.' + module + ' import start', globals())
        # NOTE(review): start() runs here and again inside start_module(),
        # so each module is started twice on the first cycle -- confirm.
        message, callback_time = start()
        start_module(start, callback_time)
        print(message, callback_time, start)

# Kick everything off at import time.
start_modules()
|
from django.urls import path
from websites.views import WebsiteListView, WebsiteDetailView
# URL namespace for reversing, e.g. 'websites:website-list'.
app_name = 'websites'

urlpatterns = [
    # List of all websites at the app root.
    path('', WebsiteListView.as_view(), name='website-list'),
    # Detail page for a single website selected by primary key.
    path('<int:pk>', WebsiteDetailView.as_view(), name='website-detail'),
]
class PlaylistInformation():
    """Value object holding a playlist's title, description and visibility."""

    def __init__(self, title, description, is_public):
        # Attributes are stored under playlist_-prefixed names.
        self.playlist_title = title
        self.playlist_description = description
        self.playlist_is_public = is_public

    def get_title(self):
        """Return the playlist's title."""
        return self.playlist_title

    def get_description(self):
        """Return the playlist's description."""
        return self.playlist_description

    def get_publicy(self):
        """Return whether the playlist is public."""
        return self.playlist_is_public
# File Reading
# Input: string (full path to file)
#Output: string/text (full contents of a file)
from CONST import *
def GetFileContents(filePath):
    """Return the full text contents of the UTF-8 file at `filePath`."""
    # BUG FIX: the original never closed the handle; `with` guarantees it.
    with open(filePath, encoding='utf-8') as f:
        # read() replaces the readlines() + join round trip with one call.
        return f.read()
if __name__ == '__main__':
    # Standalone Test
    # rootDir comes from CONST (the star import at the top of the file).
    filePath = rootDir + '1.txt'
    contents = GetFileContents(filePath)
    print(contents)
#!/usr/bin/env python
import rospy
import bluetooth
from std_msgs.msg import Int16MultiArray, Int32
import smach
import smach_ros
import random
class Utils():
    """Shared ROS plumbing for the animation state machine.

    Subscribes to the animation feedback topic, publishes motion/video ids,
    and tracks the transition timing between animations.
    """

    def __init__(self):
        rospy.init_node('ani_smach')
        rospy.Subscriber("animation_udp/rtc_feedback", Int16MultiArray, self.rtc_callback, queue_size=1)
        self.mid_pub = rospy.Publisher("animation_udp/motion_msg", Int32, queue_size=1)
        self.vid_pub = rospy.Publisher("animation_sm/bluetooth_vmsg", Int32, queue_size=1)
        # Periodic 1 Hz logging of the published state.
        rospy.Timer(rospy.Duration(1), self.plotting_callback)
        # Video ids published while transitioning between animations.
        self.video_trans = [1003, 1001, 1002, 1004]
        self.rate = 1/300.0          # sleep interval used by the state loops
        self.track_ani = 0           # feedback flag: tracking an animation
        self.in_ani = 0              # feedback flag: currently inside an animation
        self.ani_finished = 0        # feedback flag: animation just finished
        self.prev_ani_finished = 0   # previous value, for edge detection
        self.trans_time = 3.0        # seconds a transition is allowed to take
        self.t_trans_offset = 0      # timestamp when the transition began
        self.trans_begun = 0
        self.pub_motion_msg = 0      # last published ids (for logging only)
        self.pub_video_msg = 0
        self.now = rospy.get_time()
        rospy.loginfo("animation smach is init")

    def rtc_callback(self, msg):
        """Unpack the 3-element feedback array into the state flags."""
        self.track_ani = msg.data[0]
        self.in_ani = msg.data[1]
        self.ani_finished = msg.data[2]

    def make_decision_on_motion(self):
        """Classify motion state: "new_motion", "in_motion" or "wait_transit".

        BUG FIX: the original compared message ints with `is` / `is not`,
        which relies on CPython small-int interning; replaced with ==/!=.
        """
        if self.track_ani == 1:
            # Rising edge of ani_finished opens the transition window.
            if self.ani_finished == 1 and self.prev_ani_finished != 1 and self.trans_begun == 0:
                self.trans_begun = 1
                self.t_trans_offset = rospy.get_time()
            self.prev_ani_finished = self.ani_finished
            self.now = rospy.get_time()
            # Transition is complete once trans_time seconds have elapsed.
            if self.trans_begun == 1 and self.now > (self.t_trans_offset + self.trans_time):
                self.trans_begun = 0
                return "new_motion"
            else:
                return "in_motion"
        else:
            return "wait_transit"

    def make_decision_on_emotion(self):
        """Classify video state: "begin_emotion" while inside an animation."""
        if self.track_ani == 1:
            if self.in_ani:
                return "begin_emotion"
            else:
                return "in_transit"
        else:
            return "in_transit"

    def plotting_callback(self, event):
        """Timer callback: log the last published ids and the transition flags."""
        rospy.loginfo("[seq smach]: published motion msg is {}, video_msg is {}".format(self.pub_motion_msg, self.pub_video_msg))
        rospy.loginfo("[seq smach]: trans_begun:{}, waitingflag is:{}".format(self.trans_begun, self.now > (self.t_trans_offset + self.trans_time)))
##############################################################################################
##### State Machine ##########################################################################
##############################################################################################
class HappyState(smach.State):
    """Plays the 'happy' motion/video queue until the next transition.

    NOTE(review): all five emotion states carry identical
    motion_queue/video_queue values -- presumably placeholders to be
    specialised per emotion; confirm.
    """

    def __init__(self):
        smach.State.__init__(self,
                             outcomes=['return_to_InAni'],
                             input_keys=['holding_in'],
                             output_keys=['status_out'])
        self.motion_queue = [6,13]
        self.video_queue = [1003,1001]

    def execute(self, userdata):
        # Pick one motion, one video and one transition video at random.
        mid = random.randint(0,len(self.motion_queue)-1)
        vid = random.randint(0,len(self.video_queue)-1)
        trans_id = random.randint(0,len(utils.video_trans)-1)
        while not rospy.is_shutdown():
            curr_state = utils.make_decision_on_motion()
            if curr_state == "in_motion":
                # Keep republishing the chosen motion id.
                utils.pub_motion_msg = self.motion_queue[mid]
                utils.mid_pub.publish(self.motion_queue[mid])
                curr_vstate = utils.make_decision_on_emotion()
                if curr_vstate == "begin_emotion":
                    utils.pub_video_msg = self.video_queue[vid]
                    utils.vid_pub.publish(self.video_queue[vid])
                else:
                    # Outside an animation: publish the transition video.
                    utils.pub_video_msg = utils.video_trans[trans_id]
                    utils.vid_pub.publish(utils.video_trans[trans_id])
            elif curr_state == "wait_transit":
                if userdata.holding_in == 1:
                    # Holding: keep the current motion alive while waiting.
                    utils.pub_motion_msg = self.motion_queue[mid]
                    utils.mid_pub.publish(self.motion_queue[mid])
                else:
                    userdata.status_out = curr_state
                    return 'return_to_InAni'
            elif curr_state == "new_motion":
                userdata.status_out = curr_state
                return 'return_to_InAni'
            rospy.sleep(utils.rate)
class SadState(smach.State):
    """Plays the 'sad' motion/video queue until the next transition.

    Structure is identical to the other emotion states; see the class-level
    note on HappyState about the shared placeholder queues.
    """

    def __init__(self):
        smach.State.__init__(self,
                             outcomes=['return_to_InAni'],
                             input_keys=['holding_in'],
                             output_keys=['status_out'])
        self.motion_queue = [6,13]
        self.video_queue = [1003,1001]

    def execute(self, userdata):
        # Pick one motion, one video and one transition video at random.
        mid = random.randint(0,len(self.motion_queue)-1)
        vid = random.randint(0,len(self.video_queue)-1)
        trans_id = random.randint(0,len(utils.video_trans)-1)
        while not rospy.is_shutdown():
            curr_state = utils.make_decision_on_motion()
            if curr_state == "in_motion":
                utils.pub_motion_msg = self.motion_queue[mid]
                utils.mid_pub.publish(self.motion_queue[mid])
                curr_vstate = utils.make_decision_on_emotion()
                if curr_vstate == "begin_emotion":
                    utils.pub_video_msg = self.video_queue[vid]
                    utils.vid_pub.publish(self.video_queue[vid])
                else:
                    # Outside an animation: publish the transition video.
                    utils.pub_video_msg = utils.video_trans[trans_id]
                    utils.vid_pub.publish(utils.video_trans[trans_id])
            elif curr_state == "wait_transit":
                if userdata.holding_in == 1:
                    utils.pub_motion_msg = self.motion_queue[mid]
                    utils.mid_pub.publish(self.motion_queue[mid])
                else:
                    userdata.status_out = curr_state
                    return 'return_to_InAni'
            elif curr_state == "new_motion":
                userdata.status_out = curr_state
                return 'return_to_InAni'
            rospy.sleep(utils.rate)
class LookaroundState(smach.State):
    """Plays the 'lookaround' motion/video queue until the next transition.

    Structure mirrors the other emotion states (shared placeholder queues).
    """

    def __init__(self):
        smach.State.__init__(self,
                             outcomes=['return_to_InAni'],
                             input_keys=['holding_in'],
                             output_keys=['status_out'])
        self.motion_queue = [6,13]
        self.video_queue = [1003,1001]

    def execute(self, userdata):
        # Pick one motion, one video and one transition video at random.
        mid = random.randint(0,len(self.motion_queue)-1)
        vid = random.randint(0,len(self.video_queue)-1)
        trans_id = random.randint(0,len(utils.video_trans)-1)
        while not rospy.is_shutdown():
            curr_state = utils.make_decision_on_motion()
            if curr_state == "in_motion":
                utils.pub_motion_msg = self.motion_queue[mid]
                utils.mid_pub.publish(self.motion_queue[mid])
                curr_vstate = utils.make_decision_on_emotion()
                if curr_vstate == "begin_emotion":
                    utils.pub_video_msg = self.video_queue[vid]
                    utils.vid_pub.publish(self.video_queue[vid])
                else:
                    # Outside an animation: publish the transition video.
                    utils.pub_video_msg = utils.video_trans[trans_id]
                    utils.vid_pub.publish(utils.video_trans[trans_id])
            elif curr_state == "wait_transit":
                if userdata.holding_in == 1:
                    utils.pub_motion_msg = self.motion_queue[mid]
                    utils.mid_pub.publish(self.motion_queue[mid])
                else:
                    userdata.status_out = curr_state
                    return 'return_to_InAni'
            elif curr_state == "new_motion":
                userdata.status_out = curr_state
                return 'return_to_InAni'
            rospy.sleep(utils.rate)
class AngryState(smach.State):
    """Plays the 'angry' motion/video queue until the next transition.

    Structure mirrors the other emotion states (shared placeholder queues).
    """

    def __init__(self):
        smach.State.__init__(self,
                             outcomes=['return_to_InAni'],
                             input_keys=['holding_in'],
                             output_keys=['status_out'])
        self.motion_queue = [6,13]
        self.video_queue = [1003,1001]

    def execute(self, userdata):
        # Pick one motion, one video and one transition video at random.
        mid = random.randint(0,len(self.motion_queue)-1)
        vid = random.randint(0,len(self.video_queue)-1)
        trans_id = random.randint(0,len(utils.video_trans)-1)
        while not rospy.is_shutdown():
            curr_state = utils.make_decision_on_motion()
            if curr_state == "in_motion":
                utils.pub_motion_msg = self.motion_queue[mid]
                utils.mid_pub.publish(self.motion_queue[mid])
                curr_vstate = utils.make_decision_on_emotion()
                if curr_vstate == "begin_emotion":
                    utils.pub_video_msg = self.video_queue[vid]
                    utils.vid_pub.publish(self.video_queue[vid])
                else:
                    # Outside an animation: publish the transition video.
                    utils.pub_video_msg = utils.video_trans[trans_id]
                    utils.vid_pub.publish(utils.video_trans[trans_id])
            elif curr_state == "wait_transit":
                if userdata.holding_in == 1:
                    utils.pub_motion_msg = self.motion_queue[mid]
                    utils.mid_pub.publish(self.motion_queue[mid])
                else:
                    userdata.status_out = curr_state
                    return 'return_to_InAni'
            elif curr_state == "new_motion":
                userdata.status_out = curr_state
                return 'return_to_InAni'
            rospy.sleep(utils.rate)
class ConcernedState(smach.State):
    """Plays the 'concerned' motion/video queue until the next transition.

    Structure mirrors the other emotion states (shared placeholder queues).
    """

    def __init__(self):
        smach.State.__init__(self,
                             outcomes=['return_to_InAni'],
                             input_keys=['holding_in'],
                             output_keys=['status_out'])
        self.motion_queue = [6,13]
        self.video_queue = [1003,1001]

    def execute(self, userdata):
        # Pick one motion, one video and one transition video at random.
        mid = random.randint(0,len(self.motion_queue)-1)
        vid = random.randint(0,len(self.video_queue)-1)
        trans_id = random.randint(0,len(utils.video_trans)-1)
        while not rospy.is_shutdown():
            curr_state = utils.make_decision_on_motion()
            if curr_state == "in_motion":
                utils.pub_motion_msg = self.motion_queue[mid]
                utils.mid_pub.publish(self.motion_queue[mid])
                curr_vstate = utils.make_decision_on_emotion()
                if curr_vstate == "begin_emotion":
                    utils.pub_video_msg = self.video_queue[vid]
                    utils.vid_pub.publish(self.video_queue[vid])
                else:
                    # Outside an animation: publish the transition video.
                    utils.pub_video_msg = utils.video_trans[trans_id]
                    utils.vid_pub.publish(utils.video_trans[trans_id])
            elif curr_state == "wait_transit":
                if userdata.holding_in == 1:
                    utils.pub_motion_msg = self.motion_queue[mid]
                    utils.mid_pub.publish(self.motion_queue[mid])
                else:
                    userdata.status_out = curr_state
                    return 'return_to_InAni'
            elif curr_state == "new_motion":
                userdata.status_out = curr_state
                return 'return_to_InAni'
            rospy.sleep(utils.rate)
class InAniState(smach.State):
    """Dispatcher state: steps through a fixed emotion sequence.

    NOTE(review): only the three names in ani_seq ('lookaround', 'concerned',
    'happy') can be returned, so the 'angry', 'sad' and 'end' outcomes are
    unreachable and the machine cannot terminate from here -- confirm intent.
    """

    def __init__(self):
        smach.State.__init__(self,
                             outcomes=['happy','concerned','angry','lookaround','sad','end'],
                             input_keys=['status_in'],
                             output_keys=['holding_out'])
        # Fixed order of emotion states to cycle through.
        self.ani_seq = ['lookaround','concerned','happy']
        self.seq_id = 0

    def execute(self, userdata):
        rospy.loginfo("status is {}".format(userdata.status_in))
        if userdata.status_in == "wait_transit":
            # Waiting for a transition: hold and restart the sequence.
            userdata.holding_out = 1
            self.seq_id = 0
        elif userdata.status_in == "new_motion":
            # Advance the sequence index, clamped at the last entry.
            userdata.holding_out = 0
            self.seq_id = min(self.seq_id+1, len(self.ani_seq)-1)
        return self.ani_seq[self.seq_id]
if __name__ == '__main__':
    # NOTE(review): `global` at module level is a no-op; kept as in original.
    global utils
    utils = Utils()

    # Build the state machine: InAniState dispatches to one emotion state,
    # and every emotion state hands control back to InAniState.
    sm = smach.StateMachine(outcomes=['end'])
    sm.userdata.holding = 1
    sm.userdata.status = "wait_transit"
    with sm:
        smach.StateMachine.add('InAniState', InAniState(),
                               transitions={'happy':'HappyState','concerned':'ConcernedState','angry':'AngryState',
                                            'lookaround':'LookaroundState','sad':'SadState','end':'end'},
                               remapping={'status_in':'status','holding_out':'holding'})
        smach.StateMachine.add('HappyState', HappyState(),
                               transitions={'return_to_InAni':'InAniState'},
                               remapping={'holding_in':'holding','status_out':'status'})
        smach.StateMachine.add('SadState', SadState(),
                               transitions={'return_to_InAni':'InAniState'},
                               remapping={'holding_in':'holding','status_out':'status'})
        smach.StateMachine.add('LookaroundState', LookaroundState(),
                               transitions={'return_to_InAni':'InAniState'},
                               remapping={'holding_in':'holding','status_out':'status'})
        smach.StateMachine.add('AngryState', AngryState(),
                               transitions={'return_to_InAni':'InAniState'},
                               remapping={'holding_in':'holding','status_out':'status'})
        smach.StateMachine.add('ConcernedState', ConcernedState(),
                               transitions={'return_to_InAni':'InAniState'},
                               remapping={'holding_in':'holding','status_out':'status'})
    # Blocks until the machine reaches an 'end' outcome.
    sm.execute()
    rospy.loginfo("[ani_smach]: smach init")
    rospy.spin()
|
import os
import sys
from collections import deque
import glob
import random
from copy import deepcopy
import re
import numpy as np
import pandas as pd
import xml.etree.ElementTree as ET
from PIL import Image
import matplotlib.pyplot as plt
import torch
# Print full numpy arrays instead of truncated summaries (debugging aid).
np.set_printoptions(threshold=sys.maxsize)

# Use command line arguments in the future
# Raw and preprocessed ("_proc" cache) locations for the ImageNet images
# and bounding-box annotations, split into train and val.
BBOX_TRAIN_PATH = "../ImageNet/BBOX_train/"
BBOX_TRAIN_PROC = "../ImageNet/BBOX_train_proc/"
IMG_TRAIN_PATH = "../ImageNet/IMG_train/"
IMG_TRAIN_PROC = "../ImageNet/IMG_train_proc/"
BBOX_VAL_PATH = "../ImageNet/BBOX_val/"
BBOX_VAL_PROC = "../ImageNet/BBOX_val_proc/"
IMG_VAL_PATH = "../ImageNet/IMG_val/"
IMG_VAL_PROC = "../ImageNet/IMG_val_proc/"
# Text file mapping class ids to human-readable names.
CLASSES = "classes.txt"
# When True, images/labels are resized once up front and cached on disk.
PREPROCESS = True
class Data_index:
    """Index of ImageNet-style images and bounding-box XML for train/val splits.

    Maintains per-class deques of annotation-file paths; when preprocessing
    is on, resizes every image to 416x416 once and rescales its boxes into
    the *_proc cache folders.
    """

    def __init__(self, img_train_path=IMG_TRAIN_PATH,
                 img_val_path=IMG_VAL_PATH,
                 bb_train_path=BBOX_TRAIN_PATH,
                 bb_val_path=BBOX_VAL_PATH,
                 img_train_proc=IMG_TRAIN_PROC,
                 img_val_proc=IMG_VAL_PROC,
                 bb_train_proc=BBOX_TRAIN_PROC,
                 bb_val_proc=BBOX_VAL_PROC,
                 classes=CLASSES,
                 preprocess=PREPROCESS):
        self.num_train_data = 0
        self.train_dataset = {}   # class_id -> deque of annotation paths
        self.num_val_data = 0
        self.val_dataset = {}
        self.num_classes = None   # filled in by populate()
        self.preproc = preprocess
        # preprocessed path
        self.img_train_proc = img_train_proc
        self.img_val_proc = img_val_proc
        self.bb_train_proc = bb_train_proc
        self.bb_val_proc = bb_val_proc
        self.img_train_path = img_train_path
        self.img_val_path = img_val_path
        self.bb_train_path = bb_train_path
        self.bb_val_path = bb_val_path
        self.class_enum = {} # class_enum[class_id] = integer
        self.class_id_to_name = {}
        self.idx_to_class = {}
        # creates mappings from class ID to names
        self.import_classes(classes)

    # Index the data
    # Currently only indexes the training set.
    def populate(self):
        """Walk the bbox folders, filling the train/val deques (and the disk
        caches when preprocessing is enabled and the cache is empty)."""
        self.num_train_data = 0
        class_index = 0
        # Only run the expensive resize pass when the cache folder is empty.
        enter_preproc_loop = self.check_folder_empty(self.img_train_proc)
        #print(enter_preproc_loop)
        for class_path in glob.iglob(self.bb_train_path + "/*"):
            if not self.preproc or not enter_preproc_loop:
                print(f"\r Importing train class: {class_index}", end="")
            # adds a label class to the dict
            # maps index to class name
            label_class = class_path.split("/")[-1]
            self.class_enum[label_class] = class_index
            try:
                self.idx_to_class[class_index] = self.class_id_to_name[label_class]
            except KeyError:
                # Class id missing from classes.txt.
                self.idx_to_class[class_index] = "Unlabeled"
            self.train_dataset[label_class] = deque()
            # adds labels path into a dictionary of deques.
            for i, label_path in enumerate(glob.iglob(class_path + "/*")):
                self.num_train_data += 1
                # Preprocess the images for faster data feeding
                if self.preproc:
                    print(f"\rImporting train class: {class_index}, image: {i+1}", end="")
                    if enter_preproc_loop:
                        oldsize, newsize = self.preprocess_img(label_path, self.img_train_path, self.img_train_proc)
                        self.preprocess_label(label_path, self.bb_train_proc, label_class, oldsize, newsize)
                    # Index the cached (rescaled) annotation, not the raw one.
                    fname = ET.parse(label_path).getroot().find("filename").text + ".xml"
                    self.train_dataset[label_class].append(create_filepath([self.bb_train_proc, label_class, fname]))
                else:
                    self.train_dataset[label_class].append(label_path)
            class_index += 1
        print("")
        self.num_classes = class_index
        self.num_val_data = 0
        enter_preproc_loop = self.check_folder_empty(self.img_val_proc)
        for class_path in glob.iglob(self.bb_val_path + "/*"):
            if not self.preproc or not enter_preproc_loop:
                print(f"\r Importing val class...", end="")
            # adds a label class to the dict
            label_class = class_path.split("/")[-1]
            self.val_dataset[label_class] = deque()
            # adds labels path into a dictionary of deques.
            for i, label_path in enumerate(glob.iglob(class_path + "/*")):
                self.num_val_data += 1
                # Preprocess the images for faster data feeding
                if self.preproc:
                    print(f"\rImporting val class: image: {i+1}", end="")
                    if enter_preproc_loop:
                        oldsize, newsize = self.preprocess_img(label_path, self.img_val_path, self.img_val_proc)
                        self.preprocess_label(label_path, self.bb_val_proc, label_class, oldsize, newsize)
                    fname = ET.parse(label_path).getroot().find("filename").text + ".xml"
                    self.val_dataset[label_class].append(create_filepath([self.bb_val_proc, label_class, fname]))
                else:
                    self.val_dataset[label_class].append(label_path)
        # print(self.train_dataset)
        # print(self.num_train_data)

    def trainsize(self):
        """Recount and return the number of indexed training annotations."""
        self.num_train_data = 0
        for label_class in self.train_dataset.keys():
            self.num_train_data += len(self.train_dataset[label_class])
        # print(self.num_train_data)
        return self.num_train_data

    def valsize(self):
        """Recount and return the number of indexed validation annotations."""
        self.num_val_data = 0
        for label_class in self.val_dataset.keys():
            self.num_val_data += len(self.val_dataset[label_class])
        # print(self.num_train_data)
        return self.num_val_data

    def is_empty(self, dataset):
        """Return True when every per-class deque in `dataset` is empty."""
        is_empty = True
        for label_class in dataset.keys():
            if len(dataset[label_class]) != 0:
                is_empty = False
                break
        return is_empty

    def preprocess_img(self, label_path, filepath_pre, filepath_proc):
        """Resize the image referenced by an annotation to 416x416 and cache it.

        Returns (old_size, new_size) as PIL (width, height) tuples so the
        caller can rescale the bounding boxes.
        """
        root = ET.parse(label_path).getroot()
        filename = root.find("filename").text + ".JPEG"
        old_filepath = create_filepath([filepath_pre, filename])
        new_filepath = create_filepath([filepath_proc, filename])
        img = Image.open(old_filepath).convert("RGB")
        oldsize = img.size
        img = img.resize((416, 416))
        newsize = img.size
        img.save(new_filepath)
        return (oldsize, newsize)

    def preprocess_label(self, label_path, proc_path, classname, oldsize, newsize):
        """Rescale the annotation's box coords from oldsize to newsize and save it."""
        tree = ET.parse(label_path)
        root = tree.getroot()
        filename = root.find("filename").text + ".xml"
        new_filepath = create_filepath([proc_path, classname, filename])
        objs = root.findall("object")
        for obj in objs:
            # Scale x coords by the width ratio, y coords by the height ratio.
            bndbox = obj.find("bndbox")
            bndbox.find("xmin").text = str(int(int(bndbox.find("xmin").text) /oldsize[0] * newsize[0]))
            bndbox.find("xmax").text = str(int(int(bndbox.find("xmax").text) /oldsize[0] * newsize[0]))
            bndbox.find("ymin").text = str(int(int(bndbox.find("ymin").text) /oldsize[1] * newsize[1]))
            bndbox.find("ymax").text = str(int(int(bndbox.find("ymax").text) /oldsize[1] * newsize[1]))
        try:
            tree.write(new_filepath)
        except FileNotFoundError:
            # Destination class folder missing: create it, then retry.
            newfolder = create_filepath([proc_path, classname])
            os.system(f"mkdir {newfolder}")
            tree.write(new_filepath)

    def check_folder_empty(self, fpath):
        """Return True when `fpath` contains no entries (prints the count otherwise)."""
        # NOTE(review): the `if True` filter in the comprehensions is a no-op.
        if len([name for name in os.listdir(fpath) if True]) == 0:
            return True
        print(len([name for name in os.listdir(fpath) if True]))
        return False

    def import_classes(self, classes):
        """
        Expected format:
        class_index: class_name_1, class_name_2, ..., class_name_n
        .
        .
        .
        """
        with open(classes, "r") as file:
            while True:
                line = file.readline()
                lst = re.split(": |, |\n", line)
                class_id = lst[0]
                try:
                    class_name = lst[1]
                except IndexError:
                    # NOTE(review): on a nameless line `class_name` keeps its
                    # previous value, so a stale name may be mapped -- confirm.
                    pass
                self.class_id_to_name[class_id] = class_name
                if not line:
                    break
def create_filepath(arr):
    """Join path components with '/', stripping one trailing '/' per component.

    Args:
        arr: An array of strings, which represents
        each directory in the path.

    Returns:
        The joined path string.
    """
    parts = []
    for segment in arr:
        # Drop a single trailing slash so we never emit '//' between parts.
        # The emptiness guard also fixes the IndexError the original raised
        # on an empty segment.
        if segment and segment[-1] == "/":
            segment = segment[:-1]
        parts.append(segment)
    # str.join replaces the original's quadratic += concatenation.
    return "/".join(parts)
def remove_broken(old_train_path=BBOX_TRAIN_PATH):
    """Delete annotation files whose <filename> tag is the literal '%s'.

    Scans every class folder under `old_train_path`, plus the flat
    BBOX_VAL_PATH/val folder.
    """
    # BUG FIX: the original ignored its parameter and always scanned the
    # module-level BBOX_TRAIN_PATH.
    for class_path in glob.iglob(old_train_path + "/*"):
        for label_path in glob.iglob(class_path + "/*"):
            root = ET.parse(label_path).getroot()
            fname = root.find("filename").text
            if fname == "%s":
                print(f"Deleting {label_path}")
                # os.remove instead of shelling out via os.system("rm ...")
                # (no shell quoting issues with odd filenames).
                os.remove(label_path)
    for label_path in glob.iglob(BBOX_VAL_PATH + "/val/*"):
        root = ET.parse(label_path).getroot()
        fname = root.find("filename").text
        if fname == "%s":
            print(f"Deleting {label_path}")
            os.remove(label_path)
def create_label(objs, class_enum, orig_img_shape, shape=(13, 13), num_of_classes=1000, input_img_shape=(416, 416)):
    """Build a YOLO-style target tensor from XML <object> annotations.

    Args:
        objs: list of xml.etree Elements tagged "object".
        class_enum: mapping from class-name string to class index.
        orig_img_shape: (width, height) of the original image (PIL .size order).
        shape: (tuple) (rows, cols) of the output grid.
        num_of_classes: depth of the one-hot class section.
        input_img_shape: unused; kept for backward compatibility with callers.

    Returns:
        label: (shape[0], shape[1], 1 + 4 + num_of_classes) numpy array where
        the cell containing each object's centre holds
        [objectness, center_x, center_y, width, height, one-hot class].
    """
    orig_w = orig_img_shape[0]
    orig_h = orig_img_shape[1]
    # (Removed the original's unused w/h locals derived from input_img_shape.)
    label = np.zeros((shape[0], shape[1], 1 + 4 + num_of_classes))
    for obj in objs:
        classname = obj.find("name").text
        try:
            class_idx = class_enum[classname]
        except KeyError:
            # Unknown class: skip the object entirely.
            continue
        # Normalise bounding-box corners to [0, 1] in image coordinates.
        bndbox = obj.find("bndbox")
        xmin = float(bndbox.find("xmin").text) / orig_w
        xmax = float(bndbox.find("xmax").text) / orig_w
        ymin = float(bndbox.find("ymin").text) / orig_h
        ymax = float(bndbox.find("ymax").text) / orig_h
        center_x = (xmax + xmin) / 2
        center_y = (ymax + ymin) / 2
        # Grid cell responsible for this object's centre.
        cellx = int(center_x * shape[1])
        celly = int(center_y * shape[0])
        label[celly, cellx, 0] = 1
        label[celly, cellx, 1] = center_x
        label[celly, cellx, 2] = center_y
        label[celly, cellx, 3] = xmax - xmin
        label[celly, cellx, 4] = ymax - ymin
        label[celly, cellx, 5 + class_idx] = 1
    return label
# Only used for training set, as file structure is different for val set.
def generator(data_index,
              val_mode=False,
              img_train_path=IMG_TRAIN_PATH,
              img_val_path=IMG_VAL_PATH,
              batch_size=16, img_shape=(416, 416, 3),
              label_cells=(13, 13), preprocess=PREPROCESS,
              img_train_proc=IMG_TRAIN_PROC,
              img_val_proc=IMG_VAL_PROC):
    """Endlessly yield (X, Y) batches as float torch tensors.

    X is (batch, H, W, 3) image data; Y is the grid target built by
    create_label. Examples are drawn by popping from a deep copy of the
    per-class deques, which is reloaded once exhausted (epoch boundary).
    """
    # NOTE(review): start/end (and m, channels below) are never used.
    start = 0
    end = batch_size
    if val_mode:
        m = data_index.valsize()
        dset = deepcopy(data_index.val_dataset)
        dpath = img_val_path
        dproc = img_val_proc
    else:
        m = data_index.trainsize()
        dset = deepcopy(data_index.train_dataset)
        dpath = img_train_path
        dproc = img_train_proc
    class_enum = data_index.class_enum
    num_classes = len(class_enum)
    width = img_shape[1]
    height = img_shape[0]
    channels = img_shape[2]
    class_enum = data_index.class_enum  # duplicate assignment kept from original
    while True:
        X_batch = np.zeros((batch_size, height, width, 3))
        Y_batch = np.zeros((batch_size, label_cells[0], label_cells[1], 1 + 4 + num_classes))
        for i in range(batch_size):
            # If trainset is empty, reload it
            if data_index.is_empty(dset):
                #print("trainset is empty")
                if val_mode:
                    dset = deepcopy(data_index.val_dataset)
                else:
                    dset = deepcopy(data_index.train_dataset)
            # randomly select one example
            random_key = random.choice(list(dset.keys())) # this is a string of the class
            xml_path = dset[random_key].pop()
            # deletes key when deque is empty
            if len(dset[random_key]) == 0:
                del dset[random_key]
                #print(f"deleted key {random_key}")
            root = ET.parse(xml_path).getroot()
            filename = root.find("filename").text + ".JPEG"
            objs = root.findall("object")
            # image processing
            # print(img_path, xml_path)
            if preprocess:
                # Cached images are already resized; load them directly.
                img_path = create_filepath([dproc, filename])
                img = Image.open(img_path)
                img_np = np.array(img)
                orig_shape = img.size
                X_batch[i, :, :, :] = img_np
                label = create_label(objs, class_enum, orig_shape, shape=label_cells, num_of_classes=num_classes)
                Y_batch[i, :, :, :] = label
            else:
                # Raw images must be converted and resized on the fly.
                img_path = create_filepath([dpath, filename]) # path to the individual image
                img = Image.open(img_path).convert('RGB')
                orig_shape = img.size
                img = img.resize((width, height))
                img_np = np.array(img)
                #print(img_np.shape, filename)
                X_batch[i, :, :, :] = img_np
                label = create_label(objs, class_enum, orig_shape, shape=label_cells, num_of_classes=num_classes)
                Y_batch[i, :, :, :] = label
        # plt.imshow(img)
        # plt.show()
        yield(torch.from_numpy(X_batch).float(), torch.from_numpy(Y_batch).float())
if __name__ == "__main__":
    # Smoke test: index the dataset, then pull 100k batches of 128 through
    # the generator (exercises preprocessing and label creation end to end).
    data_index = Data_index()
    data_index.populate()
    data_index.trainsize()
    data_index.is_empty(data_index.train_dataset)
    gen = generator(data_index, val_mode=False, batch_size=128)
    for i in range(100000):
        next(gen)
        print(i)
#coding=utf8
#########################################################################
# Copyright (C) 2016 All rights reserved.
#
# 文件名称:__init__.py
# 创 建 者:unicodeproject
# 创建日期:2016年11月29日
# 描 述:
#
# 备 注:
#
#########################################################################
#!/usr/bin/python
# please add your code here!
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 4 21:48:44 2020
@author: sumant
"""
num1 = int(input("Enter number: "))

def even(num):
    """Print the even numbers in the range 1..num-1."""
    for i in range(1, num):
        if i % 2 == 0:
            print("Even Number: ", i)

even(num1)

def odd(num):
    """Print the odd numbers in the range 1..num-1."""
    # (Docstring corrected: the original said "even numbers".)
    for i in range(1, num):
        if i % 2 == 1:
            print("Odd Number: ", i)

odd(num1)
###################################################################
num2 = int(input("Enter multiplication number: "))

def multi(num):
    """Print the multiplication table of `num` from 1 to 20."""
    for i in range(1, 21):
        print(num, "*", i, "=", i * num)

multi(num2)
###################################################################
num3 = int(input("Enter number: "))
num4 = int(input("Enter number: "))

def maximum2(num1, num2):
    """Print the larger of the two numbers."""
    # Parameters intentionally shadow the module-level num1/num2.
    print("Max Number is ", max(num1, num2))

maximum2(num3, num4)

num5 = int(input("Enter number1: "))
num6 = int(input("Enter number2: "))
num7 = int(input("Enter number3: "))

def maximum3(num1, num2, num3):
    """Print the largest of the three numbers."""
    print("Max Number is ", max(num1, num2, num3))

maximum3(num5, num6, num7)
###################################################################
list1 = [1,2,3,12,34,45,67,78,90]

def max_element(l):
    """Print the largest element of the sequence `l`."""
    # BUG FIX: the original ignored its parameter and always used global list1.
    print("Maximum element is ", max(l))

max_element(list1)

list2 = (12,3,12,34,45,67,78,90)

def min_element(l):
    """Print the smallest element of the sequence `l`."""
    # BUG FIX: the original printed min(list1) regardless of the argument, so
    # min_element(list2) reported the minimum of the wrong collection.
    print("Minimun element is ", min(l))

min_element(list2)
###################################################################
num7 = int(input("Enter number: "))

def factorial(num7):
    """Print the factorial of the given number."""
    # Multiply down from num7 instead of up from 1 -- same product.
    fact = 1
    n = num7
    while n > 1:
        fact *= n
        n -= 1
    print(f"The factorial of {num7} is : ", fact)

factorial(num7)
###################################################################
str1 = input("Enter string:")

def reverse_string(inp):
    """Print the given string reversed."""
    # Build the reversed text explicitly rather than slicing with [::-1].
    print("".join(reversed(inp)))

reverse_string(str1)
###################################################################
list2 = [11,12,14,15,16,17,11,2,12,2,4,23,34,45,56,67,78,89]

# NOTE(review): this function shadows the builtin all(); the name is kept for
# backward compatibility, but consider renaming (e.g. summarize).
def all(num):
    """Print summary statistics (sum, reversal, length, count of 2s) for `num`."""
    print("Sum of all numbers: ", sum(num))
    print("Reversed of list order: ", num[::-1])
    # BUG FIX: report the length of the argument, not of the global list2.
    print("Length of List: ", len(num))
    print("Count of number: ", num.count(2))

all(list2)
|
# coding: utf-8
# License: Apache License 2.0 (https://github.com/tensorflow/models/blob/master/LICENSE)
# Reference Code:
# source: https://github.com/tensorflow/models
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
import time
from collections import defaultdict, deque
from io import StringIO
from PIL import ImageGrab
import cv2
# from nomo import LaneDetector, Vehicle
from ALD import Line
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
sys.path.append("models/research/")
sys.path.append("models/research/slim")
# ## Object detection imports
# Here are the imports from the object detection module.
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
# # Model preparation
# What model to download.
# NOTE: TF1-era SSD-MobileNet COCO checkpoint; the download itself is
# commented out below, so the extracted model folder must already exist
# next to this script.
MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'
MODEL_FILE = MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
object_detection_path = "models/research/object_detection"
PATH_TO_LABELS = os.path.join(object_detection_path,'data', 'mscoco_label_map.pbtxt')
# COCO label map contains 90 class ids.
NUM_CLASSES = 90
# ## Download Model
# opener = urllib.request.URLopener()
# opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
# print("retrieved")
# tar_file = tarfile.open(MODEL_FILE)
# for file in tar_file.getmembers():
#     file_name = os.path.basename(file.name)
#     if 'frozen_inference_graph.pb' in file_name:
#         tar_file.extract(file, os.getcwd())
# ## Load a (frozen) Tensorflow model into memory.
# TF1 graph-mode loading: deserialize the frozen GraphDef and import it
# into a dedicated graph that the Session below executes.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
# ## Loading label map
# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Lane-detection state from the ALD module, reused across frames below.
LD = Line()
def box2pixels(box, rows, cols):
    """Convert a normalized [ymin, xmin, ymax, xmax] box into pixel
    coordinates, returned as an (x, y, width, height) tuple."""
    top = int(box[0] * rows)
    left = int(box[1] * cols)
    bottom = int(box[2] * rows)
    right = int(box[3] * cols)
    return (left, top, right - left, bottom - top)
# Number of frames between full detector runs; also the track-trail length.
buffer_frame = 15
# Main loop: grab the screen, run lane detection every frame, run the
# object detector once every `buffer_frame` frames (when n == 1) and
# track the detected boxes with KCF trackers in between.
with detection_graph.as_default():
    with tf.Session(graph=detection_graph) as sess:
        n = 1
        while True:
            timer = cv2.getTickCount()
            # print(n)
            # Screen capture of a 640x480 region, downscaled to 320x240.
            image_np = cv2.resize(np.array(ImageGrab.grab(bbox=(0,45, 640,525)).convert('RGB')), (320, 240))
            # NOTE(review): image_t is an alias of image_np (no copy), so
            # tracker drawing below also marks image_np.
            image_t = image_np
            rows, cols = image_t.shape[:2]
            # NOTE(review): bare except silently falls back to the raw
            # frame whenever the lane pipeline fails.
            try:
                new_screen = LD.pipeline(image_np)
            except:
                new_screen = image_np
            # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
            if n == 1:
                # Fresh detection pass: fetch tensors, run the session and
                # (re)initialise one KCF tracker per confident box.
                image_np_expanded = np.expand_dims(image_np, axis=0)
                image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
                # Each box represents a part of the image where a particular object was detected.
                boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
                # Each score represent how level of confidence for each of the objects.
                # Score is shown on the result image, together with the class label.
                scores = detection_graph.get_tensor_by_name('detection_scores:0')
                classes = detection_graph.get_tensor_by_name('detection_classes:0')
                num_detections = detection_graph.get_tensor_by_name('num_detections:0')
                # Actual detection.
                (boxes, scores, classes, num_detections) = sess.run(
                    [boxes, scores, classes, num_detections],
                    feed_dict={image_tensor: image_np_expanded})
                # Visualization of the results of a detection.
                vis_util.visualize_boxes_and_labels_on_image_array(
                    image_np,
                    np.squeeze(boxes),
                    np.squeeze(classes).astype(np.int32),
                    np.squeeze(scores),
                    category_index,
                    use_normalized_coordinates=True,
                    line_thickness=5,
                    max_boxes_to_draw=20)
                # print(scores[0]>0.9)
                # Keep only detections above a 0.5 confidence threshold.
                n_boxes = len(scores[scores>0.5])
                tracker = []
                pts = []
                for i in range(n_boxes):
                    bbox = box2pixels(boxes[0][i], rows, cols)
                    p1 = (int(bbox[0]), int(bbox[1]))
                    tracker.append(cv2.TrackerKCF_create())
                    tracker[i].init(image_t, bbox)
                    # Trail of recent top-left corners for drawing paths.
                    pts.append(deque(maxlen=buffer_frame))
                    pts[i].appendleft((int(bbox[0]), int(bbox[1])))
            # ok = True
            # Tracking pass: advance every tracker and draw box + trail.
            for i in range(n_boxes):
                ok, bbox = tracker[i].update(image_t)
                if ok:
                    p1 = (int(bbox[0]), int(bbox[1]))
                    p2 = (int(bbox[2]+bbox[0]), int(bbox[3]+bbox[1]))
                    pts[i].appendleft(p1)
                    pts_len = len(pts[i])
                    cv2.rectangle(image_t, p1, p2, (255,0,0), 2, 1)
                    [cv2.line(image_t, pts[i][j-1], pts[i][j], (0,0,255), 2) for j in range(1, pts_len)]
                else:
                    cv2.putText(image_t, "Tracking failure detected", (100, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
            # FPS from OpenCV tick counts since the top of the loop.
            fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
            cv2.putText(image_t, "FPS: {}".format(fps), (100, 50), cv2.FONT_HERSHEY_SIMPLEX,
                0.5, (50, 170, 50), 2)
            # Blend the lane overlay and the tracking overlay into the frame.
            image_np = cv2.addWeighted(image_np, 0.6, new_screen, 0.4, 0.0)
            image_np = cv2.addWeighted(image_np, 0.6, image_t, 0.4, 0.0)
            # Frame counter cycles 1..buffer_frame; detection re-runs at 1.
            if n == buffer_frame: n = 1
            else: n += 1
            cv2.imshow("window",cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR))
            cv2.moveWindow("window", 1000, 50)
            # Press 'q' in the window to quit.
            if cv2.waitKey(25) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                break
class AuthBase:
    """
    Default (no-op) implementations of the auth hooks
    ``add_auth_filters`` and ``add_auth_insert_data``.

    Use as a mixin or hand it to declarative_base:
        class Foo(Base, AuthBase):
    or
        Base = declarative_base(cls=AuthBase)
    """

    @classmethod
    def add_auth_filters(cls, query, badge):
        """Hook for implicit query filters, applied before any additional
        filters; the default returns *query* unchanged."""
        return query

    def add_auth_insert_data(self, badge):
        """Hook for assigning implicit values to a new object (e.g. via
        Session.add(Base())); the default does nothing."""
        pass
|
import pandas as pd
import os
import sys
# Split the CSV named in sys.argv[1] into numbered chunks of at most
# sys.argv[2] rows each, written next to the original file as
# "<base>-1.csv", "<base>-2.csv", ...
file_name = os.path.abspath(os.path.expanduser(sys.argv[1]))
base_name = file_name.rsplit(".", 1)[0]
data = pd.read_csv(file_name)
max_size = int(sys.argv[2])
for chunk_no, start in enumerate(range(0, data.shape[0], max_size), start=1):
    data.iloc[start:start + max_size].to_csv(f"{base_name}-{chunk_no}.csv")
|
# NOTE: Python 2 script -- it uses the `print` statement at the bottom
# and relies on `/` being floor division (max_len / 2 == 1 for 3).
max_len = 3
def rec(digits):
    """Recursively enumerate digit lists of length max_len.

    `digits` is the prefix built so far.  NOTE(review): inside each list
    comprehension the loop variable `l` rebinds the local `l` (the
    prefix length) to the recursive sub-results being flattened; this is
    confusing but harmless, since `l` is not read afterwards.  The exact
    pairing/parity rules encoded below are not documented anywhere in
    this file -- confirm intent before modifying.
    """
    l = len(digits)
    if l == 0:
        # Seed with a non-zero leading digit.
        return [l for i in range(1, 10) for l in rec([i])]
    elif l >= max_len:
        # Prefix is complete: emit it as a single candidate.
        return [digits]
    elif l <= max_len / 2:
        # First half of the number: any digit is allowed.
        return [l for i in range(10) for l in rec(digits + [i])]
    elif l == max_len / 2 + 1:
        # Just past the midpoint: the allowed range and step depend on a
        # "carry" test against the mirrored digit and on the parity of
        # the digit before the mirror position.
        if digits[-1] + digits[max_len-l] > 9:
            if digits[max_len-l-1] % 2 == 0:
                return [l for i in range(max(10-digits[-2], 1 if l == max_len-1 else 0), 10, 2) for l in rec(digits + [i])]
            else:
                return [l for i in range(max(10-digits[-2], 1), 10, 2) for l in rec(digits + [i])]
        else:
            if digits[max_len-l-1] % 2 == 0:
                return [l for i in range(max(10-digits[-2], 1), 10, 2) for l in rec(digits + [i])]
            else:
                return [l for i in range(max(10-digits[-2], 1 if l == max_len-1 else 0), 10, 2) for l in rec(digits + [i])]
    else:
        # Remaining positions: same carry/parity scheme without the
        # max(10 - previous digit, ...) lower bound.
        if digits[-1] + digits[max_len-l] > 9:
            if digits[max_len-l-1] % 2 == 0:
                return [l for i in range(1 if l == max_len-1 else 0, 10, 2) for l in rec(digits + [i])]
            else:
                return [l for i in range(1, 10, 2) for l in rec(digits + [i])]
        else:
            if digits[max_len-l-1] % 2 == 0:
                return [l for i in range(1, 10, 2) for l in rec(digits + [i])]
            else:
                return [l for i in range(1 if l == max_len-1 else 0, 10, 2) for l in rec(digits + [i])]
# Enumerate all candidates and report the count plus the list (Python 2
# print statement).
r = rec([])
print len(r), r
|
from tfcgp.problem import Problem
from tfcgp.config import Config
from tfcgp.evolver import Evolver
from tfcgp.learn_evo import LearnEvolver
from tfcgp.ga import GA
import argparse
import numpy as np
import os
# Command-line driver: configure and run a CGP (Cartesian Genetic
# Programming) evolution over a whitespace-delimited dataset.
# Everything below runs at import time.
parser = argparse.ArgumentParser(description='CGP with Tensorflow')
parser.add_argument('--no-learn', dest='learn', action='store_const',
                    const=False, default=True,
                    help='Turn off learning')
parser.add_argument('--no-evo', dest='evo', action='store_const',
                    const=False, default=True,
                    help='Turn off evolution')
parser.add_argument('--lamarck', dest='lamarck', action='store_const',
                    const=True, default=False,
                    help='Turn on Lamarckian evolution')
parser.add_argument('--log', type=str, help='Log file')
parser.add_argument('--data', type=str, help='Data file', default='data/glass.dt')
parser.add_argument('--config', type=str, help='Config file', default='cfg/base.yaml')
parser.add_argument('--epochs', type=int, help='Number of epochs', default=1)
parser.add_argument('--seed', type=int, help='Random seed', default=0)
# NOTE(review): --no-evo and --seed are parsed but never used below.
args = parser.parse_args()
data = []; targets = []
nin = 0
# The second token of the data file's first line gives the number of
# input columns; remaining columns are targets.
with open(args.data, 'r') as p:
    for i in p:
        nin = int(i.strip('\n').split(' ')[1])
        break
all_dat = np.genfromtxt(args.data, delimiter=' ', skip_header=4)
data = all_dat[:, :nin]
targets = all_dat[:, nin:]
c = Config()
c.update(args.config)
# NOTE(review): `p` is rebound here from the file handle above to the
# Problem instance.
p = Problem(data, targets, learn=args.learn, epochs=args.epochs, lamarckian=args.lamarck)
e = GA(p, c, logname=args.log)
# Step the GA until the configured evaluation budget is exhausted.
while p.eval_count < c.cfg["total_evals"]:
    e.step()
|
# for loops: several equivalent ways to iterate
for _ in [1, 2, 3, 4, 5]:
    print("hello")
for _ in range(0, 5):
    print("hello")
# step of 2 gives the odd numbers below 10
for value in range(1, 10, 2):
    print(value)
# negative step counts down (10 .. 2)
for value in range(10, 1, -1):
    print(value)
# a counted for loop and its while equivalent
for value in range(0, 5):
    print(value)
value = 0
while value < 5:
    print(value)
    value = value + 1
# break: leave the loop as soon as the condition hits
for value in range(10):
    if value == 5:
        break
    print(value)
print("EOP")
# continue: skip the rest of the body for one iteration
for value in range(10):
    if value == 5:
        continue
    print(value)
print("EOP")
# loop else: runs when the loop finishes without break
print("else 제어")
for value in range(10):
    print(value)
else:
    print("EOP")
print("##")
value = 0
while value < 10:
    print(value)
    value += 1
else:
    print("eop")
from urllib.request import Request, urlopen
import re,csv
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
BASE_URL = 'https://www.facebook.com/isaac.pintosevich.systems/'
def get_html(url):
    """Open *url* in Chrome, scroll to the bottom until the page stops
    growing, then return the fully loaded page source.

    Bug fix: the height-comparison break was commented out, so the
    ``while True`` scroll loop never terminated and the ``return`` was
    unreachable; the termination check is restored below.
    """
    driver = webdriver.Chrome()
    driver.get(url)
    SCROLL_PAUSE_TIME = 3
    # Get scroll height
    last_height = driver.execute_script("return document.body.scrollHeight")
    while True:
        # Scroll down to bottom
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        # Wait to load page
        time.sleep(SCROLL_PAUSE_TIME)
        # Calculate new scroll height and compare with last scroll height
        new_height = driver.execute_script("return document.body.scrollHeight")
        if new_height == last_height:
            # No new content appeared after the last scroll: stop.
            break
        last_height = new_height
    return driver.page_source
def parse_url(html):
    """Extract product titles and links from a category page.

    Returns a list of ``{'title': ..., 'url': ...}`` dicts, one per
    ``div.product-info`` entry inside ``div.category-products``.
    """
    soup = BeautifulSoup(html, 'html.parser')
    container = soup.find('div', {'class': 'category-products'})
    entries = container.find_all('div', {'class': 'product-info'})
    projects = []
    for entry in entries:
        heading = entry.find_all('h2')[0]
        projects.append({'title': heading.text, 'url': heading.a['href']})
    return projects
def save_url(projects, path):
    """Write *projects* to *path* as a ';'-delimited CSV.

    The first row is a header; each following row is (title, url).
    """
    header = ('Название', 'URL')
    rows = [(item['title'], item['url']) for item in projects]
    with open(path, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile, delimiter=';')
        writer.writerow(header)
        writer.writerows(rows)
def main():
    """Scrape the base page and dump the extracted links to url.csv."""
    page = get_html(BASE_URL)
    save_url(parse_url(page), 'url.csv')
    # get_html(BASE_URL)

if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 3 20:57:26 2018
@author: PPAGACZ
"""
from WaldDecompositionFilter import *
import unittest.mock
import pytest
from pytest_mock import mocker
import numpy as np
import pandas as pd
from statsmodels.tsa.seasonal import seasonal_decompose
from unittest import TestCase
class Test_WaldDecompositionFilter(TestCase):
    """Tests for WaldDecompositionFilter's static helpers.

    Every test loads the AirPassengers sample CSV from the working
    directory and compares the filter against direct numpy /
    statsmodels computations.  NOTE(review): the
    ``pd.DatetimeIndex(freq=..., start=..., periods=...)`` construction
    used below is long deprecated (pd.date_range is the modern
    spelling), so these tests pin an old pandas version.
    """
    def test_log_ts(self):
        # ts_log should be the element-wise natural log of the series.
        series = pd.read_csv("AirPassengers.csv")
        columns = list(series.columns.values)
        ts = series[columns[1]]
        expected = np.log(ts)
        actual = WaldDecompositionFilter.ts_log(ts)
        assert ((expected == actual).all())
    def test_decomposition(self):
        series = pd.read_csv("AirPassengers.csv")
        # Monthly DatetimeIndex is required by seasonal_decompose.
        series.index = pd.DatetimeIndex(freq = 'M', start = 0, periods=series.shape[0])
        columns = list(series.columns.values)
        ts = series[columns[1]]
        ts_log = np.log(ts)
        # NOTE(review): `expected` is computed but never compared; the
        # assertion only checks that decomposition() returns something.
        expected = seasonal_decompose(ts_log)
        actual = WaldDecompositionFilter.decomposition(ts_log)
        assert actual is not None
    def test_trend(self):
        # trend() should expose the trend component of the decomposition.
        series = pd.read_csv("AirPassengers.csv")
        series.index = pd.DatetimeIndex(freq = 'M', start = 0, periods=series.shape[0])
        columns = list(series.columns.values)
        ts = series[columns[1]]
        ts_log = np.log(ts)
        decomposition = seasonal_decompose(ts_log)
        expected = decomposition.trend
        actual = WaldDecompositionFilter.trend(decomposition)
        # .any() rather than .all(): the trend series contains NaNs at
        # the edges, where == comparison is False.
        assert ((expected == actual).any())
    def test_seasonal(self):
        # seasonal() should expose the seasonal component.
        series = pd.read_csv("AirPassengers.csv")
        series.index = pd.DatetimeIndex(freq = 'M', start = 0, periods=series.shape[0])
        columns = list(series.columns.values)
        ts = series[columns[1]]
        ts_log = np.log(ts)
        decomposition = seasonal_decompose(ts_log)
        expected = decomposition.seasonal
        actual = WaldDecompositionFilter.seasonal(decomposition)
        assert ((expected == actual).all())
    def test_residuals(self):
        # residual() should expose the residual component.
        series = pd.read_csv("AirPassengers.csv")
        series.index = pd.DatetimeIndex(freq = 'M', start = 0, periods=series.shape[0])
        columns = list(series.columns.values)
        ts = series[columns[1]]
        ts_log = np.log(ts)
        decomposition = seasonal_decompose(ts_log)
        expected = decomposition.resid
        actual = WaldDecompositionFilter.residual(decomposition)
        # .any() tolerates NaNs at the series edges (see test_trend).
        assert ((expected == actual).any())
    def test_getText(self):
        # getText() should render a fixed header plus the first 30 rows.
        series = pd.read_csv("AirPassengers.csv")
        series.index = pd.DatetimeIndex(freq = 'M', start = 0, periods=series.shape[0])
        columns = list(series.columns.values)
        ts = series[columns[1]]
        ts_log = np.log(ts)
        decomposition = seasonal_decompose(ts_log)
        ts_log_decompose = decomposition.resid
        ts_log_decompose.dropna(inplace=True)
        expected = "Data after Wald decomposition\nMonth #Passengers\n"+ts_log_decompose.head(n=30).to_string()
        actual = WaldDecompositionFilter.getText(ts_log_decompose)
        assert expected == actual
    def test_setDecomposedData(self):
        series = pd.read_csv("AirPassengers.csv")
        columns = list(series.columns.values)
        # `Data` presumably comes from the WaldDecompositionFilter star
        # import -- confirm.
        data = Data(series[columns[1]])
        WaldDecompositionFilter.setWaldDecomposition(data, series[columns[1]])
        # NOTE(review): comparing with `!= None` (instead of
        # `is not None`) on the reduced .all() value is fragile.
        assert data.waldDecomposition.all() != None
|
# Generated by Django 2.0.7 on 2018-07-21 08:02
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated migration: refreshes the ``add_time`` defaults of
    the operation app's models to the timestamp captured when
    makemigrations ran.

    NOTE(review): the hard-coded datetimes are the classic symptom of a
    model default written as ``datetime.datetime.now()`` (evaluated once
    at generation time) instead of a callable such as
    ``django.utils.timezone.now``; every later makemigrations run will
    emit another migration like this one.  Do not edit the operations
    below -- applied migrations must stay immutable.
    """
    dependencies = [
        ('operation', '0003_auto_20180721_1511'),
    ]
    operations = [
        migrations.AlterField(
            model_name='coursecomments',
            name='add_time',
            field=models.DateTimeField(default=datetime.datetime(2018, 7, 21, 8, 2, 47, 554721, tzinfo=utc), verbose_name='评论时间'),
        ),
        migrations.AlterField(
            model_name='userask',
            name='add_time',
            field=models.DateTimeField(default=datetime.datetime(2018, 7, 21, 8, 2, 47, 554721, tzinfo=utc), verbose_name='添加时间'),
        ),
        migrations.AlterField(
            model_name='usercourse',
            name='add_time',
            field=models.DateTimeField(default=datetime.datetime(2018, 7, 21, 8, 2, 47, 556210, tzinfo=utc), verbose_name='添加时间'),
        ),
        migrations.AlterField(
            model_name='userfavorite',
            name='add_time',
            field=models.DateTimeField(default=datetime.datetime(2018, 7, 21, 8, 2, 47, 555217, tzinfo=utc), verbose_name='评论时间'),
        ),
        migrations.AlterField(
            model_name='usermessage',
            name='add_time',
            field=models.DateTimeField(default=datetime.datetime(2018, 7, 21, 8, 2, 47, 555713, tzinfo=utc), verbose_name='添加时间'),
        ),
    ]
|
import time
import numpy as np
import pandas as pd
import pickle as pkl
import tensorflow as tf
import tf_centernet_resnet_s8 as tf_obj_detector
from data_preprocess import random_flip_horizontal
from data_preprocess import swap_xy, convert_to_xywh
# Custom function to parse the data. #
def _parse_image(
    filename, img_rows=448, img_cols=448):
    """Load a JPEG from *filename*, scale pixels to [0, 1] and resize.

    Returns a float32 tensor with static shape (img_rows, img_cols, 3).
    """
    raw_bytes = tf.io.read_file(filename)
    decoded = tf.image.decode_jpeg(raw_bytes, channels=3)
    scaled = tf.cast(decoded, tf.float32) / 255.0
    resized = tf.image.resize(scaled, [img_rows, img_cols])
    # Pin the static shape so downstream graph ops can rely on it.
    return tf.ensure_shape(resized, shape=(img_rows, img_cols, 3))
def train(
    model, n_classes, img_dims,
    sub_batch_sz, batch_size, box_scales,
    train_data, training_loss, st_step, max_steps, optimizer,
    ckpt, ck_manager, label_dict, init_lr=1.0e-3, min_lr=1e-6,
    downsample=32, use_scale=False, min_scale=0.7, decay=0.75,
    display_step=100, step_cool=50, base_rows=320, base_cols=320,
    thresh=0.50, save_flag=False, train_loss_log="train_losses.csv"):
    """Training loop for the CenterNet detector.

    Each step samples `batch_size` images without replacement, formats
    ground-truth boxes, runs one train step, and periodically logs
    losses, saves checkpoints and renders detection results.
    `training_loss` is mutated in place (appended to every
    `display_step` steps).  NOTE(review): the `decay` parameter is
    accepted but never read in this body -- confirm whether learning
    rate decay was meant to use it instead of the hard-coded step
    schedule below.
    """
    n_data = len(train_data)
    base_dims = min(base_rows, base_cols)
    # Largest random resize factor such that the image fits img_dims.
    max_scale = img_dims / base_dims
    start_time = time.time()
    tot_reg_loss = 0.0
    tot_cls_loss = 0.0
    for step in range(st_step, max_steps):
        # Piecewise-constant LR schedule with a floor of min_lr.
        if step < 20000:
            lrate = init_lr
        elif step < 25000:
            lrate = init_lr / 10.0
        else:
            lrate = init_lr / 100.0
        lrate = max(lrate, min_lr)
        batch_sample = np.random.choice(
            n_data, size=batch_size, replace=False)
        # Use only one image resolution to train. #
        if use_scale:
            rnd_scale = np.random.uniform(
                low=min_scale, high=max_scale)
        else:
            rnd_scale = max_scale
        raw_dims = int(rnd_scale * base_dims)
        # Symmetric padding needed to reach img_dims after resizing.
        pad_dims = int((img_dims - raw_dims) / 2.0)
        sc_dims = [raw_dims, raw_dims]
        img_pad = [img_dims, img_dims]
        img_boxes = []
        img_batch = []
        for tmp_idx in batch_sample:
            tmp_image = _parse_image(
                train_data[tmp_idx]["image"],
                img_rows=raw_dims, img_cols=raw_dims)
            tmp_bbox = np.array(train_data[
                tmp_idx]["objects"]["bbox"])
            tmp_class = np.array(train_data[
                tmp_idx]["objects"]["label"])
            tmp_class = np.expand_dims(tmp_class, axis=1)
            # Display labels: boxes in (x, y, w, h) order for rendering.
            disp_bbox = swap_xy(tmp_bbox)
            disp_bbox = convert_to_xywh(disp_bbox)
            disp_label = np.concatenate(tuple([
                disp_bbox, tmp_class]), axis=1)
            disp_label = tf.constant(disp_label)
            del disp_bbox
            # NOTE(review): disp_tuple is overwritten every iteration;
            # only the final sample's value is used for display below.
            disp_tuple = tf_obj_detector.format_data(
                disp_label, box_scales, img_pad,
                n_classes, img_pad=img_pad, stride=8)
            # Augmentation: random horizontal flip of image + boxes.
            tmp_tuple = \
                random_flip_horizontal(tmp_image, tmp_bbox)
            tmp_image = tmp_tuple[0]
            tmp_bbox = tmp_tuple[1]
            tmp_bbox = swap_xy(tmp_bbox)
            tmp_bbox = convert_to_xywh(tmp_bbox)
            tmp_image = tf.image.pad_to_bounding_box(
                tmp_image, pad_dims, pad_dims, img_dims, img_dims)
            gt_labels = np.concatenate(tuple([
                tmp_bbox, tmp_class]), axis=1)
            gt_labels = tf.constant(gt_labels)
            del tmp_tuple
            tmp_tuple = tf_obj_detector.format_data(
                gt_labels, box_scales, sc_dims,
                n_classes, img_pad=img_pad, stride=8)
            img_batch.append(
                tf.expand_dims(tmp_image, axis=0))
            img_boxes.append(
                tf.expand_dims(tmp_tuple[0], axis=0))
            del tmp_tuple, gt_labels, tmp_image, tmp_bbox
        # Get the image file names. #
        img_files = [
            train_data[x]["image"] for x in batch_sample]
        # Note that TF parses the image transposed, so the #
        # bounding boxes coordinates are already transposed #
        # during the formatting of the data. #
        img_batch = tf.concat(img_batch, axis=0)
        img_boxes = tf.cast(tf.concat(
            img_boxes, axis=0), tf.float32)
        tmp_losses = tf_obj_detector.train_step(
            model, sub_batch_sz, img_batch,
            img_boxes, optimizer, learning_rate=lrate)
        ckpt.step.assign_add(1)
        tot_cls_loss += tmp_losses[0]
        tot_reg_loss += tmp_losses[1]
        # Periodic reporting block: average the accumulated losses,
        # append them to training_loss and print a summary.
        if (step+1) % display_step == 0:
            avg_reg_loss = tot_reg_loss.numpy() / display_step
            avg_cls_loss = tot_cls_loss.numpy() / display_step
            training_loss.append((step+1, avg_cls_loss, avg_reg_loss))
            tot_reg_loss = 0.0
            tot_cls_loss = 0.0
            print("Step", str(step+1), "Summary:")
            print("Learning Rate:", str(optimizer.lr.numpy()))
            print("Average Epoch Cls. Loss:", str(avg_cls_loss) + ".")
            print("Average Epoch Reg. Loss:", str(avg_reg_loss) + ".")
            elapsed_time = (time.time() - start_time) / 60.0
            print("Elapsed time:", str(elapsed_time), "mins.")
            start_time = time.time()
            # Non-cooldown display step: render current batch results.
            if (step+1) % step_cool != 0:
                img_title = "CenterNetv1 ResNet-101 "
                img_title += "Object Detection Result "
                img_title += "at Step " + str(step+1)
                tmp_img = img_batch[-1]
                tmp_bbox = img_boxes[-1]
                tf_obj_detector.show_object_boxes(
                    tmp_img, tmp_bbox, img_dims,
                    box_scales, downsample=downsample)
                tf_obj_detector.obj_detect_results(
                    img_files[-1], model,
                    box_scales, label_dict, heatmap=True,
                    thresh=thresh, downsample=downsample,
                    img_rows=img_dims, img_cols=img_dims,
                    img_box=disp_tuple[0], img_title=img_title)
                print("-" * 50)
            # Cooldown step: optionally persist losses + checkpoint,
            # render, then pause to let the machine cool down.
            if (step+1) % step_cool == 0:
                if save_flag:
                    # Save the training losses. #
                    train_cols_df = ["step", "cls_loss", "reg_loss"]
                    train_loss_df = pd.DataFrame(
                        training_loss, columns=train_cols_df)
                    train_loss_df.to_csv(train_loss_log, index=False)
                    # Save the model. #
                    save_path = ck_manager.save()
                    print("Saved model to {}".format(save_path))
                    print("-" * 50)
                img_title = "CenterNetv1 ResNet-101 "
                img_title += "Object Detection Result "
                img_title += "at Step " + str(step+1)
                tmp_img = img_batch[-1]
                tmp_bbox = img_boxes[-1]
                tf_obj_detector.show_object_boxes(
                    tmp_img, tmp_bbox, img_dims,
                    box_scales, downsample=downsample)
                tf_obj_detector.obj_detect_results(
                    img_files[-1], model,
                    box_scales, label_dict, heatmap=True,
                    thresh=thresh, downsample=downsample,
                    img_rows=img_dims, img_cols=img_dims,
                    img_box=disp_tuple[0], img_title=img_title)
                time.sleep(120)
# Load the Crowd Human dataset. #
# NOTE(review): machine-specific Windows paths -- adjust before reuse.
tmp_path = "C:/Users/admin/Desktop/Data/Crowd Human Dataset/"
data_file = tmp_path + "crowd_human_body_data.pkl"
with open(data_file, "rb") as tmp_load:
    train_data = pkl.load(tmp_load)
# Generate the label dictionary. #
# Single-class problem: every annotation is a person.
id_2_label = dict([(0, "person")])
# Define the Neural Network. #
restore_flag = False
subsample = False
downsample = 8
img_dims = 512
base_rows = 448
base_cols = 448
disp_rows = img_dims
disp_cols = img_dims
step_cool = 50
init_lr = 0.01
min_lr = 1.0e-5
decay_rate = 0.999
max_steps = 30000
batch_size = 16
sub_batch = 1
n_classes = len(id_2_label)
box_scales = [32.0, 64.0, 128.0, 256.0, 512.0]
n_scales = len(box_scales)
display_step = 25
# Optionally train on a 2500-sample subset for quick experiments.
if subsample:
    train_data = train_data[:2500]
# Define the checkpoint callback function. #
model_path = \
    "C:/Users/admin/Desktop/TF_Models/crowd_human_model/"
train_loss = \
    model_path + "crowd_human_losses_centernet_resnet101.csv"
ckpt_model = model_path + "crowd_human_centernet_resnet101"
# Build the model. #
centernet_model = tf_obj_detector.build_model(
    n_classes, n_scales=n_scales, backbone_model="resnet101")
model_optimizer = tf.keras.optimizers.SGD(momentum=0.9)
# Checkpoint tracks the global step alongside model and optimizer state.
checkpoint = tf.train.Checkpoint(
    step=tf.Variable(0),
    centernet_model=centernet_model,
    model_optimizer=model_optimizer)
ck_manager = tf.train.CheckpointManager(
    checkpoint, directory=ckpt_model, max_to_keep=1)
# On restore, reload both the loss history CSV and the latest checkpoint
# so training resumes where it stopped.
if restore_flag:
    train_loss_df = pd.read_csv(train_loss)
    training_loss = [tuple(
        train_loss_df.iloc[x].values) \
        for x in range(len(train_loss_df))]
    checkpoint.restore(ck_manager.latest_checkpoint)
    if ck_manager.latest_checkpoint:
        print("Model restored from {}".format(
            ck_manager.latest_checkpoint))
    else:
        print("Error: No latest checkpoint found.")
else:
    training_loss = []
# Resume step comes from the checkpoint's step counter.
st_step = checkpoint.step.numpy().astype(np.int32)
# Print out the model summary. #
print(centernet_model.summary())
print("-" * 50)
print("Fit model on training data (" +\
    str(len(train_data)) + " training samples).")
train(centernet_model, n_classes, img_dims,
    sub_batch, batch_size, box_scales,
    train_data, training_loss, st_step,
    max_steps, model_optimizer, checkpoint,
    ck_manager, id_2_label, decay=decay_rate,
    base_rows=base_rows, base_cols=base_cols,
    display_step=display_step, step_cool=step_cool,
    init_lr=init_lr, min_lr=min_lr, downsample=downsample,
    thresh=0.50, save_flag=True, train_loss_log=train_loss)
print("Model fitted.")
|
from heapq import heappush, heappop, heapify
# heappop - pop and return the smallest element from heap,
# maintaining the heap invariant.
# heappush - push the value item onto the heap,
# maintaining heap invarient.
# heapify - transform list into heap, in place,
# in linear time
class MinHeap:
    """Thin wrapper around ``heapq`` adding decrease-key and
    delete-by-index on top of the usual push/pop operations."""

    def __init__(self):
        # Backing list, always kept in heap order.
        self.heap = []

    def heapify(self, arr):
        """Heap-order *arr* in place, adopt it as the backing list and
        return it."""
        heapify(arr)
        self.heap = arr
        return self.heap

    def parent(self, i):
        """Index of the parent of the node at index *i*."""
        return (i - 1) // 2

    def insertKey(self, k):
        """Push *k* onto the heap and echo the heap contents."""
        heappush(self.heap, k)
        print(self.heap)

    def decreaseKey(self, i, new_val):
        """Lower the key at index *i* to *new_val* and sift it up until
        the heap invariant is restored."""
        self.heap[i] = new_val
        up = self.parent(i)
        while i > 0 and self.heap[up] > self.heap[i]:
            self.heap[i], self.heap[up] = self.heap[up], self.heap[i]
            i = up
            up = self.parent(i)

    def extractMin(self):
        """Pop and return the smallest element."""
        return heappop(self.heap)

    def deleteindex(self, i):
        """Remove the element at index *i*: decrease it to -inf so it
        bubbles to the root, then pop it."""
        self.decreaseKey(i, float("-inf"))
        self.extractMin()

    def getMin(self):
        """Smallest element without removing it."""
        return self.heap[0]
# Demo: exercise MinHeap; insertKey echoes the heap after each push.
heapObj = MinHeap()
for key in (3, 2, 15, 5, 4):
    heapObj.insertKey(key)
heapObj.deleteindex(3)
for key in (45, 1):
    heapObj.insertKey(key)
# Adopt a fresh list and keep mutating it.
heapObj.heapify([3, 2, 15, 5, 4, 45, 1])
print(heapObj.heap)
heapObj.insertKey(6)
print(heapObj.heap)
heapObj.deleteindex(3)
print(heapObj.heap)
|
# -*- coding: utf-8 -*-
# @Author : WangNing
# @Email : 3190193395@qq.com
# @File : DBData_reflect.py
# @Software: PyCharm
class API:
    """Column-index constants for rows of the ``api`` table.

    NOTE(review): values look like positional indices into a DB cursor
    row -- confirm against the actual SELECT column order.
    """
    # api
    api_id = 0       # primary key of the API record
    api_name = 1     # human-readable API name
    file_name = 2    # file the API definition lives in
    req_url = 3      # request URL
    req_method = 4   # HTTP method
    param_type = 5   # parameter encoding type
    has_rely = 6     # whether the API depends on other APIs
    status = 7       # record status flag
    create_time = 8  # creation timestamp
class CASE:
    """Column-index constants for rows of the ``case`` table (see the
    NOTE on :class:`API` -- presumably DB cursor row positions)."""
    # case
    id = 0           # primary key of the test case
    api_id = 1       # foreign key to the API under test
    req_data = 2     # request payload
    rely_data = 3    # data this case relies on
    expect_code = 4  # expected response code
    res_data = 5     # actual response payload
    check_point = 6  # assertion/check definition
    status = 7       # record status flag
    create_time = 8  # creation timestamp
class DATA_STORAGE:
    """Column-index constants for rows of the data-storage table (see
    the NOTE on :class:`API`)."""
    api_id = 0       # foreign key to the API
    case_id = 1      # foreign key to the case
    rely_data = 2    # stored dependency data
    create_time = 3  # creation timestamp
|
import cv2
import numpy as np
import imutils
# Rotation demo: show the original image, then three rotated variants.
# Each window waits for a key press before the next one appears.
image = cv2.imread("image/picasso.jpg")
cv2.imshow("Original", image)
cv2.waitKey(0)

(h, w) = image.shape[:2]
center = (w // 2, h // 2)

# Rotate about the centre with OpenCV (positive angle = counter-clockwise).
for angle, title in ((45, "Rotated by 45 Degrees"),
                     (-90, "Rotated By -90 Degrees")):
    matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
    turned = cv2.warpAffine(image, matrix, (w, h))
    cv2.imshow(title, turned)
    cv2.waitKey(0)

# imutils.rotate wraps the same matrix/warpAffine steps in one call.
rotated = imutils.rotate(image, 180)
cv2.imshow("Rotated by 180 Degrees", rotated)
cv2.waitKey(0)
import math
import copy;
import numpy as np
from numpy import linalg as LA
from sklearn.utils import check_array
import skimage.measure as measure;
import skimage.metrics as metrics;
import skimage.exposure as exposure;
from skimage import filters
# Metric names grouped by optimisation direction: for MINIMISATION
# metrics lower is better, for MAXIMISATION metrics higher is better.
# Idiom fix: build the sets directly with set literals instead of
# appending to a list and converting.
MINIMISATION = {
    "SAE",
    "SSE",
    "MAE",
    "MSE",
    "RMSE",
    "NRMSE_euclidean",
    "NRMSE_mean",
    "NRMSE_min_max",
    "mean_relative_error",
    "max_relative_error",
}

MAXIMISATION = {
    "cosine_similarity",
    "SSIM",
    "PSNR",
    "ZNCC",
}
def getEntropy(anImage):
    """Shannon entropy of the image after rescaling it to 8-bit grey
    levels (via linearNormalisation into [0, 255])."""
    grey_8bit = (linearNormalisation(anImage, 0, 255)).astype(np.uint8)
    return measure.shannon_entropy(grey_8bit)
def zeroMeanNormalisation(anImage):
    """Return the image standardised to zero mean and unit variance."""
    centred = anImage - anImage.mean()
    return centred / anImage.std()
def linearNormalisation(anImage, aMinValue = 0, aMaxValue = 1):
    """Affinely rescale the image's z-scores by (aMaxValue - aMinValue)
    and shift by aMinValue.

    NOTE(review): despite the name, this is a z-score based scaling, not
    a min/max rescale -- the output is NOT clamped to
    [aMinValue, aMaxValue].  Confirm this is intended before reuse.
    """
    span = aMaxValue - aMinValue
    zscores = (anImage - anImage.mean()) / anImage.std()
    return aMinValue + span * zscores
def normalise(anImage):
    """Default normalisation used across this module: delegates to
    linearNormalisation with its default [0, 1] bounds."""
    return linearNormalisation(anImage)
def productImage(anImage1, anImage2):
    """Element-wise (Hadamard) product of two images of equal shape."""
    check_array(anImage1, anImage2)
    return np.multiply(anImage1, anImage2)
def getHistogram(anImage, aNumberOfBins):
    """Image histogram via skimage.exposure with the given bin count."""
    return exposure.histogram(anImage, aNumberOfBins)
def getSAE(aReferenceVector, aTestVector):
    """Sum of absolute errors between reference and test."""
    check_array(aReferenceVector, aTestVector)
    diff = np.subtract(aReferenceVector, aTestVector)
    return np.abs(diff).sum()

def getMAE(aReferenceVector, aTestVector):
    """Mean absolute error between reference and test."""
    check_array(aReferenceVector, aTestVector)
    diff = np.subtract(aReferenceVector, aTestVector)
    return np.abs(diff).mean()

def getCosineSimilarity(aReferenceVector, aTestVector):
    """Cosine of the angle between the two inputs, flattened to 1-D."""
    check_array(aReferenceVector, aTestVector)
    ref = aReferenceVector.flatten()
    tst = aTestVector.flatten()
    return np.dot(ref, tst) / (LA.norm(ref) * LA.norm(tst))

def getMeanRelativeError(aReferenceVector, aTestVector):
    """Mean of |ref - test| / ref, element-wise."""
    check_array(aReferenceVector, aTestVector)
    rel = np.divide(np.subtract(aReferenceVector, aTestVector), aReferenceVector)
    return np.abs(rel).mean()

def getMaxRelativeError(aReferenceVector, aTestVector):
    """Largest |ref - test| / ref over all elements."""
    check_array(aReferenceVector, aTestVector)
    rel = np.divide(np.subtract(aReferenceVector, aTestVector), aReferenceVector)
    return np.abs(rel).max()
def getSSIM(aReferenceVector, aTestVector):
    """Structural similarity index between the two images."""
    check_array(aReferenceVector, aTestVector)
    return metrics.structural_similarity(aReferenceVector, aTestVector)

def getSSE(aReferenceVector, aTestVector):
    """Sum of squared errors."""
    check_array(aReferenceVector, aTestVector)
    squared = np.square(np.subtract(aReferenceVector, aTestVector))
    return squared.sum()

def getMSE(aReferenceVector, aTestVector):
    """Mean squared error (delegated to skimage.metrics)."""
    check_array(aReferenceVector, aTestVector)
    return metrics.mean_squared_error(aReferenceVector, aTestVector)

def getRMSE(aReferenceVector, aTestVector):
    """Root mean squared error."""
    check_array(aReferenceVector, aTestVector)
    return math.sqrt(getMSE(aReferenceVector, aTestVector))

def getNRMSE_euclidean(aReferenceVector, aTestVector):
    """RMSE normalised by the Euclidean norm of the reference."""
    check_array(aReferenceVector, aTestVector)
    return metrics.normalized_root_mse(
        aReferenceVector, aTestVector, normalization='euclidean')

def getNRMSE_mean(aReferenceVector, aTestVector):
    """RMSE normalised by the mean of the reference."""
    check_array(aReferenceVector, aTestVector)
    return metrics.normalized_root_mse(
        aReferenceVector, aTestVector, normalization='mean')

def getNRMSE_minMax(aReferenceVector, aTestVector):
    """RMSE normalised by the value range of the reference."""
    check_array(aReferenceVector, aTestVector)
    return metrics.normalized_root_mse(
        aReferenceVector, aTestVector, normalization='min-max')

def getPSNR(aReferenceVector, aTestVector):
    """Peak signal-to-noise ratio, using the reference's value range."""
    check_array(aReferenceVector, aTestVector)
    value_range = aReferenceVector.max() - aReferenceVector.min()
    return metrics.peak_signal_noise_ratio(
        aReferenceVector, aTestVector, data_range=value_range)

def getNCC(aReferenceVector, aTestVector):
    """Zero-normalised cross-correlation: mean of the product of the
    two z-scored images."""
    check_array(aReferenceVector, aTestVector)
    ref_z = zeroMeanNormalisation(aReferenceVector)
    tst_z = zeroMeanNormalisation(aTestVector)
    return productImage(ref_z, tst_z).mean()
def getTV(anImage):
    """Total-variation style measure: mean absolute horizontal plus
    mean absolute vertical Prewitt gradient."""
    grad_h = filters.prewitt_h(anImage)
    grad_v = filters.prewitt_v(anImage)
    return np.abs(grad_h).mean() + np.abs(grad_v).mean()
def cropCenter(img, cropx, cropy):
    """Return the central ``cropy``-by-``cropx`` window of a 2-D image."""
    rows, cols = img.shape
    left = cols // 2 - (cropx // 2)
    top = rows // 2 - (cropy // 2)
    return img[top:top + cropy, left:left + cropx]
|
import lasagne
from numpy.random import RandomState
import theano
import theano.tensor as T
from collections import OrderedDict
from braindecode.veganlasagne.remember import RememberBest
from braindecode.veganlasagne.stopping import Or, MaxEpochs, ChanBelow
import logging
import numpy as np
from pylearn2.config import yaml_parse
from pylearn2.utils.timing import log_timing
from copy import deepcopy
from braindecode.datahandling.splitters import (SingleFoldSplitter,
PreprocessedSplitter, FixedTrialSplitter)
from braindecode.veganlasagne.monitors import MonitorManager, MisclassMonitor,\
LossMonitor, RuntimeMonitor
from braindecode.datahandling.batch_iteration import BalancedBatchIterator
from braindecode.veganlasagne.layers import get_n_sample_preds,\
get_input_time_length, get_model_input_window
from braindecode.veganlasagne.layer_util import layers_to_str
log = logging.getLogger(__name__)
class ExperimentCrossValidation():
    """Run the same Experiment configuration once per cross-validation
    fold, collecting the trained layers and monitor channels."""

    def __init__(self, final_layer, dataset, exp_args, n_folds, shuffle):
        self.final_layer = final_layer
        self.dataset = dataset
        self.exp_args = exp_args
        self.n_folds = n_folds
        self.shuffle = shuffle

    def setup(self):
        """Seed the lasagne RNG for reproducible runs."""
        lasagne.random.set_rng(RandomState(9859295))

    def run(self):
        """Train one Experiment per fold; results accumulate in
        ``self.all_layers`` and ``self.all_monitor_chans``."""
        self.all_layers = []
        self.all_monitor_chans = []
        for fold_idx in range(self.n_folds):
            log.info("Running fold {:d} of {:d}".format(fold_idx + 1,
                self.n_folds))
            # Deep-copy layers/args so folds cannot influence each other.
            fold_layers = deepcopy(self.final_layer)
            fold_exp_args = deepcopy(self.exp_args)
            ## make sure dataset is loaded...
            self.dataset.ensure_is_loaded()
            fold_splitter = SingleFoldSplitter(
                n_folds=self.n_folds, i_test_fold=fold_idx,
                shuffle=self.shuffle)
            exp = Experiment(fold_layers, self.dataset, fold_splitter,
                **fold_exp_args)
            exp.setup()
            exp.run()
            self.all_layers.append(deepcopy(exp.final_layer))
            self.all_monitor_chans.append(deepcopy(exp.monitor_chans))
def create_default_experiment(final_layer, dataset, n_epochs=100,
        **overwrite_args):
    """Build an :class:`Experiment` with sensible defaults.

    Any keyword in ``overwrite_args`` replaces the corresponding default
    argument of ``Experiment``.
    """
    # Special-case the splitter: building the default one touches
    # dataset.X, which might not exist on every dataset.
    if 'splitter' in overwrite_args:
        splitter = overwrite_args['splitter']
    else:
        n_trials = len(dataset.X)
        splitter = FixedTrialSplitter(n_train_trials=n_trials // 2,
            valid_set_fraction=0.2)
    default_args = dict(
        splitter=splitter,
        preprocessor=None,
        iterator=BalancedBatchIterator(batch_size=45),
        loss_expression=lasagne.objectives.categorical_crossentropy,
        updates_expression=lasagne.updates.adam,
        updates_modifier=None,
        monitors=[MisclassMonitor(), LossMonitor(), RuntimeMonitor()],
        stop_criterion=MaxEpochs(n_epochs),
        remember_best_chan='valid_misclass',
        run_after_early_stop=True,
        batch_modifier=None)
    default_args.update(**overwrite_args)
    return Experiment(final_layer, dataset, **default_args)
class Experiment(object):
    """One full training run of a lasagne network with early stopping.

    Training happens in two phases: phase 1 trains on the train set until
    ``stop_criterion`` fires while ``RememberBest`` tracks the best epoch
    of ``remember_best_chan``; phase 2 (only if ``run_after_early_stop``)
    resets to the best model and continues on train+valid until the valid
    loss reaches the best train loss (see ``setup_after_stop_training``).
    """
    def __init__(self, final_layer, dataset, splitter, preprocessor,
            iterator, loss_expression, updates_expression, updates_modifier,
            monitors, stop_criterion, remember_best_chan, run_after_early_stop,
            batch_modifier=None):
        self.final_layer = final_layer
        self.dataset = dataset
        # Splitter and preprocessor are combined into one provider object.
        self.dataset_provider = PreprocessedSplitter(splitter, preprocessor)
        self.preprocessor=preprocessor
        self.iterator = iterator
        self.loss_expression = loss_expression
        self.updates_expression = updates_expression
        self.updates_modifier = updates_modifier
        self.monitors = monitors
        self.stop_criterion = stop_criterion
        self.monitor_manager = MonitorManager(monitors)
        self.remember_extension = RememberBest(remember_best_chan)
        self.run_after_early_stop = run_after_early_stop
        self.batch_modifier = batch_modifier

    def setup(self, target_var=None):
        """Seed the RNG, load the dataset and compile the theano functions."""
        lasagne.random.set_rng(RandomState(9859295))
        self.dataset.ensure_is_loaded()
        self.print_layer_sizes()
        log.info("Create theano functions...")
        self.create_theano_functions(target_var)
        # reset remember best extension in case you rerun some experiment
        self.remember_extension = RememberBest(
            self.remember_extension.chan_name)
        log.info("Done.")

    def print_layer_sizes(self):
        """Log a human-readable summary of the network architecture."""
        log.info("Layers...")
        # start on newline so everything starts from left end of terminal,
        # including input layer string
        log.info('\n' + layers_to_str(self.final_layer))

    def create_theano_functions(self, target_var, deterministic_training=False):
        """Compile the train function and the monitoring theano functions.

        When ``target_var`` is None its dtype/rank is derived either from a
        dataset-supplied dummy y or from an example batch of the test set.
        """
        if target_var is None:
            if hasattr(self.dataset, 'get_dummy_y'):
                log.info("Use dataset-supplied dummy y to determine "
                    "shape and type of target variable")
                dummy_y = self.dataset.get_dummy_y()
                # tensor with as many dimensions as y
                target_type = T.TensorType(
                    dtype=dummy_y.dtype,
                    broadcastable=[False]*len(dummy_y.shape))
                target_var = target_type()
            else:
                log.info("Automatically determine size of target variable by example...")
                # get a dummy batch and determine target size
                # use test set since it is smaller
                # maybe memory is freed quicker
                # prevent reloading at this step?
                was_reloadable = self.dataset.reloadable
                self.dataset.reloadable = False
                test_set = self.dataset_provider.get_train_valid_test(self.dataset)['test']
                self.dataset.reloadable = was_reloadable
                batches = self.iterator.get_batches(test_set, shuffle=False)
                # NOTE: .next() is the Python 2 iterator protocol.
                dummy_batch = batches.next()
                dummy_y = dummy_batch[1]
                del test_set
                # tensor with as many dimensions as y
                target_type = T.TensorType(
                    dtype=dummy_y.dtype,
                    broadcastable=[False]*len(dummy_y.shape))
                target_var = target_type()
                self.dataset.ensure_is_loaded()
        prediction = lasagne.layers.get_output(self.final_layer,
            deterministic=deterministic_training)
        # test as in during testing not as in "test set"
        test_prediction = lasagne.layers.get_output(self.final_layer,
            deterministic=True)
        # Loss function might need layers or not...
        try:
            loss = self.loss_expression(prediction, target_var).mean()
            test_loss = self.loss_expression(test_prediction, target_var).mean()
        except TypeError:
            loss = self.loss_expression(prediction, target_var, self.final_layer).mean()
            test_loss = self.loss_expression(test_prediction, target_var, self.final_layer).mean()
        # create parameter update expressions
        params = lasagne.layers.get_all_params(self.final_layer, trainable=True)
        updates = self.updates_expression(loss, params)
        if self.updates_modifier is not None:
            # put norm constraints on all layer, for now fixed to max kernel norm
            # 2 and max col norm 0.5
            updates = self.updates_modifier.modify(updates, self.final_layer)
        input_var = lasagne.layers.get_all_layers(self.final_layer)[0].input_var
        # Store all parameters, including update params like adam params,
        # needed for resetting to best model after early stop
        # not sure why i am not only doing update params below
        # possibly because batch norm is not in update params?
        all_layer_params = lasagne.layers.get_all_params(self.final_layer)
        self.all_params = all_layer_params
        # now params from adam would still be missing... add them ...
        all_update_params = updates.keys()
        for param in all_update_params:
            if param not in self.all_params:
                self.all_params.append(param)
        self.train_func = theano.function([input_var, target_var], updates=updates)
        self.monitor_manager.create_theano_functions(input_var, target_var,
            test_prediction, test_loss)

    def run(self):
        """Execute the full two-phase training procedure."""
        log.info("Run until first stop...")
        self.run_until_early_stop()
        # always setup for second stop, in order to get best model
        # even if not running after early stop...
        log.info("Setup for second stop...")
        self.setup_after_stop_training()
        if self.run_after_early_stop:
            log.info("Run until second stop...")
            self.run_until_second_stop()
            self.readd_old_monitor_chans()

    def run_until_early_stop(self):
        """Phase 1: train on the training split until the stop criterion fires."""
        log.info("Split/Preprocess datasets...")
        datasets = self.dataset_provider.get_train_valid_test(self.dataset)
        log.info("...Done")
        self.create_monitors(datasets)
        self.run_until_stop(datasets, remember_best=True)
        return datasets

    def run_until_stop(self, datasets, remember_best):
        """Run epochs (after one initial monitor pass) until should_stop()."""
        self.monitor_epoch(datasets)
        self.print_epoch()
        if remember_best:
            self.remember_extension.remember_epoch(self.monitor_chans,
                self.all_params)
        self.iterator.reset_rng()
        while not self.stop_criterion.should_stop(self.monitor_chans):
            self.run_one_epoch(datasets, remember_best)

    def run_one_epoch(self, datasets, remember_best):
        """One pass over the training data, then monitor and (maybe) remember."""
        batch_generator = self.iterator.get_batches(datasets['train'],
            shuffle=True)
        with log_timing(log, None, final_msg='Time updates following epoch:'):
            for inputs, targets in batch_generator:
                if self.batch_modifier is not None:
                    inputs, targets = self.batch_modifier.process(inputs,
                        targets)
                # could happen that batch modifier has removed all inputs...
                if len(inputs) > 0:
                    self.train_func(inputs, targets)
        self.monitor_epoch(datasets)
        self.print_epoch()
        if remember_best:
            self.remember_extension.remember_epoch(self.monitor_chans,
                self.all_params)

    def setup_after_stop_training(self):
        """Reset to the best model and build the phase-2 stop criterion."""
        # also remember old monitor chans, will be put back into
        # monitor chans after experiment finished
        self.old_monitor_chans = deepcopy(self.monitor_chans)
        self.remember_extension.reset_to_best_model(self.monitor_chans,
            self.all_params)
        loss_to_reach = self.monitor_chans['train_loss'][-1]
        # Phase 2 stops when the valid loss reaches the best train loss,
        # or after twice as many epochs as the best epoch number.
        self.stop_criterion = Or(stop_criteria=[
            MaxEpochs(num_epochs=self.remember_extension.best_epoch * 2),
            ChanBelow(chan_name='valid_loss', target_value=loss_to_reach)])
        log.info("Train loss to reach {:.5f}".format(loss_to_reach))

    def run_until_second_stop(self):
        """Phase 2: continue training on the merged train+valid split."""
        datasets = self.dataset_provider.get_train_merged_valid_test(
            self.dataset)
        self.run_until_stop(datasets, remember_best=False)

    def create_monitors(self, datasets):
        """Initialize one monitor channel dict shared by all monitors."""
        self.monitor_chans = OrderedDict()
        self.last_epoch_time = None
        for monitor in self.monitors:
            monitor.setup(self.monitor_chans, datasets)

    def monitor_epoch(self, all_datasets):
        """Append the current epoch's value to every monitor channel."""
        self.monitor_manager.monitor_epoch(self.monitor_chans, all_datasets,
            self.iterator)

    def print_epoch(self):
        """Log the most recent value of every monitor channel."""
        # -1 due to doing one monitor at start of training
        # NOTE: .values()[0] requires Python 2 (dict views are not
        # indexable on Python 3).
        i_epoch = len(self.monitor_chans.values()[0]) - 1
        log.info("Epoch {:d}".format(i_epoch))
        for chan_name in self.monitor_chans:
            log.info("{:25s} {:.5f}".format(chan_name,
                self.monitor_chans[chan_name][-1]))
        log.info("")

    def readd_old_monitor_chans(self):
        """Re-attach the phase-1 channels under a 'before_reset_' prefix."""
        for key in self.old_monitor_chans:
            new_key = 'before_reset_' + key
            self.monitor_chans[new_key] = self.old_monitor_chans[key]
def load_layers_from_dict(train_dict):
    """Extract the layer list from a parsed train dict.

    ``train_dict['layers']`` may be the list itself or an object that
    returns the list via ``get_layers()``.
    """
    candidate = train_dict['layers']
    # Anything sized is taken to be the layer sequence itself.
    return candidate if hasattr(candidate, '__len__') else candidate.get_layers()
def create_experiment(yaml_filename, seed=9859295):
    """Utility function to create experiment from yaml file"""
    # for reproducibility for layer weights
    # should be same seed as in experiment_runner.py
    lasagne.random.set_rng(RandomState(seed))
    train_dict = yaml_parse.load(open(yaml_filename, 'r'))
    layers = load_layers_from_dict(train_dict)
    final_layer = layers[-1]
    dataset = train_dict['dataset']
    splitter = train_dict['dataset_splitter']
    # Networks with strided layers predict several samples per window;
    # propagate that count to monitors and the iterator.
    if (np.any([hasattr(l, 'n_stride') for l in layers])):
        n_sample_preds = get_n_sample_preds(final_layer)
        # for backwards compatibility input time length also
        input_time_length = get_input_time_length(final_layer)
        log.info("Setting n_sample preds automatically to {:d}".format(
            n_sample_preds))
        for monitor in train_dict['exp_args']['monitors']:
            if hasattr(monitor, 'n_sample_preds'):
                monitor.n_sample_preds = n_sample_preds
            if hasattr(monitor, 'input_time_length'):
                monitor.input_time_length = input_time_length
        train_dict['exp_args']['iterator'].n_sample_preds = n_sample_preds
        log.info("Input window length is {:d}".format(
            get_model_input_window(final_layer)))
    # add early stop chan, necessary for backwards compatibility
    exp_args = train_dict['exp_args']
    exp_args['remember_best_chan'] = train_dict['exp_args'].pop('remember_best_chan',
        'valid_misclass')
    exp_args['run_after_early_stop'] = train_dict['exp_args'].pop('run_after_early_stop',
        True)
    exp = Experiment(final_layer, dataset, splitter,
        **exp_args)
    # Sanity check: every layer named in the yaml must be part of the
    # final network graph.
    assert len(np.setdiff1d(layers,
        lasagne.layers.get_all_layers(final_layer))) == 0, ("All layers "
        "should be used, unused {:s}".format(str(np.setdiff1d(layers,
        lasagne.layers.get_all_layers(final_layer)))))
    return exp
|
from calculate_next_step import calculate_next_step
import time
# funtion: count_steps
# dependency: check_steps()
# input: state_1, state_2
# output: steps
# description: use check_steps() to calculate steps from state_1 to state_2
# funtion: count_steps
# dependency: calculate_next_step()
# input: state_1, state_2
# output: steps (int)
# description: expand states breadth-first with calculate_next_step()
# and count how many expansions are needed to reach state_2.
def count_steps(state_1, state_2):
    """Return the number of calculate_next_step() expansions from
    state_1 to state_2.

    Raises ValueError when state_2 is unreachable (the previous version
    looped forever in that case).
    """
    if state_1 == state_2:
        return 0
    steps = 0
    frontier = [state_1]
    # assumes states are hashable (tuples in the test code below) -- TODO confirm
    seen = {state_1}
    while True:
        steps += 1
        frontier = calculate_next_step(frontier)
        if state_2 in frontier:
            return steps
        # BUG FIX: detect exhaustion of the state space instead of
        # spinning forever when state_2 can never be reached.
        fresh = [s for s in frontier if s not in seen]
        if not fresh:
            raise ValueError("state_2 is unreachable from state_1")
        seen.update(fresh)
# test code
if __name__ == "__main__":
    # Start from the solved configuration.
    state_1 = (1, 2, 3, 4, 5, 6, 7, 8)
    # Alternative targets kept for manual experimentation:
    # state_2 = (1, 2, 3, 4, 5, 6, 7, 8)
    # state_2 = (2, 6, 8, 4, 5, 7, 3, 1)
    # state_2 = (1, 5, 3, 2, 4, 6, 7, 8)
    state_2 = (7, 2, 1, 5, 4, 3, 6, 8)
    steps = count_steps(state_1, state_2)
    print('steps =', steps)
class Solution(object):
    """LeetCode 43 'Multiply Strings'."""

    def multiply(self, num1, num2):
        """Return the product of two non-negative decimal integers given
        as strings, as a string.

        Parses the digits manually (without int()) in keeping with the
        spirit of the exercise.
        """
        n1 = 0
        for ch in num1:
            n1 = n1 * 10 + (ord(ch) - 48)
        n2 = 0
        for ch in num2:
            n2 = n2 * 10 + (ord(ch) - 48)
        # BUG FIX: removed leftover debug print(n1, n2) that polluted stdout.
        return str(n1 * n2)
import setuptools
# Packaging metadata for the Seebibyte Visual Tracker (SVT).
setuptools.setup(
  name="svt",
  version="2.0.1",
  author="Abhishek Dutta and Li Zhang",
  author_email="adutta@robots.ox.ac.uk, lz@robots.ox.ac.uk",
  description="Seebibyte Visual Tracker can be used to track any object in a video.",
  license="BSD",
  keywords='visual tracker, object tracker, svt, video tracker',
  long_description='SVT is a visual tracking software that can track any object in a video.',
  long_description_content_type="text/markdown",
  url="http://www.robots.ox.ac.uk/~vgg/projects/seebibyte/software/svt/",
  packages=setuptools.find_packages(),
  classifiers=[
    "Programming Language :: Python :: 3",
    "License :: OSI Approved :: BSD License",
    "Operating System :: OS Independent",
  ],
  python_requires='>=3.7',
  # Runtime deps are expected to be installed separately.
  install_requires=[], #['opencv-python', 'pytorch', 'torchvision'],
  entry_points={
      'console_scripts': [
          'svt=svt.main:main',
      ],
  },
  # data_files=[('svt/ui', ['svt/ui/svt.html'])]
  # Ship the bundled UI page with the package.
  package_data={'svt':['ui/svt.html']}
)
|
# Definition for singly-linked list.
class ListNode(object):
    """A single node of a singly-linked list."""

    def __init__(self, x):
        self.val = x      # payload value
        self.next = None  # successor node, or None at the tail
class Solution(object):
    """LeetCode 237: delete a node given access only to that node."""

    def deleteNode(self, node):
        """Remove ``node`` from its list by copying the successor into it.

        The tail node cannot be removed this way, so it is left untouched.
        """
        successor = node.next
        if successor is None:
            return
        node.val = successor.val
        node.next = successor.next
# Build the list 1 -> 2 -> 3 -> 4.
head = ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(3)
head.next.next.next = ListNode(4)
# Delete the node holding 3 (we only have the node, not its predecessor).
Solution().deleteNode(head.next.next)
# Print the remaining values.
curr = head
while curr != None:
    print(curr.val)
    curr = curr.next
#1 2 4
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from my_diary.models import Diary
from datetime import date
def root_page(request):
    """Redirect the bare root URL to the home page."""
    return HttpResponseRedirect('home/')
def home_page(request):
    """Render the diary index with all entries and today's date."""
    context = {
        'diaries': Diary.objects.all(),
        'today': date.today(),
    }
    # BUG FIX: render() already returns an HttpResponse; wrapping it in
    # another HttpResponse serialized the response object into the body.
    return render(request, 'index.html', context)
from echo.src.base_objective import BaseObjective
import numpy as np
from mlmicrophysics.models import DenseNeuralNetwork
import pandas as pd
from os.path import join
import pickle
from sklearn.metrics import r2_score
class Objective(BaseObjective):
    """ECHO hyperparameter-search objective for the mlmicrophysics
    DenseNeuralNetwork emulator."""
    def __init__(self, config, metric="val_loss"):
        BaseObjective.__init__(self, config, metric)
    def train(self, trial, conf):
        """Train one candidate model and report its validation score.

        Reads quantile-transformed inputs/outputs (and raw outputs) for the
        train/val splits from conf["data"]["scratch_path"], fits a
        DenseNeuralNetwork configured by conf["model"], inverse-transforms
        the validation predictions, and scores them in log10 space.
        """
        input_quant_data = {}
        output_quant_data = {}
        output_data = {}
        subsets = ["train", "val"]
        for subset in subsets:
            input_quant_data[subset] = pd.read_parquet(join(conf["data"]["scratch_path"],
                                                            f"mp_quant_input_{subset}.parquet"))
            output_quant_data[subset] = pd.read_parquet(join(conf["data"]["scratch_path"],
                                                             f"mp_quant_output_{subset}.parquet"))
            output_data[subset] = pd.read_parquet(join(conf["data"]["scratch_path"], f"mp_output_{subset}.parquet"))
        # Scaler fitted during preprocessing; used to undo the quantile transform.
        with open(join(conf["data"]["out_path"], "output_quantile_transform.pkl"), "rb") as out_scaler_pickle:
            output_scaler = pickle.load(out_scaler_pickle)
        dnn = DenseNeuralNetwork(**conf["model"])
        dnn.fit(input_quant_data["train"], output_quant_data["train"])
        val_quant_preds = dnn.predict(input_quant_data["val"], batch_size=40000)
        val_preds = output_scaler.inverse_transform(val_quant_preds)
        # NOTE(review): this "val_loss" is an R^2 score in log10 space
        # (higher is better) -- confirm the search direction matches.
        val_r2 = r2_score(np.log10(output_data["val"]), np.log10(val_preds))
        results_dict = {"val_loss": val_r2}
        return results_dict
|
from typing import Any, Dict
import torch
from torch.nn.functional import one_hot
from torchvision.prototype import tv_tensors as proto_tv_tensors
from torchvision.transforms.v2 import Transform
class LabelToOneHot(Transform):
    """Convert ``Label`` tensors into one-hot ``OneHotLabel`` tensors."""

    _transformed_types = (proto_tv_tensors.Label,)

    def __init__(self, num_categories: int = -1):
        super().__init__()
        self.num_categories = num_categories

    def _transform(self, inpt: proto_tv_tensors.Label, params: Dict[str, Any]) -> proto_tv_tensors.OneHotLabel:
        # Fall back to the label's own category list when no explicit
        # count was configured.
        n_cats = self.num_categories
        if n_cats == -1 and inpt.categories is not None:
            n_cats = len(inpt.categories)
        encoded = one_hot(inpt.as_subclass(torch.Tensor), num_classes=n_cats)
        return proto_tv_tensors.OneHotLabel(encoded, categories=inpt.categories)

    def extra_repr(self) -> str:
        return "" if self.num_categories == -1 else f"num_categories={self.num_categories}"
|
# https://oeis.org/A141481/b141481.txt
# lookup value at i = 58 |
# -*- coding: utf-8 -*-
from typing import List
class UnionFind:
    """Weighted quick-union with path halving."""

    def __init__(self, n: int):
        self.ids = list(range(n))  # parent links; a root points to itself
        self.sizes = [1] * n       # component sizes, meaningful at roots only

    def root(self, i: int) -> int:
        """Return the root of *i*, halving the path along the way."""
        while self.ids[i] != i:
            self.ids[i] = self.ids[self.ids[i]]  # path halving
            i = self.ids[i]
        return i

    def union(self, p: int, q: int):
        """Merge the components of *p* and *q*, smaller tree under larger."""
        rp, rq = self.root(p), self.root(q)
        if rp == rq:
            return
        if self.sizes[rp] < self.sizes[rq]:
            rp, rq = rq, rp
        self.ids[rq] = rp
        self.sizes[rp] += self.sizes[rq]

    def find(self, p: int, q: int) -> bool:
        """Return True when *p* and *q* belong to the same component."""
        return self.root(p) == self.root(q)
class Solution:
    def validPath(self, n: int, edges: List[List[int]], start: int, end: int) -> bool:
        """Return True when *start* and *end* are connected in the
        undirected graph with *n* vertices and the given edge list."""
        dsu = UnionFind(n)
        for a, b in edges:
            dsu.union(a, b)
        return dsu.find(start, end)
if __name__ == "__main__":
    solution = Solution()
    # Triangle graph: 0 and 2 are connected.
    assert solution.validPath(3, [[0, 1], [1, 2], [2, 0]], 0, 2)
    # Two disjoint components {0, 1, 2} and {3, 4, 5}.
    assert not solution.validPath(6, [[0, 1], [0, 2], [3, 5], [5, 4], [4, 3]], 0, 5)
|
from unittest import TestCase
from app import app
from models import db, User, DEFAULT_IMG_URL, Post, Tag, PostTag
import time, datetime
# Perform tests on a Test database
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///blogly_test'
app.config['SQLALCHEMY_ECHO'] = False  # keep SQL logging quiet during tests
# Rebuild the schema from scratch for this test run.
db.drop_all()
db.create_all()
class UserModelTestCase(TestCase):
    """Unit tests for the User model."""

    def setUp(self):
        """Delete any leftover database entries."""
        Post.query.delete()
        User.query.delete()
        Tag.query.delete()

    def tearDown(self):
        """Clean up fouled transactions."""
        db.session.rollback()

    def test_fullNameProp(self):
        """full_name joins first and last name with a space."""
        person = User(first_name="John", last_name="Doe")
        self.assertEqual(person.full_name, "John Doe")

    def test_defaultImage(self):
        """image_url falls back to DEFAULT_IMG_URL when not given."""
        plain = User(first_name="John", last_name="Doe")
        custom_url = "https://duckduckgo.com/assets/bathroom.png"
        pictured = User(first_name="Jane", last_name="Doe", image_url=custom_url)
        db.session.add(plain)
        db.session.add(pictured)
        db.session.commit()
        self.assertEqual(plain.image_url, DEFAULT_IMG_URL)
        self.assertEqual(pictured.image_url, custom_url)

    def test_sortedQuery(self):
        """get_all returns users sorted by name."""
        user_jd = User(first_name="John", last_name="Doe")
        user_jane = User(first_name="Jane", last_name="Doe")
        user_jf = User(first_name="John", last_name="Fitzgerald")
        db.session.add_all([user_jd, user_jane, user_jf])
        db.session.commit()
        self.assertEqual(User.get_all(), [user_jane, user_jd, user_jf])
class PostModelTestCase(TestCase):
    """Tests for the Post model's automatic timestamping."""
    def setUp(self):
        """Delete leftover DB entries and make a new entry, cache ID and timestamp"""
        Post.query.delete()
        User.query.delete()
        Tag.query.delete()
        user = User(first_name="John", last_name="Doe")
        db.session.add(user)
        db.session.commit()
        post = Post(title="My kitten", content="Look at my kitten, ain't she cute?", poster_id=user.id)
        db.session.add(post)
        db.session.commit()
        # Cache for comparisons in the tests below.
        self.time_stamp = post.created_at
        self.user_id = user.id
    def tearDown(self):
        """Clean up fouled transactions"""
        db.session.rollback()
    def test_auto_timestamping(self):
        """created_at is set automatically and differs between posts."""
        setup_post = Post.query.filter_by(title='My kitten').one()
        self.assertEqual(setup_post.created_at, self.time_stamp)
        time_marker = datetime.datetime.now()
        # Sleep so the second post is guaranteed a later timestamp.
        time.sleep(1)
        new_post = Post(title="My dog", content="Look at my dog, ain't he cute", poster_id=self.user_id)
        db.session.add(new_post)
        db.session.commit()
        self.assertNotEqual(time_marker, new_post.created_at)
        self.assertNotEqual(new_post.created_at, setup_post.created_at)
class PostTagModelTestCase(TestCase):
    """Tests for the Post<->Tag many-to-many relationship."""
    def setUp(self):
        """Reset tables, then create one user, one post and two tags."""
        Post.query.delete()
        User.query.delete()
        Tag.query.delete()
        user = User(first_name="John", last_name="Doe")
        db.session.add(user)
        db.session.commit()
        post = Post(title="My kitten", content="Look at my kitten, ain't she cute?", poster_id=user.id)
        db.session.add(post)
        db.session.commit()
        tag1 = Tag(name="pets")
        tag2 = Tag(name="winning")
        db.session.add_all([tag1, tag2])
        db.session.commit()
        # Cache generated primary keys for the tests.
        self.user_id = user.id
        self.post_id = post.id
        self.tag1_id = tag1.id
        self.tag2_id = tag2.id
    def tearDown(self):
        """Clean up fouled transactions."""
        db.session.rollback()
    def test_posts_relationship(self):
        """Tags attach to posts; deleting a tag detaches it from posts."""
        ## Adding
        post = Post.query.get(self.post_id)
        tag1 = Tag.query.get(self.tag1_id)
        tag2 = Tag.query.get(self.tag2_id)
        post.tags.append(tag1)
        post.tags.append(tag2)
        db.session.add(post)
        db.session.commit()
        self.assertIn(tag1, post.tags)
        self.assertIn(tag2, post.tags)
        ## Deletion cascade
        Tag.query.filter_by(id=self.tag1_id).delete()
        db.session.commit()
        self.assertNotIn(tag1, post.tags)
        self.assertIn(tag2, post.tags)
#-*-coding:utf-8-*-
#__author__='maxiaohui'
from utils import apitest,filesHandler
import unittest
from config import config
class webserver(unittest.TestCase):
    """API smoke tests against the web server.

    Tests are order-dependent (hence the test_01.. prefixes):
    test_01 stores the session cookie in config.cookies, which the
    later tests reuse for authenticated requests.
    """
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_01login_correct(self):
        # Log in and keep the session cookie for the following tests.
        r=apitest.testAPI("get","/api/account/login",params={"password":"2580"})
        config.cookies=r.cookies
        print(r.json())
        self.assertIn("success",r.text)
    # @unittest.skip("skip this test")
    def test_02login_error(self):
        # A wrong password should be rejected.
        r = apitest.testAPI("get", "/api/account/login", params={"password": "324323"})
        print(r.json())
        self.assertIn('login_password_error',r.text)
    # @unittest.skip("skip this test")
    def test_03add_batch(self):
        # Requires the cookie saved by test_01.
        r=apitest.testAPI("get","/api/persons/batch",cookies=config.cookies)
        print(r.json())
    def test_04uploadPic(self):
        # Upload a random image from the configured folder.
        f=open(filesHandler.randomChoiceFile(config.imageFolder),"rb")
        files={"file":("blob",f,"image/jpeg")}
        r=apitest.testAPI("post","/api/upload/image",cookies=config.cookies,files=files)
        print(r.json())
        f.close()
if __name__ == "__main__": # run the suite when executed directly
    unittest.main()
# -*- coding: utf-8 -*-
'''
Created on 2018年1月5日
@author: ls
'''
import os
import datetime
import shelve
import sys
# Seed the shelve database with initial meter readings on first run.
# NOTE(review): this checks/opens a CWD-relative 'fee.db', while the
# __main__ block below uses a script-relative path -- confirm both
# resolve to the same file.
if not os.path.exists('fee.db.dat'):
    print('初始化fee.db')
    with shelve.open('fee.db') as db:
        db['electron'] = [186]  # initial electricity meter reading
        db['water'] = [657]     # initial water meter reading
def totalRent(electriThis, waterThis, property_fee = 97.5, coef_Electron = 0.68, coef_water = 3.8, base = 1800):
    '''
    Compute this month's total rent, print a breakdown and write a log file.

    electriThis / waterThis -- this month's meter readings; last month's
    readings are looked up in the 'fee.db' shelve.

    # base rent
    base = 1800
    # electricity price coefficient
    coef_Electron = 0.68
    # water price coefficient
    coef_water = 3.8
    # property management fee
    property_fee = 97.5
    '''
    # NOTE(review): 'fee.db' is opened relative to the CWD here, while
    # __main__ uses a script-relative path -- confirm they match.
    with shelve.open('fee.db') as db:
        electriLast = db['electron'][-1]
        waterLast = db['water'][-1]
    water_fee = (waterThis - waterLast) * coef_water
    electri_fee = (electriThis - electriLast) * coef_Electron
    total = base + electri_fee + water_fee + property_fee
    print('实际电费:', electri_fee)
    print('实际水费:', water_fee)
    print('\n\n')
    print('----时间: ',datetime.datetime.now())
    print('\n')
    print("----基本:", base, "元")
    print('\n')
    print("----电费:(%s - %s) X %s = %.3f 元" %(electriThis, electriLast, coef_Electron, electri_fee))
    print('\n')
    print("----水费:(00%s - 00%s) X %s = %.3f 元" %(waterThis, waterLast, coef_water, water_fee))
    print('\n')
    print("----物业费:", property_fee, "元")
    print('\n')
    print("----合计:%s + %.3f + %.3f + %s = %.3f 元\n" % (base, electri_fee, water_fee, property_fee, total))
    print('\n')
    # BUG FIX: the old path hard-coded a Windows separator ('log\\') and
    # crashed when the 'log' directory did not exist; build the path
    # portably and create the directory on demand.
    log_dir = os.path.join(os.getcwd(), 'log')
    if not os.path.isdir(log_dir):
        os.makedirs(log_dir)
    filePath = os.path.join(log_dir, str(datetime.datetime.now()).replace(' ', '-').replace(':', '-') + '.log')
    # BUG FIX: write the log as UTF-8 so the Chinese labels do not raise
    # UnicodeEncodeError on systems with a non-Chinese locale encoding.
    with open(filePath, 'w', encoding='utf-8') as f:
        f.write('----时间: ' + str(datetime.datetime.now())+'------------------------------------------\n')
        f.write('\n')
        f.write("----基本:" + str(base) + " 元\n")
        f.write('\n')
        f.write("----电费:(%s - %s) X %s = %.3f 元\n" %(electriThis, electriLast, coef_Electron, electri_fee))
        f.write('\n')
        f.write("----水费:(00%s - 00%s) X %s = %.3f 元\n" %(waterThis, waterLast, coef_water, water_fee))
        f.write('\n')
        f.write("----物业费:" + str(property_fee) + " 元\n")
        f.write('\n')
        f.write("----合计:%s + %.3f + %.3f + %s = %.3f 元\n" % (base, electri_fee, water_fee, property_fee, total))
        f.write('\n')
    print('\n')
    print('日志路径:', filePath, '\n')
if __name__ == '__main__':
    # ini = input("是否打印默认输出?[y/n/enter]:\n")
    # if ini == 'y':
    #     waterUse = 658
    #     electriUse = 347.47
    #     totalRent(electriUse, waterUse)
    #
    # if len(ini) == 0:
    # Resolve the directory containing this script so the shelve file is
    # found regardless of the current working directory.
    script_path = os.path.split(os.path.abspath(sys.argv[0]))[0]
    print("script_path: %s" % script_path)
    electriThis = float(input('输入这个月的电表度数:\n'))
    waterThis = float(input('输入这个月的水表度数:\n'))
    print('\n')
    totalRent(electriThis, waterThis)
    # Any non-empty answer saves this month's readings for next month.
    save = input("是否保存这个月的数据,输入是'y'保存,按回车不保存\n")
    shelve_db_path = os.path.join(script_path, 'fee.db')
    if len(save) != 0:
        with shelve.open(shelve_db_path) as db:
            db['electron'] = db['electron'] + [electriThis]
            db['water'] = db['water'] + [waterThis]
    # Show the stored history.
    with shelve.open(shelve_db_path) as db:
        print('electron:', db['electron'])
        print('water:', db['water'])
    print('\n\n\n')
|
import datetime
from time import time
class Sensor:
    """Snapshot of environmental readings from a single source."""

    def __init__(self):
        self.temperature = 0
        self.humidity = 0
        self.brightness = 0
        self.source = 'A'  # identifier of the originating sensor

    @staticmethod
    def created_at() -> str:
        """Current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
        return datetime.datetime.fromtimestamp(time()).strftime('%Y-%m-%d %H:%M:%S')

    def to_document(self) -> dict:
        """Serialize the readings plus a creation timestamp for storage."""
        doc = {
            "temperature": self.temperature,
            "humidity": self.humidity,
            "brightness": self.brightness,
            "source": self.source,
        }
        doc["created_at"] = Sensor.created_at()
        return doc
|
# encoding: utf-8
"""
@author: Cai Zhongheng
@contact: caiouyang4@gmail.com
@file: inv_function.py
@time: 2019/6/30 11:45
@desc: 使用QR分解来求解方阵的逆矩阵
"""
import numpy as np
import sys
def inv_qr(input_matrix):
    """Invert a square matrix via QR decomposition with Givens rotations.

    The matrix is reduced to upper-triangular R by left-multiplying Givens
    rotations G_k ... G_1 * A = R, while Q accumulates the same product,
    so A^-1 = R^-1 * (G_k ... G_1) = inv_R.dot(Q).
    """
    n = input_matrix.shape[0]
    R = input_matrix.astype(complex)  # complex working copy
    G = np.eye(n, dtype=complex)      # current Givens rotation (starts as identity)
    Q = np.eye(n, dtype=complex)      # accumulated product of rotations
    # Zero the sub-diagonal entries column by column, bottom row upward.
    for col_idx in range(n - 1):
        # BUG FIX: the old range ran down to row_idx == col_idx, building a
        # non-unitary "rotation" that scaled the diagonal row; only strictly
        # sub-diagonal rows must be annihilated.
        for row_idx in range(n - 1, col_idx, -1):
            # Rotation coefficients that annihilate R[row_idx, col_idx].
            if R[col_idx, col_idx] == 0:
                c = 0
                s = 1
            else:
                c = np.abs(R[col_idx, col_idx])/np.sqrt(R[col_idx, col_idx]*np.conj(R[col_idx, col_idx]) +
                                                        R[row_idx, col_idx]*np.conj(R[row_idx, col_idx]))
                s = c*R[row_idx, col_idx]/R[col_idx, col_idx]
            # Embed the 2x2 rotation into the identity.
            G[col_idx, col_idx] = c
            G[row_idx, row_idx] = c
            G[col_idx, row_idx] = np.conj(s)
            G[row_idx, col_idx] = -s
            # BUG FIX: the products were computed and discarded before; the
            # rotation must be applied from the left and stored.
            R = G.dot(R)
            Q = G.dot(Q)
            # Restore G to the identity for the next rotation.
            G = np.eye(n, dtype=complex)
    # Invert the resulting upper-triangular factor and combine.
    inv_R = inv_upper_tri_matrix(R)
    return np.dot(inv_R, Q)
def inv_upper_tri_matrix(input_matrix):
    """Invert an upper-triangular matrix by back substitution.

    Exits the process when a zero appears on the diagonal (singular matrix),
    matching the original behavior.
    """
    n = input_matrix.shape[0]
    # BUG FIX: 'is' compared object identity rather than value; use '=='.
    if n == 1:
        return 1/input_matrix
    output_matrix = np.zeros((n, n), dtype=complex)
    for col_idx in range(n):
        # Diagonal entries first; a zero diagonal means the matrix is singular.
        # BUG FIX: 'is 0' never matched numpy scalars, so the singularity
        # check was dead code.
        if input_matrix[col_idx, col_idx] == 0:
            print('该矩阵不可逆!!!')
            sys.exit()
        else:
            output_matrix[col_idx, col_idx] = 1/input_matrix[col_idx, col_idx]
    # Fill the super-diagonals outward from the main diagonal.
    for idx in range(1, n):
        row_idx = 0
        col_idx = idx
        while row_idx < n and col_idx < n:
            output_matrix[row_idx, col_idx] = -1*np.dot(input_matrix[row_idx, (row_idx+1):(col_idx+1)],
                                                        output_matrix[(row_idx+1):(col_idx+1), col_idx])
            output_matrix[row_idx, col_idx] /= input_matrix[row_idx, row_idx]
            row_idx += 1
            col_idx += 1
    return output_matrix
if __name__ == '__main__':
    np.random.seed(23)
    matrix_len = 4
    input_matrix = np.random.random((matrix_len, matrix_len)) + 1j * np.random.random((matrix_len, matrix_len))
    # input_matrix = np.triu(input_matrix)
    print(input_matrix)
    inv_matrix = inv_qr(input_matrix)
    # BUG FIX: the old check computed np.linalg.inv(inv_matrix) and
    # multiplied the two, which prints the identity for ANY inv_matrix and
    # therefore verified nothing. Multiply the computed inverse by the
    # original matrix instead: the product should be (close to) identity.
    print(np.dot(inv_matrix, input_matrix))
|
import copy
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import numpy as np
from collections import deque
from envs import create_atari_env
from model import ActorCritic
from regularization import MaxDivideMin
from regularization import MaxMinusMin
def ensure_shared_grads(model, shared_model):
    """Link the worker's gradients into the shared model (first writer wins).

    If the shared model already has gradients, another worker has linked
    them, so nothing is touched.
    """
    for local_p, shared_p in zip(model.parameters(), shared_model.parameters()):
        if shared_p.grad is not None:
            return
        shared_p._grad = local_p.grad
def train(rank, args, shared_model, counter, lock, logger, optimizer=None):
    """A3C worker loop: roll out up to num_steps, build the GAE actor-critic
    loss (optionally plus a rank regularizer on internal features) and push
    the local gradients into the shared model.

    rank      -- worker index; offsets the seed and names the sigma log file
    counter   -- shared global step counter, guarded by ``lock``
    optimizer -- shared optimizer; a local Adam over the shared parameters
                 is created when None
    """
    if args.save_sigmas:
        sigmas_f = logger.init_one_sigmas_file(rank)
    # Per-worker seeding so workers decorrelate.
    torch.manual_seed(args.seed + rank)
    env = create_atari_env(args.env_name)
    env.seed(args.seed + rank)
    model = ActorCritic(env.observation_space.shape[0], env.action_space)
    if optimizer is None:
        optimizer = optim.Adam(shared_model.parameters(), lr=args.lr)
    if args.add_rank_reg:
        # Pick the configured regularizer's autograd function.
        if args.rank_reg_type == "maxdividemin":
            rank_reg = MaxDivideMin.apply
        elif args.rank_reg_type == "maxminusmin":
            rank_reg = MaxMinusMin.apply
    model.train()
    state = env.reset()
    state = torch.from_numpy(state)
    done = True
    local_counter = 0
    episode_length = 0
    while True:
        # Stop once the global step budget is exhausted.
        if args.max_counter_num != 0 and counter.value > args.max_counter_num:
            exit(0)
        # Sync with the shared model
        model.load_state_dict(shared_model.state_dict())
        if done:
            # Fresh LSTM state at the start of an episode.
            cx = Variable(torch.zeros(1, 256))
            hx = Variable(torch.zeros(1, 256))
        else:
            # Detach the LSTM state so the graph does not span rollouts.
            cx = Variable(cx.data)
            hx = Variable(hx.data)
        values = []
        log_probs = []
        rewards = []
        entropies = []
        if args.add_rank_reg:
            hiddens = [None] * 2 # 0: last layer, 1: last last layer
        for step in range(args.num_steps):
            episode_length += 1
            model_inputs = (Variable(state.unsqueeze(0)), (hx, cx))
            if args.add_rank_reg:
                value, logit, (hx, cx), internal_features = model(model_inputs, return_features=True)
            else:
                value, logit, (hx, cx) = model(model_inputs)
            prob = F.softmax(logit, dim=1)
            log_prob = F.log_softmax(logit, dim=1)
            entropy = -(log_prob * prob).sum(1, keepdim=True)
            entropies.append(entropy)
            if args.add_rank_reg:
                # Accumulate internal features across the rollout.
                if hiddens[0] is None:
                    hiddens[0] = internal_features[-1]
                    hiddens[1] = internal_features[-2]
                else:
                    hiddens[0] = torch.cat([hiddens[0], internal_features[-1]])
                    hiddens[1] = torch.cat([hiddens[1], internal_features[-2]])
            # Sample an action from the policy.
            action = prob.multinomial(num_samples=1).data
            log_prob = log_prob.gather(1, Variable(action))
            state, reward, done, _ = env.step(action.numpy())
            done = done or episode_length >= args.max_episode_length
            # Clip rewards to [-1, 1].
            reward = max(min(reward, 1), -1)
            local_counter += 1
            with lock:
                # Update the shared counter in chunks to reduce contention.
                if local_counter % 20 == 0:
                    counter.value += 20
            if done:
                episode_length = 0
                state = env.reset()
                state = torch.from_numpy(state)
            values.append(value)
            log_probs.append(log_prob)
            rewards.append(reward)
            if done:
                break
        # Bootstrap the return from the value estimate unless terminal.
        R = torch.zeros(1, 1)
        if not done:
            value, _, _ = model((Variable(state.unsqueeze(0)), (hx, cx)))
            R = value.data
        values.append(Variable(R))
        policy_loss = 0
        value_loss = 0
        R = Variable(R)
        gae = torch.zeros(1, 1)
        for i in reversed(range(len(rewards))):
            R = args.gamma * R + rewards[i]
            advantage = R - values[i]
            value_loss = value_loss + 0.5 * advantage.pow(2)
            # Generalized Advantage Estimation
            delta_t = args.gamma * values[i + 1].data - values[i].data + rewards[i]
            gae = gae * args.gamma * args.tau + delta_t
            policy_loss = policy_loss - \
                log_probs[i] * Variable(gae) - args.entropy_coef * entropies[i]
        total_loss = policy_loss + args.value_loss_coef * value_loss
        # internal layers regularizer
        retain_graph = None
        if args.add_rank_reg:
            current_rankreg_coef = args.rank_reg_coef
            # total_loss = total_loss + rank_reg(hiddens[0], args.rank_reg_coef)
            # Periodically also dump singular values to the sigma log.
            if args.save_sigmas and local_counter % args.save_sigmas_every <= 3:
                norm = rank_reg(hiddens[0], current_rankreg_coef, counter.value, sigmas_f, logger)
            else:
                norm = rank_reg(hiddens[0], current_rankreg_coef)
            total_loss = total_loss + norm
        optimizer.zero_grad()
        total_loss.backward(retain_graph=retain_graph)
        torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
        ensure_shared_grads(model, shared_model)
        optimizer.step()
|
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import BasePermission, IsAuthenticated
from rest_framework.response import Response
from users.models import Card
class IsKagisysHousing(BasePermission):
    """Allow access only to users in the 'kagisys' group."""

    def has_permission(self, request, view):
        kagisys_membership = request.user.groups.filter(name='kagisys')
        return kagisys_membership.exists()
@login_required
@api_view(['GET'])
# BUG FIX: @permission_classes takes a list of permission classes and must
# be applied below @api_view (i.e. closer to the function) so it decorates
# the DRF view; the previous order/argument had no effect.
@permission_classes([IsKagisysHousing])
def get_all_idm(request):
    """Return all card IDm values as one comma-separated string."""
    return Response(','.join(Card.objects.values_list('idm', flat=True)))
|
__author__ = "Narwhale"
string = input("请输入字符串:")
length = len(string)
# count stays 1 ("Likes") unless any rule below is violated.
count = 1
# Rule 1: the string must be entirely upper-case.
if string != string.upper():
    count = 0
# Rule 2: no two identical adjacent characters.
for i in range(length-1):
    if string[i] == string[i+1]:
        count = 0
# Rule 3: appears to reject interleaved repeats, i.e. a pattern
# A..B..A..B where a repeat of string[j] crosses a repeat of string[i]
# -- TODO confirm intended semantics.
for i in range(length-3):
    x1 = string.find(string[i],i+2)
    if x1 == -1:
        continue
    else:
        for j in range((i+1),x1):
            x2 = string.find(string[j],(x1+1))
            if x2 > 0:
                count = 0
                break
if count ==0:
    print("Dislikes")
elif count==1:
    print("Likes")
#! /usr/bin/env python
#coding:utf-8
import sys
import os
import re
import urllib2
import urllib
import requests
import cookielib
import getpass
import json
from bs4 import BeautifulSoup
## Workaround for Python 2 UnicodeEncodeError with Chinese text.
reload(sys)
sys.setdefaultencoding("utf8")
#####################################################
# Base URL of the site being scraped.
domain = 'http://www.lintcode.com/'
class xSpider(object):
    """Scraper that logs into lintcode.com with a persistent cookie session
    (Python 2 / urllib2) and downloads the user's accepted solutions.

    Typical flow: setLoginInfo() -> preLogin() -> login() ->
    getAcceptedQuetionList()/getEachLadderList() -> getSubmissionId() -> getCode().
    """
    def __init__(self):
        '''initiation'''
        self.name = ''
        # NOTE(review): 'passwprd' is a typo and is never read again; the real
        # password is stored in self.pwd by setLoginInfo().
        self.passwprd = ''
        # Cookie jar + opener installed globally so the login session cookie
        # is reused by every subsequent urllib2.urlopen() call.
        self.cj = cookielib.LWPCookieJar()
        self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
        urllib2.install_opener(self.opener)
    def setLoginInfo(self,username,password):
        '''set user information'''
        self.name = username
        self.pwd = password
    def preLogin(self):
        '''to get csrfmiddlewaretoken'''
        # Fetch the sign-in page and scrape the Django CSRF token out of the
        # hidden <input ... csrfmiddlewaretoken ... /> tag.
        req = urllib2.Request('http://www.lintcode.com/accounts/signin/')
        req.add_header('Host','www.lintcode.com')
        req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.130 Safari/537.36')
        response = urllib2.urlopen(req)
        content = response.read().decode('utf-8')
        pattern = re.compile('<input.*?csrfmiddlewaretoken.*?/>')
        item = re.findall(pattern, content)
        print 'get csrfmiddlewaretoken success!'
        # Slice the value="..." attribute out of the matched tag text.
        return item[0][item[0].find('value=') + 7 : -4]
    def login(self, csrfmiddlewaretoken):
        '''login'''
        # POST the credentials together with the CSRF token; the session
        # cookie lands in self.cj via the installed opener.
        loginurl = 'http://www.lintcode.com/zh-cn/accounts/signin/'
        loginparams = {'csrfmiddlewaretoken':csrfmiddlewaretoken,'username_or_email':self.name, 'password':self.pwd}
        req = urllib2.Request(loginurl, urllib.urlencode(loginparams))
        req.add_header('Host','www.lintcode.com')
        req.add_header('Origin','http://www.lintcode.com')
        req.add_header('Referer','http://www.lintcode.com/zh-cn/accounts/signin/')
        req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.130 Safari/537.36')
        response = urllib2.urlopen(req)
        # NOTE(review): the same request is issued twice (urlopen above and
        # opener.open here), i.e. the login POST is sent two times.
        self.operate = self.opener.open(req)
        thePage = response.read()
        print 'login success!'
        return thePage
    def getAcceptedQuetionList(self, questionPage):
        '''get Accepted Quetion List'''
        # Parse the post-login page and keep the hrefs of every question
        # whose status column reads 'Accepted'.
        question_soup = BeautifulSoup(questionPage)
        questionList = question_soup.find('div', attrs={'class': 'list-group list'}).find_all('a')
        acceptedQuetionList = []
        for questionItem in questionList:
            if questionItem.get_text('|', strip=True).split('|')[1] == 'Accepted':
                acceptedQuetionList.append(questionItem.get('href'))
        print 'getAcceptedQuetionList success'
        return acceptedQuetionList
    def getEachLadderList(self,level):
        # Fetch one level of ladder #2 and return the hrefs of its
        # 'Accepted' questions (same filtering as getAcceptedQuetionList).
        ladderURL = domain + 'en/ladder/2/level/' + level +'/'
        req = urllib2.Request(ladderURL)
        req.add_header('Host','www.lintcode.com')
        req.add_header('Origin','http://www.lintcode.com')
        req.add_header('Referer','http://www.lintcode.com/zh-cn/accounts/signin/')
        req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.130 Safari/537.36')
        response = urllib2.urlopen(req)
        # NOTE(review): page fetched twice here as well.
        self.operate = self.opener.open(req)
        ladderPage = response.read()
        ladder_soup = BeautifulSoup(ladderPage)
        eachStepList = ladder_soup.find_all('a',attrs={'class': 'list-group-item'})
        acceptedQuetionList = []
        for questionItem in eachStepList:
            if questionItem.get_text('|', strip=True).split('|')[1] == 'Accepted':
                acceptedQuetionList.append(questionItem.get('href'))
        print 'getAcceptedQuetionList success'
        return acceptedQuetionList
    def getSubmissionId(self, questionName):
        '''download each submission question id'''
        # Open the question's submissions page and return the href of the
        # first (most recent) submission row.
        quesURL = domain + 'zh-cn' + questionName + '/submissions/'
        req = urllib2.Request(quesURL)
        req.add_header('Host','www.lintcode.com')
        req.add_header('Origin','http://www.lintcode.com')
        req.add_header('Referer','http://www.lintcode.com/zh-cn/accounts/signin/')
        req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.130 Safari/537.36')
        response = urllib2.urlopen(req)
        self.operate = self.opener.open(req)
        submissionPage = response.read()
        submission_soup = BeautifulSoup(submissionPage)
        hrefList = submission_soup.find('tbody').find_all('a')
        idhref = hrefList[0].get('href')
        return idhref
    def getCode(self, submissionId):
        '''get description and code'''
        # Fetch the submission page; build a C-style block comment from the
        # problem description, then pull the source out of the inline
        # 'var response = {...}' JSON blob.
        codeURL = domain + submissionId
        req = urllib2.Request(codeURL)
        req.add_header('Host','www.lintcode.com')
        req.add_header('Origin','http://www.lintcode.com')
        req.add_header('Referer','http://www.lintcode.com/zh-cn/accounts/signin/')
        req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.130 Safari/537.36')
        response = urllib2.urlopen(req)
        self.operate = self.opener.open(req)
        codePage = response.read()
        code_soup = BeautifulSoup(codePage)
        questionList = code_soup.find_all('p')
        description = '/**\n*' + questionList[1].get_text()
        otherItemList = code_soup.find_all('div',attrs={'class': 'm-t-lg m-b-lg'})
        for otherItem in otherItemList:
            description += otherItem.get_text('*')
        description += '*/\n\n'
        pattern = re.compile('var response =.*?lint_info.*?};')
        codeStrList = re.findall(pattern, codePage)
        codevar = str(codeStrList[0])
        # Strip the leading 'var response = ' and the trailing ';'.
        codeStr = codevar[codevar.find('var response = ') + 15 : - 1]
        codeDict = json.loads(codeStr)
        codeReal = codeDict["solution"]
        return description, codeReal
if __name__ == '__main__':
if len(sys.argv) != 2:
print 'Usage ./lcSpider.py USERNAME'
sys.exit(0)
userSpider = xSpider()
username = sys.argv[1]
password = getpass.getpass('Password:')
userSpider.setLoginInfo(username,password)
csrfmiddlewaretoken = userSpider.preLogin()
questionPage = userSpider.login(csrfmiddlewaretoken)
'''get ladderPage'''
'''
stepList = ['1.String','2.Integer-Array','3.Binary-Search','4.Math-Bit-Manipulation','5.Greedy',
'6.Linked-List','7.Binary-Tree','8.Search-Recursion','9.Dynamic-Programming','10.Data-Structure']
for step in stepList:
stepAcceptedQuetionList = userSpider.getEachLadderList(step.split('.')[0])
for stepAcceptedQuetion in stepAcceptedQuetionList:
htmlfilefrom = open('lintcodeHTML/all/' + stepAcceptedQuetion[9:] + '.cpp.html').read()
htmlpath = 'lintcodeHTML/US-Giants/' + step
if not os.path.isdir(htmlpath):
os.makedirs(htmlpath)
htmlfileto = open(htmlpath + '/' + stepAcceptedQuetion[9:] + '.cpp.html', 'w')
htmlfileto.write(htmlfilefrom)
htmlfileto.close
'''
acceptedQuetionList = userSpider.getAcceptedQuetionList(questionPage)
count = 0
FileExistNames = os.listdir('./lintcode')
for acceptedQuetion in acceptedQuetionList:
# count += 1
if acceptedQuetion[9:] + '.cpp' not in FileExistNames:
submissionId = userSpider.getSubmissionId(acceptedQuetion)
description, myCode = userSpider.getCode(submissionId)
codeFile = open('lintcode/' + acceptedQuetion[9:] + '.cpp', 'w')
codeFile.write(description)
codeFile.write(str(myCode).replace('\\n','\n'))
codeFile.close
print 'get ' + acceptedQuetion[9:] + '.cpp success'
# if count % 5 == 0:
# print count
|
#!/usr/bin/python3
import numpy as np
from cpa import CPA
# Correlation Power Analysis driver: trains a CPA model on a capture with
# known random keys / fixed plaintext, then evaluates it on a capture with a
# known fixed key / random plaintexts (board P63).
traces_file="traces_capdir63/knownrand_fixed/knownrand_fixed_P63_data/traces/2016.06.01-16.37.37_traces.npy"
key_file="traces_capdir63/knownrand_fixed/knownrand_fixed_P63_data/traces/2016.06.01-16.37.37_keylist.npy"
plaintext_file="traces_capdir63/knownrand_fixed/knownrand_fixed_P63_data/traces/2016.06.01-16.37.37_textin.npy"
# Load the power traces plus the key/plaintext metadata recorded with them.
traces=np.load(traces_file)
key=np.load(key_file)
plaintext=np.load(plaintext_file)
# Fit the CPA model, persist the learned correlations, and plot them.
cpa=CPA(traces, key, plaintext)
cpa.train()
cpa.save_train("krf_cpa_corr.npy")
cpa.plot_train("result/knownrand_fixed")
# Held-out capture for evaluation.
test_traces_file="traces_capdir63/knownfixed_rand/knownfixed_rand_P63_data/traces/2016.06.01-16.34.03_traces.npy"
test_plaintext_file="traces_capdir63/knownfixed_rand/knownfixed_rand_P63_data/traces/2016.06.01-16.34.03_textin.npy"
test_traces=np.load(test_traces_file)
test_plaintext=np.load(test_plaintext_file)
cpa.test(test_traces, test_plaintext)
from os import getcwd
from os import listdir
from os import rmdir
from pathlib import Path
from shutil import move
# Flatten one level of subdirectories: move every file found in an immediate
# subfolder up into the current directory (renaming on collision by appending
# a counter to the stem), then delete any subfolder left empty.
current_location = Path(getcwd())

# Collect every entry inside each immediate subdirectory.
pictures = []
for subdir in current_location.iterdir():
    if subdir.is_dir():
        pictures.extend(subdir.iterdir())

for picture in pictures:
    candidate = picture.name
    attempt = 1
    # Probe names until one is free: stem1.ext, stem2.ext, ...
    while True:
        probe = Path(current_location, candidate)
        if not probe.exists():
            break
        candidate = "{0}{1}{2}".format(picture.stem, attempt, picture.suffix)
        print("{0} exists. trying: {1}".format(probe.name, candidate))
        attempt += 1
    destination = Path(current_location, candidate)
    print("moving: {0} to {1}".format(picture, destination))
    move(str(picture), str(destination))

# Second pass: remove subdirectories that are now empty.
for subdir in current_location.iterdir():
    if subdir.is_dir() and not listdir(str(subdir)):
        print("Folder: {0} is empty. Deleting!".format(subdir))
        rmdir(str(subdir))
|
"""
4. 寻找两个正序数组的中位数
给定两个大小为 m 和 n 的正序(从小到大)数组 nums1 和 nums2。请你找出并返回这两个正序数组的中位数。
进阶:你能设计一个时间复杂度为 O(log (m+n)) 的算法解决此问题吗?
示例 1:
输入:nums1 = [1,3], nums2 = [2]
输出:2.00000
解释:合并数组 = [1,2,3] ,中位数 2
示例 2:
输入:nums1 = [1,2], nums2 = [3,4]
输出:2.50000
解释:合并数组 = [1,2,3,4] ,中位数 (2 + 3) / 2 = 2.5
示例 3:
输入:nums1 = [0,0], nums2 = [0,0]
输出:0.00000
示例 4:
输入:nums1 = [], nums2 = [1]
输出:1.00000
示例 5:
输入:nums1 = [2], nums2 = []
输出:2.00000
"""
"""
中位数其实就是中间的那个数,所以首先提出方法1,但是复杂度差不多是n。
"""
def findMedianSortedArrays(nums1: list, nums2: list) -> float:
    """Return the median of two sorted lists by merging up to the midpoint.

    Args:
        nums1, nums2: sorted (ascending) lists of numbers; at least one
            must be non-empty.
    Returns:
        The median as a float (average of the two middle elements when the
        combined length is even).
    Raises:
        IndexError: if both lists are empty.

    BUG FIX: the original implementation consumed its arguments with
    ``pop(0)``, destroying the caller's lists; this version never mutates
    its inputs (and avoids the O(n) cost of each pop(0)).
    """
    total = len(nums1) + len(nums2)
    # 0-based indices of the element(s) forming the median.
    if total % 2 == 0:
        lo, hi = total // 2 - 1, total // 2
    else:
        lo = hi = total // 2
    merged = []
    i = j = 0
    # Merge only as far as the upper median index; the tail is irrelevant.
    while len(merged) <= hi:
        if j >= len(nums2) or (i < len(nums1) and nums1[i] <= nums2[j]):
            merged.append(nums1[i])
            i += 1
        else:
            merged.append(nums2[j])
            j += 1
    return (merged[lo] + merged[hi]) / 2
"""
第二种方法,代码比较简单,就是合并两个数组,排序,然后拿中间值就可以了,因为两个数组本身就是有序的,所以排序就很简单了。
"""
def findMedianSortedArrays2(nums1: list, nums2: list) -> float:
    """Median of two sorted lists: concatenate, re-sort, take the middle.

    For an even combined length this averages the two central elements;
    for an odd length both indices coincide on the single middle element.
    """
    merged = sorted(nums1 + nums2)
    total = len(merged)
    if total % 2 == 0:
        lo, hi = total // 2 - 1, total // 2
    else:
        lo = hi = total // 2
    return (merged[lo] + merged[hi]) / 2
if __name__ == '__main__':
    # Quick manual check with two identical two-element arrays.
    left = [0, 1]
    right = [0, 1]
    print(findMedianSortedArrays2(left, right))
"""
Faça uma função que recebe dois catetos de um triangulo e calcule sua hipotenusa.
"""
import math
def hipotenusa(a, b):
    """Print and return the hypotenuse of a right triangle with legs a and b.

    Uses math.hypot (equivalent to sqrt(a**2 + b**2) but overflow-safe).
    The original only printed the value; returning it as well is
    backward-compatible and lets callers use the result.
    """
    h = math.hypot(a, b)
    print(h)
    return h
# Read the two legs from the user and print their hypotenuse.
a = float(input('Cateto 1 '))
b = float(input('Cateto 2 '))
hipotenusa(a, b)
|
# Generated by Django 3.0.5 on 2020-04-15 10:15
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Article table.
    # NOTE: migrations are part of the applied database history; do not
    # hand-edit field definitions after this has been run anywhere.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                # Image is stored as a URL/path string, not an ImageField.
                ('image1', models.CharField(max_length=500)),
                ('short', models.CharField(max_length=200)),
                ('desc', models.TextField(max_length=2000)),
                ('author', models.CharField(max_length=100)),
                # Up to three free-text categories; 2 and 3 are optional.
                ('category1', models.CharField(max_length=100)),
                ('category2', models.CharField(blank=True, max_length=100)),
                ('category3', models.CharField(blank=True, max_length=100)),
                ('date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
            ],
        ),
    ]
|
#!/usr/bin/env python
def square(i):
    """Return i squared."""
    return i ** 2
def applyToEach(L, f):
    '''assumes L - list, f - function
    replacing each element e of L with f(e) (mutates L in place)'''
    for index, element in enumerate(L):
        L[index] = f(element)
# Demo (Python 2): mutate a list in place with several element-wise functions.
L = [4,5,9]
applyToEach( L, square )
print L
L = [1,-2,3.4]
applyToEach( L, abs )
print L
# int() truncates each element toward zero.
applyToEach( L, int )
print L
|
import numpy as np
def test_random():
    """Smoke-test random_partition by printing a 10-way split of 100."""
    print(random_partition(100, 10))
def random_container(N, Nruns, Nbits=8):
    """Build Nruns random [start, last] runs covering N set positions
    inside a 2**Nbits universe, separated by random clear gaps."""
    # Partition the set bits (last run may be empty) and the clear bits
    # (first gap may be empty) independently.
    set_lengths = random_partition(N, Nruns, 0, 1)
    clear_lengths = random_partition(2**Nbits - N, Nruns, 1, 0)
    runs = []
    last = 0
    for set_len, clear_len in zip(set_lengths, clear_lengths):
        start = last + clear_len
        last = start + set_len
        runs.append([start, last])
    return runs
def random_partition(sum, num, first0=0, last0=0):
    """Split the integer `sum` into `num` random parts that add up to `sum`.

    first0=1 allows the first part to be 0; last0=1 allows the last part
    to be 0.  All other parts are at least 1 because the cut points are
    distinct.  Returns a numpy array of length `num`.

    NOTE: the parameter name `sum` shadows the builtin; it is kept for
    backward compatibility with keyword callers.
    """
    # Choose num-1 distinct cut points in [start, end] per this table:
    # first0 last0  start  end
    #   0      0      1    sum-1
    #   0      1      1    sum
    #   1      0      0    sum-1
    #   1      1      0    sum
    vals = np.random.permutation(sum - 1 + first0 + last0)[0:num-1]
    if first0 == 0:
        vals += 1
    # Add the fixed endpoints 0 and `sum`, sort, and return consecutive
    # differences: those gaps are the part sizes.
    # (Debug print statements from the original have been removed.)
    vals = np.append(vals, [0, sum])
    vals.sort()
    return np.diff(vals)
|
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import random
import copy
class Buffer:
    """Simple replay buffer of (state, action, reward, next_state, done) tuples."""

    def __init__(self):
        self.buffer = []

    def append_sample(self, sample):
        """Store one (s, a, r, s_next, done) experience tuple."""
        self.buffer.append(sample)

    def sample(self, sample_size):
        """Draw a random mini-batch without replacement.

        The requested size is clamped to the buffer length.  Returns
        float32 tensors for s, a, r, s_next and a plain Python list for
        the done flags.
        """
        if sample_size > len(self.buffer):
            sample_size = len(self.buffer)
        rand_sample = random.sample(self.buffer, sample_size)
        s, a, r, s_next, done = [], [], [], [], []
        for values in rand_sample:
            s.append(values[0])
            a.append(values[1])
            r.append(values[2])
            s_next.append(values[3])
            # BUG FIX: the original appended the literal list [4] instead of
            # the stored done flag values[4].
            done.append(values[4])
        return (torch.tensor(s, dtype=torch.float32),
                torch.tensor(a, dtype=torch.float32),
                torch.tensor(r, dtype=torch.float32),
                torch.tensor(s_next, dtype=torch.float32),
                done)

    def __len__(self):
        return len(self.buffer)
class Actor(nn.Module):
    """Deterministic policy network: maps a state to an action vector
    squashed into [-max_action, max_action] via tanh."""

    def __init__(self, state_dim, action_dim, max_action):
        super(Actor, self).__init__()
        # Small 2-hidden-layer MLP (5 -> 3 units).
        self.l1 = nn.Linear(state_dim, 5)
        self.l2 = nn.Linear(5, 3)
        self.l3 = nn.Linear(3, action_dim)
        self.max_action = max_action

    def forward(self, state):
        hidden = F.relu(self.l1(state))
        hidden = F.relu(self.l2(hidden))
        squashed = torch.tanh(self.l3(hidden))
        return self.max_action * squashed
class Critic(nn.Module):
    """Twin Q-networks (TD3 style): two independent MLP heads that each map
    a concatenated (state, action) pair to a scalar value."""

    def __init__(self, state_dim, action_dim):
        super(Critic, self).__init__()
        # Q1 head
        self.l1 = nn.Linear(state_dim + action_dim, 7)
        self.l2 = nn.Linear(7, 6)
        self.l3 = nn.Linear(6, 1)
        # Q2 head
        self.l4 = nn.Linear(state_dim + action_dim, 7)
        self.l5 = nn.Linear(7, 6)
        self.l6 = nn.Linear(6, 1)

    def forward(self, state, action):
        sa = torch.cat([state, action], 1)
        q1 = self.l3(F.relu(self.l2(F.relu(self.l1(sa)))))
        q2 = self.l6(F.relu(self.l5(F.relu(self.l4(sa)))))
        return q1, q2

    def Q1(self, state, action):
        """Evaluate only the first Q-head (used for the actor update)."""
        sa = torch.cat([state, action], 1)
        return self.l3(F.relu(self.l2(F.relu(self.l1(sa)))))
class RL_Agents:
    """One TD3 (twin-delayed DDPG) controller per building.

    Holds, for each building index i: a replay buffer, actor/critic
    networks with target copies, and their optimizers.  The TD3 update
    loop itself runs inside add_to_buffer(), i.e. learning happens as a
    side effect of storing each new transition.
    """
    def __init__(self, building_info, observation_spaces = None, action_spaces = None):
        #Hyper-parameters
        self.discount = 0.992 #Discount factor
        self.batch_size = 100 #Size of each MINI-BATCH
        self.iterations = 1 # Number of updates of the actor-critic networks every time-step
        self.policy_freq = 2 # Number of iterations after which the actor and target networks are updated
        self.tau = 5e-3 #Rate at which the target networks are updated
        self.lr_init = 1e-3 #5e-2
        self.lr_final = 1e-3 #3e-3
        self.lr_decay_rate = 1/(78*8760)
        self.expl_noise_init = 0.75 # Exploration noise at time-step 0
        self.expl_noise_final = 0.01 # Magnitude of the minimum exploration noise
        self.expl_noise_decay_rate = 1/(290*8760) # Decay rate of the exploration noise in 1/h
        self.policy_noise = 0.025*0  # NOTE(review): *0 disables target-policy smoothing noise
        self.noise_clip = 0.04*0     # NOTE(review): *0 disables noise clipping
        self.max_action = 0.25
        self.min_samples_training = 400 #Min number of tuples that are stored in the batch before the training process begins
        # Parameters
        self.device = "cpu"
        self.time_step = 0
        self.building_info = building_info # Can be used to create different RL agents based on basic building attributes or climate zones
        self.observation_spaces = observation_spaces
        self.action_spaces = action_spaces
        self.n_buildings = len(observation_spaces)
        self.buffer = {i: Buffer() for i in range(self.n_buildings)}
        self.networks_initialized = False
        # Monitoring variables (one per agent)
        self.actor_loss_list = {i: [] for i in range(self.n_buildings)}
        self.critic1_loss_list = {i: [] for i in range(self.n_buildings)}
        self.critic2_loss_list = {i: [] for i in range(self.n_buildings)}
        self.q_val_list = {i: [] for i in range(self.n_buildings)}
        self.q1_list = {i: [] for i in range(self.n_buildings)}
        self.q2_list = {i: [] for i in range(self.n_buildings)}
        self.a_track1 = []
        self.a_track2 = []
        #Networks and optimizers (one per agent)
        self.actor, self.critic, self.actor_target, self.critic_target, self.actor_optimizer, self.critic_optimizer = {}, {}, {}, {}, {}, {}
        for i, (o, a) in enumerate(zip(observation_spaces, action_spaces)):
            self.actor[i] = Actor(o.shape[0], a.shape[0], self.max_action).to(self.device)
            self.critic[i] = Critic(o.shape[0], a.shape[0]).to(self.device)
            self.actor_target[i] = copy.deepcopy(self.actor[i])
            self.critic_target[i] = copy.deepcopy(self.critic[i])
            self.actor_optimizer[i] = optim.Adam(self.actor[i].parameters(), lr=self.lr_init)
            self.critic_optimizer[i] = optim.Adam(self.critic[i].parameters(), lr=self.lr_init)
    def select_action(self, states):
        """Return one exploratory action per building.

        The policy output is perturbed with Gaussian noise whose magnitude
        decays linearly from expl_noise_init to expl_noise_final over time,
        then clipped back into [-max_action, max_action].
        """
        expl_noise = max(self.expl_noise_final, self.expl_noise_init * (1 - self.time_step * self.expl_noise_decay_rate))
        actions = []
        for i, state in enumerate(states):
            a = self.actor[i](torch.tensor(state, dtype=torch.float32))
            self.a_track1.append(a)  # raw policy output (for monitoring)
            a = a.cpu().detach().numpy() + expl_noise * np.random.normal(loc = 0, scale = self.max_action, size=a.shape)
            self.a_track2.append(a)  # noisy action (for monitoring)
            a = np.clip(a, -self.max_action, self.max_action)
            actions.append(a)
        return actions
    def add_to_buffer(self, states, actions, rewards, next_states, dones):
        """Store one transition per building, then run the TD3 updates.

        States are min-max normalized with each building's observation
        space bounds before storage.  Training for building i starts once
        its buffer holds more than min_samples_training tuples.
        """
        # Information contained in the building_info variable can be used to choose the number of buffers and what information goes to each buffer
        dones = [dones for _ in range(self.n_buildings)]
        for i, (s, a, r, s_next, done) in enumerate(zip(states, actions, rewards, next_states, dones)):
            # Min-max normalize observations (epsilon avoids division by zero).
            s = (s - self.observation_spaces[i].low)/(self.observation_spaces[i].high - self.observation_spaces[i].low + 0.00001)
            s_next = (s_next - self.observation_spaces[i].low)/(self.observation_spaces[i].high - self.observation_spaces[i].low + 0.00001)
            self.buffer[i].append_sample((s, a, r, s_next, done))
        # Linearly decayed learning rate; NOTE(review): rebuilding the Adam
        # optimizers every call also resets their moment estimates.
        lr = max(self.lr_final, self.lr_init * (1 - self.time_step * self.lr_decay_rate))
        for i in range(self.n_buildings):
            self.actor_optimizer[i] = optim.Adam(self.actor[i].parameters(), lr=lr)
            self.critic_optimizer[i] = optim.Adam(self.critic[i].parameters(), lr=lr)
        #One TD3 control agent for each building
        for i in range(self.n_buildings):
            #Learning begins when a minimum number of tuples have beena added to the buffer
            if len(self.buffer[i]) > self.min_samples_training:
                #Every time-step we randomly sample 'self.iterations' number of minibatches from the buffer of experiences and perform 'self.iterations' number of updates of the networks.
                for k in range(self.iterations):
                    state, action, reward, next_state, dones_mask = self.buffer[i].sample(self.batch_size)
                    target_Q = reward.unsqueeze(dim=-1)
                    with torch.no_grad():
                        noise = (torch.randn_like(action) * self.policy_noise).clamp(-self.noise_clip, self.noise_clip)
                        # Select action according to policy
                        next_action = (self.actor_target[i](next_state) + noise).clamp(-self.max_action, self.max_action)
                        # Compute the target Q value (clipped double-Q: min of the two heads)
                        # NOTE(review): the done mask is not applied to the bootstrap term.
                        target_Q1, target_Q2 = self.critic_target[i](next_state, next_action)
                        target_Q = torch.min(target_Q1, target_Q2)
                        target_Q = reward.unsqueeze(dim=-1) + target_Q * self.discount
                    # Get current Q estimates
                    current_Q1, current_Q2 = self.critic[i](state, action)
                    # Compute critic loss
                    critic1_loss = F.mse_loss(current_Q1, target_Q)
                    critic2_loss = F.mse_loss(current_Q2, target_Q)
                    critic_loss = critic1_loss + critic2_loss
                    # Optimize the critic
                    self.critic_optimizer[i].zero_grad()
                    critic_loss.backward()
                    self.critic_optimizer[i].step()
                    # Save values
                    self.q_val_list[i].append(target_Q)
                    self.q1_list[i].append(current_Q1)
                    self.q2_list[i].append(current_Q2)
                    self.critic1_loss_list[i].append(critic1_loss)
                    self.critic2_loss_list[i].append(critic2_loss)
                    # Delayed policy updates
                    if k % self.policy_freq == 0:
                        # Compute actor loss (maximize Q1 of the policy's action)
                        actor_loss = -self.critic[i].Q1(state, self.actor[i](state)).mean()
                        self.actor_loss_list[i].append(actor_loss)
                        # Optimize the actor
                        self.actor_optimizer[i].zero_grad()
                        actor_loss.backward()
                        self.actor_optimizer[i].step()
                        # Update the frozen target models (Polyak averaging)
                        for param, target_param in zip(self.critic[i].parameters(), self.critic_target[i].parameters()):
                            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
                        for param, target_param in zip(self.actor[i].parameters(), self.actor_target[i].parameters()):
                            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
        self.time_step += 1
class RBC_Agent:
    """Rule-based controller keyed on the hour of day (states[0][2]):
    discharge storage during the day, charge it at night."""

    def __init__(self, actions_spaces):
        self.actions_spaces = actions_spaces
        self.reset_action_tracker()

    def reset_action_tracker(self):
        self.action_tracker = []

    def select_action(self, states):
        hour_day = states[0][2]
        n_agents = len(self.actions_spaces)
        # Default: do nothing (zero action for every dimension of each space).
        a = [[0.0 for _ in range(len(self.actions_spaces[i].sample()))] for i in range(n_agents)]
        # Daytime: release stored energy
        if 9 <= hour_day <= 21:
            a = [[-0.08 for _ in range(len(self.actions_spaces[i].sample()))] for i in range(n_agents)]
        # Early nightime: store DHW and/or cooling energy
        if (1 <= hour_day <= 8) or (22 <= hour_day <= 24):
            a = []
            for space in self.actions_spaces:
                if len(space.sample()) == 2:
                    a.append([0.091, 0.091])
                else:
                    a.append([0.091])
        self.action_tracker.append(a)
        return np.array(a)
# coding=utf-8
import os
import sys
import unittest
from time import sleep
from selenium import webdriver
from selenium.common.exceptions import NoAlertPresentException, NoSuchElementException
sys.path.append(os.environ.get('PY_DEV_HOME'))
from webTest_pro.common.initData import init
from webTest_pro.common.model.baseActionAdd import user_login, add_groupsubGrps
from webTest_pro.common.model.baseActionDel import del_subjectGrp
from webTest_pro.common.model.baseActionSearch import search_subjectGrp
from webTest_pro.common.model.baseActionModify import update_subjectGrp
from webTest_pro.common.logger import logger, T_INFO
reload(sys)
sys.setdefaultencoding("utf-8")
# Login credentials supplied by the shared init module.
loginInfo = init.loginInfo
# Fixture data used by the test cases below (school/classroom/subject-group
# records in Chinese, matching the application's UI language).
schools = [{'schoolName': u'二中', 'schoolType': u'高中', 'schoolArea': u'郑州市'},
           {'schoolName': u'三中', 'schoolType': u'中学', 'schoolArea': u'郑州市'},
           {'schoolName': u'四中', 'schoolType': u'中学', 'schoolArea': u'开封市'},
           {'schoolName': u'五中', 'schoolType': u'小学', 'schoolArea': u'开封市'},
           {'schoolName': u'六中', 'schoolType': u'小学', 'schoolArea': u'开封市'},
           {'schoolName': u'一中', 'schoolType': u'高中', 'schoolArea': u'郑州市'}]
classrooms = [{'className': '31className', 'classAccNumber': '1'},
              {'className': '32className', 'classAccNumber': '1'}]
modifyClassRoom = {'className': '32classNamemodify', 'classAccNumber': '100'}
# Subject groups created, searched, modified and deleted by the cases below.
subjectsGroups = [{'groupName': u'计算机', 'groupCode': '001', 'description': u'计算机'},
                  {'groupName': u'物理', 'groupCode': '002', 'description': u'物理'}]
searchsubjectGrps = [{'groupName': u'计算机'},
                     {'groupName': u'物理'}]
modifysubjectsGroup = [{'groupName': u'计算机modif', 'groupCode': '0101', 'description': u'计算机modif', 'searchName': u'计算机'}]
class schoolgroupmanager(unittest.TestCase):
    '''Subject-group management UI tests (add / search / modify / delete).'''
    def setUp(self):
        # Start a local Chrome or a remote WebDriver session depending on
        # the configured execution environment.
        if init.execEnv['execType'] == 'local':
            T_INFO(logger,"\nlocal exec testcase")
            self.driver = webdriver.Chrome()
            self.driver.implicitly_wait(8)
            self.verificationErrors = []
            self.accept_next_alert = True
            T_INFO(logger,"start tenantmanger...")
        else:
            T_INFO(logger,"\nremote exec testcase")
            browser = webdriver.DesiredCapabilities.CHROME
            self.driver = webdriver.Remote(command_executor=init.execEnv['remoteUrl'], desired_capabilities=browser)
            self.driver.implicitly_wait(8)
            self.verificationErrors = []
            self.accept_next_alert = True
            T_INFO(logger,"start tenantmanger...")
    def tearDown(self):
        self.driver.quit()
        # Fail the test if any soft verification errors were collected.
        self.assertEqual([], self.verificationErrors)
        T_INFO(logger,"tenantmanger end!")
    def test_add_subjectgrp(self):
        '''Add subject groups (original docstring said "add classroom").'''
        print "exec:test_add_subjectgrp..."
        driver = self.driver
        user_login(driver, **loginInfo)
        for subjectsGroup in subjectsGroups:
            add_groupsubGrps(driver, **subjectsGroup)
            # Expect the success toast text (Chinese: "added successfully!").
            self.assertEqual(u"添加成功!", driver.find_element_by_css_selector(".layui-layer-content").text)
            sleep(0.5)
        print "exec:test_add_subjectgrp success."
    def test_bsearch_subjectgrp(self):
        '''Search subject-group information.'''
        print "exec:test_bsearch_subjectgrp"
        driver = self.driver
        user_login(driver, **loginInfo)
        for searchsubjectGrp in searchsubjectGrps:
            search_subjectGrp(driver, **searchsubjectGrp)
            # First result row must match the searched group name.
            self.assertEqual(searchsubjectGrp['groupName'],
                             driver.find_element_by_xpath("//table[@id='SchoolGroupmodaltab']/tbody/tr/td").text)
            print "exec test_bsearch_subjectgrp success"
            sleep(0.5)
    def test_bupdate_subjectgrp(self):
        '''Modify subject-group management entries.'''
        print "exec:test_bupdate_subjectgrp"
        driver = self.driver
        user_login(driver, **loginInfo)
        for itme in modifysubjectsGroup:
            update_subjectGrp(driver, **itme)
            print "exec test_bupdate_subjectgrp success"
            sleep(0.5)
    def test_del_subjectgrp_ok(self):
        '''Delete subject group and confirm.'''
        print "exec:test_del_subjectgrp_ok..."
        driver = self.driver
        user_login(driver, **loginInfo)
        for searchsubjectGrp in searchsubjectGrps:
            del_subjectGrp(driver, **searchsubjectGrp)
            # Expect the success toast text (Chinese: "deleted successfully!").
            self.assertEqual(u"删除成功!", driver.find_element_by_css_selector(".layui-layer-content").text)
            sleep(0.5)
        print "exec:test_del_subjectgrp_ok success."
    def is_element_present(self, how, what):
        # Helper: True when the locator matches an element.
        try:
            self.driver.find_element(by=how, value=what)
        except NoSuchElementException as e:
            return False
        return True
    def is_alert_present(self):
        # Helper: True when a JS alert is currently open.
        # NOTE(review): switch_to_alert is the legacy Selenium API.
        try:
            self.driver.switch_to_alert()
        except NoAlertPresentException as e:
            return False
        return True
    def close_alert_and_get_its_text(self):
        # Accept or dismiss the open alert (per accept_next_alert) and
        # return its message text.
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally:
            self.accept_next_alert = True
# Standard unittest entry point: discover and run the TestCase above.
if __name__ == '__main__':
    unittest.main()
|
#===================================================================================
# Kallist Ensg with version annotation
#===================================================================================
# Default TPM matrix annotated when this function is invoked manually.
inputTPM="/dt2/04rsem/06ENS3803_Merge191120/191120rsemTPMENSG.csv"

def ENSG2GeneName(inputTPM):
    """Annotate an ENSG-keyed TPM CSV with gene names.

    Reads an ENSG->gene-name mapping from whichever of the two known
    reference files exists, then writes two copies of the input CSV:
    *_geneAnno.csv (gene name as the row key) and *_gene_ENSGAnno.csv
    (gene name + ENSG id as the row key).

    Raises:
        FileNotFoundError: if neither mapping file exists.  (The original
        printed a message and then crashed with NameError on the unbound
        file handle.)
    """
    import os
    file1 = "/media/cytogenbi1/D2C67EE7C67ECAED/BI/02ref/ensembl38.97/GeneE38_97EXversion.txt"
    file2 = "/media/cytogenbi2/6eaf3ba8-a866-4e8a-97ef-23c61f7da612/00ref/modGTF38/GeneE38_97EXversion.txt"
    if os.path.isfile(file1):
        mapping_path = file1
    elif os.path.isfile(file2):
        mapping_path = file2
    else:
        print("There's no gtf file!!")
        raise FileNotFoundError("no gene-name mapping file found")
    # Build ENSG-id -> gene-name dict from columns 1 and 3 of the mapping.
    geneD = {}
    with open(mapping_path) as inf1:
        for line in inf1:
            geneD[line.split("\t")[1]] = line.split("\t")[3].strip("\n")
    with open(inputTPM) as res1:
        resline = res1.readlines()
    outputFile = inputTPM.split(".csv")[0] + "_geneAnno.csv"
    outputFile2 = inputTPM.split(".csv")[0] + "_gene_ENSGAnno.csv"
    with open(outputFile, "w") as ouf, open(outputFile2, "w") as ouf2:
        for rline in resline:
            # Row key is the first CSV column with surrounding quotes removed.
            rkey = rline.split(",")[0].replace('"', '')
            geneN = geneD.get(rkey)
            print(geneN, "_", rkey)
            geneNa = str(geneN)
            ouf.write('"%s",%s' % (geneNa, ",".join(rline.split(",")[1:])))
            gene_Na = str(geneN) + "_" + rkey
            ouf2.write('"%s",%s' % (gene_Na, ",".join(rline.split(",")[1:])))
#===================================================================================
#===================================================================================
# Kallisto Ensg with version annotation
#===================================================================================
# Default Kallisto TPM matrix annotated when this function is invoked manually.
inputTPM="/media/cytogenbi1/D2C67EE7C67ECAED/BI/07kallisto/results/191114CMC11_19kallisto_raw.csv"

def ENSG2GeneName(inputTPM):
    """Annotate a versioned-ENSG-keyed Kallisto CSV with gene names.

    Uses whichever of the two known 'with version' mapping files exists
    and writes *_geneAnno.csv with 'geneName_ENSGid' row keys.

    Raises:
        FileNotFoundError: if neither mapping file exists.  (The original
        printed a message and then crashed with NameError on the unbound
        file handle.)
    """
    import os
    file1 = "/media/cytogenbi1/D2C67EE7C67ECAED/BI/02ref/ensembl38.97/GeneE38_97Wthversion.txt"
    file2 = "/media/cytogenbi2/6eaf3ba8-a866-4e8a-97ef-23c61f7da612/00ref/modGTF38/GeneE38_97Wthversion.txt"
    if os.path.isfile(file1):
        mapping_path = file1
    elif os.path.isfile(file2):
        mapping_path = file2
    else:
        print("There's no gtf file!!")
        raise FileNotFoundError("no gene-name mapping file found")
    # Build ENSG-id -> gene-name dict from columns 1 and 3 of the mapping.
    geneD = {}
    with open(mapping_path) as inf1:
        for line in inf1:
            geneD[line.split("\t")[1]] = line.split("\t")[3].strip("\n")
    with open(inputTPM) as res1:
        resline = res1.readlines()
    outputFile = inputTPM.split(".csv")[0] + "_geneAnno.csv"
    with open(outputFile, "w") as ouf:
        for rline in resline:
            # Row key is the first CSV column with surrounding quotes removed.
            rkey = rline.split(",")[0].replace('"', '')
            geneN = geneD.get(rkey)
            print(geneN, "_", rkey)
            geneNa = str(geneN) + "_" + rkey
            ouf.write('"%s",%s' % (geneNa, ",".join(rline.split(",")[1:])))
#===================================================================================
|
# -*- coding: utf-8 -*-
"""
99: program finished
1 : adds num in two positions and store the result in third position.
2 : multiplies num in two positions and store the result in third position.
3 : takes an input to store in a specific pos
4 : outputs the value in the specific pos
5 : jump_if_true
6 : jump if false
7 : less then
8 : equals
9 : move relative base
"""
import copy
correct_op = [1,2,3,4,5,6,7,8,9,99] #supported operations so far
# define macros of directions
# Global transcript of every value the intcode program outputs.
output_list = []
def arcade_cabinet(code_list, iter_input):
global output_list
cursor = 0
rela_base = 0
op_code = code_list[cursor]%100
input_cnt = 0
#record pos of the ball
ball_x = 0
ball_y = 0
#record pos of the paddle
pad_x = 0
pad_y = 0
index = 0
move_direction = 0
pad_direction = 0
while(op_code in correct_op):
#print('op code is: ', op_code, ' cursor is: ',code_list[cursor], code_list[cursor+1],code_list[cursor+2], code_list[cursor+3])
op_code = code_list[cursor]%100
op_mode = []
op_mode_int = code_list[cursor]//100
#print('op_mode_int: ' +str(op_mode_int))
for i in range(0,3):
op_mode.append(op_mode_int%10)
op_mode_int = op_mode_int//10
if(op_code == 1):
if(op_mode[0] == 0):
p1 = code_list[code_list[cursor+1]]
elif(op_mode[0] == 1):
p1 = code_list[cursor+1]
elif(op_mode[0] == 2):
p1 = code_list[rela_base + code_list[cursor+1]]
else:
print('error getting addr in op1')
if(op_mode[1] == 0):
p2 = code_list[code_list[cursor+2]]
elif(op_mode[1] == 1):
p2 = code_list[cursor+2]
elif(op_mode[1] == 2):
p2 = code_list[rela_base + code_list[cursor+2]]
else:
print('error getting addr in op1')
if(op_mode[2] == 0):
code_list[code_list[cursor+3]] = p1 + p2
elif(op_mode[2] == 2):
#print('cursor is: ', cursor, ' cursor is: ',code_list[cursor], code_list[cursor+1],code_list[cursor+2], code_list[cursor+3])
#print('relative is: ', rela_base)
code_list[rela_base + code_list[cursor+3]] = p1+ p2
else:
print('error getting addr in op1')
cursor += 4
elif(op_code == 2):
#print('curr pos: ', code_list[cursor], code_list[cursor+1], code_list[cursor+2], code_list[cursor+3])
if(op_mode[0] == 0):
#print('curr pos: ', code_list[cursor+1])
p1 = code_list[code_list[cursor+1]]
elif(op_mode[0] == 1):
p1 = code_list[cursor+1]
elif(op_mode[0] == 2):
p1 = code_list[rela_base + code_list[cursor+1]]
else:
print('error getting addr in op2')
if(op_mode[1] == 0):
p2 = code_list[code_list[cursor+2]]
elif(op_mode[1] == 1):
p2 = code_list[cursor+2]
elif(op_mode[1] == 2):
p2 = code_list[rela_base + code_list[cursor+2]]
else:
print('error getting addr in op2')
if(op_mode[2] == 0):
code_list[code_list[cursor+3]] = p1 * p2
elif(op_mode[2] == 2):
code_list[rela_base + code_list[cursor+3]] = p1 * p2
else:
print('error getting addr in op2')
cursor += 4
elif(op_code == 3):
input_cnt += 1
if (op_mode[0] != 0):
code_list[rela_base + code_list[cursor+1]] = pad_direction
else:
code_list[code_list[cursor+1]] = pad_direction
cursor += 2
elif(op_code == 4):
if(op_mode[0] == 0):
#print("the output value (mode 0): " + str(code_list[code_list[cursor+1]]))
output_list.append(code_list[code_list[cursor+1]])
elif(op_mode[0] == 2):
output_list.append(code_list[rela_base + code_list[cursor+1]])
else:
#print("the output value (mode 1): " + str(code_list[cursor+1]))
output_list.append(code_list[cursor+1])
#find the coord of the ball and get direction
if index == 2 and output_list[-1] == 4:
if (len(output_list) > 3):
move_direction = 1 if output_list[-3] > ball_x else -1
ball_x = output_list[-3]
ball_y = output_list[-2]
#find the coord of the paddle and determine direction
if index == 2 and output_list[-1] == 3:
pad_x = output_list[-3]
pad_y = output_list[-2]
if (pad_x > (ball_x + move_direction)):
pad_direction = -1
elif (pad_x < (ball_x + move_direction)):
pad_direction = 1
else:
pad_direction = 0
#acumulate index
index += 1
if index % 3 == 0:
index = 0
else:
pass
cursor += 2
elif(op_code == 5):
if(op_mode[0] == 0):
p1 = code_list[code_list[cursor+1]]
elif(op_mode[0] == 1):
p1 = code_list[cursor+1]
elif(op_mode[0] == 2):
p1 = code_list[rela_base + code_list[cursor+1]]
else:
print('error getting addr in op5')
if(op_mode[1] == 0):
p2 = code_list[code_list[cursor+2]]
elif(op_mode[1] == 1):
p2 = code_list[cursor+2]
elif(op_mode[1] == 2):
p2 = code_list[rela_base + code_list[cursor+2]]
else:
print('error getting addr in op5')
if p1:
cursor = p2
else:
cursor += 3
elif(op_code == 6):
if(op_mode[0] == 0):
p1 = code_list[code_list[cursor+1]]
elif(op_mode[0] == 1):
p1 = code_list[cursor+1]
elif(op_mode[0] == 2):
p1 = code_list[rela_base + code_list[cursor+1]]
else:
print('error getting addr in op6')
if(op_mode[1] == 0):
p2 = code_list[code_list[cursor+2]]
elif(op_mode[1] == 1):
p2 = code_list[cursor+2]
elif(op_mode[1] == 2):
p2 = code_list[rela_base + code_list[cursor+2]]
else:
print('error getting addr in op6')
if not p1:
cursor = p2
else:
cursor += 3
elif(op_code == 7):
if(op_mode[0] == 0):
p1 = code_list[code_list[cursor+1]]
elif(op_mode[0] == 1):
p1 = code_list[cursor+1]
elif(op_mode[0] == 2):
p1 = code_list[rela_base + code_list[cursor+1]]
else:
print('error getting addr in op7')
if(op_mode[1] == 0):
p2 = code_list[code_list[cursor+2]]
elif(op_mode[1] == 1):
p2 = code_list[cursor+2]
elif(op_mode[1] == 2):
p2 = code_list[rela_base + code_list[cursor+2]]
else:
print('error getting addr in op7')
if(op_mode[2] == 0):
code_list[code_list[cursor+3]] = 1 if p1 < p2 else 0
elif(op_mode[2] == 2):
code_list[rela_base + code_list[cursor+3]] = 1 if p1 < p2 else 0
else:
print('error getting addr in op7')
cursor += 4
elif(op_code == 8):
if(op_mode[0] == 0):
p1 = code_list[code_list[cursor+1]]
elif(op_mode[0] == 1):
p1 = code_list[cursor+1]
elif(op_mode[0] == 2):
p1 = code_list[rela_base + code_list[cursor+1]]
else:
print('error getting addr in op8')
if(op_mode[1] == 0):
p2 = code_list[code_list[cursor+2]]
elif(op_mode[1] == 1):
p2 = code_list[cursor+2]
elif(op_mode[1] == 2):
p2 = code_list[rela_base + code_list[cursor+2]]
else:
print('error getting addr in op8')
if(op_mode[2] == 0):
code_list[code_list[cursor+3]] = 1 if p1 == p2 else 0
elif(op_mode[2] == 2):
code_list[rela_base + code_list[cursor+3]] = 1 if p1 == p2 else 0
else:
print('error getting addr in op8')
cursor += 4
elif(op_code == 9):
if(op_mode[0] == 0):
p1 = code_list[code_list[cursor+1]]
elif(op_mode[0] == 1):
p1 = code_list[cursor+1]
elif(op_mode[0] == 2):
p1 = code_list[rela_base + code_list[cursor+1]]
else:
print('error getting addr in op9')
rela_base += p1
cursor += 2
else:
if(op_code == 99):
print("program halt at: " + str(code_list[cursor-1]))
return -1
op_code = code_list[cursor]%100
print('break: error: ', code_list[cursor], ' next value: ', code_list[cursor+1])
def show_screen(screen_data):
    """Render the breakout-style game screen to stdout.

    Tile ids map to glyphs: 0 empty, 1 wall '|', 2 block 'x',
    3 paddle '_', 4 ball 'o'.  Unknown tile ids print nothing,
    matching the original if/elif chain's fall-through.
    """
    # Tile-id -> glyph table replaces the repeated if/elif tests.
    # (The original also declared `global block_cnt` but never used it.)
    glyphs = {0: ' ', 1: '|', 2: 'x', 3: '_', 4: 'o'}
    for row in screen_data:
        # Iterating each row directly also tolerates ragged rows, where
        # the original indexed every row by len(screen_data[0]).
        for tile in row:
            print(glyphs.get(tile, ''), end='')
        print('\n', end='')
if __name__ == "__main__":
    # Load the comma-separated intcode program from disk.
    f = open("input.txt", "r")
    line = f.read()
    mem = line.split(',', line.count(','))
    mem = list(map(int, mem))
    # Address 0 set to 2 switches the arcade game to "free play" mode
    # (Advent of Code day 13 convention -- TODO confirm).
    mem[0] = 2
    # Blank screen buffer, scr_len rows x scr_wide columns of zeros.
    screen = []
    scr_len = 25
    scr_wide = 50
    for i in range(scr_len):
        scr_line = []
        for j in range(scr_wide):
            scr_line.append(0)
        screen.append(scr_line)
    # Append extra zeroed memory so the program can address beyond its
    # own length.
    mem_extend = []
    ext_len = 100
    for i in range(ext_len):
        mem_extend.append(0)
    mem.extend(mem_extend)
    # Run the game on a copy; outputs accumulate in the global
    # output_list as (x, y, tile_id) triples.
    arcade_cabinet(copy.deepcopy(mem), 0)
    # Replay the output triples onto the screen buffer.
    inx = 0
    # 200 is the answer from the 1st question
    block_cnt = 0
    while inx in range(len(output_list)):
        # The special coordinate (-1, 0) carries the score, not a tile.
        if (output_list[inx] == -1 and output_list[inx+1] == 0):
            print('final score is:', output_list[inx+2])
            break
        # Empty cell: draw any known tile, counting blocks as they appear.
        if(screen[output_list[inx+1]][output_list[inx]] == 0):
            if output_list[inx+2] == 1 or output_list[inx+2] == 2 or output_list[inx+2] == 3 or output_list[inx+2] == 4:
                screen[output_list[inx+1]][output_list[inx]] = output_list[inx+2]
                if output_list[inx+2] == 2:
                    block_cnt += 1
            else:
                pass
        # If this cell holds a block, the ball can destroy it.
        elif(screen[output_list[inx+1]][output_list[inx]] == 2):
            if output_list[inx+2] == 4:
                screen[output_list[inx+1]][output_list[inx]] = 0
                block_cnt -= 1
            else:
                pass
        else:
            # Except for 0 and 2, cells are never changed; flag anything
            # outside the known tile-id range.
            if(screen[output_list[inx+1]][output_list[inx]] not in [0,1,2,3,4]):
                print('unrecged ops!')
            pass
        inx += 3
    # Draw the final game screen.
    show_screen(screen)
    print('blocks is:',block_cnt)
'''
def int_compute(code_list, iter_input):
global output_list
cursor = 0
rela_base = 0
op_code = code_list[cursor]%100
input_cnt = 0
while(op_code in correct_op):
#print('op code is: ', op_code, ' cursor is: ',code_list[cursor], code_list[cursor+1],code_list[cursor+2], code_list[cursor+3])
op_code = code_list[cursor]%100
op_mode = []
op_mode_int = code_list[cursor]//100
#print('op_mode_int: ' +str(op_mode_int))
for i in range(0,3):
op_mode.append(op_mode_int%10)
op_mode_int = op_mode_int//10
#print('op_mode is ', op_mode)
if(op_code == 1):
if(op_mode[0] == 0):
p1 = code_list[code_list[cursor+1]]
elif(op_mode[0] == 1):
p1 = code_list[cursor+1]
elif(op_mode[0] == 2):
p1 = code_list[rela_base + code_list[cursor+1]]
else:
print('error getting addr in op1')
if(op_mode[1] == 0):
p2 = code_list[code_list[cursor+2]]
elif(op_mode[1] == 1):
p2 = code_list[cursor+2]
elif(op_mode[1] == 2):
p2 = code_list[rela_base + code_list[cursor+2]]
else:
print('error getting addr in op1')
if(op_mode[2] == 0):
code_list[code_list[cursor+3]] = p1 + p2
elif(op_mode[2] == 2):
#print('cursor is: ', cursor, ' cursor is: ',code_list[cursor], code_list[cursor+1],code_list[cursor+2], code_list[cursor+3])
#print('relative is: ', rela_base)
code_list[rela_base + code_list[cursor+3]] = p1+ p2
else:
print('error getting addr in op1')
cursor += 4
elif(op_code == 2):
#print('curr pos: ', code_list[cursor], code_list[cursor+1], code_list[cursor+2], code_list[cursor+3])
if(op_mode[0] == 0):
#print('curr pos: ', code_list[cursor+1])
p1 = code_list[code_list[cursor+1]]
elif(op_mode[0] == 1):
p1 = code_list[cursor+1]
elif(op_mode[0] == 2):
p1 = code_list[rela_base + code_list[cursor+1]]
else:
print('error getting addr in op2')
if(op_mode[1] == 0):
p2 = code_list[code_list[cursor+2]]
elif(op_mode[1] == 1):
p2 = code_list[cursor+2]
elif(op_mode[1] == 2):
p2 = code_list[rela_base + code_list[cursor+2]]
else:
print('error getting addr in op2')
if(op_mode[2] == 0):
code_list[code_list[cursor+3]] = p1 * p2
elif(op_mode[2] == 2):
code_list[rela_base + code_list[cursor+3]] = p1 * p2
else:
print('error getting addr in op2')
cursor += 4
elif(op_code == 3):
input_cnt += 1
print('input cnt: ', input_cnt)
if (op_mode[0] != 0):
code_list[rela_base + code_list[cursor+1]] = iter_input
else:
code_list[code_list[cursor+1]] = iter_input
cursor += 2
elif(op_code == 4):
#print('op_mode: ' + str(op_mode))
if(op_mode[0] == 0):
#print("the output value (mode 0): " + str(code_list[code_list[cursor+1]]))
output_list.append(code_list[code_list[cursor+1]])
elif(op_mode[0] == 2):
output_list.append(code_list[rela_base + code_list[cursor+1]])
else:
#print("the output value (mode 1): " + str(code_list[cursor+1]))
output_list.append(code_list[cursor+1])
cursor += 2
elif(op_code == 5):
if(op_mode[0] == 0):
p1 = code_list[code_list[cursor+1]]
elif(op_mode[0] == 1):
p1 = code_list[cursor+1]
elif(op_mode[0] == 2):
p1 = code_list[rela_base + code_list[cursor+1]]
else:
print('error getting addr in op5')
if(op_mode[1] == 0):
p2 = code_list[code_list[cursor+2]]
elif(op_mode[1] == 1):
p2 = code_list[cursor+2]
elif(op_mode[1] == 2):
p2 = code_list[rela_base + code_list[cursor+2]]
else:
print('error getting addr in op5')
if p1:
cursor = p2
else:
cursor += 3
elif(op_code == 6):
if(op_mode[0] == 0):
p1 = code_list[code_list[cursor+1]]
elif(op_mode[0] == 1):
p1 = code_list[cursor+1]
elif(op_mode[0] == 2):
p1 = code_list[rela_base + code_list[cursor+1]]
else:
print('error getting addr in op6')
if(op_mode[1] == 0):
p2 = code_list[code_list[cursor+2]]
elif(op_mode[1] == 1):
p2 = code_list[cursor+2]
elif(op_mode[1] == 2):
p2 = code_list[rela_base + code_list[cursor+2]]
else:
print('error getting addr in op6')
if not p1:
cursor = p2
else:
cursor += 3
elif(op_code == 7):
if(op_mode[0] == 0):
p1 = code_list[code_list[cursor+1]]
elif(op_mode[0] == 1):
p1 = code_list[cursor+1]
elif(op_mode[0] == 2):
p1 = code_list[rela_base + code_list[cursor+1]]
else:
print('error getting addr in op7')
if(op_mode[1] == 0):
p2 = code_list[code_list[cursor+2]]
elif(op_mode[1] == 1):
p2 = code_list[cursor+2]
elif(op_mode[1] == 2):
p2 = code_list[rela_base + code_list[cursor+2]]
else:
print('error getting addr in op7')
if(op_mode[2] == 0):
code_list[code_list[cursor+3]] = 1 if p1 < p2 else 0
elif(op_mode[2] == 2):
code_list[rela_base + code_list[cursor+3]] = 1 if p1 < p2 else 0
else:
print('error getting addr in op7')
cursor += 4
elif(op_code == 8):
if(op_mode[0] == 0):
p1 = code_list[code_list[cursor+1]]
elif(op_mode[0] == 1):
p1 = code_list[cursor+1]
elif(op_mode[0] == 2):
p1 = code_list[rela_base + code_list[cursor+1]]
else:
print('error getting addr in op8')
if(op_mode[1] == 0):
p2 = code_list[code_list[cursor+2]]
elif(op_mode[1] == 1):
p2 = code_list[cursor+2]
elif(op_mode[1] == 2):
p2 = code_list[rela_base + code_list[cursor+2]]
else:
print('error getting addr in op8')
if(op_mode[2] == 0):
code_list[code_list[cursor+3]] = 1 if p1 == p2 else 0
elif(op_mode[2] == 2):
code_list[rela_base + code_list[cursor+3]] = 1 if p1 == p2 else 0
else:
print('error getting addr in op8')
cursor += 4
elif(op_code == 9):
if(op_mode[0] == 0):
p1 = code_list[code_list[cursor+1]]
elif(op_mode[0] == 1):
p1 = code_list[cursor+1]
elif(op_mode[0] == 2):
p1 = code_list[rela_base + code_list[cursor+1]]
else:
print('error getting addr in op9')
rela_base += p1
cursor += 2
else:
if(op_code == 99):
print("program halt at: " + str(code_list[cursor-1]))
return -1
op_code = code_list[cursor]%100
print('break: error: ', code_list[cursor], ' next value: ', code_list[cursor+1])
''' |
class RedirectStdoutTo:
    """Context manager that temporarily replaces sys.stdout.

    Example:
        with RedirectStdoutTo(buf) as out:
            print('captured')   # goes to buf
    """
    def __init__(self, out_new):
        # Stream that will stand in for sys.stdout inside the block.
        self.out_new = out_new
    def __enter__(self):
        self.out_old = sys.stdout
        sys.stdout = self.out_new
        # BUG FIX: the original returned None, making `with ... as f`
        # yield nothing useful.  Returning the active stream is
        # backward-compatible and matches contextlib.redirect_stdout.
        return self.out_new
    def __exit__(self, *args):
        # Always restore the original stream; do not suppress exceptions.
        sys.stdout = self.out_old
        return False
|
# Test the evaluation callback end to end with memory profiling.
from comet_ml import Experiment
import pytest
import os
import sys
from memory_profiler import profile
# Decimal places reported by memory_profiler's @profile.
precision = 10
import matplotlib.pyplot as plt
import h5py
import pandas as pd
# NOTE(review): hard-coded API key committed to source -- should come
# from an environment variable or config file.
experiment = Experiment(api_key="ypQZhYfs3nSyKzOfz13iuJpj2", project_name='deeplidar', log_code=True)
import keras
import tensorflow as tf
# Log file that @profile streams its memory reports into.
fp = open('callbacks.log', 'w+')
# Path hack: make the parent directory importable for DeepForest.
dir_path = os.path.dirname(os.path.realpath(__file__))
parent_path = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
sys.path.append(parent_path)
from DeepForest import evalmAP, config
from DeepForest.utils.generators import create_NEON_generator
from keras_retinanet import models
# Shared configuration loaded from the repository root.
DeepForest_config = config.load_config(dir="..")
site = "TEAK"
def get_session():
    """Construct a TF session whose GPU memory grows on demand."""
    # Local name avoids shadowing the imported DeepForest `config` module.
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    return tf.Session(config=tf_config)
# Install the growth-enabled session before Keras builds any graph.
keras.backend.tensorflow_backend.set_session(get_session())
print('Loading model, this may take a second...')
# NOTE(review): convert=True appears to produce the inference model
# (adding NMS) from a training snapshot -- confirm in keras_retinanet docs.
model = models.load_model("../snapshots/resnet50_28.h5", backbone_name="resnet50", convert=True, nms_threshold=DeepForest_config["nms_threshold"])
@profile(precision=precision, stream=fp)
def test_callback(model, experiment):
    """Run one NEON mAP evaluation, logging results to the experiment."""
    # Build the evaluation generator from the shared configuration.
    neon_generator = create_NEON_generator(DeepForest_config["batch_size"], DeepForest_config)
    # The return value is not used here; evaluate() reports its results
    # through the comet `experiment` and the save_path.
    evalmAP.evaluate(
        neon_generator,
        model,
        iou_threshold=0.5,
        score_threshold=0.15,
        max_detections=300,
        save_path="../snapshots/",
        experiment=experiment
    )
test_callback(model, experiment)
import matplotlib.pyplot as plt

# x values and their squares (labels below are Polish: "squares of numbers").
xs = [1, 2, 3, 4, 5]
squares = [1, 4, 9, 16, 25]

# NOTE(review): the 'seaborn' style name was removed in newer matplotlib
# releases ('seaborn-v0_8') -- confirm the installed version supports it.
plt.style.use('seaborn')
fig, ax = plt.subplots()
ax.plot(xs, squares, linewidth=3)
ax.set_title("Kwadraty liczb", fontsize=24)
ax.set_xlabel("Wartość", fontsize=14)
ax.set_ylabel("Kwadraty wartości", fontsize=14)
ax.tick_params(axis='both', labelsize=14)
plt.show()
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-10-09 20:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: alters two fields on DailyUserInputStrong.
    # Migrations should generally not be hand-edited once applied.

    dependencies = [
        ('user_input', '0018_auto_20171009_1609'),
    ]

    operations = [
        migrations.AlterField(
            model_name='dailyuserinputstrong',
            name='number_of_alcohol_consumed_yesterday',
            # NOTE(review): choices mix '' with int/float values on a
            # CharField; values are coerced to strings on save, and '0.5'
            # fits max_length=3 -- confirm form handling expects strings.
            field=models.CharField(blank=True, choices=[('', '-'), (0, '0'), (0.5, '0.5'), (1, '1'), (1.5, '1.5'), (2, '2'), (2.5, '2.5'), (3, '3'), (3.5, '3.5'), (4, '4'), (4.5, '4.5'), (5, '5'), (5.5, '5.5'), (6, '6'), (6.5, '6.5'), (7, '7'), (7.5, '7.5'), (8, '8'), (8.5, '8.5'), (9, '9'), (9.5, '9.5'), (10, '10')], max_length=3, null=True),
        ),
        migrations.AlterField(
            model_name='dailyuserinputstrong',
            name='work_out_easy_or_hard',
            field=models.CharField(blank=True, choices=[('', '-'), ('easy', 'Easy'), ('hard', 'Hard'), ('no workout today', 'No Workout Today')], max_length=20),
        ),
    ]
|
#This programa helps to covert equations from mathematica (as plain text)
#to a maneageable form to copy into Matlab or other language.
#How to: Copy the mathematica cell as plaintext and paste it in file, use
#this file as input of this script.
import sys, getopt, re
# Get the parameters
def main(argv):
    """Convert a Mathematica plain-text cell into Matlab-friendly syntax.

    Reads the file given by -i/--ifile, strips Mathematica-only notation
    ([t] arguments and \\[Name] symbols), inserts explicit '*' between
    space-separated factors, and writes the result to -o/--ofile.
    Exits with status 2 on bad options.
    """
    inputfile = ''
    outputfile = ''
    try:
        opts, args = getopt.getopt(argv, "hi:o:", ["ifile=", "ofile="])
    except getopt.GetoptError:
        print('test.py -i <inputfile> -o <outputfile>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('test.py -i <inputfile> -o <outputfile>')
            sys.exit()
        elif opt in ("-i", "--ifile"):
            inputfile = arg
        elif opt in ("-o", "--ofile"):
            outputfile = arg
    ###############
    # Raw strings keep the patterns readable (the original needed '\\\\'
    # escapes inside ordinary string literals).
    regex_time_arg = r"(\[t\])"            # drop Mathematica's [t] arguments
    regex_backslash_name = r"\\\[(.*?)\]"  # \[Alpha] -> Alpha
    # Insert '*' between two letters separated only by whitespace.
    # BUG FIX: the original class [a-z|A-Z] also treated '|' as a letter.
    regex_space_mult = r"([a-zA-Z])(\s+)([a-zA-Z])"
    # `with` guarantees both files are closed (the original never closed
    # either handle, risking unflushed output).
    with open(inputfile, "r") as file_in:
        text = file_in.read()
    text = re.sub(regex_time_arg, r" ", text)
    text = re.sub(regex_backslash_name, r"\1", text)
    # Applied twice on purpose: matches cannot overlap, so "a b c" needs
    # a second pass to become "a*b*c".
    text = re.sub(regex_space_mult, r"\1*\3", text)
    text = re.sub(regex_space_mult, r"\1*\3", text)
    with open(outputfile, "w") as file_out:
        file_out.write(text)

if __name__ == "__main__":
    main(sys.argv[1:])
# 插入排序
import random
def insert_sort(ls, n):
    """Find two entries of `ls` whose values sum to `n` and print them.

    Builds an insertion-sorted list of [value, original_index] pairs for
    every value <= n, then for each value a binary-searches the list for
    the complement n - a.  Prints the matching pair (or a "not found"
    message) and always returns None, like the original.
    """
    sort_ls = []
    # Insertion sort, keeping only values that can participate in a sum.
    for i, v in enumerate(ls):
        if v <= n:
            sort_ls.append([v, i])
            j = len(sort_ls) - 1
            while j - 1 >= 0 and v < sort_ls[j - 1][0]:
                sort_ls[j] = sort_ls[j - 1]
                j -= 1
            sort_ls[j] = [v, i]
    print('排序后的有效列表:', sort_ls)
    last = len(sort_ls) - 1
    if last < 1:
        print('查无此数')
        return None
    for i in range(last + 1):
        a = sort_ls[i][0]
        b = n - a
        if b != a:
            # Binary search for the complement b.
            left = 0
            right = len(sort_ls) - 1
            while left <= right:
                mid = (left + right) // 2
                val = sort_ls[mid][0]
                if b < val:
                    right = mid - 1
                elif b > val:
                    left = mid + 1
                else:
                    print(sort_ls[i], sort_ls[mid])
                    return None
        else:
            # BUG FIX: the original skipped the a + a == n case entirely.
            # The list is sorted, so a second copy of `a` (if any) sits
            # immediately after the first occurrence.
            if i + 1 <= last and sort_ls[i + 1][0] == a:
                print(sort_ls[i], sort_ls[i + 1])
                return None
    print('查无此数2')
def main():
    """Build a shuffled 0-9 list and a random target, then run the search."""
    data = list(range(10))
    random.shuffle(data)
    target = random.randint(0, 9)
    print('源数据列表:{};求和总数{}'.format(data, target))
    insert_sort(data, target)
# Run the experiment 1000 times with fresh random data each round.
for i in range(1000):
    main()
    print('#' * 20)
|
from django.shortcuts import render
from rest_framework import permissions, status, viewsets
from .serializers import *
from .models import *
from rest_framework.response import Response
# Create your views here.
class CustomUsersViewSet(viewsets.ModelViewSet):
    """CRUD API for CustomUsers; callers must be authenticated."""
    queryset = CustomUsers.objects.all()
    serializer_class = CustomUsersSerializers
    permission_classes = [permissions.IsAuthenticated]

    def create(self, request, *args, **kwargs):
        """Validate and persist a new user; respond 201 with the data.

        Mirrors ModelViewSet.create, kept as an explicit override so the
        flow is easy to extend.  (Removed the original's unused
        `request_dict = request.data.dict()` local.)
        """
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
|
#!/usr/bin/env python
"""Unit tests for phonenumbermatcher.py"""
# Based on original Java code:
# java/test/com/google/i18n/phonenumbers/PhoneNumberMatchTest.java
# java/test/com/google/i18n/phonenumbers/PhoneNumberMatcherTest.java
# Copyright (C) 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import pathfix
pathfix.fix()
from phonenumbers import PhoneNumberMatch, PhoneNumberMatcher, Leniency
from phonenumbers import PhoneNumber, phonenumberutil
from testphonenumberutil import insert_test_metadata, reinstate_real_metadata
class PhoneNumberMatchTest(unittest.TestCase):
    """Tests the value type semantics for PhoneNumberMatch.
    Equality must be based on the covered range and corresponding phone
    number. Range and number correctness are tested by PhoneNumberMatcherTest.
    """
    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testValueTypeSemantics(self):
        number = PhoneNumber()
        match1 = PhoneNumberMatch(10, "1 800 234 45 67", number)
        match2 = PhoneNumberMatch(10, "1 800 234 45 67", number)
        self.assertEquals(match1, match2)
        self.assertEquals(match1.start, match2.start)
        self.assertEquals(match1.end, match2.end)
        self.assertEquals(match1.number, match2.number)
        self.assertEquals(match1.raw_string, match2.raw_string)
        self.assertEquals("1 800 234 45 67", match1.raw_string)

    def testIllegalArguments(self):
        """Tests that the constructor rejects illegal arguments."""
        # BUG FIX: the original wrapped self.fail() inside
        # `except Exception`, which swallowed the AssertionError raised
        # by fail() -- the test passed even when no exception occurred.
        # assertRaises fails correctly when the constructor does not raise.
        self.assertRaises(Exception, PhoneNumberMatch, -110, "1 800 234 45 67", PhoneNumber())
        self.assertRaises(Exception, PhoneNumberMatch, 10, "1 800 234 45 67", None)
        self.assertRaises(Exception, PhoneNumberMatch, 10, None, PhoneNumber())
        self.assertRaises(Exception, PhoneNumberMatch, 10, None, None)

    def testStringConvert(self):
        """Check string conversion"""
        number = PhoneNumber()
        match = PhoneNumberMatch(10, "1 800 234 45 67", number)
        self.assertEquals("PhoneNumberMatch [10,25) 1 800 234 45 67", str(match))
class NumberContext(object):
    """Holds the text surrounding a candidate phone number.

    Tests insert the number to be found between `leadingText` and
    `trailingText`.
    """
    def __init__(self, leadingText, trailingText):
        self.leadingText, self.trailingText = leadingText, trailingText
class PhoneNumberMatcherTest(unittest.TestCase):
"""Tests for PhoneNumberMatcher.
This only tests basic functionality based on test metadata. See
testphonenumberutil.py for the origin of the test data.
"""
    def setUp(self):
        # Swap in the lightweight test metadata before each test.
        insert_test_metadata()

    def tearDown(self):
        # Restore real metadata so other test modules are unaffected.
        reinstate_real_metadata()
    # See PhoneNumberUtilTest.testParseNationalNumber().
    def testFindNationalNumber(self):
        """National-format numbers are found inside surrounding text."""
        # same cases as in testParseNationalNumber
        self.doTestFindInContext("033316005", "NZ")
        self.doTestFindInContext("33316005", "NZ")
        # National prefix attached and some formatting present.
        self.doTestFindInContext("03-331 6005", "NZ")
        self.doTestFindInContext("03 331 6005", "NZ")
        # Testing international prefixes.
        # Should strip country code.
        self.doTestFindInContext("0064 3 331 6005", "NZ")
        # Try again, but this time we have an international number with Region
        # Code US. It should recognize the country code and parse accordingly.
        self.doTestFindInContext("01164 3 331 6005", "US")
        self.doTestFindInContext("+64 3 331 6005", "US")
        self.doTestFindInContext("64(0)64123456", "NZ")
        # Check that using a "/" is fine in a phone number.
        self.doTestFindInContext("123/45678", "DE")
        self.doTestFindInContext("123-456-7890", "US")
    # See PhoneNumberUtilTest.testParseWithInternationalPrefixes().
    def testFindWithInternationalPrefixes(self):
        """Numbers written with IDD or '+' prefixes are found."""
        self.doTestFindInContext("+1 (650) 333-6000", "NZ")
        self.doTestFindInContext("1-650-333-6000", "US")
        # Calling the US number from Singapore by using different service
        # providers
        # 1st test: calling using SingTel IDD service (IDD is 001)
        self.doTestFindInContext("0011-650-333-6000", "SG")
        # 2nd test: calling using StarHub IDD service (IDD is 008)
        self.doTestFindInContext("0081-650-333-6000", "SG")
        # 3rd test: calling using SingTel V019 service (IDD is 019)
        self.doTestFindInContext("0191-650-333-6000", "SG")
        # Calling the US number from Poland
        self.doTestFindInContext("0~01-650-333-6000", "PL")
        # Using "++" at the start.
        self.doTestFindInContext("++1 (650) 333-6000", "PL")
        # Using a full-width plus sign.
        self.doTestFindInContext(u"\uFF0B1 (650) 333-6000", "SG")
        # The whole number, including punctuation, is here represented in
        # full-width form.
        self.doTestFindInContext(u"\uFF0B\uFF11\u3000\uFF08\uFF16\uFF15\uFF10\uFF09" +
                                 u"\u3000\uFF13\uFF13\uFF13\uFF0D\uFF16\uFF10\uFF10\uFF10",
                                 "SG")
    # See PhoneNumberUtilTest.testParseWithLeadingZero().
    def testFindWithLeadingZero(self):
        """Italian numbers keep their significant leading zero."""
        self.doTestFindInContext("+39 02-36618 300", "NZ")
        self.doTestFindInContext("02-36618 300", "IT")
        self.doTestFindInContext("312 345 678", "IT")
    # See PhoneNumberUtilTest.testParseNationalNumberArgentina().
    def testFindNationalNumberArgentina(self):
        """Argentinian mobile and fixed-line formats are found."""
        # Test parsing mobile numbers of Argentina.
        self.doTestFindInContext("+54 9 343 555 1212", "AR")
        self.doTestFindInContext("0343 15 555 1212", "AR")
        self.doTestFindInContext("+54 9 3715 65 4320", "AR")
        self.doTestFindInContext("03715 15 65 4320", "AR")
        # Test parsing fixed-line numbers of Argentina.
        self.doTestFindInContext("+54 11 3797 0000", "AR")
        self.doTestFindInContext("011 3797 0000", "AR")
        self.doTestFindInContext("+54 3715 65 4321", "AR")
        self.doTestFindInContext("03715 65 4321", "AR")
        self.doTestFindInContext("+54 23 1234 0000", "AR")
        self.doTestFindInContext("023 1234 0000", "AR")
    # See PhoneNumberUtilTest.testParseWithXInNumber().
    def testFindWithXInNumber(self):
        """Carrier-selection 'xx' digits do not break matching."""
        self.doTestFindInContext("(0xx) 123456789", "AR")
        # This test is intentionally constructed such that the number of digit
        # after xx is larger than 7, so that the number won't be mistakenly
        # treated as an extension, as we allow extensions up to 7 digits. This
        # assumption is okay for now as all the countries where a carrier
        # selection code is written in the form of xx have a national
        # significant number of length larger than 7.
        self.doTestFindInContext("011xx5481429712", "US")
    # See PhoneNumberUtilTest.testParseNumbersMexico().
    def testFindNumbersMexico(self):
        """Mexican fixed-line and mobile formats are found."""
        # Test parsing fixed-line numbers of Mexico.
        self.doTestFindInContext("+52 (449)978-0001", "MX")
        self.doTestFindInContext("01 (449)978-0001", "MX")
        self.doTestFindInContext("(449)978-0001", "MX")
        # Test parsing mobile numbers of Mexico.
        self.doTestFindInContext("+52 1 33 1234-5678", "MX")
        self.doTestFindInContext("044 (33) 1234-5678", "MX")
        self.doTestFindInContext("045 33 1234-5678", "MX")
    # See PhoneNumberUtilTest.testParseNumbersWithPlusWithNoRegion().
    def testFindNumbersWithPlusWithNoRegion(self):
        """A leading '+' lets matching work without a default region."""
        # "ZZ" is allowed only if the number starts with a '+' - then the
        # country code can be calculated.
        self.doTestFindInContext("+64 3 331 6005", "ZZ")
        # None is also allowed for the region code in these cases.
        self.doTestFindInContext("+64 3 331 6005", None)
    # See PhoneNumberUtilTest.testParseExtensions().
    def testFindExtensions(self):
        """Numbers with extension markers (ext, x, #, int.) are found."""
        self.doTestFindInContext("03 331 6005 ext 3456", "NZ")
        self.doTestFindInContext("03-3316005x3456", "NZ")
        self.doTestFindInContext("03-3316005 int.3456", "NZ")
        self.doTestFindInContext("03 3316005 #3456", "NZ")
        self.doTestFindInContext("0~0 1800 7493 524", "PL")
        self.doTestFindInContext("(1800) 7493.524", "US")
        # Check that the last instance of an extension token is matched.
        self.doTestFindInContext("0~0 1800 7493 524 ~1234", "PL")
        # Verifying bug-fix where the last digit of a number was previously omitted if it was a 0 when
        # extracting the extension. Also verifying a few different cases of extensions.
        self.doTestFindInContext("+44 2034567890x456", "NZ")
        self.doTestFindInContext("+44 2034567890x456", "GB")
        self.doTestFindInContext("+44 2034567890 x456", "GB")
        self.doTestFindInContext("+44 2034567890 X456", "GB")
        self.doTestFindInContext("+44 2034567890 X 456", "GB")
        self.doTestFindInContext("+44 2034567890 X 456", "GB")
        self.doTestFindInContext("+44 2034567890 X 456", "GB")
        self.doTestFindInContext("(800) 901-3355 x 7246433", "US")
        self.doTestFindInContext("(800) 901-3355 , ext 7246433", "US")
        self.doTestFindInContext("(800) 901-3355 ,extension 7246433", "US")
        self.doTestFindInContext("(800) 901-3355 , 7246433", "US")
        self.doTestFindInContext("(800) 901-3355 ext: 7246433", "US")
    def testFindInterspersedWithSpace(self):
        """Digits separated by single spaces still form one match."""
        self.doTestFindInContext("0 3 3 3 1 6 0 0 5", "NZ")
    # Test matching behavior when starting in the middle of a phone number.
    def testIntermediateParsePositions(self):
        """Starting offsets inside the first number shift or skip the match."""
        text = "Call 033316005 or 032316005!"
        #      |    |    |    |    |    |
        #      0    5    10   15   20   25
        # Iterate over all possible indices.
        for ii in xrange(6):
            self.assertEqualRange(text, ii, 5, 14)
        # 7 and 8 digits in a row are still parsed as number.
        self.assertEqualRange(text, 6, 6, 14)
        self.assertEqualRange(text, 7, 7, 14)
        # Anything smaller is skipped to the second instance.
        for ii in xrange(8, 20):
            self.assertEqualRange(text, ii, 19, 28)
    def testMatchWithSurroundingZipcodes(self):
        """Nearby zip codes must not be absorbed into the phone number."""
        number = "415-666-7777"
        zipPreceding = "My address is CA 34215. " + number + " is my number."
        expectedResult = phonenumberutil.parse(number, "US")
        matcher = PhoneNumberMatcher(zipPreceding, "US")
        if matcher.has_next():
            match = matcher.next()
        else:
            match = None
        self.assertTrue(match is not None,
                        msg="Did not find a number in '" + zipPreceding + "'; expected " + number)
        self.assertEquals(expectedResult, match.number)
        self.assertEquals(number, match.raw_string)
        # Now repeat, but this time the phone number has spaces in it. It should still be found.
        number = "(415) 666 7777"
        zipFollowing = "My number is " + number + ". 34215 is my zip-code."
        matcher = PhoneNumberMatcher(zipFollowing, "US")
        if matcher.has_next():
            matchWithSpaces = matcher.next()
        else:
            matchWithSpaces = None
        self.assertTrue(matchWithSpaces is not None,
                        msg="Did not find a number in '" + zipFollowing + "'; expected " + number)
        self.assertEquals(expectedResult, matchWithSpaces.number)
        self.assertEquals(number, matchWithSpaces.raw_string)
    def testNoMatchIfRegionIsNone(self):
        # Fail on non-international prefix if region code is None.
        self.assertTrue(self.hasNoMatches(PhoneNumberMatcher("Random text body - number is 0331 6005, see you there", None)))

    def testNoMatchInEmptyString(self):
        # Empty and whitespace-only inputs yield no matches.
        self.assertTrue(self.hasNoMatches(PhoneNumberMatcher("", "US")))
        self.assertTrue(self.hasNoMatches(PhoneNumberMatcher(" ", "US")))

    def testNoMatchIfNoNumber(self):
        # Text with no digit sequence at all yields no matches.
        self.assertTrue(self.hasNoMatches(PhoneNumberMatcher("Random text body - number is foobar, see you there", "US")))
    def testSequences(self):
        """Multiple numbers in one text are returned in document order."""
        # Test multiple occurrences.
        text = "Call 033316005 or 032316005!"
        region = "NZ"
        number1 = PhoneNumber()
        number1.country_code = phonenumberutil.country_code_for_region(region)
        number1.national_number = 33316005
        match1 = PhoneNumberMatch(5, "033316005", number1)
        number2 = PhoneNumber()
        number2.country_code = phonenumberutil.country_code_for_region(region)
        number2.national_number = 32316005
        match2 = PhoneNumberMatch(19, "032316005", number2)
        matcher = PhoneNumberMatcher(text, region, Leniency.POSSIBLE, sys.maxint)
        self.assertEquals(match1, matcher.next())
        self.assertEquals(match2, matcher.next())
        self.assertFalse(matcher.has_next())
    def testNoneInput(self):
        # A None text (with or without a region) must yield no matches.
        self.assertTrue(self.hasNoMatches(PhoneNumberMatcher(None, "US")))
        self.assertTrue(self.hasNoMatches(PhoneNumberMatcher(None, None)))
    def testMaxMatches(self):
        """max_tries limits failed candidates only, not successful matches."""
        # Set up text with 100 valid phone numbers.
        numbers = "My info: 415-666-7777," * 100
        # Matches all 100. Max only applies to failed cases.
        number = phonenumberutil.parse("+14156667777", None)
        expected = [number] * 100
        matcher = PhoneNumberMatcher(numbers, "US", Leniency.VALID, 10)
        actual = [x.number for x in matcher]
        self.assertEquals(expected, actual)
    def testMaxMatchesInvalid(self):
        """Exhausting max_tries on invalid candidates stops the matcher."""
        # Set up text with 10 invalid phone numbers followed by 100 valid.
        numbers = (("My address 949-8945-0" * 10) +
                   ("My info: 415-666-7777," * 100))
        matcher = PhoneNumberMatcher(numbers, "US", Leniency.VALID, 10)
        self.assertFalse(matcher.has_next())
    def testMaxMatchesMixed(self):
        """Interleaved failures consume max_tries part-way through."""
        # Set up text with 100 valid numbers inside an invalid number.
        numbers = "My info: 415-666-7777 123 fake street" * 100
        # Only matches the first 5 despite there being 100 numbers due to max matches.
        # There are two false positives per line as "123" is also tried.
        number = phonenumberutil.parse("+14156667777", None)
        expected = [number] * 5
        matcher = PhoneNumberMatcher(numbers, "US", Leniency.VALID, 10)
        actual = [x.number for x in matcher]
        self.assertEquals(expected, actual)
def testEmptyIteration(self):
matcher = PhoneNumberMatcher("", "ZZ")
self.assertFalse(matcher.has_next())
self.assertFalse(matcher.has_next())
try:
matcher.next()
self.fail("Violation of the iterator contract.")
except Exception:
# Success
pass
self.assertFalse(matcher.has_next())
def testSingleIteration(self):
matcher = PhoneNumberMatcher("+14156667777", "ZZ")
# With hasNext() -> next().
# Double hasNext() to ensure it does not advance.
self.assertTrue(matcher.has_next())
self.assertTrue(matcher.has_next())
self.assertTrue(matcher.next() is not None)
self.assertFalse(matcher.has_next())
try:
matcher.next()
self.fail("Violation of the Matcher contract.")
except Exception:
# Success
pass
self.assertFalse(matcher.has_next())
# With next() only.
matcher = PhoneNumberMatcher("+14156667777", "ZZ")
self.assertTrue(matcher.next() is not None)
try:
matcher.next()
self.fail("Violation of the Matcher contract.")
except Exception:
# Success
pass
def testDoubleIteration(self):
matcher = PhoneNumberMatcher("+14156667777 foobar +14156667777 ", "ZZ")
# With hasNext() -> next().
# Double hasNext() to ensure it does not advance.
self.assertTrue(matcher.has_next())
self.assertTrue(matcher.has_next())
self.assertTrue(matcher.next() is not None)
self.assertTrue(matcher.has_next())
self.assertTrue(matcher.has_next())
self.assertTrue(matcher.next() is not None)
self.assertFalse(matcher.has_next())
try:
matcher.next()
self.fail("Violation of the Matcher contract.")
except Exception:
# Success
pass
self.assertFalse(matcher.has_next())
# With next() only.
matcher = PhoneNumberMatcher("+14156667777 foobar +14156667777 ", "ZZ")
self.assertTrue(matcher.next() is not None)
self.assertTrue(matcher.next() is not None)
try:
matcher.next()
self.fail("Violation of the Matcher contract.")
except Exception:
# Success
pass
    def assertEqualRange(self, text, index, start, end):
        """Asserts that another number can be found in text starting at index, and that
        its corresponding range is [start, end).
        """
        # Search only the suffix, so the expected offsets shift by `index`.
        sub = text[index:]
        matcher = PhoneNumberMatcher(sub, "NZ", Leniency.POSSIBLE, sys.maxint)
        self.assertTrue(matcher.has_next())
        match = matcher.next()
        self.assertEquals(start - index, match.start)
        self.assertEquals(end - index, match.end)
        self.assertEquals(match.raw_string, sub[match.start:match.end])
    def doTestFindInContext(self, number, defaultCountry):
        """Tests numbers found by PhoneNumberMatcher in various textual contexts"""
        self.findPossibleInContext(number, defaultCountry)
        parsed = phonenumberutil.parse(number, defaultCountry)
        # Only valid numbers are expected to survive the stricter leniency.
        if phonenumberutil.is_valid_number(parsed):
            self.findValidInContext(number, defaultCountry)
    def findPossibleInContext(self, number, defaultCountry):
        """Checks NUMBER is found under Leniency.POSSIBLE in each textual context."""
        contextPairs = [NumberContext("", ""), # no context
                        NumberContext(" ", "\t"), # whitespace only
                        NumberContext("Hello ", ""), # no context at end
                        NumberContext("", " to call me!"), # no context at start
                        NumberContext("Hi there, call ", " to reach me!"), # no context at start
                        NumberContext("Hi there, call ", ", or don't"), # with commas
                        # Three examples without whitespace around the number.
                        NumberContext("Hi call", ""),
                        NumberContext("", "forme"),
                        NumberContext("Hi call", "forme"),
                        # With other small numbers.
                        NumberContext("It's cheap! Call ", " before 6:30"),
                        # With a second number later.
                        NumberContext("Call ", " or +1800-123-4567!"),
                        NumberContext("Call me on June 21 at", ""), # with a Month-Day date
                        # With publication pages.
                        NumberContext("As quoted by Alfonso 12-15 (2009), you may call me at ", ""),
                        NumberContext("As quoted by Alfonso et al. 12-15 (2009), you may call me at ", ""),
                        # With dates, written in the American style.
                        NumberContext("As I said on 03/10/2011, you may call me at ", ""),
                        NumberContext("As I said on 03/27/2011, you may call me at ", ""),
                        NumberContext("As I said on 31/8/2011, you may call me at ", ""),
                        NumberContext("As I said on 1/12/2011, you may call me at ", ""),
                        NumberContext("I was born on 10/12/82. Please call me at ", ""),
                        # With a postfix stripped off as it looks like the start of another number
                        NumberContext("Call ", "/x12 more"),
                        ]
        self.doTestInContext(number, defaultCountry, contextPairs, Leniency.POSSIBLE)
    # Tests valid numbers in contexts that fail for Leniency.POSSIBLE.
    def findValidInContext(self, number, defaultCountry):
        """Checks NUMBER is still found under Leniency.VALID in contexts (small
        numbers, date-like text) that defeat Leniency.POSSIBLE."""
        contextPairs = [
            # With other small numbers.
            NumberContext("It's only 9.99! Call ", " to buy"),
            # With a number Day.Month.Year date.
            NumberContext("Call me on 21.6.1984 at ", ""),
            # With a number Month/Day date.
            NumberContext("Call me on 06/21 at ", ""),
            # With a number Day.Month date
            NumberContext("Call me on 21.6. at ", ""),
            # With a number Month/Day/Year date.
            NumberContext("Call me on 06/21/84 at ", ""),
            ]
        self.doTestInContext(number, defaultCountry, contextPairs, Leniency.VALID)
    def doTestInContext(self, number, defaultCountry, contextPairs, leniency):
        """Asserts that NUMBER is found at the exact expected position, with the
        exact raw string, in each (leading, trailing) context pair."""
        for context in contextPairs:
            prefix = context.leadingText
            text = prefix + number + context.trailingText
            # Expected match span: immediately after the leading text.
            start = len(prefix)
            end = start + len(number)
            matcher = PhoneNumberMatcher(text, defaultCountry, leniency, sys.maxint)
            if matcher.has_next():
                match = matcher.next()
            else:
                match = None
            self.assertTrue(match is not None,
                            msg="Did not find a number in '" + text + "'; expected '" + number + "'")
            extracted = text[match.start:match.end]
            self.assertEquals(start, match.start,
                              msg="Unexpected phone region in '" + text + "'; extracted '" + extracted + "'")
            self.assertEquals(end, match.end,
                              msg="Unexpected phone region in '" + text + "'; extracted '" + extracted + "'")
            self.assertEquals(number, extracted)
            self.assertEquals(match.raw_string, extracted)
            self.ensureTermination(text, defaultCountry, leniency)
    # Exhaustively searches for phone numbers from each index within text to
    # test that finding matches always terminates.
    def ensureTermination(self, text, defaultCountry, leniency):
        """Iterates all matches over every suffix of TEXT; if matching failed to
        terminate this would hang (and the test harness would time out)."""
        for index in xrange(len(text) + 1):
            sub = text[index:]
            matches = ""
            # Iterates over all matches.
            for match in PhoneNumberMatcher(sub, defaultCountry, leniency, sys.maxint):
                matches += ", " + str(match)
def hasNoMatches(self, matcher):
"""Returns True if there were no matches found."""
return not matcher.has_next()
|
AA,BB=map(int,input().split())
CC1=list(map(int,input().split()))
p=list(map(int,input().split()))
q=[]
aa=0
for i in range(AA):
x=p[i]/CC1[i]
q.append(x)
while BB>=0 and len(q)>0:
mindex=q.index(max(q))
if BB>=CC1[mindex]:
aa=aa+p[mindex]
BB=BB-CC1[mindex]
CC1.pop(mindex)
p.pop(mindex)
q.pop(mindex)
print(aa)
|
from interface import apa102
import threading
from threading import Thread
import math
import time
class Lights(Thread):
    """Background worker that drives a 3-LED APA102 strip with a named effect."""

    def __init__(self, effect, effect_params):
        Thread.__init__(self)
        self.number = 3
        self.led = apa102.APA102(num_led=3)
        self.effect = effect
        self.effect_params = effect_params
        self._stop_event = threading.Event()

    def run(self):
        """Dispatch to the effect chosen at construction time."""
        params = self.effect_params
        if self.effect == "change":
            self.change(params["r"], params["g"], params["b"],
                        params["intensity"], params["duration"])
        elif self.effect == "pulse":
            self.pulse(params["r"], params["g"], params["b"])

    def stop(self):
        """Ask a running effect loop to terminate."""
        self._stop_event.set()

    def stopped(self):
        """Return True once stop() has been requested."""
        return self._stop_event.is_set()

    def change(self, r, g, b, intensity=100, duration=0):
        """Set every pixel to (r, g, b); if duration > 0, hold then black out."""
        for idx in range(0, self.number):
            self.led.set_pixel(idx, r, g, b, intensity)
        self.led.show()
        if duration != 0:
            time.sleep(duration)
            self.change(0, 0, 0, 0)

    def pulse(self, r, g, b):
        """Sinusoidally fade the colour in and out until stop() is called."""
        # add speed and intensity range later
        phase = 0
        while not self.stopped():
            level = math.sin(phase - (3 * math.pi / 2)) / 2 + 0.5
            self.change(r, g, b, int(round(level * 100)))
            phase = phase + 0.4
            time.sleep(0.1)
|
# KVM-based Discoverable Cloudlet (KD-Cloudlet)
# Copyright (c) 2015 Carnegie Mellon University.
# All Rights Reserved.
#
# THIS SOFTWARE IS PROVIDED "AS IS," WITH NO WARRANTIES WHATSOEVER. CARNEGIE MELLON UNIVERSITY EXPRESSLY DISCLAIMS TO THE FULLEST EXTENT PERMITTEDBY LAW ALL EXPRESS, IMPLIED, AND STATUTORY WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT OF PROPRIETARY RIGHTS.
#
# Released under a modified BSD license, please see license.txt for full terms.
# DM-0002138
#
# KD-Cloudlet includes and/or makes use of the following Third-Party Software subject to their own licenses:
# MiniMongo
# Copyright (c) 2010-2014, Steve Lacy
# All rights reserved. Released under BSD license.
# https://github.com/MiniMongo/minimongo/blob/master/LICENSE
#
# Bootstrap
# Copyright (c) 2011-2015 Twitter, Inc.
# Released under the MIT License
# https://github.com/twbs/bootstrap/blob/master/LICENSE
#
# jQuery JavaScript Library v1.11.0
# http://jquery.com/
# Includes Sizzle.js
# http://sizzlejs.com/
# Copyright 2005, 2014 jQuery Foundation, Inc. and other contributors
# Released under the MIT license
# http://jquery.org/license
__author__ = 'Sebastian'
######################################################################################################################
# Interface for a device that can be used for a Secure Key Authorization exchange.
######################################################################################################################
class ISKADevice(object):
    """Interface for a device that can be used for a Secure Key Authorization
    exchange.

    Concrete device types must override every method; the base implementations
    all raise NotImplementedError.  (Declared as a new-style class so that
    descriptors such as @staticmethod behave consistently on Python 2.)
    """

    # To be called on each execution of the server before starting to use a device type.
    @staticmethod
    def initialize(root_folder):
        """Prepare this device type; ROOT_FOLDER holds its working files."""
        raise NotImplementedError()

    # To be called when bootstrapping or re-bootstrapping the server.
    @staticmethod
    def bootstrap():
        """Set up device-type state when the server is (re-)bootstrapped."""
        raise NotImplementedError()

    @staticmethod
    def list_devices():
        """Return the currently available devices of this type."""
        raise NotImplementedError()

    def get_name(self):
        """Return the device's unique name."""
        raise NotImplementedError()

    def get_port(self):
        """Return the port used to communicate with the device."""
        raise NotImplementedError()

    def get_friendly_name(self):
        """Return a human-readable name for the device."""
        raise NotImplementedError()

    def connect(self):
        """Open a connection to the device."""
        raise NotImplementedError()

    def disconnect(self):
        """Close the connection to the device."""
        raise NotImplementedError()

    # DATA needs to be a dictionary of key-value pairs (the value is not used, only the key, but the value has to be non-empty).
    def get_data(self, data):
        """Request from the device the values for the keys in DATA."""
        raise NotImplementedError()

    # DATA needs to be a dictionary of key-value pairs.
    def send_data(self, data):
        """Send the key-value pairs in DATA to the device."""
        raise NotImplementedError()

    def send_file(self, file_path, file_id):
        """Transfer the file at FILE_PATH to the device, tagged with FILE_ID."""
        raise NotImplementedError()
|
#!/usr/bin/env python
#
# SPIMAP
# Matt Rasmussen 2010-2011
#
# Train the SPIMAP dup/loss rates model from a gene count matrix
#
#=============================================================================
# python libs
import sys, optparse
from math import log, exp
from os.path import realpath, dirname, join
# spidir libs
try:
import spidir
except:
sys.path.append(join(realpath(dirname(dirname(__file__))), "python"))
import spidir
# rasmus libs
from rasmus import util, treelib, plotting
# compbio libs
from compbio import phylo
#=============================================================================
# options
o = optparse.OptionParser()
o.set_defaults()
# Required inputs: species tree, gene-to-species map and the gene count matrix.
o.add_option("-s", "--stree", dest="stree",
             metavar="<species tree newick file>")
o.add_option("-S", "--smap", dest="smap",
             metavar="<gene to species file>")
o.add_option("-c", "--countmatrix", dest="countmatrix",
             metavar="<gene count matrix output file>")
# Model and optimisation parameters.
o.add_option("", "--maxgene", dest="maxgene", type="int",
             default=20, metavar="<maximum number of genes in ancestor>")
o.add_option("", "--maxiter", dest="maxiter", type="int",
             default=100, metavar="<maximum number of ML iterations>")
o.add_option("", "--birth", dest="birth_init", type="float",
             default=.1, metavar="<initial birth rate>")
o.add_option("", "--death", dest="death_init", type="float",
             default=.1, metavar="<initial death rate>")
o.add_option("", "--step", dest="step", type="float",
             default=1.0, metavar="<initial step size>")
# Supplying --range switches the program to heatmap mode (see main()).
o.add_option("-r", "--range", dest="range",
             metavar="<start>,<step>,<stop>")
o.add_option("-H", "--heatmap", dest="heatmap",
             default="duploss",
             metavar="<heatmap file>")
#=============================================================================
def error(message, exit=1):
    """Print MESSAGE to stderr and terminate with status EXIT (default 1)."""
    print >>sys.stderr, message
    sys.exit(exit)
def main(conf, args):
    """Read the species tree and gene count matrix, then either draw a
    likelihood heatmap (when --range is given) or fit birth/death rates
    by maximum likelihood."""
    # TODO: make more checks
    if conf.stree is None or conf.smap is None:
        error("error: must specify species tree and mapping")
    # read in species data
    stree = treelib.read_tree(conf.stree)
    gene2species = phylo.read_gene2species(conf.smap)
    infile = open(conf.countmatrix)
    # First line of the matrix is the species header.
    species = infile.next().rstrip().split("\t")
    # read all counts
    counts = [map(int, line.rstrip().split("\t"))
              for line in infile]
    # Ensure the ancestor-gene cap is at least twice the largest observed count.
    top = max(map(max, counts))
    conf.maxgene = max(top*2, conf.maxgene)
    if conf.range is not None:
        # make heatmap
        make_heatmap(conf, stree, counts)
    else:
        # estimate ml birth death parameters
        b, d = birth_death_ml(conf, stree, counts)
        print "birth\t%f" % b
        print "death\t%f" % d
def birth_death_ml(conf, stree, counts):
    """Maximum-likelihood estimate of the (birth, death) rates.

    Branch lengths are normalised so the tree length sums to 1; the initial
    rates are scaled up accordingly and the fitted rates scaled back down
    before returning (b, d)."""
    birth0 = conf.birth_init
    death0 = conf.death_init
    step = conf.step
    # Normalise total tree length to 1 and rescale the starting rates.
    scale = sum(node.dist for node in stree)
    for node in stree:
        node.dist /= scale
    birth0 *= scale
    death0 *= scale
    opt = spidir.birth_death_counts_ml_alloc(stree, counts,
                                             birth0, death0, step,
                                             maxgene=conf.maxgene)
    # Iterate the optimiser until it reports convergence (status == 0).
    for i in range(conf.maxiter):
        status, size, (b,d) = spidir.birth_death_counts_ml_iter(opt)
        print >>sys.stderr, "b=%f\td=%f\tsize=%f" % (b/scale, d/scale, size)
        if status == 0:
            break
    spidir.birth_death_counts_ml_free(opt)
    # Undo the branch-length normalisation.
    b /= scale
    d /= scale
    return b, d
def make_heatmap(conf, stree, counts):
    """Tabulate the log-likelihood over a (birth x death) rate grid given by
    --range and write it as a text matrix and an SVG heatmap."""
    start, step, stop = map(float, conf.range.split(","))
    # +step*.5 makes the range inclusive of `stop` despite float rounding.
    rates = list(util.frange(start,stop+step*.5,step))
    mat = util.make_matrix(len(rates), len(rates), 0.0)
    for i, birth in enumerate(rates):
        for j, death in enumerate(rates):
            l = spidir.birth_death_forest_counts(stree, counts,
                                                 birth, death,
                                                 maxgene=conf.maxgene,
                                                 rootgene=1)
            print >>sys.stderr, birth, death, l
            print birth, death, l
            mat[j][i] = l
    util.write_delim(conf.heatmap + ".txt", util.map2(str, mat))
    # Pick tick-label precision appropriate to the rate magnitudes.
    if min(rates) >= .01:
        labels = ["%.2f" % x for x in rates]
    else:
        labels = ["%.2e" % x for x in rates]
    # Shift by the maximum before exponentiating to avoid underflow in exp().
    high = max(util.flatten(mat))
    plotting.heatmap(util.map2(lambda x: exp(x-high), mat),
                     rlabels=labels, clabels=labels,
                     xmargin=100, ymargin=100, ydir=-1,
                     filename=conf.heatmap + ".svg")
# Script entry point: parse the command-line options and run.
conf, args = o.parse_args()
main(conf, args)
|
#!/usr/bin/python
__author__ = 'Elisabetta Ronchieri'
import sys
import unittest
import getopt
import exceptions
from tstorm.utils import report_file
from tstorm.utils import settings
from tstorm.utils import usage
from tstorm.utils import sequence
from tstorm.utils import release
from tstorm.utils import range
from tstorm.utils import limit
from tstorm.utils import test
from tstorm.utils import tests
from tstorm.utils import filters
from tstorm.utils import configuration
try:
from tstorm.tests import commontests as cts
from tstorm.tests.atomic import atomicstests as at
from tstorm.tests.functional import webdavtests as wt
from tstorm.tests.functional import functionalitiestests as ft
from tstorm.tests.functional import functionalitiestests_novoms as ftnv
from tstorm.tests.functional import tapetests as tt
from tstorm.tests.functional.regression import regressiontests as rt
from tstorm.tests.functional.regression import regressiontests_novoms as rtnv
from tstorm.tests.functional.regression import regression_ldaptests as rlt
from tstorm.tests.load import loadstests as lt
except ImportError, err:
print ''
class OptionError(exceptions.Exception):
    """Raised when an unhandled command-line option is encountered."""
    pass
class RunTestsError(exceptions.Exception):
    """Raised for configuration or test-selection failures in RunTests."""
    pass
class RunTests(object):
    """Drives T-StoRM test execution.

    Parses command-line options, loads the ini configuration, selects the
    tests valid for the target StoRM release and runs them with unittest,
    writing results through a ReportFile.
    """
    def __init__(self):
        # Central option/state dictionary.  Entries stored as (flag, value)
        # tuples use flag=True to mean "explicitly set by the user".
        self.parameters = {}
        self.parameters['custom_conf_file'] = (False, 'tstorm.ini')
        try:
            storm_release = release.Release(__import__('tstorm').get_storm_release())
        except release.ReleaseError, err:
            print '\n\nExecution: ', err
            usage.get_usage(self.parameters)
            sys.exit(2)
        self.parameters['storm_release'] = storm_release
        self.parameters['voms'] = True
        self.parameters['report'] = True
        self.parameters['custom_destination_file'] = (False, '')
        self.parameters['tests_sequence'] = (False, [])
        self.parameters['tests_sequence_file']= (False, '')
        self.parameters['list_tests_details'] = (False, {})
        self.parameters['filter_tests_details'] = (False, {})
        self.parameters['valid_tests'] = {}
        self.parameters['node'] = []
        if settings.configuration_file_exists(file_name = 'map_tests_ids.json'):
            '''Get Test Id Mapping Info from file'''
            self.parameters['mti_info'] = settings.get_json_file_information(file_name = 'map_tests_ids.json')
        else:
            raise RunTestsError("map-tests-ids.json file is not in the right location")
        self.tests_instance = tests.Tests(self.parameters['mti_info'])
    def verify_conf_file(self):
        """Locate and validate the ini configuration file, then record the
        enabled nodes; raises RunTestsError on a missing or invalid file."""
        if self.parameters['custom_conf_file'][0]:
            if settings.file_exists(self.parameters['custom_conf_file'][1]):
                self.parameters['custom_conf_file'] = (True, settings.get_custom_configuration_file(file_name=self.parameters['custom_conf_file'][1]))
            else:
                raise RunTestsError("ini file is not in the right location")
        else:
            if settings.configuration_file_exists(file_name = self.parameters['custom_conf_file'][1]):
                self.parameters['custom_conf_file'] = (False, settings.get_configuration_file(file_name = self.parameters['custom_conf_file'][1]))
            else:
                raise RunTestsError("ini file is not in the right location")
        check_configuration_file = configuration.LoadConfiguration(conf_file = self.parameters['custom_conf_file'][1])
        if not check_configuration_file.is_configuration_file_valid():
            print '''Example of ini configuration file:\n'''
            check_configuration_file.print_configuration_file_template()
            raise RunTestsError("Wrong Test Configuration file")
        # Collect the nodes flagged 'yes' in the [node] test settings.
        for key, value in check_configuration_file.get_test_settings()['node'].items():
            if value.lower() == 'yes':
                self.parameters['node'].append(key)
    def do_parse(self):
        """Parse sys.argv into self.parameters; exits on --help/--version or
        on malformed options, raises OptionError for unhandled ones."""
        try:
            opts, args = getopt.getopt(sys.argv[1:],
                                       "hvlc:d:i:f:s:r:",
                                       ["help","noreport","novoms","list", "conf=","destfile=",
                                        "ids=","file-ids=","version","filter-list=",
                                        "storm-release="])
        except getopt.GetoptError, err:
            print str(err)
            usage.get_usage(self.parameters)
            sys.exit(2)
        for opt, value in opts:
            if opt == "-h" or opt == "--help":
                usage.get_usage(self.parameters)
                sys.exit(0)
            elif opt == "-v" or opt == "--version":
                msg = 'T-StoRM version %s' % (__import__('tstorm').get_version())
                print msg
                sys.exit(0)
            elif opt in ("-c", "--conf"):
                self.parameters['custom_conf_file'] = (True, value)
            elif opt in ("-d", "--destfile"):
                self.parameters['custom_destination_file'] = (True, value)
            elif opt == "-i" or opt == "--ids":
                try:
                    tmp_sequence_tests = sequence.Sequence(value).get_sequence()
                    self.parameters['tests_sequence'] = (True, tmp_sequence_tests)
                except sequence.SequenceError, err:
                    print '\n\nExecution: ', err
                    usage.get_usage(self.parameters)
                    sys.exit(2)
            elif opt == "-f" or opt == "--file-ids":
                self.parameters['tests_sequence_file'] = (True, value)
            elif opt == "-l" or opt == "--list":
                self.parameters['list_tests_details'] = (True, {})
            elif opt == "-s" or opt == "--filter-list":
                try:
                    tmp_filter_tests_details = filters.Filters(value).get_filters()
                    self.parameters['filter_tests_details'] = (True, tmp_filter_tests_details)
                except filters.FiltersError, err:
                    print '\n\nExecution: ', err
                    usage.get_usage(self.parameters)
                    sys.exit(2)
            elif opt == "-r" or opt == "--storm-release":
                try:
                    self.parameters['storm_release'] = release.Release(value)
                except release.ReleaseError, err:
                    print '\n\nExecution: ', err
                    usage.get_usage(self.parameters)
                    sys.exit(2)
            elif opt == "--novoms":
                self.parameters['voms'] = False
            elif opt == "--noreport":
                self.parameters['report'] = False
            else:
                raise OptionError("Unhandled option")
    def run_test(self, tfn, uid, lfn, n_df, n_dfn):
        """Run a single test: derive input file names, record the test's
        metadata in the report (LFN) and execute its aggregated suite."""
        # sd: whether test input files live in a subdirectory; regression and
        # http(s)/tape aggregators do not use one.
        sd=True
        if uid.is_regression():
            sd=False
        elif 'ts_https' in uid.get_aggregator() or \
            'ts_http' in uid.get_aggregator() or \
            'ts_https_voms' in uid.get_aggregator() or \
            '_https_' in uid.get_aggregator() or \
            '_http_' in uid.get_aggregator() or \
            '_https' in uid.get_aggregator() or \
            '_http' in uid.get_aggregator() or \
            '_tape' in uid.get_aggregator():
            sd=False
        ifn,dfn,back_ifn= settings.set_inpt_fn(n_df,n_dfn,path=lfn.get_path(),subdir=sd)
        if uid.get_aggregator() != "" and '_wo' not in uid.get_aggregator():
            lfn.put_name(uid.get_name())
            lfn.put_description(uid.get_description())
            lfn.put_uuid(uid.get_id())
            if uid.is_regression():
                lfn.put_ruid(uid.get_rfc())
            lfn.put_output()
            # NOTE(review): eval() on the aggregator string assumes trusted
            # map_tests_ids.json content.
            runner = unittest.TextTestRunner(verbosity=2).run(eval(uid.get_aggregator()))
            lfn.put_prologue()
    def set_valid_tests(self):
        """Compute the tests valid for the selected StoRM release."""
        self.parameters['valid_tests'] = self.tests_instance.get_valid_tests(self.parameters['storm_release'])
    def modify_valid_tests(self):
        """Restrict valid tests to the user-supplied id sequence, preserving
        the order of the sequence; raises RunTestsError on bad sequences."""
        if self.parameters['tests_sequence_file'][0]:
            if settings.file_exists(self.parameters['tests_sequence_file'][1]):
                self.parameters['tests_sequence'] = (True,
                    self.parameters['tests_sequence'][1] + settings.get_tests_sequence(self.parameters['tests_sequence_file'][1]))
            else:
                raise RunTestsError("File that contains tests sequence does not exist")
        if self.parameters['tests_sequence'][0]:
            if not settings.is_tests_sequence_valid(self.parameters['tests_sequence'][1],
                self.parameters['mti_info'].values()):
                raise RunTestsError("Wrong Tests Sequence")
        new_valid_tests = {}
        for x in self.parameters['tests_sequence'][1]:
            for key, value in self.parameters['valid_tests'].items():
                if x == value.get_id():
                    new_valid_tests[key] = value
                    #print new_valid_tests[key], key, value
                    break
        return new_valid_tests
    def do_pre_run(self):
        """Validate configuration and finalise the set of tests to run."""
        self.verify_conf_file()
        self.set_valid_tests()
        if self.parameters['tests_sequence'][0]:
            self.parameters['valid_tests'] = self.modify_valid_tests()
        if self.parameters['tests_sequence_file'][0]:
            self.parameters['valid_tests'] = self.modify_valid_tests()
    def do_list(self):
        """Print test details (optionally filtered) and exit, if requested."""
        if self.parameters['list_tests_details'][0]:
            self.tests_instance.get_info(node=self.parameters['node'])
            sys.exit(0)
        if self.parameters['filter_tests_details'][0]:
            self.tests_instance.get_info(node=self.parameters['node'], info=self.parameters['filter_tests_details'][1])
            sys.exit(0)
    def do_run_tests(self):
        """Execute every selected test method and write the report file."""
        log_file = report_file.ReportFile(report = self.parameters['report'])
        log_file.print_where_report_file_is()
        tests_methods = self.tests_instance.get_methods(tests = self.parameters['valid_tests'], node=self.parameters['node'])
        for key, value in tests_methods.items():
            self.run_test(self.parameters['custom_conf_file'][1], \
                value, log_file, \
                self.parameters['custom_destination_file'][0], \
                self.parameters['custom_destination_file'][1])
        log_file.close_file()
        log_file.print_where_report_file_is()
|
import sys
import os.path
import time
import math
## Run this file with
## python3 ./src/DataTool.py ./input/de_cc_data.txt ./input/itcont_head500.txt
start_time = time.time()  # wall-clock start for the timing report at the end
# Check if there is missing arguments for input or output files
if len(sys.argv) <= 2:
    print ("Error: There are missing arguments.")
    print("Please rerun the python script with the following convention with file paths for both input and output files: python pharmacy_counting.py inputFile outputFile ")
    sys.exit(1)
# Check if input file exist
if not (os.path.exists(sys.argv[1])):
    print("Error: This input file does not exist:", (sys.argv[1]))
    sys.exit(1)
# Accept input and output files from command line arguments
inputFile = sys.argv[1]
outputFile = sys.argv[2]
print ("The input file is: ", inputFile)
print ("The output file is: ", outputFile)
def get_feeds(stdin):
    """Yield (drug_name, prescriber, ceil(drug_cost)) tuples, one per
    tab-separated input line."""
    for raw_line in stdin:
        name, prescriber, cost = raw_line.strip().split('\t')
        yield (name, prescriber, math.ceil(float(cost)))
def create_sample_inputfile(filename, nlines):
    """Copy the header line plus the first NLINES data lines of FILENAME
    into the module-level `outputFile`.

    Fix: the output file is now opened once for the whole copy; the original
    re-opened it in append mode for every single line, which is O(n) opens.
    """
    from itertools import islice
    with open(filename) as infile, open(outputFile, 'w') as output_file:
        # The first line is the header.
        output_file.write(infile.readline())
        # Then at most NLINES further lines, streamed without loading the file.
        output_file.writelines(islice(infile, nlines))
# Produce a sample: header plus the first 500 data lines of the input.
create_sample_inputfile(inputFile, 500)
# # #Use Generator
# # #Scan the file line by line
# with open(inputFile) as infile:
#     # use the function property of readline() to read and omit the header of the file
#     # infile.readline()
#
#     with open(outputFile, 'w') as output_file:
#         output_file.write(infile.readline())
#
#     for i in range(0, 100):
#
#         for line in infile:
#
#             with open(outputFile, 'a') as output_file:
#                 output_file.write(line)
print("--- Finished in ---")
print("--- %s seconds ---" % (time.time() - start_time))
#!/usr/bin/python3
import brlapi
import time
from .load_conf import *
# Initialise the braille backend from the configuration (see load_conf).
keyboard = init_bms()
def printb(txt, pos):
    """Show a window of TXT on the braille display, starting at window POS.

    Always returns 0.  NOTE(review): the bare ``return`` in ``finally`` also
    suppresses any brlapi connection error, preserved from the original.
    """
    try:
        b = brlapi.Connection()
        b.enterTtyMode()
        size = getattr(b, "displaySize")
        if len(txt) < size[0]:
            # Whole text fits on the display.
            b.writeText(txt[0:])
        elif (len(txt) - pos) < size[0]:
            # Last (partial) window: show the remaining tail.
            b.writeText(txt[pos:])
        else:
            # BUG FIX: was ``txt[(pos * size[0]):(pos+1 * size[0])]``, which by
            # operator precedence is ``pos + size[0]`` — the window end must be
            # ``(pos + 1) * size[0]`` to show the POS-th full-width window.
            b.writeText(txt[(pos * size[0]):((pos + 1) * size[0])])
    finally:
        return 0
def get_key():
    """Block for a single key press on the braille display.

    Returns the key as a one-character string when its code fits in a byte,
    otherwise the raw brlapi key code; -1 if no key was read.
    NOTE(review): the bare return in ``finally`` swallows any brlapi error.
    """
    key = -1
    try:
        b = brlapi.Connection()
        b.enterTtyMode()
        key = b.readKey()
    finally:
        # Convert printable byte codes to their character form.
        if key < 256 and key > 0:
            key = chr(key)
        return key
def flashb(txt):
    """Show TXT on the braille display for two seconds, then drop the
    connection.  Always returns 0 (errors are swallowed by the
    return-in-finally, preserved as-is)."""
    try:
        b = brlapi.Connection()
        b.enterTtyMode()
        b.writeText(str(txt))
        time.sleep(2)
        del(b)
    finally:
        return 0
def msgb(txt):
    """Show TXT on the braille display and wait for any key before returning.
    Always returns 0 (errors are swallowed by the return-in-finally)."""
    try:
        b = brlapi.Connection()
        b.enterTtyMode()
        b.writeText(txt)
        b.readKey()
    finally:
        return 0
def inputb(txt):
    """Prompt TXT on the braille display and read a line of input.

    Returns the typed string when Enter (key code 65293) is pressed, or -1
    when the abort chord (code 536936448) is pressed.
    NOTE(review): the scrolling constants (39, 41) appear to assume a 40-cell
    display — confirm on other hardware.
    """
    b = brlapi.Connection()
    b.enterTtyMode()
    r = 0
    key = ""              # characters typed so far
    i = len(txt)+1        # cursor position shown on the display
    tmp = 0               # last raw key code
    while tmp != 65293:
        b.writeText(txt+key, i)
        tmp = b.readKey()
        if len(key) > 39:
            # Input longer than the display: show only the overflow tail.
            ts = key[41:]
            b.writeText(ts, i)
        if tmp <= 255:
            # Printable byte: append the character and advance the cursor.
            key += chr(tmp)
            i += 1
        elif tmp == 65288 and (len(key)) < i:
            # Backspace: drop the last character.
            key = key[:-1]
            i -= 1
            if len(key) == 0:
                b.writeText(txt+" ", i)
                key = ""
                if i == len(txt):
                    i = len(txt)+1
            elif i == 41:
                return key
        elif tmp == 536936448:
            # Abort chord pressed.
            return -1
    return key
|
# Crie um programa que receba uma string e substitua os numeros por letras de acordo com a tabela abaixo
# a = 1
# e = 2
# i = 3
# o = 4
# u = 5
# ex: 1c4rd4
string = input()
string = string.replace('1', 'a');
string = string.replace('2', 'e');
string = string.replace('3', 'i');
string = string.replace('4', 'o');
string = string.replace('5', 'u');
print(string) |
class Solution:
    """LeetCode 14: longest common prefix of a list of strings."""

    def longest_prefix_forTwo(self, s1, s2):
        """Return the longest common prefix of exactly two strings."""
        common = []
        # zip stops at the shorter string, so no explicit length check needed.
        for a, b in zip(s1, s2):
            if a != b:
                break
            common.append(a)
        return ''.join(common)

    def longestCommonPrefix(self, strs):
        """Return the longest common prefix shared by every string in strs.

        :type strs: List[str]
        :rtype: str

        Fixes: the original left its docstring *after* the return statement
        (dead code) and wrote ``return''`` without a space.
        """
        if not strs:
            return ''
        # Fold the pairwise prefix over the list; stop early once it is empty.
        match = strs[0]
        for s in strs[1:]:
            if not match:
                break
            match = self.longest_prefix_forTwo(match, s)
        return match
|
# Copyright (c) 2020 Yul HR Kang. hk2699 at caa dot columbia dot edu.
from typing import Dict, Union, Any, Tuple, Iterable, List
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
from pprint import pprint
from collections import OrderedDict as odict
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter)
import torch
import csv
from lib.pylabyk import localfile, np2, plt2, argsutil, yktorch as ykt
from lib.pylabyk import numpytorch as npt
from lib.pylabyk.numpytorch import npy
from lib.pylabyk.cacheutil import mkdir4file
npt.device0 = torch.device('cpu')
ykt.default_device = torch.device('cpu')
from data_2d import load_data, consts
# All output paths below are resolved relative to this project-local folder.
locfile = localfile.LocalFile(
    pth_root='../../Data_2D/Data_2D_Py/data_2d/main_stim_strengths_table',
    cache_dir=''
)
def main():
    """Write both condition-table CSVs: lumped and per-dimension."""
    save_conds_lumped()
    save_conds_per_dim()
def save_conds_lumped():
    """Write one CSV row per (paradigm, participant) with the motion and color
    strengths pooled across task dimensions, plus run and trial counts."""
    dat = load_data.load_data_combined()
    file = locfile.get_file_csv('conds')
    mkdir4file(file)
    wrote_header = False
    with open(file, 'w') as csvfile:
        for parad in ['RT', 'sh', 'VD', 'unimanual', 'bimanual']:
            for subj in consts.SUBJS[parad]:
                parad1, bi = consts.parad2parad_bi(parad)
                i_subj = dat['subjs'].index(subj)
                i_parad = dat['parads'].index(parad1)
                # Trials belonging to this subject/paradigm combination.
                dat1 = np2.filt_dict(
                    dat,
                    np.array(dat['id_subj'] == i_subj) &
                    np.array(dat['id_parad'] == i_parad)
                )
                conds = [np.unique(np.abs(dat1['cond'][:, i]))
                         for i in range(2)]
                conds_str = [';'.join(['%g' % c for c in conds1])
                             for conds1 in conds]
                # Fix: compute the run count once (the original assigned
                # n_runs but then recomputed the same expression inline).
                n_runs = len(np.unique(dat1['i_all_Run']))
                d = {
                    'Paradigm': parad,
                    'Participant': subj,
                    'Motion strengths': conds_str[0],
                    'Color strengths': conds_str[1],
                    '# runs': n_runs,
                    '# trials': len(dat1['i_all_Run']),
                }
                # Header is derived from the first row's keys.
                if not wrote_header:
                    writer = csv.DictWriter(csvfile, fieldnames=d.keys())
                    writer.writeheader()
                    wrote_header = True
                writer.writerow(d)
    print('Saved to %s' % file)
    print('--')
def save_conds_per_dim():
    """Write a CSV of motion/color strengths broken down by the number of
    relevant dimensions (1 or 2) and by which dimension is relevant."""
    dat = load_data.load_data_combined()
    file = locfile.get_file_csv('conds_by_dim')
    mkdir4file(file)
    wrote_header = False
    with open(file, 'w') as csvfile:
        for parad in ['RT', 'sh', 'VD', 'unimanual', 'bimanual']:
            for subj in consts.SUBJS[parad]:
                for n_dim_rel in [1, 2]:
                    for dim_rel in range(2):
                        parad1, bi = consts.parad2parad_bi(parad)
                        i_subj = dat['subjs'].index(subj)
                        i_parad = dat['parads'].index(parad1)
                        # Trials of this subject/paradigm with the given task
                        # dimensionality and relevant dimension.
                        dat1 = np2.filt_dict(
                            dat,
                            np.array(dat['id_subj'] == i_subj) &
                            np.array(dat['id_parad'] == i_parad) &
                            np.array(dat['n_dim_task'] == n_dim_rel) &
                            np.array(dat['dim_rel'][:, dim_rel])
                        )
                        conds = [np.unique(np.abs(dat1['cond'][:, i]))
                                 for i in range(2)]
                        conds_str = [';'.join(['%g' % c for c in conds1])
                                     for conds1 in conds]
                        d = {
                            'Paradigm': parad,
                            'Participant': subj,
                            '# dimensions': n_dim_rel,
                            'Relevant dim': dim_rel,
                            'Motion strengths': conds_str[0],
                            'Color strengths': conds_str[1],
                            '# runs': len(np.unique(dat1['i_all_Run'])),
                            '# trials': len(dat1['i_all_Run']),
                        }
                        # Header is derived from the first row's keys.
                        if not wrote_header:
                            writer = csv.DictWriter(csvfile, fieldnames=d.keys())
                            writer.writeheader()
                            wrote_header = True
                        writer.writerow(d)
    print('Saved to %s' % file)
    print('--')
# Run only when executed as a script (not on import).
if __name__ == '__main__':
    main()
"""Template helpers to render mailing list providers custom forms."""
from django import template
from django.utils.html import format_html
from django.urls import reverse
from django.conf import settings
register = template.Library()
@register.simple_tag
def mailing_list_form_action():
    """Return the configured `action` URL for the mailing-list form."""
    form_action = settings.MAILING_LIST_FORM_ACTION
    return form_action
@register.simple_tag(takes_context=True)
def mailing_list_hidden_fields(context):
    """Returns custom hidden fields required to make the ml form work."""
    request = context['request']
    # The provider redirects back to this absolute URL after subscribing.
    redirection_url = reverse('mailing_list_registration')
    absolute_redirection_url = request.build_absolute_uri(redirection_url)
    return format_html(
        '''
        <input type="hidden" name="listid" id="listid" value="{listid}">
        <input type="hidden" name="from_url" id="from_url" value="yes">
        <input type="hidden" name="hdn_email_txt" id="hdn_email_txt" value="">
        <input type="hidden" name="sib_simple" value="simple">
        <input type="hidden" name="sib_forward_url" value="{redirection_url}">
        ''',
        listid=settings.MAILING_LIST_LIST_ID,
        redirection_url=absolute_redirection_url)
|
# How to create a class in python
# rules is given below ---
#
# class ClassName:
# statement
# ...
# ...
# ...
# example 1
# creating class in here ...
class Square:
    """A square whose `side` attribute determines its area."""

    side = 0  # default side length, shared until an instance overrides it

    def area(self):
        """Return the area: side * side."""
        edge = self.side
        return edge * edge
# creating a object of Square class
obj = Square()
obj.side = 5 # accessing the attribute of that class
value = obj.area() # accessing the method of that class
print(value)  # -> 25
# ---------------------------------------------------------------
# class 2
class Square2:
    """Square variant with no default `side`; callers must set the attribute
    on an instance before calling area()."""

    def area(self):
        """Return side * side; raises AttributeError if `side` was never set."""
        edge = self.side
        return edge * edge
obj2 = Square2()
obj2.side = 5 # we can create an attribute into a class like this... although there has not any attribute
result = obj2.area()
print(result)
# note: in Python the 'self' parameter is required so a method can access the
# attributes of its own instance.
|
import os
from enum import Enum
HOST = '0.0.0.0'
PORT = int(os.getenv('PORT', 9004))
# Magic GUID from RFC 6455 used to compute the Sec-WebSocket-Accept header.
GUID = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
SOURCE_FILE = 'data.zip'
# Frame payload limits (RFC 6455): control frames, 16-bit and 64-bit lengths.
MAX_PAYLOAD_FRAME_CONTROL = 125
MAX_PAYLOAD_FIRST_ORDER = (2**16) - 1
MAX_PAYLOAD_SECOND_ORDER = (2**64) - 1
# Application-level cap on a complete (possibly fragmented) message.
MAX_MESSAGE_SIZE_ALLOWED = 17000000
class WSOpcodeType(Enum):
    """WebSocket frame opcodes (RFC 6455, section 5.2)."""
    CONTINOUS = 0x0  # continuation frame; name misspelled but kept for callers
    TEXT = 0x1
    BIN = 0x2
    PING = 0x9
    PONG = 0xA
    CLOSE = 0x8
import os
import argparse
import tempfile
import json
parser = argparse.ArgumentParser()
parser.add_argument("--key", type=str, help="key name")
parser.add_argument("--val", type=str, help="value")
args = parser.parse_args()
storage_path = os.path.join(tempfile.gettempdir(), 'storage.data')
# print(storage_path)
if not os.path.exists(storage_path):
with open(storage_path, "w", encoding='utf-8') as f:
# print("File creates!")
f.write("{}")
if args.key and args.val:
# print("the key = {} and value equals {}".format(args.key, args.val))
with open(storage_path, "r", encoding='utf-8') as fr:
data = fr.read()
dict = json.loads(data)
# print(type(dict), dict)
if args.key in dict:
dict[args.key].append(args.val)
else:
# print(dict)
dict.update({args.key: [args.val]})
with open(storage_path, "w", encoding='utf-8') as fw:
# print(dict)
json.dump(dict, fw)
elif args.key:
with open(storage_path, "r", encoding='utf-8') as fr:
data = fr.read()
dict = json.loads(data)
if args.key in dict:
print(*dict[args.key], sep=', ')
else:
print("None")
|
class Solution(object):
    """LeetCode 1631: path minimising the maximum absolute height difference."""

    def minimumEffortPath(self, heights):
        """Binary-search the smallest per-step effort limit for which the
        bottom-right cell is reachable from the top-left.

        Fix: the reachability check is now an iterative DFS — the original
        recursive version could exceed Python's recursion limit (default
        1000) on large grids (up to 100x100 cells).
        """
        m, n = len(heights), len(heights[0])
        steps = ((1, 0), (0, 1), (-1, 0), (0, -1))

        def reachable(limit):
            # Flood fill restricted to edges whose height difference <= limit.
            stack = [(0, 0)]
            seen = {(0, 0)}
            while stack:
                x, y = stack.pop()
                if x == m - 1 and y == n - 1:
                    return True
                for dx, dy in steps:
                    nx, ny = x + dx, y + dy
                    if (0 <= nx < m and 0 <= ny < n and (nx, ny) not in seen
                            and abs(heights[x][y] - heights[nx][ny]) <= limit):
                        seen.add((nx, ny))
                        stack.append((nx, ny))
            return False

        # Invariant: `beg` always fails, `end` always succeeds
        # (the maximum height difference can never exceed the maximum height).
        beg, end = -1, max(map(max, heights))
        while beg + 1 < end:
            mid = (beg + end) // 2
            if reachable(mid):
                end = mid
            else:
                beg = mid
        return end
# This file is part of beets.
# Copyright 2016, Fabrice Laporte.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the 'hidden' utility."""
import unittest
import sys
import tempfile
from beets.util import hidden
from beets import util
import subprocess
import errno
import ctypes
class HiddenFileTest(unittest.TestCase):
    """Platform-specific checks for beets.util.hidden.is_hidden()."""
    def setUp(self):
        pass
    def test_osx_hidden(self):
        """A file flagged via `chflags hidden` is detected as hidden on macOS."""
        if not sys.platform == 'darwin':
            self.skipTest('sys.platform is not darwin')
            return
        with tempfile.NamedTemporaryFile(delete=False) as f:
            try:
                command = ["chflags", "hidden", f.name]
                subprocess.Popen(command).wait()
            except OSError as e:
                if e.errno == errno.ENOENT:
                    self.skipTest("unable to find chflags")
                else:
                    raise e
            self.assertTrue(hidden.is_hidden(f.name))
    def test_windows_hidden(self):
        """A file with FILE_ATTRIBUTE_HIDDEN set is detected on Windows."""
        if not sys.platform == 'win32':
            self.skipTest('sys.platform is not windows')
            return
        # FILE_ATTRIBUTE_HIDDEN = 2 (0x2) from GetFileAttributes documentation.
        hidden_mask = 2
        with tempfile.NamedTemporaryFile() as f:
            # Hide the file using
            success = ctypes.windll.kernel32.SetFileAttributesW(f.name,
                                                                hidden_mask)
            if not success:
                self.skipTest("unable to set file attributes")
            self.assertTrue(hidden.is_hidden(f.name))
    def test_other_hidden(self):
        """On remaining platforms a leading dot marks a file as hidden."""
        if sys.platform == 'darwin' or sys.platform == 'win32':
            self.skipTest('sys.platform is known')
            return
        with tempfile.NamedTemporaryFile(prefix='.tmp') as f:
            fn = util.bytestring_path(f.name)
            self.assertTrue(hidden.is_hidden(fn))
def suite():
    """Collect every test in this module into a TestSuite."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
|
from django.urls import path
from . import views
from django.conf import settings
from django.conf.urls.static import static
# Routes for account handling and shopping-cart manipulation.
urlpatterns = [
    path('register/', views.user_register_view, name='register'),
    path('login/', views.user_login_view, name='login'),
    path('logout/', views.user_logout_view, name='logout'),
    path('order/', views.order_view, name='order'),
    path('add-to-cart/<pk>/', views.add_to_cart, name='add-to-cart'),
    path('remove-from-cart/<pk>/', views.remove_from_cart, name='remove'),
    path('remove-item-from-cart/<pk>/', views.remove_item_from_cart, name='remove-item'),
]
# Serve uploaded media straight from Django in development only.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
import sqlalchemy
import sqlalchemy.ext.declarative
from sqlalchemy.orm import sessionmaker
import codecs
Base = sqlalchemy.ext.declarative.declarative_base()
class Word(Base):
    """ORM mapping for one English/Japanese vocabulary entry."""
    __tablename__ = 'words'
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    english = sqlalchemy.Column(sqlalchemy.String)
    japanese = sqlalchemy.Column(sqlalchemy.String)
    part = sqlalchemy.Column(sqlalchemy.Integer)
    section = sqlalchemy.Column(sqlalchemy.Integer)
    part_section = sqlalchemy.Column(sqlalchemy.String)
    def __init__(self, id, en, jp, part, section, part_section):
        # Explicit __init__ mirrors the column order for positional use.
        self.id = id
        self.english = en
        self.japanese = jp
        self.part = part
        self.section = section
        self.part_section = part_section
# NOTE(review): database credentials are hard-coded here; consider moving
# them to configuration or environment variables.
url = 'postgresql+psycopg2://masuda:hogehoge@localhost:5432/netpro'
engine = sqlalchemy.create_engine(url, echo=True)
# Create the schema (tables) for all mapped classes.
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
def get_session():
    """Return the module-wide SQLAlchemy session shared by all callers."""
    return session
def add_words():
    """Load vocabulary rows from words.txt into the ``words`` table.

    Each line of words.txt is a comma-separated record:
    english,japanese,part,section,part_section. Rows get sequential ids
    starting at 1, and a single commit is issued after all inserts.
    """
    # `with` guarantees the file is closed even if an insert raises
    # (the original leaked the handle on error).
    with codecs.open("words.txt", "r", "utf-8") as fin:
        for counter, line in enumerate(fin, start=1):
            print(line)
            # Strip the trailing newline so the last field (part_section)
            # is stored clean; the original kept the "\n" in the value.
            fields = line.rstrip("\r\n").split(",")
            session.add(Word(counter, fields[0], fields[1], fields[2],
                             fields[3], fields[4]))
        session.commit()
|
#!/usr/bin/python
import os, sys
import getpass
from customer import *
from logger import setup_logging
from bootstrap.bootinitial import *
# Interactive bootstrap: collect/update a customer's ticketing-system
# credentials and persist them to customer.csv. (Python 2 script.)
if __name__ == '__main__':
    # Run relative to the script's own directory.
    os.chdir(os.path.dirname(os.path.abspath(sys.argv[0])))
    try:
        # Guard against concurrent runs of this script.
        from tendo import singleton
        me = singleton.SingleInstance()
    except Exception, e:
        print "Another instance of script is running. Exiting!!!"
        sys.exit(2)
    try:
        setup_logging("Notification", os.path.join("data", "log"), logfile="bootstrap.log", scrnlog=False)
        # setup_logging("suds.client", os.path.join("data", "log"), logfile="bootstrap.log", scrnlog = False)
        logger = logging.getLogger("Notification")
        customer_id = get_user_input("Enter customer ID")
        customer_name = ''
        customer_mapping_path = "customer.csv"
        customer_mapping = CustomerMapping(customer_mapping_path)
        customer_mapping.read_mapping()
        # customer_mapping.print_mapping()
        logger.debug("Fetching customer details for ID: %s from mapping data"
                     % (customer_id))
        customer = customer_mapping.get_customer(customer_id)
        is_required = False
        if customer:
            # Existing customer: only proceed if the operator wants changes.
            customer.print_info(on_screen=True)
            is_required = query_yes_no("Customer detail already present. Do you want to modify it?", "no")
        else:
            logger.info("Customer Information not found.Creating new customer.")
            is_required = True
            customer = Customer()
            customer.id = customer_id
        if is_required == False:
            sys.exit();
        # Loop until the operator confirms the entered details.
        while True:
            # customer.name = get_user_input("Enter customer name", customer.name)
            customer.url = get_user_input("Enter Ticketing URL", customer.url)
            customer.username = get_user_input("Enter User name", customer.username)
            # Prompt twice and re-prompt until both entries match.
            pprompt = lambda: (getpass.getpass(), getpass.getpass('Retype password: '))
            password, retype_password = pprompt()
            while password != retype_password:
                print('Passwords do not match. Try again')
                password, retype_password = pprompt()
            if len(password.strip()) > 0:
                customer.password = password.strip()
            # Show the numeric menu of supported ticketing APIs.
            print "%s : %s" % (TICKETING_API_AUTOTASK, TICKET_API[TICKETING_API_AUTOTASK])
            print "%s : %s" % (TICKETING_API_JIRA, TICKET_API[TICKETING_API_JIRA])
            print "%s : %s" % (TICKETING_API_CONNECTWISE, TICKET_API[TICKETING_API_CONNECTWISE])
            print "%s : %s" % (TICKETING_API_FRESHSERVICE, TICKET_API[TICKETING_API_FRESHSERVICE])
            print "%s : %s" % (TICKETING_API_SALESFORCE, TICKET_API[TICKETING_API_SALESFORCE])
            print "%s : %s" % (TICKETING_API_SERVICENOW, TICKET_API[TICKETING_API_SERVICENOW])
            # Default to Autotask when no API has been chosen before.
            customer.set_ticketing_api(int(get_user_input("Select ticketing API", str(
                TICKETING_API_AUTOTASK if customer.ticket_api == 0 else customer.ticket_api))))
            extra_fields = load_extra_fields(customer)
            customer_mapping.add_customer(customer)
            customer.print_info(on_screen=True, autotask_fields=extra_fields)
            is_required = query_yes_no("Please verify customer information before we save it. Do you want to save it?",
                                       "yes")
            if is_required:
                customer_mapping.save_mapping()
                break
    except Exception, ex:
        print ex
        logger.exception(ex)
|
import json
def did_x_and_y_act_together(data, actor_id_1, actor_id_2):
    """Return True iff both actor ids appear together in some record.

    Each record in data is (actor_id_1, actor_id_2, film_id); only the
    first two positions are examined.
    """
    return any(
        actor_id_1 in record[:2] and actor_id_2 in record[:2]
        for record in data
    )
def get_actors_with_bacon_number(data, n):
    """Return the set of actor ids whose Kevin Bacon number is exactly n.

    data is a sequence of (actor_id_1, actor_id_2, film_id) records and n a
    non-negative integer. Kevin Bacon's id is 4724 (bacon number 0).
    Returns the empty set when no actor is at distance n.
    """
    # Adjacency map: actor id -> set of co-stars.
    adjacency = {}
    for record in data:
        a, b = record[0], record[1]
        adjacency.setdefault(a, set()).add(b)
        adjacency.setdefault(b, set()).add(a)
    current = {4724}   # actors at the current BFS distance
    seen = {4724}      # actors at any distance <= current
    count = n
    while count > 0 and current:
        # Expand one BFS level: every co-star of the frontier, minus
        # anyone already reached at a smaller distance.
        frontier = set()
        for actor in current:
            # .get() guards against ids absent from the dataset
            # (the original raised KeyError when 4724 never appeared).
            frontier |= adjacency.get(actor, set())
        current = frontier - seen
        seen |= frontier
        count -= 1
    return current
def get_bacon_path(data, actor_id):
    """Return a shortest actor-id path from Kevin Bacon (4724) to actor_id.

    Returns [4724] when actor_id is Kevin Bacon himself and None when no
    chain of co-stars connects the two actors.
    """
    # The original duplicated the whole BFS from get_path with the start
    # node fixed at 4724; delegating keeps one implementation.
    return get_path(data, 4724, actor_id)
def get_path(data, actor_id_1, actor_id_2):
    """Return a shortest path of actor ids connecting the two actors.

    Breadth-first search over the "acted together" graph built from data
    (records of (actor_id_1, actor_id_2, film_id)). Returns [actor_id_1]
    when the ids are equal and None when the actors are not connected.
    """
    if actor_id_1 == actor_id_2:
        return [actor_id_1]
    # Adjacency map: actor id -> set of co-stars.
    adjacency = {}
    for record in data:
        a, b = record[0], record[1]
        adjacency.setdefault(a, set()).add(b)
        adjacency.setdefault(b, set()).add(a)
    # BFS storing the full path to each frontier node; `front` is the
    # index of the next unexplored path (amortized O(1) dequeue).
    queue = [[actor_id_1]]
    visited = set()
    front = 0
    while front < len(queue):
        path = queue[front]
        front += 1
        actor = path[-1]
        if actor in visited:
            continue
        visited.add(actor)
        # .get() guards against ids absent from the dataset (the original
        # raised KeyError when actor_id_1 never appeared in data).
        for costar in adjacency.get(actor, ()):
            if costar in visited:
                continue
            extended = path + [costar]
            if costar == actor_id_2:
                return extended
            queue.append(extended)
    return None
def get_movies(data, actor_id_1, actor_id_2):
    """Return film ids connecting the two actors along a shortest path.

    The i-th movie in the result features the i-th and (i+1)-th actors of
    the shortest path between the ids. Returns None when the actors are
    not connected (the original raised TypeError in that case).
    """
    path = get_path(data, actor_id_1, actor_id_2)
    if path is None:
        return None
    # Movie id -> set of actors appearing in it.
    casts = {}
    for record in data:
        casts.setdefault(record[2], set()).update(record[:2])
    movies = []
    # For each consecutive actor pair, record the first movie they share.
    for a, b in zip(path, path[1:]):
        for movie, cast in casts.items():
            if a in cast and b in cast:
                movies.append(movie)
                break
    return movies
if __name__ == '__main__':
    # Load the small sample database for interactive experimentation.
    with open('resources/small.json') as f:
        smalldb = json.load(f)
    # additional code here will be run only when lab.py is invoked directly
    # (not when imported from test.py), so this is a good place to put code
    # used, for example, to generate the results for the online questions.
    pass
|
from tkinter import *
import sqlite3
import tabulate
import random
from datetime import time
import time
import tkinter as tk
from PIL import ImageTk, Image
from tkinter import ttk
from tkinter import messagebox
from tkcalendar import DateEntry
# Root window: fixed-size dashboard for the criminal records app.
base = Tk()
base.geometry("1500x750")
#base.config(background="#ffa500")
base.config(background="#36648B")
base.title("Criminal Finder Application")
#------------------------------- Events Starts -------------------------------------------
def event1():
    """Build the "ADD NEW CRIMINAL" data-entry form inside the right pane."""
    temp = Frame(rightFrame, width=1055, height=710)
    temp.grid(row=0, column=1, padx=0, pady=0)
    temp.config(background="#283A90")
    def event1_back():
        # Tear down the form. NOTE(review): `rightFrame` below is a *local*
        # rebind -- the module-level rightFrame is not replaced; confirm
        # this is the intended behaviour.
        temp.destroy()
        rightFrame = Frame(base, width=0, height=0)
        rightFrame.grid(row=0, column=1, padx=0, pady=90)
    h1 = Label(temp, text="ADD NEW CRIMINAL", height="2", width="30", font=20)
    h1.place(x=360, y=30)
    # One label + input row per criminal attribute, positioned absolutely.
    criminal_id = Label(temp, text="ENTER CRIMINAL ID", fg="white", background="#283A90", font="20")
    criminal_id.place(x=200, y=100)
    txt1 = Entry(temp, width="57")
    txt1.place(x=570, y=100)
    criminal_name = Label(temp, text="ENTER CRIMINAL NAME", fg="white", background="#283A90", font="20")
    criminal_name.place(x=200, y=140)
    txt2 = Entry(temp, width="57")
    txt2.place(x=570, y=140)
    criminal_mob = Label(temp, text="ENTER CRIMINAL MOBILE NUMBER", fg="white", background="#283A90", font="20")
    criminal_mob.place(x=200, y=180)
    txt3 = Entry(temp, width="57")
    txt3.place(x=570, y=180)
    criminal_dob = Label(temp, text="ENTER CRIMINAL D.O.B.", fg="white", background="#283A90", font="20")
    criminal_dob.place(x=200, y=220)
    txt4 = DateEntry(temp, font=('Times New Roman', 12, 'bold'), width=20)
    #txt4 = Entry(temp, width="40")
    txt4.place(x=570, y=220)
    criminal_gender = Label(temp, text="ENTER CRIMINAL GENDER.", fg="white", background="#283A90", font="20")
    criminal_gender.place(x=200, y=260)
    var = IntVar()
    Radiobutton(temp, text="Male", padx=5, variable=var, value=1).place(x=570, y=260)
    Radiobutton(temp, text="Female", padx=20, variable=var, value=2).place(x=670, y=260)
    Radiobutton(temp, text="Other", padx=20, variable=var, value=3).place(x=820, y=260)
    criminal_addr = Label(temp, text="ENTER CRIMINAL ADDRESS", fg="white", background="#283A90", font="20")
    criminal_addr.place(x=200, y=300)
    txt5 = Entry(temp, width="57")
    txt5.place(x=570, y=307)
    criminal_image = Label(temp, text="UPLOAD CRIMINAL IMAGE", fg="white", background="#283A90", font="20")
    criminal_image.place(x=200, y=340)
    # NOTE(review): the upload and SAVE buttons have no command bound yet.
    upload = Button(temp, width="25", height="1", bd="2", activebackground="green", text="UPLOAD AN IMAGE")
    upload.place(x=570, y=344)
    crime_type = Label(temp, text="ENTER CRIME TYPE", fg="white", background="#283A90", font="20")
    crime_type.place(x=200, y=380)
    txt6 = Entry(temp, width="57")
    txt6.place(x=570, y=380)
    crime_city = Label(temp, text="ENTER CRIME CITY", fg="white", background="#283A90", font="20")
    crime_city.place(x=200, y=420)
    txt7 = Entry(temp, width="57")
    txt7.place(x=570, y=420)
    crime_date = Label(temp, text="ENTER CRIME DATE", fg="white", background="#283A90", font="20")
    crime_date.place(x=200, y=460)
    txt8 = DateEntry(temp, font=('Times New Roman', 12, 'bold'), width=20)
    txt8.place(x=570, y=460)
    crime_time = Label(temp, text="ENTER CRIME TIME", fg="white", background="#283A90", font="20")
    crime_time.place(x=200, y=500)
    txt9 = Entry(temp, width="57")
    txt9.place(x=570, y=500)
    jail_name = Label(temp, text="ENTER JAIL NAME", fg="white", background="#283A90", font="20")
    jail_name.place(x=200, y=540)
    txt10 = Entry(temp, width="57")
    txt10.place(x=570, y=540)
    jail_addr = Label(temp, text="ENTER JAIL ADDRESS", fg="white", background="#283A90", font="20")
    jail_addr.place(x=200, y=580)
    txt11 = Entry(temp, width="57")
    txt11.place(x=570, y=580)
    btn1 = Button(temp, width="13", height="1", bd="5", activebackground="green", text="SAVE")
    btn1.place(x=300, y=615)
    # ---------------------
    def event1_reset():
        # Clear every input field and refocus the first one.
        txt1.delete(0, END)
        txt2.delete(0, END)
        txt3.delete(0, END)
        txt4.delete(0, END)
        txt5.delete(0, END)
        txt6.delete(0, END)
        txt7.delete(0, END)
        txt8.delete(0, END)
        txt9.delete(0, END)
        txt10.delete(0, END)
        txt11.delete(0, END)
        txt1.focus()
    # ----------------------
    btn2 = Button(temp, width="13", height="1", bd="5", activebackground="yellow", text="Reset", command=event1_reset)
    btn2.place(x=500, y=615)
    btn3 = Button(temp, width="13", height="1", bd="5", activebackground="blue", text="Back", command=event1_back)
    btn3.place(x=700, y=615)
def event2():
    """Build the "UPDATE CRIMINAL INFO" form; layout mirrors the add form."""
    temp = Frame(rightFrame, width=1055, height=710)
    temp.grid(row=0, column=1, padx=0, pady=0)
    temp.config(background="#283A90")
    def event2_back():
        # NOTE(review): local rebind of rightFrame; the module-level frame
        # is not replaced -- confirm intended.
        temp.destroy()
        rightFrame = Frame(base, width=0, height=0)
        rightFrame.grid(row=0, column=1, padx=0, pady=90)
    h1 = Label(temp, text="UPDATE CRIMINAL INFO", height="2", width="30", font=20)
    h1.place(x=360, y=30)
    # NOTE(review): Retrieve/SAVE buttons have no command bound yet.
    retrieve = Button(temp, width="13", height="1", bd="5", activebackground="green", text="Retrieve")
    retrieve.place(x=800, y=50)
    # One label + input row per criminal attribute, positioned absolutely.
    criminal_id = Label(temp, text="ENTER CRIMINAL ID", fg="white", background="#283A90", font="20")
    criminal_id.place(x=200, y=100)
    txt1 = Entry(temp, width="57")
    txt1.place(x=570, y=100)
    criminal_name = Label(temp, text="ENTER CRIMINAL NAME", fg="white", background="#283A90", font="20")
    criminal_name.place(x=200, y=140)
    txt2 = Entry(temp, width="57")
    txt2.place(x=570, y=140)
    criminal_mob = Label(temp, text="ENTER CRIMINAL MOBILE NUMBER", fg="white", background="#283A90", font="20")
    criminal_mob.place(x=200, y=180)
    txt3 = Entry(temp, width="57")
    txt3.place(x=570, y=180)
    criminal_dob = Label(temp, text="ENTER CRIMINAL D.O.B.", fg="white", background="#283A90", font="20")
    criminal_dob.place(x=200, y=220)
    txt4 = DateEntry(temp, font=('Times New Roman', 12, 'bold'), width=20)
    # txt4 = Entry(temp, width="40")
    txt4.place(x=570, y=220)
    criminal_gender = Label(temp, text="ENTER CRIMINAL GENDER.", fg="white", background="#283A90", font="20")
    criminal_gender.place(x=200, y=260)
    var = IntVar()
    Radiobutton(temp, text="Male", padx=5, variable=var, value=1).place(x=570, y=260)
    Radiobutton(temp, text="Female", padx=20, variable=var, value=2).place(x=670, y=260)
    Radiobutton(temp, text="Other", padx=20, variable=var, value=3).place(x=820, y=260)
    criminal_addr = Label(temp, text="ENTER CRIMINAL ADDRESS", fg="white", background="#283A90", font="20")
    criminal_addr.place(x=200, y=300)
    txt5 = Entry(temp, width="57")
    txt5.place(x=570, y=307)
    criminal_image = Label(temp, text="UPLOAD CRIMINAL IMAGE", fg="white", background="#283A90", font="20")
    criminal_image.place(x=200, y=340)
    upload = Button(temp, width="25", height="1", bd="2", activebackground="green", text="UPLOAD AN IMAGE")
    upload.place(x=570, y=344)
    crime_type = Label(temp, text="ENTER CRIME TYPE", fg="white", background="#283A90", font="20")
    crime_type.place(x=200, y=380)
    txt6 = Entry(temp, width="57")
    txt6.place(x=570, y=380)
    crime_city = Label(temp, text="ENTER CRIME CITY", fg="white", background="#283A90", font="20")
    crime_city.place(x=200, y=420)
    txt7 = Entry(temp, width="57")
    txt7.place(x=570, y=420)
    crime_date = Label(temp, text="ENTER CRIME DATE", fg="white", background="#283A90", font="20")
    crime_date.place(x=200, y=460)
    txt8 = DateEntry(temp, font=('Times New Roman', 12, 'bold'), width=20)
    txt8.place(x=570, y=460)
    crime_time = Label(temp, text="ENTER CRIME TIME", fg="white", background="#283A90", font="20")
    crime_time.place(x=200, y=500)
    txt9 = Entry(temp, width="57")
    txt9.place(x=570, y=500)
    jail_name = Label(temp, text="ENTER JAIL NAME", fg="white", background="#283A90", font="20")
    jail_name.place(x=200, y=540)
    txt10 = Entry(temp, width="57")
    txt10.place(x=570, y=540)
    jail_addr = Label(temp, text="ENTER JAIL ADDRESS", fg="white", background="#283A90", font="20")
    jail_addr.place(x=200, y=580)
    txt11 = Entry(temp, width="57")
    txt11.place(x=570, y=580)
    btn1 = Button(temp, width="13", height="1", bd="5", activebackground="green", text="SAVE")
    btn1.place(x=300, y=615)
    # ---------------------
    def event2_reset():
        # Clear every input field and refocus the first one.
        txt1.delete(0, END)
        txt2.delete(0, END)
        txt3.delete(0, END)
        txt4.delete(0, END)
        txt5.delete(0, END)
        txt6.delete(0, END)
        txt7.delete(0, END)
        txt8.delete(0, END)
        txt9.delete(0, END)
        txt10.delete(0, END)
        txt11.delete(0, END)
        txt1.focus()
    # ----------------------
    btn2 = Button(temp, width="13", height="1", bd="5", activebackground="yellow", text="Reset", command=event2_reset)
    btn2.place(x=500, y=615)
    btn3 = Button(temp, width="13", height="1", bd="5", activebackground="blue", text="Back", command=event2_back)
    btn3.place(x=700, y=615)
def event3():
    """Build the "REMOVE CRIMINAL" form: delete a record by criminal id."""
    temp = Frame(rightFrame, width=1055, height=710)
    temp.grid(row=0, column=1, padx=0, pady=0)
    temp.config(background="#283A90")
    def event3_back():
        # NOTE(review): local rebind of rightFrame; module-level unchanged.
        temp.destroy()
        rightFrame = Frame(base, width=0, height=0)
        rightFrame.grid(row=0, column=1, padx=0, pady=90)
    h1 = Label(temp, text="REMOVE CRIMINAL", height="2", width="30", font=20)
    h1.place(x=360, y=30)
    r_criminal_id = Label(temp, text="ENTER CRIMINAL ID", fg="white", background="#283A90", font="20")
    r_criminal_id.place(x=200, y=150)
    txt1 = Entry(temp, width="57")
    txt1.place(x=570, y=150)
    # NOTE(review): the Remove button has no command bound yet.
    btn1 = Button(temp, width="13", height="1", bd="5", activebackground="green", text="Remove")
    btn1.place(x=300, y=250)
    #---------------------
    def event3_reset():
        # Clear the id field and refocus it.
        txt1.delete(0, END)
        txt1.focus()
    #----------------------
    btn2 = Button(temp, width="13", height="1", bd="5", activebackground="yellow", text="Reset",command=event3_reset)
    btn2.place(x=500, y=250)
    btn3 = Button(temp, width="13", height="1", bd="5", activebackground="blue", text="Back", command=event3_back)
    btn3.place(x=700, y=250)
def event4():
    """Build the "SEARCH CRIMINAL" menu with three search-mode sub-forms."""
    temp = Frame(rightFrame, width=1055, height=710)
    temp.grid(row=0, column=1, padx=0, pady=0)
    temp.config(background="#283A90")
    def event4_back():
        # NOTE(review): local rebind of rightFrame; module-level unchanged.
        temp.destroy()
        rightFrame = Frame(base, width=0, height=0)
        rightFrame.grid(row=0, column=1, padx=0, pady=90)
    def s1():
        # Sub-form: search by criminal id.
        # NOTE(review): this `temp` shadows the outer menu frame.
        temp = Frame(rightFrame, width=1055, height=710)
        temp.grid(row=0, column=1, padx=0, pady=0)
        temp.config(background="#283A90")
        h1 = Label(temp, text="SEARCH BY CRIMINAL ID", height="2", width="30", font=20)
        h1.place(x=360, y=30)
        def event41_back():
            temp.destroy()
            rightFrame = Frame(base, width=0, height=0)
            rightFrame.grid(row=0, column=1, padx=0, pady=90)
        s_criminal_id = Label(temp, text="ENTER CRIMINAL ID", fg="white", background="#283A90", font="20")
        s_criminal_id.place(x=200, y=150)
        txt1 = Entry(temp, width="57")
        txt1.place(x=570, y=150)
        # NOTE(review): Search button has no command bound yet.
        btn1 = Button(temp, width="13", height="1", bd="5", activebackground="green", text="Search")
        btn1.place(x=300, y=250)
        # ---------------------
        def event41_reset():
            txt1.delete(0, END)
            txt1.focus()
        # ----------------------
        btn2 = Button(temp, width="13", height="1", bd="5", activebackground="yellow", text="Reset",command=event41_reset)
        btn2.place(x=500, y=250)
        btn3 = Button(temp, width="13", height="1", bd="5", activebackground="blue", text="Back", command=event41_back)
        btn3.place(x=700, y=250)
    def s2():
        # Sub-form: search by criminal name.
        temp = Frame(rightFrame, width=1055, height=710)
        temp.grid(row=0, column=1, padx=0, pady=0)
        temp.config(background="#283A90")
        h1 = Label(temp, text="SEARCH BY CRIMINAL NAME", height="2", width="30", font=20)
        h1.place(x=360, y=30)
        def event42_back():
            temp.destroy()
            rightFrame = Frame(base, width=0, height=0)
            rightFrame.grid(row=0, column=1, padx=0, pady=90)
        s_criminal_name = Label(temp, text="ENTER CRIMINAL NAME", fg="white", background="#283A90", font="20")
        s_criminal_name.place(x=200, y=150)
        txt2 = Entry(temp, width="57")
        txt2.place(x=570, y=150)
        btn1 = Button(temp, width="13", height="1", bd="5", activebackground="green", text="Search")
        btn1.place(x=300, y=250)
        # ---------------------
        def event42_reset():
            txt2.delete(0, END)
            txt2.focus()
        # ----------------------
        btn2 = Button(temp, width="13", height="1", bd="5", activebackground="yellow", text="Reset",command=event42_reset)
        btn2.place(x=500, y=250)
        btn3 = Button(temp, width="13", height="1", bd="5", activebackground="blue", text="Back", command=event42_back)
        btn3.place(x=700, y=250)
    def s3():
        # Sub-form: search by criminal mobile number.
        temp = Frame(rightFrame, width=1055, height=710)
        temp.grid(row=0, column=1, padx=0, pady=0)
        temp.config(background="#283A90")
        h1 = Label(temp, text="SEARCH BY CRIMINAL MOBILE NUMBER", height="2", width="40", font=20)
        h1.place(x=330, y=30)
        def event43_back():
            temp.destroy()
            rightFrame = Frame(base, width=0, height=0)
            rightFrame.grid(row=0, column=1, padx=0, pady=90)
        s_criminal_mob = Label(temp, text="ENTER CRIMINAL MOBILE NUMBER", fg="white", background="#283A90", font="20")
        s_criminal_mob.place(x=200, y=150)
        txt3 = Entry(temp, width="57")
        txt3.place(x=570, y=150)
        btn1 = Button(temp, width="13", height="1", bd="5", activebackground="green", text="Search")
        btn1.place(x=300, y=250)
        # ---------------------
        def event43_reset():
            txt3.delete(0, END)
            txt3.focus()
        # ----------------------
        btn2 = Button(temp, width="13", height="1", bd="5", activebackground="yellow", text="Reset",command=event43_reset)
        btn2.place(x=500, y=250)
        btn3 = Button(temp, width="13", height="1", bd="5", activebackground="blue", text="Back", command=event43_back)
        btn3.place(x=700, y=250)
    # Top-level search menu: one button per search mode.
    h1 = Label(temp, text="SEARCH CRIMINAL", height="2", width="30", font=20)
    h1.place(x=360, y=30)
    btn1 = Button(temp, width="35", height="2", bd="5", activebackground="green", text="SEARCH BY CRIMINAL ID", command=s1)
    btn1.place(x=400, y=150)
    btn2 = Button(temp, width="35", height="2", bd="5", activebackground="red", text="SEARCH BY CRIMINAL NAME", command=s2)
    btn2.place(x=400, y=300)
    btn3 = Button(temp, width="35", height="2", bd="5", activebackground="red", text="SEARCH BY CRIMINAL MOBILE NUMBER", command=s3)
    btn3.place(x=400, y=450)
    btn4 = Button(temp, width="13", height="1", bd="5", activebackground="blue", text="Back", command=event4_back)
    btn4.place(x=480, y=600)
def event5():
    """Build the "CRIME STATASTICS" menu (state/district statistics)."""
    temp = Frame(rightFrame, width=1055, height=710)
    temp.grid(row=0, column=1, padx=0, pady=0)
    temp.config(background="#283A90")
    h1 = Label(temp, text="CRIME STATASTICS", height="2", width="30", font=20)
    h1.place(x=360, y=30)
    def event5_back():
        # NOTE(review): local rebind of rightFrame; module-level unchanged.
        temp.destroy()
        rightFrame = Frame(base, width=0, height=0)
        rightFrame.grid(row=0, column=1, padx=0, pady=90)
    # NOTE(review): statistics buttons have no command bound yet.
    btn1 = Button(temp, width="35", height="2", bd="5", activebackground="green", text="STATEWISE STATASTICS")
    btn1.place(x=400, y=200)
    btn2 = Button(temp, width="35", height="2", bd="5", activebackground="blue",text="DISTRICTWISE STATASTICS")
    btn2.place(x=400, y=350)
    btn3 = Button(temp, width="13", height="1", bd="5", activebackground="blue", text="Back", command=event5_back)
    btn3.place(x=480, y=500)
def event6():
    """Build the "DOWNLOAD RECORD PDF" menu (state/district exports)."""
    temp = Frame(rightFrame, width=1055, height=710)
    temp.grid(row=0, column=1, padx=0, pady=0)
    temp.config(background="#283A90")
    h1 = Label(temp, text="DOWNLOAD RECORD PDF", height="2", width="30", font=20)
    h1.place(x=360, y=30)
    def event6_back():
        # NOTE(review): local rebind of rightFrame; module-level unchanged.
        temp.destroy()
        rightFrame = Frame(base, width=0, height=0)
        rightFrame.grid(row=0, column=1, padx=0, pady=90)
    # NOTE(review): PDF buttons have no command bound yet.
    btn1 = Button(temp, width="35", height="2", bd="5", activebackground="green", text="STATEWISE PDF")
    btn1.place(x=400, y=200)
    btn2 = Button(temp, width="35", height="2", bd="5", activebackground="blue",text="DISTRICTWISE PDF")
    btn2.place(x=400, y=350)
    btn3 = Button(temp, width="13", height="1", bd="5", activebackground="blue", text="Back", command=event6_back)
    btn3.place(x=480, y=500)
def event7():
    """Ask for confirmation and close the application window.

    askyesnocancel returns True/False/None; only an explicit "yes"
    destroys the root window.
    """
    answer = messagebox.askyesnocancel('Notification', 'Do you want to exit?')
    if answer is True:
        base.destroy()
#------------------------------- Events Ends -------------------------------------------
#---------------------------- Clock Starts ----------------------------------
def tick():
    """Refresh the clock label with the current date/time, once per second."""
    time_string = time.strftime("%H:%M:%S")
    date_string = time.strftime("%d/%m/%Y")
    clock.config(text='Date :'+date_string+"\n"+"Time : "+time_string)
    # Re-arm itself via the tk event loop.
    clock.after(1000,tick)
# Wall-clock label pinned to the top-left corner; tick() keeps it current.
clock = Label(base,font=('Times new roman',14,'bold'),relief=RIDGE,borderwidth=4,bg='linen')
clock.place(x=0,y=0)
tick()
#---------------------------- Clock Ends ----------------------------------
#-------------------------------------- Slidebar Starts -----------------------------------------------------
# Palette cycled through by the slider label once per second.
colors = ['red','green','black','red2','gold2','indianred1','sienna1','orange2','darkorchid1','cornflower blue','saddle brown','cornsilk3','steelblue4']
def IntroLabelColorTick():
    """Recolour the slider label with a random palette entry every second."""
    fg = random.choice(colors)
    SliderLabel.config(fg=fg)
    SliderLabel.after(1000,IntroLabelColorTick)
def IntroLabelTick():
    """Typewriter effect: append one character of `ss` every 300 ms, then restart."""
    global count,text
    if(count>=len(ss)):
        # Whole string shown: clear and start over.
        count = 0
        text = ''
        SliderLabel.config(text=text)
    else:
        text = text+ss[count]
        SliderLabel.config(text=text)
        count += 1
    SliderLabel.after(300,IntroLabelTick)
# Marquee state: `text` accumulates one character of `ss` per tick.
ss = 'Criminal Finder Application'
count = 0
text = ''
SliderLabel = Label(base,text=ss,font=('Times new roman',30,'italic bold'),relief=RIDGE,borderwidth=4,width=35,bg='linen')
SliderLabel.place(x=350,y=0)
# Start the typewriter animation and the colour cycler.
IntroLabelTick()
IntroLabelColorTick()
#-------------------------------------- Slidebar Ends -----------------------------------------------------
#--------------------------- leftFrame Starts -------------------------------
# Left pane: column of menu buttons.
leftFrame = Frame(base, width=500, height = 710)
leftFrame.grid(row=0, column=0, padx=2, pady=90)
#leftFrame.config(background = "#FF6E33")
leftFrame.config(background = "#4E78A0")
#---------------------------------- leftFrame Ends -----------------------------------------
#------------------------------- RighttFrame Starts ----------------------------------------
# Right pane: content area the event handlers swap their forms into.
rightFrame = Frame(base, width=1030, height = 710)
rightFrame.grid(row=0, column=1, padx=0, pady=90)
#rightFrame.config(background = "#FFE633")
#----------------------- Leftframe Menus Starts --------------------------------------
# Main menu: one button per application feature, each wired to its event*().
b1 = Button(leftFrame, width="40", height="2", bd="5", activebackground="green", text="ADD NEW CRIMINAL", command=event1)
b1.place(x=100, y=50)
b2 = Button(leftFrame, width="40", height="2", bd="5", activebackground="green", text="UPDATE CRIMINAL INFO", command=event2)
b2.place(x=100, y=150)
b3 = Button(leftFrame, width="40", height="2", bd="5", activebackground="green", text="REMOVE CRIMINAL", command=event3)
b3.place(x=100, y=250)
b4 = Button(leftFrame, width="40", height="2", bd="5", activebackground="green", text="SEARCH CRIMINAL", command=event4)
b4.place(x=100, y=350)
b5 = Button(leftFrame, width="40", height="2", bd="5", activebackground="green", text="CRIME STATASTICS", command=event5)
b5.place(x=100, y=450)
b6 = Button(leftFrame, width="40", height="2", bd="5", activebackground="green", text="DOWNLOAD RECORD PDF", command=event6)
b6.place(x=100, y=550)
b7 = Button(leftFrame, width="40", height="2", bd="5", activebackground="red", text="EXIT", command=event7)
b7.place(x=100, y=650)
#----------------------- Leftframe Menus Ends --------------------------------------
#----------------------------- Add Image To RightFrame Starts -----------------------------------
# NOTE(review): absolute Windows-specific path -- breaks on other machines;
# consider resolving relative to the script directory.
image = Image.open("C:/Users/Lenovo/Desktop/crime3.jpg")
photo = ImageTk.PhotoImage(image)
# Background image fills the right-hand pane until a form replaces it.
label = Label(rightFrame,image=photo, width=1050, height=700)
label.grid(row=0, column=1, padx=0, pady=0)
#----------------------------- Add Image To RightFrame Ends -------------------------------------
base.mainloop()
# This should match UserSchema in grant.user.models
# Placeholder profile returned for unauthenticated requests; keys mirror
# UserSchema in grant.user.models.
anonymous_user = {
    'userid': 0,
    'display_name': 'Anonymous',
    'title': 'N/A',
    'avatar': None,
    'social_medias': [],
    'email_verified': True,
}
|
t = ('a', 'b', 'c', 'd')
# Build a copy of the tuple with the element at index 2 removed.
t1 = tuple(value for index, value in enumerate(t) if index != 2)
print(t1)
# Copyright (c) 2017-2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import collections.abc
from typing import Any, Optional, Type as PyType, TypeVar
from google.protobuf import timestamp_pb2
from google.protobuf.empty_pb2 import Empty
from .._gen.com.daml.ledger.api import v1 as lapipb
from ..damlast.daml_lf_1 import DefDataType, Type
from ..damlast.util import find_variant_type
from ..prim import (
FrozenDict,
date_to_int,
datetime_to_epoch_microseconds,
decimal_to_str,
to_bool,
to_date,
to_datetime,
to_decimal,
to_int,
to_party,
to_str,
to_variant,
)
from .context import Context
from .mapper import ValueMapper
__all__ = ["ProtobufDecoder", "ProtobufEncoder", "get_value", "set_value"]
T = TypeVar("T")
class ProtobufDecoder(ValueMapper):
    """
    Convert value_pb2.Value (or any of its cases) to a native Python type.
    This mapper also handles:
    * re-typing ContractIds (they come over the Ledger API without their type parameter)
    * non-verbose Ledger API record values (field names are omitted for message brevity, meaning
      they can only be understood in a generic way by knowing the metadata associated with the
      object ahead of time)
    """
    def data_record(
        self, context: "Context", dt: "DefDataType", record: "DefDataType.Fields", obj: "Any"
    ) -> "Any":
        # Decode a Record into a dict. Wire fields are zipped with metadata
        # fields, so metadata field order is authoritative (covers the
        # non-verbose case where labels are omitted on the wire).
        msg = get_value(obj, "record", lapipb.Record)
        d = dict()
        for fld_metadata, fld_data in zip(record.fields, msg.fields):
            name = fld_metadata.field
            value = context.append_path(name).convert(fld_metadata.type, fld_data.value)
            d[name] = value
        return d
    def data_variant(
        self, context: "Context", dt: "DefDataType", variant: "DefDataType.Fields", obj: "Any"
    ) -> "Any":
        # Decode a Variant into a single-key dict {constructor: value}.
        msg = get_value(obj, "variant", lapipb.Variant)
        obj_ctor = msg.constructor
        obj_value = msg.value
        obj_type = find_variant_type(dt, variant, obj_ctor)
        d_value = context.append_path(obj_ctor).convert(obj_type, obj_value)
        return {obj_ctor: d_value}
    def data_enum(
        self,
        context: "Context",
        dt: "DefDataType",
        enum: "DefDataType.EnumConstructors",
        obj: "Any",
    ) -> "Any":
        # Validate that the wire constructor is a member of the enum.
        msg = get_value(obj, "enum", lapipb.Enum)
        return context.value_validate_enum(msg.constructor, enum)
    def prim_unit(self, context: "Context", obj: "Any") -> "Any":
        # Unit decodes to an empty dict.
        return {}
    def prim_bool(self, context: "Context", obj: "Any") -> "Any":
        return get_value(obj, "bool", bool)
    def prim_int64(self, context: "Context", obj: "Any") -> "Any":
        return get_value(obj, "int64", int)
    def prim_text(self, context: "Context", obj: "Any") -> "Any":
        return get_value(obj, "text", str)
    def prim_timestamp(self, context: "Context", obj: "Any") -> "Any":
        return to_datetime(get_value(obj, "timestamp", timestamp_pb2.Timestamp))
    def prim_party(self, context: "Context", obj: "Any") -> "Any":
        return to_party(get_value(obj, "party", str))
    def prim_list(self, context: "Context", item_type: "Type", obj: "Any") -> "Any":
        msg = get_value(obj, "list", lapipb.List)
        return context.convert_list(item_type, msg.elements)
    def prim_date(self, context: "Context", obj: "Any") -> "Any":
        # NOTE(review): the "date" case is requested with hint `str` here --
        # confirm this matches the wire representation expected by to_date.
        msg = get_value(obj, "date", str)
        return to_date(msg)
    def prim_contract_id(self, context: "Context", item_type: "Type", obj: "Any") -> "Any":
        # ContractIds arrive untyped; convert_contract_id re-attaches the
        # template type parameter.
        msg = get_value(obj, "contract_id", str)
        return context.convert_contract_id(item_type, msg)
    def prim_optional(self, context: "Context", t: "Type", obj: "Any") -> "Any":
        # An unset `value` field means None.
        msg = get_value(obj, "optional", lapipb.Optional)
        maybe_val = msg.value if msg.HasField("value") else None
        return context.convert_optional(t, maybe_val)
    def prim_text_map(self, context: "Context", item_type: "Type", obj: "Any") -> "Any":
        msg = get_value(obj, "map", lapipb.Map)
        mapping = {entry_pb.key: entry_pb.value for entry_pb in msg.entries}
        return context.convert_text_map(item_type, mapping)
    def prim_numeric(self, context: "Context", nat: int, obj: "Any") -> "Any":
        # `nat` (the scale) is not needed for decoding the string form.
        msg = get_value(obj, "numeric", str)
        return to_decimal(msg)
    def prim_gen_map(
        self, context: "Context", key_type: "Type", value_type: "Type", obj: "Any"
    ) -> "Any":
        msg = get_value(obj, "gen_map", lapipb.GenMap)
        obj = {}
        for i, entry in enumerate(msg.entries):
            key = context.append_path(f"[key: {i}]").convert(key_type, entry.key)
            value = context.append_path(f"[{key}]").convert(value_type, entry.value)
            if isinstance(key, collections.abc.Mapping):
                # Python dicts cannot be the keys of a dict because they are not hashable;
                # FrozenDict wraps a dict with a trivial hashing implementation to avoid
                # problems
                obj[FrozenDict(key)] = value
            else:
                obj[key] = value
        return obj
class ProtobufEncoder(ValueMapper):
    """Encodes Python values into Ledger API protobuf messages.

    Each mapper method returns a ``(ctor, payload)`` pair where *ctor* names
    the ``Value`` oneof field to populate (consumed by :func:`set_value`).
    """

    def data_record(
        self, context: "Context", dt: "DefDataType", record: "DefDataType.Fields", obj: "Any"
    ) -> "Any":
        """Encode a mapping as a protobuf Record, one entry per declared field."""
        msg = lapipb.Record()
        for fld in record.fields:
            entry = msg.fields.add()
            entry.label = fld.field
            ctor, val = context.append_path(fld.field).convert(fld.type, obj[fld.field])
            set_value(entry.value, ctor, val)
        return "record", msg

    def data_variant(
        self, context: "Context", dt: "DefDataType", variant: "DefDataType.Fields", obj: "Any"
    ) -> "Any":
        """Encode a single-entry mapping as a protobuf Variant."""
        obj_ctor, obj_value = to_variant(obj)
        obj_type = find_variant_type(dt, variant, obj_ctor)
        msg_case, msg_value = context.append_path(obj_ctor).convert(obj_type, obj_value)
        msg = lapipb.Variant()
        msg.constructor = obj_ctor
        set_value(msg.value, msg_case, msg_value)
        return "variant", msg

    def data_enum(
        self,
        context: "Context",
        dt: "DefDataType",
        enum: "DefDataType.EnumConstructors",
        obj: "Any",
    ) -> "Any":
        """Encode an enum constructor name, validating it against *enum*."""
        msg = lapipb.Enum()
        msg.constructor = context.value_validate_enum(obj, enum)
        return "enum", msg

    def prim_unit(self, context: "Context", obj: "Any") -> "Any":
        return "unit", Empty()

    def prim_bool(self, context: "Context", obj: "Any") -> "Any":
        return "bool", to_bool(obj)

    def prim_int64(self, context: "Context", obj: "Any") -> "Any":
        return "int64", to_int(obj)

    def prim_text(self, context: "Context", obj: "Any") -> "Any":
        return "text", to_str(obj)

    def prim_timestamp(self, context: "Context", obj: "Any") -> "Any":
        # Timestamps travel over the wire as microseconds since the epoch.
        return "timestamp", datetime_to_epoch_microseconds(to_datetime(obj))

    def prim_party(self, context: "Context", obj: "Any") -> "Any":
        return "party", to_str(obj)

    def prim_list(self, context: "Context", item_type: "Type", obj: "Any") -> "Any":
        """Encode an iterable as a protobuf List."""
        msg = lapipb.List()
        for i, item in enumerate(obj):
            value = msg.elements.add()
            # Bug fix: the path element used to be f"[{i}" (missing its closing
            # bracket), inconsistent with every other path format in this class.
            ctor, val = context.append_path(f"[{i}]").convert(item_type, item)
            set_value(value, ctor, val)
        return "list", msg

    def prim_date(self, context: "Context", obj: "Any") -> "Any":
        # Dates travel over the wire as days since the epoch.
        return "date", date_to_int(to_date(obj))

    def prim_contract_id(self, context: "Context", item_type: "Type", obj: "Any") -> "Any":
        return "contract_id", to_str(obj)

    def prim_optional(self, context: "Context", item_type: "Type", obj: "Any") -> "Any":
        """Encode a possibly-None value as a protobuf Optional (left unset for None)."""
        msg = lapipb.Optional()
        if obj is not None:
            ctor, val = context.append_path("?").convert(item_type, obj)
            set_value(msg.value, ctor, val)
        return "optional", msg

    def prim_text_map(self, context: "Context", item_type: "Type", obj: "Any") -> "Any":
        """Encode a text-keyed mapping as a protobuf Map."""
        msg = lapipb.Map()
        for key, value in obj.items():
            entry = msg.entries.add()
            entry.key = key
            ctor, val = context.append_path(f"[{key}]").convert(item_type, value)
            set_value(entry.value, ctor, val)
        return "map", msg

    def prim_numeric(self, context: "Context", nat: int, obj: "Any") -> "Any":
        # Numerics travel over the wire as decimal strings.
        d = to_decimal(obj)
        return "numeric", decimal_to_str(d) if d is not None else None

    def prim_gen_map(
        self, context: "Context", key_type: "Type", value_type: "Type", obj: "Any"
    ) -> "Any":
        """Encode an arbitrary-keyed mapping as a protobuf GenMap."""
        msg = lapipb.GenMap()
        for i, (key, value) in enumerate(obj.items()):
            entry = msg.entries.add()
            key_ctor, key_val = context.append_path(f"[key: {i}]").convert(key_type, key)
            val_ctor, val_val = context.append_path(f"[{key}]").convert(value_type, value)
            set_value(entry.key, key_ctor, key_val)
            set_value(entry.value, val_ctor, val_val)
        return "gen_map", msg
def get_value(obj: "Any", field: str, pb_type: "PyType[T]") -> "T":
    """Return *obj* itself when it already is a *pb_type*; otherwise read *field* from it."""
    if isinstance(obj, pb_type):
        return obj
    return getattr(obj, field)
def set_value(message: "lapipb.Value", ctor: "Optional[str]", value: "Any") -> None:
    """
    Work around the somewhat crazy API of Python's gRPC library to apply a known value to a
    :class:`Value`.

    :param message:
        The :class:`Value` object to modify.
    :param ctor:
        The actual field to apply to, or ``None`` to interpret the entire message as a ``Record``
        instead.
    :param value:
        The actual value to set. Must be compatible with the appropriate field.
    """
    # oneof cases that hold sub-messages and must be merged rather than assigned
    merge_cases = ("record", "variant", "list", "optional", "enum", "map", "gen_map")
    try:
        if ctor is None:
            message.MergeFrom(value)
        elif ctor == "unit":
            # unit carries no payload; just mark the field as present
            message.unit.SetInParent()
        elif ctor in merge_cases:
            getattr(message, ctor).MergeFrom(value)
        else:
            # scalar cases (bool, int64, text, ...) are plain attribute writes
            setattr(message, ctor, value)
    except:  # noqa
        from .. import LOG

        LOG.error("Failed to set a value %s, %s", ctor, value)
        raise
|
import boto3
import functools
@functools.lru_cache()
def describe_regions():
    """Return the names of all EC2 regions, caching the API response for the process."""
    ec2 = boto3.client('ec2')
    regions = ec2.describe_regions()['Regions']
    return [region['RegionName'] for region in regions]
class AWSLambda:
    """Thin wrapper around a regional AWS Lambda client."""

    def __init__(self, region):
        # One Lambda client per region; IAM (account aliases) is global.
        self._client = boto3.client('lambda', region)
        self._region = region
        self._accountalias = boto3.client('iam').list_account_aliases()

    def get_account_alias(self):
        # NOTE(review): this yields a single alias, so callers get a
        # generator, not a string — confirm that is what callers expect.
        yield self._accountalias['AccountAliases'][0]

    def list_functions(self, **kwargs):
        """Yield every Lambda function in this region, tagged with its region name."""
        paginator = self._client.get_paginator('list_functions')
        for page in paginator.paginate(**kwargs):
            for fn in page['Functions']:
                fn['region'] = self._region
                yield fn
def get_function_name(function):
    """Return the 'FunctionName' entry of a Lambda function record."""
    name = function['FunctionName']
    return name
# def get_function_runtime(function):
# return function['Runtime']
def python27_runtime_filter(function):
    """Return True when the function's runtime is exactly 'python2.7'."""
    runtime = function['Runtime']
    return runtime == 'python2.7'
def main():
    """Print the names of every python2.7 Lambda function across all regions."""
    regions = describe_regions()
    report = []
    for region in regions:
        functions = AWSLambda(region).list_functions()
        for lf in filter(python27_runtime_filter, functions):
            # Bug fix: the original appended lf['FunctionName'][0], which is
            # only the first character of the function name.
            report.append(lf['FunctionName'])
    print(report)
# for function in filter(python27_runtime_filter, AWSLambda(region).list_functions()):
# for functions in function:
# report.append(functions({'FunctionName'}))
# for region in regions:
# for function in filter(python27_runtime_filter, AWSLambda(region).list_functions()):
# report.append(({FunctionName, Region, AccountAlias}))
# print(function)
# print(report)
# def write_lambda_function_csv(filename='{}-lambda_funtions_python2.csv'.format()):
# """Writes output of Lambda Functions to CSV file"""
# with open(filename, newline='') as csvfile:
# return [w for w in csv.write(csvfile)]
# def add_csv_to_set(ec2_instances):
# for instances in read_instance_names_from_file():
# for i in instances:
# ec2_instances.add(i)
# return instances
# region = 'us-west-2'
# client = boto3.client('lambda', region)
# print(get_function_name())
# functions = list_functions()
# for function in functions:
# pprint.pprint(function)
# function = []
# print(get_function_runtime())
# for r in runtime:
# print(r)
# Script entry point: only run when executed directly, not when imported.
if __name__ == '__main__':
    main()
from __future__ import print_function
import sys
import re
import numpy as np
# Build the stop-word set, one word per line.
stopwords = set()
with open('stopwords.txt') as stopwords_file:
    for line in stopwords_file:
        stopwords.add(line.strip('\t\n'))
# NOTE(review): `stopwords` is built but never consulted below — confirm
# whether the concept words were meant to be filtered against it.

# Assign a sequential id to every distinct normalized word in the corpus.
conceptbag = {}
num = 1
with open('new_conceptbag.txt') as doc:
    for line in doc:
        # Bug fix: the original split on the literal '/n' (a typo for '\n').
        word = line.split('\n')[0]
        # Keep only letters, digits, spaces and hyphens.
        word = re.sub(r'[^a-zA-Z0-9 -]+', '', word)
        if word != '':
            word = word.lower()
            if word not in conceptbag:
                conceptbag[word] = num
                num = num + 1
print(num)
np.save('new_conceptbag.npy', conceptbag)
# Bug fix: the output file is now closed (and therefore flushed) via `with`;
# the original opened it and never closed it.
with open("new_conceptbag2.txt", "w") as bag2:
    print(conceptbag, file=bag2)
sd = sorted(conceptbag.items())
print()
# Given a list of n elements filled with arbitrary integers in [-100, 100],
# print the sum of all positive elements that are divisible by two.
import random

numbers = []
n = int(input("Введите количество чисел: "))
# Bug fix (idiom): renamed from `sum`, which shadowed the builtin.
total = 0
for i in range(n):
    numbers.append(random.randint(-100, 100))
    if numbers[i] > 0 and numbers[i] % 2 == 0:
        total += numbers[i]
print(numbers)
print(total)
|
import pygame
import Vector2
from Helpers import Colors
from Helpers.EventHelpers import EventExist
class DropDownItem:
    """One entry of a drop-down menu that can draw itself and report mouse state."""

    def __init__(self, item, rect=None):
        self.Item = item
        # Updated by Draw(); used afterwards for hover/click hit-testing.
        self.Rect = rect

    def Draw(self, game, position: Vector2):
        """Draw this item at *position*, highlighted while hovered."""
        screen = game.Settings.GetScreen()
        fill = Colors.DIMGREY if self.IsHoverdByMouse() else Colors.GREY
        self.Rect = pygame.draw.rect(screen, fill, [position.X, position.Y, 150, 30])
        # Render the item label and blit it inside the rectangle.
        font = pygame.font.Font(None, 15)
        text = font.render(str(self.Item), True, Colors.WHITE)
        screen.blit(text, [position.X + 10, position.Y + 15])

    def IsHoverdByMouse(self):
        """True when a previous Draw() produced a rect that contains the cursor."""
        return self.Rect is not None and self.Rect.collidepoint(pygame.mouse.get_pos())

    def IsClickedByMouse(self, game):
        """True when hovered and a MOUSEBUTTONDOWN event is pending."""
        return self.IsHoverdByMouse() and EventExist(game.Events, pygame.MOUSEBUTTONDOWN)
|
import numpy as np
import math
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as Data
import torchvision.datasets as dset
import torchvision.transforms as transforms
from matplotlib import pyplot as plt
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device = 'cpu'  # forced to CPU; the commented line above auto-selects CUDA
# Hyperparameters. NOTE(review): batch_size equals num_classes (11); later
# code iterates one where the other seems intended — confirm before changing.
num_classes = 11
batch_size = 11
learning_rate = 5e-5
num_epochs = 600
# Dataset locations (Windows-style relative paths).
training_dir = ".\\Train"
testing_dir = ".\\Test"
label_dir = ".\\Test\\test\\test.json"
# Training data: one sub-folder per class, images loaded as tensors.
train_dataset = dset.ImageFolder(root=training_dir,
                                 transform=transforms.ToTensor())
# drop_last keeps every batch exactly batch_size (init_hidden depends on it).
train_loader = Data.DataLoader(dataset=train_dataset,
                               batch_size=batch_size,
                               shuffle=False,
                               num_workers=0,
                               drop_last=True)
test_dataset = dset.ImageFolder(root=testing_dir,
                                transform=transforms.ToTensor())
test_loader = Data.DataLoader(dataset=test_dataset,
                              batch_size=batch_size,
                              shuffle=False,
                              num_workers=0)
class Model(nn.Module):
    """CNN feature extractor -> FC -> bidirectional LSTM -> FC classifier.

    Consumes 64x64x3 images and emits a softmax distribution over
    ``num_classes`` classes. Relies on the module-level globals
    ``num_classes``, ``batch_size`` and ``device``.
    """

    def __init__(self):
        super(Model, self).__init__()
        # Convolutional feature extractor; per-layer shapes noted inline.
        self.DCNN = nn.Sequential(
            # input: 64*64*3, output: 30*30*8
            nn.Conv2d(in_channels=3, out_channels=8, kernel_size=4, stride=2, padding=0),
            nn.BatchNorm2d(8),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=1, padding=0, dilation=1, ceil_mode=False),
            # input: 30*30*8, output: 13*13*16
            nn.Conv2d(in_channels=8, out_channels=16, kernel_size=4, stride=2, padding=0),
            nn.BatchNorm2d(16),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=1, padding=0, dilation=1, ceil_mode=False),
            # input: 13*13*16, output: 4*4*32
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=4, stride=2, padding=0),
            nn.BatchNorm2d(32),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=1, padding=0, dilation=1, ceil_mode=False),
            # input: 4*4*32, output: 2*2*32
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=2, stride=1, padding=0),
            nn.BatchNorm2d(32),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=1, padding=0, dilation=1, ceil_mode=False),
        )
        # input: 2*2*32, output: batch*1*30
        self.FC1 = nn.Sequential(
            nn.Linear(in_features=32 * 2 * 2, out_features=30, bias=True),
            nn.BatchNorm1d(30),
            nn.ReLU(inplace=True),
            nn.Dropout(0.25)
        )
        # input: batch*1*30, output: batch*1*(bidirectional+1)*hidden_size
        self.input_size = 30
        self.hidden_size = 20
        self.num_layers = 2
        self.lstm = nn.LSTM(input_size=self.input_size,
                            hidden_size=self.hidden_size,
                            num_layers=self.num_layers,
                            batch_first=True,
                            bidirectional=True)
        # Persistent (h, c) state; callers reset it via init_hidden() per batch.
        self.hidden = self.init_hidden()
        # input: batch*1*(bidirectional+1)*hidden_size, output: batch*1*11
        self.FC2 = nn.Sequential(
            nn.Linear(in_features=2 * self.hidden_size if self.lstm.bidirectional else self.hidden_size,
                      out_features=num_classes,
                      bias=True),
            nn.BatchNorm1d(1),
            nn.ReLU(inplace=True),
            nn.Dropout(0.25),
            nn.Softmax(dim=2)
        )
        # Manual weight initialization: scaled normal for convs, unit
        # gamma / zero beta for batch norms, Xavier for linear layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()

    def init_hidden(self):
        """Return fresh zeroed (h0, c0) tensors sized for the global batch_size."""
        if self.lstm.bidirectional:
            return (torch.zeros(self.num_layers * 2, batch_size, self.hidden_size).to(device),
                    torch.zeros(self.num_layers * 2, batch_size, self.hidden_size).to(device))
        else:
            return (torch.zeros(self.num_layers, batch_size, self.hidden_size).to(device),
                    torch.zeros(self.num_layers, batch_size, self.hidden_size).to(device))

    def forward(self, x):
        """Run CNN -> FC1 -> LSTM -> FC2; stores the LSTM state on self.hidden."""
        x = self.DCNN(x)
        x = x.view(x.size(0), -1)
        x = self.FC1(x)
        # Treat each FC1 vector as a length-1 sequence for the LSTM.
        x = x.unsqueeze(1)
        x, self.hidden = self.lstm(x, self.hidden)
        x = self.FC2(x)
        x = x.squeeze(1)
        return x
model = Model().to(device)
model.train()
# MSE against a one-hot target is used instead of cross-entropy
# (the alternatives were tried and left commented out).
# criterion = nn.CrossEntropyLoss()
# criterion = nn.NLLLoss()
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(),
                             lr=learning_rate,
                             betas=(0.6, 0.999),
                             weight_decay=0.01)
Loss = []  # mean training loss per epoch, plotted at the end of the script
for epoch in range(num_epochs):
    running_loss = 0
    cnt = 0
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)
        model.zero_grad()
        # Reset the LSTM state so batches do not leak state into each other.
        model.hidden = model.init_hidden()
        # forward
        outputs = model(images)
        # Build a one-hot target matrix for the MSE loss.
        target = torch.zeros(batch_size, num_classes)
        for n in range(batch_size):
            target[n][labels[n]] = 1
        loss = criterion(outputs, target)
        # backward
        tmp = loss.cpu()
        running_loss += tmp.data.numpy()
        loss.backward()
        optimizer.step()
        cnt += 1
    running_loss = running_loss / cnt
    Loss.append(running_loss)
    if (epoch + 1) % 10 == 0:
        print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, running_loss))
# Training-set accuracy pass. Side effect: `reference` ends up holding the
# model outputs of the *last* training batch (overwritten every iteration),
# which the test section below compares against.
acc = 0
cnt = 0
reference = []
for i, (images, labels) in enumerate(train_loader):
    images = images.to(device)
    labels = labels.to(device)
    model.zero_grad()
    model.hidden = model.init_hidden()
    outputs = model(images)
    reference = outputs.detach().numpy()
    tmp = np.argmax(outputs.detach().numpy(), axis=1).tolist()
    for n in range(len(tmp)):
        cnt += 1
        if tmp[n] == labels[n]:
            acc += 1
print(acc / cnt)
# test
model.eval()
class_dict = train_dataset.class_to_idx
pred = []
pred_dict = {}
for i, (images, labels) in enumerate(test_loader):
    images = images.to(device)
    model.zero_grad()
    model.hidden = model.init_hidden()
    outputs = model(images)
    # NOTE(review): `samples` entries are (path, class_index) tuples, so
    # pred_dict is keyed by tuples rather than bare file names — confirm.
    file_name = test_loader.dataset.samples[i*batch_size: (i+1)*batch_size]
    tmp = []
    # Nearest-reference classification: compare each test output vector to
    # the reference outputs captured from the last training batch.
    # NOTE(review): this iterates range(num_classes) where the batch
    # dimension seems intended (they happen to be equal, both 11), and
    # np.argmax of a distance list picks the *farthest* reference —
    # argmin would pick the closest. Confirm both before relying on this.
    for pic in range(num_classes):
        mse = []
        a = outputs[pic].detach().numpy()
        for c in range(num_classes):
            b = reference[c]
            test = np.linalg.norm(a-b)
            mse.append(test)
        tmp.append(np.argmax(mse).tolist())
    pred = pred + tmp
    for n in range(batch_size):
        pred_dict[file_name[n]] = tmp[n]
# Score the predictions against the ground-truth labels from the JSON file.
with open(label_dir, 'r') as inFile:
    labelDict = json.load(inFile)
test = []
for key in labelDict.keys():
    item = labelDict[key]
    tmp = class_dict[item]
    test.append(tmp)
acc = 0
for i in range(len(test)):
    if test[i] == pred[i]:
        acc += 1
acc /= len(test)
print(acc)
# Plot the per-epoch training loss curve.
x = np.arange(0, num_epochs)
plt.title("loss for each epoch")
plt.xlabel("epoch")
plt.ylabel("loss")
plt.plot(x, Loss)
plt.show()
|
# Assignment: write a login interface.
# - Prompt for a username and password.
# - Show a welcome message on success.
# - Lock the account after three wrong passwords.
user = {'hepd': 123456, 'hhhd': 654321}  # login -> password
name = input('用户名: ')
comm = 1  # 1-based number of the current attempt
while name in user:
    # NOTE(review): int() raises ValueError on non-numeric input — confirm
    # passwords are guaranteed numeric before hardening this.
    password = int(input('密码: '))
    if password == user[name]:
        print('登录成功!欢迎')
        break
    elif comm < 3:
        # Bug fix: the original checked the attempt counter *before* testing
        # the password, so the third promised attempt was never honoured
        # (it locked the account without checking the password just entered).
        comm += 1
        print('密码错误!您还有%d次尝试机会,请重新输入密码' % (4 - comm))
    else:
        print("尝试过多,账号已锁定!")
        break
else:
    # Unknown user: register them with the password they enter.
    print('新用户名')
    password = int(input('密码: '))
    user[name] = password
print(user)
import cv2
import numpy as np
DIM = 2
def gray_progress_bar(image,
                      stage_clear,
                      time_remaining,
                      keys):
    """Return an (84*DIM)-square grayscale frame with status bars drawn at the bottom."""
    side = 84 * DIM
    # Upscale the frame and map [0, 1] float pixels to 8-bit values.
    scale_image = cv2.convertScaleAbs(
        cv2.resize(image, (side, side), interpolation=cv2.INTER_CUBIC),
        alpha=(255.0 / 1.0))
    # Black out the state zone at the bottom of the frame.
    state_image = cv2.rectangle(scale_image, (0, 75*DIM), (side, side), (0, 0, 0), -1)
    # Floor progress bar (26 stages in total).
    bar_width = int((stage_clear / 26) * side)
    state_image = cv2.rectangle(
        state_image, (0, 75*DIM), (bar_width, 78*DIM), (70, 70, 70), -1)
    # Time-remaining progress bar (10000 time units in total).
    bar_width = int((time_remaining / 10000) * side)
    state_image = cv2.rectangle(
        state_image, (0, 78*DIM), (bar_width, 81*DIM), (140, 140, 140), -1)
    # Key indicator: a white strip while at least one key is held.
    if keys > 0:
        state_image = cv2.rectangle(
            state_image, (0, 81*DIM), (side, side), (255, 255, 255), -1)
    return cv2.cvtColor(state_image, cv2.COLOR_BGR2GRAY)
def rgb_progress_bar(image,
                     stage_clear,
                     time_remaining,
                     keys):
    """Return an RGB frame with status bars drawn over the bottom rows.

    Mirrors gray_progress_bar but keeps the image at its incoming size
    and skips resizing and the grayscale conversion.
    """
    # Map [0, 1] float pixels to 8-bit values.
    scale_image = cv2.convertScaleAbs(image, alpha=(255.0 / 1.0))
    # Consistency fix: coordinates are now derived from DIM exactly as in
    # gray_progress_bar (150 = 75*DIM, 156 = 78*DIM, 162 = 81*DIM,
    # 168 = 84*DIM for DIM == 2) instead of hard-coded literals.
    # Black out the state zone at the bottom of the frame.
    state_image = cv2.rectangle(
        scale_image, (0, 75*DIM), (84*DIM, 84*DIM), (0, 0, 0), -1)
    # Floor progress bar (26 stages in total).
    progress_bar = int((stage_clear / 26) * 84*DIM)
    state_image = cv2.rectangle(
        state_image, (0, 75*DIM), (progress_bar, 78*DIM), (70, 70, 70), -1)
    # Time-remaining progress bar (10000 time units in total).
    progress_bar = int((time_remaining / 10000) * 84*DIM)
    state_image = cv2.rectangle(
        state_image, (0, 78*DIM), (progress_bar, 81*DIM), (140, 140, 140), -1)
    # Key indicator: a white strip while at least one key is held.
    if keys > 0:
        state_image = cv2.rectangle(
            state_image, (0, 81*DIM), (84*DIM, 84*DIM), (255, 255, 255), -1)
    return state_image
|
#!/usr/bin/env python3
"""
Renumber the test cases such that the numbers within each family count
consecutively from 1.
Usage:
renumber_test_cases.py [<cases>]...
Arguments:
<cases>
The test cases to renumber. By default, all cases will be renumbered.
The numbers in the test names don't have any real significance, but sometimes
it's just nice to keep related tests close together in number. For example,
imagine we want to add two new test cases that are similar to the existing case
"dict_4". We could name these tests "dict_4a" and "dict_5b", then use this
script to renumber them to "dict_5" and "dict_6" without overwriting any
existing tests.
"""
import docopt
import shutil
from tempfile import mkdtemp
from pathlib import Path
from more_itertools import bucket
ROOT_DIR = Path(__file__).parent
CASE_DIR = ROOT_DIR / 'test_cases'
import sys; sys.path.append(str(ROOT_DIR / 'api'))
import nestedtext_official_tests as official
if __name__ == '__main__':
    args = docopt.docopt(__doc__)
    # Stage the renamed case directories in a temp dir first, so renumbering
    # can never overwrite an existing case mid-way (e.g. dict_4a -> dict_5
    # while a dict_5 directory still exists).
    tmp_dir = Path(mkdtemp(prefix='renumber_test_cases_'))
    cases = official.load_test_cases(args['<cases>'])
    # Group cases by family, then renumber each family from 1 in num order.
    families = bucket(cases, key=lambda x: x.family)
    for key in families:
        sorted_cases = sorted(families[key], key=lambda x: x.num)
        # Zero-pad indices to the width of the largest index in the family.
        d = len(str(len(sorted_cases)))
        for i, case in enumerate(sorted_cases, 1):
            shutil.move(case.dir, tmp_dir / f'{case.family}_{i:0{d}}')
    # Second pass: move every renamed directory back into the case dir.
    for dir in tmp_dir.iterdir():
        shutil.move(dir, CASE_DIR / dir.name)
    tmp_dir.rmdir()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from head import *
from mushroom_pb2 import *
from arm_packer import *
class ArmFrameSolution():
    """
    Parsing and construction of the data packets exchanged with the ARM side.
    (Python 2 code: frames are handled as byte strings.)
    """
    def __init__(self):
        pass
    def receive(self, handler):
        """
        Receive one framed packet.
        :param handler: connection handle (socket-like; must support recv)
        :rtype: the raw packet received on success, '' on failure
        """
        frame = ''
        # Hunt for the frame header byte by byte before trusting the stream.
        cup = handler.recv(1)
        if cup == A_HEAD[0]:
            frame += cup
            cup = handler.recv(1)
            if cup == A_HEAD[1]:
                frame += cup
                cup = handler.recv(len(A_HEAD) - 2)
                if cup == A_HEAD[2:]:
                    #TODO: detect empty reads and over-long payloads
                    frame += cup
                    # The length field is hex-encoded; decode it to know how
                    # many payload bytes follow.
                    cup = handler.recv(byte_pkg_len)
                    frame += cup
                    pkg_len = int(b2a_hex(cup), 16)
                    frame += handler.recv(pkg_len)
                    #TODO: handle read timeouts
                    return frame
        return ''
    def send(self, handler, frame):
        """
        Send a packet.
        :param handler: connection handle
        :param frame: the packet to send
        :rtype: number of bytes sent on success, otherwise -1
        """
        handler.send(frame)
        return len(frame)
    def generator(self, frame):
        """
        Packet builder.
        :param frame: the information to frame into a packet
        :rtype: the framed result on success, '' on failure
        """
        return frame
    def unpack(self, frame):
        """
        Unpack a frame: strip the header, read the length field, then split
        the remainder into the message header and the payload.
        :param frame: the packet to parse
        :rtype: a dict of the parts on success, otherwise an empty string
        """
        # len_head = len(HEAD)
        # if frame[:len_head] != HEAD:
        #     print 'wrone head'
        #     return ''
        frame = frame[len(A_HEAD):]
        pkg_len = int(b2a_hex(frame[:byte_pkg_len]), 16)
        frame = frame[byte_pkg_len:]
        # version = int(b2a_hex(frame[:byte_version]), 16)
        # frame = frame[byte_version:]
        m_header_len = int(b2a_hex(frame[:byte_m_header_len]), 16)
        frame = frame[byte_m_header_len:]
        message_header = frame[:m_header_len]
        data = frame[m_header_len:]
        return {
            'message_header' : message_header,
            'data' : data,
        }
    def parse(self, protobuf_msg_dic):
        # Deserialize the protobuf MessageHeader; the payload stays raw bytes.
        data_inst = ''
        header_inst = MessageHeader()
        header_inst.ParseFromString(protobuf_msg_dic['message_header'])
        proto_inst = {'header_inst':header_inst, 'data':protobuf_msg_dic['data']}
        return proto_inst
    def dispatch(self, proto_inst, birth_fileno):
        """
        Dispatcher: route the parsed packet to the handler registered for
        its message id in arm_protocal.
        :param proto_inst: the parsed header/data pair to dispatch
        :rtype: whatever the registered handler returns
        """
        message_id = proto_inst['header_inst'].message_id
        version = -1
        version = proto_inst['header_inst'].version
        log_msg = 'bef dispatch: message_id = %s, version = %s' %(str(message_id), str(version))
        print log_msg
        return arm_protocal[message_id](proto_inst, birth_fileno)
|
import datetime


def checkfake(d):
    """Reject day numbers outside 1..31."""
    if not (0 < d < 32):
        print('дата не верна')
        quit()


def checkmonth(m):
    """Reject month numbers outside 1..12."""
    if not (0 < m < 13):
        print('дата не верна')
        quit()


def checkyear(y):
    """Reject non-positive years."""
    if not (y > 0):
        print('дата не верна')
        quit()


def february(d, m):
    """Validate February: at most 29 days, and 29 only in a leap year."""
    # Bug fix: the original accepted 30/02 and 31/02 outright.
    if m == 2 and d > 29:
        print('дата не верна')
        quit()
    if m == 2 and d == 29:
        n = input('Это високосный год?')
        if n != 'y':
            print('дата не верна')
            quit()


def check_30_day_months(d, m):
    """Reject day 31 in the 30-day months (April, June, September, November)."""
    # Bug fix: the original April/June/September/November helpers all tested
    # `m == 1 and 0 < d < 31` — which would reject every valid date in those
    # months — and were never even called.
    if m in (4, 6, 9, 11) and d > 30:
        print('дата не верна')
        quit()


# The helpers are now defined once, above, instead of being redefined on
# every loop iteration as in the original.
while True:
    date_ = input('Input the date in standard format DD/MM/YY: ')
    if date_ == 'q':
        print('goodbye!')
        quit()
    day, month, year = date_.split('/')
    d = int(day)
    m = int(month)
    y = int(year)
    checkfake(d)
    checkmonth(m)
    checkyear(y)
    february(d, m)
    check_30_day_months(d, m)
    print('дата корректна', day, month, year)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.