content stringlengths 5 1.05M |
|---|
# -*- coding: utf-8 -*-
"""
[input]
project_dir
[output]
optimal hyperparameters, score each fold, avg, sd
<eval_func>.csv
"""
import argparse
import glob
import os
import pickle
import numpy as np
from sklearn.externals.joblib import Parallel, delayed
from Evaluation import ndcg
def eval_by_hy_parm(hy_parm_dir):
    """Score every fold-prediction file in one hyper-parameter directory.

    The directory name encodes the hyper-parameters as ``..._<lmd>_..._<step>``.
    Relies on the module globals ``skf``, ``ys``, ``activities`` and
    ``score_func`` that the ``__main__`` section sets up.

    Returns a dict with the parsed hyper-parameters, the per-fold scores,
    and their mean / standard deviation.
    """
    name_parts = os.path.basename(hy_parm_dir).split("_")
    result = {"lmd": name_parts[1], "step": name_parts[-1], "scores": []}
    prediction_files = sorted(glob.glob(hy_parm_dir + "/*"))
    # Prediction files are ordered to line up with the pickled CV splits.
    for prediction_file, (_, test_index) in zip(prediction_files, skf):
        predicted = np.loadtxt(prediction_file)
        fold_score = score_func(ys[test_index], predicted, activities[test_index])
        result["scores"].append(fold_score)
    result["mean"] = np.mean(result["scores"])
    result["std"] = np.std(result["scores"])
    return result
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("pred_dir")
    parser.add_argument("ys_pkl")
    parser.add_argument("split_pkl")
    parser.add_argument("activity_pkl")
    parser.add_argument("eval_func", choices=["ndcg", "ncg"])
    parser.add_argument("--top", type=int, default=100)

    def _str2bool(value):
        # Fix: the original used ``default=True`` with no converter, so any
        # command-line string (including "False") parsed as truthy.
        if isinstance(value, bool):
            return value
        return value.strip().lower() in ("1", "true", "yes", "y")

    parser.add_argument("--ignore_negative", type=_str2bool, default=True)
    args = parser.parse_args()

    with open(args.ys_pkl, "rb") as ys_pkl_fp:
        ys = pickle.load(ys_pkl_fp)
    with open(args.split_pkl, "rb") as split_pkl_fp:
        skf = pickle.load(split_pkl_fp)
    with open(args.activity_pkl, "rb") as activity_pkl_fp:
        activities = pickle.load(activity_pkl_fp)

    def score_func(y_true, y_pred, activities):
        """Evaluate one fold with the requested metric.

        Both metrics call :func:`ndcg`; they differ only in the decay
        function ("log2" for ndcg, "cg" for ncg).
        """
        if args.eval_func == "ndcg":
            decay_func_name = "log2"
        elif args.eval_func == "ncg":
            decay_func_name = "cg"
        else:
            raise ValueError("{} not exist!".format(args.eval_func))
        return ndcg(y_true, y_pred,
                    top=args.top,
                    decay_func_name=decay_func_name,
                    ignore_negative=args.ignore_negative,
                    activity_labels=activities,
                    )

    # Score every hyper-parameter directory in parallel and rank by mean score.
    hy_param_dirs = glob.glob(args.pred_dir + "/*")
    score_dicts = Parallel(n_jobs=-1, verbose=5)(
        delayed(eval_by_hy_parm)(hy_param_dir)
        for hy_param_dir in hy_param_dirs
    )
    score_dicts.sort(key=lambda d: d["mean"], reverse=True)
    print(score_dicts)
    print(score_dicts[0])
|
import tkinter as tk
from config.config import CONFIG
import concurrent.futures
from controller import services
from services.IPInfoService import IPInfo
from beans.PacketBean import Packet
"""
This module for creating Graphical Interface and attaching handler for every events which occurs in ui
"""
def showappversion():
    """Open a popup window displaying the application version string."""
    popup = tk.Toplevel()
    version_text = services["appversion"]()
    tk.Label(popup, text=version_text, height=0, width=50).pack()
def showauthorinfo():
    """Open a popup with author info; the last line (repo URL) is
    click-to-copy via the clipboard."""
    popup = tk.Toplevel()
    info_lines = services["authorinfo"]()
    last_line = info_lines[-1]
    for text in info_lines[:-1]:
        tk.Label(popup, text=text, height=0, width=50, ).pack()
    repo = tk.Label(popup, text=last_line)
    # Left click copies the repository URL to the clipboard.
    repo.bind("<Button-1>", lambda x: root.clipboard_append(last_line))
    repo.pack()
# --- Module-level application state, mutated by the handlers below ---
maxwidth = None                                # screen width bound; set in render_content
maxheight = None                               # screen height bound; set in render_content
expiring_map_object = {}                       # live packets keyed by IP; replaced in sniffer_callback
ignored_ip_set_object = {}                     # IPs the user blocked; replaced in sniffer_callback
should_listen_on_expiring_map_object = False   # True while the UI polls for new packets
root = None                                    # Tk root window
ipInfoService = None                           # IPInfo instance; created in sniffer_callback
snifferThreadId = None                         # id of the running sniffer thread, if any
IP_API = CONFIG["IP_API"]
stop_button = None
start_button = None
content_holder_data_frame = None               # frame holding header + scrollable data area
data_frame = None                              # inner frame whose rows list the packets
canvas_around_data_frame = None                # canvas providing the scrollbar
executors = None                               # process pool for IP lookups; created in sniffer_callback
def sniffer_callback(data):
    """Wire the sniffer thread's context into module globals and start polling.

    ``data`` is the dict returned by services["startSnifferThread"]; keys
    read here are ``threadId``, ``expiring_map`` and ``ignored_ip_set``.
    """
    global expiring_map_object
    global should_listen_on_expiring_map_object
    global ipInfoService
    global ignored_ip_set_object
    global snifferThreadId
    global executors
    snifferThreadId = data.get("threadId")
    expiring_map_object = data.get("expiring_map", None)
    ignored_ip_set_object = data.get("ignored_ip_set", None)
    # Pool used by IPInfo for lookups; shut down in stop_sniffer_thread().
    executors = concurrent.futures.ProcessPoolExecutor()
    data["executors"] = executors
    ipInfoService = IPInfo(**data)
    should_listen_on_expiring_map_object = True
    # Kick off the 1-second UI refresh loop.
    response_object_reader()
def get_list_interfaces():
    """Return the list of network interfaces available for sniffing."""
    interfaces = services["list_of_interfaces"]()
    return interfaces
def app_close_callback():
    """Window-close handler: stop the sniffer first, then tear down the UI."""
    global root
    stop_sniffer_thread()
    root.destroy()
def start_sniffer_thread(val):
    """Start sniffing on interface ``val`` and swap the start/stop buttons."""
    global start_button, stop_button
    q = services["startSnifferThread"](val)
    sniffer_callback(q)
    start_button.pack_forget()
    stop_button.pack()
def stop_sniffer_thread():
    """Stop the sniffer thread, stop UI polling, and release the worker pool."""
    global snifferThreadId
    global executors
    global should_listen_on_expiring_map_object
    global start_button, stop_button
    if snifferThreadId:
        services["stopSnifferThread"](snifferThreadId)
    should_listen_on_expiring_map_object = False
    if executors:
        # Let outstanding IP-lookup tasks finish before releasing the pool.
        executors.shutdown(wait=True)
    # start_button.config(state='normal')
    # stop_button.config(state='disabled')
    stop_button.pack_forget()
    start_button.pack()
def render_frame():
    """Create the root window with its menu bar and close handler; return it."""
    global root
    def create_filemenu(parent, root):
        # "app" menu: restart with elevated privileges, or quit.
        filemenu = tk.Menu(parent)
        filemenu.add_command(label="run with root", command=services["restart_with_root"])
        filemenu.add_command(label="close", command=root.quit)
        return filemenu
    def create_aboutmenu(parent):
        # "about" menu: version and author popups.
        aboutmenu = tk.Menu(parent)
        aboutmenu.add_command(label="version", command=showappversion)
        aboutmenu.add_command(label="author", command=showauthorinfo)
        return aboutmenu
    def create_menu(root):
        menu = tk.Menu(root)
        root.config(menu=menu)
        filemenu = create_filemenu(menu, root)
        aboutmenu = create_aboutmenu(menu)
        menu.add_cascade(label="app", menu=filemenu)
        menu.add_cascade(label="about", menu=aboutmenu)
    def create_root():
        root = tk.Tk()
        root.title(CONFIG["APP_NAME"])
        return root
    root = create_root()
    create_menu(root)
    # Ensure the sniffer thread is stopped when the window is closed.
    root.protocol("WM_DELETE_WINDOW", app_close_callback)
    return root
def render_permission_change(root):
    """Enter the Tk main loop (blocks until the window closes)."""
    root.mainloop()
def render_content(root):
    """Build the main layout (interface dropdown, start/stop buttons and the
    scrollable packet table), then enter the Tk main loop (blocks)."""
    global snifferThreadId
    global content_holder_data_frame
    global stop_button
    global start_button
    global maxheight
    global maxwidth
    maxheight = root.winfo_screenheight()
    # NOTE(review): +700 widens the layout beyond the physical screen width
    # -- confirm this offset is intentional.
    maxwidth = root.winfo_screenwidth() + 700
    rootFrame = tk.Frame(root, height=maxheight/4, width=maxwidth/2)
    top_bar_frame = tk.Frame(rootFrame, background="white")
    top_bar_frame.pack(fill=None, expand=True)
    dropDownVal = tk.StringVar(top_bar_frame)
    dropDownVal.set("")
    dropDown = tk.OptionMenu(top_bar_frame, dropDownVal, *get_list_interfaces())
    #dropDown.pack()
    start_button = tk.Button(top_bar_frame, text="start", command=lambda: start_sniffer_thread(dropDownVal.get()))
    stop_button = tk.Button(top_bar_frame, text="stop", command=lambda: stop_sniffer_thread())
    content_holder_data_frame = tk.Frame(rootFrame,height=maxheight/4, width=maxwidth/2, background="white")
    # Only the start button is shown initially; start/stop swap each other.
    start_button.pack()
    #stop_button.pack()
    content_holder_data_frame.pack()
    content_holder_data_frame.pack_propagate(0)
    rootFrame.pack()
    scrollbar_interface()
    root.resizable(0, 0)
    root.mainloop()
def scrollbar_interface():
    """Build the packet table: a fixed header row plus a canvas-wrapped,
    vertically scrollable frame that response_object_reader() fills."""
    global content_holder_data_frame
    global data_frame
    global maxwidth
    global canvas_around_data_frame
    def myfunction(event):
        # Keep the scrollable region in sync as rows are added/removed.
        global canvas_around_data_frame
        canvas_around_data_frame.configure(scrollregion=canvas_around_data_frame.bbox("all"))
    content_header_frame = tk.Frame(content_holder_data_frame, width=maxwidth/2)
    content_header_frame.grid(row=0, column=0)
    # Eight equal-width columns; column 6 (domain name) gets 30px extra.
    each_column_width = maxwidth/2/8
    content_header_frame.grid_columnconfigure(0, minsize=each_column_width)
    content_header_frame.grid_columnconfigure(1, minsize=each_column_width)
    content_header_frame.grid_columnconfigure(2, minsize=each_column_width)
    content_header_frame.grid_columnconfigure(3, minsize=each_column_width)
    content_header_frame.grid_columnconfigure(4, minsize=each_column_width)
    content_header_frame.grid_columnconfigure(5, minsize=each_column_width)
    content_header_frame.grid_columnconfigure(6, minsize=(each_column_width + 30))
    content_header_frame.grid_columnconfigure(7, minsize=(each_column_width))
    cell = tk.Label(content_header_frame, text="Server IP Address", font="Helvetica 10 bold")
    cell.grid(row=0, column=0)
    cell = tk.Label(content_header_frame, text="Protocol", font="Helvetica 10 bold")
    cell.grid(row=0, column=1)
    cell = tk.Label(content_header_frame, text="Interface", font="Helvetica 10 bold")
    cell.grid(row=0, column=2)
    cell = tk.Label(content_header_frame, text="Country", font="Helvetica 10 bold")
    cell.grid(row=0, column=3)
    cell = tk.Label(content_header_frame, text="State", font="Helvetica 10 bold")
    cell.grid(row=0, column=4)
    cell = tk.Label(content_header_frame, text="Region", font="Helvetica 10 bold")
    cell.grid(row=0, column=5)
    cell = tk.Label(content_header_frame, text="Domain Name", font="Helvetica 10 bold")
    cell.grid(row=0, column=6)
    cell = tk.Label(content_header_frame, text=" ", font="Helvetica 10 bold")
    cell.grid(row=0, column=7)
    content_header_frame.pack()
    # The data rows live inside a canvas so they can scroll independently
    # of the fixed header above.
    scroll_and_data_frame = tk.Frame(content_holder_data_frame, width=maxwidth/2)
    scroll_and_data_frame.grid(row=1, column=0)
    canvas_around_data_frame = tk.Canvas(scroll_and_data_frame, width=maxwidth/2)
    data_frame = tk.Frame(canvas_around_data_frame)
    myscrollbar = tk.Scrollbar(scroll_and_data_frame, orient="vertical", command=canvas_around_data_frame.yview)
    canvas_around_data_frame.configure(yscrollcommand=myscrollbar.set)
    myscrollbar.pack(side="right", fill="y")
    canvas_around_data_frame.pack(side="left")
    canvas_around_data_frame.create_window((0, 0), window=data_frame, anchor='nw')
    data_frame.bind("<Configure>", myfunction)
    scroll_and_data_frame.pack()
# NOTE(review): module-level process pool that is never referenced elsewhere
# in this file (handlers use the ``executors`` global instead) -- confirm
# before removing; creating it spawns worker processes at import time.
executor = concurrent.futures.ProcessPoolExecutor()
def name_filter(name):
    """Wrap a long display name onto two lines for a table cell.

    Names of 22 characters or fewer are returned unchanged. Longer names are
    split at the first word boundary where the cumulative (space-free)
    character count reaches 22.

    Fixes two defects in the original:
    - it fell through to an implicit ``None`` when the cumulative count
      never reached 22 (many short words), and
    - it produced a leading "\n" when the very first word was >= 22 chars.
    """
    if len(name) <= 22:
        return name
    words = name.split()
    running_length = 0
    for index, word in enumerate(words):
        running_length += len(word)
        # index > 0 guards against an empty first line (leading newline).
        if running_length >= 22 and index > 0:
            return " ".join(words[:index]) + "\n" + " ".join(words[index:])
    # Threshold never reached (e.g. many short words): return unchanged.
    return name
def populate_other_fields(packet_bean: Packet):
    """Asynchronously fill the geo/domain fields of ``packet_bean``, at most
    once per bean (guarded by ``request_fired``)."""
    if not packet_bean.request_fired:
        packet_bean.request_fired = True
        def cb(obj):
            # ``obj`` is the IP-info response dict; '-' marks missing values.
            if obj:
                packet_bean.country = obj["country"] if obj['country'] else '-'
                packet_bean.state = obj["region"] if obj['region'] else '-'
                packet_bean.region = obj["city"] if obj['city'] else '-'
                packet_bean.domain_name = name_filter(obj["businessWebsite"] or obj["org"])
        return ipInfoService.getDomainNamesForIP(packet_bean.communicatingIP, cb)
def response_object_reader():
    """Rebuild the packet table from the expiring map, then reschedule itself.

    Runs every second (via ``root.after``) while
    ``should_listen_on_expiring_map_object`` is True. All existing rows are
    destroyed and recreated on each tick.
    """
    global root
    global expiring_map_object
    global ignored_ip_set_object
    global should_listen_on_expiring_map_object
    global data_frame
    global maxwidth
    global canvas_around_data_frame
    row_index = 0
    # Full refresh: drop every previous row before redrawing.
    for widget in data_frame.winfo_children():
        widget.destroy()
    temp_frame = tk.Frame(data_frame)
    if expiring_map_object:
        for key in list(expiring_map_object.dictionary.keys()):
            if key not in ignored_ip_set_object:
                packet_bean = expiring_map_object.get(key)
                # Defined as a function taking packet_bean so each row's
                # callbacks close over its own bean (avoids late binding).
                def printOutPacketData(packet_bean):
                    if packet_bean:
                        populate_other_fields(packet_bean)
                        row_frame = tk.Frame(temp_frame)
                        # Column widths mirror the header built in
                        # scrollbar_interface().
                        each_column_width = maxwidth / 2 / 8
                        row_frame.grid_columnconfigure(0, minsize=each_column_width)
                        row_frame.grid_columnconfigure(1, minsize=each_column_width)
                        row_frame.grid_columnconfigure(2, minsize=each_column_width)
                        row_frame.grid_columnconfigure(3, minsize=each_column_width)
                        row_frame.grid_columnconfigure(4, minsize=each_column_width)
                        row_frame.grid_columnconfigure(5, minsize=each_column_width)
                        row_frame.grid_columnconfigure(6, minsize=(each_column_width + 30))
                        ip_column = tk.Label(row_frame, text=packet_bean.communicatingIP)
                        ip_column.grid(row=row_index, column=0)
                        cell = tk.Label(row_frame, text=packet_bean.protocol)
                        cell.grid(row=row_index, column=1)
                        cell = tk.Label(row_frame, text=packet_bean.interface)
                        cell.grid(row=row_index, column=2)
                        cell = tk.Label(row_frame, text=packet_bean.country)
                        cell.grid(row=row_index, column=3)
                        cell = tk.Label(row_frame, text=packet_bean.state)
                        cell.grid(row=row_index, column=4)
                        cell = tk.Label(row_frame, text=packet_bean.region)
                        cell.grid(row=row_index, column=5)
                        cell = tk.Label(row_frame, text=packet_bean.domain_name)
                        cell.grid(row=row_index, column=6)
                        def callback_for_right_click(x):
                            # Right-click context menu: copy or block this IP.
                            global canvas_around_data_frame
                            popup_menu = tk.Menu(canvas_around_data_frame, tearoff=0)
                            popup_menu.add_command(label="Copy {ip}".format(ip=packet_bean.communicatingIP),
                                                   command=lambda: root.clipboard_append(packet_bean.communicatingIP))
                            popup_menu.add_command(label="Block {ip}".format(ip=packet_bean.communicatingIP),
                                                   command=lambda: services["block_ip_address"](packet_bean))
                            popup_menu.tk_popup(x.x_root, x.y_root)
                            def destory_menu(x):
                                popup_menu.destroy()
                            popup_menu.bind("<FocusOut>", destory_menu)
                        ip_column.bind('<Button-3>', callback_for_right_click)
                        row_frame.bind('<Button-3>', callback_for_right_click)
                        row_frame.pack()
                printOutPacketData(packet_bean)
                row_index+=1
    temp_frame.pack()
    if should_listen_on_expiring_map_object:
        # Poll again in one second.
        root.after(1000, response_object_reader)
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import connexion
import six
from swagger_server.models.api_settings import ApiSettings # noqa: E501
from swagger_server.models.api_status import ApiStatus # noqa: E501
from swagger_server.models.dictionary import Dictionary # noqa: E501
from swagger_server import util
def get_application_settings():  # noqa: E501
    """get_application_settings

    Return the current application settings.  # noqa: E501

    :rtype: ApiSettings
    """
    # Generated stub: delegate to the controller implementation.
    return util.invoke_controller_impl()
def modify_application_settings(dictionary):  # noqa: E501
    """modify_application_settings

    Modify one or more of the application settings.  # noqa: E501

    :param dictionary: A dictionary where the name of the keys corresponds to the name of the settings.
    :type dictionary: dict | bytes

    :rtype: ApiSettings
    """
    # Deserialize a JSON request body into the generated model type.
    request = connexion.request
    if request.is_json:
        dictionary = Dictionary.from_dict(request.get_json())  # noqa: E501
    return util.invoke_controller_impl()
def set_application_settings(settings):  # noqa: E501
    """set_application_settings

    Set and store the application settings.  # noqa: E501

    :param settings:
    :type settings: dict | bytes

    :rtype: ApiSettings
    """
    # Deserialize a JSON request body into the generated model type.
    request = connexion.request
    if request.is_json:
        settings = ApiSettings.from_dict(request.get_json())  # noqa: E501
    return util.invoke_controller_impl()
|
import pandas as pd
import subprocess
import os
import csv
import json
from datetime import datetime
from os import walk
from dockerhub.downloader import Downloader
def select_tags(list_tags):
    """Pick up to five representative tags from ``list_tags``.

    Selects the first, last, median and the two quartile positions
    (indices may coincide for short lists), preserving original order.
    """
    n = len(list_tags)
    wanted_positions = {
        0,
        n - 1,
        round(n / 2),
        round(n * 0.25),
        round(n * 0.75),
    }
    return [tag for position, tag in enumerate(list_tags) if position in wanted_positions]
class DockerImages:
    """Collect Docker Hub image metadata via ``docker search`` and
    ``skopeo inspect`` and write the results to CSV files under ``path``.

    NOTE(review): ``get_info_images`` reads the module-level globals
    ``path_download`` and ``image_`` set by the driver script at the bottom
    of this file -- confirm whether that coupling is intentional.
    """
    def __init__(self, path=os.getcwd(), save_log=True, download_manifest=True):
        # NOTE(review): the ``path`` default is evaluated once at import
        # time, fixing it to the import-time working directory.
        if not os.path.exists(path+'/csv'):
            os.makedirs(path+'/csv')
        self.csv_path = path+'/csv'
        if not os.path.exists(path+'/logs'):
            os.makedirs(path+'/logs')
        self.log_path = path+'/logs'
        self.total_images = 0      # running count of search hits
        self.image_stars = {}      # image name -> star count
        if not os.path.exists(path+'/logs/info'):
            os.makedirs(path+'/logs/info')
        self.info_path = path+'/logs/info'
        self.save_log = save_log   # keep raw tool logs when True
        self.download_manifest = download_manifest
        self.images_tags = {}      # image name -> RepoTags list
        self.images_layers = {}    # image name -> Layers list
        self.images_env = {}       # image name -> Env list
    def search_all(self, keywords=[]):
        """Run ``docker search`` for every keyword and write one CSV row per
        result; also fills ``self.image_stars``.

        NOTE(review): mutable default argument ``keywords=[]`` -- harmless
        here (never mutated) but worth normalizing to ``None``.
        """
        dt_string = datetime.now().strftime("%Y-%m-%d_%H.%M")
        data_file = open(self.csv_path + '/Docker_images_{}.csv'.format(dt_string), mode='w', newline='',
                         encoding='utf-8')
        data_writer = csv.writer(data_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        data_writer.writerow(['Keyword', 'Name', 'Description', 'Stars', 'IsOfficial', 'IsAutomated'])
        for keyword in keywords:
            log_ = self._search_by_keyword(keyword)
            try:
                file1 = open(log_, 'r', encoding='utf-8')
                Lines = file1.readlines()
                count = 0
                # Strips the newline character
                for line in Lines:
                    # NOTE(review): eval() on tool output. The lines come from
                    # ``docker search --format '{{json .}}'`` so json.loads
                    # would be the safe equivalent -- confirm before changing.
                    line_json = eval(line)
                    self.total_images += 1
                    data_writer.writerow(
                        [keyword, line_json['Name'], line_json['Description'], line_json['StarCount'],
                         line_json['IsOfficial'],
                         line_json['IsAutomated']])
                    if not line_json['Name'] in self.image_stars:
                        self.image_stars[line_json['Name']] = line_json['StarCount']
                    #self.image_list.append(line_json['Name'])
                file1.close()
            except Exception as e:
                print(e)
            self._remove_path(log_)
        data_file.close()
    def _search_by_keyword(self,keyword):
        """Run ``docker search`` for one keyword, redirecting its JSON-lines
        output to a timestamped log file; return the log path."""
        log_ = self.log_path + '/log_' + keyword + '-{}.txt'.format(datetime.now().strftime("%Y-%m-%d-%H.%M"))
        # process_command = 'docker search ' + keyword + ' > ' + log_
        process_command = "docker search --format='{{json .}}' " + keyword + " > " + log_
        p = subprocess.Popen(process_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        output = ""
        for line in p.stdout.readlines():
            output = output + str(line) + '\n'
        retval = p.wait()
        if retval == 0:
            print("Docker search successful for keyword: {}!".format(keyword))
        else:
            print("Docker search Error for keyword {}!".format(keyword))
            print(output)
        return log_
    def get_info_images(self, image_list):
        """Inspect every image in ``image_list`` with skopeo and write stats,
        tags and layers CSVs; optionally download manifests for a subset of
        tags chosen by select_tags()."""
        dt_string = datetime.now().strftime("%Y-%m-%d-%H-%M")
        data_file_stats = open(self.csv_path + '/Images_Info_stats_{}.csv'.format(dt_string), mode='w', newline='',
                               encoding='utf-8')
        data_writer_stats = csv.writer(data_file_stats, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        data_writer_stats.writerow(
            ['Image', 'Name', 'Digest', 'Tags', 'Created', 'DockerVersion', 'Labels', 'Architecture', 'Os',
             'Layers', 'Env'])
        data_file_tags = open(self.csv_path + '/Images_tags_{}.csv'.format(dt_string), mode='w', newline='',
                              encoding='utf-8')
        data_writer_tags = csv.writer(data_file_tags, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        data_writer_tags.writerow(['Image', 'Name', 'RepoTags'])
        data_file_layers = open(self.csv_path + '/Images_layers_{}.csv'.format(dt_string), mode='w', newline='',
                                encoding='utf-8')
        data_writer_layers = csv.writer(data_file_layers, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        data_writer_layers.writerow(['Image', 'Name', 'Layers'])
        # ``path_download`` is a module-level global set by the driver script.
        downloader = Downloader(root_path=path_download)
        for doker_image in image_list:
            ## Only consider image with atleast 1 stars
            #print(star_)
            #if int(star_) > 0:
            print('Started inspecting image - {}'.format(doker_image))
            log_info = self._info(self.info_path, doker_image)
            try:
                with open(log_info) as f:
                    json_data = json.load(f)
                print(json_data)
                self.images_tags[doker_image] = json_data['RepoTags']
                self.images_layers[doker_image] = json_data['Layers']
                self.images_env[doker_image] = json_data['Env']
                count_layers = 0
                count_env = 0
                count_labels = 0
                count_tags = 0
                # Each field may be missing/None in the skopeo output, hence
                # the individual best-effort try/except blocks below.
                try:
                    count_labels = len(json_data['Labels'])
                except:
                    pass
                try:
                    count_layers = len(json_data['Layers'])
                    for layer in json_data['Layers']:
                        data_writer_layers.writerow([doker_image, json_data['Name'], layer])
                except:
                    pass
                try:
                    count_env = len(json_data['Env'])
                except:
                    pass
                try:
                    count_tags = len(json_data['RepoTags'])
                    list_tags = []
                    for tag_ in json_data['RepoTags']:
                        data_writer_tags.writerow([doker_image, json_data['Name'], tag_])
                        list_tags.append(tag_)
                    if len(list_tags) <= 4:
                        selected_tags = list_tags
                    else:
                        selected_tags = select_tags(list_tags)
                    downloaded_tags = downloader._check_images_tags_downloaded(doker_image, selected_tags)
                    # NOTE(review): ``image_`` is not defined in this method;
                    # it resolves to the module-level global left over from
                    # the driver script -- almost certainly meant
                    # ``doker_image``.
                    print("Image: {}, selected {}/{}, Not downloaded previusly: {}, Tags: ".format(image_,
                                                                                                   len(selected_tags),
                                                                                                   len(list_tags), len(
                            downloaded_tags)), selected_tags, downloaded_tags)
                    if len(downloaded_tags) > 0:
                        downloader.download_manifest_single(doker_image, downloaded_tags)
                except:
                    pass
                #print(len(json_data['RepoTags']))
                data_writer_stats.writerow(
                    [doker_image, json_data['Name'], json_data['Digest'], count_tags, json_data['Created'], json_data['DockerVersion'], count_labels, json_data['Architecture'], json_data['Os'],
                     count_layers, count_env])
                #print(json_data['Digest'], json_data['Name'])
            except Exception as e:
                pass
                #print(e, 'error occyred in info!')
            #if os.path.exists(log_info):
            #    os.remove(log_info)
            #self._remove_path(log_info)
        data_file_stats.close()
        data_file_tags.close()
        data_file_layers.close()
    def _info(self,info_path, doker_image):
        """Run ``skopeo inspect`` for one image, writing the JSON output to a
        timestamped log file; return the log path."""
        doker_image_str = ''
        if '/' in str(doker_image):
            doker_image_str = str(doker_image).replace('/', '_')
        log_info = info_path+ '/info_' + doker_image_str + '-{}.txt'.format(datetime.now().strftime("%Y-%m-%d-%H:%M"))
        # process_command = 'docker search ' + keyword + ' > ' + log_
        process_command = "skopeo inspect docker://"+doker_image+" > " + log_info
        p = subprocess.Popen(process_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        output = ""
        for line in p.stdout.readlines():
            output = output + str(line) + '\n'
        retval = p.wait()
        if retval == 0:
            print("Inspecting docker Image {} successful!".format(doker_image))
        else:
            print("Docker Inspect Error for Image {}!".format(doker_image))
            #print(output)
        return log_info
    def _remove_path(self, path):
        """Delete ``path`` unless raw logs are being kept (save_log=True)."""
        if self.save_log == False:
            if os.path.exists(path):
                os.remove(path)
#path = './images'
# --- Driver script: prepare output directories, load the image list from
# CSV, and inspect every image with DockerImages.get_info_images(). ---
if not os.path.exists('../outputs'):
    os.makedirs('../outputs')
if not os.path.exists('../outputs/dockerhub'):
    os.makedirs('../outputs/dockerhub')
if not os.path.exists('../outputs/dockerhub/dockerimages'):
    os.makedirs('../outputs/dockerhub/dockerimages')
path_download = '../outputs/dockerhub/dockerimages/downloads'  # read as a global by DockerImages
path = '../outputs/dockerhub/'
#path = './'
#df_data = pd.read_excel(open(path+'Ml Docker projects-v2.xlsx', 'rb'), sheet_name='filtered-final')
df_data = pd.read_csv('../docker-images.csv')
Repos = df_data.Repos.values.tolist()
docker_image = df_data.docker_image.values.tolist()
dockerImages = DockerImages(path=path, save_log=True, download_manifest=True)
list_images = []
for i in range(len(Repos)):
    # Each CSV row may list several comma-separated images.
    image_ = docker_image[i].strip()
    split_image = image_.split(', ')
    for image in split_image:
        #print(i,len(image), image)
        list_images.append(image)
print('Total images: ', len(list_images))
dockerImages.get_info_images(list_images)
|
#!/usr/bin/env python
import argparse
import commands
from tableCmpSrc.genTopoOrder import *
from tableCmpSrc.tableNumCmp import *
def generateTest(directory, test_num):
    """Generating Topo Order Tests"""
    # Python 2 code: uses the removed ``commands`` module and print
    # statements; do not run under Python 3 without porting.
    for i in range(test_num):
        ai = i+1
        # Randomly generate topo order
        l, topoOrder = genTopoOrder2()
        # Write topo order to file
        cmd = "sudo ./createTopoOrder.sh %s %d %s" % (directory, ai, topoOrder)
        status, output = commands.getstatusoutput(cmd)
        if status != 0:
            # Abort on the first shell failure and show its output.
            print "\nError occurred as generating topo order tests!\n"
            print output
            return
if __name__ == '__main__':
    # -n/--num: how many tests to generate; -d/--dir: output directory.
    parser = argparse.ArgumentParser(description='LCS parsing')
    parser.add_argument('-n', '--num', help='Test number',
                        type=int, action="store", default=2)
    parser.add_argument('-d', '--dir', help='Output directory',
                        type=str, action="store", default="test")
    args = parser.parse_args()
    generateTest(args.dir, args.num)
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Sequence, Union
from deprecate.utils import void
from torch.utils.data.dataloader import DataLoader
from pytorch_lightning.loops.dataloader import DataLoaderLoop
from pytorch_lightning.loops.epoch import EvaluationEpochLoop
from pytorch_lightning.trainer.connectors.logger_connector.result import _OUT_DICT, ResultCollection
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.types import EPOCH_OUTPUT
class EvaluationLoop(DataLoaderLoop):
    """Loops over all dataloaders for evaluation."""

    def __init__(self) -> None:
        super().__init__()
        self.epoch_loop = EvaluationEpochLoop()
        self._results = ResultCollection(training=False)
        self._outputs: List[EPOCH_OUTPUT] = []
        self._max_batches: List[int] = []
        self._has_run: bool = False

    @property
    def num_dataloaders(self) -> int:
        """Returns the total number of dataloaders."""
        # case where user does:
        # return dl1, dl2
        dataloaders = self.dataloaders
        if dataloaders is None:
            return 0
        length = len(dataloaders)
        if length > 0 and isinstance(dataloaders[0], (list, tuple)):
            length = len(dataloaders[0])
        return length

    @property
    def dataloaders(self) -> Sequence[DataLoader]:
        """Returns the validation or test dataloaders."""
        dataloaders = self.trainer.test_dataloaders if self.trainer.testing else self.trainer.val_dataloaders
        if dataloaders is None:
            raise RuntimeError("Dataloaders should be available.")
        return dataloaders

    def connect(self, epoch_loop: EvaluationEpochLoop) -> None:  # type: ignore[override]
        """Connect the evaluation epoch loop with this loop."""
        self.epoch_loop = epoch_loop

    @property
    def done(self) -> bool:
        """Returns whether all dataloaders are processed or evaluation should be skipped altogether."""
        return super().done or self.skip

    @property
    def skip(self) -> bool:
        """Returns whether the evaluation should be skipped."""
        max_batches = self._get_max_batches()
        return sum(max_batches) == 0

    def reset(self) -> None:
        """Resets the internal state of the loop."""
        self._max_batches = self._get_max_batches()
        # bookkeeping
        self._outputs = []
        # A scalar max_batches applies uniformly to every dataloader.
        if isinstance(self._max_batches, int):
            self._max_batches = [self._max_batches] * len(self.dataloaders)
        super().reset()

    def on_skip(self) -> List:
        # Return value stands in for the (empty) epoch outputs when skipped.
        return []

    def on_run_start(self, *args: Any, **kwargs: Any) -> None:
        """Runs the ``_on_evaluation_model_eval``, ``_on_evaluation_start`` and ``_on_evaluation_epoch_start``
        hooks."""
        void(*args, **kwargs)
        # hook
        self._on_evaluation_model_eval()
        self.trainer.lightning_module.zero_grad()
        self._on_evaluation_start()
        self._on_evaluation_epoch_start()

    def advance(self, *args: Any, **kwargs: Any) -> None:
        """Performs evaluation on one single dataloader."""
        void(*args, **kwargs)
        dataloader_idx: int = self.current_dataloader_idx
        dataloader = self.trainer.training_type_plugin.process_dataloader(self.current_dataloader)
        # The profiled wrapper is kept both as the loop's data fetcher and as
        # the iterable handed to the epoch loop below.
        self.data_fetcher = dataloader = self.trainer._data_connector.get_profiled_dataloader(
            dataloader, dataloader_idx=dataloader_idx
        )
        dl_max_batches = self._max_batches[dataloader_idx]
        dl_outputs = self.epoch_loop.run(dataloader, dataloader_idx, dl_max_batches, self.num_dataloaders)
        # store batch level output per dataloader
        self._outputs.append(dl_outputs)
        if not self.trainer.sanity_checking:
            # indicate the loop has run
            self._has_run = True

    def on_run_end(self) -> List[_OUT_DICT]:
        """Runs the ``_on_evaluation_epoch_end`` hook."""
        outputs, self._outputs = self._outputs, []  # free memory
        # lightning module method
        self._evaluation_epoch_end(outputs)
        # hook
        self._on_evaluation_epoch_end()
        # log epoch metrics
        eval_loop_results = self.trainer.logger_connector.update_eval_epoch_metrics()
        # hook
        self._on_evaluation_end()
        # enable train mode again
        self._on_evaluation_model_train()
        return eval_loop_results

    def teardown(self) -> None:
        # Move accumulated results off the accelerator before releasing.
        self._results.cpu()
        self.epoch_loop.teardown()

    def _get_max_batches(self) -> List[int]:
        """Returns the max number of batches for each dataloader."""
        if self.trainer.testing:
            max_batches = self.trainer.num_test_batches
        else:
            if self.trainer.sanity_checking:
                # Sanity check runs at most num_sanity_val_steps per dataloader.
                self.trainer.num_sanity_val_batches = [
                    min(self.trainer.num_sanity_val_steps, val_batches) for val_batches in self.trainer.num_val_batches
                ]
                max_batches = self.trainer.num_sanity_val_batches
            else:
                max_batches = self.trainer.num_val_batches
        return max_batches

    def _reload_evaluation_dataloaders(self) -> None:
        """Reloads dataloaders if necessary."""
        if self.trainer.testing:
            self.trainer.reset_test_dataloader()
        elif self.trainer.val_dataloaders is None or self.trainer._should_reload_dl_epoch:
            self.trainer.reset_val_dataloader()

    def _on_evaluation_start(self, *args: Any, **kwargs: Any) -> None:
        """Runs ``on_{validation/test}_start`` hooks."""
        assert self._results is not None
        self._results.to(device=self.trainer.lightning_module.device)
        if self.trainer.testing:
            self.trainer.call_hook("on_test_start", *args, **kwargs)
        else:
            self.trainer.call_hook("on_validation_start", *args, **kwargs)

    def _on_evaluation_model_eval(self) -> None:
        """Sets model to eval mode."""
        if self.trainer.testing:
            self.trainer.call_hook("on_test_model_eval")
        else:
            self.trainer.call_hook("on_validation_model_eval")

    def _on_evaluation_model_train(self) -> None:
        """Sets model to train mode."""
        model_ref = self.trainer.lightning_module
        if self.trainer.testing:
            model_ref.on_test_model_train()
        else:
            model_ref.on_validation_model_train()

    def _on_evaluation_end(self, *args: Any, **kwargs: Any) -> None:
        """Runs ``on_{validation/test}_end`` hook."""
        if self.trainer.testing:
            self.trainer.call_hook("on_test_end", *args, **kwargs)
        else:
            self.trainer.call_hook("on_validation_end", *args, **kwargs)
        # reset the logger connector state
        self.trainer.logger_connector.reset_results()

    def _on_evaluation_epoch_start(self, *args: Any, **kwargs: Any) -> None:
        """Runs ``on_epoch_start`` and ``on_{validation/test}_epoch_start`` hooks."""
        self.trainer.logger_connector.on_epoch_start()
        self.trainer.call_hook("on_epoch_start", *args, **kwargs)
        if self.trainer.testing:
            self.trainer.call_hook("on_test_epoch_start", *args, **kwargs)
        else:
            self.trainer.call_hook("on_validation_epoch_start", *args, **kwargs)

    def _evaluation_epoch_end(self, outputs: List[EPOCH_OUTPUT]) -> None:
        """Runs ``{validation/test}_epoch_end``"""
        # inform logger the batch loop has finished
        self.trainer.logger_connector.epoch_end_reached()
        # call the model epoch end
        model = self.trainer.lightning_module
        # unset dataloader_idx in model
        model._current_dataloader_idx = None
        # with a single dataloader don't pass a 2D list
        output_or_outputs: Union[EPOCH_OUTPUT, List[EPOCH_OUTPUT]] = (
            outputs[0] if len(outputs) > 0 and self.num_dataloaders == 1 else outputs
        )
        if self.trainer.testing:
            if is_overridden("test_epoch_end", model):
                model._current_fx_name = "test_epoch_end"
                model.test_epoch_end(output_or_outputs)
        else:
            if is_overridden("validation_epoch_end", model):
                model._current_fx_name = "validation_epoch_end"
                model.validation_epoch_end(output_or_outputs)

    def _on_evaluation_epoch_end(self) -> None:
        """Runs ``on_{validation/test}_epoch_end`` hook."""
        hook_name = "on_test_epoch_end" if self.trainer.testing else "on_validation_epoch_end"
        self.trainer.call_hook(hook_name)
        self.trainer.call_hook("on_epoch_end")
        self.trainer.logger_connector.on_epoch_end()
|
#!/usr/bin/env python
"""
A script that helps initiate a new python library.
Dependencies:
$ pip install requests
Usage: pylibcreator.py --path /path/to/foo_lib --token <github_personal_token>
Does the following tasks for you:
1) Initializes a new private git repository for foo_lib on github and clones that to /path/to/foo_lib
2) Sets up package and tests directories
3) Sets up a setup.py file with initial contents
"""
#Standard Lib
import sys
import os
import getopt
import urllib2
import json
#External Dependencies
import requests
class LibCreator(object):
    """
    Class that maintains state about the library path and git user to help creating a python library and exposes a LibCreateor_self.run() method
    to execute the process of creating a new library.
    """
    # Python 2 code: uses print statements, StandardError and the
    # ``except X, e`` syntax; do not run under Python 3 without porting.
    def __init__(self, libpath, ghtoken):
        self._libpath = os.path.expanduser(libpath)
        self._ghtoken = ghtoken
    def _libname(self):
        """
        Returns the library name
        """
        return os.path.basename(self._libpath)
    def _libdir(self):
        """
        Returns the directory on the filesystem that contains the library
        """
        return os.path.dirname(self._libpath)
    def _setup_with_git(self):
        # Create a private GitHub repo via the REST API, then clone it into
        # the parent directory of the requested library path.
        if os.path.exists(self._libpath):
            raise StandardError("Directory already exists at {0}".format(self._libpath))
        old_cwd = os.getcwd() #Save current directory to get back to that later.
        os.chdir(self._libdir())
        try:
            github_create_url = "https://api.github.com/user/repos"
            data = json.dumps({'name':self._libname(),
                               'description':'A python library',
                               'private':True,
                               'has_wiki':True,
                               'auto_init':True,
                               'gitignore_template':'Python',})
            # Personal token is sent as the basic-auth username per GitHub's
            # x-oauth-basic convention.
            response = requests.post(github_create_url, data, auth=(self._ghtoken, 'x-oauth-basic'))
            if response.status_code != 201:
                raise StandardError("Unexpected response when trying to create repository\nResponse: {0}\n\n{1} \n".format(response.status_code, response.json()))
            response_body = response.json()
            repo_url = response_body['ssh_url']
            if repo_url == None or len(repo_url) == 0:
                raise StandardError("Could not get the repository url from github create repo response: %s".format(response_body))
            os.system("git clone {0}".format(repo_url))
        finally:
            os.chdir(old_cwd)
    def _setup_dir_structure(self):
        # Create package/tests directories plus an initial setup.py inside
        # the freshly cloned repository.
        old_cwd = os.getcwd()
        print "Setting up setup.py file and package and tests directories in {0} ... ".format(self._libpath)
        os.chdir(self._libpath)
        os.mkdir(self._libname())
        os.mkdir("tests")
        os.system("touch {0}/__init__.py".format(self._libname()))
        os.system("touch tests/__init__.py")
        os.system("touch setup.py")
        setup_string = """\
from distutils.core import setup
setup(
name='{0}',
version='0.0.1',
packages=['{0}', ],
license='MIT License',
description='',
long_description='',
) """.format(self._libname())
        setup_file_path = os.path.join(self._libpath, 'setup.py')
        if not os.path.exists(setup_file_path):
            raise StandardError("Trying to write to setup.py but it doesn't exist at path {0}".format(setup_file_path))
        print "Now writing to setup.py: {0}".format(setup_string)
        print "Initializing setup.py contents for {0} ... ".format(setup_file_path)
        with open(setup_file_path, 'w') as f:
            f.write(setup_string)
    def run(self):
        """
        Actually executes process of creating library.
        """
        try:
            self._setup_with_git()
            self._setup_dir_structure()
            os.chdir(self._libpath)
        except StandardError, e:
            print "Error setting up library: {0}".format(e)
            sys.exit(1)
def main():
path = ''
token = ''
usage_string = 'pylibcreate.py -p <path_for_library> -t <github_token>'
try:
opts, args = getopt.getopt(sys.argv[1:], "hp:t:", ["help", "path=", "token="])
except getopt.GetoptError:
print usage_string
sys.exit(2)
for opt, arg in opts:
if opt in ('-h', '--help'):
print usage_string
elif opt in ('-p', '--path'):
path = arg
elif opt in ('-t', '--token'):
token = arg
else:
print "Invalid option: " + opt
print "Usage: " + usage_string
sys.exit(2)
if len(path.strip()) == 0 or len(token.strip()) == 0:
print "Invalid arguments. Must provide non-empty path and user. \nUsage: " + usage_string
sys.exit(2)
lc = LibCreator(path, token)
lc.run()
if __name__ == '__main__':
main()
|
import streamlit as st
import math
import plotly.graph_objects as go
def round_decimals_down(number: float, decimals: int = 2):
    """
    Return *number* rounded down (toward negative infinity) to a specific
    number of decimal places.

    :param number: the value to round down
    :param decimals: non-negative count of decimal places to keep
    :raises TypeError: if decimals is not an integer
    :raises ValueError: if decimals is negative
    """
    if not isinstance(decimals, int):
        raise TypeError("decimal places must be an integer")
    elif decimals < 0:
        raise ValueError("decimal places has to be 0 or more")
    elif decimals == 0:
        # Fixed: math.ceil rounded *up* here, contradicting the function's
        # documented "rounded down" contract.
        return math.floor(number)
    factor = 10 ** decimals
    return math.floor(number * factor) / factor
def create_plotly_table(data):
    """Render *data* (mapping of column name -> column values) as a compact
    Plotly table inside the Streamlit app."""
    header = dict(
        values=list(data.keys()),
        line_color="white",
        fill_color="white",
        font=dict(size=12, color="black"),
        align="left",
    )
    cells = dict(
        values=[data.get(k) for k in data.keys()],
        align="left",
        # Alternate row background colors.
        fill=dict(color=[["#F9F9F9", "#FFFFFF"] * 5]),
    )
    fig = go.Figure(data=[go.Table(header=header, cells=cells)])
    fig.update_layout(
        autosize=False,
        height=150,
        margin=dict(l=20, r=20, b=10, t=30),
    )
    st.write(fig)
def local_css(file_name):
    """Inject the contents of a local CSS file into the Streamlit page."""
    with open(file_name) as css_file:
        styles = css_file.read()
    st.markdown("<style>{}</style>".format(styles), unsafe_allow_html=True)
def percentage_format(x):
    """Format a ratio as a whole-number percentage string (0.5 -> '50%')."""
    return "{:.0%}".format(x)
from aiogram.types import ReplyKeyboardRemove, \
ReplyKeyboardMarkup, KeyboardButton, \
InlineKeyboardMarkup, InlineKeyboardButton
from messages import MESSAGES, ROOMS, PETS_AND_FRIENDS
# Q&A mode keyboard: single button that lets the user leave Q&A mode.
Q_and_A_keyboard = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
q_and_exit_button = KeyboardButton(MESSAGES["exit_Q"])
Q_and_A_keyboard.add(q_and_exit_button)
# -----------------------------------------------------------------
# Questionnaire keyboards. Each keyboard is one step of the flow:
# PLACE -> FLOOR -> ROOM -> PET/FRIEND. Buttons are added in the
# display order the bot expects, one per row.
# PLACE
place_kb = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
place1_button = KeyboardButton(MESSAGES["place1"])
place2_button = KeyboardButton(MESSAGES["place2"])
place_kb.add(place2_button).add(place1_button)
# FLOOR (PLACE1)
place1_floor_kb = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
floor1_button = KeyboardButton(MESSAGES["floor1"])
floor2_button = KeyboardButton(MESSAGES["floor2"])
place1_floor_kb.add(floor1_button).add(floor2_button)
# FLOOR (IF PLACE2): the "floors" of place 2 are mountain bottom/top.
place2_floor_kb = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
floor3_button = KeyboardButton(MESSAGES["mountain_bot"])
floor4_button = KeyboardButton(MESSAGES["mountain_top"])
place2_floor_kb.add(floor3_button).add(floor4_button)
# PLACE1->FLOOR1 (MAIBORODA 1st floor) (ROOMS)
place1_floor1_kb = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
room1_button = KeyboardButton(ROOMS["room1"])
room2_button = KeyboardButton(ROOMS["room2"])
room3_button = KeyboardButton(ROOMS["room3"])
place1_floor1_kb.add(room1_button).add(room2_button).add(room3_button)
# PLACE1->FLOOR2 (MAIBORODA 2nd floor) (ROOMS)
place1_floor2_kb = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
room4_button = KeyboardButton(ROOMS["room4"])
room5_button = KeyboardButton(ROOMS["room5"])
room6_button = KeyboardButton(ROOMS["room6"])
place1_floor2_kb.add(room4_button).add(room5_button).add(room6_button)
# PLACE2->Bottom_of_mountain (Mountain bottom)
place2_floor1_kb = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
river_button = KeyboardButton(ROOMS["river"])
tree_button = KeyboardButton(ROOMS["tree"])
place2_floor1_kb.add(tree_button).add(river_button)
# PLACE2->TOP_OF_MOUNTAIN
place2_floor2_kb = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
igloo_button = KeyboardButton(ROOMS["igloo"])
cave_button = KeyboardButton(ROOMS["cave"])
place2_floor2_kb.add(cave_button).add(igloo_button)
# save_button = KeyboardButton(MESSAGES["save"])
# Per-room keyboards: choose between the two pets/friends of each room.
# room1
room1_kb = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
room1_friend1_b = KeyboardButton(PETS_AND_FRIENDS["room1_friend1"])
room1_friend2_b = KeyboardButton(PETS_AND_FRIENDS["room1_friend2"])
room1_kb.add(room1_friend2_b).add(room1_friend1_b)
# room2
room2_kb = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
room2_friend1_b = KeyboardButton(PETS_AND_FRIENDS["room2_friend1"])
room2_friend2_b = KeyboardButton(PETS_AND_FRIENDS["room2_friend2"])
room2_kb.add(room2_friend2_b).add(room2_friend1_b)
# room3
room3_kb = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
room3_friend1_b = KeyboardButton(PETS_AND_FRIENDS["room3_friend1"])
room3_friend2_b = KeyboardButton(PETS_AND_FRIENDS["room3_friend2"])
room3_kb.add(room3_friend2_b).add(room3_friend1_b)
# room4
room4_kb = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
room4_friend1_b = KeyboardButton(PETS_AND_FRIENDS["room4_friend1"])
room4_friend2_b = KeyboardButton(PETS_AND_FRIENDS["room4_friend2"])
room4_kb.add(room4_friend2_b).add(room4_friend1_b)
# room5
room5_kb = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
room5_friend1_b = KeyboardButton(PETS_AND_FRIENDS["room5_friend1"])
room5_friend2_b = KeyboardButton(PETS_AND_FRIENDS["room5_friend2"])
room5_kb.add(room5_friend2_b).add(room5_friend1_b)
# room6
room6_kb = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
room6_friend1_b = KeyboardButton(PETS_AND_FRIENDS["room6_friend1"])
room6_friend2_b = KeyboardButton(PETS_AND_FRIENDS["room6_friend2"])
room6_kb.add(room6_friend2_b).add(room6_friend1_b)
# river
river_kb = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
river1_b = KeyboardButton(PETS_AND_FRIENDS["pet_river1"])
river2_b = KeyboardButton(PETS_AND_FRIENDS["pet_river2"])
river_kb.add(river1_b).add(river2_b)
# tree
tree_kb = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
tree1_b = KeyboardButton(PETS_AND_FRIENDS["pet_tree1"])
tree2_b = KeyboardButton(PETS_AND_FRIENDS["pet_tree2"])
tree_kb.add(tree1_b).add(tree2_b)
# igloo
igloo_kb = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
igloo1_b = KeyboardButton(PETS_AND_FRIENDS["pet_igloo1"])
igloo2_b = KeyboardButton(PETS_AND_FRIENDS["pet_igloo2"])
igloo_kb.add(igloo2_b).add(igloo1_b)
# cave
cave_kb = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
cave1_b = KeyboardButton(PETS_AND_FRIENDS["pet_cave1"])
cave2_b = KeyboardButton(PETS_AND_FRIENDS["pet_cave2"])
cave_kb.add(cave2_b).add(cave1_b)
# -----------------------------------------------------------------
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from tinyrpc.protocols.jsonrpc import JSONRPCProtocol
from tinyrpc.transports.http import HttpPostClientTransport
from tinyrpc import RPCClient
# JSON-RPC client that sends each call as an HTTP POST to the local server.
rpc_client = RPCClient(
    JSONRPCProtocol(),
    HttpPostClientTransport('http://127.0.0.1:5000/')
)
# Proxy object: attribute access yields callables forwarded as RPC methods.
remote_server = rpc_client.get_proxy()
# call a method called 'reverse_string' with a single string argument
result = remote_server.reverse_string('Hello, World!')
print("Server answered:", result)
|
from flask import Flask
from celery_app import celery
from pymongo import MongoClient
# app factory
def create_app(config_name):
    """
    Build the Flask application: load the configuration named/given by
    *config_name* and register all blueprints.

    :param config_name: configuration object or import string accepted by
        ``app.config.from_object``
    :return: the configured Flask application
    """
    app = Flask(__name__)
    # Fixed: was ``app.config.from_object(app.config)``, which ignored
    # config_name and pointlessly loaded the config onto itself.
    app.config.from_object(config_name)
    # Register blueprints
    from celery_app.domainviews import domain_blueprint
    from celery_app.ipviews import ipscan_blueprint
    from celery_app.pluginviews import pluginscan_blueprint
    from celery_app.taskview import tasks_blueprint
    from celery_app.vulnviews import vuln_blueprint
    from celery_app.authen import user_blueprint
    app.register_blueprint(domain_blueprint)
    app.register_blueprint(ipscan_blueprint)
    app.register_blueprint(pluginscan_blueprint)
    app.register_blueprint(tasks_blueprint)
    app.register_blueprint(vuln_blueprint)
    app.register_blueprint(user_blueprint)
    return app
# celery: bind the shared celery instance to the Flask app context
def make_celery(app):
    """Wrap the shared ``celery`` instance so every task body runs inside
    the Flask application context (required for extensions/config access)."""
    class ContextTask(celery.Task):
        def __call__(self, *args, **kwargs):
            # Push the app context around the actual task execution.
            with app.app_context():
                return self.run(*args, **kwargs)
    celery.Task = ContextTask
    return celery
# Shared MongoDB client; connect=False defers the actual connection until
# first use (safe when the process forks, e.g. under celery workers).
client = MongoClient("127.0.0.1", 27017,connect=False)
# Select the mongodb database
papapa = client.papapa
# Collections used by the scanner components.
pa_domain=papapa.pa_domain
pa_sub_domain=papapa.pa_sub_domain
pa_ip=papapa.pa_ip
pa_plugin=papapa.pa_plugin
pa_vuln=papapa.pa_vuln
pa_taskid=papapa.pa_taskid
pa_user=papapa.pa_user
|
import filecmp
from itertools import combinations
from pathlib import Path
from typing import List
from src import settings
def handle_duplicates() -> List[Path]:
    """
    Move duplicate .jpg files from the current directory into a
    ``duplicates`` subdirectory.

    Two passes:
      1. Compare every pair of local files; for each identical pair move
         the one with the longer name (or the "greater" path on a tie).
      2. Compare the remaining local files against the configured picture
         directories (``settings["pic_paths"]``) and move local copies of
         files that already exist there.

    :return: the new paths of all moved files
    """
    extensions = [".jpg"]

    def _local_files() -> List[Path]:
        # Collect files with a handled extension in the current directory.
        found = []
        for file_ext in extensions:
            found.extend(Path().glob("*" + file_ext))
        return found

    def _move_to_duplicates(filepath: Path) -> Path:
        # Create the target directory lazily, only once a duplicate exists.
        Path("duplicates").mkdir(exist_ok=True)
        new_filepath = Path("duplicates", filepath.name)
        filepath.replace(new_filepath)
        return new_filepath

    result = []
    files = _local_files()
    for file1, file2 in combinations(files, 2):
        # If any of the two files have been moved, just continue the iteration
        if not file1.is_file() or not file2.is_file():
            continue
        if filecmp.cmp(file1, file2, shallow=False):
            # Keep the file with the shorter name.
            if len(file1.name) < len(file2.name):
                file_to_move = file2
            elif len(file1.name) > len(file2.name):
                file_to_move = file1
            else:
                file_to_move = file1 if file1 > file2 else file2
            result.append(_move_to_duplicates(file_to_move))
    # Re-scan: the first pass may have moved some files away.
    files = _local_files()
    for path in settings.get("pic_paths", []):
        for file_ext in extensions:
            for filepath in Path(path).glob("**/*" + file_ext):
                matching_file = next(
                    (f for f in files if f.name == filepath.name), None
                )
                if (
                    matching_file
                    and matching_file.is_file()
                    and filecmp.cmp(matching_file, filepath, shallow=False)
                ):
                    result.append(_move_to_duplicates(matching_file))
    return result
|
from pydantic import BaseSettings
from functools import lru_cache
class CdaSettings(BaseSettings):
    """CDA service connection settings, loaded from the environment
    (and, for missing values, from a local ``.env`` file)."""
    # Base URL of the CDA API endpoint.
    cda_endpoint: str
    # Credentials used to authenticate against the endpoint.
    cda_uid: str
    cda_secret: str

    class Config:
        # pydantic BaseSettings: fall back to values from this dotenv file.
        env_file = ".env"
        env_file_encoding = "utf-8"
@lru_cache()
def get_cda_settings():
    """Return the application settings; cached so the environment/.env file
    is read only once per process."""
    return CdaSettings()
|
import os
class PlaylistLibrary(object):
    """Writes .m3u playlist files for albums into a base directory."""

    def __init__(self, base_path):
        # Directory where the .m3u playlist files are written.
        self.__base_path = base_path

    def add_album(self, album_info):
        """
        Write an ``<album name>.m3u`` playlist listing every track of
        *album_info* with a path relative to the playlist directory.
        """
        m3u_path = os.path.join(self.__base_path, '%s.m3u' % album_info.name)
        # Fixed: the file handle was never closed; use a context manager.
        with open(m3u_path, 'wt') as m3u_file:
            m3u_file.write('# %s\n' % album_info.name)
            for track_info in album_info.itertracks():
                rel_path = self._rel_path(self.__base_path, track_info.path)
                m3u_file.write('%s\n' % rel_path)

    def _rel_path(self, path, file_path):
        """Return *file_path* expressed relative to *path* (e.g. '../x/y.mp3')."""
        common = os.path.commonprefix([path, file_path])
        common_parts = common.split(os.sep)[:-1]
        # Fixed: previously read self.__base_path here instead of the *path*
        # parameter (identical for the only call site, but inconsistent);
        # also renamed the parameter shadowing the builtin `file`.
        path_parts = path.split(os.sep)[len(common_parts):]
        file_parts = file_path.split(os.sep)[len(common_parts):]
        rel = os.path.join(*((['..'] * len(path_parts)) + file_parts))
        return rel
|
"""
Practical 9
Write a program to verify
ii) Eulers's Theorem
"""
import math
import time
import decimal
def gcd(a, b):
    """Greatest common divisor via the iterative Euclidean algorithm."""
    while b != 0:
        a, b = b, a % b
    return a
def phi(n):
    """
    Euler's totient: count the integers in [0, n) that are coprime to n.

    Uses math.gcd instead of the hand-rolled recursive helper: identical
    results, no recursion-depth limit, and the gcd runs at C speed.
    """
    return sum(1 for i in range(n) if math.gcd(i, n) == 1)
def euler(a, n, p):
    """
    Compute the two sides used to verify Euler's theorem
    (a**phi(n) == 1 (mod n) when gcd(a, n) == 1).

    :param a: base
    :param n: modulus
    :param p: exponent (phi(n) for the theorem)
    :return: (lhs, rhs) as floats, where lhs = a**p and rhs = a**p mod n
    """
    lhs = float(math.pow(a, p))
    # Fixed: the remainder is now computed with exact integer modular
    # exponentiation; the old float modulo lost precision once a**p exceeded
    # the float mantissa.
    rhs = float(pow(a, p, n))
    return lhs, rhs
# --- driver: read a and n, then time the verification of Euler's theorem ---
aValue = int(input("Enter value for a: "))
bValue = int(input("Enter value for n: "))
start = time.time()
# NOTE: this rebinds the name `phi` from the function to its integer result.
phi = phi(bValue)
e,f = euler(aValue,bValue,phi)
# Scientific notation (two decimals) for the potentially huge left-hand side.
de = '%.2E'% decimal.Decimal(e)
print("a^phi(n)=1(mod n) i.e {} = {}".format(de,f))
end = time.time()
print("Running time = {}".format(end - start))
"""
Output
Enter value for a: 10
Enter value for n: 11
a^phi(n)=1(mod n) i.e 1.00E+10 = 1.0
Running time = 0.000361204147339
"""
|
# Generated by Django 3.2.7 on 2021-11-10 19:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
import yamlfield.fields
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the AlertsConfig, Server
    and Metrics tables."""

    initial = True

    dependencies = [
        # The user model is swappable; depend on whatever is configured.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='AlertsConfig',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('yaml', yamlfield.fields.YAMLField()),
            ],
        ),
        migrations.CreateModel(
            name='Server',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ip', models.CharField(max_length=128)),
                ('uuid', models.UUIDField(default=uuid.uuid4, editable=False)),
                ('creation_date', models.DateTimeField(auto_now_add=True)),
                ('last_modified_setup', models.DateTimeField(auto_now_add=True)),
                ('last_seen', models.DateTimeField(auto_now_add=True)),
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='servers', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Metrics',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('url', models.URLField(max_length=512)),
                ('is_enabled', models.BooleanField(default=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='metrics', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
from unittest import TestCase
from leetcodepy.surrounded_regions import *
# Shared solver instance used by all tests in this module.
solution1 = Solution1()
# Expected board after solve(): fully-surrounded 'O' regions are captured;
# the bottom-row 'O' touches the border and therefore survives.
expected = [
    ['X', 'X', 'X', 'X'],
    ['X', 'X', 'X', 'X'],
    ['X', 'X', 'X', 'X'],
    ['X', 'O', 'X', 'X']
]
class TestSurroundedRegions(TestCase):
    """Verifies that Solution1.solve() mutates the board in place."""

    def test1(self):
        # One enclosed 'O' region (captured) plus one border-adjacent 'O'.
        board = [
            ['X', 'X', 'X', 'X'],
            ['X', 'O', 'O', 'X'],
            ['X', 'X', 'O', 'X'],
            ['X', 'O', 'X', 'X']
        ]
        solution1.solve(board)
        self.assertListEqual(expected, board)
|
import torch
import copy
import os
import operator
import re
import json
import numpy as np
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import default_collate
from torch.utils.data.sampler import BatchSampler, SequentialSampler, RandomSampler
from vilmedic.networks import *
from vilmedic.datasets import *
import torch_optimizer
from torch.optim import *
from torch_optimizer import *
from torch.optim.lr_scheduler import *
def get_eval_func(models):
    """Return the ``eval_func`` of the first model, unwrapping DataParallel."""
    model = models[0]
    if isinstance(model, nn.DataParallel):
        model = model.module
    return model.eval_func
def create_optimizer(config, logger, params, state_dict=None):
    """Instantiate the optimizer named by ``config.optimizer`` — looked up in
    torch.optim first, then torch_optimizer — and optionally restore state."""
    assert 'lr' in config.optim_params
    # Keep torch_optimizer's lookup in the elif so the module is only touched
    # when torch.optim does not provide the requested class.
    if hasattr(torch.optim, config.optimizer):
        optimizer_cls = getattr(torch.optim, config.optimizer)
    elif hasattr(torch_optimizer, config.optimizer):
        optimizer_cls = getattr(torch_optimizer, config.optimizer)
    else:
        raise NotImplementedError(config.optimizer)
    optimizer = optimizer_cls(params, **config.optim_params)
    logger.settings('Optimizer {} created'.format(type(optimizer).__name__))
    if state_dict is None or "optimizer" not in state_dict:
        logger.info(optimizer)
    else:
        optimizer.load_state_dict(state_dict["optimizer"])
        logger.info('Optimizer state loaded')
    return optimizer
def create_model(config, dl, logger, state_dict=None):
    """
    Build the model class named by ``config.model.proto``, optionally load
    weights from *state_dict*, wrap in DataParallel when several GPUs are
    visible, and move the model to GPU.
    """
    # Create model, give him dataloader also
    config = copy.deepcopy(config.model)
    # NOTE(review): eval() on a config-supplied class name — only safe with
    # trusted configuration files.
    model = eval(config.pop('proto'))(**config, dl=dl, logger=logger)
    logger.settings('Model {} created'.format(type(model).__name__))
    # eval_func is the method called by the Validator to evaluate the model
    assert hasattr(model, "eval_func")
    if state_dict is not None and "model" in state_dict:
        # Strip the 'module.' prefix that DataParallel adds to checkpoint keys.
        params = {k.replace('module.', ''): v for k, v in state_dict["model"].items()}
        model.load_state_dict(params)
        logger.info('Model state loaded')
    else:
        logger.info(model)
    if torch.cuda.device_count() > 1:
        logger.info("Using {} GPUs!".format(torch.cuda.device_count()))
        model = torch.nn.DataParallel(model)
    return model.cuda()
def create_data_loader(config, split, logger, called_by_validator=False):
    """
    Build a DataLoader for *split* using the dataset class named by
    ``config.dataset.proto``. Training (when not called by the validator)
    uses random sampling and verbose logging; eval/test is sequential.
    """
    dataset_config = copy.deepcopy(config.dataset)
    # NOTE(review): eval() on a config-supplied class name — only safe with
    # trusted configuration files.
    dataset = eval(dataset_config.proto)(split=split, ckpt_dir=config.ckpt_dir, **dataset_config)
    # Prefer the dataset's own collate function when it provides one.
    if hasattr(dataset, 'get_collate_fn'):
        collate_fn = dataset.get_collate_fn()
    else:
        collate_fn = default_collate
    if split == 'train' and not called_by_validator:
        logger.settings('DataLoader')
        logger.info(dataset)
        sampler = BatchSampler(
            RandomSampler(dataset),
            batch_size=config.batch_size,
            drop_last=False)
        # Fixed: missing space produced log lines like "UsingRandomSampler".
        logger.info('Using ' + type(sampler.sampler).__name__)
    else:  # eval or test
        sampler = BatchSampler(
            SequentialSampler(dataset),
            batch_size=config.batch_size,
            drop_last=False)
    return DataLoader(dataset,
                      num_workers=4,
                      collate_fn=collate_fn,
                      batch_sampler=sampler,
                      pin_memory=True)
def create_scaler(config, logger, state_dict=None):
    """Create the AMP GradScaler (enabled only when config.use_amp is truthy)
    and optionally restore its state."""
    use_amp = config.use_amp or False
    scaler = torch.cuda.amp.GradScaler(enabled=use_amp)
    logger.settings('Using scaler : {}'.format(scaler.is_enabled()))
    if state_dict is not None and "scaler" in state_dict:
        scaler.load_state_dict(state_dict["scaler"])
        logger.info('Scaler state loaded')
    return scaler
def create_training_scheduler(config, optimizer, logger, state_dict=None):
    """Build the TrainingScheduler from *config* and optionally restore its
    saved state from a checkpoint dict."""
    config = copy.deepcopy(config)
    training_scheduler = TrainingScheduler(
        lr_decay_func=config.lr_decay,
        optimizer=optimizer,
        early_stop_metric=config.early_stop_metric,
        early_stop_limit=config.early_stop,
        **config.lr_decay_params)
    logger.settings('Training scheduler created')
    if state_dict is None or "training_scheduler" not in state_dict:
        logger.info(training_scheduler)
    else:
        training_scheduler.load_state_dict(state_dict["training_scheduler"])
        logger.info('Training scheduler state loaded')
    return training_scheduler
class CheckpointSaver(object):
    """Keeps exactly one checkpoint file on disk, named
    ``<tag>_<epoch>_<seed>.pth``, deleting the previous file on each save."""

    def __init__(self, ckpt_dir, logger, seed, ckpt=None):
        """
        :param ckpt_dir: directory checkpoints are written to
        :param logger: project logger (needs .settings / .info)
        :param seed: run seed, embedded in the checkpoint filename
        :param ckpt: optional path of an existing checkpoint to resume from
        """
        self.ckpt_dir = ckpt_dir
        self.seed = seed
        self.logger = logger
        self.current_tag = None
        self.current_epoch = None
        if ckpt is not None:
            self.current_tag, self.current_epoch = self.extract_tag_and_step(ckpt)
            self.logger.settings(
                'Resuming checkpoint at epoch {} with tag {}.'.format(self.current_epoch, self.current_tag))

    def save(self, state_dict, tag, current_epoch):
        """Write a new checkpoint and remove the previously saved one."""
        if self.current_tag is not None:
            old_ckpt = os.path.join(self.ckpt_dir,
                                    '{}_{}_{}.pth'.format(self.current_tag, self.current_epoch, self.seed))
            assert os.path.exists(old_ckpt), old_ckpt
            os.remove(old_ckpt)
        # Round the metric tag so filenames stay short and stable.
        tag = np.round(tag, 6)
        path = os.path.join(self.ckpt_dir, '{}_{}_{}.pth'.format(tag, current_epoch, self.seed))
        torch.save(state_dict, path)
        self.logger.info('{} saved.'.format(path))
        self.current_tag = tag
        self.current_epoch = current_epoch

    def extract_tag_and_step(self, ckpt):
        """Parse ``.../<tag>_<epoch>_<seed>.pth`` into (float tag, int epoch)."""
        # Fixed: raw string and escaped dot — the old pattern's bare '.'
        # before 'pth' matched any character.
        groups = re.match(r'.*/(.*?)_(.*?)_(.*?)\.pth', ckpt)
        return float(groups.group(1)), int(groups.group(2))
class TrainingScheduler(object):
    """
    Couples a torch LR scheduler (built by name) with early stopping.

    The comparison direction is derived from ``early_stop_metric``: 'loss'
    means lower-is-better, anything else higher-is-better. ``step()`` is
    called once per epoch and reports whether to checkpoint or stop.
    """
    def __init__(self, lr_decay_func, optimizer, early_stop_metric, early_stop_limit, **lr_decay_params):
        super().__init__()
        self.epoch = 0
        self.early_stop = 0
        self.early_stop_limit = early_stop_limit
        # Default: maximize the eval metric.
        self.metric_comp_func = operator.gt
        self.mode = 'max'
        self.current_best_metric = -float('inf')
        self.lr_decay_params = lr_decay_params
        if early_stop_metric == 'loss':
            # Loss is minimized instead.
            self.metric_comp_func = operator.lt
            self.mode = 'min'
            self.current_best_metric = float('inf')
        self.scheduler_name = lr_decay_func
        if self.scheduler_name == 'ReduceLROnPlateau':
            # ReduceLROnPlateau needs to know the optimization direction.
            lr_decay_params["mode"] = self.mode
        # NOTE(review): eval() resolves the scheduler class from the names
        # star-imported from torch.optim.lr_scheduler — trusted config only.
        self.scheduler = eval(lr_decay_func)(optimizer, **lr_decay_params)
    def step(self, mean_eval_metric=None, training_loss=None):
        """Advance one epoch; return flags {"done_training", "save_state"}."""
        ret = {
            "done_training": False,
            "save_state": False,
        }
        self.epoch = self.epoch + 1
        # If eval has not started, dont compute early stop
        if mean_eval_metric is None:
            return ret
        # LR sched
        if self.scheduler_name == 'ReduceLROnPlateau':
            self.scheduler.step(mean_eval_metric)
        else:
            self.scheduler.step()
        # Early stop
        if self.metric_comp_func(mean_eval_metric, self.current_best_metric):
            self.current_best_metric = mean_eval_metric
            self.early_stop = 0
            ret["save_state"] = True
        else:
            self.early_stop += 1
            if self.early_stop == self.early_stop_limit:
                ret["done_training"] = True
        return ret
    def __repr__(self):
        s = "TrainingScheduler (\n"
        s += self.scheduler_name + "\n"
        s += str(json.dumps(dict(self.lr_decay_params), indent=4, sort_keys=True)) + '\n'
        s += 'Early stopping' + "\n"
        s += '    {0}: {1}\n'.format("early_stop_limit", self.early_stop_limit)
        s += '    {0}: {1}\n'.format("metric_comp_func", self.metric_comp_func)
        s += '    {0}: {1}\n'.format("mode", self.mode)
        s += '    {0}: {1}\n'.format("current_best_metric", self.current_best_metric)
        s += ')'
        return s
    def state_dict(self):
        """Serialize own attributes plus the wrapped scheduler's state."""
        training_sched = {key: value for key, value in self.__dict__.items() if key != 'scheduler'}
        training_sched["scheduler"] = self.scheduler.state_dict()
        return training_sched
    def load_state_dict(self, state_dict):
        if "scheduler" in state_dict:  # Retro compatible with older checkpoint version
            scheduler = state_dict.pop("scheduler")
            self.__dict__.update(state_dict)
            self.scheduler.load_state_dict(scheduler)
|
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import sys
import re
import os
from oscrypto import tls, errors as oscrypto_errors, version
from asn1crypto.util import OrderedDict
if sys.version_info < (3,):
from urlparse import urlparse
str_cls = unicode # noqa
else:
from urllib.parse import urlparse
str_cls = str
class HttpsClientException(Exception):
    """Base error raised by HttpsClient."""
    pass
class HttpsClientError(HttpsClientException):
    """HTTP-level failure (bad status code, malformed redirect, ...)."""
    pass
class HttpsClient():
    """
    Minimal HTTPS-only client built on oscrypto's TLSSocket.

    Supports keep-alive, chunked transfer encoding and 301 redirects;
    intended for small GET downloads.
    """

    def __init__(self, keep_alive=True, ignore_close=False):
        # keep_alive: send "Connection: Keep-Alive" and reuse the socket
        # ignore_close: do not close after a "Connection: close" response
        self.socket = None
        self.timeout = None
        self.url_info = None
        self.keep_alive = keep_alive
        self.ignore_close = ignore_close

    def close(self):
        """
        Closes any open connection
        """
        if not self.socket:
            return
        self.socket.close()
        self.socket = None

    def download(self, url, timeout):
        """
        Downloads a URL and returns the contents

        :param url:
            The URL to download

        :param timeout:
            The int number of seconds to set the timeout to

        :return:
            The string contents of the URL
        """
        self.setup_connection(url, timeout)
        tries = 0
        # NOTE(review): if both attempts fail (unparsable response or two
        # graceful TLS disconnects) the loop exhausts and this implicitly
        # returns None — confirm callers handle that.
        while tries < 2:
            tries += 1
            try:
                self.ensure_connected()
                req_headers = OrderedDict()
                req_headers['Host'] = self.url_info[0]
                if self.url_info[1] != 443:
                    # Non-default port must appear in the Host header.
                    req_headers['Host'] += ':%d' % self.url_info[1]
                req_headers['Connection'] = 'Keep-Alive' if self.keep_alive else 'Close'
                req_headers["User-Agent"] = 'oscrypto %s TLS HTTP Client' % version.__version__
                request = 'GET '
                url_info = urlparse(url)
                path = '/' if not url_info.path else url_info.path
                if url_info.query:
                    path += '?' + url_info.query
                request += path + ' HTTP/1.1'
                self.write_request(request, req_headers)
                response = self.read_headers()
                if not response:
                    # Unparsable status line (e.g. stale keep-alive socket):
                    # drop the connection and retry once.
                    self.close()
                    continue
                v, code, message, resp_headers = response
                data = self.read_body(code, resp_headers, timeout)
                if code == 301:
                    # Follow a permanent redirect; resolve relative targets
                    # against the original URL first.
                    location = resp_headers.get('location')
                    if not isinstance(location, str_cls):
                        raise HttpsClientError('Missing or duplicate Location HTTP header')
                    if not re.match(r'https?://', location):
                        if not location.startswith('/'):
                            location = os.path.dirname(url_info.path) + location
                        location = url_info.scheme + '://' + url_info.netloc + location
                    return self.download(location, timeout)
                if code != 200:
                    raise HttpsClientError('HTTP error %s downloading %s.' % (code, url))
                else:
                    return data
            except (oscrypto_errors.TLSGracefulDisconnectError):
                # Server closed the TLS session cleanly; reconnect and retry.
                self.close()
                continue

    def setup_connection(self, url, timeout):
        """
        :param url:
            The URL to download

        :param timeout:
            The int number of seconds to set the timeout to

        :return:
            A boolean indicating if the connection was reused
        """
        url_info = urlparse(url)
        if url_info.scheme == 'http':
            raise HttpsClientException('Can not connect to a non-TLS server')
        hostname = url_info.hostname
        port = url_info.port
        if not port:
            port = 443
        # A socket open to a different host/port cannot be reused.
        if self.socket and self.url_info != (hostname, port):
            self.close()
        self.timeout = timeout
        self.url_info = (hostname, port)
        return self.ensure_connected()

    def ensure_connected(self):
        """
        Make sure a valid tls.TLSSocket() is open to the server

        :return:
            A boolean indicating if the connection was reused
        """
        if self.socket:
            return True
        host, port = self.url_info
        session = tls.TLSSession()
        self.socket = tls.TLSSocket(host, port, timeout=self.timeout, session=session)
        return False

    def write_request(self, request, headers):
        """
        :param request:
            A unicode string of the first line of the HTTP request

        :param headers:
            An OrderedDict of the request headers
        """
        lines = [request]
        for header, value in headers.items():
            lines.append('%s: %s' % (header, value))
        # Blank line terminates the header section.
        lines.extend(['', ''])
        request = '\r\n'.join(lines).encode('iso-8859-1')
        self.socket.write(request)

    def read_headers(self):
        """
        Reads the HTTP response headers from the socket

        :return:
            On error, None, otherwise a 4-element tuple:
              0: A 2-element tuple of integers representing the HTTP version
              1: An integer representing the HTTP response code
              2: A unicode string of the HTTP response code name
              3: An OrderedDict of HTTP headers with lowercase unicode key and unicode values
        """
        version = None
        code = None
        text = None
        headers = OrderedDict()
        data = self.socket.read_until(b'\r\n\r\n')
        string = data.decode('iso-8859-1')
        first = False
        for line in string.split('\r\n'):
            line = line.strip()
            if first is False:
                # Still looking for the status line.
                if line == '':
                    continue
                match = re.match(r'^HTTP/(1\.[01]) +(\d+) +(.*)$', line)
                if not match:
                    return None
                version = tuple(map(int, match.group(1).split('.')))
                code = int(match.group(2))
                text = match.group(3)
                first = True
            else:
                if not len(line):
                    continue
                parts = line.split(':', 1)
                if len(parts) == 2:
                    name = parts[0].strip().lower()
                    value = parts[1].strip()
                    if name in headers:
                        # Repeated headers accumulate into a tuple of values.
                        if isinstance(headers[name], tuple):
                            headers[name] = headers[name] + (value,)
                        else:
                            headers[name] = (headers[name], value)
                    else:
                        headers[name] = value
        return (version, code, text, headers)

    def parse_content_length(self, headers):
        """
        Returns the content-length from a dict of headers

        :return:
            An integer of the content length
        """
        content_length = headers.get('content-length')
        # NOTE(review): a duplicated Content-Length header arrives here as a
        # tuple and is returned unconverted — confirm that is intended.
        if isinstance(content_length, str_cls) and len(content_length) > 0:
            content_length = int(content_length)
        return content_length

    def read_body(self, code, resp_headers, timeout):
        """
        Reads the response body, honouring chunked transfer encoding and
        Content-Length, and closes the connection when the server asked to.

        :return:
            A byte string of the response body
        """
        data = b''
        transfer_encoding = resp_headers.get('transfer-encoding')
        if transfer_encoding and transfer_encoding.lower() == 'chunked':
            while True:
                # Each chunk starts with its hex length on its own line.
                line = self.socket.read_until(b'\r\n').decode('iso-8859-1').rstrip()
                if re.match(r'^[a-fA-F0-9]+$', line):
                    chunk_length = int(line, 16)
                    if chunk_length == 0:
                        break
                    data += self.socket.read_exactly(chunk_length)
                    if self.socket.read_exactly(2) != b'\r\n':
                        raise HttpsClientException('Unable to parse chunk newline')
                else:
                    self.close()
                    raise HttpsClientException('Unable to parse chunk length')
        else:
            content_length = self.parse_content_length(resp_headers)
            if content_length is not None:
                if content_length > 0:
                    data = self.socket.read_exactly(content_length)
            elif code == 304 or code == 204 or (code >= 100 and code < 200):
                # Per https://tools.ietf.org/html/rfc7230#section-3.3.3 these have no body
                pass
            else:
                # This should only happen if the server is going to close the connection
                while self.socket.select_read(timeout=timeout):
                    data += self.socket.read(8192)
                self.close()
        if not self.ignore_close and resp_headers.get('connection', '').lower() == 'close':
            self.close()
        return data
|
# ADD OLD DATES AND TIMES OF MESSAGES SO IT IS CHRONOLOGICAL, REPLACE REAL NAMES WITH FICTIONAL NAMES
def remove_white_lines(file):
    """
    Copy *file* to 'chat_stripped.txt', dropping blank lines (lines that
    contain nothing but a newline).

    :param file: path of the chat export to clean
    :return: the (closed) file object of 'chat_stripped.txt'
    """
    # Fixed: both files are now closed deterministically via context managers
    # (the input handle was previously never closed).
    with open('chat_stripped.txt', 'w', encoding='utf-8') as stripped:
        with open(file, 'r', encoding='utf-8') as f:
            for line in f:
                # Keep any line with content beyond the newline character.
                if len(line) > 1:
                    stripped.write(line)
    return stripped
def replace_names(file):
    """
    Copy *file* to 'romeo_juliet.txt', replacing the real chat participants'
    names with Romeo-and-Juliet character names.

    :param file: path of the chat file to anonymize
    :return: the (closed) file object of 'romeo_juliet.txt'
    """
    # NAMES TO REMOVE:
    # removed because of privacy reasons, replace * with a specific name from your chat in case you want to
    # run this code yourself to generate your own romeo/juliet story!
    # NOTE(review): with the placeholders left as '*', only the FIRST
    # .replace('*', 'Romeo') takes effect (it consumes every asterisk); the
    # chain only works once each '*' is replaced by a distinct real name.
    target = open('romeo_juliet.txt', 'w', encoding='utf-8')
    with open(file, 'r+', encoding='utf-8') as f:
        for line in f:
            target.write(line.replace('*', 'Romeo').replace('*', 'Juliet').replace('*', 'Rosaline')
                         .replace('*', 'Benvolio').replace('*', 'Nurse ').replace('*', 'Mercutio').replace('*', 'Prince Escalus')
                         .replace('Richard', 'Friar John').replace('*', 'Balthasar').replace('*', 'Capulet'))
    target.close()
    return target
def add_titles_clean_timestamps(timestamps, file):
    """
    Write 'clean_chat_titles.txt': a copy of *file* where each new day
    (taken from the first 8 characters of the timestamps) is inserted as an
    HTML title line, and the per-line timestamp prefix is stripped.

    :param timestamps: list of per-line timestamp strings (first 8 chars are
        the day-month, e.g. '01-02-21' — presumably; TODO confirm format)
    :param file: chat file whose lines start with the same 8-char prefix
    :return: the (closed) file object of 'clean_chat_titles.txt'
    """
    day_month = []
    # Collect each distinct day prefix once, in order of first appearance.
    for i in range(len(timestamps)):
        if '-' in timestamps[i][:8] and timestamps[i][:8] not in day_month:
            day_month.append(timestamps[i][:8])
    # ADD DATES AS TITLES IN CHAT
    with_titles = open('clean_chat_titles.txt', 'w', encoding='utf-8')
    idx = 0
    with open(file, 'r', encoding='utf-8') as f:
        for line in f:
            # When a line starts the next pending day, emit a styled title.
            if line[:8] == day_month[idx] and idx < len(day_month) - 1:
                if idx == 0:
                    whitespace = 0
                else:
                    # Visual spacing before every title except the first.
                    whitespace = 7
                word_list = str.split(line)
                last_word = word_list[-1]
                with_titles.write(whitespace * '<br/>' + '<font fontSize = 14><b>' + day_month[idx] + ' | ' +
                                  last_word.capitalize() + '</b> </font>' + 2 * '<br/>')
                idx += 1
                # Strip the 8-char day prefix from the message line itself.
                with_titles.write(line[8:])
            else:
                with_titles.write(line[8:])
    with_titles.close()
    return with_titles
def make_time_chronological(source_file, generated_text):
    """
    Write 'chronological_generated.txt': each line of *generated_text* with
    its first 15 characters replaced by the timestamp of the corresponding
    line of *source_file*, restoring chronological order.

    :param source_file: original chat whose lines begin with a 15-char timestamp
    :param generated_text: generated chat to re-stamp (assumed to have at most
        as many lines as source_file — otherwise this raises IndexError)
    :return: (timestamps, dest) — the timestamp list and the dest file object
    """
    with open(source_file, 'r', encoding='utf-8') as f:
        timestamps = []
        for line in f:
            # First 15 characters hold the date/time prefix.
            timestamps.append(line[:15])
    # NOTE(review): dest is returned still open and is never closed here —
    # callers must close it (or data may not be flushed).
    dest = open('chronological_generated.txt', 'w', encoding='utf-8')
    with open(generated_text, 'r', encoding='utf-8') as f:
        idx = 0
        for line in f:
            dest.write(timestamps[idx] + line[15:])
            idx += 1
    return timestamps, dest
|
from django.contrib import admin
from .models import Service, Organisation
# Expose both models in the Django admin with the default ModelAdmin options.
admin.site.register(Service)
admin.site.register(Organisation)
|
import numpy as np
from ..mixins import StrMixin
class Kernel(StrMixin):
    """Kernel functions for projecting pairs of sample matrices.

    The kernel is selected by name at construction time; extra keyword
    arguments parametrize it (``p`` for ``poly``, ``gamma`` for ``rbf``).
    ``None`` selects the plain linear kernel.
    """

    def __init__(self, kernel: str = None, **kwargs):
        self._name = kernel
        self._kernel = kernel
        self._kwargs = kwargs

    def project(self, x1: np.ndarray, x2: np.ndarray) -> np.ndarray:
        """Apply the configured kernel to the two sample matrices."""
        kernel_name = self._kernel
        if kernel_name is None:
            return self.identical(x1, x2)
        return getattr(self, kernel_name)(x1, x2)

    @staticmethod
    def identical(x1, x2):
        # Linear kernel: plain inner products.
        return x1.dot(x2.T)

    def poly(self, x1, x2):
        # Polynomial kernel (x1·x2ᵀ + 1) ** p, default degree 3.
        degree = self._kwargs.get("p", 3)
        return (x1.dot(x2.T) + 1.) ** degree

    def rbf(self, x1, x2):
        # Gaussian RBF: exp(-gamma * squared pairwise distances).
        gamma_coef = self._kwargs.get("gamma", 1.)
        return np.exp(-gamma_coef * np.sum((x1[..., None, :] - x2) ** 2, axis=2))
__all__ = ["Kernel"]
|
from models.model import BaseModel
from keras.models import Model
from keras.layers import LSTM, Input, concatenate, LeakyReLU, Dense, Dropout, ReLU
from keras.activations import tanh
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from keras.losses import Huber
from metrics import Metrics
import numpy as np
class LSTM_nn(BaseModel):
    def __init__ (self, generator, cfg, **kwargs):
        """
        :param generator: data generator exposing .X (features) and .y (targets)
        :param cfg: config object (uses batch_size and past_values)
        :param kwargs: must include "arch_type": 1 builds one LSTM per feature
            group, any other value builds a single LSTM over all features
        """
        super().__init__(generator, cfg, **kwargs)
        self.arch = kwargs["arch_type"]
        if(self.arch == 1):
            self.build_nn()
        else:
            self.build_nn_2()
def prep_data(self, X=None):
df = self.generator.X if X is None else X
std = np.array(df["sentiment_score_std"])
sentiment = df[["sentiment_score_mean_n_1", "sentiment_score_mean_n_2", 'sentiment_score_mean_n_3']].values[..., np.newaxis].astype('float32')
ret = df[["log_returns_n_1", 'log_returns_n_2', 'log_returns_n_3']].values[..., np.newaxis].astype('float32')
macd = df[['signal_macd_n_1', 'signal_macd_n_2', 'signal_macd_n_3']].values[..., np.newaxis].astype('float32')
rsi = df[['rsi_n_1', 'rsi_n_2', 'rsi_n_3']].values[..., np.newaxis].astype('float32')
#print(sentiment.shape)
#print(np.concatenate([sentiment, ret, macd, rsi], axis=2).shape)
final = [std, sentiment, ret, macd, rsi] if (self.arch == 1) else [std, np.concatenate([sentiment, ret, macd, rsi], axis=2)]
return [std, np.concatenate([sentiment, ret, macd, rsi], axis=2)], self.generator.y
def train(self):
X, y = self.prep_data()
X_train, y_train = list(map(lambda x: x[:-10], X)), y[:-10]
X_val, y_val = list(map(lambda x: x[-10:], X)), y[-10:]
self.model.fit(X_train, y_train , batch_size=self.cfg.batch_size, validation_data=(X_val, y_val), epochs=1000, callbacks=self.callbacks())
def predict (self, X):
#self.generator.mode = DataMode.TESTING
#preds = []
#for x, y in self.generator:
# preds.append(self.model.predict(x))
# self.model.train_on_batch(x, y)
pred, _ = self.prep_data(X)
return self.model.predict(pred)
def build_nn(self):
print("building")
sentiment = Input(shape=(self.cfg.past_values, 1))
first_LSTM = LSTM(1, )(sentiment)
ret = Input(shape=(self.cfg.past_values, 1))
second_LSTM = LSTM(1, )(ret)
rsi = Input(shape=(self.cfg.past_values, 1))
third_LSTM = LSTM(1, )(rsi)
macd = Input(shape=(self.cfg.past_values, 1))
fourth_LSTM = LSTM(1, )(macd)
merge_one = concatenate([first_LSTM, second_LSTM, third_LSTM, fourth_LSTM])
std = Input(shape=(1, ))
merge_two = concatenate([merge_one, std])
hidden_layers = Dense(8, )(merge_two,)
hidden_layers = tanh(hidden_layers)
hidden_layers = Dropout(0.5)(hidden_layers)
output_layer = Dense(1,)(hidden_layers)
self.model = Model(inputs=[std, sentiment, ret, macd, rsi], outputs=output_layer)
self.model.compile(optimizer="adam", loss="mean_absolute_error",
metrics=Metrics._get_all())
print(self.model.summary())
def build_nn_2(self):
print("building")
sentiment = Input(shape=(self.cfg.past_values, 4))
first_LSTM = LSTM(1, )(sentiment)
std = Input(shape=(1, ))
merge_two = concatenate([first_LSTM, std])
hidden_layers = Dense(7, )(merge_two,)
hidden_layers = ReLU()(hidden_layers)
hidden_layers = Dropout(0.5)(hidden_layers)
output_layer = Dense(1,)(hidden_layers)
self.model = Model(inputs=[std, sentiment], outputs=output_layer)
self.model.compile(optimizer="adam", loss="mean_absolute_error",
metrics=Metrics._get_all())
print(self.model.summary())
def callbacks(self):
"""
function to get checkpointer, early stopper and lr_reducer in our CNN
"""
#Stop training when f1_m metric has stopped improving for 10 epochs
earlystopper = EarlyStopping(monitor = "val_loss",
mode='min',
patience = 10,
verbose = 1,
restore_best_weights = True)
#Reduce learning rate when loss has stopped improving for 5 epochs
lr_reducer = ReduceLROnPlateau(monitor='loss',
mode='min',
factor=0.5,
patience=5,
min_delta= 0.001,
min_lr=0.000001,
verbose=1)
return [earlystopper, lr_reducer]
|
from typing import Optional
import torch
from fn import F
from torch import Tensor, zeros
from torch.nn import Parameter, LayerNorm, Linear, MultiheadAttention, GELU, Identity, ModuleList
from . import MLP, ResidualBlock
from ..neko_module import NekoModule
from ..layer import PositionalEmbedding, Concatenate
from ..util import ModuleFactory, compose, Shape
class AttentionModule(NekoModule):
    """Self-attention wrapper around :class:`torch.nn.MultiheadAttention`.

    Projects the input to Q, K and V with three linear layers and feeds them
    into multi-head attention (Vaswani et al., 2017):

        input -> (Q, K, V) -> MultiheadAttention

    Args mirror ``torch.nn.MultiheadAttention`` (embed_dim, num_heads,
    dropout, bias, add_bias_kv, add_zero_attn, kdim, vdim). When
    ``return_attention_weights`` is True, ``forward`` returns the pair
    ``(output, attention_weights)`` instead of the output alone.

    Attributes:
        q_linear / k_linear / v_linear (:class:`~torch.nn.Linear`): Q/K/V
            projections.
        attention (:class:`~torch.nn.MultiheadAttention`): the attention layer.
    """

    def __init__(self, embed_dim: int, num_heads: int, dropout: float = 0., bias: bool = True,
        add_bias_kv: bool = False, add_zero_attn: bool = False, kdim: Optional[int] = None, vdim: Optional[int] = None,
        return_attention_weights: bool = False
    ):
        super().__init__()
        # Fall back to embed_dim when no explicit key/value dims are given.
        self.kdim = embed_dim if kdim is None else kdim
        self.vdim = embed_dim if vdim is None else vdim
        self.q_linear = Linear(embed_dim, self.kdim)
        self.k_linear = Linear(embed_dim, self.kdim)
        self.v_linear = Linear(embed_dim, self.vdim)
        # The original (possibly None) kdim/vdim are forwarded unchanged.
        self.attention = MultiheadAttention(
            embed_dim, num_heads, dropout, bias, add_bias_kv, add_zero_attn,
            kdim, vdim, batch_first=True
        )
        self.return_attention_weights = return_attention_weights

    def forward(self, x: Tensor) -> Tensor:
        # Project the same input into query/key/value, then attend.
        attn_out, attn_weight = self.attention(
            self.q_linear(x), self.k_linear(x), self.v_linear(x)
        )
        if self.return_attention_weights:
            return attn_out, attn_weight
        return attn_out
class TransformerEncoderBlock(NekoModule):
    """
    The TransformerEncoderBlock is a block in Transformer encoder which is proposed by Vaswani, et al. (2017).
    This TransformerEncoderBlock contains the multi-head attention module and the feed-forward module.
    input -> concat cls token -> add positional encoding -> AttentionModule -> FFN
    Args:
        input_shape (:class:`~tensorneko.util.type.Shape`): The shape of input sequence (N, D). N for length of
            sequence. D for embedding dimension.
        num_heads (``int``): Parallel attention heads.
        has_cls_token (``bool``, optional): The input will concat to a cls token if True. Default ``False``.
        has_pos_encoding (``bool``, optional): The input will add positional encoding if True. Default ``False``.
        linear_drop (``float``, optional): The dropout rate for linear layers. Default ``0.5``.
        attention_drop (``float``, optional): The dropout rate for attention layers. Default ``0.5``.
        build_normalization (``() -> torch.nn.Module``, optional): The normalization builder function for the block.
            Default :class:`~torch.nn.LayerNorm`.
        mlp_ratio (``float``, optional): The MLP ratio, which is the multiplication of hidden layers in FFN. e.g, if the
            embedding is 1024, and the mlp_ratio is 4.0. The FFN will be 1024 -> 4096 -> 1024. Default ``4.0``.
        build_mlp_activation (``() -> torch.nn.Module``, optional): The activation builder for FFN.
            Default :class:`~torch.nn.GELU`.
    Attributes:
        cls_token (:class:`~torch.nn.Parameter`): The trainable cls token parameters.
        pos_emb_layer (:class:`~tensorneko.layer.PositionalEmbedding`): The positional embedding layer.
        attn_module (:class:`~tensorneko.module.ResidualBlock`): The attention module with residual connection.
        feedforward_module (:class:`~tensorneko.module.ResidualBlock`): The MLP module with residual connection.
    References:
        Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., ... & Polosukhin, I. (2017).
        Attention is all you need. In Advances in neural information processing systems (pp. 5998-6008).
    """
    def __init__(self, input_shape: Shape, num_heads: int, has_cls_token: bool = False,
        has_pos_encoding: bool = True,
        linear_drop: float = 0.5, attention_drop: float = 0.5,
        build_normalization: Optional[ModuleFactory] = LayerNorm,
        mlp_ratio: float = 4.0, build_mlp_activation: Optional[ModuleFactory] = GELU
    ):
        super().__init__()
        # n: number of patches, d: embedding dimension
        n, d = input_shape
        # num of head
        self.num_head = num_heads
        # positional embedding
        self.has_cls_token = has_cls_token
        if self.has_cls_token:
            # prepare for class token; the sequence grows by one token,
            # so downstream layers are built with (n + 1, d).
            self.cls_token = Parameter(zeros(1, d))
            self.token_concat = Concatenate(dim=0)
            input_shape = (n + 1, d)
        self.has_pos_encoding = has_pos_encoding
        if self.has_pos_encoding:
            self.pos_emb_layer = PositionalEmbedding(input_shape=input_shape, dropout_rate=linear_drop)
        # set normalization builder
        # NOTE(review): LayerNorm is built over the full (N, D) input_shape,
        # i.e. it normalizes over the sequence dimension as well as the
        # embedding dimension. ViT-style blocks usually normalize over D only
        # — confirm this is intended.
        if build_normalization is LayerNorm:
            build_normalization = F(LayerNorm, input_shape)
        elif build_normalization is None:
            build_normalization = Identity
        # multi-head attention module with residual connection and normalization
        self.attn_module = ResidualBlock(
            sub_module_layers=(build_normalization, F(AttentionModule, d, self.num_head, attention_drop)),
            tail_module_layers=None
        )
        # FFN (norm -> MLP d -> d*mlp_ratio -> d) with residual connection;
        # here the layer tuple is passed positionally, unlike attn_module above.
        self.feedforward_module = ResidualBlock((
            build_normalization,
            F(MLP, [d, int(d * mlp_ratio), d],
                build_activation=build_mlp_activation, dropout_rate=linear_drop
            ))
        )
    def forward(self, x: Tensor) -> Tensor:
        # Compose the pipeline lazily with fn.F, then apply it to x once.
        f = F()
        if self.has_cls_token:
            # Prepend the cls token to every sequence in the batch.
            # NOTE(review): this maps over x, which assumes x iterates as a
            # batch of (N, D) token tensors — confirm against callers.
            f = f >> (map, lambda tokens: self.token_concat([self.cls_token, tokens])) >> list >> torch.stack
        if self.has_pos_encoding:
            f = f >> self.pos_emb_layer
        f = f >> self.attn_module >> self.feedforward_module
        return f(x)
class TransformerEncoder(NekoModule):
    """
    The TransformerEncoder repeatedly generate :class:`TransformerEncoderBlock` with specified times.
    Args:
        input_shape (:class:`~tensorneko.util.type.Shape`): The shape of input sequence (N, D). N for length of
            sequence. D for embedding dimension.
        num_heads (``int``): Parallel attention heads.
        has_cls_token (``bool``, optional): The input will concat to a cls token if True. Default ``False``.
        linear_drop (``float``, optional): The dropout rate for linear layers. Default ``0.5``.
        attention_drop (``float``, optional): The dropout rate for attention layers. Default ``0.5``.
        build_normalization (``() -> torch.nn.Module``, optional): The normalization builder function for the block.
            Default :class:`~torch.nn.LayerNorm`.
        mlp_ratio (``float``, optional): The MLP ratio, which is the multiplication of hidden layers in FFN. e.g, if the
            embedding is 1024, and the mlp_ratio is 4.0. The FFN will be 1024 -> 4096 -> 1024. Default ``4.0``.
        build_mlp_activation (``() -> torch.nn.Module``, optional): The activation builder for FFN.
            Default :class:`~torch.nn.GELU`.
        pos_encoding (``str``, optional): The option of where you want to add positional encoding. ``"all"`` for all
            blocks. ``"first"`` for first block only. ``"none"`` for no adding. Default ``"all"``.
        repeat (``int``, optional): The repeat time of TransformerEncoderBlock.
    Raises:
        ValueError: If ``pos_encoding`` is not one of ``"all"``, ``"first"``, ``"none"``.
    Attributes:
        blocks (:class:`~torch.nn.ModuleList`): The blocks list in the module.
    """
    def __init__(self, input_shape: Shape, num_heads: int, has_cls_token: bool = False, linear_drop: float = 0.5,
        attention_drop: float = 0.5, build_normalization: Optional[ModuleFactory] = LayerNorm, mlp_ratio: float = 4.0,
        build_mlp_activation: Optional[ModuleFactory] = GELU, pos_encoding: str = "all",
        repeat: int = 1
    ):
        super().__init__()
        # BUG FIX: the "first" and "none" branches previously assigned
        # has_cls_token instead of has_pos_encoding, which clobbered the
        # cls-token flag and left has_pos_encoding undefined (NameError in
        # build_block). Also reject unknown options explicitly.
        if pos_encoding == "all":
            has_pos_encoding = [True] * repeat
        elif pos_encoding == "first":
            has_pos_encoding = [True] + (repeat - 1) * [False]
        elif pos_encoding == "none":
            has_pos_encoding = [False] * repeat
        else:
            raise ValueError('pos_encoding must be "all", "first" or "none", got {!r}'.format(pos_encoding))
        # NOTE(review): when has_cls_token is True every block prepends a
        # token while input_shape stays fixed for all blocks — confirm the
        # intended behavior for repeat > 1.
        def build_block(i):
            # One encoder block; the per-block flag selects positional encoding.
            return TransformerEncoderBlock(
                input_shape, num_heads, has_cls_token,
                has_pos_encoding[i],
                linear_drop, attention_drop,
                build_normalization, mlp_ratio, build_mlp_activation
            )
        self.blocks = ModuleList([build_block(i) for i in range(repeat)])
        self.repeat = repeat
    def forward(self, x: Tensor) -> Tensor:
        # Apply the encoder blocks sequentially.
        return compose(self.blocks)(x)
|
def termos_pa(primeiro, razao, quantidade=10):
    """Return the first `quantidade` terms of the arithmetic progression."""
    return [primeiro + razao * i for i in range(quantidade)]


def main():
    """Read the first term and common difference, then print 10 terms."""
    p1 = int(input('Primeiro termo: '))
    r = int(input('Razão: '))
    # BUG FIX: the original while-loop (p1 < p + r*10) spun forever for
    # r == 0 and printed no terms for negative r; iterating a fixed count
    # always prints exactly 10 terms.
    for termo in termos_pa(p1, r):
        print(termo, '->', end=' ')
    print('Fim')


if __name__ == '__main__':
    main()
from .. import Globals
import PyFileIO as pf
def _ReadMET():
    '''
    Reads a data file containing the mission elapsed times (METs) at the
    start of every date from 20080101 - 20150430.

    Returns:
        numpy.recarray
    '''
    # Columns: integer date (yyyymmdd), UT as a float, and the MET value.
    met_dtype = [('Date', 'int32'), ('ut', 'float32'), ('MET', 'float64')]
    met_path = Globals.ModulePath + '__data/MessengerMET.dat'
    return pf.ReadASCIIData(met_path, dtype=met_dtype)
|
from django import forms
from .models import *
class MovieForm(forms.ModelForm):
    """ModelForm exposing the user-editable Movie fields, including the
    image and video upload fields."""
    class Meta:
        model = Movie
        fields = ('title', 'creators', 'cast', 'description', 'genre', 'typ', 'seasons', 'release_date', 'image', 'video')
class ReviewForm(forms.ModelForm):
    """ModelForm for submitting a Review: free-text comment plus a rating."""
    class Meta:
        model = Review
        fields = ("comment", "rating")
import random
import math
import bisect
import numpy
class Instance:
    """Base class for a random k-SAT instance rendered in DIMACS CNF form."""

    def __init__(self, num_vars, num_clauses, k):
        # Problem dimensions: variable count, clause count, literals per clause.
        self.num_vars = num_vars
        self.num_clauses = num_clauses
        self.k = k
        # Variables are numbered 1..num_vars, as DIMACS requires.
        self.variables = range(1, num_vars + 1)
        # Maps clause index -> list of signed literals.
        self.clauses = {}

    def generate(self):
        """Populate self.clauses; subclasses supply the sampling scheme."""
        raise NotImplementedError()

    def __str__(self):
        # DIMACS header followed by one zero-terminated line per clause.
        header = "p cnf {} {}\n".format(self.num_vars, self.num_clauses)
        clause_lines = (
            " ".join(map(str, clause)) + " 0" for clause in self.clauses.values()
        )
        return header + "\n".join(clause_lines)
class UniformInstance(Instance):
    """k-SAT instance whose clause variables are drawn uniformly at random."""

    def generate(self):
        for clause_idx in range(self.num_clauses):
            # Pick k distinct variables, then flip each sign with prob 1/2.
            # (Same RNG calls in the same order as before: random.sample
            # followed by one numpy randint per literal.)
            picked = sorted(random.sample(self.variables, self.k))
            self.clauses[clause_idx] = [
                var * ((-1) ** numpy.random.randint(2)) for var in picked
            ]
class PowerInstance(Instance):
    """k-SAT instance with power-law distributed variable frequencies.

    Variable i is sampled with probability proportional to i**(-beta), so
    low-numbered variables occur more often when beta > 0.

    NOTE: beta is the *first* positional parameter, ahead of the base-class
    arguments; prefer passing num_vars/num_clauses/k as keywords.
    """

    def __init__(self, beta=0.5, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.beta = beta

    def generate(self):
        # Normalising constant of the truncated power-law distribution.
        norm = sum(i ** (-self.beta) for i in range(1, self.num_vars + 1))
        # cumulative_probs[v] = P(variable <= v); index 0 seeds the recurrence.
        cumulative_probs = [0] * (self.num_vars + 1)
        for var in range(1, self.num_vars + 1):
            cumulative_probs[var] = cumulative_probs[var - 1] + var ** (-self.beta) / norm
        # remove the initial 0 since that was just for the computation above
        cumulative_probs.pop(0)
        for m in range(self.num_clauses):
            samples = set()
            while len(samples) < self.k:
                sample = numpy.random.uniform()
                # Inverse-CDF sampling. BUG FIX: float rounding can leave the
                # final cumulative value slightly below 1.0, in which case
                # bisect returns num_vars and the old code produced the
                # invalid variable num_vars + 1 (its `if var is not None`
                # guard was always true and caught nothing) — clamp instead.
                var = min(bisect.bisect_left(cumulative_probs, sample) + 1,
                          self.num_vars)
                samples.add(var)
            clause = sorted(samples)
            # Flip each literal's sign with probability 1/2.
            for i, var in enumerate(clause):
                clause[i] = var * ((-1) ** numpy.random.randint(2))
            self.clauses[m] = clause
|
import flask_app
from credentials import API_KEY, API_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET
import tweepy
from selenium import webdriver
#from webdriver_manager.chrome import ChromeDriverManager
#from webdriver_manager.utils import ChromeType
def light_time_message(driver, element_id, probe_name):
    """Scrape one probe's h:m:s light time and format the status line."""
    # NOTE(review): find_element_by_xpath was removed in Selenium 4; switch
    # to driver.find_element(By.XPATH, ...) when the driver is upgraded.
    parts = driver.find_element_by_xpath("//*[@id='" + element_id + "']").text.split(':')
    return ("NASA " + probe_name + " is " + parts[0] + " hours " + parts[1] +
            " minutes " + parts[2] + " seconds light time away from Earth.")


def god():
    """Tweet the current light-time distance of both Voyager probes."""
    auth = tweepy.OAuthHandler(API_KEY, API_SECRET)
    auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
    api = tweepy.API(auth)
    options = webdriver.ChromeOptions()
    options.add_argument('--no-sandbox')
    driver = webdriver.Chrome(executable_path='./chromedriver', options=options)
    driver.get("https://voyager.jpl.nasa.gov/mission/status/#where_are_they_now")
    # The two probes only differ by element id and name (was duplicated code).
    voy1_dist = light_time_message(driver, 'voy1_lt', 'Voyager 1')
    voy2_dist = light_time_message(driver, 'voy2_lt', 'Voyager 2')
    status = voy1_dist + '\n\n' + voy2_dist
    # Keep writing temp.txt (other tooling may read it), but tweet the
    # in-memory string directly instead of re-reading the file we just wrote.
    with open('temp.txt', 'w') as f:
        f.write(status)
    api.update_status(status)
    flask_app.keep_alive()


if __name__ == "__main__":
    god()
|
import numpy as np
import dynet as dy
from math import sqrt
from typing import List
import numbers
from xnmt import events, expression_seqs, param_collections, param_initializers
from xnmt.persistence import serializable_init, Serializable, bare, Ref
from xnmt.transducers import base as transducers
class MultiHeadAttentionSeqTransducer(transducers.SeqTransducer, Serializable):
  """
  This implements the Multi-headed attention layer of "Attention is All You Need":
  https://papers.nips.cc/paper/7181-attention-is-all-you-need.pdf
  Args:
    input_dim: size of inputs
    dropout: dropout to apply to attention matrix
    param_init: how to initialize param matrices
    bias_init: how to initialize bias params
    num_heads: number of attention heads
  """
  yaml_tag = '!MultiHeadAttentionSeqTransducer'
  @events.register_xnmt_handler
  @serializable_init
  def __init__(self,
               input_dim: numbers.Integral = Ref("exp_global.default_layer_dim"),
               dropout: numbers.Real = Ref("exp_global.dropout", default=0.0),
               param_init: param_initializers.ParamInitializer = Ref("exp_global.param_init", default=bare(param_initializers.GlorotInitializer)),
               bias_init: param_initializers.ParamInitializer = Ref("exp_global.bias_init", default=bare(param_initializers.ZeroInitializer)),
               num_heads: numbers.Integral = 8):
    # input_dim must split evenly across heads.
    assert(input_dim % num_heads == 0)
    self.dropout = dropout
    param_collection = param_collections.ParamManager.my_params(self)
    self.input_dim = input_dim
    self.num_heads = num_heads
    self.head_dim = input_dim // num_heads
    # Four square projection matrices: query, key, value, output.
    self.pWq, self.pWk, self.pWv, self.pWo = [param_collection.add_parameters(dim=(input_dim, input_dim), init=param_init.initializer((input_dim, input_dim))) for _ in range(4)]
    # Matching bias row vectors (broadcast over sequence positions).
    self.pbq, self.pbk, self.pbv, self.pbo = [param_collection.add_parameters(dim=(1, input_dim), init=bias_init.initializer((1, input_dim,))) for _ in range(4)]
  @events.handle_xnmt_event
  def on_start_sent(self, src):
    # Reset per-sentence state before each new input.
    self._final_states = None
  def get_final_states(self) -> List[transducers.FinalTransducerState]:
    return self._final_states
  @events.handle_xnmt_event
  def on_set_train(self, val):
    # Tracks training mode so dropout is only applied while training.
    self.train = val
  def transduce(self, expr_seq: expression_seqs.ExpressionSequence) -> expression_seqs.ExpressionSequence:
    """
    transduce the sequence
    Args:
      expr_seq: expression sequence or list of expression sequences (where each inner list will be concatenated)
    Returns:
      expression sequence
    """
    # NOTE(review): dy.parameter() is deprecated in newer DyNet releases in
    # favor of using the Parameter objects directly — confirm target version.
    Wq, Wk, Wv, Wo = [dy.parameter(x) for x in (self.pWq, self.pWk, self.pWv, self.pWo)]
    bq, bk, bv, bo = [dy.parameter(x) for x in (self.pbq, self.pbk, self.pbv, self.pbo)]
    # Start with a [(length, model_size) x batch] tensor
    x = expr_seq.as_transposed_tensor()
    x_len = x.dim()[0][0]
    x_batch = x.dim()[1]
    # Get the query key and value vectors
    # TODO: do we need bias broadcasting in DyNet?
    # q = dy.affine_transform([bq, x, Wq])
    # k = dy.affine_transform([bk, x, Wk])
    # v = dy.affine_transform([bv, x, Wv])
    q = bq + x * Wq
    k = bk + x * Wk
    v = bv + x * Wv
    # Split to batches [(length, head_dim) x batch * num_heads] tensor
    q, k, v = [dy.reshape(x, (x_len, self.head_dim), batch_size=x_batch * self.num_heads) for x in (q,k,v)]
    # Do scaled dot product [(length, length) x batch * num_heads], rows are queries, columns are keys
    attn_score = q * dy.transpose(k) / sqrt(self.head_dim)
    if expr_seq.mask is not None:
      # Mask out padded positions with a large negative value before softmax;
      # the mask rows are repeated once per attention head.
      mask = dy.inputTensor(np.repeat(expr_seq.mask.np_arr, self.num_heads, axis=0).transpose(), batched=True) * -1e10
      attn_score = attn_score + mask
    attn_prob = dy.softmax(attn_score, d=1)
    if self.train and self.dropout > 0.0:
      attn_prob = dy.dropout(attn_prob, self.dropout)
    # Reduce using attention and resize to match [(length, model_size) x batch]
    o = dy.reshape(attn_prob * v, (x_len, self.input_dim), batch_size=x_batch)
    # Final transformation
    # o = dy.affine_transform([bo, attn_prob * v, Wo])
    o = bo + o * Wo
    expr_seq = expression_seqs.ExpressionSequence(expr_transposed_tensor=o, mask=expr_seq.mask)
    # Expose the last position as the transducer's final state.
    self._final_states = [transducers.FinalTransducerState(expr_seq[-1], None)]
    return expr_seq
|
""" Routines and Classes to get information about DHCP server
"""
import re
from ipaddress import IPv4Address, IPv6Address
from .config_reader import ConfigData
from .regex import MySystemRegex
def parse_dhcp_lease_file(dhcp_file=None):
    """
    Open and parse DHCP Lease file
    :param dhcp_file: Path of the DHCP Lease file (config.ini)
    :return: Dict with fields parsed
        { mac_addr:
            {
                lease_time: <int>
                ipv4: <ip>
                ipv6: <ip>
                hostname: <str>
            }
        }
    """
    config = ConfigData()
    ip_regex = MySystemRegex()
    dhcp_config = config.get_dhcp_info()
    dhcp_data = dict()
    if dhcp_file is None:
        dhcp_file = dhcp_config['dhcp']['lease_file']
    # Compile the validators once, outside the loop (they were previously
    # recompiled for every lease entry).
    ipv4_validation = re.compile(ip_regex.ipv4_regex())
    ipv6_validation = re.compile(ip_regex.ipv6_regex())
    with open(dhcp_file, 'r') as f:
        dhcp_lease_entries = [line.strip().split() for line in f]
    for entry in dhcp_lease_entries:
        # Expected entry layout: lease_time mac_addr ip_addr hostname ...
        # 'duid' header lines and configured ignore_hosts are skipped.
        if entry[0] != 'duid' and entry[3].lower() not in \
                dhcp_config['dhcp']['ignore_hosts']:
            if not dhcp_data.get(entry[1]):
                # First sighting of this MAC: create the base record.
                dhcp_data[entry[1]] = {
                    'lease_time': int(entry[0]),
                    'hostname': entry[3].capitalize()
                }
            # Attach the address under 'ipv4' or 'ipv6' (this was previously
            # duplicated across the new-entry and existing-entry branches).
            if ipv4_validation.match(entry[2]):
                dhcp_data[entry[1]]['ipv4'] = IPv4Address(entry[2])
            if ipv6_validation.match(entry[2]):
                dhcp_data[entry[1]]['ipv6'] = IPv6Address(entry[2])
    return dhcp_data
|
# NLP-100 exercise: take every other character (odd positions, 1-based) of
# the interleaved string, e.g. "パタトクカシーー" -> "パトカー".
S ="パタトクカシーー"
print(S[::2])
#!/usr/bin/python3
import argparse
import getpass
import json
import select
import shlex
import socket
import sys
# Raised after the client has reported a fatal protocol error to the server
# and shut its connection down (see Connection.croak).
class Croaked(Exception): pass
# Raised when the server closes the socket (end-of-file on read).
class SocketClosed(Exception): pass
class Connection(object):
    """Client-side connection to a Howzat game server.

    Wraps a TCP socket speaking a newline-delimited JSON protocol: each
    message is one JSON object terminated by '\\n'. Subclasses react to
    incoming traffic by defining handle_<type> (and handle_<verb>_<invitation>)
    methods; see maybe_read_and_handle.
    """
    def __init__(self, host='localhost', port=0x6666, username=None, playername=None):
        # Receive buffer holding raw bytes of partial messages.
        self.buf = b''
        self.sock = socket.socket()
        try:
            self.sock.connect((host, port))
        except Exception:
            self.sock = None
            raise
        # Resolve the username at call time; the old default of
        # getpass.getuser() was evaluated once, at class-definition time.
        self.username = username if username is not None else getpass.getuser()
        self.playername = playername
        self.register()
        # Usernames of the other occupants of our current room.
        self.room = set()
    def debug(self, cls, *args):
        # Hook for subclasses; base client is silent.
        pass
    def debug_rx(self, *args):
        self.debug('rx', *args)
    def debug_tx(self, *args):
        self.debug('tx', *args)
    def try_shutdown(self):
        """Best-effort shutdown of the socket; never raises."""
        if self.sock is None:
            return
        try:
            self.sock.shutdown(socket.SHUT_RDWR)
        except OSError:
            pass
    def read_msg(self):
        """Block until one full message is available and return it decoded."""
        while b'\n' not in self.buf:
            self.buf += self.sock.recv(256)
        msg, _, self.buf = self.buf.partition(b'\n')
        d = json.loads(msg.decode('utf8'))
        self.debug_rx(d)
        return d
    def maybe_read_msg(self, timeout=0.1):
        """Return one message if available within timeout, else None.

        Raises SocketClosed if the server has closed the connection.
        """
        if b'\n' not in self.buf:
            r, w, x = select.select((self.sock.fileno(),), (), (), timeout)
            if self.sock.fileno() in r:
                data = self.sock.recv(256)
                if not len(data):
                    # Zero-length read means orderly shutdown by the peer.
                    self.sock.close()
                    self.sock = None
                    raise SocketClosed('End-of-file condition on socket')
                self.buf += data
        if b'\n' not in self.buf:
            return None
        msg, _, self.buf = self.buf.partition(b'\n')
        d = json.loads(msg.decode('utf8'))
        self.debug_rx(d)
        return d
    def write_msg(self, d):
        """Serialize d as one newline-terminated JSON message and send it."""
        self.debug_tx(d)
        j = json.dumps(d) + '\n'
        while j:
            # send() may accept only part of the payload; loop on the rest.
            b = self.sock.send(j.encode('utf8'))
            j = j[b:]
    def croak(self, msg, swallow=False):
        """Report a fatal error to the server, shut down, and (unless
        swallow) raise Croaked."""
        try:
            self.write_msg({'type': 'error', 'error': str(msg)})
        except Exception as e:
            print("Failed to croak %r: %s" % (msg, e))
        self.try_shutdown()
        self.sock = None
        if not swallow:
            raise Croaked(msg)
    def register(self):
        """Perform the welcome/hello handshake; croaks on any mismatch."""
        welcome = self.read_msg()
        if welcome.get('type') != 'welcome':
            self.croak("Expected welcome message, got %s" % (welcome.get('type'),))
        version = welcome.get('version')
        if not isinstance(version, list):
            self.croak("Malformed welcome message (version: %r)" % (version,))
        if version[0] != 1:
            self.croak("This client only supports protocol version 1 (not %r)" % (version[0],))
        self.server_version = version
        self.motd = welcome.get('message')
        hello = {'type': 'hello', 'username': self.username}
        if self.playername is not None:
            hello['player'] = self.playername
        self.write_msg(hello)
    def __del__(self):
        # Best-effort: teardown at GC time must never raise (modules may
        # already be gone during interpreter shutdown).
        try:
            self.goodbye("Client connection GCed")
        except Exception:
            pass
    def goodbye(self, msg=None):
        """Send a goodbye message (if connected) and close the socket."""
        if self.sock is None:
            return
        gb = {'type': 'goodbye'}
        if msg is not None:
            gb['message'] = msg
        self.write_msg(gb)
        self.try_shutdown()
        self.sock = None
    def maybe_read_and_handle(self, timeout=0.1):
        """Poll for one message and dispatch it to handle_<type>."""
        msg = self.maybe_read_msg(timeout=timeout)
        if msg is None:
            return
        if 'type' not in msg:
            self.croak("Message without type: %s" % json.dumps(msg))
        typ = msg.pop('type')
        if isinstance(typ, str):
            method = 'handle_'+typ
            if hasattr(self, method):
                try:
                    return getattr(self, method)(**msg)
                except Croaked:
                    raise
                except Exception as e:
                    self.croak("Failed to handle message %s: %r" % (json.dumps(msg), e))
        self.croak("Unhandled message type: %s %s" % (typ, json.dumps(msg)))
    def handle_enter(self, user):
        # Our own enter means we changed rooms: reset the occupant set.
        if user == self.username:
            self.room = set()
        else:
            self.room.add(user)
    def handle_exit(self, user):
        if user == self.username:
            self.room = set()
        else:
            self.room.remove(user)
    def _dispatch_invitation(self, msg_type, invitation, kwargs):
        # Shared dispatcher for invite/revoke/accept/reject messages:
        # routes to handle_<msg_type>_<invitation>, croaking on bad input.
        # (This replaces four near-identical handler bodies.)
        d = {'type': msg_type, 'invitation': invitation}
        d.update(kwargs)
        if not isinstance(invitation, str):
            self.croak("Malformed %s message: %s" % (msg_type, json.dumps(d)))
        method = 'handle_%s_%s' % (msg_type, invitation)
        if not hasattr(self, method):
            self.croak("Unhandled invitation type: %s" % json.dumps(d))
        return getattr(self, method)(**kwargs)
    def handle_invite(self, invitation, **kwargs):
        return self._dispatch_invitation('invite', invitation, kwargs)
    def handle_revoke(self, invitation, **kwargs):
        return self._dispatch_invitation('revoke', invitation, kwargs)
    def handle_accept(self, invitation, **kwargs):
        return self._dispatch_invitation('accept', invitation, kwargs)
    def handle_reject(self, invitation, **kwargs):
        return self._dispatch_invitation('reject', invitation, kwargs)
    def wait_for(self, **d):
        """Read messages until one matches every key/value in d; return it.

        Messages that arrive meanwhile are bottled up and replayed into the
        receive buffer afterwards.
        """
        # Save all the messages we weren't waiting for, so that we can
        # replay them afterwards
        bottle = []
        try:
            while True:
                msg = self.read_msg()
                for k, v in d.items():
                    if msg.get(k) != v:
                        break
                else:
                    return msg
                if msg.get('type') == 'error':
                    raise Exception("Server error: %s" % (msg.get('message'),))
                bottle.append(msg)
        finally:
            # Restore the bottled-up messages ahead of any unread bytes.
            # BUG FIX: this previously joined str with the bytes buffer
            # (TypeError) and omitted the trailing '\n' delimiter on the
            # last bottled message.
            self.buf = b''.join(json.dumps(m).encode('utf8') + b'\n'
                                for m in bottle) + self.buf
    def wall(self, msg):
        """Broadcast a message to everyone in the room."""
        self.write_msg({'type': 'wall', 'message': str(msg)})
    def message(self, msg, to):
        """Send a private message to one user."""
        self.write_msg({'type': 'message', 'message': str(msg), 'to': str(to)})
    # --- invitation senders -------------------------------------------------
    def invite_game(self, to):
        self.write_msg({'type': 'invite', 'invitation': 'new', 'to': str(to)})
    def revoke_game(self, to):
        self.write_msg({'type': 'revoke', 'invitation': 'new', 'to': str(to)})
    def accept_game(self, to):
        self.write_msg({'type': 'accept', 'invitation': 'new', 'to': str(to)})
    def reject_game(self, to):
        self.write_msg({'type': 'reject', 'invitation': 'new', 'to': str(to)})
    def invite_join(self, to):
        self.write_msg({'type': 'invite', 'invitation': 'join', 'to': str(to)})
    def revoke_join(self, to):
        self.write_msg({'type': 'revoke', 'invitation': 'join', 'to': str(to)})
    def accept_join(self, to):
        self.write_msg({'type': 'accept', 'invitation': 'join', 'to': str(to)})
    def reject_join(self, to):
        self.write_msg({'type': 'reject', 'invitation': 'join', 'to': str(to)})
    # --- game commands ------------------------------------------------------
    def leave_game(self):
        self.write_msg({'type': 'part'})
        #self.wait_for(**{'type': 'part', 'from': self.username})
    def rename_team(self, team_name):
        self.write_msg({'type': 'team name', 'name': team_name})
    def claim_player(self, player_name):
        self.write_msg({'type': 'claim', 'player': player_name})
    def assign_player(self, player_name, username):
        self.write_msg({'type': 'assign', 'player': player_name, 'to': username})
    def disown_player(self, player_name):
        self.write_msg({'type': 'disown', 'player': player_name})
    def action(self, action, **d):
        """Send a generic in-game action message."""
        msg = {'type': 'action', 'action': action}
        msg.update(d)
        self.write_msg(msg)
    def call_toss(self, tails):
        self.action('call toss', tails=tails)
    def call_heads(self):
        self.call_toss(False)
    def call_tails(self):
        self.call_toss(True)
    def flip_coin(self):
        self.action('flip coin')
    def choose_first(self, bat):
        self.action('choose first', bat=bat)
    def choose_bat_first(self):
        self.choose_first(True)
    def choose_field_first(self):
        self.choose_first(False)
    def choose_batsman(self, player_name):
        self.action('next bat', batsman=player_name)
    def choose_keeper(self, player_name):
        self.action('choose keeper', keeper=player_name)
    def choose_bowler(self, player_name, keeper=None):
        d = {'bowler': player_name}
        if keeper is not None:
            d['keeper'] = keeper
        self.action('choose bowler', **d)
    def field_swap(self, first_player, second_player):
        self.action('field assign', swap=(first_player, second_player))
    def field_done(self):
        self.action('field assign')
    def roll_dice(self):
        self.action('roll')
class ConsoleClient(Connection):
def __init__(self, **kwargs):
self.in_invite_new = set()
self.in_invite_join = set()
self.cons = sys.stdin
self.halt = False
self.dbg = kwargs.pop('debug', False)
super(ConsoleClient, self).__init__(**kwargs)
print("%s Server version %s" % (self.tagify(), '.'.join(map(str, self.server_version))))
print("%s %s" % (self.tagify(), self.motd))
def debug(self, cls, *args):
if self.dbg:
print("DBG %s: %s" % (cls, ' '.join(map(str, args))))
def main(self):
try:
r, w, x = select.select((self.cons.fileno(),), (), (), 0)
if self.cons.fileno() in r:
inp = self.cons.readline().rstrip('\n')
self.do_input(inp)
if self.halt:
return
self.maybe_read_and_handle()
except SocketClosed as e:
print(e)
self.halt = True
except Croaked:
self.halt = True
raise
except Exception as e:
self.croak("main loop: %r" % e, True)
self.halt = True
def main_loop(self):
while not self.halt:
self.main()
def do_blank(self):
# Later this may be affected by the state machine
# (e.g. to perform a requested trigger action)
pass
def do_plain_input(self, inp):
# Later this may be affected by the state machine
self.wall(inp)
def do_input(self, inp):
if not inp:
return self.do_blank()
if inp[0] != '/':
return self.do_plain_input(inp)
words = shlex.split(inp[1:])
cmd, *args = words
method = 'cmd_' + cmd
if hasattr(self, method):
try:
return getattr(self, method)(*args)
except Exception as e:
print('Command handler: /%s:' % (cmd,), e)
return
print("Unrecognised command /%s" % cmd)
def cmd_quit(self, *messages):
self.goodbye(' '.join(messages) or 'Client quit')
self.halt = True
def cmd_invite(self, to):
self.invite_game(to)
def cmd_accept(self, to, what=None):
if what is None:
if to in self.in_invite_new:
if to in self.in_invite_join:
raise Exception("Ambiguous - specify '/accept <user> new' or '/accept <user> join'")
return self.accept_game(to)
if to in self.in_invite_join:
return self.accept_join(to)
raise Exception("No invite outstanding for new or join from", to)
if what == 'new':
if to not in self.in_invite_new:
print("No new-game invite outstanding from %s; trying anyway" % (to,))
return self.accept_game(to)
if what == 'join':
if to not in self.in_invite_join:
print("No join-game invite outstanding from %s; trying anyway" % (to,))
return self.accept_join(to)
raise Exception("<what> must be 'new' or 'join', not %s" % (what,))
def cmd_reject(self, to, what=None):
if what is None:
if to in self.in_invite_new:
if to in self.in_invite_join:
raise Exception("Ambiguous - specify '/reject <user> new' or '/reject <user> join'")
return self.reject_game(to)
if to in self.in_invite_join:
return self.reject_join(to)
raise Exception("No invite outstanding for new or join from", to)
if what == 'new':
if to not in self.in_invite_new:
print("No new-game invite outstanding from %s; trying anyway" % (to,))
return self.reject_game(to)
if what == 'join':
if to not in self.in_invite_join:
print("No join-game invite outstanding from %s; trying anyway" % (to,))
return self.reject_join(to)
raise Exception("<what> must be 'new' or 'join', not %s" % (what,))
def tagify(self, bracks='_', frm=None, w=16):
if frm is None:
tag = '-'
else:
tag = bracks[0] + frm + bracks[-1]
return tag.rjust(w)
def handle_error(self, message):
print("error: %s" % (message,))
def handle_wall(self, message, frm):
print("%s %s" % (self.tagify('{}', frm), message))
def handle_message(self, message, frm=None):
print("%s %s" % (self.tagify('<>', frm), message))
def handle_enter(self, user):
super(ConsoleClient, self).handle_enter(user)
print("%s entered the room" % (self.tagify('=', user),))
    def handle_exit(self, user):
        """Announce that *user* left, and drop any invites they had pending."""
        super(ConsoleClient, self).handle_exit(user)
        print("%s left the room" % (self.tagify('=', user),))
        # Revoke any outstanding invites
        # (discard, not remove: the user may have none pending)
        self.in_invite_new.discard(user)
        self.in_invite_join.discard(user)
def handle_invite_new(self, frm):
print("%s invited you to start a game! /accept or /reject it." % self.tagify('=', frm))
self.in_invite_new.add(frm)
def handle_invite_join(self, frm):
print("%s invited you to join their team! /accept or /reject it." % self.tagify('=', frm))
self.in_invite_join.add(frm)
def handle_revoke_new(self, frm):
if frm not in self.in_invite_new:
return # ignore it
print("%s revoked the invitation to start a game." % self.tagify('=', frm))
self.in_invite_new.remove(frm)
def handle_revoke_join(self, frm):
if frm not in self.in_invite_join:
return # ignore it
print("%s revoked the invitation to join their team." % self.tagify('=', frm))
self.in_invite_join.remove(frm)
# def handle_accept_new(self, frm):
#
if __name__ == '__main__':
    # Launch an interactive console client; the username defaults to the
    # OS-level login name of the invoking user.
    parser = argparse.ArgumentParser(description='Command-line client for networked Howzat game')
    parser.add_argument('-u', '--username', default=getpass.getuser())
    args = parser.parse_args()
    ConsoleClient(username=args.username, debug=True).main_loop()
|
import numpy as onp
import jax.numpy as jnp
from jax import lax
from flax.optim import OptimizerDef
from flax import struct
#--------------------------------------------------------------------------------------------------
# UNCENTERED
@struct.dataclass
class _LaPropHyperParams:
    # Hyper-parameters of the (uncentered) LaProp optimizer.
    learning_rate: onp.ndarray
    beta1: onp.ndarray  # EMA decay for the normalized update (momentum)
    beta2: onp.ndarray  # EMA decay for the squared gradient
    eps: onp.ndarray  # fuzz factor added to the RMS denominator
    weight_decay: onp.ndarray
@struct.dataclass
class _LaPropParamState:
    # Per-parameter optimizer state carried between steps.
    update_ema: onp.ndarray  # EMA of the normalized update
    grad_sq_ema: onp.ndarray  # EMA of the squared gradient
    bias_correction1: float  # running bias-correction accumulator for update_ema
class LaProp(OptimizerDef):
    """
    LaProp optimizer (uncentered variant).

    Normalizes the raw gradient by the bias-corrected RMS of past gradients
    before applying momentum.
    https://github.com/Z-T-WANG/LaProp-Optimizer
    """
    def __init__(self, learning_rate=None, beta1=0.9, beta2=0.999, eps=1e-8, weight_decay=0.0):
        super().__init__(_LaPropHyperParams(learning_rate, beta1, beta2, eps, weight_decay))
    def init_param_state(self, param):
        # Zeroed EMAs and a zero running bias-correction accumulator.
        return _LaPropParamState(jnp.zeros_like(param), jnp.zeros_like(param), 0.0)
    def apply_param_gradient(self, step, hyper_params, param, state, grad):
        assert hyper_params.learning_rate is not None, 'no learning rate provided.'
        hp = hyper_params
        # EMA of the squared gradient, with standard bias correction.
        new_grad_sq_ema = hp.beta2 * state.grad_sq_ema + (1. - hp.beta2) * lax.square(grad)
        grad_sq_ema_corr = new_grad_sq_ema / (1 - hp.beta2 ** (step + 1.))
        # Normalize the gradient before momentum (LaProp's defining step).
        normalized = grad / (jnp.sqrt(grad_sq_ema_corr) + hp.eps)
        new_update_ema = hp.beta1 * state.update_ema + (1. - hp.beta1) * hp.learning_rate * normalized
        # Running bias correction of the update EMA (tracks the learning rate).
        new_bc1 = hp.beta1 * state.bias_correction1 + (1 - hp.beta1) * hp.learning_rate
        new_param = param - hp.learning_rate * (new_update_ema / new_bc1)
        # Decoupled weight decay, computed from the pre-update parameter value.
        new_param -= hp.learning_rate * hp.weight_decay * param
        return new_param, _LaPropParamState(new_update_ema, new_grad_sq_ema, new_bc1)
#--------------------------------------------------------------------------------------------------
# CENTERED
@struct.dataclass
class _LaPropCenteredHyperParams:
    # Hyper-parameters of the centered LaProp variant.
    learning_rate: onp.ndarray
    beta1: onp.ndarray  # EMA decay for the normalized update (momentum)
    beta2: onp.ndarray  # EMA decay for the gradient and squared gradient
    eps: onp.ndarray  # fuzz factor added to the RMS denominator
    weight_decay: onp.ndarray
    steps_before_using_centered: onp.ndarray  # warm-up steps before centering kicks in
@struct.dataclass
class _LaPropCenteredParamState:
    # Per-parameter optimizer state carried between steps.
    update_ema: onp.ndarray  # EMA of the normalized update
    grad_ema: onp.ndarray  # EMA of the raw gradient (used for centering)
    grad_sq_ema: onp.ndarray  # EMA of the squared gradient
    bias_correction1: float  # running bias-correction accumulator for update_ema
class LaPropCentered(OptimizerDef):
    """
    LaProp optimizer, centered variant.

    Like LaProp, but after a warm-up of ``steps_before_using_centered`` steps
    the second moment is centered by subtracting the squared gradient EMA
    (i.e. an estimate of the gradient variance is used instead of the raw
    second moment).
    https://github.com/Z-T-WANG/LaProp-Optimizer
    """
    def __init__(self, learning_rate=None, beta1=0.9, beta2=0.999, eps=1e-8, weight_decay=0.0,
                 steps_before_using_centered=10):
        hyper_params = _LaPropCenteredHyperParams(learning_rate, beta1, beta2, eps, weight_decay,
                                                  steps_before_using_centered)
        super().__init__(hyper_params)
    def init_param_state(self, param):
        # Zeroed EMAs and a zero running bias-correction accumulator.
        return _LaPropCenteredParamState(jnp.zeros_like(param), jnp.zeros_like(param), jnp.zeros_like(param), 0.0)
    def apply_param_gradient(self, step, hyper_params, param, state, grad):
        assert hyper_params.learning_rate is not None, 'no learning rate provided.'
        learning_rate = hyper_params.learning_rate
        beta1 = hyper_params.beta1
        beta2 = hyper_params.beta2
        eps = hyper_params.eps
        weight_decay = hyper_params.weight_decay
        steps_before_using_centered = hyper_params.steps_before_using_centered
        # exponential moving average for grad²
        grad_sq = lax.square(grad)
        grad_sq_ema = beta2 * state.grad_sq_ema + (1. - beta2) * grad_sq
        # bias correction
        bias_correction2 = 1 - beta2 ** (step + 1.)
        grad_sq_ema_corr = grad_sq_ema / bias_correction2
        # exponential moving average for grad
        # NOTE(review): the gradient EMA decays with beta2 (not beta1), so the
        # centering term tracks the same horizon as the second moment --
        # confirm against the reference implementation.
        grad_ema = beta2 * state.grad_ema + (1. - beta2) * grad
        grad_ema_corr = grad_ema / bias_correction2
        # centering second moment (only after the warm-up, where the variance
        # estimate grad_sq - grad_mean² is still unreliable)
        grad_sq_centered = jnp.where(step > steps_before_using_centered,
                                     grad_sq_ema_corr - grad_ema_corr**2,
                                     grad_sq_ema_corr)
        # exponential moving average for update tensor
        update = grad / (jnp.sqrt(grad_sq_centered) + eps)
        update_ema = beta1 * state.update_ema + (1. - beta1) * learning_rate * update
        # bias correction (running form so learning-rate schedules are handled)
        bias_correction1 = beta1 * state.bias_correction1 + (1 - beta1) * learning_rate
        update_ema_corr = update_ema / bias_correction1
        new_param = param - learning_rate * update_ema_corr
        # decoupled weight decay, using the pre-update parameter value
        new_param -= learning_rate * weight_decay * param
        new_state = _LaPropCenteredParamState(update_ema, grad_ema, grad_sq_ema, bias_correction1)
        return new_param, new_state
|
class Solution:
    def pivotIndex(self, nums) -> int:
        """Return the smallest index whose left-side sum equals its
        right-side sum, or -1 if no such pivot exists."""
        total = sum(nums)
        left_sum = 0
        for idx, value in enumerate(nums):
            # Right side is everything after idx: total minus left minus pivot.
            if left_sum == total - left_sum - value:
                return idx
            left_sum += value
        return -1
# Quick demo: expected pivot index of this list is 3.
solution = Solution()
print(solution.pivotIndex([1, 7, 3, 6, 5, 6]))
|
'''
Newb Code Snippets:
https://stevepython.wordpress.com
77-Print Text On Webcam And Save Video
Tested on Window 7 and Linux Mint 19.1
On my Linux I had to make the font smaller (0.5) and change the text position
to (60,260) to be able to see the text. Also, the saved video would not play.
This could be all down to my setup though.
pip install opencv-python
'''
import cv2

cv2.namedWindow("preview")
vc = cv2.VideoCapture(0)

# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))

if vc.isOpened():
    rval, frame = vc.read()
else:
    rval = False
    print("No camera found.")

while rval:
    # Draw the text BEFORE displaying/saving, so the subtitle shows up in
    # both the preview window and the output file (the original drew it
    # after imshow, so the preview never showed it).
    cv2.putText(frame, "Video subtitle Shambles test", (110, 460),
                cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), lineType=cv2.LINE_AA)
    cv2.imshow("preview", frame)
    # write the frame to file
    out.write(frame)
    # End if Escape key detected
    key = cv2.waitKey(25)
    if key == 27:  # exit on ESC
        break
    # Read the next frame last so the loop condition guards against a failed
    # read (the original annotated/wrote the frame even when rval was False).
    rval, frame = vc.read()

# Release the capture and finalize the writer -- without out.release() the
# AVI container is left incomplete and the saved video may not play.
vc.release()
out.release()
cv2.destroyWindow("preview")
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-14 22:27
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import pydoc.core.conf
class Migration(migrations.Migration):
    # Auto-generated initial schema for the package-mirroring "core" app:
    # classifiers, packages, releases, distribution files, and the package
    # index (e.g. PyPI) each package is mirrored from.
    initial = True
    dependencies = [
    ]
    operations = [
        # Trove classifier strings; the string itself is the primary key.
        migrations.CreateModel(
            name='Classifier',
            fields=[
                ('name', models.CharField(max_length=255, primary_key=True, serialize=False)),
            ],
            options={
                'verbose_name_plural': 'classifiers',
                'ordering': ('name',),
                'verbose_name': 'classifier',
            },
        ),
        # One uploadable artifact (sdist/wheel/egg/...) of a release.
        migrations.CreateModel(
            name='Distribution',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('filename', models.CharField(blank=True, default='', help_text='the filename as provided by pypi', max_length=255)),
                ('file', models.FileField(blank=True, help_text='the distribution file (if it was mirrord locally)', max_length=255, null=True, upload_to=pydoc.core.conf.distribution_upload_to)),
                ('url', models.URLField(blank=True, help_text='the original url provided by pypi', max_length=255, null=True)),
                ('size', models.IntegerField(blank=True, null=True)),
                ('md5_digest', models.CharField(blank=True, max_length=32)),
                ('filetype', models.CharField(choices=[('sdist', 'Source'), ('bdist_dumb', '"dumb" binary'), ('bdist_rpm', 'RPM'), ('bdist_wininst', 'MS Windows installer'), ('bdist_egg', 'Python Egg'), ('bdist_dmg', 'OS X Disk Image')], max_length=32)),
                ('pyversion', models.CharField(blank=True, choices=[('any', 'Any i.e. pure python'), ('2.1', '2.1'), ('2.2', '2.2'), ('2.3', '2.3'), ('2.4', '2.4'), ('2.5', '2.5'), ('2.6', '2.6'), ('2.7', '2.7'), ('3.0', '3.0'), ('3.1', '3.1'), ('3.2', '3.2')], max_length=16)),
                ('comment', models.TextField(blank=True, default='')),
                ('signature', models.TextField(blank=True, default='')),
                ('uploaded_at', models.DateTimeField(blank=True, help_text='the time at which the package was uploaded (on pypi)', null=True)),
                ('mirrored_at', models.DateTimeField(blank=True, help_text='the time at which the package was downloaded to this mirror.', null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('is_from_external', models.BooleanField(default=False)),
            ],
            options={
                'verbose_name_plural': 'distributions',
                'verbose_name': 'distribution',
            },
        ),
        # A project on the index; the project name is the primary key.
        migrations.CreateModel(
            name='Package',
            fields=[
                ('name', models.CharField(max_length=255, primary_key=True, serialize=False, unique=True)),
                ('auto_hide', models.BooleanField(default=True)),
                ('updated_from_remote_at', models.DateTimeField(blank=True, null=True)),
                ('parsed_external_links_at', models.DateTimeField(blank=True, null=True)),
            ],
            options={
                'verbose_name_plural': 'packages',
                'ordering': ['name'],
                'verbose_name': 'package',
                'get_latest_by': 'releases__latest',
            },
        ),
        # The remote index (XML-RPC + "simple" endpoints) packages come from.
        migrations.CreateModel(
            name='PackageIndex',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('slug', models.CharField(default='pypi', max_length=255, unique=True)),
                ('updated_from_remote_at', models.DateTimeField(blank=True, null=True)),
                ('xml_rpc_url', models.URLField(blank=True, default='http://pypi.python.org/pypi')),
                ('simple_url', models.URLField(blank=True, default='http://pypi.python.org/simple')),
            ],
            options={
                'verbose_name_plural': 'package indexes',
            },
        ),
        # A versioned release of a package.
        migrations.CreateModel(
            name='Release',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('version', models.CharField(max_length=128)),
                ('metadata_version', models.CharField(default='1.0', max_length=64)),
                ('hidden', models.BooleanField(default=False)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('is_from_external', models.BooleanField(default=False)),
                ('package', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='releases', to='core.Package')),
            ],
            options={
                'verbose_name_plural': 'releases',
                'ordering': ['-created'],
                'verbose_name': 'release',
                'get_latest_by': 'created',
            },
        ),
        # Foreign keys added after model creation to break circular ordering.
        migrations.AddField(
            model_name='package',
            name='index',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.PackageIndex'),
        ),
        migrations.AddField(
            model_name='distribution',
            name='release',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='distributions', to='core.Release'),
        ),
        # Natural uniqueness constraints.
        migrations.AlterUniqueTogether(
            name='release',
            unique_together=set([('package', 'version')]),
        ),
        migrations.AlterUniqueTogether(
            name='distribution',
            unique_together=set([('release', 'filetype', 'pyversion')]),
        ),
    ]
|
"""
https://github.com/FrederikSchorr/sign-language
Train a pre-trained I3D convolutional network to classify videos
"""
import os
import glob
import time
import sys
import numpy as np
import pandas as pd
import keras
from keras import backend as K
from datagenerator import VideoClasses, FramesGenerator
from model_i3d import Inception_Inflated3d, add_i3d_top
def layers_freeze(keModel:keras.Model) -> keras.Model:
    """Mark every layer of the model as non-trainable and return it."""
    print("Freeze all %d layers in Model %s" % (len(keModel.layers), keModel.name))
    for lyr in keModel.layers:
        lyr.trainable = False
    return keModel
def layers_unfreeze(keModel:keras.Model) -> keras.Model:
    """Mark every layer of the model as trainable and return it."""
    print("Unfreeze all %d layers in Model %s" % (len(keModel.layers), keModel.name))
    for lyr in keModel.layers:
        lyr.trainable = True
    return keModel
def count_params(keModel:keras.Model):
    """Print total / trainable / non-trainable parameter counts of the model."""
    # set() deduplicates weights shared between layers before counting.
    n_trainable = int(np.sum([K.count_params(w) for w in set(keModel.trainable_weights)]))
    n_frozen = int(np.sum([K.count_params(w) for w in set(keModel.non_trainable_weights)]))
    print('Total params: {:,}'.format(n_trainable + n_frozen))
    print('Trainable params: {:,}'.format(n_trainable))
    print('Non-trainable params: {:,}'.format(n_frozen))
    return
def train_I3D_oflow_end2end(diVideoSet):
    """
    * Loads pretrained I3D model,
    * reads optical flow data generated from training videos,
    * adjusts top-layers adequately for video data,
    * trains only news top-layers,
    * then fine-tunes entire neural network,
    * saves logs and models to disc.

    diVideoSet: dict describing the data set; this function reads the
    "sName", "nClasses" and "nFramesNorm" keys.
    """
    # directories (derived from the data-set name / class count / frame count)
    sFolder = "%03d-%d"%(diVideoSet["nClasses"], diVideoSet["nFramesNorm"])
    sClassFile = "data-set/%s/%03d/class.csv"%(diVideoSet["sName"], diVideoSet["nClasses"])
    #sVideoDir = "data-set/%s/%03d"%(diVideoSet["sName"], diVideoSet["nClasses"])
    #sImageDir = "data-temp/%s/%s/image"%(diVideoSet["sName"], sFolder)
    #sImageFeatureDir = "data-temp/%s/%s/image-i3d"%(diVideoSet["sName"], sFolder)
    sOflowDir = "data-temp/%s/%s/oflow"%(diVideoSet["sName"], sFolder)
    #sOflowFeatureDir = "data-temp/%s/%s/oflow-i3d"%(diVideoSet["sName"], sFolder)
    sModelDir = "model"
    # Phase 1: train only the new top layers (higher learning rate, few epochs).
    diTrainTop = {
        "fLearn" : 1e-3,
        "nEpochs" : 3}
    # Phase 2: fine-tune the whole network (lower learning rate, more epochs).
    diTrainAll = {
        "fLearn" : 1e-4,
        "nEpochs" : 17}
    nBatchSize = 4
    print("\nStarting I3D end2end training ...")
    print(os.getcwd())
    # read the ChaLearn classes
    oClasses = VideoClasses(sClassFile)
    # Load training data (optical flow: 2 channels, 224x224 frames)
    genFramesTrain = FramesGenerator(sOflowDir + "/train", nBatchSize,
        diVideoSet["nFramesNorm"], 224, 224, 2, oClasses.liClasses)
    genFramesVal = FramesGenerator(sOflowDir + "/val", nBatchSize,
        diVideoSet["nFramesNorm"], 224, 224, 2, oClasses.liClasses)
    # Load pretrained i3d model and adjust top layer
    print("Load pretrained I3D flow model ...")
    keI3DOflow = Inception_Inflated3d(
        include_top=False,
        weights='flow_imagenet_and_kinetics',
        input_shape=(diVideoSet["nFramesNorm"], 224, 224, 2))
    print("Add top layers with %d output classes ..." % oClasses.nClasses)
    # Freeze the pretrained backbone so phase 1 only trains the new head.
    keI3DOflow = layers_freeze(keI3DOflow)
    keI3DOflow = add_i3d_top(keI3DOflow, oClasses.nClasses, dropout_prob=0.5)
    # Prep logging (timestamped run name shared by log and model files)
    sLog = time.strftime("%Y%m%d-%H%M", time.gmtime()) + \
        "-%s%03d-oflow-i3d"%(diVideoSet["sName"], diVideoSet["nClasses"])
    # Helper: Save results
    csv_logger = keras.callbacks.CSVLogger("log/" + sLog + "-acc.csv", append = True)
    # Helper: Save the model (last epoch plus best-so-far checkpoints)
    os.makedirs(sModelDir, exist_ok=True)
    cpTopLast = keras.callbacks.ModelCheckpoint(filepath = sModelDir + "/" + sLog + "-above-last.h5", verbose = 0)
    cpTopBest = keras.callbacks.ModelCheckpoint(filepath = sModelDir + "/" + sLog + "-above-best.h5",
        verbose = 1, save_best_only = True)
    cpAllLast = keras.callbacks.ModelCheckpoint(filepath = sModelDir + "/" + sLog + "-entire-last.h5", verbose = 0)
    cpAllBest = keras.callbacks.ModelCheckpoint(filepath = sModelDir + "/" + sLog + "-entire-best.h5",
        verbose = 1, save_best_only = True)
    # Fit top layers
    print("Fit I3D top layers with generator: %s" % (diTrainTop))
    optimizer = keras.optimizers.Adam(lr = diTrainTop["fLearn"])
    keI3DOflow.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    count_params(keI3DOflow)
    keI3DOflow.fit_generator(
        generator = genFramesTrain,
        validation_data = genFramesVal,
        epochs = diTrainTop["nEpochs"],
        workers = 4,
        use_multiprocessing = True,
        max_queue_size = 8,
        verbose = 1,
        callbacks=[csv_logger, cpTopLast, cpTopBest])
    # Fit entire I3D model (unfreeze backbone, recompile with lower LR)
    print("Finetune all I3D layers with generator: %s" % (diTrainAll))
    keI3DOflow = layers_unfreeze(keI3DOflow)
    optimizer = keras.optimizers.Adam(lr = diTrainAll["fLearn"])
    keI3DOflow.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    count_params(keI3DOflow)
    keI3DOflow.fit_generator(
        generator = genFramesTrain,
        validation_data = genFramesVal,
        epochs = diTrainAll["nEpochs"],
        workers = 4,
        use_multiprocessing = True,
        max_queue_size = 8,
        verbose = 1,
        callbacks=[csv_logger, cpAllLast, cpAllBest])
    return
if __name__ == '__main__':
    """diVideoSet = {"sName" : "ledasila",
        "nClasses" : 21,   # number of classes
        "nFramesNorm" : 40,    # number of frames per video
        "nMinDim" : 240,   # smaller dimension of saved video-frames
        "tuShape" : (288, 352), # height, width
        "nFpsAvg" : 25,
        "nFramesAvg" : 75,
        "fDurationAvg" : 3.0} # seconds
    """
    diVideoSet = {"sName" : "chalearn",
        "nClasses" : 20,   # number of classes
        "nFramesNorm" : 40,    # number of frames per video
        "nMinDim" : 240,   # smaller dimension of saved video-frames
        "tuShape" : (240, 320), # height, width
        "nFpsAvg" : 10,
        "nFramesAvg" : 50,
        "fDurationAvg" : 5.0} # seconds; key fixed (was misspelled "fDurationAvG")
    train_I3D_oflow_end2end(diVideoSet)
from datetime import timedelta
from subparse import command
from .settings import asduration
# Default progress-reporting interval shared by the stream/archive commands.
default_report_interval = timedelta(seconds=5)
def generic_options(parser):
    """Register the options shared by every subcommand."""
    parser.add_argument('-v', '--verbose', action='store_true')
    parser.add_argument('--profile', default='profile.yml')
@command('.tweet_stream', 'twitter:stream')
def tweet_stream(parser):
    """
    Listen to the twitter firehose.

    A new file is created when the stream is interrupted due to an issue or
    when a SIGHUP is received locally. The resulting files can then be
    concatenated together and/or ingested into the database for querying.
    """
    # File containing the stream filter definitions.
    parser.add_argument('filter_file')
    parser.add_argument(
        '--report-interval',
        type=asduration,
        default=default_report_interval,
    )
    parser.add_argument(
        '--output-path-prefix',
        help=(
            'Store the data compressed to disk. '
            'Helpful for debugging or redundancy.'
        ),
    )
    # Optional RabbitMQ fan-out for downstream consumers.
    parser.add_argument('--rabbitmq-exchange', default='')
    parser.add_argument('--rabbitmq-routing-key')
@command('.mq_archiver', 'mq:archive')
def mq_archiver(parser):
    """
    Listen for messages in the queue and save them.
    """
    parser.add_argument('--queue', required=True)
    parser.add_argument(
        '--report-interval',
        type=asduration,
        default=default_report_interval,
    )
    parser.add_argument(
        '--output-path-prefix',
        help=(
            'Store the data compressed to disk. '
            'Helpful for debugging or redundancy.'
        ),
    )
    # Optional Google Cloud destinations for images / metadata.
    parser.add_argument('--gcp-image-bucket')
    parser.add_argument('--gcp-firestore-collection')
@command('.mq_replay', 'mq:replay')
def mq_replay(parser):
    """
    Replay files from the output path into the queue.
    """
    parser.add_argument('--exchange', default='')
    parser.add_argument('--routing-key', required=True)
    # One or more archive files to push back onto the exchange.
    parser.add_argument('--files', nargs='+', required=True)
|
import sys
import re
from pathlib import Path

# Puzzle-input loader: derives <year>/<day> from the script path given on the
# command line and reads the matching input (or sample) file.

script_param_index = 0
if '--' in sys.argv:  # debug mode: real arguments follow a '--' separator
    script_param_index = sys.argv.index('--') + 1

# Match ".../<year>/<day>.py" with either Windows or POSIX path separators.
year_day_parser = re.compile(r'(\d+)(?:\\|/)(\d+)\.py$')
year, day = year_day_parser.search(sys.argv[script_param_index]).groups()

# An optional '-sample' flag selects the sample data folder instead.
if (len(sys.argv) >= (script_param_index + 2)
        and sys.argv[script_param_index + 1] == '-sample'):
    folder = 'sample'
else:
    folder = 'input'

input_path = Path.cwd() / year / folder / (day + '.txt')
# Renamed the handle: the original shadowed the builtin 'input'.
with open(input_path) as input_file:
    lines = [line.strip() for line in input_file]

# Convenience: pre-parse the lines as integers when they are all numeric.
try:
    numbers = [*map(int, lines)]
except ValueError:
    numbers = []
|
from chibi.file.snippets import is_file
from tests.snippet.files import Test_with_files
class Test_is_file( Test_with_files ):
    """Checks is_file() against the fixture's directories and files."""
    amount_of_files = 3
    amount_of_dirs = 3
    def test_root_should_be_a_false( self ):
        # The fixture root is a directory, never a file.
        self.assertFalse( is_file( self.root_dir ) )
    def test_all_dirs_list_should_be_false( self ):
        # Renamed loop variable: the original shadowed the builtin 'dir'.
        for directory in self.dirs:
            self.assertFalse( is_file( directory ) )
    def test_all_files_liist_should_be_true( self ):
        # Renamed loop variable to avoid the (py2 builtin) name 'file'.
        for path in self.files:
            self.assertTrue( is_file( path ) )
|
"""Collection of helper functions to handle the logic for the KME.
"""
import base64
import os
import numpy as np
from typing import List
from hashlib import shake_128
import uuid
def concat_keys(key_array: List[List[int]]) -> List[int]:
    """ Helper function to concatenate keys.
    The function will concatenate integers in the same row of the 2D list ``key_array`` using :func:`~api.helper.concat_two_int`.
    Parameters
    ----------
    key_array: 2D list
        2D list of keys. Each row are the integers to be concatenated.
    Returns
    -------
    list
        List of concatenated keys in integer type. The number of keys will be ``len(key_array)`` (number of rows).
    Notes
    -----
    This function takes in a 2D array of keys in their integer form, and concatenates keys in the same row.
    .. highlight:: python
    .. code-block:: python
        >>> arr = [[1122334455, 2233445566], [3344556677, 4455667788]]
        >>> concat_keys(arr)
        [4820389781632429246, 28729523099122538572]
    Here, 4820389781632429246 is the integer result of concatenating 1122334455 and 2233445566 with the function
    :func:`~api.helper.concat_two_int`. Similarly 28729523099122538572 is the result of concatenating 3344556677 and 4455667788.
    """
    concatenated_keys = []
    for row in key_array:
        key = row[0]
        # Fold the remaining integers of the row into one concatenated key.
        # (Dropped the original's pointless enumerate whose index was unused.)
        for next_key in row[1:]:
            key = concat_two_int(key, next_key)
        concatenated_keys.append(key)
    return concatenated_keys
def retrieve_keys_from_file(number: int, num_key_in_each: int, key_file_path: str) -> List[List[int]]:
    """ Helper function to retrieve keys from the actual qcrypto binary key files.
    This function will parse the qcrypto binary files appropriately and return the keys in integer representation.
    It will also update the header files appropriately after consuming the keys.
    See: `qcrypto github filespec.txt <https://github.com/kurtsiefer/qcrypto/blob/master/remotecrypto/filespec.txt>`_.
    Parameters
    ----------
    key_file_path: str
        Path to directory containing qcrypto key files.
    number: int
        Number of keys requested
    num_key_in_each: int
        How many 32bit keys each key is made of.
    Returns
    -------
    2D list
        List of keys in decimal (integer) form. Each row represents one key and has ``num_key_in_each`` integers,
        and there are ``number`` rows.
    """
    keys_retrieved = np.array([], dtype=int)
    tot_keys_to_retrieve = number * num_key_in_each
    # Consume key files in sorted (i.e. oldest-first by name) order until
    # enough 32-bit key units have been gathered.
    while tot_keys_to_retrieve > 0:
        sorted_key_files = sorted(os.listdir(key_file_path))
        # NOTE(review): raises IndexError if the directory is empty --
        # confirm callers guarantee sufficient key material.
        key_file_name = sorted_key_files[0] # Retrieve first key file in sorted list
        _key_file_path = os.path.join(key_file_path, key_file_name)
        with open(_key_file_path, 'rb') as f:
            key_file = np.fromfile(file=f, dtype='<u4')  # little-endian uint32
        os.remove(_key_file_path) # Delete the file. Rewrite back modified file later
        header = key_file[:4] # Header has 4 elements. The rest are key material.
        keys_available = key_file[4:]
        len_of_key_file = len(keys_available)
        if len_of_key_file >= tot_keys_to_retrieve: # Sufficient keys in this file alone
            keys_retrieved = np.concatenate([keys_retrieved, keys_available[:tot_keys_to_retrieve]])
            keys_available = keys_available[tot_keys_to_retrieve:] # Remaining keys
            header[3] -= tot_keys_to_retrieve # Update header about number of keys in this file left
            tot_keys_to_retrieve = 0
            # Write updated file back with the same name
            key_file = np.concatenate([header, keys_available])
            key_file.tofile(_key_file_path)
        else:
            # File exhausted: take everything and move on to the next file
            # (the file stays deleted since no key material remains).
            keys_retrieved = np.concatenate([keys_retrieved, keys_available[:]])
            tot_keys_to_retrieve -= len_of_key_file
    keys_retrieved = keys_retrieved.reshape(number, num_key_in_each) # reshape to 2D array # return as list
    return keys_retrieved.tolist() # return as list
def retrieve_keys_given_uuid(uuid_array: List[List[str]], key_file_path: str) -> List[List[int]]:
    """ Helper function to retrieve keys given the UUIDs of the keys.
    Scans the qcrypto key files, pulls out every 32-bit key whose derived
    UUID appears in ``uuid_array``, removes those keys from the files, and
    returns the keys in the positions their UUIDs occupied.
    Parameters
    ----------
    uuid_array: 2D list
        2D list of strings. Each row represents a single key, and the elements in the row are UUIDs of the individual
        keys that concatenate to make the full key.
    key_file_path: str
        Path to directory containing qcrypto key files.
    Returns
    -------
    2D list
        List of keys in decimal (integer) form. Each row represents a single key, and each element in a row is the
        actual key that will be eventually concatenated to form the final key.
    Raises
    ------
    KeyError
        If any requested UUID cannot be found in the available key files.
    """
    uuid_array = np.array(uuid_array, dtype=str) # convert to numpy array for easier manipulation
    keys_retrieved = np.zeros_like(uuid_array, dtype=int)
    sorted_key_files = sorted(os.listdir(key_file_path))
    for key_file_name in sorted_key_files:
        _key_file_path = os.path.join(key_file_path, key_file_name)
        with open(_key_file_path, 'rb') as f:
            key_file = np.fromfile(file=f, dtype='<u4')  # little-endian uint32
        os.remove(_key_file_path)  # rewritten below with the found keys removed
        header = key_file[:4]
        keys_available = key_file[4:]
        keys_to_remove = [] # store index of keys to remove if found to have matching UUIDS
        count = 0 # count how many keys are being removed this file
        for index, key in enumerate(keys_available): # iterate over every key in the key file
            uuid_ = convert_int_to_uuid(key) # convert each key to UUID
            if uuid_ in uuid_array: # if the UUID matches
                item_index = np.where(uuid_array == uuid_)
                row, col = item_index[0][0], item_index[1][0] # get the index of the first occurrence, incase of repeat
                count += 1
                keys_retrieved[row][col] = key
                keys_to_remove.append(index)
                uuid_array[row][col] = '0' # 0 to signify it has been retrieved
                if np.all(uuid_array == '0'): # if uuid_array is all 0, stop searching as you have found all keys
                    break
        keys_available = np.delete(keys_available, keys_to_remove)
        # Keep the header's remaining-key count in sync with the removals.
        header[3] -= count
        key_file = np.concatenate([header, keys_available])
        key_file.tofile(_key_file_path)
        if np.all(uuid_array == '0'):
            return keys_retrieved.tolist()
    raise KeyError # if it hasn't returned after looping over all key files, then the key(s) cant be found
def int_to_bitstring(x: int) -> str:
    """ Convert an integer to its binary string, zero-padded on the left to a
    minimum width of 32 bits.

    Integers needing more than 32 bits are returned at their natural width
    (no truncation).

    Parameters
    ----------
    x: int
        Integer to convert to a bitstring.

    Returns
    -------
    str
        Bitstring of AT LEAST 32 bits.

    Notes
    -----
    .. highlight:: python
    .. code-block:: python
        >>> int_to_bitstring(2)
        00000000000000000000000000000010
        >>> int_to_bitstring(9999999999999)
        10010001100001001110011100101001111111111111
    """
    return format(x, '032b')
def bin_to_int(x: str) -> int:
    """ Parse a binary string (e.g. ``'1010'``) into its integer value.

    Parameters
    ----------
    x: str
        Binary string to parse.

    Returns
    -------
    int
        The integer the bitstring represents.
    """
    return int(x, base=2)
def concat_two_int(int1: int, int2: int) -> int:
    """ Concatenate two integers `in their (at least) 32-bit binary form`.

    Parameters
    ----------
    int1, int2: int
        Two integers to concatenate.

    Returns
    -------
    int
        Concatenated integer.

    Notes
    -----
    Example: 123 -> ``00000000000000000000000001111011`` and
    456 -> ``00000000000000000000000111001000``; the joined bitstring read
    back as an integer is 528280977864.
    """
    # Join the two (>=32-bit) bitstrings and read the result back as one int.
    return bin_to_int(int_to_bitstring(int1) + int_to_bitstring(int2))
def int_to_base64(x: int) -> str:
    """ Convert an integer to a base64 string.

    Helpful for converting an encryption key, often a large base-10 integer,
    into the base64 format required by the ETSI QKD API.

    Parameters
    ----------
    x: int
        Integer to convert to base 64.

    Returns
    -------
    str
        Corresponding string in base64.
    """
    # Pack into big-endian bytes, at least 4 bytes (the module-wide 32-bit
    # minimum); this mirrors bitstring_to_bytes(int_to_bitstring(x)).
    n_bytes = max(4, (x.bit_length() + 7) // 8)
    return base64.b64encode(x.to_bytes(n_bytes, byteorder='big')).decode()
def bitstring_to_bytes(s: str) -> bytes:
    """ Convert a bitstring into a bytes object.

    Needed because some libraries (notably base64) operate on bytes objects
    rather than on UTF-8/ASCII strings.

    Parameters
    ----------
    s: str
        Bitstring to convert.

    Returns
    -------
    bytes
        Big-endian bytes holding the bit pattern of *s*.
    """
    value = int(s, 2)
    n_bytes = (len(s) + 7) // 8  # round the bit count up to whole bytes
    return value.to_bytes(n_bytes, byteorder='big')
def int_to_bytes(x: int) -> bytes:
    """ Convert an integer to a bytes object (via its >=32-bit bitstring).

    Parameters
    ----------
    x: int
        Integer to convert.

    Returns
    -------
    bytes
        Corresponding integer in bytes format (big-endian, at least 4 bytes).
    """
    return bitstring_to_bytes(int_to_bitstring(x))
def convert_int_to_uuid(x: int) -> str:
    """Derive a deterministic UUID from integer seed *x*.

    The integer is hashed with `hashlib.shake_128 <https://docs.python.org/3/library/hashlib.html#shake-variable-length-digests>`_
    to obtain the 128 bits the `uuid <https://docs.python.org/3/library/uuid.html>`_
    library needs, so equal integers always map to the same UUID.

    Parameters
    ----------
    x: int
        Integer used as the seed.

    Returns
    -------
    str
        UUID in string format.
    """
    hasher = shake_128(int_to_bytes(x))
    return str(uuid.UUID(bytes=hasher.digest(16)))  # 16 bytes == 128 bits
def flatten_2d_list(l: List[List]) -> List:
    """ Flatten a 2D list into a single 1D list.

    Parameters
    ----------
    l: 2D list
        List of lists to flatten.

    Returns
    -------
    1D list
        All elements of the sublists, in their original order.
    """
    flattened = []
    for sublist in l:
        flattened.extend(sublist)
    return flattened
|
import requests
from twilio.rest import Client
import config
# Twilio credentials are read from a local config module (kept out of VCS).
account_sid = config.twilio_account_sid
auth_token = config.twilio_auth_token
# OpenWeatherMap "One Call" endpoint and API key.
OWM_Endpoint = "https://api.openweathermap.org/data/2.5/onecall"
api_key = config.OWM_api_key
# Fixed coordinates; only the hourly forecast is requested.
parameters = {
    "lat": 41.14961,
    "lon": -8.61099,
    "exclude": "current,minutely,daily,alerts",
    "appid": api_key
}
response = requests.get(OWM_Endpoint, params=parameters)
response.raise_for_status()
weather_data = response.json()
# Only the next 12 hours of forecast are considered.
hourly_weather = weather_data["hourly"][:12]
will_rain = False
for hour_data in hourly_weather:
    condition_code = hour_data["weather"][0]["id"]
    #print(condition_code)
    # NOTE(review): codes strictly below 531 are treated as precipitation;
    # this excludes code 531 itself and everything above it -- confirm the
    # intended threshold against the OWM condition-code table.
    if int(condition_code) < 531:
        will_rain = True
if will_rain:
    # Send an SMS reminder via Twilio.
    client = Client(account_sid, auth_token)
    message = client.messages \
        .create(
        body="It's going to rain 🌧️ today, remember to bring an umbrella ☂️",
        from_='+15593153947',
        to=config.my_phone_num
    )
    print(message.status)
|
#!/usr/bin/python3
import numpy as np
import os
import matplotlib.pyplot as plt
import logging
import RGreedy
def eval(sr_info_res, G_res, PL, params):
	'''
	Evaluate the PDR and lifetime of the solution

	NOTE(review): this function shadows the builtin ``eval``; renaming would
	break existing callers, so it is only flagged here.

	Args:
		sr_info_res: generated sensor/end device configuration
		G_res: generated gateway placement
		PL: path loss matrix
		params: important parameters
	Return:
		PDR: an array of PDR at each end device
		PDR_gw: a matrix of PDR between end device-gateway pair
		lifetime: an array of lifetime at each end device
	'''
	sr_cnt = sr_info_res.shape[0]
	SF_cnt = len(params.SF)
	CH_cnt = len(params.CH)
	# Init N_kq: the number of nodes using the same SF and channel
	N_kq = dict()
	for k in range(SF_cnt):
		for q in range(CH_cnt):
			N_kq[str(k) + '_' + str(q)] = []
	# Fill in N_kq
	# (column 2 holds the assigned SF index, column 4 the channel index;
	# -1 in either marks an unassigned device and is skipped)
	for idx in range(sr_cnt):
		k = int(sr_info_res[idx, 2]) # SFk
		q = int(sr_info_res[idx, 4]) # Channel q
		if k != -1 and q != -1:
			label = str(k) + '_' + str(q)
			N_kq[label].append(sr_info_res[idx, :])
	PDR, PDR_gw, lifetime = [], [], []
	for idx in range(sr_cnt):
		k = int(sr_info_res[idx, 2]) # SFk
		q = int(sr_info_res[idx, 4]) # Channel q
		if k != -1 and q != -1:
			newPDR, newPDR_gw = RGreedy.GetPDR(sr_info_res, G_res, PL, N_kq, params, idx)
			PDR.append(newPDR)
			PDR_gw.append(newPDR_gw)
			# Find the index of closet Transmission power
			Ptx_idx = np.abs(params.Ptx - sr_info_res[idx, 3]).argmin()
			lifetime.append(RGreedy.GetLifetime(k, params.Ptx[Ptx_idx], newPDR, params))
	#print(np.array(PDR_gw))
	#print(PDR)
	#print(lifetime)
	return PDR, PDR_gw, lifetime
def plot(sr_info, G, method):
    """Visualize the placement: end devices colored by SF, gateways as
    red triangles sized by their selection flag; saved to ./vis/."""
    palette = ['tab:blue', 'tab:orange', 'tab:green', 'tab:purple', 'tab:gray']
    plt.figure()
    device_colors = [palette[int(sf)] for sf in list(sr_info[:, 2])]
    plt.scatter(sr_info[:, 0], sr_info[:, 1], c=device_colors, s=5)
    gateway_colors = ['r'] * G.shape[0]
    plt.scatter(G[:, 0], G[:, 1], s=G[:, 2]*50, c=gateway_colors, marker='^')
    plt.xlabel('X (m)')
    plt.ylabel('Y (m)')
    # plt.legend()
    if not os.path.exists('vis'):
        os.makedirs('vis')
    plt.savefig('./vis/vis_{}.png'.format(method))
    # plt.show()
def SaveInfo(sr_info, G, PL, method, params, DataParams):
    '''
    Save the generated solution to text files under ./res/

    Args:
        sr_info: generated sensor/end device configuration solution
        G: generated gateway placement solution
        PL: path losses between end devices and selected gateways
        method: a string showing the algorithm, added to the file names
        params, DataParams: parameters on experiments and datasets
    '''
    if not os.path.exists('res'):
        os.makedirs('res')
    sr_cnt = sr_info.shape[0]
    gw_cnt = G.shape[0]
    # End-device file: "x y DR Ptx". The SF column is converted to a
    # LoRaWAN data rate: SF7 -> DR3, SF8 -> DR2, SF9 -> DR1, SF10 -> DR0.
    with open('./res/sr_{}.txt'.format(method), 'w') as out:
        for i in range(sr_cnt):
            fields = [str(round(sr_info[i, 0], 2)),
                      str(round(sr_info[i, 1], 2)),
                      str(int(3 - sr_info[i, 2])),
                      str(round(sr_info[i, 3]))]
            out.write(' '.join(fields) + '\n')
    # Gateway file: "x y" for every selected gateway (G[i, 2] nonzero).
    with open('./res/gw_{}.txt'.format(method), 'w') as out:
        for i in range(gw_cnt):
            if G[i, 2]:
                out.write(str(round(G[i, 0], 2)) + ' ' +
                          str(round(G[i, 1], 2)) + '\n')
    # If the PL file is not provided, extract the ground-truth PL like the
    # initialization function init() does. This is for making deployment
    # decisions under the isomorphic PL but simulating under the real PL,
    # to see the difference due to PL.
    if DataParams.dataLoc and not DataParams.PL:
        PL = np.load(DataParams.PLFile).T
        PL = PL + 10.0
    with open('./res/pl_{}.txt'.format(method), 'w') as out:
        for i in range(sr_cnt):
            for j in range(gw_cnt):
                if G[j, 2]:
                    out.write(str(round(PL[i, j], 6)) + ' ')
            out.write('\n')
def SaveRes(method, sr_cnt, M, gw_cnt, time):
    """Append one result record to res/res.txt:
    method, device count, M, gateway count, runtime."""
    if not os.path.exists('res'):
        os.makedirs('res')
    record = ' '.join([method, str(sr_cnt), str(M), str(gw_cnt), str(time)])
    with open('res/res.txt', 'a') as out:
        out.write(record + '\n')
# NOTE(review): distutils is deprecated and removed in Python 3.12;
# `from setuptools import setup` is the recommended replacement.
from distutils.core import setup

# Read the long description from the README so the package index shows it.
# BUG FIX: encoding is now explicit so the build does not fail on systems
# whose locale default is not UTF-8.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setup(
    name="HW_10",
    version="0.1.0",
    author="Olga",
    author_email="Olga@example.com",
    description="Creation of packages for projects HW8 and HW9",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/CI1100/HW_10",
    # NOTE(review): `license` conventionally holds the license *name*
    # (e.g. "MIT"), not a file path — confirm intent.
    license='LICENSE.txt',
    packages=['final_project_packages', 'final_project_packages.test'],
    scripts=['bin/HW_8.py', 'bin/kmeans_iris.py'],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
)
|
from .controllers import CoinbasePro, Kucoin, Local
class CryptoDockApi:
    """Entry point bundling the exchange API controllers.

    Builds the base API URI from the host/port/version in Args and hands
    it to one controller per backend (Local, Kucoin, CoinbasePro).
    """
    def __init__(self, Args):
        host = Args.API_HOST
        port = Args.API_PORT
        version = Args.API_VERSION
        self.base = host
        self.port = port
        self.version = version
        self.uri = "http://{}:{}/api/{}".format(host, port, version)
        self.Local = Local(self.uri)
        self.Kucoin = Kucoin(self.uri)
        self.CoinbasePro = CoinbasePro(self.uri)
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import os
import argparse
import time
import sys
from f3.regex_counter import RegexCounter
from f3.filter_counter import FilterCounter
# chardet is optional: without it, detection is skipped and the input is
# assumed to be UTF-8 (see _choose_encoding).
try:
    from chardet.universaldetector import UniversalDetector
except ImportError:
    print('WARNING: failed to load package "chardet". '
          'Assuming the file is UTF-8 encoded.')
    time.sleep(1)
# Encodings decode() knows how to handle; anything else falls back to UTF-8.
SUPPORTED_ENCODINGS = ['windows-1252', 'ISO-8859-1', 'ascii',
                       'utf-8', 'utf-16', 'utf-32']
def _detect_encoding(source_path):
    '''Use the chardet library to make a guess at the encoding.
    See http://chardet.readthedocs.io/en/latest/usage.html

    Returns a (encoding_name, confidence) tuple as reported by chardet.
    '''
    print('Detecting encoding', end='')
    detector = UniversalDetector()
    # BUG FIX: iterate the file lazily inside a `with` block. The old code
    # called readlines(), loading the entire file into memory even though
    # detection stops after ~40000 lines, and leaked the handle on error.
    with open(source_path, 'rb') as infile:
        for i, line in enumerate(infile):
            detector.feed(line)
            if i % 1000 == 0:
                # Progress dots so large files don't look hung.
                print('.', end='')
                sys.stdout.flush()
            if detector.done or i > 40000:
                break
    detector.close()
    result = detector.result
    print('\nInput file is encoded using {0} with confidence {1}'
          .format(result['encoding'], result['confidence']))
    return result['encoding'], result['confidence']
def _choose_encoding(source_path):
    '''Pick a text encoding for source_path.

    Uses chardet when it was importable, otherwise assumes UTF-8.
    Unsupported or undetectable encodings fall through to an
    interactive prompt.
    '''
    if 'chardet' in sys.modules:
        encoding, confidence = _detect_encoding(source_path)
    else:
        # BUG FIX: was 'UTF-8', which failed the case-sensitive membership
        # test against 'utf-8' and forced a pointless interactive prompt.
        encoding = 'utf-8'
    # Compare case-insensitively: chardet reports names such as 'UTF-8'.
    # Also guard against chardet returning None when detection fails.
    supported = {name.lower() for name in SUPPORTED_ENCODINGS}
    while encoding is None or encoding.lower() not in supported:
        print('Encoding {0} is not supported. '
              'Enter a supported encoding to continue, '
              'or press ENTER to exit.'.format(encoding))
        print('Supported encodings: ' + ', '.join(SUPPORTED_ENCODINGS))
        entry = input('\n')
        if entry == '':
            sys.exit(0)
        encoding = entry
    return encoding
def decode(raw_data, encoding):
    '''Decode raw bytes using `encoding`, falling back to UTF-8 for any
    name outside the supported set.

    See: https://docs.python.org/3/library/codecs.html#standard-encodings
    '''
    # The original if-chain dispatched to identical decode() calls; a
    # single call with a validated codec name is equivalent.
    known = ('ascii', 'windows-1252', 'iso-8859-1', 'utf-16', 'utf-32')
    encoding = encoding.lower()
    if encoding not in known:
        encoding = 'utf-8'
    return raw_data.decode(encoding)
def _find_freqs(source_path, args):
    """Tokenize source_path and return (freqs, filtered_freqs): two lists
    of (word, count) pairs, each sorted by descending count."""
    with open(source_path, 'rb') as infile:
        raw_data = infile.read()
    decoded_data = decode(raw_data, _choose_encoding(source_path))
    # FilterCounter additionally separates suspected junk/HTML tokens.
    counter = FilterCounter() if args.filter_junk else RegexCounter()
    freqs, filtered_freqs = counter.tokenize_and_count(decoded_data,
                                                       args.ignore_case)
    def by_count(item):
        return item[1]
    return (sorted(freqs.items(), key=by_count, reverse=True),
            sorted(filtered_freqs.items(), key=by_count, reverse=True))
def _write_freqs(freqs, dest_path):
with open(dest_path, 'w', encoding='utf-8') as outfile:
for word, freq in freqs:
outfile.write('{0}\t{1}\n'.format(word, freq))
print('Wrote to {0} successfully'.format(dest_path))
def _write_freqs_and_filtered_freqs(freqs, filtered_freqs, dest_path):
with open(dest_path, 'w', encoding='utf-8') as outfile:
for word, freq in freqs:
outfile.write('{0}\t{1}\n'.format(word, freq))
with open(dest_path + '.filtered', 'w', encoding='utf-8') as outfile:
for word, freq in filtered_freqs:
outfile.write('{0}\t{1}\n'.format(word, freq))
print('Wrote to {0} and {1} successfully'
.format(dest_path, dest_path + '.filtered'))
def main():
    '''Command-line entry point: count word frequencies in a text file
    and write them to a TSV file (plus a .filtered file with -f).'''
    parser = argparse.ArgumentParser()
    parser.add_argument('source',
                        help='text file')
    parser.add_argument('dest',
                        help='the TSV file frequencies will be written to')
    parser.add_argument('-i', '--ignore-case',
                        action='store_true',
                        default=False,
                        help='ignore case for determining word equivalence')
    parser.add_argument('-f', '--filter-junk',
                        action='store_true',
                        default=False,
                        # BUG FIX: the adjacent string literals had no
                        # separating spaces ("interspersedwith HTML...").
                        help='Assume that the input text is interspersed '
                             'with HTML and English. Attempt to parse out '
                             'the HTML, and print out a separate list of '
                             'all suspected non-target language words')
    args = parser.parse_args()
    source_path = args.source
    dest_path = args.dest
    if not os.access(source_path, os.R_OK):
        print('File "{0}" does not exist or you lack permission to read it.'
              .format(source_path))
        # sys.exit instead of the site-provided exit(), which can be absent.
        sys.exit(1)
    freqs, filtered_freqs = _find_freqs(source_path, args)
    if args.filter_junk:
        _write_freqs_and_filtered_freqs(freqs, filtered_freqs, dest_path)
    else:
        _write_freqs(freqs, dest_path)


if __name__ == '__main__':
    main()
|
import numpy as np
import pymc3 as pm
import theano
import arviz as az
from arviz.utils import Numba
from scipy.stats import mode
import theano.tensor as tt
Numba.disable_numba()
Numba.numba_flag
floatX = theano.config.floatX
# For creating toy data
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, accuracy_score
sns.set_style("white")
# # ----------------------------- MNIST data load ---------------------------
# Importing the training data set (whitespace-separated floats)
trainX_clean=np.genfromtxt("Python_code/data/MNIST-Train-cropped.txt")
# reshaping column-major ("F") to form a 784 x 10000 matrix
# (one 28x28 image per column)
trainX_clean=trainX_clean.reshape(784,10000, order="F")
# Importing the training labels
trainY_clean=np.genfromtxt("Python_code/data/MNIST-Train-Labels-cropped.txt")
# Importing the test data set
test_data_clean=np.genfromtxt("Python_code/data/MNIST-Test-cropped.txt")
# reshaping to form a 784 x 2000 matrix
test_data_clean=test_data_clean.reshape(784,2000, order = "F")
# Importing the test labels
test_labels_clean=np.genfromtxt("Python_code/data/MNIST-Test-Labels-cropped.txt")
# # plot images
# num_row = 6 # number of rows in plot
# num_col = 6 # number of columns in plot
# fig, axes = plt.subplots(num_row, num_col, figsize=(1.5*num_col,2*num_row))
# for i in range(0,36):
#     ax = axes[i//num_col, i%num_col]
#     ax.imshow(trainX_clean[:,i].reshape(28,28,order="F"), cmap='Blues')
#     ax.set_title('Label: {}'.format(trainY_clean[i]))
# plt.tight_layout()
# plt.show()
# Use only the first 50 samples to keep sampling tractable
trainX=trainX_clean[:,0:50]
trainY=trainY_clean[0:50]
testX=test_data_clean[:,0:50]
testY=test_labels_clean[0:50]
# Transpose to (samples, features) for the NN model
trainX=trainX.T
# NOTE(review): assigned to `test`, but later code reads the untransposed
# global `testX` — verify which one downstream code should use.
test=testX.T
# ------------------- Defining a BNN function ---------------------------------
def construct_nn(ann_input, ann_output, n_hidden=5):
    """Build a Bayesian neural network with two tanh hidden layers.

    Args:
        ann_input: training inputs, shape (n_samples, n_features).
        ann_output: training labels, fed to the Multinomial likelihood.
        n_hidden: number of units in each hidden layer.

    Returns:
        The constructed pm.Model (use as a context manager for sampling).
    """
    # Random starting points for the weight matrices (testval only seeds
    # the sampler/optimizer; the priors below remain N(0, 1)).
    init_1 = np.random.randn(ann_input.shape[1], n_hidden).astype(floatX)
    init_2 = np.random.randn(n_hidden, n_hidden).astype(floatX)
    init_out = np.random.randn(n_hidden, ann_output.shape[0]).astype(floatX)
    with pm.Model() as neural_network:
        # BUG FIX: the original wrapped the globals trainX/trainY here,
        # silently ignoring the ann_input/ann_output arguments.
        ann_input = pm.Data("ann_input", ann_input)
        ann_output = pm.Data("ann_output", ann_output)
        # Input -> Layer 1
        weights_1 = pm.Normal('w_1', mu=0, sd=1,
                              shape=(init_1.shape[0], n_hidden),
                              testval=init_1)
        acts_1 = pm.Deterministic('activations_1', tt.tanh(tt.dot(ann_input, weights_1)))
        # Layer 1 -> Layer 2
        weights_2 = pm.Normal('w_2', mu=0, sd=1,
                              shape=(n_hidden, n_hidden),
                              testval=init_2)
        acts_2 = pm.Deterministic('activations_2', tt.tanh(tt.dot(acts_1, weights_2)))
        # Layer 2 -> output layer (softmax over classes)
        weights_out = pm.Normal('w_out', mu=0, sd=1,
                                shape=(n_hidden, init_out.shape[1]),
                                testval=init_out)
        acts_out = pm.Deterministic('activations_out',
                                    tt.nnet.softmax(tt.dot(acts_2, weights_out)))
        # BUG FIX: the observed RV is named 'out' (was 'likelihood') so the
        # downstream code reading prediction['out'] finds its samples.
        out = pm.Multinomial('out', n=1, p=acts_out,
                             observed=ann_output)
    return neural_network
# # ----------------------------- Making predictions ---------------------------
# Construct the Bayesian NN from the 50-sample training subset
neural_network = construct_nn(trainX, trainY, n_hidden=20)
# Sample from the posterior using the NUTS sampler
with neural_network:
    trace = pm.sample(draws=5000, tune=1000, cores=2, chains=1)
# Visualize the trace
with neural_network:
    az.plot_trace(trace)
with neural_network:
    inference = pm.ADVI() # approximate inference done using ADVI
    approx = pm.fit(10000, method=inference)
    trace = approx.sample(500)
# Draw from the posterior predictive distribution
prediction=pm.sample_posterior_predictive(trace, model=neural_network)
# NOTE(review): the key must match the name of the observed variable
# declared in construct_nn — verify 'out' exists in the model.
# Relative frequency of predicting class 1
pred = prediction['out'].mean(axis=0)
# Returns the most common value in array (majority vote)
y_pred = mode(prediction['out'], axis=0).mode[0, :]
# Accuracy
print('Accuracy on train data = {}%'.format(accuracy_score(trainY, y_pred) * 100))
# Probability surface
# Replace shared variables with testing set
# NOTE(review): testX is 784 x 50 here while training used transposed
# data — confirm the intended orientation.
pm.set_data(new_data={"ann_input": testX, "ann_output": testY}, model=neural_network)
# Create posterior predictive samples
ppc = pm.sample_posterior_predictive(trace, model=neural_network, samples=500)
# Use probability of > 0.5 to assume prediction of class 1
pred = ppc['out'].mean(axis=0) > 0.5
print('Accuracy on test data = {}%'.format((testY == pred).mean() * 100))
|
#!/usr/bin/python
import os
import wx
import sys
#import shutil
import pyFileOps.file as fops
# developed by help of : https://wiki.wxpython.org/AnotherTutorial
class MyTextDropTarget(wx.TextDropTarget):
    """Drop target that inserts dropped text into the wrapped widget."""
    def __init__(self, object):
        # `object` (name shadows the builtin) is the list-control-like
        # widget that receives the dropped text.
        wx.TextDropTarget.__init__(self)
        self.object = object
    def onDropText(self, x, y, data):
        # Insert the dropped text as the first row of the widget.
        self.object.InsertStringItem(0, data)
class MyPopupMenu(wx.Menu):
    """Right-click context menu for the result list: open the selected
    hit as a file or open its containing folder."""
    def __init__(self, app):
        # `app` is the frame holding found_items / i_select / txtDir.
        wx.Menu.__init__(self)
        self.app = app
        item1 = wx.MenuItem(self, wx.NewId(), "Open File"); self.AppendItem(item1); self.Bind(wx.EVT_MENU, self.onOpenFile, item1)
        item2 = wx.MenuItem(self, wx.NewId(), "Open Folder"); self.AppendItem(item2); self.Bind(wx.EVT_MENU, self.onOpenFolder, item2)
    def osOpen(self, full_path ):
        # Open a path with the platform's default handler.
        # NOTE(review): 'linux2' only matches Python 2 on Linux and 'start'
        # is Windows-specific; the path is also not shell-quoted, so paths
        # with spaces/metacharacters will break — consider subprocess.
        if sys.platform == 'linux2':
            os.system('xdg-open %s' %full_path)
        else:
            os.system('start %s' %full_path)
    def onOpenFile(self, event):
        # Open the currently selected search hit as a file.
        print "Open File : ", self.app.i_select
        item = self.app.found_items[self.app.i_select][0]; # print "external open : ", item
        abspath = os.path.join(self.app.txtDir.GetValue(), item[1])
        full_path = os.path.join(abspath, item[0]); # print full_path
        self.osOpen( full_path )
    def onOpenFolder(self, event):
        # Open the folder containing the currently selected search hit.
        print "Open Folder : ", self.app.i_select
        item = self.app.found_items[self.app.i_select][0]; # print "external open : ", item
        abspath = os.path.join(self.app.txtDir.GetValue(), item[1])
        self.osOpen( abspath )
class MyFrame(wx.Frame):
    """Main window: directory tree + search fields on the left, the list
    of matching files and a preview pane (with highlighted matches) on
    the right."""
    #root_path = ""
    #includes=['.c','.cpp','.h','.cl','.py']
    #excludes=['.png','.bmp','.jpg','.jpeg','.tif','.tiff','.gif']
    #search_str = 'virtual'
    def __init__(self, parent, id, title):
        wx.Frame.__init__(self, parent, id, title, wx.DefaultPosition, wx.Size(640, 480))
        # Nested splitters: splitter1 divides left (tree+fields) from
        # right (results+preview); splitter2/3 split those vertically.
        splitter1 = wx.SplitterWindow(self , -1, style=wx.SP_3D, size=(100,100))
        splitter2 = wx.SplitterWindow(splitter1, -1, style=wx.SP_3D, size=(100,100))
        splitter3 = wx.SplitterWindow(splitter1, -1, style=wx.SP_3D, size=(100,100))
        panel = wx.Panel(splitter3, -1, size=(100,100) )
        box = wx.BoxSizer(wx.VERTICAL)
        # Search controls: root directory, extension filter, search string.
        self.txtDir = wx.TextCtrl(panel, -1); self.txtDir.SetValue( '/home/prokop/Dropbox/MyDevSW' )
        self.txtExt = wx.TextCtrl(panel, -1); self.txtExt.SetValue('*.c;*.cpp;*.h;*.cl;*.py')
        self.txtStr = wx.TextCtrl(panel, -1); self.txtStr.SetValue('virtual')
        box.Add(self.txtDir, 1, wx.EXPAND | wx.ALL, 3)
        box.Add(self.txtExt, 1, wx.EXPAND | wx.ALL, 3)
        box.Add(self.txtStr, 1, wx.EXPAND | wx.ALL, 3)
        panel.SetSizer(box)
        self.dir = wx.GenericDirCtrl(splitter3, -1, dir=self.txtDir.GetValue(), size=(500,100), style=wx.DIRCTRL_DIR_ONLY)
        #self.lc1 = wx.ListCtrl(splitter2, -1, size=(100,100), style=wx.LC_LIST)
        # Result list: one row per matching file (file name, path).
        self.lc1 = wx.ListCtrl(splitter2, -1, size=(200,100), style=wx.LC_REPORT)
        self.lc1.InsertColumn(0, 'file'); self.lc1.SetColumnWidth(0, 200)
        self.lc1.InsertColumn(1, 'path'); self.lc1.SetColumnWidth(1, 300)
        # Preview pane; TE_RICH is needed for per-range SetStyle highlights.
        self.text = wx.TextCtrl(splitter2, 1000, '', size=(100, 100), style=wx.TE_MULTILINE | wx.TE_RICH | wx.TE_PROCESS_ENTER)
        #self.text.SetFocus()
        # http://stackoverflow.com/questions/3570254/in-wxpython-how-do-you-bind-a-evt-key-down-event-to-the-whole-window
        # http://stackoverflow.com/questions/8707160/wxpython-capture-keyboard-events-in-a-wx-frame
        # http://stackoverflow.com/questions/3570254/in-wxpython-how-do-you-bind-a-evt-key-down-event-to-the-whole-window
        self.Bind(wx.EVT_CHAR_HOOK, self.onKeyDown)
        #self.Bind(wx.EVT_KEY_DOWN, self.onKeyDown)
        wx.EVT_LIST_ITEM_SELECTED(self, self.lc1.GetId(), self.onFileSelect )
        tree = self.dir.GetTreeCtrl()
        splitter2.SplitHorizontally(self.lc1, self.text)
        splitter3.SplitHorizontally(panel, self.dir)
        splitter1.SplitVertically (splitter3, splitter2)
        wx.EVT_TREE_SEL_CHANGED(self, tree.GetId(), self.onFolderSelect )
        self.Centre()
        self.lc1.Bind(wx.EVT_RIGHT_DOWN, self.onRightDown)
        self.popup = MyPopupMenu(self)
    def onKeyDown(self, event):
        # ENTER anywhere in the window re-runs the search.
        keycode = event.GetKeyCode()
        print keycode
        if keycode == wx.WXK_RETURN:
            self.searchPath()
        event.Skip()
    def onRightDown(self,event):
        # Show the context menu at the mouse position.
        #pos = event.GetPosition() # for some reason returns position relative to lc1
        pos = self.ScreenToClient( wx.GetMousePosition() ) # a bit hack
        self.PopupMenu(MyPopupMenu(self), pos )
    def onFileSelect(self, event ):
        # Load the selected file into the preview pane and highlight
        # every occurrence of the search string.
        #i = len(self.found_items) - event.GetIndex() -1 # why like that? - probably list filled last-on-top; numbered from 1
        i = event.GetIndex()
        self.i_select = i
        item = self.found_items[i]
        #print i," ",item
        abspath = os.path.join( self.txtDir.GetValue(), item[0][1])
        full_path = os.path.join(abspath, item[0][0])
        print full_path
        self.text.LoadFile( full_path )
        self.text.SetInsertionPoint(0)
        nch = len(self.txtStr.GetValue())
        # item[1] holds the character offsets of each match in the file.
        for ichar in item[1]:
            self.text.SetStyle(ichar, ichar+nch , wx.TextAttr("black", "yellow"))
    def onFolderSelect(self, event):
        # Selecting a folder in the tree updates the root and re-searches.
        self.txtDir.SetValue( self.dir.GetPath() )
        self.searchPath()
    def searchPath(self):
        # Run the file search under the current root with the current
        # extension filter and search string, then refill the list.
        path = self.txtDir.GetValue()
        self.includes = self.txtExt.GetValue().split(";") #print self.includes
        self.search_str = self.txtStr.GetValue() #print self.includes
        items = fops.path2list_filter( path, include=self.includes, echoPerNFiles=100 )
        found_is, founds = fops.searchInFiles ( items, path, self.search_str )
        #self.lc1.ClearAll()
        self.lc1.DeleteAllItems()
        #print "found %i items" %len(found_is)
        found_items = []
        for ii,i in enumerate(found_is):
            print items[i][0]
            #self.lc1.InsertStringItem(0, items[i][0] )
            num_items = self.lc1.GetItemCount()
            self.lc1.InsertStringItem(num_items, items[i][0] )
            self.lc1.SetStringItem (num_items, 1, items[i][1] )
            #self.lc1.InsertStringItem(num_items, "Hey" )
            #self.lc1.SetStringItem (num_items, 1, "How" )
            found_items.append( (items[i],founds[ii]) )
        self.found_items = found_items
class MyApp(wx.App):
    """Application object: creates and shows the main browser frame."""
    def OnInit(self):
        main_frame = MyFrame(None, -1, "BrowseNoter")
        main_frame.Show(True)
        self.SetTopWindow(main_frame)
        return True
if __name__ == "__main__":
    # 0 = do not redirect stdout/stderr to a wx window.
    app = MyApp(0)
    app.MainLoop()
|
"""create cases_per_county_and_day materialized view
Revision ID: a9ca657b90f5
Revises: f8791d49d830
Create Date: 2020-11-26 15:25:59.925681
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = 'a9ca657b90f5'
down_revision = 'f8791d49d830'
branch_labels = None
depends_on = None
def upgrade():
    """Create the `cases_per_county_and_day` materialized view.

    Aggregates raw RKI rows from `cases_current` into one row per county
    (Landkreis) and day: cumulative case/death counts with per-age-group
    breakdowns, population figures, 7-day incidence, and DIVI bed-capacity
    numbers joined in. Berlin's district ids ('11___') are folded into
    '11000'. Reverted by downgrade().
    """
    # The view definition is raw SQL executed against the current bind.
    op.get_bind().execute("""
    create materialized view cases_per_county_and_day as
    WITH fixed_idlandkres AS (
        SELECT t.datenbestand,
           t.idbundesland,
           t.bundesland,
           t.objectid,
           t.meldedatum,
           t.gender,
           t.agegroup,
           t.casetype,
           t.id,
           CASE
               WHEN t.idlandkreis::text ~~ '11___'::text THEN '11000'::character varying
               ELSE t.idlandkreis
               END  AS idlandkreis,
           CASE
               WHEN t.casetype::text = 'case'::text THEN 1
               ELSE 0
               END  AS new_cases,
           CASE
               WHEN t.casetype::text = 'death'::text THEN 1
               ELSE 0
               END  AS new_deaths,
           CASE
               WHEN t.casetype::text = 'case'::text AND t.agegroup::text = 'A00-A04'::text THEN 1
               ELSE 0
               END  AS "c_A00-A04",
           CASE
               WHEN t.casetype::text = 'case'::text AND t.agegroup::text = 'A05-A14'::text THEN 1
               ELSE 0
               END  AS "c_A05-A14",
           CASE
               WHEN t.casetype::text = 'case'::text AND t.agegroup::text = 'A15-A34'::text THEN 1
               ELSE 0
               END  AS "c_A15-A34",
           CASE
               WHEN t.casetype::text = 'case'::text AND t.agegroup::text = 'A35-A59'::text THEN 1
               ELSE 0
               END  AS "c_A35-A59",
           CASE
               WHEN t.casetype::text = 'case'::text AND t.agegroup::text = 'A60-A79'::text THEN 1
               ELSE 0
               END  AS "c_A60-A79",
           CASE
               WHEN t.casetype::text = 'case'::text AND t.agegroup::text = 'A80+'::text THEN 1
               ELSE 0
               END  AS "c_A80+",
           CASE
               WHEN t.casetype::text = 'case'::text AND t.agegroup::text = 'unbekannt'::text THEN 1
               ELSE 0
               END  AS "c_Aunbekannt",
           CASE
               WHEN t.casetype::text = 'death'::text AND t.agegroup::text = 'A00-A04'::text THEN 1
               ELSE 0
               END  AS "d_A00-A04",
           CASE
               WHEN t.casetype::text = 'death'::text AND t.agegroup::text = 'A05-A14'::text THEN 1
               ELSE 0
               END  AS "d_A05-A14",
           CASE
               WHEN t.casetype::text = 'death'::text AND t.agegroup::text = 'A15-A34'::text THEN 1
               ELSE 0
               END  AS "d_A15-A34",
           CASE
               WHEN t.casetype::text = 'death'::text AND t.agegroup::text = 'A35-A59'::text THEN 1
               ELSE 0
               END  AS "d_A35-A59",
           CASE
               WHEN t.casetype::text = 'death'::text AND t.agegroup::text = 'A60-A79'::text THEN 1
               ELSE 0
               END  AS "d_A60-A79",
           CASE
               WHEN t.casetype::text = 'death'::text AND t.agegroup::text = 'A80+'::text THEN 1
               ELSE 0
               END  AS "d_A80+",
           CASE
               WHEN t.casetype::text = 'death'::text AND t.agegroup::text = 'unbekannt'::text THEN 1
               ELSE 0
               END  AS "d_Aunbekannt"
        FROM cases_current t
    ),
    first_meldedatum_per_lk AS (
        SELECT fixed_idlandkres.idlandkreis,
               min(fixed_idlandkres.meldedatum) AS first_meldetatum
        FROM fixed_idlandkres
        GROUP BY fixed_idlandkres.idlandkreis
    ),
    filled_ts AS (
        SELECT a_1.idlandkreis,
               b."timestamp"::date AS meldedatum
        FROM first_meldedatum_per_lk a_1
                 JOIN LATERAL ( SELECT generate_series(a_1.first_meldetatum::date::timestamp with time zone,
                                                       (now() - '1 day'::interval)::date::timestamp with time zone,
                                                       '1 day'::interval) AS "timestamp") b ON true
    ),
    new_data_per_day AS (
        SELECT b.idlandkreis,
               b.meldedatum,
               sum(a_1.new_cases)      AS new_cases,
               sum(a_1.new_deaths)     AS new_deaths,
               sum(a_1."c_A00-A04")    AS "c_A00-A04",
               sum(a_1."c_A05-A14")    AS "c_A05-A14",
               sum(a_1."c_A15-A34")    AS "c_A15-A34",
               sum(a_1."c_A35-A59")    AS "c_A35-A59",
               sum(a_1."c_A60-A79")    AS "c_A60-A79",
               sum(a_1."c_A80+")       AS "c_A80+",
               sum(a_1."c_Aunbekannt") AS "c_Aunbekannt",
               sum(a_1."d_A00-A04")    AS "d_A00-A04",
               sum(a_1."d_A05-A14")    AS "d_A05-A14",
               sum(a_1."d_A15-A34")    AS "d_A15-A34",
               sum(a_1."d_A35-A59")    AS "d_A35-A59",
               sum(a_1."d_A60-A79")    AS "d_A60-A79",
               sum(a_1."d_A80+")       AS "d_A80+",
               sum(a_1."d_Aunbekannt") AS "d_Aunbekannt"
        FROM fixed_idlandkres a_1
                 RIGHT JOIN filled_ts b
                            ON a_1.meldedatum::date = b.meldedatum AND a_1.idlandkreis::text = b.idlandkreis::text
        GROUP BY b.idlandkreis, b.meldedatum
        ORDER BY b.idlandkreis, b.meldedatum
    ),
    combined_stats AS (
        SELECT n.meldedatum::timestamp without time zone + '1 day'::interval AS "timestamp",
               n.idlandkreis                                                 AS ids,
               le.name,
               le.bez                                                        AS "desc",
               le.geom,
               sum(n.new_cases)
               OVER (PARTITION BY n.idlandkreis ORDER BY n.meldedatum ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS cases,
               sum(n.new_deaths)
               OVER (PARTITION BY n.idlandkreis ORDER BY n.meldedatum ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS deaths,
               sum(n."c_A00-A04")
               OVER (PARTITION BY n.idlandkreis ORDER BY n.meldedatum ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS "c_A00-A04",
               sum(n."c_A05-A14")
               OVER (PARTITION BY n.idlandkreis ORDER BY n.meldedatum ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS "c_A05-A14",
               sum(n."c_A15-A34")
               OVER (PARTITION BY n.idlandkreis ORDER BY n.meldedatum ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS "c_A15-A34",
               sum(n."c_A35-A59")
               OVER (PARTITION BY n.idlandkreis ORDER BY n.meldedatum ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS "c_A35-A59",
               sum(n."c_A60-A79")
               OVER (PARTITION BY n.idlandkreis ORDER BY n.meldedatum ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS "c_A60-A79",
               sum(n."c_A80+")
               OVER (PARTITION BY n.idlandkreis ORDER BY n.meldedatum ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS "c_A80+",
               sum(n."c_Aunbekannt")
               OVER (PARTITION BY n.idlandkreis ORDER BY n.meldedatum ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS "c_Aunbekannt",
               sum(n."d_A00-A04")
               OVER (PARTITION BY n.idlandkreis ORDER BY n.meldedatum ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS "d_A00-A04",
               sum(n."d_A05-A14")
               OVER (PARTITION BY n.idlandkreis ORDER BY n.meldedatum ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS "d_A05-A14",
               sum(n."d_A15-A34")
               OVER (PARTITION BY n.idlandkreis ORDER BY n.meldedatum ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS "d_A15-A34",
               sum(n."d_A35-A59")
               OVER (PARTITION BY n.idlandkreis ORDER BY n.meldedatum ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS "d_A35-A59",
               sum(n."d_A60-A79")
               OVER (PARTITION BY n.idlandkreis ORDER BY n.meldedatum ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS "d_A60-A79",
               sum(n."d_A80+")
               OVER (PARTITION BY n.idlandkreis ORDER BY n.meldedatum ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS "d_A80+",
               sum(n."d_Aunbekannt")
               OVER (PARTITION BY n.idlandkreis ORDER BY n.meldedatum ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS "d_Aunbekannt",
               p.population,
               pa."A00-A04"                                                  AS "p_A00-A04",
               pa."A05-A14"                                                  AS "p_A05-A14",
               pa."A15-A34"                                                  AS "p_A15-A34",
               pa."A35-A59"                                                  AS "p_A35-A59",
               pa."A60-A79"                                                  AS "p_A60-A79",
               pa."A80+"                                                     AS "p_A80+",
               pa.name                                                       AS p_name
        FROM new_data_per_day n
                 JOIN (SELECT bevoelkerung.kreisschluessel,
                              sum(bevoelkerung.anzahl) AS population
                       FROM bevoelkerung
                       GROUP BY bevoelkerung.kreisschluessel) p ON p.kreisschluessel = n.idlandkreis::integer
                 JOIN landkreise_extended le ON n.idlandkreis::text = le.ids::text
                 LEFT JOIN population_rki_agegroup pa ON n.idlandkreis::text = pa.ags::text
        ORDER BY n.idlandkreis, n.meldedatum
    ),
    b AS (
        SELECT bb.ids::text                                 AS ids,
               bb."timestamp",
               bb.cases / bb.population * 100000::numeric   AS cases_7days_ago_per_100k,
               bb.cases                                     AS cases7_days_ago
        FROM combined_stats bb
    ),
    cc AS (
        SELECT comc."timestamp",
               comc.ids,
               comc.name,
               comc."desc",
               comc.geom,
               comc.cases,
               comc.deaths,
               comc."c_A00-A04",
               comc."c_A05-A14",
               comc."c_A15-A34",
               comc."c_A35-A59",
               comc."c_A60-A79",
               comc."c_A80+",
               comc."c_Aunbekannt",
               comc."d_A00-A04",
               comc."d_A05-A14",
               comc."d_A15-A34",
               comc."d_A35-A59",
               comc."d_A60-A79",
               comc."d_A80+",
               comc."d_Aunbekannt",
               comc.population,
               comc."p_A00-A04",
               comc."p_A05-A14",
               comc."p_A15-A34",
               comc."p_A35-A59",
               comc."p_A60-A79",
               comc."p_A80+",
               comc.p_name,
               b.cases_7days_ago_per_100k,
               b.cases7_days_ago
        FROM combined_stats comc
                 JOIN b ON comc.ids::text = b.ids AND (comc."timestamp" - '7 days'::interval) = b."timestamp"
    )
    SELECT DISTINCT a."timestamp",
                    a."timestamp"                                                  AS last_updated,
                    a."timestamp"                                                  AS inserted,
                    a.ids,
                    a.name,
                    a."desc",
                    a.geom,
                    a.cases,
                    a.deaths,
                    a.population,
                    a.deaths / a.cases * 100::numeric                              AS death_rate,
                    a.cases / a.population * 100000::numeric                       AS cases_per_100k,
                    a.cases / a.population * 100::numeric                          AS cases_per_population,
                    a.cases / a.population * 100000::numeric - a.cases_7days_ago_per_100k AS cases7_per_100k,
                    a.cases7_days_ago,
                    a."c_A00-A04",
                    a."c_A05-A14",
                    a."c_A15-A34",
                    a."c_A35-A59",
                    a."c_A60-A79",
                    a."c_A80+",
                    a."c_Aunbekannt",
                    a."d_A00-A04",
                    a."d_A05-A14",
                    a."d_A15-A34",
                    a."d_A35-A59",
                    a."d_A60-A79",
                    a."d_A80+",
                    a."d_Aunbekannt",
                    a."p_A00-A04",
                    a."p_A05-A14",
                    a."p_A15-A34",
                    a."p_A35-A59",
                    a."p_A60-A79",
                    a."p_A80+",
                    bc.betten_gesamt                                               AS beds_total,
                    bc.betten_belegt                                               AS beds_occupied,
                    bc.betten_frei                                                 AS beds_free,
                    bc.faelle_covid_aktuell                                        AS cases_covid,
                    bc.faelle_covid_aktuell_beatmet                                AS cases_covid_ventilated,
                    bc.anzahl_standorte                                            AS num_locations,
                    bc.anzahl_meldebereiche                                        AS num_reporting_areas,
                    bc.anteil_betten_frei                                          AS proportion_beds_free,
                    bc.anteil_covid_betten                                         AS proportion_covid_beds,
                    bc.anteil_covid_beatmet                                        AS proportion_covid_ventilated
    FROM cc a
             LEFT JOIN bed_capacity2landkreise_extended bc2le ON a.ids::text = bc2le.landkreise_extended_ids::text
             LEFT JOIN bed_capacity bc
                       ON bc2le.bed_capacity_name::text = bc.county::text AND bc.datenbestand::date = a."timestamp"::date
    ORDER BY a."timestamp", a.name;
    """)
def downgrade():
    """Revert the migration by dropping the materialized view."""
    bind = op.get_bind()
    bind.execute("""
    drop materialized view cases_per_county_and_day;
    """)
|
from flask import Flask ,render_template ,request
app = Flask(__name__)
@app.route("/")
def website():
    """Home page: the CV / business-card template."""
    return render_template('03-cv-business.html')
@app.route("/read")
def about():
    """Serve the letter page (route /read)."""
    return render_template('letter.html')
@app.route("/cer11")
def form():
    """Serve certificate page 11. NOTE(review): the function name `form`
    does not reflect the page served — consider renaming."""
    return render_template('certf11.html')
@app.route("/cer10")
def us():
    """Serve certificate page 10. NOTE(review): the function name `us`
    does not reflect the page served — consider renaming."""
    return render_template('certf10.html')
if __name__== "__main__":
    # Development server only; debug=True must not be used in production.
    app.run(port=5055,debug=True)
#!/usr/bin/env python
from Bio import SeqIO
from threading import Thread
from glob import glob
from itsxcmd import ITSxCommandLine
from itsx import make_path, BinPacker
import os
import shutil
__author__ = 'mike knowles'
class ITSx(object):
    """Parallel driver for ITSx (Python 2).

    Splits one FASTA file into roughly equal-sized pieces, runs one ITSx
    process per piece in worker threads, then byte-appends the per-piece
    outputs back into combined result files and merges the summaries.
    """
    def __init__(self, i, o, cpu, **kwargs):
        # Py2-only import; kept local so module import does not require it.
        from Queue import Queue
        self.threads = cpu        # worker thread count == number of pieces
        self.path = o             # output directory
        self.itsxargs = kwargs    # extra flags passed through to ITSx
        self.fasta = i            # input FASTA path
        self.itsxqueue = Queue()  # (filename, output, cwd) work items
        self.name = ""
        self.kwargs = kwargs
        self.cmd = 'ITSx'
    def __str__(self):
        # The equivalent single-process ITSx command line.
        return str(ITSxCommandLine(i=self.fasta, o=self.path, cpu=self.threads, **self.kwargs))
    def __repr__(self):
        """Return a representation of the command line object for debugging.

        e.g.

        >>> from itsx.parallel import ITSx
        >>> itsx = ITSx(i='/data', o='/data/out', cpu=5)
        >>> itsx(name='name')
        >>> print(itsx)
        ITSx -i /data -o /data/out --cpu 5
        >>> itsx
        ITSx(cmd='ITSx', i='/data', o='/data/out', cpu=5)
        """
        answer = "%s(cmd=%s" % (self.__class__.__name__, repr(self.cmd))
        for parameter in self.kwargs:
            if parameter:
                answer += ", %s=%s" \
                          % (parameter, repr(self.kwargs[parameter]))
        answer += ")"
        return answer
    def parallel(self):
        # Worker loop: run ITSx for each queued piece until the process exits
        # (threads are daemonized, so they die with the main thread).
        while True:
            filename, output, cwd = self.itsxqueue.get()
            ITSxCommandLine(i=filename, o=self.name, cpu=self.threads, **self.itsxargs)(cwd=cwd)
            self.itsxqueue.task_done()
    def __call__(self, name=None, total=0):
        # Split the FASTA, fan out to worker threads, then merge results.
        # total: optional precomputed total sequence length (skips a pass).
        import math
        self.name = name if name else os.path.splitext(os.path.basename(self.fasta))[0]
        baselist = []
        for _ in range(self.threads):
            # Send the threads to the merge method. :args is empty as I'm using
            threads = Thread(target=self.parallel, args=())
            # Set the daemon to true - something to do with thread management
            threads.setDaemon(True)
            # Start the threading
            threads.start()
        with open(self.fasta) as fastafile:
            # Open the fasta file, and sum the length
            # Is there a better way to get length of a fasta?
            total = total if total else sum(map(len, SeqIO.parse(fastafile, "fasta")))
            fastafile.seek(0)
            # Reset the pointer to zero to re-read
            cap = int(math.ceil(float(total) / self.threads))
            cap += int(cap / 2e4)
            # Calculated ceiling of size / cpus
            record = SeqIO.parse(fastafile, "fasta")
            # Use the file with SeqIO
            for i, batch in enumerate(BinPacker(record, cap)):
                # Make a base folder for ITSx
                base = os.path.join(self.path, str(i+1))
                output = os.path.join(base, self.name)
                make_path(base)
                # Use the .x extension to help with globbing
                filename = output + ".x"
                with open(filename, "w") as handle:
                    SeqIO.write(list(batch), handle, "fasta")
                # Write the fasta piece
                # NOTE(review): uses the `name` parameter, which raises
                # TypeError when name is None — presumably self.name intended.
                self.itsxqueue.put((name + '.x', output, base))
                baselist.append(base)
        self.itsxqueue.join()
        # Wait for the threads to finish
        finalfiles = glob(os.path.join(self.path, '1/*[!x]'))
        for output in finalfiles:
            # Low level file i/o operation to quickly append files without significant overhead
            if hasattr(os, 'O_BINARY'):
                o_binary = getattr(os, 'O_BINARY')
            else:
                o_binary = 0
            f = os.path.basename(output)
            output_file = os.open(os.path.join(self.path, f), os.O_WRONLY | o_binary | os.O_CREAT)
            for intermediate in baselist:
                input_filename = os.path.join(intermediate, f)
                input_file = os.open(input_filename, os.O_RDONLY | o_binary)
                while True:
                    input_block = os.read(input_file, 1024 * 1024)
                    if not input_block:
                        break
                    os.write(output_file, input_block)
                os.close(input_file)
            os.close(output_file)
        try:
            summarylines = self.summary(baselist)
            with open(os.path.join(self.path, self.name + '.summary.txt'), 'w+') as full:
                full.writelines(summarylines)
        except IOError:
            print "ITSx failed to run!"
        # Remove the per-piece working folders.
        for intermediate in baselist:
            shutil.rmtree(intermediate)
    def summary(self, baselist):
        """
        Compile summary report is generated by adding up all the values in temp .summary.txt files
        :param baselist: list of folders and temporary files
        :return: compiled summary report
        """
        import re
        summarylines = list()
        # Matches the trailing integer count on each summary line.
        regex = re.compile('\d+$')
        for intermediate in baselist:
            with open(os.path.join(intermediate, self.name + ".summary.txt")) as summary:
                if summarylines:
                    # Add this piece's counts onto the running totals.
                    for idx, line in enumerate(summary):
                        match = regex.search(line)
                        if match:
                            summatch = regex.search(summarylines[idx])
                            start = match.start() if match.start() <= summatch.start() else summatch.start()
                            summarylines[idx] = '{0:s}{1:d}\n'.format(summarylines[idx][:start],
                                                                      int(match.group(0)) + int(summatch.group(0)))
                else:
                    # First piece seeds the template lines.
                    summarylines = summary.readlines()
        return summarylines
if __name__ == '__main__':
    # Module is meant to be imported; no standalone CLI entry point.
    pass
|
"""
January 19th 2020
Author T.Mizumoto
"""
#! python 3
# ver.x1.00
# SuperFigure.py - this program make a figure that be drown calculated the area and mesured point.
from stl import mesh
import numpy as np
import matplotlib.pyplot as plt
from rectselect import RectSelect
class SuperFigure(object):
    """Draws the 2D cross-section of an STL mesh together with measured plot points."""
    # NOTE(review): these are class-level attributes shared by all instances;
    # the script below overwrites them per instance before use.
    stl_path = ""
    # index-0: x-axis, index-1: y-axis
    PlotPoint = np.empty((0, 2))
    PlotData = np.empty((0,0))
    # cut_axis: x=0, y=1, z=2
    cut_axis = 2
    cut_point = 0.0
    # x=0, y=1, z=2
    plot_Xaxis = 0
    plot_Yaxis = 1
    # calculation area
    X_area = []
    Y_area = []
    def read_stl(self):
        """Load the STL file and collect the vertices lying on the cutting
        plane (coordinate along cut_axis equals cut_point) into
        self.CrossSection, shape (n, 3)."""
        stl_data = mesh.Mesh.from_file(self.stl_path)
        # Flatten the (n_triangles, 3, 3) vertex array to (n_vertices, 3).
        stl_2d = stl_data.vectors.reshape(-1, 3)
        # return Cross Section(CS) index
        # NOTE(review): exact float equality — vertices only slightly off the
        # plane are dropped; assumes an axis-aligned mesh. TODO confirm.
        index_CS = np.where(stl_2d[:, self.cut_axis] == self.cut_point)
        CS = np.empty((0, 3))
        for i in index_CS[0]:
            CS_1d = stl_2d[i, :]
            CS = np.append(CS, np.reshape(CS_1d, (1, 3)), axis = 0)
        self.CrossSection = CS
    def show(self):
        """Plot the cross-section outline plus the measured points and show the figure."""
        fig1 = plt.figure(1)
        ax1 = fig1.add_subplot(111)
        ax1.plot(self.CrossSection[:, self.plot_Xaxis], self.CrossSection[:, self.plot_Yaxis], \
                 color = "black", label = "object")
        ax1.set_xlim(self.X_area[0], self.X_area[1])
        ax1.set_ylim(self.Y_area[0], self.Y_area[1])
        region = RectSelect()
        region.plot = self.PlotPoint
        region.data = self.PlotData
        ax1.plot(region.plot[:, 0], region.plot[:, 1], ".", label = "plot")
        ax1.legend()
        ax1.grid()
        plt.show()
if __name__ == "__main__":
    # Project-local helpers: measurement-history reader, file-picker GUI,
    # and filename utilities.
    from FFB_HISTORY import HISTORY
    from Gui import FilePath
    from fun_ConvertFilename import fun_basename
    ### read HISTORY file
    fpH = FilePath()
    fpH.path("txt", "HISTORY")
    hs = HISTORY()
    hs.path = fpH.filepath_list[0]
    basename, directoryname = fun_basename(hs.path, ".txt")
    print((basename + " now loading..."))
    hs.read()
    hs.coordinate()
    hs.measure_data()
    print("Successful!")
    ## personal option
    hs.separate_linenum(21)
    line = hs.line["line1"]
    ### make Super Figure
    fpS = FilePath()
    # NOTE(review): "Calculation Traget" looks like a typo for "Target" in a
    # user-facing prompt string; left unchanged here.
    fpS.path("stl", "Calculation Traget")
    SF = SuperFigure()
    SF.stl_path = fpS.filepath_list[0]
    basename, directoryname = fun_basename(SF.stl_path, ".stl")
    print((basename + " now loading..."))
    SF.read_stl()
    print("Successful!")
    ## personal option
    # calculation area
    SF.X_area = [-2, 7]
    SF.Y_area = [-3, 3]
    # plot point: first two columns are coordinates, columns 3+ are data
    SF.PlotPoint = line[:, :2]
    SF.PlotData = line[:, 3:]
    SF.show()
from django.utils.unittest.case import skipIf
from datetime import datetime
from django.test import TestCase
from django.contrib.auth.models import User
from transit_subsidy.models import TransitSubsidy,Mode,OfficeLocation
import StringIO
import csv
import json as simplejson
class TransportationSubsidyViewTest(TestCase):
    """Integration tests for the transit-subsidy views (Python 2 / Django test client)."""
    fixtures = ['offices.json','transit_modes.json', 'users.json']
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def setUp(self):
        """
        Assumes valid user 'ted' exists in the users.json fixture and logs him in.
        """
        self.user = User.objects.get(username='ted')
        is_logged_in = self.client.login(username='ted',password='ted')
        self.assertTrue(is_logged_in, 'Client not able to login?! Check fixture data or User creation method in setUp.')
        # Deterministic office choice: first office alphabetically by city.
        self.office = OfficeLocation.objects.order_by('city')[0]
    def tearDown(self):
        pass
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def test_simple_json_fetch(self):
        """POST /transit/modes returns the mode list as JSON, ordered by name."""
        response = self.client.post('/transit/modes')
        self.assertEquals(200, response.status_code)
        json_obj = simplejson.loads( response.content )
        self.assertTrue( len(json_obj) > 1 )
        self.assertEquals('AMTRAK', json_obj[0]['short_name'] )
        self.assertEquals('MARTZ', json_obj[12]['short_name'] )
        # NOTE(review): leftover debug print.
        print json_obj[0]
    def test_that_get_request_fails_must_be_a_POST(self):
        """GET on /transit/modes is rejected with an error body."""
        expected = 'error: please use POST'
        response = self.client.get('/transit/modes')
        # NOTE(review): leftover debug print.
        print response
        self.assertEquals( expected, response.content )
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
from __future__ import absolute_import
from __future__ import unicode_literals
import unittest
from copy import deepcopy
from junitparser import (
TestCase,
TestSuite,
Skipped,
Failure,
Error,
Attr,
JUnitXmlError,
JUnitXml,
Property,
Properties,
IntAttr,
FloatAttr,
)
from xml.etree import ElementTree as etree
try:
    # Python 2 compatibility: prefer the lazy izip; on Python 3 this import
    # fails and the (already lazy) builtin zip is used instead.
    import itertools.izip as zip
except ImportError:
    pass
import locale
try:
    # Some tests format/parse numbers with US thousands separators.
    locale.setlocale(locale.LC_NUMERIC, 'en_US.UTF-8')
    has_us_locale = True
except locale.Error:  # pragma: no cover
    has_us_locale = False
class Test_MergeSuiteCounts(unittest.TestCase):
    """Counters (tests/failures/skipped) must aggregate when suites are merged."""
    def test_merge_test_count(self):
        # Two differently named suites: counters are summed across both.
        text1 = """<testsuite name="suitename1" tests="2" failures="1">
        <testcase name="testname1"><failure message="failed"/></testcase>
        <testcase name="testname2"></testcase>
        </testsuite>"""
        test_suite1 = TestSuite.fromstring(text1)
        text2 = """<testsuite name="suitename2" tests="2" skipped="1">
        <testcase name="testname3"><skipped message="no reason given"/></testcase>
        <testcase name="testname4"></testcase>
        </testsuite>"""
        test_suite2 = TestSuite.fromstring(text2)
        combined_suites = JUnitXml()
        combined_suites += test_suite1
        combined_suites += test_suite2
        self.assertEqual(combined_suites.tests, 4)
        self.assertEqual(combined_suites.failures, 1)
        self.assertEqual(combined_suites.skipped, 1)
    def test_merge_same_suite(self):
        # Same suite name on both sides: cases merge into one suite and the
        # counters still sum.
        text1 = """<testsuite name="suitename1" tests="2" failures="1">
        <testcase name="testname1"><failure message="failed"/></testcase>
        <testcase name="testname2"></testcase>
        </testsuite>"""
        test_suite1 = TestSuite.fromstring(text1)
        text2 = """<testsuite name="suitename1" tests="2" skipped="1">
        <testcase name="testname3"><skipped message="no reason given"/></testcase>
        <testcase name="testname4"></testcase>
        </testsuite>"""
        test_suite2 = TestSuite.fromstring(text2)
        combined_suites = JUnitXml()
        combined_suites += test_suite1
        combined_suites += test_suite2
        suites = list(suite for suite in combined_suites)
        self.assertEqual(len(suites), 1)
        self.assertEqual(combined_suites.tests, 4)
        self.assertEqual(combined_suites.failures, 1)
        self.assertEqual(combined_suites.skipped, 1)
class Test_JunitXml(unittest.TestCase):
    """Parsing, construction, and merge (+ / +=) semantics of JUnitXml."""
    def test_fromstring(self):
        text = """<testsuites><testsuite name="suitename1">
        <testcase name="testname1">
        </testcase></testsuite>
        <testsuite name="suitename2">
        <testcase name="testname2">
        </testcase></testsuite></testsuites>"""
        result = JUnitXml.fromstring(text)
        self.assertEqual(result.time, 0)
        self.assertEqual(len(result), 2)
    def test_fromstring_no_testsuites(self):
        # A bare <testsuite> root is wrapped into a JUnitXml transparently.
        text = """<testsuite name="suitename1">
        <testcase name="testname1">
        </testcase></testsuite>"""
        result = JUnitXml.fromstring(text)
        self.assertEqual(result.time, 0)
        self.assertEqual(len(result), 1)
    def test_fromstring_multiple_fails(self):
        # The time attr embeds a thousands separator only when the US locale
        # is available (see has_us_locale at module level).
        text = """<testsuites>
        <testsuite errors="1" failures="0" hostname="hooch" name="pytest" skipped="1" tests="3" time="0.025" timestamp="2020-02-05T10:52:33.843536">
        <testcase classname="test_x" file="test_x.py" line="7" name="test_comp_1" time="1""" + ("," if has_us_locale else "") + """000.000"/>
        <testcase classname="test_x" file="test_x.py" line="10" name="test_comp_2" time="0.000">
        <skipped message="unconditional skip" type="pytest.skip">test_x.py:11: unconditional skip</skipped>
        <error message="test teardown failure">
        @pytest.fixture(scope="module") def compb(): yield > raise PermissionError E PermissionError test_x.py:6: PermissionError
        </error>
        </testcase>
        </testsuite>
        </testsuites>"""
        result = JUnitXml.fromstring(text)
        self.assertEqual(result.errors, 1)
        self.assertEqual(result.skipped, 1)
        suite = list(iter(result))[0]
        cases = list(iter(suite))
        self.assertEqual(len(cases[0].result), 0)
        self.assertEqual(len(cases[1].result), 2)
        text = cases[1].result[1].text
        self.assertTrue("@pytest.fixture" in text)
    def test_fromstring_invalid(self):
        # Unknown root tags must raise JUnitXmlError.
        text = """<random name="suitename1"></random>"""
        with self.assertRaises(Exception) as context:
            JUnitXml.fromstring(text)
        self.assertTrue(isinstance(context.exception, JUnitXmlError))
    def test_add_suite(self):
        suite1 = TestSuite("suite1")
        suite2 = TestSuite("suite2")
        result = JUnitXml()
        result.add_testsuite(suite1)
        result.add_testsuite(suite2)
        self.assertEqual(len(result), 2)
    def test_construct_xml(self):
        # Inspect the underlying ElementTree structure directly.
        suite1 = TestSuite()
        suite1.name = "suite1"
        case1 = TestCase()
        case1.name = "case1"
        suite1.add_testcase(case1)
        result = JUnitXml()
        result.add_testsuite(suite1)
        self.assertEqual(result._elem.tag, "testsuites")
        suite = result._elem.findall("testsuite")
        self.assertEqual(len(suite), 1)
        self.assertEqual(suite[0].attrib["name"], "suite1")
        case = suite[0].findall("testcase")
        self.assertEqual(len(case), 1)
        self.assertEqual(case[0].attrib["name"], "case1")
    def test_add(self):
        # `+` on two results with distinct suites keeps both.
        result1 = JUnitXml()
        suite1 = TestSuite("suite1")
        result1.add_testsuite(suite1)
        result2 = JUnitXml()
        suite2 = TestSuite("suite2")
        result2.add_testsuite(suite2)
        result3 = result1 + result2
        self.assertEqual(len(result3), 2)
    def test_add_same_suite(self):
        # `+` with equal (unnamed) suites merges them into one.
        result1 = JUnitXml()
        suite1 = TestSuite()
        result1.add_testsuite(suite1)
        result2 = JUnitXml()
        suite2 = TestSuite()
        result2.add_testsuite(suite2)
        result3 = result1 + result2
        self.assertEqual(len(result3), 1)
    def test_iadd(self):
        # `+=` mirrors `+` for distinct suites.
        result1 = JUnitXml()
        suite1 = TestSuite("suite1")
        result1.add_testsuite(suite1)
        result2 = JUnitXml()
        suite2 = TestSuite("suite2")
        result2.add_testsuite(suite2)
        result1 += result2
        self.assertEqual(len(result1), 2)
    def test_iadd_same_suite(self):
        # `+=` mirrors `+` for equal suites.
        result1 = JUnitXml()
        suite1 = TestSuite()
        result1.add_testsuite(suite1)
        result2 = JUnitXml()
        suite2 = TestSuite()
        result2.add_testsuite(suite2)
        result1 += result2
        self.assertEqual(len(result1), 1)
    def test_add_two_same_suites(self):
        # Adding two same-named TestSuites yields a merged TestSuite.
        suite1 = TestSuite()
        case1 = TestCase(name="case1")
        suite1.add_testcase(case1)
        suite2 = TestSuite()
        case2 = TestCase(name="case2")
        suite2.add_testcase(case2)
        suite3 = TestSuite()
        suite2.add_testsuite(suite3)
        result = suite1 + suite2
        self.assertIsInstance(result, TestSuite)
        self.assertEqual(len(list(iter(result))), 2)
        self.assertEqual(len(list(iter(result.testsuites()))), 1)
    def test_iadd_two_same_suites(self):
        suite1 = TestSuite()
        case1 = TestCase(name="case1")
        suite1.add_testcase(case1)
        suite2 = TestSuite()
        case2 = TestCase(name="case2")
        suite2.add_testcase(case2)
        suite3 = TestSuite()
        suite2.add_testsuite(suite3)
        suite1 += suite2
        self.assertIsInstance(suite1, TestSuite)
        self.assertEqual(len(list(iter(suite1))), 2)
        self.assertEqual(len(list(iter(suite1.testsuites()))), 1)
    def test_add_two_different_suites(self):
        # Adding differently named TestSuites promotes the result to JUnitXml.
        suite1 = TestSuite(name="suite1")
        case1 = TestCase(name="case1")
        suite1.add_testcase(case1)
        suite2 = TestSuite(name="suite2")
        case2 = TestCase(name="case2")
        suite2.add_testcase(case2)
        result = suite1 + suite2
        self.assertIsInstance(result, JUnitXml)
        self.assertEqual(len(list(iter(result))), 2)
    def test_iadd_two_different_suites(self):
        suite1 = TestSuite(name="suite1")
        case1 = TestCase(name="case1")
        suite1.add_testcase(case1)
        suite2 = TestSuite(name="suite2")
        case2 = TestCase(name="case2")
        suite2.add_testcase(case2)
        suite1 += suite2
        self.assertIsInstance(suite1, JUnitXml)
        self.assertEqual(len(list(iter(suite1))), 2)
    def test_xml_statistics(self):
        # update_statistics on a merged, empty result keeps zero counters.
        result1 = JUnitXml()
        suite1 = TestSuite()
        result1.add_testsuite(suite1)
        result2 = JUnitXml()
        suite2 = TestSuite()
        result2.add_testsuite(suite2)
        result3 = result1 + result2
        result3.update_statistics()
        self.assertEqual(result3.tests, 0)
class Test_TestSuite(unittest.TestCase):
    """TestSuite parsing, statistics, properties, and equality semantics."""
    def test_fromstring(self):
        text = """<testsuite name="suitename" time="1.32">
        <testcase name="testname">
        <failure message="failure message" type="FailureType"/>
        </testcase></testsuite>"""
        suite = TestSuite.fromstring(text)
        self.assertEqual(suite.time, 1.32)
        suite.update_statistics()
        self.assertEqual(suite.name, "suitename")
        self.assertEqual(suite.tests, 1)
    def test_props_fromstring(self):
        text = """<testsuite name="suitename">
        <properties><property name="name1" value="value1"/></properties>
        </testsuite>"""
        suite = TestSuite.fromstring(text)
        for prop in suite.properties():
            self.assertEqual(prop.name, "name1")
            self.assertEqual(prop.value, "value1")
    def test_quoted_attr(self):
        # Fix: quotes inside an XML attribute value must be escaped as
        # &quot; — the previous fixture embedded raw '"' characters, which is
        # malformed XML and also made the assertion line a Python syntax
        # error. The parser un-escapes entities when reading the attribute.
        text = """<testsuite name="suitename with &quot;quotes&quot;">
        </testsuite>"""
        suite = TestSuite.fromstring(text)
        self.assertEqual(suite.name, 'suitename with "quotes"')
    def test_combining_testsuite_should_keep_name(self):
        text1 = """<testsuite name="suitename1" tests="2" failures="1">
        <testcase name="testname1"><failure message="failed"/></testcase>
        <testcase name="testname2"></testcase>
        </testsuite>"""
        test_suite1 = TestSuite.fromstring(text1)
        text2 = """<testsuite name="suitename2" tests="2" skipped="1">
        <testcase name="testname3"><skipped message="no reason given"/></testcase>
        <testcase name="testname4"></testcase>
        </testsuite>"""
        test_suite2 = TestSuite.fromstring(text2)
        combined_suites = JUnitXml()
        combined_suites += test_suite1
        combined_suites += test_suite2
        self.assertEqual(
            [s.name for s in combined_suites], ["suitename1", "suitename2"]
        )
    def test_len(self):
        text = """<testsuite name="suitename"><testcase name="testname"/>
        <testcase name="testname2"/>
        </testsuite>"""
        suite = TestSuite.fromstring(text)
        self.assertEqual(len(suite), 2)
    def test_add_case(self):
        # One case of each outcome; update_statistics must count each bucket.
        suite = TestSuite()
        self.assertEqual(suite.tests, 0)
        case1 = TestCase()
        case2 = TestCase()
        case2.result = [Failure()]
        case3 = TestCase()
        case3.result = [Error()]
        case4 = TestCase()
        case4.result = [Skipped()]
        suite.add_testcase(case1)
        suite.add_testcase(case2)
        suite.add_testcase(case3)
        suite.add_testcase(case4)
        suite.update_statistics()
        self.assertEqual(suite.tests, 4)
        self.assertEqual(suite.failures, 1)
        self.assertEqual(suite.errors, 1)
        self.assertEqual(suite.skipped, 1)
    def test_case_count(self):
        # add_testcase alone keeps counters current without update_statistics.
        suite = TestSuite()
        case1 = TestCase()
        suite.add_testcase(case1)
        self.assertEqual(suite.tests, 1)
        self.assertEqual(suite.failures, 0)
    def test_add_property(self):
        suite = TestSuite()
        suite.add_property("name1", "value1")
        res_prop = next(suite.properties())
        self.assertEqual(res_prop.name, "name1")
        self.assertEqual(res_prop.value, "value1")
    def test_remove_case(self):
        suite = TestSuite()
        case1 = TestCase()
        case1.name = "test1"
        case2 = TestCase()
        case2.name = "test2"
        suite.add_testcase(case1)
        suite.add_testcase(case2)
        suite.remove_testcase(case1)
        self.assertEqual(len(suite), 1)
    def test_remove_property(self):
        suite = TestSuite()
        suite.add_property("name1", "value1")
        suite.add_property("name2", "value2")
        suite.add_property("name3", "value3")
        for prop in suite.properties():
            if prop.name == "name2":
                suite.remove_property(prop)
        self.assertEqual(len(list(suite.properties())), 2)
    def test_remove_property_from_none(self):
        suite = TestSuite()
        suite.remove_property(Property("key", "value"))
        # Nothing should happen
    def test_suite_in_suite(self):
        suite = TestSuite("parent")
        childsuite = TestSuite("child")
        suite.add_testsuite(childsuite)
        self.assertEqual(len(list(suite.testsuites())), 1)
    def test_case_time(self):
        # Suite time is derived from the sum of its cases' times.
        suite = TestSuite()
        case1 = TestCase()
        case1.name = "test1"
        case1.time = 15
        suite.add_testcase(case1)
        suite.update_statistics()
        self.assertEqual(suite.time, 15)
    def test_wrong_attr_type(self):
        # Typed attributes (FloatAttr/IntAttr) reject wrong value types.
        suite = TestSuite()
        with self.assertRaises(TypeError):
            suite.time = "abc"
        with self.assertRaises(TypeError):
            suite.tests = 10.5
    def test_suite_eq(self):
        suite = TestSuite()
        suite.add_property("name1", "value1")
        suite2 = deepcopy(suite)
        self.assertEqual(suite, suite2)
    def test_suite_ne(self):
        suite = TestSuite()
        suite.add_property("name1", "value1")
        suite2 = deepcopy(suite)
        suite2.add_property("name2", "value2")
        self.assertNotEqual(suite, suite2)
class Test_TestCase(unittest.TestCase):
    """TestCase attribute access, results, output streams, and (in)equality."""
    def test_case_fromstring(self):
        text = """<testcase name="testname">
        <failure message="failure message" type="FailureType"/>
        <system-out>System out</system-out>
        <system-err>System err</system-err>
        </testcase>"""
        case = TestCase.fromstring(text)
        self.assertEqual(case.name, "testname")
        self.assertIsInstance(case.result[0], Failure)
        self.assertEqual(case.system_out, "System out")
        self.assertEqual(case.system_err, "System err")
    def test_illegal_xml_multi_results(self):
        text = """<testcase name="testname">
        <failure message="failure message" type="FailureType"/>
        <skipped message="skipped message" type="FailureType"/>
        </testcase>
        """
        case = TestCase.fromstring(text)
        # NOTE(review): assertRaises called without a callable returns an
        # unused context manager — this line asserts nothing. Probably meant
        # `with self.assertRaises(JUnitXmlError): ...`; confirm intended
        # behavior before tightening.
        self.assertRaises(JUnitXmlError)
    def test_case_attributes(self):
        case = TestCase()
        case.name = "testname"
        case.classname = "testclassname"
        case.time = 15.123
        case.result = [Skipped()]
        case.result[0].text = "woah skipped"
        self.assertEqual(case.name, "testname")
        self.assertEqual(case.classname, "testclassname")
        self.assertEqual(case.time, 15.123)
        self.assertIsInstance(case.result[0], Skipped)
        self.assertEqual(case.result[0].text, "woah skipped")
    def test_case_init_with_attributes(self):
        case = TestCase("testname", "testclassname", 15.123)
        case.result = [Skipped()]
        self.assertEqual(case.name, "testname")
        self.assertEqual(case.classname, "testclassname")
        self.assertEqual(case.time, 15.123)
        self.assertIsInstance(case.result[0], Skipped)
    def test_case_output(self):
        # Re-assigning system-out/err must replace, not append.
        case = TestCase()
        case.system_err = "error message"
        case.system_out = "out message"
        self.assertEqual(case.system_err, "error message")
        self.assertEqual(case.system_out, "out message")
        case.system_err = "error2"
        case.system_out = "out2"
        self.assertEqual(case.system_err, "error2")
        self.assertEqual(case.system_out, "out2")
    def test_update_results(self):
        # Re-assigning .result replaces the old result elements entirely.
        case = TestCase()
        case.result = [Skipped()]
        case.result = [Failure(), Skipped()]
        self.assertEqual(len(case.result), 2)
    def test_monkypatch(self):
        # New attributes can be declared on the class after the fact.
        TestCase.id = Attr("id")
        case = TestCase()
        case.id = "100"
        self.assertEqual(case.id, "100")
    def test_equal(self):
        case = TestCase()
        case.name = "test1"
        case2 = TestCase()
        case2.name = "test1"
        self.assertEqual(case, case2)
    def test_not_equal(self):
        case = TestCase()
        case.name = "test1"
        case2 = TestCase()
        case2.name = "test2"
        self.assertNotEqual(case, case2)
    def test_from_elem(self):
        elem = etree.Element("testcase", name="case1")
        case = TestCase.fromelem(elem)
        self.assertEqual(case.name, "case1")
    def test_from_junit_elem(self):
        # A subclass built via fromelem shares data with the original element.
        case = TestCase()
        case.name = "test1"
        class TestOtherCase(TestCase):
            _tag = "TestOtherCase"
            assertions = Attr()
        other_case = TestOtherCase.fromelem(case)
        self.assertEqual(case.name, other_case.name)
        self.assertRaises(AttributeError, lambda: case.assertions)
        other_case.assertions = 20
        self.assertEqual(other_case.assertions, "20")
    def test_to_string(self):
        case = TestCase()
        case.name = "test1"
        case_str = case.tostring()
        self.assertIn(b"test1", case_str)
    def test_to_nonascii_string(self):
        # Non-ASCII names/messages must round-trip through UTF-8 bytes.
        case = TestCase()
        case.name = "测试1"
        case.result = [Failure("失败", "类型")]
        case_str = case.tostring()
        self.assertIn("测试1", case_str.decode("utf-8"))
        self.assertIn("失败", case_str.decode("utf-8"))
        self.assertIn("类型", case_str.decode("utf-8"))
    def test_system_out(self):
        case = TestCase()
        case.name = "case1"
        self.assertIsNone(case.system_out)
        case.system_out = "output"
        self.assertEqual(case.system_out, "output")
    def test_system_err(self):
        case = TestCase()
        case.name = "case1"
        self.assertIsNone(case.system_err)
        case.system_err = "error"
        self.assertEqual(case.system_err, "error")
    def test_result_eq(self):
        # TODO: Weird, need to think of a better API
        self.assertEqual(Failure("A"), Failure("A"))
        self.assertNotEqual(Skipped("B"), Skipped("A"))
        self.assertNotEqual(Error("C"), Error("B"))
    def test_result_attrs(self):
        res1 = Failure("A")
        # NOTE: lxml gives spaceless result
        self.assertIn(
            res1.tostring(), [b'<failure message="A" />', b'<failure message="A"/>']
        )
class Test_Properties(unittest.TestCase):
    """repr and (in)equality of Property / Properties containers."""
    def test_property_repr1(self):
        prop1 = Property("prop1", "1")
        self.assertEqual(
            prop1.__repr__(), '<Element \'property\' name="prop1" value="1">'
        )
    def test_property_repr2(self):
        prop1 = TestSuite()
        self.assertEqual(prop1.__repr__(), "<Element 'testsuite'>")
    def test_property_eq(self):
        prop1 = Property("prop1", "1")
        prop2 = Property("prop1", "1")
        self.assertEqual(prop1, prop2)
    def test_property_ne(self):
        prop1 = Property("prop1", "1")
        prop2 = Property("prop1", "2")
        self.assertNotEqual(prop1, prop2)
    def test_properties_eq(self):
        prop1 = Property("prop1", "1")
        prop2 = Property("prop1", "2")
        # Note: an attribute can only be used at one place.
        prop3 = deepcopy(prop1)
        prop4 = deepcopy(prop2)
        props1 = Properties()
        props1.add_property(prop1)
        props1.add_property(prop2)
        props2 = Properties()
        props2.add_property(prop3)
        props2.add_property(prop4)
        self.assertEqual(props1, props2)
    def test_properties_ne(self):
        # Same length but different member values -> unequal.
        prop1 = Property("prop1", "1")
        prop2 = Property("prop1", "2")
        prop3 = deepcopy(prop1)
        prop4 = deepcopy(prop1)
        props1 = Properties()
        props1.add_property(prop1)
        props1.add_property(prop2)
        props2 = Properties()
        props2.add_property(prop3)
        props2.add_property(prop4)
        self.assertNotEqual(props1, props2)
    def test_properties_ne2(self):
        # Different lengths -> unequal.
        prop1 = Property("prop1", "1")
        prop2 = Property("prop1", "2")
        prop3 = deepcopy(prop1)
        props1 = Properties()
        props1.add_property(prop1)
        props1.add_property(prop2)
        props2 = Properties()
        props2.add_property(prop3)
        self.assertNotEqual(props1, props2)
class Test_Attrs(unittest.TestCase):
    """Typed attribute descriptors (Attr / IntAttr / FloatAttr) round-trip values."""
    def test_attr(self):
        # Attributes can be declared on the class dynamically.
        TestCase.text = Attr("text")
        TestCase.int = IntAttr("int")
        TestCase.float = FloatAttr("float")
        element = TestCase("foo")
        element.text = "foo"
        element.int = 10
        element.float = 8.5
        self.assertEqual(element.text, "foo")
        self.assertEqual(element.int, 10)
        self.assertEqual(element.float, 8.5)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
from neuroio import Client
def test_client_default_api_version():
    """A Client constructed without arguments defaults to API version 1."""
    client = Client()
    assert client.api_version == 1
def test_client_api_version():
    """An explicitly requested api_version is stored on the client."""
    client = Client(api_version=2)
    assert client.api_version == 2
def test_get_incorrect_attr():
    """Accessing an unknown attribute on the client must raise."""
    client = Client(api_version=1)
    raised = False
    try:
        client.incorrect
    except BaseException:
        raised = True
    assert raised
|
import os.path as osp
import torch
import torch.nn.functional as F
from torch_geometric.datasets import LINKXDataset
from torch_geometric.nn import LINKX
# Use the GPU when available; Penn94 fits on a single device.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Dataset root lives two levels up from this example script.
path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'LINKX')
dataset = LINKXDataset(path, name='Penn94')
data = dataset[0].to(device)
model = LINKX(data.num_nodes, data.num_features, hidden_channels=32,
              out_channels=dataset.num_classes, num_layers=1,
              num_edge_layers=1, num_node_layers=1, dropout=0.5).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=1e-3)
def train():
    """Run one optimization step on the training split; return the loss.

    Uses the module-level model/optimizer/data globals.
    """
    model.train()
    optimizer.zero_grad()
    out = model(data.x, data.edge_index)
    mask = data.train_mask[:, 0]  # Use the first set of the five masks.
    loss = F.cross_entropy(out[mask], data.y[mask])
    loss.backward()
    optimizer.step()
    return float(loss)
@torch.no_grad()
def test():
    """Return [train_acc, val_acc, test_acc] using the first of the five masks."""
    accs = []
    model.eval()
    pred = model(data.x, data.edge_index).argmax(dim=-1)
    for _, mask in data('train_mask', 'val_mask', 'test_mask'):
        mask = mask[:, 0]  # Use the first set of the five masks.
        accs.append(int((pred[mask] == data.y[mask]).sum()) / int(mask.sum()))
    return accs
# Train for 200 epochs, reporting split accuracies each epoch.
for epoch in range(1, 201):
    loss = train()
    train_acc, val_acc, test_acc = test()
    print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, Train: {train_acc:.4f}, '
          f'Val: {val_acc:.4f}, Test: {test_acc:.4f}')
|
{
'targets': [
{
'target_name': '<!(node -e \"require(\'./package.json\').binary.module_name\")',
'module_name': '<!(node -e \"require(\'./package.json\').binary.module_name\")',
'module_path': '<!(node -e \"require(\'./package.json\').binary.module_path\")',
'sources': [
'<!@(tools/genmoc.sh)',
'src/brig.cpp',
'src/utils.cpp',
'src/eventdispatcher/eventdispatcher.cpp',
'src/QApplication.cpp',
'src/callback.cpp',
'src/signal_handler.cpp',
'src/QmlEngine.cpp',
'src/QmlComponent.cpp',
'src/QmlContext.cpp',
'src/QuickItem.cpp',
'src/DynamicQMetaObjectBuilder.cpp',
'src/DynamicQObject.cpp',
'src/QmlTypeBuilder.cpp'
],
'include_dirs': [
"<!(node -e \"require('nan')\")"
],
'conditions': [
['OS=="linux"', {
'sources': [
'<!@(tools/internal.sh)',
],
'cflags': [
'-std=c++11',
'-fPIC',
'-I./src',
'<!@(pkg-config --cflags Qt5Core Qt5Quick Qt5Qml Qt5Multimedia)'
],
'ldflags': [
'<!@(pkg-config --libs-only-L --libs-only-other Qt5Core Qt5Quick Qt5Qml Qt5Multimedia)'
],
'libraries': [
'<!@(pkg-config --libs-only-l Qt5Core Qt5Quick Qt5Qml Qt5Multimedia)'
]
}],
['OS=="mac"', {
'sources': [
'<!@(tools/mac-config.sh --internal)',
'src/eventdispatcher/platform/mac.mm'
],
'xcode_settings': {
'LD_RUNPATH_SEARCH_PATHS': [
          '@executable_path/node_modules/qt-darwin/Frameworks',
          '@loader_path/node_modules/qt-darwin/Frameworks',
          '@executable_path/../node_modules/qt-darwin/Frameworks',
          '@loader_path/../node_modules/qt-darwin/Frameworks',
          '@executable_path/../../node_modules/qt-darwin/Frameworks',
'@loader_path/../../node_modules/qt-darwin/Frameworks'
],
'OTHER_CPLUSPLUSFLAGS': [
'-stdlib=libc++',
'-std=c++11',
'-mmacosx-version-min=10.7',
'-Wno-inconsistent-missing-override',
'-Woverloaded-virtual',
'<!@(tools/mac-config.sh --cflags)'
],
'GCC_ENABLE_CPP_EXCEPTIONS': 'YES'
},
'defines': [
'__MACOSX_CORE__'
],
'include_dirs': [
'build/src',
'<!@(tools/mac-config.sh --include-dirs QtGui QtCore QtQuick QtQml QtMultimedia QtWidgets QtQuickControls2)'
],
'libraries': [
'-undefined dynamic_lookup',
'<!@(tools/mac-config.sh --libs QtGui QtCore QtQuick QtQml QtMultimedia QtWidgets QtQuickControls2)'
]
}]
]
}
]
}
|
# @author: Michael Vorotyntsev
# @email: linkofwise@gmail.com
# @github: unaxfromsibiria
import signal
import socket
import time
import weakref
from threading import Thread
from .config import Configuration, LoggerWrapper
from .common import CommandBuilder
from .processing import CommandExecuter, WorkerStatusEnum
class ManageAdapter(object):
    """Bridges the connection thread to server-side callbacks.

    Callbacks are held as weak references so the adapter never keeps the
    owning server object alive by itself.
    """

    # Kept for backward compatibility with code reading the attribute off the
    # class; each instance gets its own dict in __init__ (see bug fix below).
    service_methods = {}
    _cid = None  # client id assigned by the remote service; None until set

    def __init__(self, **methods):
        # Bug fix: previously this mutated the shared class-level dict, so
        # every adapter instance saw (and overwrote) every other instance's
        # registered callbacks.
        self.service_methods = {}
        for name, m in methods.items():
            if callable(m):
                # WeakMethod: do not keep the bound method's owner alive.
                self.service_methods[name] = weakref.WeakMethod(m)

    def _external_method_call(self, method):
        """Invoke the callback registered under ``method`` and return its result.

        Returns None when the callback is unknown or its owner was collected.
        """
        result = None
        external_method_ref = self.service_methods.get(method)
        if external_method_ref:
            # Bug fix: a WeakMethod must be dereferenced first; the old code
            # returned the bound method object itself instead of calling it.
            target = external_method_ref()
            if target is not None:
                result = target()
        return result

    def get_public_methods(self):
        """Return the server's public method list (or None if unavailable)."""
        return self._external_method_call('public_methods')

    def has_cid(self):
        """True once a client id has been assigned."""
        return self._cid is not None

    def get_cid(self):
        return self._cid

    def setup_cid(self, value):
        self._cid = value

    def get_status(self):
        """Report busy/free based on the server's worker availability."""
        check_free = self._external_method_call('has_free_workers')
        result = WorkerStatusEnum.busy
        if check_free:
            result = WorkerStatusEnum.free
        return result
class ConnectionThread(Thread):
    """Background thread that keeps a TCP connection to the manager service,
    reads newline-delimited commands, executes them, and writes answers back.
    """
    _server = None       # weakref.ref to the owning Server
    _logger = None
    _conf = None         # plain dict copy of the Configuration
    _lock = None         # crude busy-wait flag guarding socket access
    _connection = None   # the TCP socket, or None while disconnected
    _update_status_method = None
    cls_command_builder = CommandBuilder
    cls_command_executer = CommandExecuter
    def __init__(self, server, conf):
        super().__init__(name='roolt-connection')
        # Weak reference: the thread must not keep the server alive.
        self._server = weakref.ref(server)
        self._conf = dict(**conf)
        self._logger = conf.get_logger(wraper=LoggerWrapper('connection'))
    def _connection_access(self, data=None, write=False):
        """Serialized socket access: send `data` when write=True, else recv.

        NOTE(review): the sleep-poll on self._lock is a check-then-set pattern
        and is not a real mutex — racy if multiple threads ever call this;
        confirm single-caller assumption or switch to threading.Lock.
        """
        result = None
        if self._connection:
            while self._lock:
                time.sleep(0.1)
            self._lock = True
            try:
                if write:
                    self._connection.send(data)
                else:
                    read_buffer = self._conf.get('read_buffer') or 1024
                    result = self._connection.recv(read_buffer)
            finally:
                self._lock = False
        return result
    def flush(self):
        # Hook called periodically by Server.run(); intentionally a no-op here.
        pass
    def run(self):
        """Main loop: (re)connect, send hello, then pump commands until the
        owning server stops being alive."""
        self._lock = False
        server_active = True
        logger = self._logger
        retry_time = self._conf.get('retry_time') or 2.5
        encoding = self._conf.get('encoding') or 'utf-8'
        # connect
        first_run = True
        has_connection = False
        manager = ManageAdapter()
        command_builder = self.cls_command_builder()
        command_executer = self.cls_command_executer()
        command_executer.setup(
            configuration=self._conf,
            manager=manager,
            encoding=encoding)
        while server_active:
            # create connection or reconnect
            while not has_connection:
                if not first_run:
                    # Back off between reconnect attempts.
                    logger.warn('try reconnect after {}'.format(retry_time))
                    time.sleep(retry_time)
                try:
                    self._connection = socket.socket(
                        socket.AF_INET, socket.SOCK_STREAM)
                    self._connection.connect((
                        self._conf.get('addr'),
                        int(self._conf.get('port')),
                    ))
                except Exception as err:
                    logger.error(err)
                    has_connection = False
                else:
                    has_connection = True
                    logger.info('Connected to {}:{}'.format(
                        self._conf.get('addr'), int(self._conf.get('port'))))
                finally:
                    # Abort reconnecting if the server has been stopped.
                    server = self._server()
                    server_active = server and server.is_alive()
                    if not server_active:
                        has_connection = False
                first_run = False
            if has_connection and not manager.has_cid():
                # send hello
                answer = command_executer.get_hello()
                try:
                    self._connection_access(write=True, data=bytes(
                        '{}\n'.format(answer), encoding))
                except Exception as err:
                    logger.error(err)
                    has_connection = False
            if has_connection:
                # data change
                new_data = True
                while new_data and server_active:
                    try:
                        new_data = self._connection_access()
                    except Exception as err:
                        logger.error(err)
                        has_connection = False
                        new_data = None
                    if new_data:
                        # Commands are newline-delimited; feed each fragment
                        # to the builder until a full command is assembled.
                        new_data = new_data.decode(encoding)
                        new_data = new_data.split('\n')
                        for line_data in new_data:
                            command_builder.append(line_data)
                            if command_builder.is_done():
                                try:
                                    command = command_builder.get_command()
                                except ValueError as err:
                                    logger.error(err)
                                    answer = None
                                else:
                                    try:
                                        answer = command_executer.run(command)
                                    except Exception as err:
                                        logger.error(err)
                                        # Report execution failure back to peer.
                                        answer = command_executer.get_error(err)
                                if answer:
                                    logger.debug(answer)
                                    try:
                                        self._connection_access(
                                            write=True,
                                            data=bytes(
                                                '{}\n'.format(answer), encoding))
                                    except Exception as err:
                                        logger.error(err)
                                        logger.warn('Lost command: {}'.format(
                                            command))
                                        new_data = None
                                        has_connection = False
                    # if lost connection
                    if not has_connection:
                        self._connection = None
                        server = self._server()
                        server_active = server and server.is_alive()
        # send close
        server = self._server()
        # NOTE(review): `if self:` is always true — this looks like it should
        # be `if server:`; as written, server.close() raises AttributeError
        # when the weakly-referenced server was already collected. Confirm.
        if self:
            server.close()
class Server(object):
    """Owns the worker lifecycle: starts the connection thread, ticks the main
    loop, and coordinates a clean shutdown on SIGTERM/SIGINT.
    """

    _conf = None        # Configuration instance
    _active = True      # main-loop flag, cleared by stop()
    _can_exit = False   # set by close() once the connection thread finished
    _connection = None  # ConnectionThread

    def __init__(self, conf=None):
        if not conf:
            conf = Configuration()
        self._conf = conf
        self._connection = ConnectionThread(self, conf)
        self._logger = conf.get_logger(wraper=LoggerWrapper('server'))
        # Stop cleanly on TERM/INT so the connection thread can shut down.
        signal.signal(signal.SIGTERM, self._stop_sig_handler)
        signal.signal(signal.SIGINT, self._stop_sig_handler)

    def _stop_sig_handler(self, *args, **kwargs):
        self.stop()

    def stop(self):
        """Request shutdown; safe to call more than once."""
        if self._active:
            # Fixed typo in the log message: was 'stoping'.
            self._logger.info('stopping')
            self._active = False

    def is_alive(self):
        """True while the main loop should keep running (polled by the
        connection thread via its weak server reference)."""
        return bool(self._active)

    def run(self):
        """Start the connection thread and tick until stop() is called, then
        wait for the connection thread to acknowledge via close()."""
        self._active = True
        self._can_exit = False
        self._logger.info('Server started...')
        self._connection.start()
        while self._active:
            time.sleep(self._conf.iter_time)
            self._connection.flush()
        # Give the connection thread one quiet second, then warn every
        # second until it calls close().
        not_first = False
        while not self._can_exit:
            time.sleep(1)
            if not_first:
                self._logger.warn('wait process finished')
            not_first = True

    def close(self):
        """Called by the connection thread when it has fully stopped."""
        self._can_exit = True
|
from flask import Flask, jsonify, request
from flask_socketio import SocketIO, send
from flask_cors import CORS
from pymongo import MongoClient
from bson import json_util
import json
app = Flask(__name__)
# Allow cross-origin requests from the front-end.
CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
#socket = SocketIO(app, cors_allowed_origins="*")
# NOTE(review): hard-coded credentials; consider environment variables.
myclient = MongoClient('mongodb://db:27017/',username = 'admin', password = 'admin')
db = myclient["db_sa"]
reports = db["reports"]
@app.route('/', methods = ['GET'])
def init():
    # Health-check endpoint.
    return "Server B working correctly."
@app.route('/reports', methods = ['POST'])
def insertReport():
    """Store one report document from the JSON request body.

    The HTTP status is always 200; success/failure is reported via the
    embedded "status" field, matching the existing API contract.
    """
    try:
        data = request.get_json()
        # insert() is deprecated in PyMongo 3.x; insert_one() is the
        # supported single-document equivalent.
        reports.insert_one({
            "carnet" : data['carnet'],
            "nombre" : data['nombre'],
            "curso" : data['curso'],
            "mensaje" : data['mensaje'],
            "servidor" : "B"
        })
        res = {
            "status" : 200,
            "msg" : "Reporte guardado exitosamente en server B."
        }
        return jsonify(res)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # no longer swallowed; any request/DB error still yields a 500 body.
        res = {
            "status" : 500,
            "msg" : "Error interno server B."
        }
        return jsonify(res)
@app.route('/reports', methods = ['GET'])
def getReports():
    """Return every stored report as a JSON array of plain dicts."""
    # Project only the public fields (drops Mongo's _id, which jsonify
    # could not serialize anyway).
    wanted = ("carnet", "nombre", "curso", "mensaje", "servidor")
    json_docs = [{field: doc[field] for field in wanted}
                 for doc in reports.find()]
    return jsonify(json_docs)
if __name__ == "__main__":
app.run(host = '0.0.0.0', port = 3001, debug = True) |
# uniform content loss + adaptive threshold + per_class_input + recursive G
# improvement upon cqf37
from __future__ import division
import os, scipy.io
import tensorflow.compat.v1 as tf
import tf_slim as slim
tf.disable_v2_behavior()
from d2s_numpy import depth_to_space
from PIL import Image
#import tensorflow.contrib.slim as slim
import numpy as np
import rawpy
import glob
_errstr = "Mode is unknown or incompatible with input array shape."
d2s_type = "tf"
def bytescale(data, cmin=None, cmax=None, high=255, low=0):
    """Convert an array to ``uint8``, linearly rescaling into ``[low, high]``.

    If the input already has dtype ``uint8`` it is returned unchanged.

    Parameters
    ----------
    data : ndarray
        Input image data.
    cmin : scalar, optional
        Value mapped to `low`. Defaults to ``data.min()``.
    cmax : scalar, optional
        Value mapped to `high`. Defaults to ``data.max()``.
    high : scalar, optional
        Upper bound of the output range (<= 255). Default 255.
    low : scalar, optional
        Lower bound of the output range (>= 0). Default 0.

    Returns
    -------
    img_array : uint8 ndarray
        The byte-scaled array.

    Examples
    --------
    >>> img = np.array([[ 91.06794177,   3.39058326,  84.4221549 ],
    ...                 [ 73.88003259,  80.91433048,   4.88878881],
    ...                 [ 51.53875334,  34.45808177,  27.5873488 ]])
    >>> bytescale(img)
    array([[255,   0, 236],
           [205, 225,   4],
           [140,  90,  70]], dtype=uint8)
    """
    if data.dtype == np.uint8:
        return data
    if high > 255:
        raise ValueError("`high` should be less than or equal to 255.")
    if low < 0:
        raise ValueError("`low` should be greater than or equal to 0.")
    if high < low:
        raise ValueError("`high` should be greater than or equal to `low`.")
    lo = data.min() if cmin is None else cmin
    hi = data.max() if cmax is None else cmax
    span = hi - lo
    if span < 0:
        raise ValueError("`cmax` should be larger than `cmin`.")
    if span == 0:
        # Constant input: avoid division by zero, everything maps to `low`.
        span = 1
    factor = float(high - low) / span
    scaled = (data - lo) * factor + low
    # +0.5 before the uint8 cast rounds to nearest instead of truncating.
    return (scaled.clip(low, high) + 0.5).astype(np.uint8)
def toimage(arr, high=255, low=0, cmin=None, cmax=None, pal=None,
            mode=None, channel_axis=None):
    """Takes a numpy array and returns a PIL image.
    This function is only available if Python Imaging Library (PIL) is installed.
    The mode of the PIL image depends on the array shape and the `pal` and
    `mode` keywords.
    For 2-D arrays, if `pal` is a valid (N,3) byte-array giving the RGB values
    (from 0 to 255) then ``mode='P'``, otherwise ``mode='L'``, unless mode
    is given as 'F' or 'I' in which case a float and/or integer array is made.
    .. warning::
        This function uses `bytescale` under the hood to rescale images to use
        the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``.
        It will also cast data for 2-D images to ``uint32`` for ``mode=None``
        (which is the default).
    Notes
    -----
    For 3-D arrays, the `channel_axis` argument tells which dimension of the
    array holds the channel data.
    For 3-D arrays if one of the dimensions is 3, the mode is 'RGB'
    by default or 'YCbCr' if selected.
    The numpy array must be either 2 dimensional or 3 dimensional.
    """
    data = np.asarray(arr)
    if np.iscomplexobj(data):
        raise ValueError("Cannot convert a complex-valued array.")
    shape = list(data.shape)
    valid = len(shape) == 2 or ((len(shape) == 3) and
                                ((3 in shape) or (4 in shape)))
    if not valid:
        raise ValueError("'arr' does not have a suitable array shape for "
                         "any mode.")
    if len(shape) == 2:
        shape = (shape[1], shape[0])  # columns show up first
        if mode == 'F':
            data32 = data.astype(np.float32)
            # FIX: ndarray.tostring() was removed in NumPy 1.23; tobytes()
            # returns the identical raw buffer. Applied throughout.
            image = Image.frombytes(mode, shape, data32.tobytes())
            return image
        if mode in [None, 'L', 'P']:
            bytedata = bytescale(data, high=high, low=low,
                                 cmin=cmin, cmax=cmax)
            image = Image.frombytes('L', shape, bytedata.tobytes())
            if pal is not None:
                image.putpalette(np.asarray(pal, dtype=np.uint8).tobytes())
                # Becomes a mode='P' automagically.
            elif mode == 'P':  # default gray-scale
                pal = (np.arange(0, 256, 1, dtype=np.uint8)[:, np.newaxis] *
                       np.ones((3,), dtype=np.uint8)[np.newaxis, :])
                image.putpalette(np.asarray(pal, dtype=np.uint8).tobytes())
            return image
        if mode == '1':  # high input gives threshold for 1
            bytedata = (data > high)
            image = Image.frombytes('1', shape, bytedata.tobytes())
            return image
        if cmin is None:
            cmin = np.amin(np.ravel(data))
        if cmax is None:
            cmax = np.amax(np.ravel(data))
        data = (data*1.0 - cmin)*(high - low)/(cmax - cmin) + low
        if mode == 'I':
            data32 = data.astype(np.uint32)
            image = Image.frombytes(mode, shape, data32.tobytes())
        else:
            raise ValueError(_errstr)
        return image
    # if here then 3-d array with a 3 or a 4 in the shape length.
    # Check for 3 in datacube shape --- 'RGB' or 'YCbCr'
    if channel_axis is None:
        if (3 in shape):
            ca = np.flatnonzero(np.asarray(shape) == 3)[0]
        else:
            ca = np.flatnonzero(np.asarray(shape) == 4)
            if len(ca):
                ca = ca[0]
            else:
                raise ValueError("Could not find channel dimension.")
    else:
        ca = channel_axis
    numch = shape[ca]
    if numch not in [3, 4]:
        raise ValueError("Channel axis dimension is not valid.")
    bytedata = bytescale(data, high=high, low=low, cmin=cmin, cmax=cmax)
    # Reorder so channels are last before handing the raw bytes to PIL.
    if ca == 2:
        strdata = bytedata.tobytes()
        shape = (shape[1], shape[0])
    elif ca == 1:
        strdata = np.transpose(bytedata, (0, 2, 1)).tobytes()
        shape = (shape[2], shape[0])
    elif ca == 0:
        strdata = np.transpose(bytedata, (1, 2, 0)).tobytes()
        shape = (shape[2], shape[1])
    if mode is None:
        if numch == 3:
            mode = 'RGB'
        else:
            mode = 'RGBA'
    if mode not in ['RGB', 'RGBA', 'YCbCr', 'CMYK']:
        raise ValueError(_errstr)
    if mode in ['RGB', 'YCbCr']:
        if numch != 3:
            raise ValueError("Invalid array shape for mode.")
    if mode in ['RGBA', 'CMYK']:
        if numch != 4:
            raise ValueError("Invalid array shape for mode.")
    # Here we know data and mode is correct
    image = Image.frombytes(mode, shape, strdata)
    return image
# Dataset layout of the SID Sony benchmark: short exposures are inputs,
# long exposures are ground truth.
input_dir = './dataset/Sony/short/'
gt_dir = './dataset/Sony/long/'
checkpoint_dir = './checkpoint/Sony/'
result_dir = './result_Sony/'
# get test IDs
# Test scenes are the ones whose 5-digit id starts with '1'.
test_fns = glob.glob(gt_dir + '/1*.ARW')
test_ids = [int(os.path.basename(test_fn)[0:5]) for test_fn in test_fns]
DEBUG = 0
if DEBUG == 1:
    # Quick smoke-test run on the first five scenes only.
    save_freq = 2
    test_ids = test_ids[0:5]
def lrelu(x):
    """Leaky ReLU with negative-side slope 0.2."""
    return tf.maximum(0.2 * x, x)
def upsample_and_concat(x1, x2, output_channels, in_channels):
    """Upsample `x1` 2x via transposed conv and concatenate skip tensor `x2`.

    Returns a tensor with `output_channels * 2` channels.
    """
    pool_size = 2
    deconv_filter = tf.Variable(tf.truncated_normal([pool_size, pool_size, output_channels, in_channels], stddev=0.02))
    # Output spatial size is taken dynamically from the skip tensor `x2`.
    deconv = tf.nn.conv2d_transpose(x1, deconv_filter, tf.shape(x2), strides=[1, pool_size, pool_size, 1])
    deconv_output = tf.concat([deconv, x2], 3)
    # Restore the static channel dimension lost by the dynamic output shape.
    deconv_output.set_shape([None, None, None, output_channels * 2])
    return deconv_output
def network(input):
    """U-Net generator: 5-level encoder/decoder with skip connections.

    Takes the 4-channel packed Bayer tensor and emits 12 channels; the
    caller later applies depth_to_space(, 2) to obtain an RGB image at 2x
    spatial resolution (see the __main__ block below).
    """
    # Encoder: double 3x3 conv + 2x2 max-pool per level, 32 -> 512 channels.
    conv1 = slim.conv2d(input, 32, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv1_1')
    conv1 = slim.conv2d(conv1, 32, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv1_2')
    pool1 = slim.max_pool2d(conv1, [2, 2], padding='SAME')
    conv2 = slim.conv2d(pool1, 64, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv2_1')
    conv2 = slim.conv2d(conv2, 64, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv2_2')
    pool2 = slim.max_pool2d(conv2, [2, 2], padding='SAME')
    conv3 = slim.conv2d(pool2, 128, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv3_1')
    conv3 = slim.conv2d(conv3, 128, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv3_2')
    pool3 = slim.max_pool2d(conv3, [2, 2], padding='SAME')
    conv4 = slim.conv2d(pool3, 256, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv4_1')
    conv4 = slim.conv2d(conv4, 256, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv4_2')
    pool4 = slim.max_pool2d(conv4, [2, 2], padding='SAME')
    # Bottleneck.
    conv5 = slim.conv2d(pool4, 512, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv5_1')
    conv5 = slim.conv2d(conv5, 512, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv5_2')
    # Decoder: transposed-conv upsample + skip concat, then double conv.
    up6 = upsample_and_concat(conv5, conv4, 256, 512)
    conv6 = slim.conv2d(up6, 256, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv6_1')
    conv6 = slim.conv2d(conv6, 256, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv6_2')
    up7 = upsample_and_concat(conv6, conv3, 128, 256)
    conv7 = slim.conv2d(up7, 128, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv7_1')
    conv7 = slim.conv2d(conv7, 128, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv7_2')
    up8 = upsample_and_concat(conv7, conv2, 64, 128)
    conv8 = slim.conv2d(up8, 64, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv8_1')
    conv8 = slim.conv2d(conv8, 64, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv8_2')
    up9 = upsample_and_concat(conv8, conv1, 32, 64)
    conv9 = slim.conv2d(up9, 32, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv9_1')
    conv9 = slim.conv2d(conv9, 32, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv9_2')
    # 12 output channels = 3 RGB x (2x2) sub-pixels for depth_to_space.
    conv10 = slim.conv2d(conv9, 12, [1, 1], rate=1, activation_fn=None, scope='g_conv10')
    """if d2s_type is "tf":
        out = tf.depth_to_space(conv10, 2)
    else:
        out = conv10"""
    return conv10
def pack_raw(raw):
    """Pack a Bayer raw frame into a 4-channel, half-resolution float array.

    Subtracts the black level (512) and normalizes by the sensor's white
    level (16383) before splitting the RGGB mosaic into channels.
    """
    frame = raw.raw_image_visible.astype(np.float32)
    frame = np.maximum(frame - 512, 0) / (16383 - 512)  # subtract the black level
    frame = frame[:, :, np.newaxis]
    h, w = frame.shape[0], frame.shape[1]
    # One channel per Bayer phase; output is (h/2, w/2, 4).
    phases = [
        frame[0:h:2, 0:w:2, :],
        frame[0:h:2, 1:w:2, :],
        frame[1:h:2, 1:w:2, :],
        frame[1:h:2, 0:w:2, :],
    ]
    return np.concatenate(phases, axis=2)
if __name__ == "__main__":
sess = tf.Session()
#in_image = tf.placeholder(tf.float32, [None, None, None, 4])
#gt_image = tf.placeholder(tf.float32, [None, None, None, 3])
in_image = tf.placeholder(tf.float32, [1, 1424, 2128, 4])
gt_image = tf.placeholder(tf.float32, [1, 2848, 4256, 3])
out_image = network(in_image)
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt:
print('loaded ' + ckpt.model_checkpoint_path)
saver.restore(sess, ckpt.model_checkpoint_path)
if not os.path.isdir(result_dir + 'final/'):
os.makedirs(result_dir + 'final/')
print(test_ids)
for test_id in test_ids:
# test the first image in each sequence
in_files = glob.glob(input_dir + '%05d_00*.ARW' % test_id)
for k in range(len(in_files)):
#Begin input pre process
in_path = in_files[k]
in_fn = os.path.basename(in_path)
print(in_fn)
gt_files = glob.glob(gt_dir + '%05d_00*.ARW' % test_id)
gt_path = gt_files[0]
gt_fn = os.path.basename(gt_path)
in_exposure = float(in_fn[9:-5])
gt_exposure = float(gt_fn[9:-5])
ratio = min(gt_exposure / in_exposure, 300)
raw = rawpy.imread(in_path)
input_full = np.expand_dims(pack_raw(raw), axis=0) * ratio
im = raw.postprocess(use_camera_wb=True, half_size=False, no_auto_bright=True, output_bps=16)
# scale_full = np.expand_dims(np.float32(im/65535.0),axis = 0)*ratio
scale_full = np.expand_dims(np.float32(im / 65535.0), axis=0)
gt_raw = rawpy.imread(gt_path)
im = gt_raw.postprocess(use_camera_wb=True, half_size=False, no_auto_bright=True, output_bps=16)
gt_full = np.expand_dims(np.float32(im / 65535.0), axis=0)
#print(gt_full.shape)
input_full = np.minimum(input_full, 1.0)
#End input pre process
output = sess.run(out_image, feed_dict={in_image: input_full}) # This is where the model is run
output = depth_to_space(output, 2)
#np.save("test_output_{}".format(d2s_type), output)
print(output.shape)
output = np.minimum(np.maximum(output, 0), 1)
output = output[0, :, :, :]
gt_full = gt_full[0, :, :, :]
scale_full = scale_full[0, :, :, :]
scale_full = scale_full * np.mean(gt_full) / np.mean(
scale_full) # scale the low-light image to the same mean of the groundtruth
toimage(output * 255, high=255, low=0, cmin=0, cmax=255).save(
result_dir + 'final/%5d_00_%d_out.png' % (test_id, ratio))
toimage(scale_full * 255, high=255, low=0, cmin=0, cmax=255).save(
result_dir + 'final/%5d_00_%d_scale.png' % (test_id, ratio))
toimage(gt_full * 255, high=255, low=0, cmin=0, cmax=255).save(
result_dir + 'final/%5d_00_%d_gt.png' % (test_id, ratio))
#break
|
"""Predictors based on gammatone spectrograms"""
from pathlib import Path
import numpy as np
from eelbrain import *
from trftools.neural import edge_detector
DATA_ROOT = Path("~").expanduser() / 'Data' / 'Alice'
STIMULUS_DIR = DATA_ROOT / 'stimuli'
PREDICTOR_DIR = DATA_ROOT / 'predictors'
PREDICTOR_DIR.mkdir(exist_ok=True)
for i in range(1, 13):
gt = load.unpickle(STIMULUS_DIR / f'{i}-gammatone.pickle')
# Remove resampling artifacts
gt = gt.clip(0, out=gt)
# apply log transform
gt = (gt + 1).log()
# generate on- and offset detector model
gt_on = edge_detector(gt, c=30)
# 1 band predictors
save.pickle(gt.sum('frequency'), PREDICTOR_DIR / f'{i}~gammatone-1.pickle')
save.pickle(gt_on.sum('frequency'), PREDICTOR_DIR / f'{i}~gammatone-on-1.pickle')
# 8 band predictors
x = gt.bin(nbins=8, func=np.sum, dim='frequency')
save.pickle(x, PREDICTOR_DIR / f'{i}~gammatone-8.pickle')
x = gt_on.bin(nbins=8, func=np.sum, dim='frequency')
save.pickle(x, PREDICTOR_DIR / f'{i}~gammatone-on-8.pickle')
|
# python 3.7
"""Utility functions for visualizing results on html page."""
import base64
import os.path
import cv2
import numpy as np
__all__ = [
'get_grid_shape', 'get_blank_image', 'load_image', 'save_image',
'resize_image', 'add_text_to_image', 'fuse_images', 'HtmlPageVisualizer',
'VideoReader', 'VideoWriter', 'adjust_pixel_range'
]
def adjust_pixel_range(images, min_val=-1.0, max_val=1.0, channel_order='NCHW'):
  """Adjusts the pixel range of the input images.

  Maps a 4-D image batch from `[min_val, max_val]` to `uint8` `[0, 255]`.
  `NCHW` batches are additionally transposed to `NHWC`; `NHWC` batches keep
  their layout.

  Args:
    images: Input images to adjust pixel range.
    min_val: Min value of the input images. (default: -1.0)
    max_val: Max value of the input images. (default: 1.0)
    channel_order: Channel order of the input array. (default: NCHW)

  Returns:
    The postprocessed images with dtype `numpy.uint8` and range [0, 255].

  Raises:
    ValueError: If `images` is not a `numpy.ndarray` or its shape is invalid
      according to `channel_order`.
  """
  if not isinstance(images, np.ndarray):
    raise ValueError('Images should be with type `numpy.ndarray`!')
  order = channel_order.upper()
  if order not in ['NCHW', 'NHWC']:
    raise ValueError(f'Invalid channel order `{order}`!')
  if images.ndim != 4:
    raise ValueError(f'Input images are expected to be with shape `NCHW` or '
                     f'`NHWC`, but `{images.shape}` is received!')
  axis = 1 if order == 'NCHW' else 3
  if images.shape[axis] not in [1, 3]:
    raise ValueError(f'Input images should have 1 or 3 channels under `{order}` '
                     f'channel order!')
  # Linear rescale, then round-to-nearest via the +0.5 before the cast.
  scaled = (images.astype(np.float32) - min_val) * 255 / (max_val - min_val)
  scaled = np.clip(scaled + 0.5, 0, 255).astype(np.uint8)
  if order == 'NCHW':
    scaled = scaled.transpose(0, 2, 3, 1)
  return scaled
def get_grid_shape(size, row=0, col=0, is_portrait=False):
  """Gets the (height, width) of a grid holding `size` cells.

  Tries to make the grid as square as possible when neither `row` nor `col`
  is given. With `is_portrait=False` the height never exceeds the width
  (e.g. 16 -> (4, 4), 15 -> (3, 5)); with `is_portrait=True` the two are
  swapped.

  Args:
    size: Size (height * width) of the target grid.
    row: Preferred number of rows; used only if it divides `size`. (default: 0)
    col: Preferred number of columns; used only if it divides `size`.
      (default: 0)
    is_portrait: Whether to return a portrait size or a landscape size.
      (default: False)

  Returns:
    A two-element tuple, representing height and width respectively.
  """
  assert isinstance(size, int)
  assert isinstance(row, int)
  assert isinstance(col, int)
  if size == 0:
    return (0, 0)
  # Drop explicit hints that do not multiply to `size`.
  if row > 0 and col > 0 and row * col != size:
    row = col = 0
  if row > 0 and size % row == 0:
    return (row, size // row)
  if col > 0 and size % col == 0:
    return (size // col, col)
  # Search downward from sqrt(size) for the largest divisor.
  for candidate in range(int(np.sqrt(size)), 0, -1):
    if size % candidate == 0:
      row, col = candidate, size // candidate
      break
  return (col, row) if is_portrait else (row, col)
def get_blank_image(height, width, channels=3, is_black=True):
  """Gets a blank image, either white or black.

  NOTE: Always returns `RGB` channel order for color images and pixel range
  [0, 255].

  Args:
    height: Height of the returned image.
    width: Width of the returned image.
    channels: Number of channels. (default: 3)
    is_black: Whether to return a black image or white image. (default: True)
  """
  fill = 0 if is_black else 255
  return np.full((height, width, channels), fill, dtype=np.uint8)
def load_image(path):
  """Loads an image from disk in `RGB` order, or None if `path` is missing.

  Args:
    path: Path to load the image from.
  """
  if not os.path.isfile(path):
    return None
  bgr = cv2.imread(path)  # OpenCV decodes to BGR; flip to RGB.
  return bgr[:, :, ::-1]
def save_image(path, image):
  """Saves an `RGB` image (pixel range [0, 255]) to disk; no-op when None.

  Args:
    path: Path to save the image to.
    image: Image to save.
  """
  if image is None:
    return
  assert len(image.shape) == 3 and image.shape[2] in [1, 3]
  bgr = image[:, :, ::-1]  # OpenCV writes BGR.
  cv2.imwrite(path, bgr)
def resize_image(image, *args, **kwargs):
  """Resizes `image` via `cv2.resize()`, preserving the channel order.

  Returns None for None input. Re-adds the channel axis that cv2 drops for
  single-channel inputs.

  Args:
    image: Image to resize.
  """
  if image is None:
    return None
  assert image.ndim == 3 and image.shape[2] in [1, 3]
  resized = cv2.resize(image, *args, **kwargs)
  if resized.ndim == 2:
    resized = resized[:, :, np.newaxis]
  return resized
def add_text_to_image(image,
                      text='',
                      position=None,
                      font=cv2.FONT_HERSHEY_TRIPLEX,
                      font_size=1.0,
                      line_type=cv2.LINE_8,
                      line_width=1,
                      color=(255, 255, 255)):
  """Overlays `text` on `image` in place and returns the image.

  NOTE: The input image is assumed to be with `RGB` channel order.

  Args:
    image: The image to overlay text on.
    text: Text content to overlay on the image. (default: '')
    position: Target position (bottom-left corner) to add text. If not set,
      center of the image will be used by default. (default: None)
    font: Font of the text added. (default: cv2.FONT_HERSHEY_TRIPLEX)
    font_size: Font size of the text added. (default: 1.0)
    line_type: Line type used to depict the text. (default: cv2.LINE_8)
    line_width: Line width used to depict the text. (default: 1)
    color: Color of the text added in `RGB` channel order. (default:
      (255, 255, 255))

  Returns:
    An image with target text overlayed on.
  """
  # Nothing to draw for a missing image or empty text.
  if image is None or not text:
    return image
  cv2.putText(image, text, position, font, font_size, color,
              line_width, line_type, False)
  return image
def fuse_images(images,
                image_size=None,
                row=0,
                col=0,
                is_row_major=True,
                is_portrait=False,
                row_spacing=0,
                col_spacing=0,
                border_left=0,
                border_right=0,
                border_top=0,
                border_bottom=0,
                black_background=True):
  """Fuses a collection of images into an entire image.
  Args:
    images: A collection of images to fuse. Should be with shape [num, height,
      width, channels].
    image_size: Int or two-element tuple. This field is used to resize the image
      before fusing. `None` disables resizing. (default: None)
    row: Number of rows used for image fusion. If not set, this field will be
      automatically assigned based on `col` and total number of images.
      (default: None)
    col: Number of columns used for image fusion. If not set, this field will be
      automatically assigned based on `row` and total number of images.
      (default: None)
    is_row_major: Whether the input images should be arranged row-major or
      column-major. (default: True)
    is_portrait: Only active when both `row` and `col` should be assigned
      automatically. (default: False)
    row_spacing: Space between rows. (default: 0)
    col_spacing: Space between columns. (default: 0)
    border_left: Width of left border. (default: 0)
    border_right: Width of right border. (default: 0)
    border_top: Width of top border. (default: 0)
    border_bottom: Width of bottom border. (default: 0)
    black_background: Whether the gaps/borders are black (True) or white.
      (default: True)
  Returns:
    The fused image.
  Raises:
    ValueError: If the input `images` is not with shape [num, height, width,
      channels].
  """
  if images is None:
    return images
  if not images.ndim == 4:
    raise ValueError(f'Input `images` should be with shape [num, height, '
                     f'width, channels], but {images.shape} is received!')
  num, image_height, image_width, channels = images.shape
  if image_size is not None:
    if isinstance(image_size, int):
      image_size = (image_size, image_size)
    assert isinstance(image_size, (list, tuple)) and len(image_size) == 2
    width, height = image_size
  else:
    height, width = image_height, image_width
  row, col = get_grid_shape(num, row=row, col=col, is_portrait=is_portrait)
  # Total canvas size: cells + inter-cell spacing + outer borders.
  fused_height = (
      height * row + row_spacing * (row - 1) + border_top + border_bottom)
  fused_width = (
      width * col + col_spacing * (col - 1) + border_left + border_right)
  fused_image = get_blank_image(
      fused_height, fused_width, channels=channels, is_black=black_background)
  images = images.reshape(row, col, image_height, image_width, channels)
  if not is_row_major:
    # Swap grid axes so index [i, j] still means (row i, column j).
    images = images.transpose(1, 0, 2, 3, 4)
  for i in range(row):
    y = border_top + i * (height + row_spacing)
    for j in range(col):
      x = border_left + j * (width + col_spacing)
      if image_size is not None:
        image = cv2.resize(images[i, j], image_size)
      else:
        image = images[i, j]
      fused_image[y:y + height, x:x + width] = image
  return fused_image
def get_sortable_html_header(column_name_list, sort_by_ascending=False):
  """Gets header for sortable html page.

  The page contains a sortable table: clicking a column head sorts the rows
  by that column.

  Example:

  column_name_list = [name_1, name_2, name_3]
  header = get_sortable_html_header(column_name_list)
  footer = get_sortable_html_footer()
  sortable_table = ...
  html_page = header + sortable_table + footer

  Args:
    column_name_list: List of column header names.
    sort_by_ascending: Default sorting order. If set as `True`, the html page
      will be sorted by ascending order when the header is clicked for the
      first time.

  Returns:
    A string, which represents for the header for a sortable html page.
  """
  header = '\n'.join([
      '<script type="text/javascript">',
      'var column_idx;',
      'var sort_by_ascending = ' + str(sort_by_ascending).lower() + ';',
      '',
      'function sorting(tbody, column_idx){',
      '  this.column_idx = column_idx;',
      '  Array.from(tbody.rows)',
      '       .sort(compareCells)',
      '       .forEach(function(row) { tbody.appendChild(row); })',
      '  sort_by_ascending = !sort_by_ascending;',
      '}',
      '',
      'function compareCells(row_a, row_b) {',
      '  var val_a = row_a.cells[column_idx].innerText;',
      '  var val_b = row_b.cells[column_idx].innerText;',
      '  var flag = sort_by_ascending ? 1 : -1;',
      '  return flag * (val_a > val_b ? 1 : -1);',
      '}',
      '</script>',
      '',
      '<html>',
      '',
      '<head>',
      '<style>',
      '  table {',
      '    border-spacing: 0;',
      '    border: 1px solid black;',
      '  }',
      '  th {',
      '    cursor: pointer;',
      '  }',
      '  th, td {',
      '    text-align: left;',
      '    vertical-align: middle;',
      '    border-collapse: collapse;',
      '    border: 0.5px solid black;',
      '    padding: 8px;',
      '  }',
      '  tr:nth-child(even) {',
      '    background-color: #d2d2d2;',
      '  }',
      '</style>',
      '</head>',
      '',
      '<body>',
      '',
      '<table>',
      '<thead>',
      '<tr>',
      ''])
  # One clickable <th> per column, then open the sortable body.
  th_cells = ''.join(
      f'  <th onclick="sorting(tbody, {idx})">{column_name}</th>\n'
      for idx, column_name in enumerate(column_name_list))
  return header + th_cells + '</tr>\n' + '</thead>\n' + '<tbody id="tbody">\n'
def get_sortable_html_footer():
  """Gets footer for sortable html page.

  Closes the tbody/table/body/html tags opened by
  `get_sortable_html_header()`.
  """
  closing_tags = ['</tbody>', '</table>', '', '</body>', '</html>', '']
  return '\n'.join(closing_tags)
def encode_image_to_html_str(image, image_size=None):
  """Encodes an image to html language.
  Args:
    image: The input image to encode. Should be with `RGB` channel order.
    image_size: Int or two-element tuple. This field is used to resize the image
      before encoding. `None` disables resizing. (default: None)
  Returns:
    A string which represents the encoded image.
  """
  if image is None:
    return ''
  assert len(image.shape) == 3 and image.shape[2] in [1, 3]
  # Change channel order to `BGR`, which is opencv-friendly.
  image = image[:, :, ::-1]
  # Resize the image if needed.
  if image_size is not None:
    if isinstance(image_size, int):
      image_size = (image_size, image_size)
    assert isinstance(image_size, (list, tuple)) and len(image_size) == 2
    image = cv2.resize(image, image_size)
  # Encode the image to html-format string.
  # FIX: ndarray.tostring() was removed in NumPy 1.23; tobytes() returns
  # the identical byte payload.
  encoded_image = cv2.imencode(".jpg", image)[1].tobytes()
  encoded_image_base64 = base64.b64encode(encoded_image).decode('utf-8')
  html_str = f'<img src="data:image/jpeg;base64, {encoded_image_base64}"/>'
  return html_str
class HtmlPageVisualizer(object):
  """Defines the html page visualizer.

  Renders image results as a sortable html table. Built on
  `get_sortable_html_header()`, `get_sortable_html_footer()`, and
  `encode_image_to_html_str()`. Creating a page only requires:

  (1) num_rows: Number of rows of the table (header-row exclusive).
  (2) num_cols: Number of columns of the table.
  (3) header contents (optional): Title of each column.

  NOTE: `grid_size` can be used to assign `num_rows` and `num_cols`
  automatically.

  Example:

  html = HtmlPageVisualizer(num_rows, num_cols)
  html.set_headers([...])
  for i in range(num_rows):
    for j in range(num_cols):
      html.set_cell(i, j, text=..., image=...)
  html.save('visualize.html')
  """

  def __init__(self,
               num_rows=0,
               num_cols=0,
               grid_size=0,
               is_portrait=False,
               viz_size=None):
    if grid_size > 0:
      # Derive the table shape from the total cell count.
      num_rows, num_cols = get_grid_shape(
          grid_size, row=num_rows, col=num_cols, is_portrait=is_portrait)
    assert num_rows > 0 and num_cols > 0
    self.num_rows = num_rows
    self.num_cols = num_cols
    self.viz_size = viz_size
    self.headers = [''] * self.num_cols
    # Each cell holds optional text plus an html-encoded image.
    self.cells = [[{'text': '', 'image': ''}
                   for _ in range(self.num_cols)]
                  for _ in range(self.num_rows)]

  def set_header(self, column_idx, content):
    """Sets the content of a particular header by column index."""
    self.headers[column_idx] = content

  def set_headers(self, contents):
    """Sets the contents of all headers."""
    if isinstance(contents, str):
      contents = [contents]
    assert isinstance(contents, (list, tuple))
    assert len(contents) == self.num_cols
    for column_idx, content in enumerate(contents):
      self.set_header(column_idx, content)

  def set_cell(self, row_idx, column_idx, text='', image=None):
    """Sets the content of a particular cell.

    A cell contains some text as well as an image; both may be empty.

    Args:
      row_idx: Row index of the cell to edit.
      column_idx: Column index of the cell to edit.
      text: Text to add into the target cell.
      image: Image to show in the target cell. Should be with `RGB` channel
        order.
    """
    cell = self.cells[row_idx][column_idx]
    cell['text'] = text
    cell['image'] = encode_image_to_html_str(image, self.viz_size)

  def save(self, save_path):
    """Saves the html page."""
    rows_html = []
    for row in self.cells:
      cell_tds = []
      for cell in row:
        if cell['text']:
          cell_tds.append(f'  <td>{cell["text"]}<br><br>{cell["image"]}</td>\n')
        else:
          cell_tds.append(f'  <td>{cell["image"]}</td>\n')
      rows_html.append('<tr>\n' + ''.join(cell_tds) + '</tr>\n')
    page = (get_sortable_html_header(self.headers)
            + ''.join(rows_html)
            + get_sortable_html_footer())
    with open(save_path, 'w') as f:
      f.write(page)
class VideoReader(object):
  """Defines the video reader.

  Reads frames from a video file; frames are returned in `RGB` channel
  order.
  """

  def __init__(self, path):
    """Opens the video at `path`; raises ValueError if the file is missing."""
    if not os.path.isfile(path):
      raise ValueError(f'Video `{path}` does not exist!')
    self.path = path
    self.video = cv2.VideoCapture(path)
    assert self.video.isOpened()
    self.position = 0
    # Cache basic stream properties.
    self.length = int(self.video.get(cv2.CAP_PROP_FRAME_COUNT))
    self.frame_height = int(self.video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    self.frame_width = int(self.video.get(cv2.CAP_PROP_FRAME_WIDTH))
    self.fps = self.video.get(cv2.CAP_PROP_FPS)

  def __del__(self):
    """Releases the opened video."""
    self.video.release()

  def read(self, position=None):
    """Reads the next frame, or the frame at `position` when given.

    NOTE: The returned frame is assumed to be with `RGB` channel order.

    Args:
      position: Optional. If set, the reader seeks to the exact position
        before reading. Otherwise the next frame is read. (default: None)
    """
    if position is not None and position < self.length:
      self.video.set(cv2.CAP_PROP_POS_FRAMES, position)
      self.position = position
    success, frame = self.video.read()
    self.position += 1
    if not success:
      return None
    return frame[:, :, ::-1]  # BGR -> RGB
class VideoWriter(object):
  """Defines the video writer.
  This class can be used to create a video.
  NOTE: `.avi` and `DIVX` is the most recommended codec format since it does not
  rely on other dependencies.
  """
  def __init__(self, path, frame_height, frame_width, fps=24, codec='DIVX'):
    """Creates the video writer.

    Args:
      path: Output file path.
      frame_height: Height of every frame, in pixels.
      frame_width: Width of every frame, in pixels.
      fps: Frames per second. (default: 24)
      codec: FourCC codec string. (default: 'DIVX')
    """
    self.path = path
    self.frame_height = frame_height
    self.frame_width = frame_width
    self.fps = fps
    self.codec = codec
    # cv2 expects (width, height) for frameSize.
    self.video = cv2.VideoWriter(filename=path,
                                 fourcc=cv2.VideoWriter_fourcc(*codec),
                                 fps=fps,
                                 frameSize=(frame_width, frame_height))
  def __del__(self):
    """Releases the opened video."""
    self.video.release()
  def write(self, frame):
    """Writes a target frame.
    NOTE: The input frame is assumed to be with `RGB` channel order; it is
    flipped to BGR for OpenCV.
    """
    self.video.write(frame[:, :, ::-1])
|
import sys
from skbuild import setup
# NOTE(review): `sys` appears unused in this file — confirm before removing.
# scikit-build's setup() drives a CMake-based build of the wasm3 bindings.
setup(
    name="pywasm3",
    version="0.1",
    description="Python 3 bindings for wasm3",
    author='arrisde',
    license="MIT",
    packages=['pywasm3'],
    install_requires=["cffi==1.14.0"]
)
|
"""This defines a basic set of data for our Star Wars Schema.
This data is hard coded for the sake of the demo, but you could imagine
fetching this data from a backend service rather than from hardcoded
JSON objects in a more complex demo.
"""
from typing import List, NamedTuple, Optional
class Ship(NamedTuple):
    """A single starship record."""
    # Stringified 1-based identifier.
    id: str
    # Display name of the ship.
    name: str
# In-memory ship "table"; ids are stringified 1-based positions.
all_ships = [
    Ship("1", "X-Wing"),
    Ship("2", "Y-Wing"),
    Ship("3", "A-Wing"),
    # Yeah, technically it's Corellian. But it flew in the service of the rebels,
    # so for the purposes of this demo it's a rebel ship.
    Ship("4", "Millenium Falcon"),
    Ship("5", "Home One"),
    Ship("6", "TIE Fighter"),
    Ship("7", "TIE Interceptor"),
    Ship("8", "Executor"),
]
class Faction(NamedTuple):
    """A faction and the ids of the ships it operates."""
    # Stringified 1-based identifier.
    id: str
    # Display name of the faction.
    name: str
    # Ship ids belonging to this faction (mutable list inside the tuple).
    ships: List[str]
rebels = Faction("1", "Alliance to Restore the Republic", ["1", "2", "3", "4", "5"])
empire = Faction("2", "Galactic Empire", ["6", "7", "8"])
all_factions = [rebels, empire]
def create_ship(ship_name: str, faction_id: str) -> Ship:
    """Create a ship, register it globally, and attach it to a faction.

    The ship is appended to `all_ships` even when the faction is unknown.
    """
    ship = Ship(str(len(all_ships) + 1), ship_name)
    all_ships.append(ship)
    owner = get_faction(faction_id)
    if owner:
        owner.ships.append(ship.id)
    return ship
def get_ship(id_: str) -> Optional[Ship]:
    """Return the ship with the given id, or None if absent."""
    for ship in all_ships:
        if ship.id == id_:
            return ship
    return None
def get_faction(id_: str) -> Optional[Faction]:
    """Return the faction with the given id, or None if absent."""
    for faction in all_factions:
        if faction.id == id_:
            return faction
    return None
def get_rebels() -> Faction:
    """Return the rebel faction singleton."""
    return rebels
def get_empire() -> Faction:
    """Return the empire faction singleton."""
    return empire
|
# Generated by Django 2.2.7 on 2020-04-23 11:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the Chat model and restricts Msg.status to read/unread choices.

    Auto-generated by Django (see header comment); avoid hand-editing the
    operations list.
    """

    dependencies = [
        ('chat', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Chat',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('me', models.CharField(max_length=255)),
                ('myFriend', models.CharField(max_length=255)),
                ('chat', models.TextField(max_length=100000)),
            ],
        ),
        migrations.AlterField(
            model_name='msg',
            name='status',
            field=models.CharField(choices=[('r', 'read'), ('u', 'unread')], default='u', max_length=1),
        ),
    ]
|
from django.http import request
import pandas as pd
import numpy as np
import json
import sklearn.neighbors
from polls.models import *
# Silence pandas' chained-assignment (SettingWithCopy) warnings and widen
# the column display for debugging.
pd.set_option('chained_assignment',None)
pd.set_option('display.max_columns',100)
class DistanceH:
    """Computes haversine distances between all schools and a user's location."""
    def distance_haversine(self, user_id):
        """Return a DataFrame ['school_name', 'distance'] (km) for `user_id`.

        NOTE(review): sklearn.neighbors.DistanceMetric moved to
        sklearn.metrics in scikit-learn >= 1.1 — confirm the pinned version.
        """
        # School Data
        school_data = School.objects.values()
        df_school = pd.DataFrame(school_data)
        df_school_filtr = df_school[['school_name', 'lat', 'longi']]
        # Haversine expects coordinates in radians.
        df_school_filtr['lat'] = df_school_filtr['lat'].apply(lambda x: np.radians(float(x)))
        df_school_filtr['longi'] = df_school_filtr['longi'].apply(lambda x: np.radians(float(x)))
        # User Data
        user_data = userData.objects.values()
        df_user = pd.DataFrame(user_data)
        df_user = df_user[df_user['user_id']==user_id]
        df_user_latLong = df_user[['id', 'lat', 'longi']]
        df_user_latLong['lat'] = df_user_latLong['lat'].apply(lambda x: np.radians(float(x)))
        df_user_latLong['longi'] = df_user_latLong['longi'].apply(lambda x: np.radians(float(x)))
        dist = sklearn.neighbors.DistanceMetric.get_metric('haversine')
        # Pairwise great-circle distances, scaled by Earth's radius (6371 km).
        dist_matrix = (dist.pairwise
            (df_school_filtr[['lat','longi']],
            df_user_latLong[['lat','longi']])*6371
        )
        df_dist_matrix = (
            pd.DataFrame(dist_matrix)
        ).rename({0:'distance'}, axis=1)
        # Index alignment pairs each distance with its school row.
        df_dist_matrix['school_name'] = df_school_filtr['school_name']
        df_dist_matrix = df_dist_matrix[['school_name', 'distance']]
        return df_dist_matrix
class mainFunction:
    """Builds the filtered list of schools matching one user's preferences."""

    def mainfunctiondf(self, user_id):
        """Filter schools by the preferences stored for ``user_id``.

        Applies, in order: distance cap, parity choice, boolean service
        flags, then successive merges restricting to schools sharing at
        least one of the user's chosen durations, area-of-interest details,
        areas of interest, subjects and parent subjects.

        Returns:
            tuple: (Series of matching school names,
                    DataFrame of the user's subjects,
                    DataFrame of the user's area-of-interest details)
        """
        user_data = userData.objects.values()
        df_user = pd.DataFrame(user_data)
        df_user = df_user[df_user['user_id']==user_id].drop(['user_id', 'lat', 'longi'], axis=1).reset_index(drop=True)
        school_data = School.objects.values()
        df_school = pd.DataFrame(school_data)
        dist = DistanceH()
        df_dist = dist.distance_haversine(user_id)
        df_tot = df_school.merge(df_dist, on='school_name').drop(['lat', 'longi'], axis=1)
        df_tot['distance'] = df_tot['distance'].apply(lambda x: round(x, 2))
        # distance choice: 0 means "no distance limit"
        if df_user.iloc[0]['choice_distance'] != 0:
            df_tot = df_tot[df_tot['distance']<=df_user.iloc[0]['choice_distance']]
        list_speed = ['crecupero', 'extracurr', 'bStudio', 'stage', 'certificazioni', 'tutOrient']
        if str(df_user.iloc[0]['parit_choice']) != '0':
            df_tot = df_tot[df_tot['parit_choice']==df_user.iloc[0]['parit_choice']]
        for i in list_speed:
            # NOTE(review): the "or df_user.iloc[0][i]" clause is redundant
            # with "!= 0" for truthy values — confirm intended semantics.
            if df_user.iloc[0][i] != 0 or df_user.iloc[0][i]:
                df_tot = df_tot[df_tot[i]==df_user.iloc[0][i]]
        # parent subject / subject
        if len(userData.objects.filter(user_id=user_id, parsub__pk__isnull=False).values('parsub__pk')) != 0:
            user_parsub = userData.objects.filter(parsub__pk__isnull=False).values('parsub__pk', 'user_id')
            df_parsub = pd.DataFrame(user_parsub)
            df_parsub_user = df_parsub[df_parsub['user_id']==user_id]
            id_parsub_user = df_parsub_user['parsub__pk']
            sub = Subject.objects.values()
            df_sub = pd.DataFrame(sub)
            df_sub_user = df_sub[df_sub['parsub_id'].isin(id_parsub_user)]
        else:
            # No parent-subject preference: keep every subject.
            Sub = Subject.objects.values()
            df_sub_user = pd.DataFrame(Sub)
        # area of interest / area-of-interest details
        if len(userData.objects.filter(user_id=user_id, areaint__pk__isnull=False).values('areaint__pk')) != 0:
            user_parai = userData.objects.filter(user_id=user_id, areaint__pk__isnull=False).values('areaint__pk', 'user_id')
            df_ai = pd.DataFrame(user_parai)
            df_ai_user = df_ai[df_ai['user_id']==user_id]  # now we have the area-of-interest pk
            id_ai_user = df_ai_user['areaint__pk']
            detai = DettagliAreaInteresse.objects.values()
            df_detai = pd.DataFrame(detai)
            df_detai_user = df_detai[df_detai['name_ai_id'].isin(id_ai_user)]
        else:
            ai = DettagliAreaInteresse.objects.values()
            df_detai_user = pd.DataFrame(ai)
        durata = userData.objects.filter(user_id=user_id, durata__pk__isnull=False).values('durata__pk')
        df_durata_user = pd.DataFrame(durata, columns=['durata__pk'])
        # from school to duration: keep schools offering at least one of the
        # user's chosen durations
        if len(durata) != 0:
            durata_school = School.objects.filter(durata__pk__isnull=False).values('durata__pk', 'school_name')
            df_durata_school = pd.DataFrame(durata_school)
            df_durata_school = df_durata_school.groupby('school_name')['durata__pk'].apply(list).reset_index()
            query = df_durata_school['durata__pk'].apply(lambda x: any([k in x for k in df_durata_user['durata__pk'].values]))
            df_final_durata = df_durata_school[query]
            df_tot = df_tot.merge(df_final_durata, on='school_name')
        dettagliareainteresse = userData.objects.filter(user_id=user_id, dettagli_area_interesse__pk__isnull=False).values('dettagli_area_interesse__pk')
        df_dettagli_user = pd.DataFrame(dettagliareainteresse, columns=['dettagli_area_interesse__pk'])
        # area-of-interest details
        if len(dettagliareainteresse) != 0:
            detai_school = School.objects.filter(dettagli_area_interesse__pk__isnull=False).values('dettagli_area_interesse__pk', 'school_name')
            df_detai_school = pd.DataFrame(detai_school)
            df_detai_school = df_detai_school.groupby('school_name')['dettagli_area_interesse__pk'].apply(list).reset_index()
            query_detai = df_detai_school['dettagli_area_interesse__pk'].apply(lambda x: any([k in x for k in df_dettagli_user['dettagli_area_interesse__pk'].values]))
            df_final_detai = df_detai_school[query_detai]
            df_tot = df_tot.merge(df_final_detai, on='school_name')
        # area of interest
        areainteresse = userData.objects.filter(user_id=user_id, areaint__pk__isnull=False).values('areaint__pk')
        df_area_user = pd.DataFrame(areainteresse, columns=['areaint__pk'])
        if len(areainteresse) != 0:
            ai_school = School.objects.filter(dettagli_area_interesse__name_ai__pk__isnull=False).values('dettagli_area_interesse__name_ai__pk', 'school_name')
            df_ai_school = pd.DataFrame(ai_school)
            df_ai_school = df_ai_school.groupby('school_name')['dettagli_area_interesse__name_ai__pk'].apply(list).reset_index()
            query_ai = df_ai_school['dettagli_area_interesse__name_ai__pk'].apply(lambda x: any([k in x for k in df_area_user['areaint__pk'].values]))
            df_final_ai = df_ai_school[query_ai]
            df_tot = df_tot.merge(df_final_ai, on='school_name')
        # subjects
        subUser = userData.objects.filter(user_id=user_id, sub__pk__isnull=False).values('sub__pk')
        df_subject_user = pd.DataFrame(subUser, columns=['sub__pk'])
        if len(subUser) != 0:
            sub_school = School.objects.filter(sub__pk__isnull=False).values('sub__pk', 'school_name')
            df_subject_school = pd.DataFrame(sub_school)
            df_subject_school = df_subject_school.groupby('school_name')['sub__pk'].apply(list).reset_index()
            query_sub = df_subject_school['sub__pk'].apply(lambda x: any([k in x for k in df_subject_user['sub__pk'].values]))
            df_final_sub = df_subject_school[query_sub]
            df_tot = df_tot.merge(df_final_sub, on='school_name')
        # parent subjects
        parsub = userData.objects.filter(user_id=user_id, parsub__pk__isnull=False).values('parsub__pk')
        df_parsub_user = pd.DataFrame(parsub, columns=['parsub__pk'])
        if len(parsub) != 0:
            parsub_school = School.objects.filter(sub__parsub__pk__isnull=False).values('sub__parsub__pk', 'school_name')
            df_parsub_school = pd.DataFrame(parsub_school)
            df_parsub_school = df_parsub_school.groupby('school_name')['sub__parsub__pk'].apply(list).reset_index()
            query_parsub = df_parsub_school['sub__parsub__pk'].apply(lambda x: any([k in x for k in df_parsub_user['parsub__pk'].values]))
            df_final_parsub = df_parsub_school[query_parsub]
            df_tot = df_tot.merge(df_final_parsub, on='school_name')
        return df_tot['school_name'], df_sub_user, df_detai_user
class Results:
    """Assembles the template context for one user's school-matching results."""

    def result(self, user_id):
        """Build the context dict rendered by the results page.

        Combines the user's raw answers, their subject/area choices, and
        the filtered school list (with distances and per-school detail
        aggregations) into JSON-serializable records.
        """
        userdata = userData.objects.filter(user_id=user_id).values()
        df = pd.DataFrame(userdata)
        # Render booleans as Italian yes/no labels for the template.
        df = df.replace({False: 'No', True: 'Si'})
        json_records = df.reset_index().to_json(orient ='records')
        data = []
        data = json.loads(json_records)
        choiceUser = userData.objects.filter(user_id=user_id).values('dettagli_area_interesse__det_ai', 'areaint__name_ai', 'sub__name_sub', 'parsub__parsub')
        df_user = pd.DataFrame(choiceUser)
        schoolData = School.objects.values()
        df_school = pd.DataFrame(schoolData)
        dist = DistanceH()
        df_distance = dist.distance_haversine(user_id)
        main = mainFunction()
        # Only the matching school names (first element of the tuple) are needed.
        df_school_user = main.mainfunctiondf(user_id)[0]
        df_school = df_school.merge(df_distance, on='school_name').reset_index(drop=True)
        df_school = df_school.merge(df_school_user, on="school_name").reset_index(drop=True)
        sc_filter = School.objects.values('school_name', 'sub__name_sub', 'sub__parsub__parsub', 'dettagli_area_interesse__det_ai', 'dettagli_area_interesse__name_ai__name_ai')
        df_school_filter = pd.DataFrame(sc_filter)
        df_school['distance'] = df_school['distance'].apply(lambda x: round(x, 3))
        if len(df_school) != 0:
            # Collapse the per-(school, subject, area) rows into one row per
            # school holding lists of subjects/areas.
            df_school_tmp = df_school.merge(df_school_filter, on='school_name').reset_index(drop=True)
            df_school_tmp = df_school_tmp.groupby('school_name').apply(lambda x: [list(x['sub__name_sub']), list(x['sub__parsub__parsub']), list(x['dettagli_area_interesse__det_ai']), list(x['dettagli_area_interesse__name_ai__name_ai'])]).apply(pd.Series).reset_index()
            df_school_tmp.columns = ['school_name', 'sub__name_sub', 'sub__parsub__parsub', 'dettagli_area_interesse__det_ai', 'dettagli_area_interesse__name_ai__name_ai']
        else:
            df_school_tmp = pd.DataFrame({})
        school_user = df_school[['school_name', 'distance']]
        if len(df_school_tmp) != 0:
            # De-duplicate the aggregated lists per school.
            df_deta = school_user.merge(df_school_tmp, on='school_name').drop('distance', axis=1)
            df_deta['sub__name_sub'] = df_deta['sub__name_sub'].apply(lambda x: set(x))
            df_deta['sub__parsub__parsub'] = df_deta['sub__parsub__parsub'].apply(lambda x: set(x))
            df_deta['dettagli_area_interesse__det_ai'] = df_deta['dettagli_area_interesse__det_ai'].apply(lambda x: set(x))
            df_deta['dettagli_area_interesse__name_ai__name_ai'] = df_deta['dettagli_area_interesse__name_ai__name_ai'].apply(lambda x: set(x))
        else:
            df_deta = pd.DataFrame({})
        # NOTE(review): chained assignment below relies on the module-level
        # pd.set_option('chained_assignment', None) to mutate df_school in place.
        for i in range(len(df_school)):
            if df_school['lavoro'][i] == 1.0:
                df_school['lavoro'][i] = '/'
            if df_school['uni'][i] == 1.0:
                df_school['uni'][i] = '/'
        # Replace missing user choices with a '-' placeholder for display.
        for i in range(len(df_user['dettagli_area_interesse__det_ai'])):
            if df_user['dettagli_area_interesse__det_ai'][i] == None:
                df_user['dettagli_area_interesse__det_ai'][i] = '-'
        for i in range(len(df_user['areaint__name_ai'])):
            if df_user['areaint__name_ai'][i] == None:
                df_user['areaint__name_ai'][i] = '-'
        for i in range(len(df_user['sub__name_sub'])):
            if df_user['sub__name_sub'][i] == None:
                df_user['sub__name_sub'][i] = '-'
        for i in range(len(df_user['parsub__parsub'])):
            if df_user['parsub__parsub'][i] == None:
                df_user['parsub__parsub'][i] = '-'
        json_recordsSchool = df_school.reset_index().to_json(orient ='records')
        dataSchool = []
        dataSchool = json.loads(json_recordsSchool)
        json_recordsSchoolDeta = df_deta.reset_index().to_json(orient ='records')
        dataSchoolDeta = []
        dataSchoolDeta = json.loads(json_recordsSchoolDeta)
        context = {
            'd': data,
            'dettagli':set(df_user['dettagli_area_interesse__det_ai']),
            'areaint':set(df_user['areaint__name_ai']),
            'subject':set(df_user['sub__name_sub']),
            'parsub':set(df_user['parsub__parsub']),
            'school': dataSchool,
            'school_deta':dataSchoolDeta
        }
        return context
|
import datetime
class Todo:
    """A single todo item.

    Attributes:
        task: description of the work to do.
        category: grouping label for the task.
        date_added: ISO-8601 timestamp; defaults to "now" at construction.
        date_completed: ISO-8601 timestamp, or None while the task is open.
        status: 1 = open, 2 = completed; defaults to open.
        position: ordering index within the list, or None if unordered.
    """

    def __init__(self, task, category,
                 date_added=None, date_completed=None,
                 status=None, position=None):
        self.task = task
        self.category = category
        self.date_added = date_added if date_added is not None else datetime.datetime.now().isoformat()
        # `x if x is not None else None` is just `x`; the redundant
        # conditionals for date_completed/position are collapsed.
        self.date_completed = date_completed
        self.status = status if status is not None else 1  # 1 = open, 2 = completed
        self.position = position

    def __repr__(self) -> str:
        return f"({self.task}, {self.category}, {self.date_added}, {self.date_completed}, {self.status}, {self.position})"
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import os
from fnmatch import fnmatch
import mutagen
from mutagen.easyid3 import EasyID3
import urchin.fs.abstract
import urchin.fs.default
import urchin.fs.json
import urchin.fs.plugin
import urchin.fs.mp3
# Glob pattern used by the indexer to discover mp3 files.
MP3_GLOB = "*.mp3"
class Plugin(urchin.fs.plugin.Plugin):
    """urchin-fs plugin for mp3 files: indexes *.mp3 and formats display names."""
    name = "mp3file"
    def __init__(self):
        # Compose the pipeline from the default components, overriding only
        # the indexer (glob-based mp3 discovery) and the display formatter.
        super(Plugin, self).__init__(
            indexer=Mp3FileIndexer,
            matcher=urchin.fs.default.DefaultMetadataMatcher,
            extractor=urchin.fs.mp3.Mp3MetadataExtractor,
            merger=urchin.fs.default.DefaultMerger,
            munger=urchin.fs.default.DefaultMunger,
            formatter=Mp3FileFormatter,
        )
class Mp3FileIndexer(urchin.fs.abstract.AbstractFileIndexer):
    """File indexer specialized to the *.mp3 glob."""
    name = "mp3file"
    def __init__(self, config):
        super(Mp3FileIndexer, self).__init__(config, MP3_GLOB)
class Mp3FileFormatter(urchin.fs.plugin.Formatter):
    """Formats an mp3's display name as "<tracknumber> - <artist> - <title>"."""
    name = "mp3file"
    def __init__(self, config):
        # config is accepted for interface compatibility but unused.
        pass
    def format(self, original_name, metadata):
        """Return a one-element set containing the formatted display name.

        Metadata values arrive as sets; single-element sets are unwrapped so
        the %-format prints the bare value rather than a set literal.
        """
        # isinstance instead of `type(v) == set` also accepts set subclasses.
        d = {k: next(iter(v)) if isinstance(v, set) and len(v) == 1 else v
             for k, v in metadata.items()}
        return set(["%(tracknumber)s - %(artist)s - %(title)s" % d])
|
# -*- coding: utf-8 -*-
"""
This is helper.py module
"""
class Helpers(object):
    """Small collection of arithmetic helper methods."""

    def add_two(self, a, b):
        """Return the sum of ``a`` and ``b``."""
        total = a + b
        return total
|
# coding=utf-8
class Processor(object):
    """
    Processor base class.

    Subclasses override :meth:`process`; the base implementation is the
    identity transform.
    """
    # Class-level defaults, overridden per-instance in __init__.
    name = None
    parent = None
    def __init__(self, name=None, parent=None):
        self.name = name
        self.parent = parent
    @property
    def info(self):
        # Human-readable identifier used by __repr__.
        return self.name
    def process(self, content, debug=False, **kwargs):
        # Identity by default; hook point for subclasses.
        return content
    def __repr__(self):
        return "Processor <%s %s>" % (self.__class__.__name__, self.info)
    def __str__(self):
        return repr(self)
    def __unicode__(self):
        # Python 2 text protocol; NOTE(review): `unicode` does not exist on
        # Python 3, so this method would raise NameError there.
        return unicode(repr(self))
|
# Read a VLC playlist in xspf format and shuffle it. Compared to
# using the "shuffle" mode, this has the advantage that it can be
# browsed in the playlist - you can see the next and previous, etc.
import xml.etree.ElementTree as ET
from urllib.parse import unquote, urlparse
import os
import sys
import random
# Collect the decoded filesystem path of every <location> element in the
# playlist given as argv[1] (xspf is XML under the xspf.org namespace).
files = [unquote(urlparse(el.text).path)
         for el in ET.parse(sys.argv[1]).getroot()
         .findall(".//*/{http://xspf.org/ns/0/}location")
         ]
random.shuffle(files)
# Replace this process with vlc playing the shuffled list in order.
os.execvp("vlc", ["vlc"] + files)
|
import stripe
from stripe.test.helper import StripeResourceTest
class TransferTest(StripeResourceTest):
    """Verifies that Transfer list/cancel hit the expected API endpoints."""

    def test_list_transfers(self):
        stripe.Transfer.list()
        expected = ('get', '/v1/transfers', {})
        self.requestor_mock.request.assert_called_with(*expected)

    def test_cancel_transfer(self):
        transfer = stripe.Transfer(id='tr_cancel')
        transfer.cancel()
        expected = ('post', '/v1/transfers/tr_cancel/cancel', {}, None)
        self.requestor_mock.request.assert_called_with(*expected)
class TransferReversalsTests(StripeResourceTest):
    """Verifies the Transfer reversal CRUD helpers against the mock requestor."""

    def test_create_reversal(self):
        stripe.Transfer.create_reversal('tr_123', amount=100)
        expected = ('post', '/v1/transfers/tr_123/reversals', {'amount': 100}, None)
        self.requestor_mock.request.assert_called_with(*expected)

    def test_retrieve_reversal(self):
        stripe.Transfer.retrieve_reversal('tr_123', 'trr_123')
        expected = ('get', '/v1/transfers/tr_123/reversals/trr_123', {}, None)
        self.requestor_mock.request.assert_called_with(*expected)

    def test_modify_reversal(self):
        stripe.Transfer.modify_reversal('tr_123', 'trr_123', metadata={'foo': 'bar'})
        expected = ('post', '/v1/transfers/tr_123/reversals/trr_123',
                    {'metadata': {'foo': 'bar'}}, None)
        self.requestor_mock.request.assert_called_with(*expected)

    def test_list_reversals(self):
        stripe.Transfer.list_reversals('tr_123')
        expected = ('get', '/v1/transfers/tr_123/reversals', {}, None)
        self.requestor_mock.request.assert_called_with(*expected)
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from apps.generic.models import GenericUUIDMixin
class Sms(GenericUUIDMixin):
    """An SMS message record: a phone number plus the message body."""
    phone = models.CharField(_("Phone"), max_length=64)
    sms = models.TextField(_("Sms"))
    def __str__(self):
        return f"Sms {self.phone}: {self.sms}"
    class Meta:
        verbose_name = _("Sms")
        verbose_name_plural = _("Sms")
|
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import pickle
# Champion id -> name mapping, pre-filtered offline and pickled.
with open('datas/champion_filter.pkl', 'rb') as f:
    champion_datas = pickle.load(f)
# Shared color palettes for the plotting helpers below.
palette_1 = sns.color_palette('hls',8)
palette_2 = sns.color_palette("Paired", 9)[1:]
def show_rateplot(df, criteria):
    """Draw a barplot of the win rate for each "first objective" flag.

    For every boolean column of ``df``, computes the share of games that
    were won among games where the flag is True, and plots them sorted
    descending.

    Args:
        df: match dataframe restricted to the play-time window of interest;
            must have a 'win' column with values 'Win'/'Fail'.
        criteria: label appended to the plot title.
    """
    rate_data = {"name": [], "rate": []}
    for col in df.columns:
        if df[col].dtype == "bool":
            win_count = len(df[(df[col] == True) & (df['win'] == 'Win')])
            fail_count = len(df[(df[col] == True) & (df['win'] == 'Fail')])
            # Skip flags with no games at all; the original bare
            # `except: pass` also swallowed unrelated errors.
            try:
                rate = np.round(win_count / (win_count + fail_count), 2)
            except ZeroDivisionError:
                continue
            rate_data['name'].append(col)
            rate_data['rate'].append(rate)
    rate_df = pd.DataFrame(rate_data).sort_values('rate', ascending=False).reset_index(drop=True)
    g = sns.barplot(data=rate_df, x='name', y='rate', palette=palette_1)
    # Annotate each bar with its percentage.
    for idx, row in rate_df.iterrows():
        g.text(row.name, row.rate, f"{round(row.rate*100, 2)}%", color='black', ha='center')
    plt.title(f"First Object 선점에 따른 승률(%) - {criteria}", fontsize=15)
    plt.xticks(rotation=30, fontsize=10)
    plt.xlabel(None)
    plt.ylim((0.5, 1))
def preprocess_participant(df):
    """Drop abnormally played matches and collapse the first-objective flags.

    Removes rows with a missing `firstbloodkill`, and short (<900s) games
    missing `firsttowerkill`; then merges each kill/assist flag pair into a
    single boolean `firstblood`/`firsttower`/`firstinhibitor` column.

    Returns the mutated dataframe.
    """
    df.drop(index=df[df['firstbloodkill'].isna()].index, inplace=True)
    short_games = df[(df['firsttowerkill'].isna()) & (df['gameduration'] < 900)].sort_values('gameduration', ascending=False).index
    df.drop(index=short_games, inplace=True)
    for flag in ('firsttowerassist', 'firsttowerkill', 'firstinhibitorkill', 'firstinhibitorassist'):
        df[flag] = df[flag].fillna(False)
    # A player "took" an objective if they got either the kill or the assist.
    df['firstinhibitor'] = (df['firstinhibitorkill'] + df['firstinhibitorassist']).astype('bool')
    df['firsttower'] = (df['firsttowerkill'] + df['firsttowerassist']).astype('bool')
    df['firstblood'] = (df['firstbloodkill'] + df['firstbloodassist']).astype('bool')
    df.drop(columns=['firstinhibitorkill', 'firstinhibitorassist', 'firsttowerkill', 'firsttowerassist', 'firstbloodkill', 'firstbloodassist'], inplace=True)
    return df
def recommendation_obj(df, target, duration1, duration2=None):
    """Print, per position, the participation rate for ``target`` and the
    TOP2 champions among winning games in the given duration window.

    Args:
        df: match dataframe.
        target: objective column name (e.g. 'firsttower').
        duration1: window bound in seconds; 1200 means "< 1200",
            2400 means ">= 2400" when ``duration2`` is None.
        duration2: window end in seconds; when given, the window is
            [duration1, duration2).
    """
    if (duration1 == 1200) & (duration2 is None):
        datas = df[(df['win'] == True) & (df[target] == True) & (df['gameduration'] < duration1)]
    elif (duration1 == 2400) & (duration2 is None):
        datas = df[(df['win'] == True) & (df[target] == True) & (df['gameduration'] >= duration1)]
    else:
        datas = df[(df['win'] == True) & (df[target] == True) & (df['gameduration'] >= duration1) & (df['gameduration'] < duration2)]
    # Share of objective takes per position.
    result_1 = datas.groupby('position').size().reset_index(name='count')
    result_1['rate'] = np.round((result_1['count'] / np.sum(result_1['count']))*100, 2)
    result_2 = datas.groupby(['position', 'championid']).size().reset_index(name='count')
    for position in result_2['position'].unique():
        total = np.sum(result_2[result_2['position'] == position]['count'])
        data = result_2[result_2['position'] == position].sort_values('count', ascending=False).reset_index(drop=True)
        # Map champion ids to display names via the pickled module-level table.
        data['championid'] = data['championid'].map(champion_datas)
        # NOTE(review): assumes every position has at least two champions
        # (data["championid"][1]) — confirm upstream filtering guarantees this.
        print(f'{position}의 {target} 기여도: {result_1[result_1["position"] == position]["rate"].values[0]}%')
        print(f'{position} 중 TOP2 Champion: {data["championid"][0]}({np.round(data["count"][0]/total*100, 2)}%), \
            {data["championid"][1]}({np.round(data["count"][1]/total*100, 2)}%)')
        print("--------------")
def show_label_plot(df, target_ls, palette=palette_2):
    """Draw per-cluster barplots for the first four target columns.

    Args:
        df: one row per cluster, with a 'label' column plus the stat columns.
        target_ls: list of at least four column names to plot.
        palette: seaborn palette shared by all four subplots.
    """
    plt.figure(figsize=(20, 4))
    # The four subplots are identical except for the column plotted, so
    # drive them with a loop instead of four copy-pasted blocks.
    for pos, target in enumerate(target_ls[:4]):
        plt.subplot(1, 4, pos + 1)
        g = sns.barplot(data=df, x='label', y=target, palette=palette)
        plt.ylabel(None)
        plt.title(target, fontsize=15)
        # Annotate each bar with its rounded value.
        for idx, row in df.iterrows():
            g.text(row.name, row[target], np.round(row[target], 2), color='black', ha='center')
    plt.tight_layout()
    plt.show()
def show_rank_label(df, target):
    """Print the Best/Worst TOP2 clusters for ``target`` relative to its mean.

    Args:
        df: one row per cluster, with a 'label' column and the stat column.
        target: column name to rank clusters by.
    """
    high = {'label': [], 'value': []}
    low = {'label': [], 'value': []}
    # Split clusters into above-mean ("Best") and below-mean ("Worst").
    for idx, row in df.iterrows():
        if row[target] >= df[target].mean():
            high['label'].append(row['label'])
            high['value'].append(row[target])
        else:
            low['label'].append(row['label'])
            low['value'].append(row[target])
    high_df = pd.DataFrame(high).sort_values('value', ascending=False).reset_index(drop=True)
    low_df = pd.DataFrame(low).sort_values('value').reset_index(drop=True)
    print(f'<{target}> : Mean={np.round(df[target].mean(), 2)}')
    # Explicit bounds checks replace the old bare `except: pass`, which also
    # hid genuine errors such as a missing column.
    for i in range(min(2, len(high_df))):
        print(f'Best{i+1} >> Cluster: {int(high_df.loc[i]["label"])}, 평균보다 {np.round(high_df.loc[i]["value"] / df[target].mean(), 2)}배 높습니다')
    print("---------------------------")
    for i in range(min(2, len(low_df))):
        if low_df.loc[i]["value"] == 0.0:
            print(f'Worst{i+1} >> Cluster: {int(low_df.loc[i]["label"])}, 값이 0입니다.')
            continue
        print(f'Worst{i+1} >> Cluster: {int(low_df.loc[i]["label"])}, 평균보다 {np.round(1-(low_df.loc[i]["value"] / df[target].mean()), 2)}배 낮습니다')
def show_champion_label(df, extract_one=None):
    """Print the TOP3 most-picked champions for each cluster.

    Args:
        df: per-player dataframe with 'label' (cluster) and 'championid'.
        extract_one: when set to a cluster number, report only that cluster.

    NOTE(review): assumes each reported cluster has at least three distinct
    champions (data.loc[0..2]) — confirm before use on small clusters.
    """
    datas = df.groupby(['label', 'championid']).size().reset_index(name='count')
    # When champion info for a single cluster is wanted, pass its number
    # via extract_one.
    if not extract_one is None:
        data = datas[datas['label'] == extract_one].sort_values('count', ascending=False).reset_index(drop=True)
        data['rate'] = np.round((data['count'] / np.sum(data['count']))*100, 2)
        data['championid'] = data['championid'].map(champion_datas)
        print(f'Cluster_{extract_one} 중 Pick Rate TOP3 >> {data.loc[0]["championid"]}({data.loc[0]["rate"]}%), \
            {data.loc[1]["championid"]}({data.loc[1]["rate"]}%), {data.loc[2]["championid"]}({data.loc[2]["rate"]}%)')
    # Otherwise report the TOP3 champions for every cluster.
    else:
        for label in datas['label'].unique():
            data = datas[datas['label'] == label].sort_values('count', ascending=False).reset_index(drop=True)
            data['rate'] = np.round((data['count'] / np.sum(data['count']))*100, 2)
            data['championid'] = data['championid'].map(champion_datas)
            print(f'Cluster_{label} 중 Pick Rate TOP3 >> {data.loc[0]["championid"]}({data.loc[0]["rate"]}%), \
            {data.loc[1]["championid"]}({data.loc[1]["rate"]}%), {data.loc[2]["championid"]}({data.loc[2]["rate"]}%)')
            print()
import numpy as np
from torch.utils.data import Dataset
class DependentMaskContrastiveDataset(Dataset):
    """Contrastive dataset producing two complementary masked views per sample.

    Each sample x of length p is split into a1 = 2*D*x and a2 = 2*(I-D)*x
    where D is a random 0/1 diagonal, so that (a1 + a2) / 2 == x.

    Args:
        data: array of shape (n_samples, p).
        Z: per-sample labels/covariates with first dimension n_samples.
    """

    def __init__(self, data, Z):
        self.data = data
        self.Z = Z
        assert data.shape[0] == Z.shape[0]

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        x = self.data[idx]
        p = x.shape[0]
        # Random 0/1 matrix; multiplying by the identity keeps only its diagonal.
        mask = np.random.choice([0, 1], (p, p), p=[0.5, 0.5])
        identity = np.eye(p)
        D = identity * mask
        a1 = 2 * np.matmul(D, x)
        a2 = 2 * np.matmul((np.identity(p) - D), x)
        assert np.sum(0.5 * (a1 + a2) - x) == 0, "Error in augmentation"
        # `np.float` / `np.int` were removed in NumPy 1.24; the builtin
        # types give the same dtypes (float64 / platform int).
        return a1.astype(float), a2.astype(float), self.Z[idx].astype(int)
import io
import pathlib
import hashlib
import pandas as pd
import requests
from bs4 import BeautifulSoup
from PIL import Image
from selenium import webdriver
def get_content_from_url(url):
    """Fetch a fully rendered page with Selenium/Firefox and return its HTML.

    Scrolls to the bottom first so lazily loaded content is present.
    """
    driver = webdriver.Firefox()  # add "executable_path=" if driver not in running directory
    try:
        driver.get(url)
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        page_content = driver.page_source
    finally:
        # Always close the browser, even when navigation raises, so Firefox
        # processes are not leaked.
        driver.quit()
    return page_content
def parse_image_urls(content, classes, location, source):
    """Extract image URLs from page HTML.

    Args:
        content: raw page HTML.
        classes: CSS class of the card elements to scan.
        location: tag name to find inside each card (e.g. "img").
        source: attribute holding the URL (e.g. "src").

    Returns:
        De-duplicated list of URL strings, in document order.
    """
    # Pin the parser explicitly (html.parser is bs4's stdlib fallback) so
    # results do not depend on which parsers happen to be installed.
    soup = BeautifulSoup(content, "html.parser")
    results = []
    for card in soup.findAll(attrs={"class": classes}):
        tag = card.find(location)
        if tag is None:
            # Card without the expected inner tag; the original would have
            # crashed on tag.get(...).
            continue
        url = tag.get(source)
        # The original compared the Tag object against collected URL strings,
        # so duplicates were never filtered; compare the URL itself.
        if url not in results:
            results.append(url)
    return results
def save_urls_to_csv(image_urls):
    """Persist the scraped image URLs as a one-column CSV named links.csv."""
    frame = pd.DataFrame({"links": image_urls})
    frame.to_csv("links.csv", index=False, encoding="utf-8")
def get_and_save_image_to_file(image_url, output_dir):
    """Download an image and save it under ``output_dir`` as <sha1-prefix>.png.

    The filename is content-addressed (first 10 hex chars of the SHA-1 of
    the raw bytes), so re-downloading the same image overwrites one file.
    """
    response = requests.get(image_url, headers={"User-agent": "Mozilla/5.0"})
    # Fail loudly on HTTP errors instead of handing an error page to PIL.
    response.raise_for_status()
    image_content = response.content
    image_file = io.BytesIO(image_content)
    image = Image.open(image_file).convert("RGB")
    filename = hashlib.sha1(image_content).hexdigest()[:10] + ".png"
    file_path = output_dir / filename
    image.save(file_path, "PNG", quality=80)
def main():
    """Scrape the Hubble nebulae archive: collect image URLs, write them to
    links.csv, and download each image to a local directory."""
    url = "https://esahubble.org/images/archive/category/nebulae/"
    content = get_content_from_url(url)
    image_urls = parse_image_urls(
        content=content, classes="blog-card__link", location="img", source="src",
    )
    save_urls_to_csv(image_urls)
    for image_url in image_urls:
        # NOTE(review): output path looks like a placeholder — confirm.
        get_and_save_image_to_file(
            image_url, output_dir=pathlib.Path("nix/path/to/test"),
        )
if __name__ == "__main__":  # runs only when executed as a script, not on import
    main()
|
import numpy as np
import torch
from torch import tensor
import torchvision.transforms as transforms
from .base_model import BaseModel
from . import networks
from .patchnce import PatchNCELoss
import util.util as util
import cv2
import face_alignment as F
import os
from PIL import Image
class CUTModel(BaseModel):
""" This class implements CUT and FastCUT model, described in the paper
Contrastive Learning for Unpaired Image-to-Image Translation
Taesung Park, Alexei A. Efros, Richard Zhang, Jun-Yan Zhu
ECCV, 2020
The code borrows heavily from the PyTorch implementation of CycleGAN
https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix
"""
    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """ Configures options specific for CUT model
        """
        # NOTE(review): `choices` is a string here, so argparse performs a
        # substring test rather than set membership — confirm intended.
        parser.add_argument('--CUT_mode', type=str, default="CUT", choices='(CUT, cut, FastCUT, fastcut)')
        parser.add_argument('--lambda_GAN', type=float, default=1.0, help='weight for GAN loss:GAN(G(X))')
        parser.add_argument('--lambda_NCE', type=float, default=1.0, help='weight for NCE loss: NCE(G(X), X)')
        parser.add_argument('--nce_idt', type=util.str2bool, nargs='?', const=True, default=False, help='use NCE loss for identity mapping: NCE(G(Y), Y))')
        parser.add_argument('--nce_layers', type=str, default='0,4,8,12,16', help='compute NCE loss on which layers')
        parser.add_argument('--nce_includes_all_negatives_from_minibatch',
                            type=util.str2bool, nargs='?', const=True, default=False,
                            help='(used for single image translation) If True, include the negatives from the other samples of the minibatch when computing the contrastive loss. Please see models/patchnce.py for more details.')
        parser.add_argument('--netF', type=str, default='mlp_sample', choices=['sample', 'reshape', 'mlp_sample'], help='how to downsample the feature map')
        parser.add_argument('--netF_nc', type=int, default=256)
        parser.add_argument('--nce_T', type=float, default=0.07, help='temperature for NCE loss')
        parser.add_argument('--num_patches', type=int, default=256, help='number of patches per layer')
        parser.add_argument('--flip_equivariance',
                            type=util.str2bool, nargs='?', const=True, default=False,
                            help="Enforce flip-equivariance as additional regularization. It's used by FastCUT, but not CUT")
        parser.set_defaults(pool_size=0)  # no image pooling
        # Peek at the parsed CUT_mode to pick per-variant defaults.
        opt, _ = parser.parse_known_args()
        # Set default parameters for CUT and FastCUT
        if opt.CUT_mode.lower() == "cut":
            parser.set_defaults(nce_idt=True, lambda_NCE=1.0)
        elif opt.CUT_mode.lower() == "fastcut":
            parser.set_defaults(
                nce_idt=False, lambda_NCE=10.0, flip_equivariance=True,
                n_epochs=150, n_epochs_decay=50
            )
        else:
            raise ValueError(opt.CUT_mode)
        return parser
    def __init__(self, opt):
        """Build generator/feature/discriminator networks and the losses.

        Args:
            opt: parsed option namespace (see modify_commandline_options).
        """
        BaseModel.__init__(self, opt)
        # specify the training losses you want to print out.
        # The training/test scripts will call <BaseModel.get_current_losses>
        # NOTE(review): hard-coded cache path for one specific machine —
        # parameterize before reuse.
        os.environ['TORCH_HOME'] = "/home6/liuhy/torch_home"
        self.loss_names = ['G_GAN', 'D_real', 'D_fake', 'G', 'NCE']
        self.visual_names = ['real_A', 'fake_B', 'real_B']
        self.nce_layers = [int(i) for i in self.opt.nce_layers.split(',')]
        # if opt.nce_idt and self.isTrain:
        #     self.loss_names += ['NCE_Y']
        #     self.visual_names += ['idt_B']
        if self.isTrain:
            self.model_names = ['G', 'F', 'D']
            # self.model_names = ['G', 'F', 'D', 'M'] #adding mask in visualization
        else:  # during test time, only load G
            self.model_names = ['G']
        # define networks (both generator and discriminator)
        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.normG, not opt.no_dropout, opt.init_type, opt.init_gain, opt.no_antialias, opt.no_antialias_up, self.gpu_ids, opt)
        self.netF = networks.define_F(opt.input_nc, opt.netF, opt.normG, not opt.no_dropout, opt.init_type, opt.init_gain, opt.no_antialias, self.gpu_ids, opt)
        if self.isTrain:
            self.netD = networks.define_D(opt.output_nc, opt.ndf, opt.netD, opt.n_layers_D, opt.normD, opt.init_type, opt.init_gain, opt.no_antialias, self.gpu_ids, opt)
            # define loss functions
            self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
            # One PatchNCE loss instance per selected generator layer.
            self.criterionNCE = []
            for nce_layer in self.nce_layers:
                self.criterionNCE.append(PatchNCELoss(opt).to(self.device))
            self.criterionIdt = torch.nn.L1Loss().to(self.device)
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)
            from . import perceptual_model
            # NOTE(review): perceptual VGG pinned to cuda:1 — confirm the
            # multi-GPU assumption holds on the target machine.
            self.vgg_perceptual = perceptual_model.VGG16_for_Perceptual()
            self.vgg_perceptual.to('cuda:1')
    def data_dependent_initialize(self, data):
        """
        The feature network netF is defined in terms of the shape of the intermediate, extracted
        features of the encoder portion of netG. Because of this, the weights of netF are
        initialized at the first feedforward pass with some input images.
        Please also see PatchSampleF.create_mlp(), which is called at the first forward() call.
        """
        self.set_input(data)
        # Keep only this GPU's share of the batch for the warm-up pass.
        bs_per_gpu = self.real_A.size(0) // max(len(self.opt.gpu_ids), 1)
        self.real_A = self.real_A[:bs_per_gpu]
        self.real_B = self.real_B[:bs_per_gpu]
        self.forward()                     # compute fake images: G(A)
        if self.opt.isTrain:
            self.compute_D_loss().backward()                  # calculate gradients for D
            self.compute_G_loss().backward()                   # calculate graidents for G
            if self.opt.lambda_NCE > 0.0:
                # netF's parameters only exist after the first forward pass,
                # so its optimizer must be created here, not in __init__.
                self.optimizer_F = torch.optim.Adam(self.netF.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, self.opt.beta2))
                self.optimizers.append(self.optimizer_F)
    def optimize_parameters(self):
        """One training step: forward pass, then a D update followed by a
        joint G (and, for mlp_sample, F) update."""
        # forward
        self.forward()
        # update D
        self.set_requires_grad(self.netD, True)
        self.optimizer_D.zero_grad()
        self.loss_D = self.compute_D_loss()
        self.loss_D.backward()
        self.optimizer_D.step()
        # update G (D frozen so generator gradients don't touch it)
        self.set_requires_grad(self.netD, False)
        self.optimizer_G.zero_grad()
        if self.opt.netF == 'mlp_sample':
            self.optimizer_F.zero_grad()
        self.loss_G = self.compute_G_loss()
        self.loss_G.backward()
        self.optimizer_G.step()
        if self.opt.netF == 'mlp_sample':
            self.optimizer_F.step()
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): include the data itself and its metadata information.
The option 'direction' can be used to swap domain A and domain B.
"""
AtoB = self.opt.direction == 'AtoB'
self.real_A = input['A' if AtoB else 'B'].to(self.device)
self.real_B = input['B' if AtoB else 'A'].to(self.device)
self.image_paths = input['A_paths' if AtoB else 'B_paths']
    def forward(self):
        """Run forward pass; called by both functions <optimize_parameters> and <test>."""
        # Concatenate A and B so the generator processes both domains in one batch.
        self.real = torch.cat((self.real_A, self.real_B), dim=0)
        if self.opt.flip_equivariance:
            # FastCUT regularization: randomly horizontally flip the inputs
            # during training (width is dim 3).
            self.flipped_for_equivariance = self.opt.isTrain and (np.random.random() < 0.5)
            if self.flipped_for_equivariance:
                self.real = torch.flip(self.real, [3])
        self.fake = self.netG(self.real)
        self.fake_B = self.fake[:self.real_A.size(0)]  # netG(real_A)
        # if self.opt.nce_idt:
        #     self.idt_B = self.fake[self.real_A.size(0):]  # netG(real_B)
def compute_D_loss(self):
"""Calculate GAN loss for the discriminator"""
fake = self.fake_B.detach()
# Fake; stop backprop to the generator by detaching fake_B
pred_fake = self.netD(fake)
self.loss_D_fake = self.criterionGAN(pred_fake, False).mean()
# Real
self.pred_real = self.netD(self.real_B)
loss_D_real = self.criterionGAN(self.pred_real, True)
self.loss_D_real = loss_D_real.mean()
# combine loss and calculate gradients
self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
return self.loss_D
    def compute_G_loss(self):
        """Calculate GAN and NCE loss for the generator.

        Total = lambda_GAN-weighted adversarial term + NCE patch loss,
        blended 10:1 with a face-masked L1 term when nce_idt is enabled.
        """
        fake = self.fake_B
        # First, G(A) should fake the discriminator
        if self.opt.lambda_GAN > 0.0:
            pred_fake = self.netD(fake)
            self.loss_G_GAN = self.criterionGAN(pred_fake, True).mean() * self.opt.lambda_GAN
        else:
            self.loss_G_GAN = 0.0
        if self.opt.lambda_NCE > 0.0:
            self.loss_NCE = self.calculate_NCE_loss(self.real_A, self.fake_B)
        else:
            self.loss_NCE, self.loss_NCE_bd = 0.0, 0.0
        if self.opt.nce_idt and self.opt.lambda_NCE > 0.0:
            # Custom replacement for the stock identity-NCE term: an L1
            # penalty restricted to regions OUTSIDE the detected facial
            # features (mask is 0 inside the feature boxes, 1 elsewhere).
            mask = self.mask_tensor(self.real_A).detach()
            self.masked_A = self.real_A * mask
            self.masked_B = self.fake_B * mask
            self.masked_L1_loss = torch.nn.L1Loss()(self.masked_A, self.masked_B)
            # 10:1 weighting between the NCE and masked-L1 terms.
            loss_NCE_both = (self.loss_NCE * 10 + self.masked_L1_loss ) / 11
        else:
            loss_NCE_both = self.loss_NCE
        self.loss_G = self.loss_G_GAN + loss_NCE_both
        return self.loss_G
def calculate_NCE_loss(self, src, tgt):
n_layers = len(self.nce_layers)
# feat_q = self.netG(tgt, self.nce_layers, encode_only=True)
# if self.opt.flip_equivariance and self.flipped_for_equivariance:
# feat_q = [torch.flip(fq, [3]) for fq in feat_q]
# feat_k = self.netG(src, self.nce_layers, encode_only=True)
# feat_k_pool, sample_ids = self.netF(feat_k, self.opt.num_patches, None)
# feat_q_pool, _ = self.netF(feat_q, self.opt.num_patches, sample_ids)
feat_k = [src]
for h in self.vgg_perceptual.forward(src):
feat_k.append(h)
# print(h)
feat_k_pool, sample_ids = self.netF(feat_k, self.opt.num_patches, None)
# print(feat_k_pool)
feat_q = [tgt]
for h in self.vgg_perceptual.forward(tgt):
feat_q.append(h)
feat_q_pool, _ = self.netF(feat_q, self.opt.num_patches, sample_ids)
total_nce_loss = 0.0
for f_q, f_k, crit in zip(feat_q_pool, feat_k_pool, self.criterionNCE):
loss = crit(f_q, f_k) * self.opt.lambda_NCE
total_nce_loss += loss.mean()
return total_nce_loss / n_layers
def return_BBox(self, start, end, lms):
bbox = []
min_x = 10000
min_y = 10000
max_x = -10000
max_y = -10000
for i in range(start, end):
x = lms[i][0]
y = lms[i][1]
min_x = x if x < min_x else min_x
min_y = y if y < min_y else min_y
max_x = x if x > max_x else max_x
max_y = y if y > max_y else max_y
return min_x, min_y, max_x, max_y
# for tensor: xmin, xmax, ymin, ymax
def mask_tensor(self, image):
image_numpy = util.tensor2im(image.clone().detach())
# util.save_image(image_numpy, '/home6/liuhy/contrastive-unpaired-translation/test/npmask.png' )
image_cv2 = cv2.cvtColor(np.array(image_numpy), cv2.COLOR_BGR2RGB)
image_cv2 = cv2.cvtColor(image_cv2, cv2.COLOR_RGB2BGR)
# print(type(image_cv2), image_cv2.shape)
device = 'cuda:1' if torch.cuda.is_available() else 'cpu'
align = F.FaceAlignment(landmarks_type=F.LandmarksType._3D, device=device)
try:
lms68 = align.get_landmarks(image_cv2)[0]
except UserWarning:
return []
feature_id = [17, 22, 27, 36, 42, 48, 60, 68]
feature_name = ['eyebrow1', 'eyebrow2', 'nose', 'eye1', 'eye2', 'lips', 'teeth']
image_tensor = torch.ones_like(image)
for i in range(len(feature_name)):
xmin, xmax, ymin, ymax = self.return_BBox(feature_id[i], feature_id[i+1], lms68)
xmin, ymin, xmax, ymax = int(xmin), int(xmax), int(ymin), int(ymax)
# print(feature_name[i],': ',xmin, ymin, xmax, ymax)
for x in range(xmin, xmax + 1):
for y in range(ymin, ymax + 1):
for j in range(3):
image_tensor[0][j][y][x] = 0
# util.save_image(util.tensor2im(image_tensor * 255), '/home6/liuhy/contrastive-unpaired-translation/test/testmask3.png')
# image_ret = image_tensor * image
# image_numpy = util.tensor2im(image_ret)
# util.save_image(image_numpy, '/home6/liuhy/contrastive-unpaired-translation/test/testmask2.png')
# print("Current image saved...")
return image_tensor
    def mask_image(self, image):
        """Draw filled black rectangles over detected facial features and
        return the censored image as a tensor.

        NOTE(review): appears unused — compute_G_loss uses mask_tensor; the
        return value here is the masked *image*, not a 0/1 mask. Confirm
        before relying on it.
        """
        image_numpy = util.tensor2im(image)
        # util.save_image(image_numpy, '/home6/liuhy/contrastive-unpaired-translation/test/npmask.png' )
        image_cv2 = cv2.cvtColor(np.array(image_numpy), cv2.COLOR_BGR2RGB)
        image_cv2 = cv2.cvtColor(image_cv2, cv2.COLOR_RGB2BGR)
        # NOTE(review): GPU index 1 is hard-coded — confirm multi-GPU setup.
        device = 'cuda:1' if torch.cuda.is_available() else 'cpu'
        align = F.FaceAlignment(landmarks_type=F.LandmarksType._3D, device=device)
        try:
            lms68 = align.get_landmarks(image_cv2)[0]
        except UserWarning:
            # NOTE(review): get_landmarks signals "no face" by returning None
            # (-> TypeError on [0]), which this clause does not catch, and
            # callers cannot handle a [] return — verify this path.
            return []
        # Landmark index ranges delimiting each facial feature.
        feature_id = [17, 22, 27, 36, 42, 48, 60, 68]
        feature_name = ['eyebrow1', 'eyebrow2', 'nose', 'eye1', 'eye2', 'lips', 'teeth']
        for i in range(len(feature_name)):
            xmin, ymin, xmax, ymax = self.return_BBox(feature_id[i], feature_id[i+1], lms68)
            print(feature_name[i],': ',xmin, ymin, xmax, ymax)
            # -1 thickness = filled rectangle (black).
            image_cv2 = cv2.rectangle(image_cv2, (xmin, ymin), (xmax, ymax), (0, 0, 0), -1)
        # util.save_image(image_cv2, '/home6/liuhy/contrastive-unpaired-translation/test/testmask.png')
        transf = transforms.ToTensor()
        mask_tensor = transf(image_cv2)
        return mask_tensor
# torch.Size([1, 64, 256, 256])
# torch.Size([1, 64, 256, 256])
# torch.Size([1, 256, 64, 64])
# torch.Size([1, 512, 32, 32])
# Two candidate designs:
# 1. Pass each of src/tgt through VGG to get four feat_q/feat_k maps, then
#    through 4 netF heads (check whether differing sizes affect netF).
# 2. Upsample the last two feature maps to 512, concatenate along the first
#    dimension, becoming 1 x XXX.
# L1: take the union of the 68 facial-landmark bounding boxes; compute L1
#     only OUTSIDE the mask, not inside it.
# 68 keypoints through mtcnn / face alignment
# bbox of the 5 facial features for both A & B, take the union
# in result show the bounding box
# delete idt_B
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# (c) 2014 Bjoern Ricks <bjoern.ricks@gmail.com>
#
# See LICENSE comming with the source of 'trex' for details.
#
import os
import sys
if __name__ == "__main__":
    # Point Django at the project's settings module (unless already set),
    # then dispatch the CLI sub-command (runserver, migrate, ...) exactly
    # like a stock manage.py.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "trex.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
# Generated by Django 3.0.8 on 2020-07-10 09:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace Event.google_map_location with two fields: a free-text map
    search word and a Google Maps location id (both optional CharFields)."""
    dependencies = [("furport", "0009_event_google_map_location")]
    operations = [
        migrations.RemoveField(model_name="event", name="google_map_location"),
        migrations.AddField(
            model_name="event",
            name="google_map_description",
            field=models.CharField(
                blank=True, default="", max_length=255, verbose_name="グーグルマップ位置情報ワード"
            ),
        ),
        migrations.AddField(
            model_name="event",
            name="google_map_location_id",
            field=models.CharField(
                blank=True, default="", max_length=255, verbose_name="グーグルマップ位置情報id"
            ),
        ),
    ]
|
# noqa: E501 # verify-helper: PROBLEM https://onlinejudge.u-aizu.ac.jp/courses/library/7/DPL/5/DPL_5_B
from cpl.combinatronics.enumerator import Enumerator
def main() -> None:
    """Read n and k from stdin and print P(k, n) mod 1e9+7 (AOJ DPL_5_B)."""
    n, k = map(int, input().split())
    enumerator = Enumerator(k, 1_000_000_007)
    print(enumerator.permutate(k, n))


if __name__ == "__main__":
    main()
|
# This file is part of the Data Cleaning Library (openclean).
#
# Copyright (C) 2018-2021 New York University.
#
# openclean is released under the Revised BSD License. See file LICENSE for
# full license details.
"""Class that implements the DataframeMapper abstract class to perform groupby
operations on a pandas dataframe.
"""
from openclean.data.groupby import DataFrameGrouping
from openclean.operator.base import DataGroupReducer
from typing import Optional, List, Callable, Union, Dict
import pandas as pd
from collections import defaultdict
def aggregate(
    groups: DataFrameGrouping, func: Union[Dict[str, Callable], Callable],
    schema: Optional[List[str]] = None
):
    """Aggregate a DataFrameGrouping into a single dataframe.

    Thin functional wrapper around :class:`Aggregate`.

    Parameters
    ----------
    groups: DataFrameGrouping
        Object returned from a GroupBy operation.
    func: callable or dict of str:callable
        A single callable receives each whole group dataframe; a dict maps
        column names to per-column aggregate functions.
    schema: list of string, optional
        Column names for the resulting dataframe.

    Returns
    -------
    pd.DataFrame
    """
    operator = Aggregate(schema=schema, func=func)
    return operator.reduce(groups=groups)
class Aggregate(DataGroupReducer):
    """Reduce a DataFrameGrouping to a dataframe with one row per group,
    using either a single whole-group function or per-column functions.
    """
    def __init__(self, func: Union[Dict[str, Callable], Callable], schema: Optional[List[str]] = None):
        """Initialize the schema and the aggregate function(s).

        Parameters
        ----------
        func: callable or dict of str:callable
            The aggregate functions.
        schema: list of str, optional
            Column names of the returned dataframe.
        """
        super(Aggregate, self).__init__()
        self._is_input_dict = isinstance(func, dict)  # to retain memory of user input
        self.funcs = get_agg_funcs(func=func)
        self.schema = schema

    def reduce(self, groups):
        """Reduce the groups using the aggregate functions.

        Parameters
        ----------
        groups: DataFrameGrouping
            Grouping object returned by some groupby operation.

        Returns
        -------
        pd.DataFrame

        Raises
        ------
        KeyError
            If a requested input column isn't found in a group.
        TypeError
            If the provided schema does not match the result width.
        """
        single_input = not self._is_input_dict
        # Plain dicts suffice here; the old factory-less defaultdict()
        # behaved identically to dict.
        result = {}
        for key, group in groups.items():
            row = {}
            for col, func in self.funcs.items():
                if single_input:
                    val, single_output = is_single_or_dict(func(group))
                else:
                    if col not in group.columns:
                        # include the offending column/group in the message
                        raise KeyError(
                            "column '{}' not found in group '{}'".format(col, key))
                    val, single_output = is_single_or_dict(func(group[col]))
                if single_output:
                    row[col] = val
                elif single_input:
                    # a whole-group function returned a dict: it becomes
                    # the entire row
                    row = val
                else:
                    row[col] = val
            result[key] = row
        result = pd.DataFrame(result).T
        if self.schema is not None:
            if len(result.columns) != len(self.schema):
                raise TypeError('Invalid schema for dataframe of size {}'.format(result.shape[1]))
            result.columns = self.schema
        return result
# -- Helper Methods -----------------------------------------------------------
def get_agg_funcs(func):
    """Normalize *func* into a mapping of name/column -> callable.

    A dict is returned unchanged; a bare callable is wrapped in a dict keyed
    by its ``__name__`` (or ``repr``); anything else raises TypeError.

    Returns
    -------
    dict
    """
    if isinstance(func, dict):
        return func
    if callable(func):
        label = getattr(func, '__name__', repr(func))
        return {label: func}
    raise TypeError("aggregate function: {} not acceptable.".format(getattr(func, '__name__', repr(func))))
def is_single_or_dict(Y):
    """Classify an aggregate function's return value.

    Returns (value, True) for a scalar-like value, (value-as-dict, False)
    for a dict or pandas Series, and raises TypeError for other containers.
    """
    if isinstance(Y, dict):
        return Y, False
    if isinstance(Y, pd.Series):
        return Y.to_dict(), False
    # Exact-type check intentionally kept (subclasses of these containers
    # are treated as scalars, matching the original behavior). The old
    # `len([Y]) == 1` guard was always true and has been removed.
    if type(Y) not in (list, set, tuple, range, frozenset):
        return Y, True
    raise TypeError('func returns unacceptable type: {}'.format(type(Y)))
|
import plotly.express as px
import plotly.io as pio
import chart_studio.tools as tls
import plotly.graph_objects as go
from urllib.request import urlopen
import pandas as pd
import matplotlib.pyplot as plt
import os, sys
import json
import imageio
import glob
__author__ = 'Duy Cao'
__license__ = 'MIT'
__status__ = 'release'
__url__ = 'https://github.com/caominhduy/TH-Flu-Modulation'
__version__ = '1.0.0'
def epi_graph(df, ss, wk):
    """Render and save one USA choropleth map of flu levels.

    Parameters
    ----------
    df : dataframe with 'year', 'week', 'state' and 'level' columns.
    ss : season/year value. NOTE(review): concatenated into strings with
        '+', so it must be a str — confirm upstream callers.
    wk : week number.

    Writes image/epidemiology/<ss>/<ss>W<wk>.png.
    """
    df = df[df['year'] == ss]
    df = df[df['week'] == wk]
    fig = px.choropleth(df,
                        locations='state',
                        locationmode='USA-states',
                        color='level',
                        color_continuous_scale="Spectral_r",
                        range_color=(0, 13),
                        hover_name='state',
                        hover_data={'state': False},
                        scope="usa",
                        labels={'level': 'Level'},
                        width=1100,
                        height=600)
    fig.update_layout(
        title=ss + ' - Week ' + str(wk),
        title_x=0.5,
        title_font_size=25)
    # makedirs(exist_ok=True) also creates missing parents and avoids the
    # check-then-create race of the previous exists()/mkdir() pair.
    os.makedirs('image/epidemiology/' + ss, exist_ok=True)
    pio.write_image(fig, file='image/epidemiology/' + ss + '/' +
                    ss + 'W' + str(wk) + '.png')
# Take processed epidemiological data as input and output:
# - Choropleth maps for each year and week as
# - Combine separate maps of each year into GIFs
def epi_render(df):
    """Render per-week choropleth maps for every season in *df*, then bundle
    each season's maps into an animated GIF.

    Outputs image/epidemiology/<year>/<year>W<week>.png and
    image/epidemiology/<year>/<year>.gif.
    """
    # Create the output root (parents included, race-free).
    os.makedirs('image/epidemiology', exist_ok=True)
    # Make static images (may take a while for >600 maps)
    counter = 0
    for year in df['year'].unique():
        df_filtered = df[df['year'] == year]
        for week in df_filtered['week'].unique():
            epi_graph(df, year, week)
            counter += 1
            if counter == 1:
                print(f'\r Generated {counter} map', end='')
            else:
                print(f'\r Generated {counter} maps', end='')
    print('\n')
    # Make GIF images for each year
    counter = 0
    for year in df['year'].unique():
        # str(year) matches temp_render/humid_render and avoids a TypeError
        # when the 'year' column holds numeric values.
        prefix = 'image/epidemiology/' + str(year) + '/' + str(year) + 'W'
        filenames = []
        for i in range(53):
            p = prefix + str(i) + '.png'
            if os.path.exists(p):
                filenames.append(p)
        images = [imageio.imread(filename) for filename in filenames]
        imageio.mimsave('image/epidemiology/' + str(year) + '/' + str(year) + '.gif', images)
        counter += 1
        if counter == 1:
            print(f'\r Generated {counter} GIF', end='')
        else:
            print(f'\r Generated {counter} GIFs', end='')
# Graph the weather station locations
def station_render(df, name):
    """Plot the filtered US weather-station locations on a USA map.

    Parameters
    ----------
    df : dataframe with 'LON', 'LAT' and 'STATION NAME' columns.
    name : basename of the output PNG (image/weather/<name>.png).
    """
    fig = go.Figure(data=go.Scattergeo(lon=df['LON'],
                                       lat=df['LAT'],
                                       text=df['STATION NAME'],
                                       mode='markers',
                                       marker_color='dodgerblue'
                                       ))
    fig.update_layout(
        title='US Weather Stations (filtered)',
        title_x=0.5,
        title_font_size=16,
        geo_scope='usa',
        width=1100,
        height=600)
    # One race-free call creates image/ and image/weather/ as needed.
    os.makedirs('image/weather', exist_ok=True)
    pio.write_image(fig, file='image/weather/' + name + '.png')
def temp_render():
    """Render weekly US temperature choropleths for 2008-2019 and bundle
    each year's maps into a GIF.

    Reads data/weather/<year>-temp.csv (one row per week, one column per
    state) and writes image/weather/<year>/temp/<year>W<week>.png plus
    image/weather/<year>/temp/<year>.gif.
    """
    counter = 0
    for year in range(2008, 2020):
        df = pd.read_csv('data/weather/' + str(year) + '-temp.csv')
        for week in df['week'].to_list():
            df_new = df[df['week'] == week]
            # drop(columns=...) — the positional axis argument used before
            # was deprecated and removed in pandas 2.0.
            df_new = df_new.drop(columns=['week']).reset_index(drop=True).T
            df_new['state'] = df_new.index
            df_new = df_new.rename(columns={0: 'temp'})
            fig = px.choropleth(df_new,
                                locations='state',
                                locationmode='USA-states',
                                color='temp',
                                color_continuous_scale="Spectral_r",
                                range_color=(0, 100),
                                hover_name='state',
                                hover_data={'state': False},
                                scope="usa",
                                labels={'temp': 'Temp (oF)'},
                                width=1100,
                                height=600)
            fig.update_layout(
                title=str(year) + ' - Week ' + str(week),
                title_x=0.5,
                title_font_size=25)
            # Creates image/weather/<year>/temp and any missing parents.
            os.makedirs('image/weather/' + str(year) + '/temp', exist_ok=True)
            pio.write_image(fig, file='image/weather/' + str(year) + '/temp/' +
                            str(year) + 'W' + str(week) + '.png')
            counter += 1
            if counter == 1:
                print(f'\r Generated {counter} map', end='')
            else:
                print(f'\r Generated {counter} maps', end='')
        prefix = 'image/weather/' + str(year) + '/temp/' + str(year) + 'W'
        filenames = []
        for i in range(53):
            p = prefix + str(i) + '.png'
            if os.path.exists(p):
                filenames.append(p)
        images = [imageio.imread(filename) for filename in filenames]
        imageio.mimsave('image/weather/' + str(year) + '/temp/' + str(year) + '.gif', images)
    print('\n Finished rendering temperature graphs')
def humid_render():
    """Render weekly US humidity choropleths for 2008-2019 and bundle each
    year's maps into a GIF.

    Reads data/weather/<year>-humid.csv and writes
    image/weather/<year>/humid/<year>W<week>.png plus <year>.gif.
    """
    counter = 0
    for year in range(2008, 2020):
        df = pd.read_csv('data/weather/' + str(year) + '-humid.csv')
        for week in df['week'].to_list():
            df_new = df[df['week'] == week]
            # drop(columns=...) — the positional axis argument used before
            # was deprecated and removed in pandas 2.0.
            df_new = df_new.drop(columns=['week']).reset_index(drop=True).T
            df_new['state'] = df_new.index
            df_new = df_new.rename(columns={0: 'humid'})
            fig = px.choropleth(df_new,
                                locations='state',
                                locationmode='USA-states',
                                color='humid',
                                color_continuous_scale="Spectral_r",
                                range_color=(0, 100),
                                hover_name='state',
                                hover_data={'state': False},
                                scope="usa",
                                labels={'humid': 'Humidity'},
                                width=1100,
                                height=600)
            fig.update_layout(
                title=str(year) + ' - Week ' + str(week),
                title_x=0.5,
                title_font_size=25)
            # Creates image/weather/<year>/humid and any missing parents.
            os.makedirs('image/weather/' + str(year) + '/humid', exist_ok=True)
            pio.write_image(fig, file='image/weather/' + str(year) + '/humid/' +
                            str(year) + 'W' + str(week) + '.png')
            counter += 1
            if counter == 1:
                print(f'\r Generated {counter} map', end='')
            else:
                print(f'\r Generated {counter} maps', end='')
        prefix = 'image/weather/' + str(year) + '/humid/' + str(year) + 'W'
        filenames = []
        for i in range(53):
            p = prefix + str(i) + '.png'
            if os.path.exists(p):
                filenames.append(p)
        images = [imageio.imread(filename) for filename in filenames]
        imageio.mimsave('image/weather/' + str(year) + '/humid/' + str(year) + '.gif', images)
    print('\n Finished rendering humidity graphs')
def weather_render():
    """Render all weather graphs: temperature first, then humidity."""
    for renderer in (temp_render, humid_render):
        renderer()
|
import numpy as np
import os
import torch
import pandas as pd
from shutil import copyfile
source_root_url = "../../egs/dataset/Giant-MIDI"
data_root_url = "../../egs/dataset/giant_midis"
data_train_url = "../../egs/dataset/giant_midis/train"
data_vaild_url = "../../egs/dataset/giant_midis/vaild"
data_test_url = "../../egs/dataset/giant_midis/test"
def makedir(data_url):
    """Create *data_url* (and any missing parents); no-op if it exists.

    os.makedirs(..., exist_ok=True) avoids the check-then-create race of
    the previous exists()/makedirs() pair.
    """
    os.makedirs(data_url, exist_ok=True)
# Create the output directory tree.
makedir(data_root_url)
makedir(data_train_url)
makedir(data_vaild_url)
makedir(data_test_url)
# All MIDI files in the source dataset directory.
# NOTE(review): os.listdir order is arbitrary, so the split below is not
# reproducible across runs; sorting first would fix that — confirm intent.
dataset = os.listdir(source_root_url)
print(dataset)
#copyfile(source_file, destination_file)
# 80/10/10 train/validation/test split by list position.
# NOTE(review): when len(dataset) < 5, portion == 0 and dataset[:-0]
# evaluates to dataset[:0] (empty), leaving the train split empty while
# the test split gets everything — confirm inputs are always big enough.
portion = int(0.1*len(dataset))
train_set = dataset[:-2*portion]
validation_set = dataset[-2*portion:-portion]
test_set = dataset[-portion:]
def transition(dataset, des_url):
    """Copy every filename in *dataset* from the source dir into *des_url*."""
    for filename in dataset:
        src_path = source_root_url + '/' + filename
        dst_path = des_url + '/' + filename
        copyfile(src_path, dst_path)
# Copy each split into its destination directory.
transition(train_set,data_train_url)
transition(validation_set,data_vaild_url)
transition(test_set,data_test_url)
|
# Copyright 2021 JD.com, Inc., JD AI
"""
@author: Yehao Li
@contact: yehaoli.sysu@gmail.com
"""
import torch
from xmodaler.utils.registry import Registry
# Global registry of evaluation classes; implementations register themselves
# via @EVALUATION_REGISTRY.register() and are looked up by cfg.INFERENCE.NAME.
EVALUATION_REGISTRY = Registry("EVALUATION")
EVALUATION_REGISTRY.__doc__ = """
Registry for evaluation
"""
def build_evaluation(cfg, annfile, output_dir):
    """Instantiate the evaluator named by cfg.INFERENCE.NAME.

    Returns None when no evaluator name is configured.
    """
    name = cfg.INFERENCE.NAME
    if len(name) > 0:
        evaluator_cls = EVALUATION_REGISTRY.get(name)
        return evaluator_cls(cfg, annfile, output_dir)
    return None
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : BobAnkh
# @Github : https://github.com/BobAnkh
# @Date : 2020-12-09 19:33:29
# @LastEditTime : 2020-12-12 12:49:58
# @Description :
import cmd_control as cctl
import send
def get_location():
    '''
    Get the robot's current location.

    Returns:
        list: [row, column] grid coordinates decoded from the tens and ones
        decimal digits of the controller's numeric reply.
    '''
    cmd = "robot1"  # assume the robot is robot1
    Location_string = send.send_common(cmd)
    Location_num = int(Location_string)
    # tens digit -> first coordinate, ones digit -> second coordinate
    # (the previous `// 1 % 10` contained a no-op integer division).
    location = [Location_num//10%10, Location_num%10]
    return location
def walk_step(current_point, next_point):
    '''
    Walk forward one grid cell.

    Args:
        current_point (list): current grid coordinate
        next_point (list): next grid coordinate
            (NOTE(review): never used in the body — presumably intended for
            direction/validation; verify against callers)

    Returns:
        list: the location reported after moving.
        NOTE(review): the original docstring claimed an int status code
        (1/0), but the function returns get_location()'s list — confirm
        which contract callers rely on.
    '''
    location_now = current_point
    L = 1
    # Step until the reported location changes, then take L more steps and
    # stop.
    while True:
        if location_now == current_point:
            cctl.run_action('SlowForward')
            location_now = get_location()
        else:
            for i in range(0, L):
                cctl.run_action('SlowForward')
            break
    return get_location()
def turn(angle):
    '''
    Turn the robot.

    Args:
        angle (int): positive = clockwise, negative = counter-clockwise;
            the magnitude times 90 gives the turn angle. E.g. 2 means 180
            degrees clockwise, -1 means 90 degrees counter-clockwise.

    Returns:
        int: always 1. NOTE(review): the documented failure code 0 is never
        returned — confirm the intended contract.
    '''
    if angle != 0:
        # NOTE(review): one forward step before turning — presumably to
        # square up on the grid cell; confirm this is intentional.
        cctl.run_action('SlowForward')
    # Repetitions of the turn action needed per 90 degrees: [left, right].
    time = [3, 3]
    if angle < 0:
        for i in range(0, -angle*time[0]):
            cctl.run_action('LeftTurn')
    else:
        # angle == 0 falls through here with an empty range (no action).
        for i in range(0, angle*time[1]):
            cctl.run_action('RightTurn')
    return 1
def read_map_info():
    """Return the static map description.

    Returns:
        tuple: (location, barrier, target, start, end) grid coordinates.
    """
    location = [1, 4]
    barrier = [1, 5]
    target = [2, 5]
    start = [1, 2]
    end = [3, 9]
    return location, barrier, target, start, end
def go_upstairs():
    # TODO: not implemented — placeholder for the stair-climbing routine.
    pass
|
import json
import click
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.schema import UniqueConstraint
from pgsync.base import create_database, pg_engine
from pgsync.helper import teardown
from pgsync.utils import get_config
Base = declarative_base()
class User(Base):
    """A user; names are unique."""
    __tablename__ = "user"
    __table_args__ = (UniqueConstraint("name"),)
    id = sa.Column(sa.Integer, primary_key=True)
    name = sa.Column(sa.String, nullable=False)
    age = sa.Column(sa.Integer, nullable=True)
    gender = sa.Column(sa.String, nullable=True)
class Post(Base):
    """A blog post with a title and optional slug."""
    __tablename__ = "post"
    __table_args__ = ()
    id = sa.Column(sa.Integer, primary_key=True)
    title = sa.Column(sa.String, nullable=False)
    slug = sa.Column(sa.String, nullable=True)
class Comment(Base):
    """A comment; linked to posts via the post_comment join table."""
    __tablename__ = "comment"
    __table_args__ = ()
    id = sa.Column(sa.Integer, primary_key=True)
    title = sa.Column(sa.String, nullable=True)
    content = sa.Column(sa.String, nullable=True)
class Tag(Base):
    """A tag; names are unique."""
    __tablename__ = "tag"
    __table_args__ = (UniqueConstraint("name"),)
    id = sa.Column(sa.Integer, primary_key=True)
    name = sa.Column(sa.String, nullable=False)
class UserPost(Base):
    """Join table linking users to posts.

    NOTE(review): unlike the other join tables this one has no unique
    constraint on (user_id, post_id), so duplicate links are allowed —
    confirm that is intended.
    """
    __tablename__ = "user_post"
    __table_args__ = ()
    id = sa.Column(sa.Integer, primary_key=True)
    user_id = sa.Column(sa.Integer, sa.ForeignKey(User.id))
    # NOTE(review): backref "users" creates a User.users collection of
    # UserPost rows — a slightly misleading name; verify naming intent.
    user = sa.orm.relationship(
        User,
        backref=sa.orm.backref("users"),
    )
    post_id = sa.Column(sa.Integer, sa.ForeignKey(Post.id))
    post = sa.orm.relationship(
        Post,
        backref=sa.orm.backref("posts"),
    )
class PostComment(Base):
    """Join table linking posts to comments; each pair is unique."""
    __tablename__ = "post_comment"
    __table_args__ = (UniqueConstraint("post_id", "comment_id"),)
    id = sa.Column(sa.Integer, primary_key=True)
    post_id = sa.Column(sa.Integer, sa.ForeignKey(Post.id))
    post = sa.orm.relationship(
        Post,
        backref=sa.orm.backref("post"),
    )
    comment_id = sa.Column(sa.Integer, sa.ForeignKey(Comment.id))
    comment = sa.orm.relationship(Comment, backref=sa.orm.backref("comments"))
class UserTag(Base):
    """Join table linking users to tags; each pair is unique."""
    __tablename__ = "user_tag"
    __table_args__ = (UniqueConstraint("user_id", "tag_id"),)
    id = sa.Column(sa.Integer, primary_key=True)
    user_id = sa.Column(sa.Integer, sa.ForeignKey(User.id))
    user = sa.orm.relationship(
        User,
        backref=sa.orm.backref("user"),
    )
    tag_id = sa.Column(sa.Integer, sa.ForeignKey(Tag.id))
    tag = sa.orm.relationship(
        Tag,
        backref=sa.orm.backref("tags"),
    )
def setup(config=None):
    """Create each database listed in the JSON config and (re)create all
    tables defined on Base.

    Parameters
    ----------
    config : str
        Path to a JSON schema config: a list of documents, each with an
        "index" key and an optional "database" key.
    """
    # Use a context manager so the config file handle is closed (the
    # previous json.load(open(config)) leaked it).
    with open(config) as fp:
        documents = json.load(fp)
    for document in documents:
        database = document.get("database", document["index"])
        create_database(database)
        engine = pg_engine(database=database)
        Base.metadata.drop_all(engine)
        Base.metadata.create_all(engine)
@click.command()
@click.option(
    "--config",
    "-c",
    help="Schema config",
    type=click.Path(exists=True),
)
def main(config):
    # Resolve the config path, tear down any previous schema/replication
    # state, then create the databases and tables from scratch.
    config = get_config(config)
    teardown(config=config)
    setup(config)
if __name__ == "__main__":
    main()
|
# -*- coding:utf-8 -*-
from itertools import groupby
import io
from datetime import datetime
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.dates import DateFormatter
from matplotlib.figure import Figure
from matplotlib.ticker import MaxNLocator
import numpy as np
from django.http import (
HttpResponse, HttpResponseNotFound, HttpResponseForbidden)
from django.shortcuts import get_object_or_404, render
from django.utils.timezone import utc
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
from meteography.django.broadcaster import forecast
from meteography.django.broadcaster.models import (
Webcam, Picture, Prediction, PredictionParams)
def index(request):
    """Render the home page listing all webcams, ordered by name."""
    context = {'webcams': Webcam.objects.order_by('name')}
    return render(request, 'broadcaster/index.html', context)
def webcam(request, webcam_id):
    """Render a single webcam page with its four prediction-history panels
    (latest, highest error, lowest error, random)."""
    cam = get_object_or_404(Webcam, pk=webcam_id)
    # (panel id, label, queryset ordering) for each history section.
    history_specs = [
        ('hist-latest', "Latest", '-comp_date'),
        ('hist-worst', "Highest error", '-error'),
        ('hist-best', "Lowest error", 'error'),
        ('hist-random', "Random", '?'),
    ]
    histories = [
        {'id': panel_id, 'name': label, 'orderby': ordering}
        for panel_id, label, ordering in history_specs
    ]
    context = {
        'webcam': cam,
        'histories': histories,
    }
    return render(request, 'broadcaster/webcam.html', context)
@csrf_exempt
@require_http_methods(['PUT'])
def picture(request, webcam_id, timestamp):
    """Handle the upload (HTTP PUT) of a new webcam picture.

    Saves the picture, makes a fresh prediction for every parameter set of
    the webcam, and back-fills the error of any earlier prediction that
    targeted this timestamp. Returns 204 on success.
    """
    # FIXME have proper authentication — currently only localhost may upload
    hostname = request.get_host()
    has_port = hostname.find(':')
    if has_port > 0:
        hostname = hostname[:has_port]
    if hostname != '127.0.0.1':
        return HttpResponseForbidden()
    # check the webcam exists, return 404 if not
    # NOTE(review): looked up via a 'webcam_id' field while the webcam view
    # uses pk=webcam_id — confirm the Webcam model defines webcam_id.
    try:
        webcam = Webcam.objects.get(webcam_id=webcam_id)
    except Webcam.DoesNotExist:
        return HttpResponseNotFound("The webcam %s does not exist" % webcam_id)
    timestamp = int(timestamp)
    # Save the new picture
    img_bytes = io.BytesIO(request.read())
    pic = Picture(webcam, timestamp, img_bytes)
    pic.save()
    # Make a new prediction and save it for each set of prediction params
    params_list = webcam.prediction_params()
    for params in params_list:
        prediction = forecast.make_prediction(webcam, params, timestamp)
        # Check if there was any prediction targetting this timestamp,
        # and if yes compute the error
        pred_target = params.intervals[-1]
        comp_timestamp = timestamp - pred_target
        comp_date = datetime.fromtimestamp(float(comp_timestamp), utc)
        old_predictions = Prediction.objects.filter(comp_date=comp_date,
                                                    params=params)
        for prediction in old_predictions:
            forecast.update_prediction(prediction, pic)
    return HttpResponse(status=204)
def _mean_error(points):
return np.mean([p[1] for p in points])
def error_graph(request, webcam_id, pname):
    """Generate a PNG graph of prediction-error evolution over time.

    When more than two weeks of data exist, daily averages are plotted;
    otherwise raw error points. With no data at all, an empty "No data yet"
    graph is returned.
    """
    pred_params = get_object_or_404(PredictionParams,
                                    features__webcam_id=webcam_id, name=pname)
    error_data = list(pred_params.error_data())
    if error_data:
        # Compute daily average if enough data
        latest_error = error_data[-1]
        time_span = latest_error[0] - error_data[0][0]
        # (stray debug print of time_span.days removed)
        if time_span.days > 14:
            def key(elem):
                # group points by their age in whole days
                return (latest_error[0] - elem[0]).days
            day_split = (list(g) for k, g in groupby(error_data, key))
            daily_average = [(g[0][0], _mean_error(g)) for g in day_split]
            dates, errors = zip(*daily_average)
            graph_title = "Evolution of daily average error over time"
        else:
            dates, errors = zip(*error_data)
            graph_title = "Evolution of error value over time"
    else:
        graph_title = "No data yet"
    # Generate the graph with matplotlib
    fig = Figure()
    FigureCanvas(fig)
    ax = fig.add_subplot(111)
    ax.set_title(graph_title)
    ax.set_xlabel("Time")
    ax.set_ylabel("Error")
    if error_data:
        ax.plot(dates, errors)
        ax.xaxis.set_major_locator(MaxNLocator(5))
        ax.xaxis.set_major_formatter(DateFormatter('%d/%m/%Y'))
    # Write the graph image to the response
    response = HttpResponse(content_type='image/png')
    fig.savefig(response, format='png')
    return response
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from config import config
import math
#https://github.com/romulus0914/MixNet-Pytorch/blob/master/mixnet.py
class Attention(nn.Module):
    """Additive attention pooling over the step dimension.

    Each step's feature vector is projected to a scalar score; the scores
    are tanh-squashed, exponentiated, optionally masked, and normalized,
    and the weighted sum of the inputs is returned:
    (batch, step_dim, feature_dim) -> (batch, feature_dim).
    """

    def __init__(self, feature_dim, step_dim, bias=True, **kwargs):
        super(Attention, self).__init__(**kwargs)
        self.supports_masking = True
        self.bias = bias
        self.feature_dim = feature_dim
        self.step_dim = step_dim
        self.features_dim = 0  # kept for interface compatibility
        # Single learnable projection vector (feature_dim, 1).
        projection = torch.zeros(feature_dim, 1)
        nn.init.kaiming_uniform_(projection)
        self.weight = nn.Parameter(projection)
        if bias:
            self.b = nn.Parameter(torch.zeros(step_dim))

    def forward(self, x, mask=None):
        # Score every step: (B*steps, feat) @ (feat, 1) -> (B, steps).
        flattened = x.contiguous().view(-1, self.feature_dim)
        scores = torch.mm(flattened, self.weight).view(-1, self.step_dim)
        if self.bias:
            scores = scores + self.b
        scores = torch.tanh(scores)
        weights = torch.exp(scores)
        if mask is not None:
            weights = weights * mask
        # Normalize; epsilon guards against fully-masked rows.
        weights = weights / (torch.sum(weights, 1, keepdim=True) + 1e-10)
        # Weighted sum over the step dimension.
        attended = x * torch.unsqueeze(weights, -1)
        return torch.sum(attended, 1)
class Inception(nn.Module):
    """1D Inception block: four parallel branches concatenated on channels.

      b1: 1x1 conv
      b2: 1x1 reduce -> 3-wide conv
      b3: 1x1 reduce -> two stacked 3-wide convs (same receptive field as a
          5-wide conv with fewer parameters)
      b4: 3-wide max-pool -> 1x1 projection

    Output channels = n1x1 + n3x3 + n5x5 + pool_proj; sequence length is
    preserved by the padding choices.
    """
    def __init__(self, input_channels, n1x1, n3x3_reduce, n3x3, n5x5_reduce, n5x5, pool_proj):
        super().__init__()
        # 1x1 conv branch
        self.b1 = nn.Sequential(
            nn.Conv1d(input_channels, n1x1, kernel_size=1),
            nn.BatchNorm1d(n1x1),
            nn.ReLU(inplace=True)
        )
        # 1x1 conv -> 3x3 conv branch
        self.b2 = nn.Sequential(
            nn.Conv1d(input_channels, n3x3_reduce, kernel_size=1),
            nn.BatchNorm1d(n3x3_reduce),
            nn.ReLU(inplace=True),
            nn.Conv1d(n3x3_reduce, n3x3, kernel_size=3, padding=1),
            nn.BatchNorm1d(n3x3),
            nn.ReLU(inplace=True)
        )
        # 1x1 conv -> "5x5" branch (two stacked 3x3 convs)
        self.b3 = nn.Sequential(
            nn.Conv1d(input_channels, n5x5_reduce, kernel_size=1),
            nn.BatchNorm1d(n5x5_reduce),
            nn.ReLU(inplace=True),
            nn.Conv1d(n5x5_reduce, n5x5, kernel_size=3, padding=1),
            # BUGFIX: was BatchNorm1d(n5x5, n5x5), which passed n5x5 as the
            # eps argument and broke the normalization.
            nn.BatchNorm1d(n5x5),
            nn.ReLU(inplace=True),
            nn.Conv1d(n5x5, n5x5, kernel_size=3, padding=1),
            nn.BatchNorm1d(n5x5),
            nn.ReLU(inplace=True)
        )
        # 3-wide pooling -> 1x1 conv ("same" shape)
        self.b4 = nn.Sequential(
            nn.MaxPool1d(3, stride=1, padding=1),
            nn.Conv1d(input_channels, pool_proj, kernel_size=1),
            nn.BatchNorm1d(pool_proj),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        """Concatenate the four branch outputs along the channel dim."""
        return torch.cat([self.b1(x), self.b2(x), self.b3(x), self.b4(x)], dim=1)
class GoogleNet(nn.Module):
    """1D GoogLeNet-style feature extractor: a small conv prelayer, two
    Inception blocks, then LSTM -> GRU -> additive attention pooling.

    NOTE(review): forward() returns the 128-dim attention output; the final
    self.linear head is defined but its application is commented out —
    confirm whether callers attach their own classifier.
    """
    def __init__(self, num_class=34):
        super().__init__()
        # 12-channel input — presumably 12-lead ECG signals; confirm.
        self.prelayer = nn.Sequential(
            nn.Conv1d(12, 12, kernel_size=3, padding=1),
            nn.BatchNorm1d(12),
            nn.ReLU(inplace=True)
        )
        #although we only use 1 conv layer as prelayer,
        #we still use name a3, b3.......
        # input_channels, n1x1, n3x3_reduce, n3x3, n5x5_reduce, n5x5, pool_proj
        # self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)
        # self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)
        # a3 outputs 24+24+24+24 = 96 channels; b3 outputs 32+32+24+24 = 112.
        self.a3 = Inception(12, 24, 12, 24, 24, 24, 24)
        self.b3 = Inception(96, 32, 32, 32, 24, 24, 24)
        #"""In general, an Inception network is a network consisting of
        #modules of the above type stacked upon each other, with occasional
        #max-pooling layers with stride 2 to halve the resolution of the
        #grid"""
        self.maxpool = nn.MaxPool1d(3, stride=2, padding=1)
        # self.maxpool = nn.MaxPool1d(2, stride=1, padding=1)
        # a4/b4 are defined but unused in the current forward().
        self.a4 = Inception(112, 32, 64, 32, 12, 24, 32)
        self.b4 = Inception(120, 32, 64, 64, 24, 32, 32)
        # self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
        # self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
        # self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)
        # self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
        # self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)
        # NOTE(review): LSTM input size 300 implies the temporal length after
        # b3 is 300 (the recurrent nets here treat the length axis as the
        # feature axis) — confirm the expected input length.
        self.lstm = nn.LSTM(300, 112, bidirectional=True, batch_first=True)
        self.gru = nn.GRU(224, 64, bidirectional=True, batch_first=True)
        self.attention_layer = Attention(128, 112)
        self.linear = nn.Linear(128, num_class)

    def forward(self, x):
        """Extract a 128-dim attention-pooled feature vector from x."""
        output = self.prelayer(x)
        output = self.a3(output)
        output = self.b3(output)
        # output = self.maxpool(output)
        # output = self.a4(output)
        # output = self.b4(output)
        # output = self.c4(output)
        # output = self.d4(output)
        # output = self.e4(output)
        # output = self.maxpool(output)
        # output = self.a5(output)
        # output = self.b5(output)
        #"""It was found that a move from fully connected layers to
        #average pooling improved the top-1 accuracy by about 0.6%,
        #however the use of dropout remained essential even after
        #removing the fully connected layers."""
        # output = self.avgpool(output)
        # output = self.dropout(output)
        # max_pooled = F.adaptive_max_pool1d(output, 1)
        # avg_pooled = F.adaptive_avg_pool1d(output, 1)
        # output1 = torch.cat([max_pooled, avg_pooled], dim=1)
        # output1 = output1.view(output1.size(0), -1)
        output,_ = self.lstm(output)
        output,_ = self.gru(output)
        output = self.attention_layer(output)
        output = output.view(output.size(0), -1)
        # output = torch.cat([output, output1], dim=1)
        # output = output.view(output.size()[0], -1)
        # output = self.linear(output)
        return output
class Swish(nn.Module):
    """Swish activation (a.k.a. SiLU): returns ``x * sigmoid(x)`` element-wise."""

    def __init__(self):
        super(Swish, self).__init__()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        gate = self.sigmoid(x)
        return x * gate
# Shared activation instances used by the conv-block builders below.
# NOTE(review): each entry is a single nn.Module instance shared by every
# layer that selects it; safe here because neither ReLU nor Swish holds
# parameters or per-call state.
NON_LINEARITY = {
    'ReLU': nn.ReLU(inplace=True),
    'Swish': Swish(),
}
def _RoundChannels(c, divisor=8, min_value=None):
if min_value is None:
min_value = divisor
new_c = max(min_value, int(c + divisor / 2) // divisor * divisor)
if new_c < 0.9 * c:
new_c += divisor
return new_c
def _SplitChannels(channels, num_groups):
split_channels = [channels//num_groups for _ in range(num_groups)]
split_channels[0] += channels - sum(split_channels)
return split_channels
def Conv3x3Bn(in_channels, out_channels, stride, non_linear='ReLU'):
    """Kernel-3 Conv1d (padding 1, no bias) -> BatchNorm1d -> activation."""
    stages = [
        nn.Conv1d(in_channels, out_channels, 3, stride, 1, bias=False),
        nn.BatchNorm1d(out_channels),
        NON_LINEARITY[non_linear],
    ]
    return nn.Sequential(*stages)
def Conv1x1Bn(in_channels, out_channels, non_linear='ReLU'):
    """Pointwise (kernel-1) Conv1d without bias -> BatchNorm1d -> activation."""
    stages = [
        nn.Conv1d(in_channels, out_channels, 1, 1, 0, bias=False),
        nn.BatchNorm1d(out_channels),
        NON_LINEARITY[non_linear],
    ]
    return nn.Sequential(*stages)
class SqueezeAndExcite(nn.Module):
    """Squeeze-and-Excitation gate for 1-D feature maps.

    The bottleneck width is ``squeeze_channels * se_ratio``; the product must
    be an integer, otherwise a ValueError is raised.
    """

    def __init__(self, channels, squeeze_channels, se_ratio):
        super(SqueezeAndExcite, self).__init__()
        reduced = squeeze_channels * se_ratio
        if not reduced.is_integer():
            raise ValueError('channels must be divisible by 1/ratio')
        reduced = int(reduced)
        self.se_reduce = nn.Conv1d(channels, reduced, 1, 1, 0, bias=True)
        self.non_linear1 = NON_LINEARITY['Swish']
        self.se_expand = nn.Conv1d(reduced, channels, 1, 1, 0, bias=True)
        self.non_linear2 = nn.Sigmoid()

    def forward(self, x):
        # Squeeze: global average over the temporal axis.
        pooled = torch.mean(x, 2, keepdim=True)
        # Excite: bottleneck -> Swish -> expand -> sigmoid gate in [0, 1].
        gate = self.non_linear2(self.se_expand(self.non_linear1(self.se_reduce(pooled))))
        return x * gate
class GroupedConv1d(nn.Module):
    """Convolution whose input/output channels are partitioned into
    ``len(kernel_size)`` groups, each group running its own Conv1d with its
    own kernel size; group outputs are concatenated along the channel axis.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0):
        super(GroupedConv1d, self).__init__()
        self.num_groups = len(kernel_size)
        self.split_in_channels = _SplitChannels(in_channels, self.num_groups)
        self.split_out_channels = _SplitChannels(out_channels, self.num_groups)
        self.grouped_conv = nn.ModuleList(
            nn.Conv1d(cin, cout, k, stride=stride, padding=padding, bias=False)
            for cin, cout, k in zip(self.split_in_channels,
                                    self.split_out_channels, kernel_size)
        )

    def forward(self, x):
        # Fast path: a single group needs no split/concat.
        if self.num_groups == 1:
            return self.grouped_conv[0](x)
        chunks = torch.split(x, self.split_in_channels, dim=1)
        outs = [conv(chunk) for conv, chunk in zip(self.grouped_conv, chunks)]
        return torch.cat(outs, dim=1)
class MDConv(nn.Module):
    """Mixed depthwise convolution (MixConv): channels are split into one
    group per kernel size, and each group gets its own depthwise Conv1d with
    'same'-style padding (``k // 2``).
    """

    def __init__(self, channels, kernel_size, stride):
        super(MDConv, self).__init__()
        self.num_groups = len(kernel_size)
        self.split_channels = _SplitChannels(channels, self.num_groups)
        self.mixed_depthwise_conv = nn.ModuleList(
            nn.Conv1d(ch, ch, k, stride=stride, padding=k // 2,
                      groups=ch, bias=False)
            for ch, k in zip(self.split_channels, kernel_size)
        )

    def forward(self, x):
        # Fast path: a single group needs no split/concat.
        if self.num_groups == 1:
            return self.mixed_depthwise_conv[0](x)
        pieces = torch.split(x, self.split_channels, dim=1)
        outs = [conv(p) for conv, p in zip(self.mixed_depthwise_conv, pieces)]
        return torch.cat(outs, dim=1)
class MixNetBlock(nn.Module):
    """MixNet inverted-residual block.

    Pipeline: optional grouped pointwise expansion (when ``expand_ratio != 1``)
    -> mixed-kernel depthwise conv -> optional squeeze-excite (when
    ``se_ratio != 0``) -> grouped pointwise projection (no activation).
    A residual skip is added when stride is 1 and channel counts match.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size=[3],
        expand_ksize=[1],
        project_ksize=[1],
        stride=1,
        expand_ratio=1,
        non_linear='ReLU',
        se_ratio=0.0
    ):
        super(MixNetBlock, self).__init__()
        expand_channels = in_channels * expand_ratio
        self.residual_connection = (stride == 1 and in_channels == out_channels)
        stages = []
        if expand_ratio != 1:
            # Expansion phase: widen channels with a grouped pointwise conv.
            stages.append(nn.Sequential(
                GroupedConv1d(in_channels, expand_channels, expand_ksize),
                nn.BatchNorm1d(expand_channels),
                NON_LINEARITY[non_linear]
            ))
        # Depthwise convolution phase with mixed kernel sizes.
        stages.append(nn.Sequential(
            MDConv(expand_channels, kernel_size, stride),
            nn.BatchNorm1d(expand_channels),
            NON_LINEARITY[non_linear]
        ))
        if se_ratio != 0.0:
            stages.append(SqueezeAndExcite(expand_channels, in_channels, se_ratio))
        # Projection phase: back to out_channels, linear (no activation).
        stages.append(nn.Sequential(
            GroupedConv1d(expand_channels, out_channels, project_ksize),
            nn.BatchNorm1d(out_channels)
        ))
        self.conv = nn.Sequential(*stages)

    def forward(self, x):
        out = self.conv(x)
        return x + out if self.residual_connection else out
class MixNet(nn.Module):
    """Two-branch 1-D MixNet for 12-lead signals.

    One branch processes the full waveform ``x`` and one processes a shorter
    ``beat`` segment; each branch is stem conv -> MixNet block stack -> 1x1
    head conv, then max+avg global pooling.  Pooled features of both branches
    are concatenated and classified by a single linear layer.

    Fixes vs. previous revision:
    * ``_initialize_weights`` tested for ``nn.Conv2d``/``nn.BatchNorm2d``,
      which never match this 1-D model, so convs/norms silently kept PyTorch
      defaults; it now targets the 1-D classes actually used.
    * Depth-multiplier scaling mutated the shared class-level config tables
      in place, corrupting them for every later instance; scaling now works
      on a copy.
    * The classifier input size was hard-coded to 1024; it is now derived as
      ``2*feature_size + 2*beat_feature_size`` (1024 with the defaults).
    """

    # [in_channels, out_channels, kernel_size, expand_ksize, project_ksize, stride, expand_ratio, non_linear, se_ratio]
    mixnet_s = [(16, 16, [3], [1], [1], 1, 1, 'ReLU', 0.0),
                (16, 24, [3], [1, 1], [1, 1], 2, 6, 'ReLU', 0.0),
                (24, 24, [3], [1, 1], [1, 1], 1, 3, 'ReLU', 0.0),
                (24, 40, [3, 5, 7], [1], [1], 2, 6, 'Swish', 0.5),
                (40, 40, [3, 5], [1, 1], [1, 1], 1, 6, 'Swish', 0.5),
                (40, 40, [3, 5], [1, 1], [1, 1], 1, 6, 'Swish', 0.5),
                (40, 40, [3, 5], [1, 1], [1, 1], 1, 6, 'Swish', 0.5),
                (40, 80, [3, 5, 7], [1], [1, 1], 2, 6, 'Swish', 0.25),
                (80, 80, [3, 5], [1], [1, 1], 1, 6, 'Swish', 0.25),
                (80, 80, [3, 5], [1], [1, 1], 1, 6, 'Swish', 0.25),
                (80, 120, [3, 5, 7], [1, 1], [1, 1], 1, 6, 'Swish', 0.5),
                (120, 120, [3, 5, 7, 9], [1, 1], [1, 1], 1, 3, 'Swish', 0.5),
                (120, 120, [3, 5, 7, 9], [1, 1], [1, 1], 1, 3, 'Swish', 0.5),
                (120, 200, [3, 5, 7, 9, 11], [1], [1], 2, 6, 'Swish', 0.5),
                (200, 200, [3, 5, 7, 9], [1], [1, 1], 1, 6, 'Swish', 0.5),
                (200, 200, [3, 5, 7, 9], [1], [1, 1], 1, 6, 'Swish', 0.5)]
    mixnet_m = [(24, 24, [3], [1], [1], 1, 1, 'ReLU', 0.0),
                (24, 32, [3, 5, 7], [1, 1], [1, 1], 2, 6, 'ReLU', 0.0),
                (32, 32, [3], [1, 1], [1, 1], 1, 3, 'ReLU', 0.0),
                (32, 40, [3, 5, 7, 9], [1], [1], 2, 6, 'Swish', 0.5),
                (40, 40, [3, 5], [1, 1], [1, 1], 1, 6, 'Swish', 0.5),
                (40, 40, [3, 5], [1, 1], [1, 1], 1, 6, 'Swish', 0.5),
                (40, 40, [3, 5], [1, 1], [1, 1], 1, 6, 'Swish', 0.5),
                (40, 80, [3, 5, 7], [1], [1], 2, 6, 'Swish', 0.25),
                (80, 80, [3, 5, 7, 9], [1, 1], [1, 1], 1, 6, 'Swish', 0.25),
                (80, 80, [3, 5, 7, 9], [1, 1], [1, 1], 1, 6, 'Swish', 0.25),
                (80, 80, [3, 5, 7, 9], [1, 1], [1, 1], 1, 6, 'Swish', 0.25),
                (80, 120, [3], [1], [1], 1, 6, 'Swish', 0.5),
                (120, 120, [3, 5, 7, 9], [1, 1], [1, 1], 1, 3, 'Swish', 0.5),
                (120, 120, [3, 5, 7, 9], [1, 1], [1, 1], 1, 3, 'Swish', 0.5),
                (120, 120, [3, 5, 7, 9], [1, 1], [1, 1], 1, 3, 'Swish', 0.5),
                (120, 200, [3, 5, 7, 9], [1], [1], 2, 6, 'Swish', 0.5),
                (200, 200, [3, 5, 7, 9], [1], [1, 1], 1, 6, 'Swish', 0.5),
                (200, 200, [3, 5, 7, 9], [1], [1, 1], 1, 6, 'Swish', 0.5),
                (200, 200, [3, 5, 7, 9], [1], [1, 1], 1, 6, 'Swish', 0.5)]

    @staticmethod
    def _select_config(net_type, depth_multiplier):
        """Map a net-type name to (config, stem_channels, dropout_rate,
        depth_multiplier).  Raises TypeError for unknown names."""
        if net_type == 'mixnet_s':
            config, stem, dropout = MixNet.mixnet_s, 16, 0.2
        elif net_type == 'mixnet_m':
            config, stem, dropout = MixNet.mixnet_m, 24, 0.25
        elif net_type == 'mixnet_l':
            # 'l' reuses the 'm' layout with 1.3x wider channels.
            config, stem, dropout = MixNet.mixnet_m, 24, 0.25
            depth_multiplier *= 1.3
        else:
            raise TypeError('Unsupported MixNet type')
        return config, stem, dropout, depth_multiplier

    @staticmethod
    def _scale_config(config, stem_channels, depth_multiplier):
        """Return (config, stem_channels) with channel counts scaled by
        ``depth_multiplier`` and rounded.  Always returns a fresh list so the
        shared class-level tables are never mutated."""
        if depth_multiplier == 1.0:
            return list(config), stem_channels
        stem_channels = _RoundChannels(stem_channels * depth_multiplier)
        scaled = [
            (_RoundChannels(conf[0] * depth_multiplier),
             _RoundChannels(conf[1] * depth_multiplier)) + conf[2:]
            for conf in config
        ]
        return scaled, stem_channels

    @staticmethod
    def _build_blocks(config):
        """Instantiate the MixNetBlock stack described by ``config``."""
        return nn.Sequential(*(
            MixNetBlock(in_c, out_c, kernel_size=ks, expand_ksize=ek,
                        project_ksize=pk, stride=s, expand_ratio=er,
                        non_linear=nl, se_ratio=sr)
            for in_c, out_c, ks, ek, pk, s, er, nl, sr in config
        ))

    def __init__(self, net_type='mixnet_s', beat_net_type='mixnet_m', input_size=2560,
                 num_classes=34, stem_channels=16, feature_size=256, depth_multiplier=1.0,
                 beat_feature_size=256, beat_depth_multiplier=1.0):
        super(MixNet, self).__init__()
        # Resolve the per-branch architecture tables.  Note: stem_channels is
        # overridden by the chosen net type (kept as a parameter for
        # backward compatibility with existing callers).
        config, stem_channels, dropout_rate, depth_multiplier = \
            self._select_config(net_type, depth_multiplier)
        beat_config, beat_stem_channels, beat_dropout_rate, beat_depth_multiplier = \
            self._select_config(beat_net_type, beat_depth_multiplier)
        # dropout_rate / beat_dropout_rate are currently unused (dropout layer
        # is disabled below), kept for parity with the reference MixNet.
        assert input_size % 32 == 0
        # Width scaling on copies (never mutates the class-level tables).
        config, stem_channels = self._scale_config(config, stem_channels, depth_multiplier)
        beat_config, beat_stem_channels = \
            self._scale_config(beat_config, beat_stem_channels, beat_depth_multiplier)
        # Stem convolutions for the 12-lead inputs (stride 2).
        self.stem_conv = Conv3x3Bn(12, stem_channels, 2)
        self.beat_stem_conv = Conv3x3Bn(12, beat_stem_channels, 2)
        # MixNet block stacks.
        self.layers = self._build_blocks(config)
        self.beat_layers = self._build_blocks(beat_config)
        # 1x1 head convolutions.
        self.head_conv = Conv1x1Bn(config[-1][1], feature_size)
        self.beat_head_conv = Conv1x1Bn(beat_config[-1][1], beat_feature_size)
        # NOTE(review): built but never used in forward(); kept so existing
        # checkpoints with googlenet.* keys still load.
        self.googlenet = GoogleNet()
        self.avgpool = nn.AdaptiveAvgPool1d(1)
        # Each branch contributes max-pooled + avg-pooled features, so the
        # classifier sees 2*feature_size + 2*beat_feature_size values
        # (= 1024 with the defaults; previously hard-coded).
        self.classifier = nn.Linear(2 * feature_size + 2 * beat_feature_size, num_classes)
        # self.dropout = nn.Dropout(dropout_rate)
        self._initialize_weights()

    def forward(self, x, beat):
        """x: (B, 12, input_size) waveform; beat: (B, 12, L) beat segment.
        Returns (B, num_classes) logits."""
        x = self.stem_conv(x)
        x = self.layers(x)
        x = self.head_conv(x)
        beat = self.beat_stem_conv(beat)
        beat = self.beat_layers(beat)
        beat = self.beat_head_conv(beat)
        # Global max + average pooling per branch, flattened and concatenated.
        x = torch.cat([F.adaptive_max_pool1d(x, 1), F.adaptive_avg_pool1d(x, 1)], dim=1)
        x = x.view(x.size(0), -1)
        beat = torch.cat([F.adaptive_max_pool1d(beat, 1), F.adaptive_avg_pool1d(beat, 1)], dim=1)
        beat = beat.view(beat.size(0), -1)
        return self.classifier(torch.cat([x, beat], dim=1))

    def _initialize_weights(self):
        """He-style init for convs, unit-gamma for batch norms, small normal
        for linear layers.

        Fixed: the previous version matched ``nn.Conv2d``/``nn.BatchNorm2d``,
        which never occur in this 1-D model, so the init was a no-op for all
        conv and norm layers.
        """
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                n = m.kernel_size[0] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
# def mixnet_s(pretrained=False, **kwargs):
# model = MixNet(net_type='mixnet_s')
# return model
# def mixnet_m(pretrained=False, **kwargs):
# model = MixNet(net_type='mixnet_m')
# return model
# def mixnet_l(pretrained=False, **kwargs):
# model = MixNet(net_type='mixnet_l')
# return model
def mixnet_sm_pretrain(pretrained=False, **kwargs):
    """Build the s/m two-branch MixNet with 55 classes; optionally load the
    round-2 checkpoint from disk (CPU-mapped)."""
    model = MixNet(net_type='mixnet_s', beat_net_type='mixnet_m', num_classes=55)
    if pretrained:
        print("pretrain")
        checkpoint = torch.load("./round2/mixnet_sm_transform_best_weight.pth", map_location='cpu')
        model.load_state_dict(checkpoint['state_dict'])
    return model
def mixnet_sm(pretrained=True, **kwargs):
    """Build the s/m two-branch MixNet with 55 classes; by default load the
    round-1 pretrain weights whose path comes from the ``config`` module."""
    model = MixNet(net_type='mixnet_s', beat_net_type='mixnet_m', num_classes=55)
    if pretrained:
        print("pretrain")
        checkpoint = torch.load(config.round1_pretrain_mixnet_sm_weight, map_location='cpu')
        model.load_state_dict(checkpoint['state_dict'])
    return model
def mixnet_sm_predict(pretrained=False, **kwargs):
    """Build the s/m two-branch MixNet with 34 classes (prediction head);
    optionally load the round-2 checkpoint from disk (CPU-mapped)."""
    model = MixNet(net_type='mixnet_s', beat_net_type='mixnet_m', num_classes=34)
    if pretrained:
        print("pretrain")
        checkpoint = torch.load("./round2/mixnet_sm_transform_best_weight.pth", map_location='cpu')
        model.load_state_dict(checkpoint['state_dict'])
    return model
if __name__ == '__main__':
    # Smoke test: build the model and run one forward pass on random input.
    net = mixnet_sm()
    print(net)
    # torch.autograd.Variable is deprecated since PyTorch 0.4; plain tensors
    # carry autograd now, so the Variable(...) wrappers were removed.
    x = torch.randn(10, 12, 2560)
    x_beat = torch.randn(10, 12, 300)
    y = net(x, x_beat)
import core
import datetime
import logging
import time
import json
import os
import shutil
from core.helpers import Comparisons
from sqlalchemy import *
logging = logging.getLogger(__name__)
class SQL(object):
'''
All methods will return False on failure.
On success they will return the expected data or True.
'''
def __init__(self):
DB_NAME = u'sqlite:///{}'.format(core.DB_FILE)
try:
self.engine = create_engine(DB_NAME, echo=False, connect_args={'timeout': 30})
self.metadata = MetaData()
except (SystemExit, KeyboardInterrupt):
raise
except Exception, e: # noqa
logging.error(u'Opening SQL DB.', exc_info=True)
raise
# These definitions only exist to CREATE tables.
self.ARTISTS = Table('ARTISTS', self.metadata,
Column('name', TEXT),
Column('artist_id', TEXT),
Column('amgid', TEXT),
Column('link', TEXT),
Column('genre', TEXT),
Column('image', TEXT)
)
self.ALBUMS = Table('ALBUMS', self.metadata,
Column('artist_id', TEXT),
Column('album_id', TEXT),
Column('disc_count', SMALLINT),
Column('track_count', SMALLINT),
Column('release_date', TEXT),
Column('tracks', TEXT),
Column('album_title', TEXT),
Column('image', TEXT),
Column('status', TEXT)
)
self.MARKEDRESULTS = Table('MARKEDRESULTS', self.metadata,
Column('imdbid', TEXT),
Column('guid', TEXT),
Column('status', TEXT)
)
# {TABLENAME: [(new_col, old_col), (new_col, old_col)]}
self.convert_names = {}
def create_database(self):
logging.info(u'Creating tables.')
self.metadata.create_all(self.engine)
return
def execute(self, command):
''' Executes SQL command
command: str or list of SQL commands
We are going to loop this up to 5 times in case the database is locked.
After each attempt we wait 1 second to try again. This allows the query
that has the database locked to (hopefully) finish. It might
(i'm not sure) allow a query to jump in line between a series of
queries. So if we are writing searchresults to every movie at once,
the get_user_movies request may be able to jump in between them to
get the user's movies to the browser. Maybe.
Returns result of command, or False if unable to execute
'''
tries = 0
while tries < 5:
try:
if type(command) == list:
result = self.engine.execute(*command)
else:
result = self.engine.execute(command)
return result
except Exception as e:
logging.error(u'SQL Database Query: {}.'.format(command), exc_info=True)
if 'database is locked' in e.args[0]:
logging.debug(u'SQL Query attempt # {}.'.format(tries))
tries += 1
time.sleep(1)
else:
logging.error(u'SQL Databse Query: {}.'.format(command), exc_info=True)
raise
# all tries exhausted
return False
def write(self, TABLE, DB_STRING):
'''
Takes dict DB_STRING and writes to TABLE.
DB_STRING must have key:val matching Column:Value in table.
Returns Bool on success.
'''
logging.info(u'Writing data to {}.'.format(TABLE))
cols = u', '.join(DB_STRING.keys())
vals = DB_STRING.values()
qmarks = u', '.join(['?'] * len(DB_STRING))
sql = "INSERT INTO %s ( %s ) VALUES ( %s )" % (TABLE, cols, qmarks)
command = [sql, vals]
if self.execute(command):
return True
else:
logging.error(u'Unable to write to database.')
return False
def write_search_results(self, LIST):
'''
Takes list of dicts to write into SEARCHRESULTS.
'''
if not LIST:
return True
logging.info(u'Writing batch into SEARCHRESULTS.')
INSERT = self.SEARCHRESULTS.insert()
command = [INSERT, LIST]
if self.execute(command):
return True
else:
logging.error(u'Unable to write search results.')
return False
def update(self, TABLE, COLUMN, VALUE, imdbid='', guid=''):
'''
Updates single value in existing table row.
Selects row to update from imdbid or guid.
Sets COLUMN to VALUE.
Returns Bool.
'''
if imdbid:
idcol = u'imdbid'
idval = imdbid
elif guid:
idcol = u'guid'
idval = guid
else:
return 'ID ERROR'
logging.info(u'Updating {} to {} in {}.'.format(idval.split('&')[0], VALUE, TABLE))
sql = u'UPDATE {} SET {}=? WHERE {}=?'.format(TABLE, COLUMN, idcol)
vals = (VALUE, idval)
command = [sql, vals]
if self.execute(command):
return True
else:
logging.error(u'Unable to update database row.')
return False
def update_multiple(self, TABLE, data, imdbid='', guid=''):
''' Updates mulitple values in sql row
TABLE: str database table to access
data: dict key/value pairs to update in table
imdbid: str imdbid # of movie to update
guid: str guid of search result to update
Returns list of dicts with all information in ARTISTS
'''
logging.info(u'Retreving ARTISTS.')
TABLE = u'ARTISTS'
command = u'SELECT * FROM {} ORDER BY name ASC'.format(TABLE)
result = self.execute(command)
if result:
lst = []
for i in result:
lst.append(dict(i))
return lst
else:
logging.error(u'EXECUTE SQL.GET_ARTISTS FAILED.')
return False
def get_artists(self):
''' Gets all info in ARTISTS
Returns list of dicts with all information in ARTISTS
'''
logging.info(u'Retreving ARTISTS.')
TABLE = u'ARTISTS'
command = u'SELECT * FROM {} ORDER BY name ASC'.format(TABLE)
result = self.execute(command)
if result:
lst = []
for i in result:
lst.append(dict(i))
return lst
else:
logging.error(u'EXECUTE SQL.GET_ARTISTS FAILED.')
return False
def get_artist(self, artist_id):
''' Returns dict of info for a single artist_id
artist_id: str atrist id #
Looks through ARTISTS for artist_id
Returns dict of first match
'''
logging.info(u'Retreving artist {}.'.format(artist_id))
command = u'SELECT * FROM ARTISTS WHERE artist_id="{}"'.format(artist_id)
result = self.execute(command)
if result:
data = result.fetchone()
if data:
return dict(data)
else:
return False
else:
return False
def get_albums(self, artist_id):
''' Gets all album info from artist artist_id
artist_id: str atrist id #
Looks through ALBUMS for artist_id
Returns list of dicts
'''
logging.info(u'Retreving albums from artist {}.'.format(artist_id))
command = u'SELECT * FROM ALBUMS WHERE artist_id="{}" ORDER BY release_date DESC'.format(artist_id)
results = self.execute(command)
if results:
return [dict(i) for i in results]
else:
return False
def get_search_results(self, imdbid, quality):
''' Gets all search results for a given movie
:param imdbid: str imdb id #
quality: str quality profile. Used to sort order
Returns list of dicts for all SEARCHRESULTS that match imdbid
'''
if quality in core.CONFIG['Quality']['Profiles'] and core.CONFIG['Quality']['Profiles'][quality]['prefersmaller']:
sort = 'ASC'
else:
sort = 'DESC'
logging.info(u'Retreving Search Results for {}.'.format(imdbid))
TABLE = u'SEARCHRESULTS'
command = u'SELECT * FROM {} WHERE imdbid="{}" ORDER BY score DESC, size {}'.format(TABLE, imdbid, sort)
results = self.execute(command)
if results:
return results.fetchall()
else:
return False
def get_marked_results(self, imdbid):
''' Gets all entries in MARKEDRESULTS for given movie
:param imdbid: str imdb id #
Returns dict {guid:status, guid:status, etc}
'''
logging.info(u'Retreving Marked Results for {}.'.format(imdbid))
TABLE = u'MARKEDRESULTS'
results = {}
command = u'SELECT * FROM {} WHERE imdbid="{}"'.format(TABLE, imdbid)
data = self.execute(command)
if data:
for i in data.fetchall():
results[i['guid']] = i['status']
return results
else:
return False
def remove_movie(self, imdbid):
''' Removes movie and search results from DB
:param imdbid: str imdb id #
Doesn't access sql directly, but instructs other methods to delete all information that matches imdbid.
Removes from MOVIE, SEARCHRESULTS, and deletes poster. Keeps MARKEDRESULTS.
Returns True/False on success/fail or None if movie doesn't exist in DB.
'''
logging.info(u'Removing {} from {}.'.format(imdbid, 'MOVIES'))
if not self.row_exists('MOVIES', imdbid=imdbid):
return None
if not self.delete('MOVIES', 'imdbid', imdbid):
return False
logging.info(u'Removing any stored search results for {}.'.format(imdbid))
if self.row_exists('SEARCHRESULTS', imdbid):
if not self.purge_search_results(imdbid=imdbid):
return False
logging.info(u'{} removed.'.format(imdbid))
return True
def delete(self, TABLE, idcol, idval):
''' Deletes row where idcol == idval
:param idcol: str identifying column
:param idval: str identifying value
Returns Bool.
'''
logging.info(u'Removing from {} where {} is {}.'.format(TABLE, idcol, idval.split('&')[0]))
command = u'DELETE FROM {} WHERE {}="{}"'.format(TABLE, idcol, idval)
if self.execute(command):
return True
else:
return False
def purge_search_results(self, imdbid=''):
''' Deletes all search results
:param imdbid: str imdb id # <optional>
Be careful with this one. Supplying an imdbid deletes search results for that
movie. If you do not supply an imdbid it purges FOR ALL MOVIES.
BE CAREFUL.
Returns Bool
'''
TABLE = u'SEARCHRESULTS'
if imdbid:
command = u'DELETE FROM {} WHERE imdbid="{}"'.format(TABLE, imdbid)
else:
command = u'DELETE FROM {}'.format(TABLE)
if self.execute(command):
return True
else:
return False
def get_distinct(self, TABLE, column, idcol=None, idval=None):
''' Gets unique values in TABLE
:param TABLE: str table name
:param column: str column to return
:param idcol: str identifying column
:param idval: str identifying value
Gets values in TABLE:column where idcol == idval
Returns list ['val1', 'val2', 'val3']
'''
logging.info(u'Getting distinct values for {} in {}'.format(idval.split('&')[0], TABLE))
if idcol and idval:
command = u'SELECT DISTINCT {} FROM {} WHERE {}="{}"'.format(column, TABLE, idcol, idval)
else:
command = u'SELECT DISTINCT {} FROM {}'.format(column, TABLE)
data = self.execute(command)
if data:
data = data.fetchall()
if len(data) == 0:
return None
lst = []
for i in data:
lst.append(i[column])
return lst
else:
logging.error(u'Unable to read database.')
return False
def row_exists(self, TABLE, artist_id=''):
''' Checks if row exists in table
:param TABLE: str name of sql table to look through
:param imdbid: str imdb identification number <optional>
:param guid: str download guid <optional>
:param downloadid: str downloader id <optional>
Checks TABLE for imdbid, guid, or downloadid.
Exactly one optional variable must be supplied.
Used to check if we need to add row or update existing row.
Returns Bool of found status
'''
if artist_id:
idcol = u'artist_id'
idval = artist_id
else:
return 'ID ERROR'
command = u'SELECT 1 FROM {} WHERE {}="{}"'.format(TABLE, idcol, idval)
row = self.execute(command)
if row is False or row.fetchone() is None:
return False
else:
return True
def get_single_search_result(self, idcol, idval):
''' Gets single search result
:param idcol: str identifying column
:param idval: str identifying value
Finds in SEARCHRESULTS a row where idcol == idval
Returns dict
'''
logging.info(u'Retreving search result details for {}.'.format(idval.split('&')[0]))
command = u'SELECT * FROM SEARCHRESULTS WHERE {}="{}"'.format(idcol, idval)
result = self.execute(command)
if result:
return result.fetchone()
else:
return False
def _get_existing_schema(self):
table_dict = {}
# get list of tables in db:
command = 'SELECT name FROM sqlite_master WHERE type="table"'
tables = self.execute(command)
table_dict = {}
if not tables:
return False
for i in tables:
i = i[0]
command = u'PRAGMA table_info({})'.format(i)
columns = self.execute(command)
if not columns:
continue
tmp_dict = {}
for col in columns:
tmp_dict[col['name']] = col['type']
table_dict[i] = tmp_dict
return table_dict
def _get_intended_schema(self):
d = {}
for table in self.metadata.tables.keys():
selftable = getattr(self, table)
d2 = {}
for i in selftable.c:
d2[i.name] = str(i.type)
d[table] = d2
return d
def update_tables(self):
existing = self._get_existing_schema()
intended = self._get_intended_schema()
diff = Comparisons.compare_dict(intended, existing)
if not diff:
return True
print 'Database update required. This may take some time.'
backup_dir = os.path.join(core.PROG_PATH, 'db')
logging.info(u'Backing up database to {}.'.format(backup_dir))
print u'Backing up database to {}.'.format(backup_dir)
try:
if not os.path.isdir(backup_dir):
os.mkdir(backup_dir)
backup = u'{}.{}'.format(core.DB_FILE, datetime.date.today())
shutil.copyfile(core.DB_FILE, os.path.join(backup_dir, backup))
except Exception, e: # noqa
print 'Error backing up database.'
logging.error(u'Copying SQL DB.', exc_info=True)
raise
logging.info(u'Modifying database tables.')
print 'Modifying tables.'
'''
For each item in diff, create new column.
Then, if the new columns name is in self.convert_names, copy data from old column
Create the new table, then copy data from TMP table
'''
for table, schema in diff.iteritems():
logging.info(u'Modifying table {}.'.format(table))
print u'Modifying table {}'.format(table)
for name, kind in schema.iteritems():
command = u'ALTER TABLE {} ADD COLUMN {} {}'.format(table, name, kind)
self.execute(command)
if table in self.convert_names.keys():
for pair in self.convert_names[table]:
if pair[0] == name:
command = u'UPDATE {} SET {} = {}'.format(table, pair[0], pair[1])
self.execute(command)
# move TABLE to TABLE_TMP
table_tmp = u'{}_TMP'.format(table)
logging.info(u'Renaming table to {}.'.format(table_tmp))
print u'Renaming table to {}'.format(table_tmp)
command = u'ALTER TABLE {} RENAME TO {}'.format(table, table_tmp)
self.execute(command)
# create new table
logging.info(u'Creating new table {}.'.format(table))
print u'Creating new table {}'.format(table)
table_meta = getattr(self, table)
table_meta.create(self.engine)
# copy data over
logging.info(u'Merging data from {} to {}.'.format(table_tmp, table))
print u'Merging data from {} to {}'.format(table_tmp, table)
names = u', '.join(intended[table].keys())
command = u'INSERT INTO {} ({}) SELECT {} FROM {}'.format(table, names, names, table_tmp)
self.execute(command)
logging.info(u'Dropping table {}.'.format(table_tmp))
print u'Dropping table {}'.format(table_tmp)
command = u'DROP TABLE {}'.format(table_tmp)
self.execute(command)
logging.info(u'Finished updating table {}.'.format(table))
print u'Finished updating table {}'.format(table)
logging.info(u'Database updated')
print 'Database updated.'
# pylama:ignore=W0401
|
import random
#Node
class Node:
    """Singly-linked node that also carries a 'random' pointer."""

    def __init__(self, data):
        self.data = data      # payload
        self.next = None      # next node in the chain
        self.random = None    # arbitrary node elsewhere in the list
#linked list
class LinkedList:
def __init__(self):
self.head = None
self.tail = None
self.map = []
#add node
def addNode(self,data):
n = Node(data)
self.map.append(n)
if self.head:
n.next = self.head
self.head = n
else:
self.head = n
self.tail = n
#set the random node
def setRandom(self):
curr = self.head
item = [0,1,2,3,4]
random.shuffle(item)
while curr is not None:
index = random.choice(item)
curr.random = self.map[index]
curr = curr.next
def printList(self,head):
curr = head
while curr is not None:
print curr.data, '---->',curr.random.data
curr = curr.next
#clone the linked list
def clone(self):
curr = self.head
while curr is not None:
n = Node(str(str(curr.data) + 'Q'))
print 'n-----',n.data, '---->',n.random
nextNode = curr.next
curr.next = n
n.next = nextNode
curr = nextNode
curr = self.head
tempHead = None
while curr is not None and curr.next is not None:
print '-----',curr.data, '---->',curr.random.data
nextNode = curr.next
#set new head
if tempHead is None:
tempHead = nextNode
#clone random node
nextNode.random = curr.random.next
#wire next for orignal
curr.next = nextNode.next
print '-----',nextNode.data, '---->',nextNode.random.data
#wire next for new clone
nextNode.next = nextNode.next.next
#move
curr = curr.next
return tempHead
#setup code: build a 5-node list (prepending, so head ends up as 5), wire
#the random pointers, and print the original list.
l = LinkedList()
l.addNode(1)
l.addNode(2)
l.addNode(3)
l.addNode(4)
l.addNode(5)
l.setRandom()
l.printList(l.head)
print '----'
#clone code: deep-copy the list (random pointers included) and print it.
l.printList(l.clone())
|
#%%
# This notebook analyzes misc episode-level statistics; i.e. reproduces Fig A.1.
import numpy as np
import pandas as pd
import os
import os.path as osp
import json
import matplotlib.pyplot as plt
import seaborn as sns
import PIL.Image
import torch
from obj_consts import get_variant_labels, get_obj_label
from fp_finder import load_variant
from analyze_utils import prep_plt, load_stats
# Select which run to analyze.  Later assignments override earlier ones, so
# only the last (variant, ckpt) pair is active; the earlier pairs are kept
# for quick switching while exploring.
variant = 'base-full'
ckpt = 31
# variant = 'base4-full'
# ckpt = 34
# ckpt = 33
variant = 'split_clamp-full'
ckpt = 31
variant = 'split_rednet-full'
ckpt = 38
is_eval = True
# is_eval = False
is_gt = False
# is_gt = True
# meta_df, title = load_stats(variant, ckpt, is_gt=is_gt)
# Load episode-level stats for the first agent from an explicit stats file
# and label it "Tethered".
meta_df, title = load_stats(variant, ckpt, override_fn=f'{variant}/{ckpt}/eval_gt_False_21.pth')
meta_df['variant'] = 'Tethered'
print("Success\t", meta_df['success'].mean())
print("Coverage\t", meta_df['coverage'].mean())
print("Coverage on Success\t", meta_df[meta_df['success'] == 1.0]['coverage'].mean())
# Load the comparison agent, label it "Base", and stack both runs into one
# dataframe for the grouped plots below.
variant = 'split_clamp-full'
ckpt = 31
meta_df_2, _ = load_stats(variant, ckpt, is_gt=is_gt)
meta_df_2['variant'] = 'Base'
meta_df = pd.concat([meta_df, meta_df_2])
# meta_df['obj_d2g'] = meta_df.groupby('obj_cat')['geodesic'].transform('mean')
# meta_df = meta_df.sort_values('obj_d2g', ascending=True)
meta_df['scene'] = pd.Categorical(meta_df.scene)
#%%
#%%
# * Success vs Obj Goal / Scene
# Grouped bar chart of the chosen metric (y) per goal category or scene (x),
# comparing the two variants side by side.
prep_plt()
y = "success"
# y = "coverage"
# y = "steps"
# y = "spl"
palette = sns.color_palette(n_colors=2)
x = 'obj_cat'
# x = 'scene'
if x == 'obj_cat':
    # Order categories by descending frequency before plotting.
    meta_df = meta_df.sort_values('obj_freq', ascending=False)
    ax = sns.barplot(x=x, y=y, data=meta_df, ci=None, hue='variant', palette=palette)
if x == 'scene':
    # Order scenes by mean success; barplot's implicit ordering was
    # unreliable, so the order is passed explicitly.
    meta_df['scene_diff'] = meta_df.groupby('scene')['success'].transform('mean')
    meta_df = meta_df.sort_values('scene_diff', ascending=False)
    # Hmm, sort doesn't seem to work by default
    scene_diff_order = meta_df['scene'].unique()
    # print(scene_diff_order)
    ax = sns.barplot(x=x, y=y, data=meta_df, ci=None, hue='variant', palette=palette, order=scene_diff_order)
# ax = sns.barplot(x="obj_cat", y="success", data=meta_df, ci=None)
# ax.set_xlabel("Goal Category in Ascending Average Distance")
# ax.set_xlabel("Goal Category in Descending Frequency")
# ax.set_ylim(0.0, 0.85)
sns.despine(ax=ax)
ax.set_ylabel(f"{y} Ratio")
ax.set_ylabel(f"Average Success")
if x == "obj_cat":
    ax.set_xlabel("Goals (Descending Frequency)")
elif x == 'scene':
    ax.set_xlabel("Scene")
ax.set_title("")
ax.legend(["Tethered", "Base"], frameon=False, fontsize=16)
# ax.text(8, 0.7, "Tethered", color=palette[0], size=16)
# ax.text(8, 0.64, "Base", color=palette[1], size=16)
# Shorten tick labels: readable object names, or truncated scene ids.
# NOTE(review): label._text is a private attribute; get_text() is the
# public API -- confirm before relying on it across matplotlib versions.
strs = map(lambda label: label._text, ax.get_xticklabels())
if x == 'obj_cat':
    mapper = get_obj_label
elif x == 'scene':
    mapper = lambda x: x.split('/')[-2][:5]
ax.set_xticklabels(map(mapper, strs), rotation=45, horizontalalignment='right')
plt.savefig('test.pdf', dpi=150, bbox_inches="tight")
#%%
# Build a "Total Episodes" pseudo-variant (every episode marked successful)
# so the histogram below can show totals alongside the real successes, then
# keep only success rows.
meta_df_dummy = meta_df_2.copy(deep=True)
meta_df_dummy['success'] = 1
meta_df_dummy['variant'] = 'Total Episodes'
df3 = pd.concat([meta_df_dummy, meta_df])
df3 = df3[df3['success'] == 1]
def plot_success_vs_geodesic(df, cat=None, scene=None, ax=None):
    """Histogram of episode counts vs goal geodesic distance, split by variant.

    df: episode-level dataframe with 'geodesic' and 'variant' columns (and
        'obj_cat' / 'scene' when the corresponding filter is used).
    cat: if given, restrict to episodes with this goal category.
    scene: if given, restrict to episodes from this scene.
    ax: accepted for interface compatibility; note sns.displot is a
        figure-level function and creates its own figure.
    """
    plot_df = df
    if cat is not None:
        plot_df = plot_df[plot_df['obj_cat'] == cat]
    if scene is not None:
        # Fixed: this branch previously filtered from `df`, silently
        # discarding the category filter when both cat and scene were given.
        plot_df = plot_df[plot_df['scene'] == scene]
    g = sns.displot(
        data=plot_df,
        x="geodesic",
        hue="variant",
        multiple="dodge",
        ax=ax,
        bins=np.arange(0, 30, 2)
    )
    g.set_axis_labels('Goal Geodesic Distance', 'Success Count')
    g.legend.set_title("")
    g.legend.set_bbox_to_anchor((0.7, 0.7))
# fig = plt.figure()
# ax = fig.add_subplot(111)
# plot_success_vs_geodesic(meta_df, ax=ax)
plot_success_vs_geodesic(df3)
plt.savefig('test.pdf', bbox_inches='tight')
# plot_success_vs_geodesic(meta_df, cat="chair")
# plot_success_vs_geodesic(meta_df, cat="table")
# plot_success_vs_geodesic(meta_df, cat="cushion")
# plot_success_vs_geodesic(meta_df, cat="cabinet")
#%%
# Other random plots below
# Stacked histogram of episode counts by geodesic distance, colored by
# 'cat_or_rare'. NOTE(review): the first `success` assignment is immediately
# overwritten — the last line kept decides success (1.0) vs failure (0.0) plot.
success = 1.0
success = 0.0
success_df = meta_df[meta_df['success'] == success]
# plot_success_vs_geodesic(meta_df, ax=plt.gca())
ax = sns.histplot(
    data=success_df,
    x="geodesic",
    hue="cat_or_rare",
    multiple="stack",
    ax=plt.gca(),
    shrink=0.5,
    bins=np.arange(30),
)
import matplotlib as mpl
# seaborn attaches its legend as an axes child; grab it to retitle.
legends = [c for c in ax.get_children() if isinstance(c, mpl.legend.Legend)]
legends[0].set_title("Category")
plt.ylim(0, 300)
plt.xlabel("Geodesic Distance")
plt.ylabel(f"{'Failure' if success == 0.0 else 'Success'} Count")
# plt.ylabel("Success Count")
#%%
# Bar chart of episode counts per ground-truth object category.
ax = sns.countplot(data=meta_df, x="obj_cat")
ax.set_xticklabels(ax.get_xticklabels(), rotation=45, horizontalalignment='right')
ax.set_xlabel("Category")
ax.set_ylabel("Count")
ax.set_title(f"GT Category Distribution {'EVAL' if is_eval else 'TRAIN'}")
#%%
# Mean geodesic distance per category, split by success/failure.
# ax = sns.barplot(data=meta_df, x="success", y="geodesic", hue="obj_cat")
# ax = sns.barplot(data=meta_df, x="obj_cat", y="geodesic")
ax = sns.barplot(data=meta_df, x="obj_cat", y="geodesic", hue="success")
ax.set_xticklabels(ax.get_xticklabels(), rotation=45, horizontalalignment='right')
ax.set_xlabel("Category")
ax.set_ylabel("Geodesic Distance")
ax.set_title(f"{title} Distance per Category")
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""A module containing all the e2e model related benchmarks."""
from superbench.benchmarks.model_benchmarks.model_base import ModelBenchmark
from superbench.benchmarks.model_benchmarks.pytorch_bert import PytorchBERT
from superbench.benchmarks.model_benchmarks.pytorch_gpt2 import PytorchGPT2
from superbench.benchmarks.model_benchmarks.pytorch_cnn import PytorchCNN
from superbench.benchmarks.model_benchmarks.pytorch_lstm import PytorchLSTM
__all__ = ['ModelBenchmark', 'PytorchBERT', 'PytorchGPT2', 'PytorchCNN', 'PytorchLSTM']
|
# Copyright (c) 2018 NEC, Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo_config import cfg
from oslo_upgradecheck import upgradecheck
from stevedore import driver as stevedore_driver
# Need to import to load config
from octavia.common import config # noqa: F401 pylint: disable=unused-import
from octavia.common import constants
from octavia.common import policy
from octavia.controller.worker.v2 import taskflow_jobboard_driver as tsk_driver
from octavia.i18n import _
CONF = cfg.CONF
class Checks(upgradecheck.UpgradeCommands):
    """Contains upgrade checks
    Various upgrade checks should be added as separate methods in this class
    and added to _upgrade_checks tuple.
    """
    def _check_persistence(self):
        """Verify the AmphoraV2 persistence backend is usable.

        Returns the persistence object on success so callers can reuse the
        connection, or an upgradecheck.Result (WARNING for sqlite, FAILURE
        when the backend cannot be reached).
        """
        try:
            pers_driver = tsk_driver.MysqlPersistenceDriver()
            with pers_driver.get_persistence() as pers:
                if pers.engine.dialect.name == 'sqlite':
                    # FIX: corrected "persistence_connecton" typo in the
                    # operator-facing warning message.
                    return upgradecheck.Result(
                        upgradecheck.Code.WARNING,
                        _('Persistence database is using sqlite backend. '
                          'Verification required if persistence_connection URL '
                          'has been set properly.'))
                return pers
        except Exception:
            return upgradecheck.Result(upgradecheck.Code.FAILURE,
                                       _('Failed to connect to persistence '
                                         'backend for AmphoraV2 provider.'))
    def _check_jobboard(self, persistence):
        """Verify the jobboard backend configured for AmphoraV2 is reachable."""
        try:
            jobboard_driver = stevedore_driver.DriverManager(
                namespace='octavia.worker.jobboard_driver',
                name=CONF.task_flow.jobboard_backend_driver,
                invoke_args=(persistence,),
                invoke_on_load=True).driver
            with jobboard_driver.job_board(persistence) as jb:
                if jb.connected:
                    return upgradecheck.Result(
                        upgradecheck.Code.SUCCESS,
                        _('Persistence database and Jobboard backend for '
                          'AmphoraV2 provider configured.'))
        except Exception:
            # Return FAILURE later
            pass
        return upgradecheck.Result(
            upgradecheck.Code.FAILURE,
            _('Failed to connect to jobboard backend for AmphoraV2 provider. '
              'Check jobboard configuration options in task_flow config '
              'section.'))
    def _check_amphorav2(self):
        """Run persistence and jobboard checks when AmphoraV2 is enabled."""
        default_provider_driver = CONF.api_settings.default_provider_driver
        enabled_provider_drivers = CONF.api_settings.enabled_provider_drivers
        if (default_provider_driver == constants.AMPHORAV2 or
                constants.AMPHORAV2 in enabled_provider_drivers):
            persistence = self._check_persistence()
            # _check_persistence returns a Result on warning/failure and the
            # raw persistence object on success (needed by the jobboard check).
            if isinstance(persistence, upgradecheck.Result):
                return persistence
            return self._check_jobboard(persistence)
        return upgradecheck.Result(upgradecheck.Code.SUCCESS,
                                   _('AmphoraV2 provider is not enabled.'))
    def _check_yaml_policy(self):
        """Warn or fail when the oslo.policy file is not in YAML format."""
        if CONF.oslo_policy.policy_file.lower().endswith('yaml'):
            return upgradecheck.Result(upgradecheck.Code.SUCCESS,
                                       _('The [oslo_policy] policy_file '
                                         'setting is configured for YAML '
                                         'policy file format.'))
        if CONF.oslo_policy.policy_file.lower().endswith('json'):
            return upgradecheck.Result(
                upgradecheck.Code.WARNING,
                _('The [oslo_policy] policy_file setting is configured for '
                  'JSON policy file format. JSON format policy files have '
                  'been deprecated by oslo policy. Please use the oslo policy '
                  'tool to convert your policy file to YAML format. See this '
                  'patch for more information: '
                  'https://review.opendev.org/733650'))
        return upgradecheck.Result(upgradecheck.Code.FAILURE,
                                   _('Unable to determine the [oslo_policy] '
                                     'policy_file setting file format. '
                                     'Please make sure your policy file is '
                                     'in YAML format and has the suffix of '
                                     '.yaml for the filename. Oslo policy '
                                     'has deprecated the JSON file format.'))
    # Registry of (description, check) pairs consumed by the upgradecheck base.
    _upgrade_checks = (
        (_('AmphoraV2 Check'), _check_amphorav2),
        (_('YAML Policy File'), _check_yaml_policy),
    )
def main():
    """Entry point: register policies, then run the registered upgrade checks."""
    policy.Policy()
    checks = Checks()
    return upgradecheck.main(CONF, project='octavia', upgrade_command=checks)


if __name__ == '__main__':
    sys.exit(main())
|
from Optimizacion.Instruccion import Instruccion
from Optimizacion.reporteOptimizacion import *
class Asignacion(Instruccion):
    """A three-address assignment `id = valorizq operador valorder` that can be
    simplified by peephole ("Mirilla") rules 8-18."""

    def __init__(self, id=None, valorizq=None, operador=None, valorder=None, linea=''):
        self.valorder = valorder
        self.valorizq = valorizq
        self.operador = operador
        self.id = id
        self.linea = linea

    def _anterior(self):
        # Textual form of the original (pre-optimization) assignment.
        return self.id + '=' + self.valorizq + " " + self.operador + " " + self.valorder

    def _reportar(self, regla, optimizado):
        # Record one applied peephole rule in the global optimization report.
        repOptimizado.append(
            reporteOptimizacion('Mirilla', regla, self._anterior(), optimizado, self.linea))

    def Optimizar(self):
        """Apply peephole rules 8-18 to this assignment.

        Returns the simplified right-hand side (rules 8-11) or the full
        replacement statement (rules 12-18) when a rule applies; otherwise
        returns None.
        """
        if self.valorder is None or self.operador is None:
            return None
        if self.valorizq == '[':
            # Stack access: not optimizable here.
            return None
        if self.valorizq in 'stack':
            # NOTE(review): substring test — also matches 's', 't', 'ta', etc.,
            # not only the literal 'stack'; behavior preserved as-is.
            return None
        print(self.valorizq, '-----------', self.valorder)
        # Rules 8-11: x = x op c with a neutral constant collapses to x.
        if self.id == self.valorizq:
            if self.operador == "+":
                if self.valorder == "0":
                    self._reportar('Regla 8', "")
                    return self.valorizq
            elif self.operador == "-":
                if self.valorder == "0":
                    self._reportar('Regla 9', "")
                    return self.valorizq
            elif self.operador == "*":
                if self.valorder == "1":
                    self._reportar('Regla 10', "")
                    print("anterior", self._anterior())
                    return self.valorizq
            elif self.operador == "/":
                if self.valorder == "1":
                    self._reportar('Regla 11', "")
                    return self.valorizq
        # Mirrored rules 8/10 when the destination equals the right operand.
        if self.id == self.valorder:
            if self.operador == "+":
                if self.valorizq == "0":
                    self._reportar('Regla 8', "")
                    return self.valorder
            elif self.operador == "*":
                if self.valorizq == "1":
                    self._reportar('Regla 10', "")
                    print("anterior", self._anterior())
                    return self.valorder
        # Rules 12-17: neutral constant on the right-hand side.
        if self.id != self.valorizq:
            print("entrooooooooo")
            if self.operador == "+":
                if self.valorder == "0":
                    optimizado = self.id + '=' + self.valorizq
                    self._reportar('Regla 12', optimizado)
                    return optimizado
            elif self.operador == "-":
                if self.valorder == "0":
                    optimizado = self.id + '=' + self.valorizq
                    self._reportar('Regla 13', optimizado)
                    return optimizado
            elif self.operador == "*":
                if self.valorder == "1":
                    optimizado = self.id + '=' + self.valorizq
                    self._reportar('Regla 14', optimizado)
                    return optimizado
                elif self.valorder == "2":
                    # x * 2  ->  x + x (strength reduction).
                    optimizado = self.id + '=' + self.valorizq + " + " + self.valorizq
                    self._reportar('Regla 16', optimizado)
                    return optimizado
                elif self.valorder == "0":
                    optimizado = self.id + '=' + self.valorder
                    self._reportar('Regla 17', optimizado)
                    return optimizado
            elif self.operador == "/":
                if self.valorder == "1":
                    optimizado = self.id + '=' + self.valorizq
                    self._reportar('Regla 15', optimizado)
                    return optimizado
        # Rules 12-18: neutral constant on the left-hand side.
        if self.id != self.valorder:
            if self.operador == "+":
                # BUG FIX: the original read `self.valizq` (undefined attribute,
                # raising AttributeError); it must be `self.valorizq`.
                if self.valorizq == "0":
                    optimizado = self.id + '=' + self.valorder
                    self._reportar('Regla 12', optimizado)
                    return optimizado
            elif self.operador == "*":
                if self.valorizq == "1":
                    optimizado = self.id + '=' + self.valorder
                    self._reportar('Regla 14', optimizado)
                    return optimizado
                elif self.valorizq == "2":
                    optimizado = self.id + '=' + self.valorder + " + " + self.valorder
                    self._reportar('Regla 16', optimizado)
                    return optimizado
                elif self.valorizq == "0":
                    optimizado = self.id + '=' + self.valorizq
                    self._reportar('Regla 17', optimizado)
                    return optimizado
            elif self.operador == "/":
                if self.valorizq == "0":
                    # 0 / x  ->  0.
                    optimizado = self.id + '=' + self.valorizq
                    self._reportar('Regla 18', optimizado)
                    return optimizado
        return None
|
from sqlalchemy import or_
from biz.ds.ds_eip import eip_sync_cmdb
from websdk.db_context import DBContext
from libs.base_handler import BaseHandler
from libs.pagination import pagination_util
from models.eip import FreeEip, model_to_dict
class EipHandler(BaseHandler):
    """CMDB handler exposing free (unused) EIP records."""

    @pagination_util
    def get(self, *args, **kwargs):
        """Return every unused EIP as a dict, optionally fuzzy-filtered by `key`."""
        key = self.get_argument('key', default=None, strip=True)
        with DBContext('r') as session:
            query = session.query(FreeEip)
            if key:
                # Fuzzy-match the key against all searchable columns.
                query = query.filter(
                    or_(FreeEip.public_ip.like('%{}%'.format(key)),
                        FreeEip.allocation_id.like('%{}%'.format(key)),
                        FreeEip.public_ipv4_pool.like('%{}%'.format(key)),
                        FreeEip.network_border_group.like('%{}%'.format(key)))
                )
            rows = query.filter(FreeEip.is_used == 0).all()
            eip_list = [model_to_dict(row) for row in rows]
        return eip_list

    # Trigger a sync of EIP data into the CMDB database.
    def post(self, *args, **kwargs):
        eip_sync_cmdb()
# URL routing table for the EIP CMDB endpoints.
eip_host_urls = [
    (r"/v1/cmdb/eip/", EipHandler),
]
if __name__ == '__main__':
    pass
import datetime
import time
from typing import Any, Callable, Dict, Iterable, Optional, Tuple
import pendulum
import prefect
from prefect.client import Client
from prefect.core import Edge, Task
from prefect.engine.result import Result
from prefect.engine.runner import ENDRUN, call_state_handlers
from prefect.engine.state import Cached, ClientFailed, Failed, Queued, Retrying, State
from prefect.engine.task_runner import TaskRunner, TaskRunnerInitializeResult
from prefect.utilities.exceptions import VersionLockError
from prefect.utilities.executors import tail_recursive
class CloudTaskRunner(TaskRunner):
    """
    TaskRunners handle the execution of Tasks and determine the State of a Task
    before, during and after the Task is run.
    In particular, through the TaskRunner you can specify the states of any upstream dependencies,
    and what state the Task should be initialized with.
    Args:
        - task (Task): the Task to be run / executed
        - state_handlers (Iterable[Callable], optional): A list of state change handlers
            that will be called whenever the task changes state, providing an opportunity to
            inspect or modify the new state. The handler will be passed the task runner
            instance, the old (prior) state, and the new (current) state, with the following
            signature: `state_handler(TaskRunner, old_state, new_state) -> State`; If multiple
            functions are passed, then the `new_state` argument will be the result of the
            previous handler.
        - flow_result: the result instance configured for the flow (if any)
    """
    def __init__(
        self,
        task: Task,
        state_handlers: Iterable[Callable] = None,
        flow_result: Result = None,
    ) -> None:
        # Client used for all backend API calls: state updates, heartbeats,
        # cached-state queries, and task-run-name mutations.
        self.client = Client()
        super().__init__(
            task=task, state_handlers=state_handlers, flow_result=flow_result
        )
    def call_runner_target_handlers(self, old_state: State, new_state: State) -> State:
        """
        A special state handler that the TaskRunner uses to call its task's state handlers.
        This method is called as part of the base Runner's `handle_state_change()` method.
        Args:
            - old_state (State): the old (previous) state
            - new_state (State): the new (current) state
        Returns:
            - State: the new state
        """
        raise_on_exception = prefect.context.get("raise_on_exception", False)
        try:
            new_state = super().call_runner_target_handlers(
                old_state=old_state, new_state=new_state
            )
        # PrefectStateSignals are trapped and turned into States
        except prefect.engine.signals.PrefectStateSignal as exc:
            self.logger.info(
                "{name} signal raised: {rep}".format(
                    name=type(exc).__name__, rep=repr(exc)
                )
            )
            if raise_on_exception:
                raise exc
            new_state = exc.state
        except Exception as exc:
            msg = "Exception raised while calling state handlers: {}".format(repr(exc))
            self.logger.exception(msg)
            if raise_on_exception:
                raise exc
            new_state = Failed(msg, result=exc)
        task_run_id = prefect.context.get("task_run_id")
        version = prefect.context.get("task_run_version")
        try:
            cloud_state = new_state
            # Persist the new state to the backend; `version` is only supplied
            # for Running states, which enables the version-lock check below.
            state = self.client.set_task_run_state(
                task_run_id=task_run_id,
                version=version if cloud_state.is_running() else None,
                state=cloud_state,
                cache_for=self.task.cache_for,
            )
        except VersionLockError as exc:
            # Another process already advanced this task run; reconcile with
            # the authoritative state from the backend.
            state = self.client.get_task_run_state(task_run_id=task_run_id)
            if state.is_running():
                self.logger.debug(
                    "Version lock encountered and task {} is already in a running state.".format(
                        self.task.name
                    )
                )
                raise ENDRUN(state=state) from exc
            self.logger.debug(
                "Version lock encountered for task {}, proceeding with state {}...".format(
                    self.task.name, type(state).__name__
                )
            )
            try:
                new_state = state.load_result(self.result)
            except Exception as exc_inner:
                self.logger.debug(
                    "Error encountered attempting to load result for state of {} task...".format(
                        self.task.name
                    )
                )
                self.logger.error(repr(exc_inner))
                raise ENDRUN(state=state) from exc_inner
        except Exception as exc:
            self.logger.exception(
                "Failed to set task state with error: {}".format(repr(exc))
            )
            raise ENDRUN(state=ClientFailed(state=new_state)) from exc
        if state.is_queued():
            # Remember the state we attempted to set so the run loop can retry
            # it once the queue clears.
            state.state = old_state  # type: ignore
            raise ENDRUN(state=state)
        prefect.context.update(task_run_version=(version or 0) + 1)
        return new_state
    def initialize_run(  # type: ignore
        self, state: Optional[State], context: Dict[str, Any]
    ) -> TaskRunnerInitializeResult:
        """
        Initializes the Task run by initializing state and context appropriately.
        Args:
            - state (Optional[State]): the initial state of the run
            - context (Dict[str, Any]): the context to be updated with relevant information
        Returns:
            - tuple: a tuple of the updated state, context, and upstream_states objects
        """
        # load task run info
        try:
            task_run_info = self.client.get_task_run_info(
                flow_run_id=context.get("flow_run_id", ""),
                task_id=context.get("task_id", ""),
                map_index=context.get("map_index"),
            )
            # if state was provided, keep it; otherwise use the one from db
            state = state or task_run_info.state  # type: ignore
            context.update(
                task_run_id=task_run_info.id,  # type: ignore
                task_run_version=task_run_info.version,  # type: ignore
            )
        except Exception as exc:
            self.logger.exception(
                "Failed to retrieve task state with error: {}".format(repr(exc))
            )
            if state is None:
                state = Failed(
                    message="Could not retrieve state from Prefect Cloud",
                    result=exc,
                )
            raise ENDRUN(state=state) from exc
        # we assign this so it can be shared with heartbeat thread
        self.task_run_id = context.get("task_run_id", "")  # type: str
        context.update(checkpointing=True)
        return super().initialize_run(state=state, context=context)
    @call_state_handlers
    def check_task_is_cached(self, state: State, inputs: Dict[str, Result]) -> State:
        """
        Checks if task is cached in the DB and whether any of the caches are still valid.
        Args:
            - state (State): the current state of this task
            - inputs (Dict[str, Result]): a dictionary of inputs whose keys correspond
                to the task's `run()` arguments.
        Returns:
            - State: the state of the task after running the check
        Raises:
            - ENDRUN: if the task is not ready to run
        """
        if state.is_cached() is True:
            assert isinstance(state, Cached)  # mypy assert
            sanitized_inputs = {key: res.value for key, res in inputs.items()}
            if self.task.cache_validator(
                state, sanitized_inputs, prefect.context.get("parameters")
            ):
                state = state.load_result(self.result)
            return state
        if self.task.cache_for is not None:
            # Query the backend for candidate Cached states created within the
            # cache_for window and return the first one the validator accepts.
            oldest_valid_cache = datetime.datetime.utcnow() - self.task.cache_for
            cached_states = self.client.get_latest_cached_states(
                task_id=prefect.context.get("task_id", ""),
                cache_key=self.task.cache_key,
                created_after=oldest_valid_cache,
            )
            if cached_states:
                self.logger.debug(
                    "Task '{name}': {num} candidate cached states were found".format(
                        name=prefect.context.get("task_full_name", self.task.name),
                        num=len(cached_states),
                    )
                )
                for candidate_state in cached_states:
                    assert isinstance(candidate_state, Cached)  # mypy assert
                    candidate_state.load_cached_results(inputs)
                    sanitized_inputs = {key: res.value for key, res in inputs.items()}
                    if self.task.cache_validator(
                        candidate_state,
                        sanitized_inputs,
                        prefect.context.get("parameters"),
                    ):
                        try:
                            return candidate_state.load_result(self.result)
                        except Exception:
                            # A candidate whose data cannot be loaded is
                            # skipped rather than failing the whole run.
                            location = getattr(
                                candidate_state._result, "location", None
                            )
                            self.logger.warning(
                                f"Failed to load cached state data from {location}.",
                                exc_info=True,
                            )
                self.logger.debug(
                    "Task '{name}': can't use cache because no candidate Cached states "
                    "were valid".format(
                        name=prefect.context.get("task_full_name", self.task.name)
                    )
                )
            else:
                self.logger.debug(
                    "Task '{name}': can't use cache because no Cached states were found".format(
                        name=prefect.context.get("task_full_name", self.task.name)
                    )
                )
        return state
    def load_results(
        self, state: State, upstream_states: Dict[Edge, State]
    ) -> Tuple[State, Dict[Edge, State]]:
        """
        Given the task's current state and upstream states, populates all relevant result
        objects for this task run.
        Args:
            - state (State): the task's current state.
            - upstream_states (Dict[Edge, State]): the upstream state_handlers
        Returns:
            - Tuple[State, dict]: a tuple of (state, upstream_states)
        """
        upstream_results = {}
        try:
            if state.is_mapped():
                # ensures mapped children are only loaded once
                state = state.load_result(self.result)
            for edge, upstream_state in upstream_states.items():
                # Each upstream result falls back to the flow-level result
                # config when the upstream task has none of its own.
                upstream_states[edge] = upstream_state.load_result(
                    edge.upstream_task.result or self.flow_result
                )
                if edge.key is not None:
                    upstream_results[edge.key] = (
                        edge.upstream_task.result or self.flow_result
                    )
            state.load_cached_results(upstream_results)
            return state, upstream_states
        except Exception as exc:
            new_state = Failed(
                message=f"Failed to retrieve task results: {exc}", result=exc
            )
            final_state = self.handle_state_change(old_state=state, new_state=new_state)
            raise ENDRUN(final_state) from exc
    def set_task_run_name(self, task_inputs: Dict[str, Result]) -> None:
        """
        Sets the name for this task run by calling the `set_task_run_name` mutation.
        Args:
            - task_inputs (Dict[str, Result]): a dictionary of inputs whose keys correspond
                to the task's `run()` arguments.
        """
        task_run_name = self.task.task_run_name
        if task_run_name:
            raw_inputs = {k: r.value for k, r in task_inputs.items()}
            formatting_kwargs = {
                **prefect.context.get("parameters", {}),
                **prefect.context,
                **raw_inputs,
            }
            # task_run_name may be a template string or a callable producing one.
            if not isinstance(task_run_name, str):
                task_run_name = task_run_name(**formatting_kwargs)
            else:
                task_run_name = task_run_name.format(**formatting_kwargs)
            self.client.set_task_run_name(
                task_run_id=self.task_run_id, name=task_run_name  # type: ignore
            )
    @tail_recursive
    def run(
        self,
        state: State = None,
        upstream_states: Dict[Edge, State] = None,
        context: Dict[str, Any] = None,
        is_mapped_parent: bool = False,
    ) -> State:
        """
        The main endpoint for TaskRunners. Calling this method will conditionally execute
        `self.task.run` with any provided inputs, assuming the upstream dependencies are in a
        state which allow this Task to run. Additionally, this method will wait and perform
        Task retries which are scheduled for <= 1 minute in the future.
        Args:
            - state (State, optional): initial `State` to begin task run from;
                defaults to `Pending()`
            - upstream_states (Dict[Edge, State]): a dictionary
                representing the states of any tasks upstream of this one. The keys of the
                dictionary should correspond to the edges leading to the task.
            - context (dict, optional): prefect Context to use for execution
            - is_mapped_parent (bool): a boolean indicating whether this task run is the run of
                a parent mapped task
        Returns:
            - `State` object representing the final post-run state of the Task
        """
        context = context or {}
        with prefect.context(context):
            end_state = super().run(
                state=state,
                upstream_states=upstream_states,
                context=context,
                is_mapped_parent=is_mapped_parent,
            )
            # Retry/queued states scheduled to start within the next 10 minutes
            # are waited out in-process rather than handed back to the agent.
            while (end_state.is_retrying() or end_state.is_queued()) and (
                end_state.start_time <= pendulum.now("utc").add(minutes=10)  # type: ignore
            ):
                assert isinstance(end_state, (Retrying, Queued))
                naptime = max(
                    (end_state.start_time - pendulum.now("utc")).total_seconds(), 0
                )
                for _ in range(int(naptime) // 30):
                    # send heartbeat every 30 seconds to let API know task run is still alive
                    self.client.update_task_run_heartbeat(
                        task_run_id=prefect.context.get("task_run_id")
                    )
                    naptime -= 30
                    time.sleep(30)
                if naptime > 0:
                    time.sleep(naptime)  # ensures we don't start too early
                self.client.update_task_run_heartbeat(
                    task_run_id=prefect.context.get("task_run_id")
                )
                end_state = super().run(
                    state=end_state,
                    upstream_states=upstream_states,
                    context=context,
                    is_mapped_parent=is_mapped_parent,
                )
            return end_state
|
from metaprogramming.fido import Fido
class Anger(type):
    """Metaclass that attaches an ``angry`` method to every instance it creates."""

    def angry(cls):
        # Note: instantiates the class just to report its type.
        print("{} is angry".format(type(cls())))
        return True

    def __call__(cls, *args, **kwargs):
        instance = type.__call__(cls, *args, **kwargs)
        # ``cls.angry`` is already bound to the class (an instance of this
        # metaclass), so the attached attribute is callable with no arguments.
        instance.angry = cls.angry
        return instance
# Fido subclass whose instances gain an `angry()` method via the Anger metaclass.
class AngryFido(Fido, metaclass=Anger):
    pass
if __name__ == "__main__":
    # Demo: the metaclass attaches `angry` to the new instance at creation time.
    angry_fido = AngryFido()
    print(angry_fido.angry())
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from json import load, dump
from os.path import expanduser, join
from time import asctime
from html.parser import HTMLParser
# Raised when the message store file exists but cannot be parsed as JSON.
class CorruptedFileError(Exception): pass
# Path to the pisg users.cfg file — presumably the AliasParser input; TODO confirm.
user_cfg_path = '/home/bunburya/webspace/cgi-bin/pisg/pum/users.cfg'
class AliasParser(HTMLParser):
    """Parse <user nick="..." alias="a b c"> tags for a given set of nicks,
    building bidirectional nick/alias lookup tables."""

    def __init__(self, *nicks):
        self._nicks = set(nicks)
        self.nick_to_alias = {}
        self.alias_to_nick = {}
        HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        if tag != 'user':
            return
        attr_map = dict(attrs)
        nick = attr_map.get('nick')
        alias = attr_map.get('alias')
        # Only record nicks we were asked about, and only when aliases exist.
        if nick not in self._nicks or not alias:
            return
        aliases = alias.split()
        self.nick_to_alias[nick] = aliases
        for name in aliases:
            self.alias_to_nick[name] = nick
class MessageHandler:
    """Store and retrieve per-recipient messages in a JSON file on disk."""
    # these default values are to work with bunbot
    def __init__(self, store_file):
        # Path of the JSON file used to persist messages.
        self.store_file = store_file

    def get_msgs(self):
        """Return the message dict {recipient: [[msg, sender, time], ...]}.

        Returns an empty dict when the store file does not exist yet.
        Raises CorruptedFileError when the file exists but is not valid JSON.
        """
        try:
            with open(self.store_file, 'r') as f:
                return load(f)
        except IOError:
            # file doesn't exist (yet)
            return {}
        except ValueError:
            # couldn't get a JSON object from the file
            # BUG FIX: the original raised with the undefined name `msg_file`.
            raise CorruptedFileError(self.store_file)

    def save_msgs(self, msgs):
        """Overwrite the store file with the given message dict."""
        with open(self.store_file, 'w') as f:
            dump(msgs, f)

    def clear_msgs(self, name=None):
        """Delete all messages for `name`, or everything when name is None."""
        if name is None:
            self.save_msgs({})
        else:
            msgs = self.get_msgs()
            if name in msgs:
                msgs.pop(name)
            self.save_msgs(msgs)

    def check_msgs(self, name):
        """Return the list of pending messages for `name` (empty if none)."""
        msgs = self.get_msgs()
        return msgs.get(name, [])

    def send_msg(self, sender, recip, msg):
        """Append (msg, sender, timestamp) to `recip`'s message queue."""
        msgs = self.get_msgs()
        data = (msg, sender, asctime())
        if recip in msgs:
            msgs[recip].append(data)
        else:
            msgs[recip] = [data]
        self.save_msgs(msgs)
|
#!/home/student/anaconda3/envs/deepsort/bin/python
import os
import cv2
from cv_bridge import CvBridge
import time
import argparse
import torch
import warnings
import numpy as np
import sys
import rospy
from sensor_msgs.msg import Image
from sensor_msgs.msg import CameraInfo
import image_geometry
from geometry_msgs.msg import Pose, PoseArray
from std_msgs.msg import Header
# import pdb
sys.path.append(os.path.join(os.path.dirname(__file__), "thirdparty/fast-reid"))
# sys.path.append("/tmp/catkin_ws/devel/lib/python3/dist-packages")
# import tf
from detector import build_detector
from deep_sort import build_tracker
from utils.draw import draw_boxes
from utils.parser import get_config
from utils.log import get_logger
from utils.io import write_results
# Debug: show the effective module search path after the sys.path tweaks above.
print(sys.path)
# pdb.set_trace()
# Register this process as a ROS node before creating subscribers/publishers.
rospy.init_node("deepsort_ros", anonymous=True)
def disp_image(image):
    # Debug helper: show `image` in an OpenCV window and block until a key press.
    cv2.imshow("debug", image)
    cv2.waitKey(0)
def convert_point_to_pose(point):
    """Wrap a 3-D point (x, y, z) in a Pose with identity orientation.

    NOTE(review): despite the original comment, no camera->odom frame
    transform happens here — the coordinates are copied verbatim; confirm.
    """
    pose = Pose()
    pose.position.x = point[0]
    pose.position.y = point[1]
    pose.position.z = point[2]
    # Identity quaternion: no rotation.
    pose.orientation.x = 0
    pose.orientation.y = 0
    pose.orientation.z = 0
    pose.orientation.w = 1
    return pose
class ROS_VideoTracker(object):
    """DeepSORT pedestrian tracker fed by ROS RGB-D image topics.

    Subscribes to the RGB and depth streams, runs detection + tracking on
    each frame pair, back-projects tracked pedestrians to 3-D camera-frame
    points and publishes them as a PoseArray.
    """
    def __init__(self, cfg, args, rgb_stream, depth_stream, point_stream):
        # NOTE(review): `point_stream` is accepted but never used — confirm.
        use_cuda = args.use_cuda and torch.cuda.is_available()
        self.cfg = cfg
        # Latest frames received from the subscribers (None until first message).
        self.rgb_stream = None
        self.depth_stream = None
        self.args = args
        self.logger = get_logger("root")
        self.bridge = CvBridge()
        self.logger.info("Initializing ROS Video Tracker")
        rospy.Subscriber(rgb_stream, Image, self.rgb_callback)
        rospy.Subscriber(depth_stream, Image, self.depth_callback)
        self.detector = build_detector(cfg, use_cuda=use_cuda)
        self.deepsort = build_tracker(cfg, use_cuda=use_cuda)
        self.class_names = self.detector.class_names
        self.setup_camera()
        self.publish_pose_array = rospy.Publisher("/pedestrian_pose_array", PoseArray, queue_size=1)

    def setup_camera(self):
        """Block for one CameraInfo message and build the pinhole camera model."""
        self.camera_info = rospy.wait_for_message("/realsense/color/camera_info", CameraInfo)
        self.camera_model = image_geometry.PinholeCameraModel()
        self.camera_model.fromCameraInfo(self.camera_info)

    def get_camera_info(self, msg):
        # Spare CameraInfo callback (not wired to a subscriber here).
        self.camera_info = msg

    def rgb_callback(self, msg):
        # Cache the latest RGB frame as an OpenCV image.
        self.rgb_stream = self.bridge.imgmsg_to_cv2(msg)

    def depth_callback(self, msg):
        # Cache the latest depth frame as an OpenCV image.
        self.depth_stream = self.bridge.imgmsg_to_cv2(msg)

    def get_depth_from_pixels(self, ori_depth, bbox_outputs):
        """Look up a depth value for each tracked bbox and back-project to 3-D.

        Args:
            ori_depth: depth image aligned with the RGB frame.
            bbox_outputs: iterable of rows [xmin, ymin, xmax, ymax, track_id].
        Returns:
            (10, 3) array of camera-frame points indexed by track id; rows stay
            zero for tracks with no valid (non-NaN) depth.
        """
        def recursive_non_nan_search(depth, x, y):
            # TODO: Recursively search for non-nan values around the point (x, y), not just 2 layer for loop
            # NOTE(review): returns the original (x, y), not the neighbour
            # (i, j) where a valid depth was found — confirm this is intended.
            for i in range(x - 3, x + 3):
                for j in range(y - 3, y + 3):
                    try:
                        if not np.isnan(depth[j, i]):
                            return x, y, depth[j, i]
                    except IndexError:
                        # Probe fell outside the image; try the next neighbour.
                        pass
            return np.nan, np.nan, np.nan
        # TODO: Initialize this capacity as a parameter (the original comment
        # said 100 but only 10 track slots are allocated — confirm intent).
        human_positions = np.zeros((10, 3))
        for i in range(len(bbox_outputs)):
            xmin = bbox_outputs[i][0]
            ymin = bbox_outputs[i][1]
            xmax = bbox_outputs[i][2]
            ymax = bbox_outputs[i][3]
            human_index = bbox_outputs[i][4]
            mid_x, mid_y = (xmin + xmax) / 2, (ymin + ymax) / 2
            person_x, person_y, depth_val = recursive_non_nan_search(ori_depth, int(mid_x), int(mid_y))
            if not np.isnan(depth_val):
                # Deproject the pixel through the camera model, then scale the
                # unit ray by the measured depth to get a 3-D point.
                point_3d = self.camera_model.projectPixelTo3dRay((person_x, person_y))
                person_3d_point = depth_val * np.array(point_3d)
                human_positions[int(human_index)] = person_3d_point
        return human_positions

    def run(self):
        """Main loop: detect + track pedestrians on each RGB-D frame pair and
        publish their 3-D positions until ROS shutdown."""
        # BUG FIX: the frame counter and results list were re-initialized on
        # every loop iteration, so with frame_interval > 1 every frame was
        # skipped forever (idx_frame never exceeded 1) and `results` never
        # accumulated across frames. Initialize them once before the loop.
        results = []
        idx_frame = 0
        while not rospy.is_shutdown():
            if self.rgb_stream is not None and self.depth_stream is not None:
                idx_frame += 1
                if idx_frame % self.args.frame_interval:
                    continue
                start = time.time()
                ori_im = self.rgb_stream
                ori_depth = self.depth_stream
                time_stamp = rospy.Time.now()
                im = cv2.cvtColor(ori_im, cv2.COLOR_BGR2RGB)
                # do detection
                bbox_xywh, cls_conf, cls_ids = self.detector(im)
                # select person class
                mask = cls_ids == 0
                bbox_xywh = bbox_xywh[mask]
                # bbox dilation just in case bbox too small, delete this line if using a better pedestrian detector
                bbox_xywh[:, 3:] *= 1.2
                cls_conf = cls_conf[mask]
                # do tracking
                outputs = self.deepsort.update(bbox_xywh, cls_conf, im)
                # Default (all-zero) poses for frames with no tracker output.
                all_pedestrian_depth = self.get_depth_from_pixels(ori_depth, [])
                all_pedestrian = [convert_point_to_pose(x) for x in all_pedestrian_depth]
                # draw boxes for visualization
                if len(outputs) > 0:
                    bbox_tlwh = []
                    bbox_xyxy = outputs[:, :4]
                    identities = outputs[:, -1]
                    ori_im = draw_boxes(ori_im, bbox_xyxy, identities)
                    for bb_xyxy in bbox_xyxy:
                        bbox_tlwh.append(self.deepsort._xyxy_to_tlwh(bb_xyxy))
                    results.append((idx_frame - 1, bbox_tlwh, identities))
                    # Recompute positions with the real tracker outputs.
                    all_pedestrian_depth = self.get_depth_from_pixels(ori_depth, outputs)
                    all_pedestrian = [convert_point_to_pose(x) for x in all_pedestrian_depth]
                self.publish_pose_array.publish(PoseArray(header=Header(stamp=time_stamp), poses=all_pedestrian))
                end = time.time()
                if self.args.display:
                    cv2.imshow("test", ori_im)
                    cv2.waitKey(1)
                if self.args.save_path:
                    # NOTE(review): self.writer is never created in this class;
                    # enabling --save_path will raise AttributeError — confirm.
                    self.writer.write(ori_im)
                # save results
                if self.args.save_path:
                    # NOTE(review): self.save_results_path is also never set here.
                    write_results(self.save_results_path, results, "mot")
                # logging
                self.logger.info(
                    "time: {:.03f}s, fps: {:.03f}, detection numbers: {}, tracking numbers: {}".format(
                        end - start, 1 / (end - start), bbox_xywh.shape[0], len(outputs)
                    )
                )
def parse_args():
    """Parse command-line options for the tracker.

    Returns:
        argparse.Namespace with the video path, config file locations and
        runtime flags (display, CUDA usage, camera index, frame interval, ...).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("VIDEO_PATH", type=str)
    parser.add_argument("--config_mmdetection", type=str, default="./configs/mmdet.yaml")
    parser.add_argument("--config_detection", type=str, default="./configs/yolov3.yaml")
    parser.add_argument("--config_deepsort", type=str, default="./configs/deep_sort.yaml")
    parser.add_argument("--config_fastreid", type=str, default="./configs/fastreid.yaml")
    parser.add_argument("--fastreid", action="store_true")
    parser.add_argument("--mmdet", action="store_true")
    parser.add_argument("--display", action="store_true")
    parser.add_argument("--frame_interval", type=int, default=1)
    parser.add_argument("--display_width", type=int, default=800)
    parser.add_argument("--display_height", type=int, default=600)
    parser.add_argument("--save_path", type=str)
    parser.add_argument("--cpu", dest="use_cuda", action="store_false", default=True)
    # FIX: use an int default directly; the original passed default="-1" and
    # relied on argparse implicitly running string defaults through `type`.
    parser.add_argument("--camera", action="store", dest="cam", type=int, default=-1)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
    cfg = get_config()
    # Select the detector config: mmdetection or YOLOv3.
    if args.mmdet:
        cfg.merge_from_file(args.config_mmdetection)
        cfg.USE_MMDET = True
    else:
        cfg.merge_from_file(args.config_detection)
        cfg.USE_MMDET = False
    cfg.merge_from_file(args.config_deepsort)
    # Optional fast-reid appearance-embedding backend for the tracker.
    if args.fastreid:
        cfg.merge_from_file(args.config_fastreid)
        cfg.USE_FASTREID = True
    else:
        cfg.USE_FASTREID = False
    # with VideoTracker(cfg, args, video_path=args.VIDEO_PATH) as vdo_trk:
    # vdo_trk.run()
    ros_vid_tracker = ROS_VideoTracker(
        cfg, args, "/realsense/color/image_raw", "/realsense/depth/image_rect_raw", "/realsense/depth/color/points"
    )
    ros_vid_tracker.run()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.