import json
import numpy as np
import re
import argparse
key_name_update_dict = {'object':'annotations', 'bounding_box': 'bbox',
'class': 'category_name',
}
cname_cid_dict = {}
set_of_objects = set()
object_types = ['barrel', 'tableround', 'chair', 'crate',
'tire', 'cardboardbox', 'rock','couch','pallet']
class NumpyEncoder(json.JSONEncoder):
""" Custom encoder for numpy data types """
def default(self, obj):
if isinstance(obj, (np.int_, np.intc, np.intp, np.int8,
np.int16, np.int32, np.int64, np.uint8,
np.uint16, np.uint32, np.uint64)):
return int(obj)
elif isinstance(obj, (np.float_, np.float16, np.float32, np.float64)):
return float(obj)
elif isinstance(obj, (np.complex_, np.complex64, np.complex128)):
return {'real': obj.real, 'imag': obj.imag}
elif isinstance(obj, (np.ndarray,)):
return obj.tolist()
elif isinstance(obj, (np.bool_)):
return bool(obj)
elif isinstance(obj, (np.void)):
return None
return json.JSONEncoder.default(self, obj)
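# Usage sketch: NumpyEncoder lets json.dumps handle numpy scalars and arrays
# transparently, e.g.
#   json.dumps({"bbox": np.array([10, 20, 30, 40])}, cls=NumpyEncoder)
#   -> '{"bbox": [10, 20, 30, 40]}'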
def recursive_parse(data, class_to_instance_flag = False):
new_dict = {}
for k,v in data.items():
        if isinstance(v, dict):
            newv = recursive_parse(v, class_to_instance_flag)
else:
newv = v
# update key names to match COCO
        newkey = key_name_update_dict.get(k, k)
if newkey == 'category_name':
            assert isinstance(newv, str), \
                "category_name not of type str: found {} of type: {}".format(newv, type(newv))
set_of_objects.add(newv)
# if class_to_instance_flag:
# use class_to_instance_map to set instance ids:
# update bbox from ymin, xmin, ymax, xmax to xmin, ymin, xmax, ymax
if newkey == 'bbox':
            assert isinstance(newv, list) and len(newv) == 4, \
                "Bbox coordinates: {} of type: {} with length {}, expect list of length 4".format(newv, type(newv), len(newv))
            # numpy-style fancy indexing does not work on plain lists, so reorder explicitly
            newv = [newv[1], newv[0], newv[3], newv[2]]
# add category_names to set, will add object id and instance id later
new_dict[newkey] = newv
return new_dict
def get_instance_ids_from_set(object_set):
class_to_instance_map = {}
class_instance_count = {}
used_classes = set()
for instances in object_set:
instance = instances.lower()
for classes in object_types:
if classes in instance:
instance_count = class_instance_count.get(classes, -1) + 1
class_to_instance_map[instances] = (classes, instance_count)
class_instance_count[classes] = instance_count
used_classes.add(classes)
return class_to_instance_map
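# Sketch of the mapping built above (hypothetical label names; numbering
# follows set iteration order): instance labels are matched to a known class
# by substring and numbered per class, e.g.
#   {'Barrel_1': ('barrel', 0), 'Barrel_2': ('barrel', 1), 'Chair_1': ('chair', 0)}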
# def add_instance_ids_to_json(data):
# for
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_json_file', type=str)
parser.add_argument('output_json_file', type=str)
args = parser.parse_args()
    data = json.load(open(args.input_json_file, 'r'))
new_data = recursive_parse(data)
    json.dump(new_data, open(args.output_json_file, 'w'), indent=4, sort_keys=True,
              separators=(', ', ': '), cls=NumpyEncoder)
|
import json
import warnings
import numpy as np
import pytest
from smalldataviewer.ext import h5py, NoSuchModule, z5py, imageio
from tests.constants import INTERNAL_PATH
def hdf5_file(path, array):
if isinstance(h5py, NoSuchModule):
pytest.skip("h5py not installed")
with h5py.File(path, "w") as f:
f.create_dataset(INTERNAL_PATH, data=array)
return True
def npy_file(path, array):
np.save(path, array)
return False
def npz_file(path, array):
np.savez(path, **{INTERNAL_PATH: array})
return True
def json_file(path, array):
with open(path, "w") as f:
json.dump({INTERNAL_PATH: array.tolist()}, f)
return True
def json_file_no_path(path, array):
with open(path, "w") as f:
json.dump(array.tolist(), f)
return False
def n5_file(path, array):
if isinstance(z5py, NoSuchModule):
pytest.skip("z5py not installed")
with z5py.File(path, use_zarr_format=False) as f:
ds = f.create_dataset(
INTERNAL_PATH, shape=array.shape, dtype=array.dtype, chunks=(10, 10, 10)
)
ds[:] = array
return True
def zarr_file(path, array):
if isinstance(z5py, NoSuchModule):
pytest.skip("z5py not installed")
with z5py.File(path, use_zarr_format=True) as f:
ds = f.create_dataset(
INTERNAL_PATH, shape=array.shape, dtype=array.dtype, chunks=(10, 10, 10)
)
ds[:] = array
return True
def imageio_mim_file(path, array):
if isinstance(imageio, NoSuchModule):
pytest.skip("imageio not installed")
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message=".*_tifffile")
imageio.mimwrite(path, array)
return False
def imageio_vol_file(path, array):
if isinstance(imageio, NoSuchModule):
pytest.skip("imageio not installed")
imageio.volwrite(path, array)
return False
file_constructors = [
("hdf5", hdf5_file),
("npy", npy_file),
("npz", npz_file),
("json", json_file),
("json", json_file_no_path),
("n5", n5_file),
("zarr", zarr_file),
("tiff", imageio_mim_file),
("gif", imageio_mim_file),
("bsdf", imageio_mim_file),
# ('dcm', imageio_mim_file), # imageio cannot write
("swf", imageio_mim_file),
]
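# Each constructor returns True when the format stores the array under an
# internal dataset path and False for flat single-array formats; a test could
# consume the pairs like this (a sketch, assuming pytest's tmp_path fixture):
#   @pytest.mark.parametrize("ext,constructor", file_constructors)
#   def test_roundtrip(tmp_path, ext, constructor):
#       needs_path = constructor(str(tmp_path / ("data." + ext)), np.zeros((20, 20, 20)))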
|
# import the libraries
import tkinter as tk
import pandas as pd
import numpy as np
import pyttsx3
import os
import shutil
import time
from tkinter import filedialog, messagebox, ttk
from tkinter.constants import ACTIVE
from datetime import date
from openpyxl import load_workbook
############################################################################################
# -------------------------------------- Frontend -------------------------------------- #
############################################################################################
# create the window object
root = tk.Tk()
# customize the window
root.title(" PyApp Station Data Desktop")  # window title text
root.iconbitmap("TotalEnergies.ico")  # window icon
root.geometry("900x600+15+15")  # window size
root.minsize(900, 600)
root.maxsize(1000, 700)
# window background configuration (color, etc.)
# root.config(background='#CCCCCC')
# menu bar
mainMenu = tk.Menu(root)
file_menu = tk.Menu(root, tearoff=0)
file_menu.add_command(label="A propos")
file_menu.add_command(label="Quit", command=root.quit)
mainMenu.add_cascade(label="File", menu=file_menu)
def compare():
"""Si le fichier sélectionné est valide, cela chargera le fichier"""
# file 1
file_path_1 = label_file_1["text"]
try:
excel_filename = r"{}".format(file_path_1)
if excel_filename[-4:] == ".csv":
df1 = pd.read_csv(excel_filename)
else:
if var_entry_1.get() == "":
df1 = pd.read_excel(excel_filename)
else:
df1 = pd.read_excel(
excel_filename, sheet_name=var_entry_1.get())
except ValueError:
tk.messagebox.showerror(
"Information", "The file you have chosen is invalid")
return None
except FileNotFoundError:
tk.messagebox.showerror(
"Information", f"No such file as {file_path_1}")
return None
# file 2
file_path_2 = label_file_2["text"]
try:
excel_filename = r"{}".format(file_path_2)
if excel_filename[-4:] == ".csv":
df2 = pd.read_csv(excel_filename)
else:
if var_entry_2.get() == "":
df2 = pd.read_excel(excel_filename)
else:
df2 = pd.read_excel(
excel_filename, sheet_name=var_entry_2.get())
except ValueError:
tk.messagebox.showerror(
"Information", "The file you have chosen is invalid")
return None
except FileNotFoundError:
tk.messagebox.showerror(
"Information", f"No such file as {file_path_2}")
return None
today = date.today()
folder_result = "{}/resutl_{}".format(lbl1["text"], today)
if os.path.exists(folder_result):
shutil.rmtree(f'{folder_result}')
        print(
            f"folder {folder_result} was removed and will be recreated\n-------------")
        print()
    else:
        print(f"folder {folder_result} does not exist\n-------------")
        print()
os.mkdir(folder_result)
folder_exp = f'{folder_result}/testAFR_{today}'
if os.path.exists(folder_exp):
shutil.rmtree(f'{folder_exp}')
        print(
            f"folder AFR_{today} was removed and will be recreated\n-------------")
        print()
    else:
        print(f"folder AFR_{today} does not exist\n-------------")
        print()
os.mkdir(folder_exp)
data_sap = df1.copy()
data_sharepoint = df2.copy()
sh_p = data_sharepoint['Affiliate'].unique()
sap_p = data_sap['Affiliate'].unique()
for i in sh_p:
if i in sap_p:
element = i
print()
print('-'*20)
print(f"Pays : {element}")
print('-'*20)
path_ecart = f"{folder_exp}/{element + '_' + str(today)}.xlsx"
#path_list = f"{folder_list_affiliate}/list_affiliate_{str(today)}.xlsx"
            df_sap = data_sap[data_sap['Affiliate'] == element].copy()
            df_sap.rename(columns={'SAPCODE': 'SAPCode'}, inplace=True)
df_sap = df_sap.drop_duplicates(subset="SAPCode", keep='first')
dim_sap = df_sap.shape
print(f"dimension données SAP pour {element} est : {dim_sap}")
df_sap['SAPCode'] = df_sap['SAPCode'].str.strip()
            df_sharepoint = data_sharepoint[data_sharepoint['Affiliate'] == element].copy()
df_sharepoint = df_sharepoint.drop_duplicates()
dim_sharepoint = df_sharepoint.shape
            print(
                f"Sharepoint data shape for {element}: {dim_sharepoint}")
df_sharepoint['SAPCode'] = df_sharepoint['SAPCode'].str.strip()
print()
print("Comparaison :")
print('-'*7)
X, Y, df_commun_1 = com(
df_sap, df_sharepoint, col_name_1["text"], col_name_2["text"])
writer = pd.ExcelWriter(path_ecart, engine='openpyxl')
df_sap.to_excel(writer, sheet_name='Data_SAP_Brute', index=False)
df_sharepoint.to_excel(
writer, sheet_name='Data_Sharepoint_Brute', index=False)
X.to_excel(
writer, sheet_name='ecart_SAP_vs_Sharepoint', index=False)
Y.to_excel(
writer, sheet_name='ecart_Sharepoint_vs_SAP', index=False)
            writer.close()  # close() writes the workbook to disk
def selected_item_1():
for i in box1.curselection():
# var_col_name_1.set(box1.get(i))
col_name_1["text"] = box1.get(i)
def selected_item_2():
for j in box2.curselection():
# var_col_name_2.set(box2.get(i))
col_name_2["text"] = box2.get(j)
def browse_button():
# Allow user to select a directory and store it in global var
# called folder_path
global folder_path
filename = filedialog.askdirectory()
lbl1["text"] = filename
# ---------- file-open dialog ---------- #
file_frame_1 = tk.LabelFrame(
root, text="Open First File", background='#CCCCCC')
file_frame_1.place(height=200, width=400, rely=0.05, relx=0.02)
# label
label_1 = tk.Label(
file_frame_1, text='If the file is an Excel file enter the name of the sheet (optional)')
label_1.place(rely=0.45, relx=0)
var_entry_1 = tk.StringVar()
sheet_name_1 = tk.Entry(file_frame_1, textvariable=var_entry_1)
sheet_name_1.place(rely=0.65, relx=0.10)
# Buttons
button1 = tk.Button(file_frame_1, text="Browse A File",
command=lambda: File_dialog_1())
button1.place(rely=0.85, relx=0.50)
button2 = tk.Button(file_frame_1, text="Load File",
command=lambda: view_data())
button2.place(rely=0.85, relx=0.30)
# file name / file path label text
label_file_1 = ttk.Label(file_frame_1, text="No File Selected")
label_file_1.place(rely=0, relx=0)
box1 = tk.Listbox(root)
box1.place(height=200, width=200, rely=0.43, relx=0.05)
# command keeps the widget's y-axis view in sync
treescrolly = tk.Scrollbar(box1, orient="vertical", command=box1.yview)
# command keeps the widget's x-axis view in sync
treescrollx = tk.Scrollbar(box1, orient="horizontal", command=box1.xview)
# attach the scrollbars to the listbox widget
box1.configure(xscrollcommand=treescrollx.set,
               yscrollcommand=treescrolly.set)
# make the scrollbar fill the widget's x axis
treescrollx.pack(side="bottom", fill="x")
# make the scrollbar fill the widget's y axis
treescrolly.pack(side="right", fill="y")
# selected column
# var_col_name_1 = tk.StringVar()
# var_col_name_1.trace("w", selected_item_1)
# col_name_1 = tk.Label(root, textvariable=var_col_name_1,
# background="#BFF3EC", width=22)
col_name_1 = tk.Label(root, text="",
background="#74BBE4", width=22)
col_name_1.place(rely=0.53, relx=0.28)
btn_1 = tk.Button(root, text='Ok', command=selected_item_1)
btn_1.place(rely=0.63, relx=0.30)
# ---------- file-open dialog ---------- #
file_frame_2 = tk.LabelFrame(
root, text="Open Second File", background='#CCCCCC')
file_frame_2.place(height=200, width=400, rely=0.05, relx=0.50)
# label
label_2 = tk.Label(
file_frame_2, text='If the file is an Excel file enter the name of the sheet (optional)')
label_2.place(rely=0.45, relx=0)
var_entry_2 = tk.StringVar()
sheet_name_2 = tk.Entry(file_frame_2, textvariable=var_entry_2)
sheet_name_2.place(rely=0.65, relx=0.10)
# Buttons
button3 = tk.Button(file_frame_2, text="Browse A File",
command=lambda: File_dialog_2())
button3.place(rely=0.85, relx=0.50)
button4 = tk.Button(file_frame_2, text="Load File",
command=lambda: view_data_2())
button4.place(rely=0.85, relx=0.30)
# file name / file path label text
label_file_2 = ttk.Label(file_frame_2, text="No File Selected")
label_file_2.place(rely=0, relx=0)
box2 = tk.Listbox(root)
box2.place(height=200, width=200, rely=0.43, relx=0.53)
# command keeps the widget's y-axis view in sync
treescrollw = tk.Scrollbar(box2, orient="vertical", command=box2.yview)
# command keeps the widget's x-axis view in sync
treescrollz = tk.Scrollbar(box2, orient="horizontal", command=box2.xview)
# attach the scrollbars to the listbox widget
box2.configure(xscrollcommand=treescrollz.set,
               yscrollcommand=treescrollw.set)
# make the scrollbar fill the widget's x axis
treescrollz.pack(side="bottom", fill="x")
# make the scrollbar fill the widget's y axis
treescrollw.pack(side="right", fill="y")
# selected column
# var_col_name_2 = tk.StringVar()
# var_col_name_2.trace("w", selected_item_2)
# col_name_2 = tk.Label(root, textvariable=var_col_name_2,
# background="#BFF3EC", width=22)
col_name_2 = tk.Label(root, text="",
background="#74BBE4", width=22)
col_name_2.place(rely=0.53, relx=0.76)
btn_2 = tk.Button(root, text='Ok', command=selected_item_2)
btn_2.place(rely=0.63, relx=0.80)
button_comparer = tk.Button(
root, text="Compare", width=20, background="#004C8C", fg="white", command=compare)
button_comparer.place(rely=0.90, relx=0.30)
button_quit = tk.Button(
root, text="Quit", width=20, background="#C60030", fg="white", command=root.quit)
button_quit.place(rely=0.90, relx=0.50)
fram = tk.Frame(root, bd=1)
# folder_path = tk.StringVar()
# lbl1 = tk.Label(fram, textvariable=folder_path)
lbl1 = tk.Label(fram, text="")
lbl1.grid(row=0, column=0)
button_folder = tk.Button(
fram, text="destination folder", command=browse_button)
button_folder.grid(row=1, column=0)
fram.place(rely=0.80, relx=0.25)
# button_comparer = tk.Button(
# root, text="compare", width=20, background="#3FB8F2", command=lambda: compare)
# button_folder.place(rely=0.90, relx=0.40)
###########################################################################################
# -------------------------------------- Backend -------------------------------------- #
###########################################################################################
def File_dialog_1():
"""Cette fonction ouvrira l'explorateur de fichiers et affectera le chemin de fichier choisi à label_file"""
filename_1 = filedialog.askopenfilename(initialdir="E:\Total\Station Data\Master data\Data source",
title="Select A File",
filetype=(("xlsx files", "*.xlsx"), ("All Files", "*.*")))
label_file_1["text"] = filename_1
return None
def view_data():
new_interface = tk.Toplevel(root)
new_interface.title("Previous Data of first file")
new_interface.iconbitmap("TotalEnergies.ico")
new_interface.geometry("800x550")
new_interface.resizable(width=False, height=False)
frame1 = tk.LabelFrame(new_interface, text="Excel Data")
frame1.place(height=500, width=750, rely=0.05, relx=0.05)
tv1 = ttk.Treeview(frame1)
tv1.place(relheight=1, relwidth=1)
    # command keeps the widget's y-axis view in sync
    treescrolly = tk.Scrollbar(frame1, orient="vertical", command=tv1.yview)
    # command keeps the widget's x-axis view in sync
    treescrollx = tk.Scrollbar(frame1, orient="horizontal", command=tv1.xview)
    # attach the scrollbars to the Treeview widget
    tv1.configure(xscrollcommand=treescrollx.set,
                  yscrollcommand=treescrolly.set)
    # make the scrollbar fill the Treeview's x axis
    treescrollx.pack(side="bottom", fill="x")
    # make the scrollbar fill the Treeview's y axis
    treescrolly.pack(side="right", fill="y")
def Load_excel_data_1():
"""Si le fichier sélectionné est valide, cela chargera le fichier"""
file_path_1 = label_file_1["text"]
try:
excel_filename = r"{}".format(file_path_1)
if excel_filename[-4:] == ".csv":
df1 = pd.read_csv(excel_filename)
for id, column in enumerate(df1.columns):
box1.insert(id, column)
else:
if var_entry_1.get() == "":
df1 = pd.read_excel(excel_filename)
for id, column in enumerate(df1.columns):
box1.insert(id, column)
else:
df1 = pd.read_excel(
excel_filename, sheet_name=var_entry_1.get())
for id, column in enumerate(df1.columns):
box1.insert(id, column)
except ValueError:
tk.messagebox.showerror(
"Information", "The file you have chosen is invalid")
return None
except FileNotFoundError:
tk.messagebox.showerror(
"Information", f"No such file as {file_path_1}")
return None
clear_data()
tv1["column"] = list(df1.columns)
tv1["show"] = "headings"
for column in tv1["columns"]:
tv1.heading(column, text=column)
df_rows = df1.to_numpy().tolist()
for row in df_rows:
tv1.insert("", "end", values=row)
return df1
def clear_data():
tv1.delete(*tv1.get_children())
return None
Load_excel_data_1()
def File_dialog_2():
"""Cette fonction ouvrira l'explorateur de fichiers et affectera le chemin de fichier choisi à label_file"""
filename_2 = filedialog.askopenfilename(initialdir="E:\Total\Station Data\Master data\Data source",
title="Select A File",
filetype=(("xlsx files", "*.xlsx"), ("All Files", "*.*")))
label_file_2["text"] = filename_2
return None
def view_data_2():
new_interface = tk.Toplevel(root)
new_interface.title("Previous Data of second file")
new_interface.iconbitmap("TotalEnergies.ico")
new_interface.geometry("800x550")
new_interface.resizable(width=False, height=False)
frame2 = tk.LabelFrame(new_interface, text="Excel Data")
frame2.place(height=500, width=750, rely=0.05, relx=0.05)
tv2 = ttk.Treeview(frame2)
tv2.place(relheight=1, relwidth=1)
    # command keeps the widget's y-axis view in sync
    treescrollw = tk.Scrollbar(frame2, orient="vertical", command=tv2.yview)
    # command keeps the widget's x-axis view in sync
    treescrollz = tk.Scrollbar(frame2, orient="horizontal", command=tv2.xview)
    # attach the scrollbars to the Treeview widget
    tv2.configure(xscrollcommand=treescrollz.set,
                  yscrollcommand=treescrollw.set)
    # make the scrollbar fill the Treeview's x axis
    treescrollz.pack(side="bottom", fill="x")
    # make the scrollbar fill the Treeview's y axis
    treescrollw.pack(side="right", fill="y")
def Load_excel_data_2():
"""Si le fichier sélectionné est valide, cela chargera le fichier"""
file_path_2 = label_file_2["text"]
try:
excel_filename = r"{}".format(file_path_2)
if excel_filename[-4:] == ".csv":
df2 = pd.read_csv(excel_filename)
for id, column in enumerate(df2.columns):
box2.insert(id, column)
else:
if var_entry_2.get() == "":
df2 = pd.read_excel(excel_filename)
for id, column in enumerate(df2.columns):
box2.insert(id, column)
else:
df2 = pd.read_excel(
excel_filename, sheet_name=var_entry_2.get())
for id, column in enumerate(df2.columns):
box2.insert(id, column)
except ValueError:
tk.messagebox.showerror(
"Information", "The file you have chosen is invalid")
return None
except FileNotFoundError:
tk.messagebox.showerror(
"Information", f"No such file as {file_path_2}")
return None
clear_data()
tv2["column"] = list(df2.columns)
tv2["show"] = "headings"
for column in tv2["columns"]:
tv2.heading(column, text=column)
df_rows = df2.to_numpy().tolist()
for row in df_rows:
tv2.insert("", "end", values=row)
return df2
def clear_data():
tv2.delete(*tv2.get_children())
return None
Load_excel_data_2()
def com(df_X, df_Y, col_x, col_y, texte=True):
if texte:
diff_X = np.setdiff1d(df_X[col_x], df_Y[col_y])
ecart_X = df_X.loc[df_X[col_x].isin(diff_X)]
print("Données SAP versus données Sharepoint :")
print(f"il y'a {len(diff_X)} code SAP de différence")
print()
diff_Y = np.setdiff1d(df_Y[col_y], df_X[col_x])
ecart_Y = df_Y.loc[df_Y[col_y].isin(diff_Y)]
print("Données Sharepoint versus données SAP :")
print(f"il y'a {len(diff_Y)} code SAP de différence")
commun = df_X.loc[~df_X[col_x].isin(diff_X)]
return ecart_X, ecart_Y, commun
else:
diff_X = np.setdiff1d(df_X[col_x], df_Y[col_y])
ecart_X = df_X.loc[df_X[col_x].isin(diff_X)]
diff_Y = np.setdiff1d(df_Y[col_y], df_X[col_x])
ecart_Y = df_Y.loc[df_Y[col_y].isin(diff_Y)]
commun = df_X.loc[~df_X[col_x].isin(diff_X)]
return ecart_X, ecart_Y, commun
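# Worked example of the comparison above (hypothetical SAP codes):
#   df_X[col_x] = ['A', 'B', 'C'] and df_Y[col_y] = ['B', 'C', 'D'] give
#   ecart_X = the df_X row with code 'A' (in SAP, missing from Sharepoint),
#   ecart_Y = the df_Y row with code 'D' (in Sharepoint, missing from SAP),
#   commun  = the df_X rows with codes 'B' and 'C'.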
def compare():
    """If the selected files are valid, load them and export the comparison.
    Note: this definition overrides the compare() defined earlier in the file.
    """
# file 1
file_path_1 = label_file_1["text"]
try:
excel_filename = r"{}".format(file_path_1)
if excel_filename[-4:] == ".csv":
df1 = pd.read_csv(excel_filename)
else:
if var_entry_1.get() == "":
df1 = pd.read_excel(excel_filename)
else:
df1 = pd.read_excel(
excel_filename, sheet_name=var_entry_1.get())
except ValueError:
tk.messagebox.showerror(
"Information", "The file you have chosen is invalid")
return None
except FileNotFoundError:
tk.messagebox.showerror(
"Information", f"No such file as {file_path_1}")
return None
# file 2
file_path_2 = label_file_2["text"]
try:
excel_filename = r"{}".format(file_path_2)
if excel_filename[-4:] == ".csv":
df2 = pd.read_csv(excel_filename)
else:
if var_entry_2.get() == "":
df2 = pd.read_excel(excel_filename)
else:
df2 = pd.read_excel(
excel_filename, sheet_name=var_entry_2.get())
except ValueError:
tk.messagebox.showerror(
"Information", "The file you have chosen is invalid")
return None
except FileNotFoundError:
tk.messagebox.showerror(
"Information", f"No such file as {file_path_2}")
return None
today = date.today()
folder_result = "{}/resutl_{}".format(lbl1["text"], today)
folder_exp = f'{folder_result}/testAFR_{today}'
if os.path.exists(folder_exp):
shutil.rmtree(f'{folder_exp}')
        print(
            f"folder AFR_{today} was removed and will be recreated\n-------------")
        print()
    else:
        print(f"folder AFR_{today} does not exist\n-------------")
        print()
    os.makedirs(folder_exp)  # makedirs also creates folder_result when it does not exist yet
# folder_list_affiliate= f'E:/Total/Station Data/Master Data/export/list_affiliate_{today}'
# os.mkdir(folder_list_affiliate)
# path_data_SAP = "E:/Total/Station Data/Master Data/Data source/Data-SAP.xlsx"
# path_data_sharepoint = "E:/Total/Station Data/Master Data/Data source/all-data-sharepoint.xlsx"
# path_list = f"{folder_result}/Affiliate_list.xlsx"
# if os.path.exists(path_list):
# os.remove(path_list)
# print("le fichier 'Affiliate_list.xlsx' à été bien supprimer et recréer\n-------------")
# else:
# print("le fichier 'Affiliate_list.xlsx' n'existe pas\n-------------")
# # data_sharepoint = pd.read_excel(
# # 'E:/Total/Station Data/Master Data/Data source/all-data-sharepoint.xlsx')
# # data_sap = pd.read_excel(
# # 'E:/Total/Station Data/Master Data/Data source/Data-SAP.xlsx')
# writer_list = pd.ExcelWriter(path_list, engine='openpyxl')
# df2.to_excel(
# writer_list, sheet_name='Station Data Brute', index=False)
# writer_list.save()
# writer_list.close()
# print()
data_sap = df1.copy()
data_sharepoint = df2.copy()
sh_p = data_sharepoint['Affiliate'].unique()
sap_p = data_sap['Affiliate'].unique()
for i in sh_p:
if i in sap_p:
element = i
# print()
# print('-'*20)
# print(f"Pays : {element}")
# print('-'*20)
path_ecart = f"{folder_exp}/{element + '_' + str(today)}.xlsx"
#path_list = f"{folder_list_affiliate}/list_affiliate_{str(today)}.xlsx"
            df_sap = data_sap[data_sap['Affiliate'] == element].copy()
            df_sap.rename(columns={'SAPCODE': 'SAPCode'}, inplace=True)
df_sap = df_sap.drop_duplicates(subset="SAPCode", keep='first')
dim_sap = df_sap.shape
# print(f"dimension données SAP pour {element} est : {dim_sap}")
df_sap['SAPCode'] = df_sap['SAPCode'].str.strip()
            df_sharepoint = data_sharepoint[data_sharepoint['Affiliate'] == element].copy()
df_sharepoint = df_sharepoint.drop_duplicates()
dim_sharepoint = df_sharepoint.shape
# print(f"dimension données sharepoint pour {element} est : {dim_sharepoint}")
df_sharepoint['SAPCode'] = df_sharepoint['SAPCode'].str.strip()
# print()
# print("Comparaison :")
# print('-'*7)
X, Y, df_commun_1 = com(
df_sap, df_sharepoint, col_name_1["text"], col_name_2["text"], texte=False)
# a, cost, df_commun_2 = com(
# df_commun_1, df_sharepoint, 'SAPCode_BM', 'SAPCode_BM', texte=False)
# b, cost, df_commun_3 = com(
# df_commun_2, df_sharepoint, 'SAPCode_BM_ISACTIVESITE', 'SAPCode_BM_ISACTIVESITE', texte=False)
writer = pd.ExcelWriter(path_ecart, engine='openpyxl')
df_sap.to_excel(writer, sheet_name='Data_SAP_Brute', index=False)
df_sharepoint.to_excel(
writer, sheet_name='Data_Sharepoint_Brute', index=False)
X.to_excel(
writer, sheet_name='ecart_SAP_vs_Sharepoint', index=False)
Y.to_excel(
writer, sheet_name='ecart_Sharepoint_vs_SAP', index=False)
# a.to_excel(
# writer, sheet_name='SAP_vs_Sharepoint_SAPCode_BM', index=False)
# b.to_excel(
# writer, sheet_name='SAP_vs_Sharepoint_SAPCode_BM_ISACTIVESITE', index=False)
            writer.close()  # close() writes the workbook to disk
# print()
# print('#'*70)
# print()
# sh = pd.read_excel("C:/Users/J1049122/Desktop/Station Data/Master-Data/Data source/Data-SAP.xlsx")
# sh = sh.drop_duplicates()
# sh['SAPCode'] = sh['SAPCode'].str.strip()
# z = sh['Affiliate'].unique()
# # for w in z:
# d = sh[sh['Affiliate']==w]
# ecart_sap = X.copy()
# ecart_sap = ecart_sap[["SAPCode", "Affiliate", "FINAL_SITENAME",
# "SITETOWN", "ISACTIVESITE", "BUSINESSMODEL", "BM_source"]]
# ecart_sap.columns = ['SAPCode', 'Affiliate', 'SAPName',
# 'Town', 'IsActiveSite', 'BUSINESSMODEL', 'BM_source']
# colonnes = ['Zone', 'SubZone', 'IntermediateStatus', 'Brand', 'Segment', 'ContractMode', 'ShopSegment', 'SFSActivity', 'SFSContractType', 'PartnerOrBrand', 'TargetKit', 'TargetPOSprovider',
# 'EstimatedInstallationDate', 'InstalledSolutionOnSite', 'SolutionProvider', 'SolutionInstallationDate', 'Status', 'SolutionRelease', 'SystemOwner', 'ConfigurationStatus',
# 'IsAllPumpsConnectedToFCC', 'Reason', 'AutomaticTankGauging', 'ATGProvider', 'ATGModel', 'ATGConnected', 'ATGInstallationDate', 'TotalCardEPT connection', 'FuelCardProvider',
# 'EPTHardware', 'EPTModel', 'EPTNumber', 'EPTConnected', 'PaymentLocation', 'HOSInstalled', 'HOSProvider', 'WSMSoftwareInstalled', 'WSMProvider', 'TELECOM', 'STABILITE TELECOM',
# 'STARTBOXStatus', 'BM_source'
# ]
# for col in colonnes:
# ecart_sap[col] = ""
# all_cols_ordonner = ['SAPCode', 'Zone', 'SubZone', 'Affiliate', 'SAPName', 'Town',
# 'IsActiveSite', 'IntermediateStatus', 'Brand', 'Segment',
# 'BUSINESSMODEL', 'ContractMode', 'ShopSegment', 'SFSActivity',
# 'SFSContractType', 'PartnerOrBrand', 'TargetKit', 'TargetPOSprovider',
# 'EstimatedInstallationDate', 'InstalledSolutionOnSite',
# 'SolutionProvider', 'SolutionInstallationDate', 'Status',
# 'SolutionRelease', 'SystemOwner', 'ConfigurationStatus',
# 'IsAllPumpsConnectedToFCC', 'Reason', 'AutomaticTankGauging',
# 'ATGProvider', 'ATGModel', 'ATGConnected', 'ATGInstallationDate',
# 'TotalCardEPT connection', 'FuelCardProvider', 'EPTHardware',
# 'EPTModel', 'EPTNumber', 'EPTConnected', 'PaymentLocation',
# 'HOSInstalled', 'HOSProvider', 'WSMSoftwareInstalled', 'WSMProvider',
# 'TELECOM', 'STABILITE TELECOM', 'STARTBOXStatus', 'BM_source']
# ecart_sap1 = ecart_sap.reindex(columns=all_cols_ordonner)
# ecart_sap1['data_source'] = "ecart SAP"
# ecart_sap1 = ecart_sap1[ecart_sap1['BUSINESSMODEL'] != 'CLOS']
# sh = df_sharepoint.copy()
# if a.shape[0] > 0:
# for j in range(a.shape[0]):
# for k in range(sh.shape[0]):
# if a['SAPCode'].iloc[j] == sh['SAPCode'].iloc[k]:
# sh['BUSINESSMODEL'].iloc[k] = a['BUSINESSMODEL'].iloc[j]
# sh['BM_source'].iloc[k] = a['BM_source'].iloc[j]
# sh = sh[['SAPCode', 'Zone', 'SubZone', 'Affiliate', 'SAPName', 'Town', 'IsActiveSite', 'IntermediateStatus', 'Brand', 'Segment',
# 'BUSINESSMODEL', 'ContractMode', 'ShopSegment', 'SFSActivity', 'SFSContractType', 'PartnerOrBrand', 'TargetKit', 'TargetPOSprovider',
# 'EstimatedInstallationDate', 'InstalledSolutionOnSite', 'SolutionProvider', 'SolutionInstallationDate', 'Status',
# 'SolutionRelease', 'SystemOwner', 'ConfigurationStatus', 'IsAllPumpsConnectedToFCC', 'Reason', 'AutomaticTankGauging',
# 'ATGProvider', 'ATGModel', 'ATGConnected', 'ATGInstallationDate', 'TotalCardEPT connection', 'FuelCardProvider', 'EPTHardware',
# 'EPTModel', 'EPTNumber', 'EPTConnected', 'PaymentLocation', 'HOSInstalled', 'HOSProvider', 'WSMSoftwareInstalled', 'WSMProvider',
# 'TELECOM', 'STABILITE TELECOM', 'STARTBOXStatus', 'BM_source']]
# sh['data_source'] = "Station Data"
# sh_1 = sh.append(ecart_sap1, ignore_index=True)
# book = load_workbook(path_list)
# writer_list = pd.ExcelWriter(path_list, engine='openpyxl')
# writer_list.book = book
# sh_1.to_excel(writer_list, sheet_name=element, index=False)
# writer_list.save()
# writer_list.close()
# main loop
root.config(menu=mainMenu)
root.mainloop()
|
import requests
from bs4 import BeautifulSoup
import json
import pandas as pd
import numpy as np
from sklearn import preprocessing
def process_rec(rec):
t1slash=rec[0].index('/')
t2slash=rec[1].index('/')
try:
print(rec)
int(rec[0][-1])
except:
print(rec)
dd = {
'line': rec[0][1]
,'t1score': rec[0][-1]
,'t2score': rec[1][-1]
,'t1p1': '_'.join([rec[0][2].lower(),rec[0][3].lower()])
,'t1p2': '_'.join([rec[0][t1slash+1].lower(),rec[0][t1slash+2].lower()])
,'t2p1': '_'.join([rec[1][0].lower(),rec[1][1].lower()])
,'t2p2': '_'.join([rec[1][t2slash+1].lower(),rec[1][t2slash+2].lower()])
}
print(dd)
return(dd)
def winloss(row):
t1=[int(i) for i in row['t1score'] if i not in ['(',')']]
t2=[int(i) for i in row['t2score'] if i not in ['(',')']]
if len(t1) == 2:
diff = ((t1[0] - t2[0]) + (t1[1] - t2[1]))/2
elif len(t1) == 3:
diff = ((t1[0] - t2[0]) + (t1[1] - t2[1]) + (t1[2] - t2[2]))/3
else:
diff = 0
return diff
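# Example: t1score '64' and t2score '26' parse to per-set games [6, 4] and
# [2, 6], so diff = ((6 - 2) + (4 - 6)) / 2 = 1.0, the average per-set game
# margin from team 1's point of view.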
def scrapeFCTL(hrefs):
games = []
for urlstring in hrefs:
url = 'https://fctlmen.tenniscores.com/'+urlstring
response = requests.get(url)
page = response.text
soup = BeautifulSoup(page, 'lxml')
tables = soup.find_all(class_="standings-table2")
hds = soup.find(class_="datelocheader")
teams = hds.contents[0][:hds.contents[0].find((':'))].split(' @ ')
for table in tables:
rec = [row.text.split() for row in table.find_all("tr")]
try:
dd=process_rec(rec)
dd['team1']=teams[0].strip()
dd['team2']=teams[1].strip()
if int(dd.get('t1score'))>0:
games.append(dd)
except:
print(rec)
return games
def getHREFS(baseurl):
baseresponse = requests.get(baseurl)
basesoup = BeautifulSoup(baseresponse.text, 'lxml')
basehrefs = basesoup.find_all(class_="lightbox-760-tall iframe link")
hrefs = set([i.get('href') for i in basehrefs])
return hrefs
def getDF(games):
df = pd.DataFrame.from_records(games)
df['line'] = df.line.astype(int)
df['t1_points'] = df.apply(lambda row: winloss(row), axis=1)
    df['t2_points'] = df.t1_points * -1
    # team strength: total points earned across a team's team1 and team2 appearances
    # (computed here, before pf is built, because pf maps team names through tf below)
    tf = df.groupby("team1")['t1_points'].sum() + df.groupby("team2")['t2_points'].sum()
pdfs = []
for col in ['t1p1', 't1p2']:
xf = df.groupby(col).agg({'t1_points':np.sum, 'line':np.sum, 'team2':'count', 'team1':'max'})
xf=xf.reset_index()
xf.columns = ['player','points', 'line','match_cnt', 'team']
pdfs.append(xf)
for col in ['t2p1', 't2p2']:
        xf = df.groupby(col).agg({'t2_points': np.sum, 'line': np.sum, 'team1': 'count', 'team2': 'max'})
xf=xf.reset_index()
xf.columns = ['player','points', 'line','match_cnt', 'team']
pdfs.append(xf)
pf = pd.concat(pdfs)
pf = pf.groupby('player').agg({ 'points':'sum', 'line':'sum', 'match_cnt': 'sum', 'team':'max'}).reset_index()
pf['avg_points'] = pf.points/pf.match_cnt
pf['line_quality'] = 5-pf.line/pf.match_cnt
pf['team_quality'] = pf.team.map(tf.rank())
return df, pf
def getNNF(pf):
nf=pf[['avg_points','line_quality','team_quality']].set_index(pf.player)
x = nf.values #returns a numpy array
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
nnf = pd.DataFrame(x_scaled)
nnf=nnf.set_index(pf.player).reset_index()
nnf.columns = ['player','avg_points','line_quality','team_quality']
pdd=pf[['player','team']].set_index('player')
pdx=pf[['player','match_cnt']].set_index('player')
nnf['team'] = nnf.player.map(pdd.team)
nnf['match_cnt'] = nnf.player.map(pdx.match_cnt)
nnf = nnf.loc[~nnf.player.str.contains('forfeit')]
nnf['raw_score'] = nnf['avg_points']*.5+nnf['line_quality']*.25+nnf['team_quality']*.25
nnf['power_rank'] = nnf.raw_score.rank()
nnf=nnf.sort_values(by='power_rank', ascending=False).reset_index(drop=True)
nnf['power_rank_pct'] = round(nnf.raw_score.rank(pct=True)*100,0)
return nnf
baseurls = {
'div3open': 'https://fctlmen.tenniscores.com/?mod=nndz-TjJiOWtOR3QzTU4yakRrY1NjN1FMcGpx&did=nndz-WnlXNHc3MD0%3D',
'div2open': 'https://fctlmen.tenniscores.com/?mod=nndz-TjJiOWtOR3QzTU4yakRrY1NjN1FMcGpx&did=nndz-WnlXNHc3ND0%3D',
'div2fifty': 'https://fctlmen.tenniscores.com/?mod=nndz-TjJiOWtOR3QzTU4yakRrY1NjN1FMcGpx&did=nndz-WnlXNHc3Zz0%3D',
'div3fifty': 'https://fctlmen.tenniscores.com/?mod=nndz-TjJiOWtOR3QzTU4yakRrY1NjN1FMcGpx&did=nndz-WnlXNHdydz0%3D'
}
for league in ['div3open','div2open','div2fifty','div3fifty']:
hrefs = getHREFS(baseurls[league])
games = scrapeFCTL(hrefs)
    df, pf = getDF(games)
nnf=getNNF(pf)
df.to_csv(league+'_games'+'.csv')
pf.to_csv(league+'_players'+'.csv')
nnf.to_csv(league+'_rankings'+'.csv')
# analysis
#nnf[:50]
#nnf.loc[nnf.team.str.contains('Rowayton')]
|
from tkinter import *
from tkinter.ttk import *
# Command interface
class Command:
    def comd(self): pass
#derived button class with an abstract comd method
class DButton(Button, Command):
def __init__(self, master, **kwargs):
super().__init__(master, command=self.comd, **kwargs)
def disable(self):
try:
self.state(['disabled'])
except AttributeError:
self.configure(state="disable")
def enable(self):
self.state(['!disabled'])
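# Usage sketch: subclass DButton and override comd() with the click action
# (the Command pattern), e.g.
#   class QuitButton(DButton):
#       def __init__(self, master):
#           super().__init__(master, text="Quit")
#       def comd(self):
#           self.master.quit()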
|
import django_filters
from subreddit.models import Subreddit
from django_filters import CharFilter
class SubredditFilter(django_filters.FilterSet):
title = CharFilter(field_name='title', lookup_expr='startswith')
class Meta:
model = Subreddit
fields = ['title']
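# Example: a request like /subreddits/?title=py keeps only subreddits whose
# title starts with "py" (a sketch; the exact URL depends on the view that
# instantiates this FilterSet).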
|
# Generated by Django 2.2.9 on 2020-02-27 08:36
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('dmarc', '0002_auto_20180124_2311'),
]
operations = [
migrations.AlterModelOptions(
name='report',
options={},
),
]
|
#1
def numberlist():
ui = input("Enter a list of numbers. Please put a space between each number ")
List = list(int(num) for num in ui.strip().split())
for i in range(len(List)):
List[i] = List[i] ** 2
print(List)
numberlist()
#2
def numberlist2():
print("\n")
ui2 = input("Enter a list of numbers. Please put a space between each number ")
List2 = ui2.split()
sum2 = 0
for num in List2:
sum2 += float(num)
print("Sum = ", sum2)
numberlist2()
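# Sample run for both exercises: entering "1 2 3" prints [1, 4, 9] for #1
# and "Sum =  6.0" for #2.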
|
# -*- coding: utf-8 -*-
from app.tests import WebTestCase
class ModelTestCase(WebTestCase):
""" Parent of all models test classes """
pass
|
def ghostbusters(building):
return building.replace(' ', '') if ' ' in building else "You just wanted my autograph didn't you?"
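# Examples: ghostbusters("Danny Torrance") -> "DannyTorrance";
#           ghostbusters("Dan") -> "You just wanted my autograph didn't you?"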
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
""""
версия 1.0.1
общие утилиты
"""
import os
import logging
import numpy as np
import torch
import functools
import operator
import tqdm
from utils.data_structures import Config, PredictResultDescription
log = logging.getLogger(__name__)
def batch_predict_area_class(model, data_loader, cfg: Config):
"""
:param model:
:param data_loader:
:param cfg:
:return: кортеж из трех массивов -
(координаты исходыне (GPS) в текстовом формате строками размер - N
координаты точек в пикселях (np.array shape (N, 2)
предсказания классов np.array shape (N, кол-во классов)
"""
gps = []
pixels = []
predicts = []
log.info(f"Ready to predict for {len(data_loader)} batches")
for batch in tqdm.tqdm(data_loader,
total=len(data_loader), desc="recognizing...",
position=0, unit="points", unit_scale=cfg.batch_size,
leave=True):
with torch.no_grad():
predicts.append(model(batch["tensor"]).cpu().numpy())
gps.append(batch["gps"])
pixels.append(batch["pixels"].numpy())
gps = functools.reduce(operator.iconcat, gps, [])
pixels = np.concatenate(pixels, axis=0)
predicts = np.concatenate(predicts, axis=0)
log.info(f" {len(gps)} records processed")
return gps, pixels, predicts
def description_iterator(prediction_results: tuple,
                         cfg,
                         geo_map):
    """
    decode the prediction results
    :param prediction_results: tuple of three arrays -
        (source (GPS) coordinates as text strings, size N;
        point coordinates in pixels, np.array of shape (N, 2);
        class predictions, np.array of shape (N, number of classes))
    :param cfg:
    :param geo_map:
    :return: iterator
    """
gps_list, pixels_arr, predicts_arr = prediction_results
sorted_class_ids_list = np.argsort(predicts_arr, axis=1)
class_dict = {int(k): cfg.map.class_list[k] for k in cfg.map.class_list}
for gps, pixels, sorted_class_ids, predict in zip(gps_list,
pixels_arr,
sorted_class_ids_list,
predicts_arr):
        if geo_map.check_coord(pixels, geo_map.crop_size):
            prob = np.exp(-predict[sorted_class_ids[-1]])
            # prediction for the most likely class
            predicted_class1 = sorted_class_ids[-1]
            # prediction for the second most likely class
            predicted_class2 = sorted_class_ids[-2]
            if prob > cfg.threshold:
                description = f"{class_dict[predicted_class1]}"
            else:
                description = f"{class_dict[predicted_class1]}_or_{class_dict[predicted_class2]}"
        else:
            prob = 0.0  # prob would otherwise be unbound on this error path
            description = " coordinate error"
            log.warning(f" coordinate error GPS: {gps} pixel {pixels}")
yield PredictResultDescription(coord=pixels,
gps=gps,
description=description,
probability=prob)
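# Usage sketch (assuming a model, DataLoader, and geo_map prepared elsewhere):
#   results = batch_predict_area_class(model, loader, cfg)
#   for item in description_iterator(results, cfg, geo_map):
#       log.info(f"{item.gps}: {item.description} ({item.probability:.2f})")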
def norm_file_path(file_path, norm_path):
if not os.path.isabs(os.path.dirname(file_path)):
file_path = os.path.join(norm_path, file_path)
return file_path
|
from django.shortcuts import render
from rest_framework import viewsets
from .serializers import UserSerializer
from .models.site_application import *
class UserView(viewsets.ModelViewSet):
serializer_class = UserSerializer
queryset = RecordedUser.objects.all()
# Create your views here.
|
import numpy as np
import cv2
image = cv2.imread("./../si.jpg", 1)
height, width = image.shape[:2]
matSrc = np.float32([[0,0],[0,height-1],[width-1,0]])
matDst = np.float32([[100,50],[300,height+100],[width+300,300]])
matAffine = cv2.getAffineTransform(matSrc,matDst)
print(matAffine)
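# getAffineTransform solves for the 2x3 matrix M that maps the three source
# points onto the three destination points; warpAffine then resamples with
# dst(x, y) = src(M[0,0]*x + M[0,1]*y + M[0,2], M[1,0]*x + M[1,1]*y + M[1,2]).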
dst = cv2.warpAffine(image,matAffine,(2000,2000))
cv2.imshow('dst',dst)
cv2.imshow("src",image)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Filename: step03_run_preliminary_regression_with_count_year_control
# @Date: 2020/3/12
# @Author: Mark Wang
# @Email: wangyouan@gamil.com
"""
python -m ConstructRegressionFile.Stata.step03_run_preliminary_regression_with_count_year_control
"""
import os
from Constants import Constants as const
from .step02_rerun_preliminary_regression import generate_regression_code, DEP_VARS
CTRL_VARS = 'ln_at SGA TANGIBILITY CAPEX PTBI VOL_PTBI ln_GDP ln_GDP_PC GC_TAX_TOTL_GD_ZS FP_CPI_TOTL_ZG FR_INR_RINR'
IND_VARS = ['formal_Extend_3', 'real_Extend_3', 'formal_Shrink_3']
CONDITION = 'if firm_num >= 100'
CONDITION2 = 'if firm_num >= 100 & fyear >= 1995'
if __name__ == '__main__':
date_str = '20200312'
save_file = os.path.join(const.STATA_CODE_PATH, '{}_preliminary_code_2.do'.format(date_str))
output_path = os.path.join(const.STATA_RESULT_PATH, '{}_preliminary_2'.format(date_str))
if not os.path.isdir(output_path):
os.makedirs(output_path)
cmd_list = ['clear', 'use "{}"'.format(os.path.join(const.STATA_DATA_PATH, '20200311_regression_data.dta'))]
for ind_key in IND_VARS:
output_file = os.path.join(output_path, '{}.txt'.format(ind_key[:-2]))
for dep_key in DEP_VARS:
cmd_list.extend(generate_regression_code(dep=dep_key, ind=ind_key, ctrl=CTRL_VARS, fe_option='gvkey fyear',
cluster_option='gvkey', output_path=output_file,
condition=CONDITION if 'Shrink' in dep_key else CONDITION2,
text_option='Firm Dummy, Yes, Year Dummy, Yes, Cluster, Firm',
data_description='tstat bdec(4) tdec(4) rdec(4)'))
with open(save_file, 'w') as f:
f.write('\n'.join(cmd_list))
print('do "{}"'.format(save_file))
|
from _typeshed import Incomplete
from collections.abc import Generator
from networkx.algorithms.flow import edmonds_karp
default_flow_func = edmonds_karp
def all_node_cuts(
G, k: Incomplete | None = None, flow_func: Incomplete | None = None
) -> Generator[Incomplete, None, None]: ...
|
import pymongo
from tqdm import tqdm
import copy
import os
from utils import *
if __name__ == '__main__':
config = Config()
data = getData(config.db, config.data_dir+'/data0.pkl', restore=True, save=True)
data = delRepetition(dataset=data, save_dir=config.data_dir+'/data1_unique.pkl', restore=True, save=True)
data = text_clear(dataset=data, save_dir=config.data_dir+'/data2_keep.pkl', restore=True, save=True)
data = filterQ(dataset=data, save_dir=config.data_dir+'/data3_filterQ.pkl', restore=True, save=True)
    _ = statisticChar(data)
data = delToLongSample(dataset=data, save_dir=config.data_dir+'/data4_delToLong.pkl', restore=True, save=True)
data = final_check(dataset=data, save_dir=config.data_dir+'/data5_final.pkl', restore=True, save=True)
writeToDB(data, config.new_db)
print('end')
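# Pipeline note (inferred from the save/restore flags, not verified against
# utils.py): each stage appears to pickle its output under config.data_dir and
# reload it on reruns, so the pipeline can resume after any completed stage.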
|
"""clangparser - use clang to get preprocess a source code."""
import logging
import os
import collections
from clang.cindex import Index, TranslationUnit
from clang.cindex import TypeKind
from ctypeslib.codegen import cursorhandler
from ctypeslib.codegen import typedesc
from ctypeslib.codegen import typehandler
from ctypeslib.codegen import util
from ctypeslib.codegen.handler import DuplicateDefinitionException
from ctypeslib.codegen.handler import InvalidDefinitionError
from ctypeslib.codegen.handler import InvalidTranslationUnitException
log = logging.getLogger("clangparser")
class Clang_Parser:
"""
Will parse libclang AST tree to create a representation of Types and
different others source code objects objets as described in Typedesc.
For each Declaration a declaration will be saved, and the type of that
declaration will be cached and saved.
"""
has_values = {
"Enumeration",
"Function",
"FunctionType",
"OperatorFunction",
"Method",
"Constructor",
"Destructor",
"OperatorMethod",
"Converter",
}
# FIXME, macro definition __SIZEOF_DOUBLE__
ctypes_typename = {
TypeKind.VOID: "None", # because ctypes.POINTER(None) == c_void_p
TypeKind.BOOL: "c_bool",
TypeKind.CHAR_U: "c_ubyte", # ?? used for PADDING
TypeKind.UCHAR: "c_ubyte", # unsigned char
TypeKind.CHAR16: "c_wchar", # char16_t
TypeKind.CHAR32: "c_wchar", # char32_t
TypeKind.USHORT: "c_ushort",
TypeKind.UINT: "c_uint",
TypeKind.ULONG: "TBD",
TypeKind.ULONGLONG: "c_ulonglong",
TypeKind.UINT128: "c_uint128", # FIXME
TypeKind.CHAR_S: "c_char", # char
TypeKind.SCHAR: "c_byte", # signed char
TypeKind.WCHAR: "c_wchar",
TypeKind.SHORT: "c_short",
TypeKind.INT: "c_int",
TypeKind.LONG: "TBD",
TypeKind.LONGLONG: "c_longlong",
TypeKind.INT128: "c_int128", # FIXME
TypeKind.FLOAT: "c_float",
TypeKind.DOUBLE: "c_double",
TypeKind.LONGDOUBLE: "c_longdouble",
TypeKind.POINTER: "POINTER_T",
TypeKind.NULLPTR: "c_void_p",
}
def __init__(self, flags):
self.all = collections.OrderedDict()
# a shortcut to identify registered decl in cases of records
self.all_set = set()
self.cpp_data = {}
self._unhandled = []
self.fields = {}
self.tu = None
self.tu_options = None
self.flags = flags
self.ctypes_sizes = {}
self.init_parsing_options()
self.make_ctypes_convertor(flags)
self.cursorkind_handler = cursorhandler.CursorHandler(self)
self.typekind_handler = typehandler.TypeHandler(self)
self.__filter_location = None
self.__processed_location = set()
def init_parsing_options(self):
"""Set the Translation Unit to skip functions bodies per default."""
self.tu_options = TranslationUnit.PARSE_SKIP_FUNCTION_BODIES
def activate_macros_parsing(self):
"""Activates the detailled code parsing options in the Translation
Unit."""
self.tu_options |= TranslationUnit.PARSE_DETAILED_PROCESSING_RECORD
def activate_comment_parsing(self):
"""Activates the comment parsing options in the Translation Unit."""
self.tu_options |= TranslationUnit.PARSE_INCLUDE_BRIEF_COMMENTS_IN_CODE_COMPLETION
def deactivate_function_body_parsing(self):
self.tu_options |= TranslationUnit.PARSE_SKIP_FUNCTION_BODIES
def filter_location(self, src_files):
self.__filter_location = [os.path.abspath(f) for f in src_files]
def parse(self, filename):
"""
. reads 1 file
. if there is a compilation error, print a warning
. get root cursor and recurse
. for each STRUCT_DECL, register a new struct type
. for each UNION_DECL, register a new union type
        . for each TYPEDEF_DECL, register a new alias/typedef to the underlying type
- underlying type is cursor.type.get_declaration() for Record
. for each VAR_DECL, register a Variable
. for each TYPEREF ??
"""
if os.path.abspath(filename) in self.__processed_location:
return
index = Index.create()
translation_unit = index.parse(filename, self.flags, options=self.tu_options)
if not translation_unit:
log.warning("unable to load input")
return
self._parse_tu_diagnostics(translation_unit, filename)
self.tu = translation_unit
root = self.tu.cursor
for node in root.get_children():
self.start_element(node)
return
def parse_string(self, input_data, lang="c", all_warnings=False, flags=None):
"""Use this parser on a memory string/file, instead of a file on disk"""
translation_unit = util.get_tu(input_data, lang, all_warnings, flags)
self._parse_tu_diagnostics(translation_unit, "memory_input.c")
self.tu = translation_unit
root = self.tu.cursor
for node in root.get_children():
self.start_element(node)
@staticmethod
def _parse_tu_diagnostics(translation_unit, input_filename):
if len(translation_unit.diagnostics) == 0:
return
errors = []
for diagnostic in translation_unit.diagnostics:
msg = (
f"{diagnostic.spelling} ({diagnostic.location.file}:{diagnostic.location.line}:"
f"{diagnostic.location.column}) during processing {input_filename}"
)
log.warning(msg)
if diagnostic.severity > 2:
errors.append(msg)
if len(errors) > 0:
log.warning("Source code has %d error. Please fix.", len(errors))
# code.interact(local=locals())
raise InvalidTranslationUnitException(errors[0])
def start_element(self, node):
"""Recurses in children of this node"""
if node is None:
return None
if self.__filter_location is not None:
# dont even parse includes.
# FIXME: go back on dependencies ?
if node.location.file is None:
return None
filepath = os.path.abspath(node.location.file.name)
if filepath not in self.__filter_location:
if not filepath.startswith("/usr"):
log.debug("skipping include '%s'", filepath)
return None
# find and call the handler for this element
log.debug(
"%s:%d: Found a %s|%s|%s",
node.location.file,
node.location.line,
node.kind.name,
node.displayname,
node.spelling,
)
# build stuff.
try:
stop_recurse = self.parse_cursor(node)
if node.location.file is not None:
filepath = os.path.abspath(node.location.file.name)
self.__processed_location.add(filepath)
# Signature of parse_cursor is:
# if the fn returns True, do not recurse into children.
# anything else will be ignored.
if stop_recurse is not False: # True:
return None
# if fn returns something, if this element has children, treat
# them.
for child in node.get_children():
self.start_element(child)
except InvalidDefinitionError:
log.exception("Invalid definition")
# if the definition is invalid
# startElement returns None.
return None
def register(self, name, obj):
"""Registers an unique type description"""
if (name, obj) in self.all_set:
log.debug("register: %s already defined: %s", name, obj.name)
return self.all[name]
if name in self.all:
if not isinstance(self.all[name], typedesc.Structure) or (self.all[name].members is not None):
# code.interact(local=locals())
raise DuplicateDefinitionException(
f"register: {name} which has a previous incompatible definition: {obj.name}"
f"\ndefined here: {obj.location}"
f"\npreviously defined here: {self.all[name].location}"
)
if isinstance(self.all[name], typedesc.Structure) and (self.all[name].members is None):
return obj
log.debug("register: %s ", name)
self.all[name] = obj
self.all_set.add((name, obj))
return obj
def get_registered(self, name):
"""Returns a registered type description"""
return self.all[name]
def is_registered(self, name):
"""Checks if a named type description is registered"""
return name in self.all
def remove_registered(self, name):
"""Removes a named type"""
log.debug("Unregister %s", name)
self.all_set.remove((name, self.all[name]))
del self.all[name]
def make_ctypes_convertor(self, _flags):
"""
Fix clang types to ctypes conversion for this parsing instance.
Some architecture dependent size types have to be changed if the target
architecture is not the same as local
"""
# NOTE: one could also use the __SIZEOF_x__ MACROs to obtain sizes.
translation_unit = util.get_tu(
"""
typedef short short_t;
typedef int int_t;
typedef long long_t;
typedef long long longlong_t;
typedef float float_t;
typedef double double_t;
typedef long double longdouble_t;
typedef void* pointer_t;""",
flags=_flags,
)
size = util.get_cursor(translation_unit, "short_t").type.get_size() * 8
self.ctypes_typename[TypeKind.SHORT] = f"c_int{size:d}"
self.ctypes_typename[TypeKind.USHORT] = f"c_uint{size:d}"
self.ctypes_sizes[TypeKind.SHORT] = size
self.ctypes_sizes[TypeKind.USHORT] = size
size = util.get_cursor(translation_unit, "int_t").type.get_size() * 8
self.ctypes_typename[TypeKind.INT] = f"c_int{size:d}"
self.ctypes_typename[TypeKind.UINT] = f"c_uint{size:d}"
self.ctypes_sizes[TypeKind.INT] = size
self.ctypes_sizes[TypeKind.UINT] = size
size = util.get_cursor(translation_unit, "long_t").type.get_size() * 8
self.ctypes_typename[TypeKind.LONG] = f"c_int{size:d}"
self.ctypes_typename[TypeKind.ULONG] = f"c_uint{size:d}"
self.ctypes_sizes[TypeKind.LONG] = size
self.ctypes_sizes[TypeKind.ULONG] = size
size = util.get_cursor(translation_unit, "longlong_t").type.get_size() * 8
self.ctypes_typename[TypeKind.LONGLONG] = f"c_int{size:d}"
self.ctypes_typename[TypeKind.ULONGLONG] = f"c_uint{size:d}"
self.ctypes_sizes[TypeKind.LONGLONG] = size
self.ctypes_sizes[TypeKind.ULONGLONG] = size
# FIXME : Float && http://en.wikipedia.org/wiki/Long_double
size0 = util.get_cursor(translation_unit, "float_t").type.get_size() * 8
size1 = util.get_cursor(translation_unit, "double_t").type.get_size() * 8
size2 = util.get_cursor(translation_unit, "longdouble_t").type.get_size() * 8
# 2014-01 stop generating crap.
# 2015-01 reverse until better solution is found
# the idea is that you cannot assume a c_double will be same format as a c_long_double.
# at least this pass size TU
if size1 != size2:
self.ctypes_typename[TypeKind.LONGDOUBLE] = "c_long_double_t"
else:
self.ctypes_typename[TypeKind.LONGDOUBLE] = "c_double"
self.ctypes_sizes[TypeKind.FLOAT] = size0
self.ctypes_sizes[TypeKind.DOUBLE] = size1
self.ctypes_sizes[TypeKind.LONGDOUBLE] = size2
# save the target pointer size.
size = util.get_cursor(translation_unit, "pointer_t").type.get_size() * 8
self.ctypes_sizes[TypeKind.POINTER] = size
self.ctypes_sizes[TypeKind.NULLPTR] = size
log.debug(
"ARCH sizes: long:%s longdouble:%s",
self.ctypes_typename[TypeKind.LONG],
self.ctypes_typename[TypeKind.LONGDOUBLE],
)
return
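    # For example, when the flags target a typical 64-bit Linux ABI the probe
    # above maps TypeKind.LONG to "c_int64", while an "-m32" target would map
    # it to "c_int32" (a sketch of the expected outcome for common targets).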
def get_ctypes_name(self, typekind):
return self.ctypes_typename[typekind]
def get_ctypes_size(self, typekind):
return self.ctypes_sizes[typekind]
def parse_cursor(self, cursor):
"""Forward parsing calls to dedicated CursorKind Handlder"""
return self.cursorkind_handler.parse_cursor(cursor)
def parse_cursor_type(self, _cursor_type):
"""Forward parsing calls to dedicated TypeKind Handlder"""
return self.typekind_handler.parse_cursor_type(_cursor_type)
###########################################################################
################
def get_macros(self, text):
if text is None:
return
text = "".join(text)
# preprocessor definitions that look like macros with one or more
# arguments
for macro in text.splitlines():
name, body = macro.split(None, 1)
name, args = name.split("(", 1)
args = f"({args}"
self.all[name] = typedesc.Macro(name, args, body)
def get_aliases(self, text, namespace):
if text is None:
return
# preprocessor definitions that look like aliases:
# #define A B
text = "".join(text)
aliases = {}
for alias in text.splitlines():
name, value = alias.split(None, 1)
alias = typedesc.Alias(name, value)
aliases[name] = alias
self.all[name] = alias
for name, alias in aliases.items():
value = alias.alias
# the value should be either in namespace...
if value in namespace:
# set the type
alias.typ = namespace[value]
# or in aliases...
elif value in aliases:
alias.typ = aliases[value]
# or unknown.
else:
# not known
# print "skip %s = %s" % (name, value)
pass
def get_result(self):
# all of these should register()
interesting = (
typedesc.Typedef,
typedesc.Enumeration,
typedesc.EnumValue,
typedesc.Function,
typedesc.Structure,
typedesc.Union,
typedesc.Variable,
typedesc.Macro,
typedesc.Alias,
typedesc.FunctionType,
)
# typedesc.Field) #???
self.get_macros(self.cpp_data.get("functions"))
# fix all objects after that all are resolved
remove = []
for _id, _item in self.all.items():
if _item is None:
log.warning("ignoring %s", _id)
continue
location = getattr(_item, "location", None)
# FIXME , why do we get different location types
if location and hasattr(location, "file"):
_item.location = location.file.name, location.line
log.error("%s %s came in with a SourceLocation", _id, _item)
elif location is None:
# FIXME make this optional to be able to see internals
# FIXME macro/alias are here
log.warning("No source location in %s - ignoring", _id)
remove.append(_id)
for _x in remove:
self.remove_registered(_x)
# Now we can build the namespace.
namespace = {}
for i in self.all.values():
if not isinstance(i, interesting):
log.debug("ignoring %s", i)
continue # we don't want these
name = getattr(i, "name", None)
if name is not None:
namespace[name] = i
self.get_aliases(self.cpp_data.get("aliases"), namespace)
result = []
for i in self.all.values():
if isinstance(i, interesting):
result.append(i)
log.debug("parsed items order: %s", result)
return result
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import redirect
from .models import Post
from django.contrib.auth.models import User
from django.utils import timezone
from django.shortcuts import render, get_object_or_404
from .forms import PostForm
import re
import string
import collections
from operator import itemgetter
def post_list(request):
posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')
new_posts = []
    def special_characters(data):
        regexp = '[{}]*'.format(re.escape(string.punctuation))
        return re.sub(regexp, '', data)
for i, post in enumerate(posts):
counter = collections.Counter()
text = post.text
for word in special_characters(text).split():
counter[word] += 1
post.uniq = len(counter)
new_posts.append(post)
# new_posts[i]['uniq'] = len(counter)
# new_posts = sorted(new_posts, key=itemgetter('uniq'), reverse=True)
new_posts = sorted(new_posts, key=lambda k: k.uniq, reverse=True)
return render(request, 'task/post_list.html', {'posts': new_posts})
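# Example of the uniqueness metric (hypothetical post text): "Hello, hello
# world!" strips to "Hello hello world", whose counter holds three distinct
# (case-sensitive) words, so post.uniq == 3.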
def post_detail(request, pk):
post = get_object_or_404(Post, pk=pk)
return render(request, 'task/post_detail.html', {'post': post})
def post_new(request):
if request.method == "POST":
form = PostForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
demo = User.objects.get(username='demo')
post.author = demo
post.published_date = timezone.now()
post.save()
            return redirect('post_list')
else:
form = PostForm()
return render(request, 'task/post_edit.html', {'form': form})
def post_edit(request, pk):
post = get_object_or_404(Post, pk=pk)
if request.method == "POST":
form = PostForm(request.POST, instance=post)
if form.is_valid():
post = form.save(commit=False)
demo = User.objects.get(username='demo')
post.author = demo
post.published_date = timezone.now()
post.save()
return redirect('post_list')
else:
form = PostForm(instance=post)
return render(request, 'task/post_edit.html', {'form': form})
# Create your views here.
|
from typing import Set
from argsolverdd.common.atom import Atom
from argsolverdd.common.misc import NameDict
class Rule:
def __init__(self, name: str, premises: Set[Atom], conclusions: Atom, strict: bool):
self.name = name
self.premises = premises
self.conclusions = conclusions
self.strict = strict
self.preferred_to = set()
    def __repr__(self):
        arrow = "->" if self.strict else "=>"
        premises = ", ".join(str(p) for p in self.premises)
        return f"{self.name}: {premises} {arrow} {self.conclusions}"
    def __hash__(self):
        premise_hash = sum(hash(p) for p in self.premises)
        return hash((premise_hash, self.name, self.conclusions, self.strict))
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
return self.name == other.name \
and self.conclusions == other.conclusions \
and self.strict == other.strict \
and self.premises == other.premises
def __ge__(self, other):
if not isinstance(other, type(self)):
raise TypeError(f"Cannot compare Rule with {type(other)}")
if self.strict and not other.strict:
return True
if other.strict and not self.strict:
return False
return self not in other.preferred_to
@staticmethod
def assign_preferences(ruleset, pref_dict):
for rule in ruleset:
rule.preferred_to = pref_dict[rule]
class Rules(NameDict):
def __init__(self, rules):
super(Rules, self).__init__(rules)
def _compare_elitist(self, other) -> bool:
"""There is some rule in set R1 which is preferred (>=) to all rules in ser R2
Return True if the current is preferred to the other, False otherwise.
"""
for r1 in self:
if all(r1 >= r2 for r2 in other):
return True
return False
def _compare_democratic(self, other) -> bool:
"""For every rule r1 in set R1, there is a rule r2 in set R2 which is less preferred (r1 >= r2)
Return True if the current is preferred to the other, False otherwise.
"""
for r1 in self:
if not any(r1 >= r2 for r2 in other):
return False
return True
def preferred_to(self, other, elitist=True):
if elitist:
return self._compare_elitist(other)
else:
return self._compare_democratic(other)
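# A minimal sketch (illustrative only; plain strings stand in for Atom
# instances, which only need to be hashable here):
if __name__ == "__main__":
    r1 = Rule("r1", {"a"}, "b", strict=True)
    r2 = Rule("r2", {"b"}, "c", strict=False)
    assert r1 >= r2            # a strict rule is preferred to a defeasible one
    assert not (r2 >= r1)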
|
# This file is part of beets.
# Copyright 2016, Bruno Cauet.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from os import path, remove
from tempfile import mkdtemp
from shutil import rmtree
import unittest
from unittest.mock import Mock, MagicMock
from beetsplug.smartplaylist import SmartPlaylistPlugin
from beets.library import Item, Album, parse_query_string
from beets.dbcore import OrQuery
from beets.dbcore.query import NullSort, MultipleSort, FixedFieldSort
from beets.util import syspath, bytestring_path, py3_path, CHAR_REPLACE
from beets.ui import UserError
from beets import config
from test import _common
from test.helper import TestHelper
class SmartPlaylistTest(_common.TestCase):
def test_build_queries(self):
spl = SmartPlaylistPlugin()
self.assertEqual(spl._matched_playlists, None)
self.assertEqual(spl._unmatched_playlists, None)
config['smartplaylist']['playlists'].set([])
spl.build_queries()
self.assertEqual(spl._matched_playlists, set())
self.assertEqual(spl._unmatched_playlists, set())
config['smartplaylist']['playlists'].set([
{'name': 'foo',
'query': 'FOO foo'},
{'name': 'bar',
'album_query': ['BAR bar1', 'BAR bar2']},
{'name': 'baz',
'query': 'BAZ baz',
'album_query': 'BAZ baz'}
])
spl.build_queries()
self.assertEqual(spl._matched_playlists, set())
foo_foo = parse_query_string('FOO foo', Item)
baz_baz = parse_query_string('BAZ baz', Item)
baz_baz2 = parse_query_string('BAZ baz', Album)
bar_bar = OrQuery((parse_query_string('BAR bar1', Album)[0],
parse_query_string('BAR bar2', Album)[0]))
self.assertEqual(spl._unmatched_playlists, {
('foo', foo_foo, (None, None)),
('baz', baz_baz, baz_baz2),
('bar', (None, None), (bar_bar, None)),
})
def test_build_queries_with_sorts(self):
spl = SmartPlaylistPlugin()
config['smartplaylist']['playlists'].set([
{'name': 'no_sort',
'query': 'foo'},
{'name': 'one_sort',
'query': 'foo year+'},
{'name': 'only_empty_sorts',
'query': ['foo', 'bar']},
{'name': 'one_non_empty_sort',
'query': ['foo year+', 'bar']},
{'name': 'multiple_sorts',
'query': ['foo year+', 'bar genre-']},
{'name': 'mixed',
'query': ['foo year+', 'bar', 'baz genre+ id-']}
])
spl.build_queries()
sorts = {name: sort
for name, (_, sort), _ in spl._unmatched_playlists}
asseq = self.assertEqual # less cluttered code
sort = FixedFieldSort # short cut since we're only dealing with this
asseq(sorts["no_sort"], NullSort())
asseq(sorts["one_sort"], sort('year'))
asseq(sorts["only_empty_sorts"], None)
asseq(sorts["one_non_empty_sort"], sort('year'))
asseq(sorts["multiple_sorts"],
MultipleSort([sort('year'), sort('genre', False)]))
asseq(sorts["mixed"],
MultipleSort([sort('year'), sort('genre'), sort('id', False)]))
def test_matches(self):
spl = SmartPlaylistPlugin()
a = MagicMock(Album)
i = MagicMock(Item)
self.assertFalse(spl.matches(i, None, None))
self.assertFalse(spl.matches(a, None, None))
query = Mock()
query.match.side_effect = {i: True}.__getitem__
self.assertTrue(spl.matches(i, query, None))
self.assertFalse(spl.matches(a, query, None))
a_query = Mock()
a_query.match.side_effect = {a: True}.__getitem__
self.assertFalse(spl.matches(i, None, a_query))
self.assertTrue(spl.matches(a, None, a_query))
self.assertTrue(spl.matches(i, query, a_query))
self.assertTrue(spl.matches(a, query, a_query))
def test_db_changes(self):
spl = SmartPlaylistPlugin()
nones = None, None
pl1 = '1', ('q1', None), nones
pl2 = '2', ('q2', None), nones
pl3 = '3', ('q3', None), nones
spl._unmatched_playlists = {pl1, pl2, pl3}
spl._matched_playlists = set()
spl.matches = Mock(return_value=False)
spl.db_change(None, "nothing")
self.assertEqual(spl._unmatched_playlists, {pl1, pl2, pl3})
self.assertEqual(spl._matched_playlists, set())
spl.matches.side_effect = lambda _, q, __: q == 'q3'
spl.db_change(None, "matches 3")
self.assertEqual(spl._unmatched_playlists, {pl1, pl2})
self.assertEqual(spl._matched_playlists, {pl3})
spl.matches.side_effect = lambda _, q, __: q == 'q1'
spl.db_change(None, "matches 3")
self.assertEqual(spl._matched_playlists, {pl1, pl3})
self.assertEqual(spl._unmatched_playlists, {pl2})
def test_playlist_update(self):
spl = SmartPlaylistPlugin()
i = Mock(path=b'/tagada.mp3')
i.evaluate_template.side_effect = \
lambda pl, _: pl.replace(b'$title', b'ta:ga:da').decode()
lib = Mock()
lib.replacements = CHAR_REPLACE
lib.items.return_value = [i]
lib.albums.return_value = []
q = Mock()
a_q = Mock()
pl = b'$title-my<playlist>.m3u', (q, None), (a_q, None)
spl._matched_playlists = [pl]
dir = bytestring_path(mkdtemp())
config['smartplaylist']['relative_to'] = False
config['smartplaylist']['playlist_dir'] = py3_path(dir)
try:
spl.update_playlists(lib)
except Exception:
rmtree(syspath(dir))
raise
lib.items.assert_called_once_with(q, None)
lib.albums.assert_called_once_with(a_q, None)
m3u_filepath = path.join(dir, b'ta_ga_da-my_playlist_.m3u')
self.assertExists(m3u_filepath)
with open(syspath(m3u_filepath), 'rb') as f:
content = f.read()
rmtree(syspath(dir))
self.assertEqual(content, b'/tagada.mp3\n')
class SmartPlaylistCLITest(_common.TestCase, TestHelper):
def setUp(self):
self.setup_beets()
self.item = self.add_item()
config['smartplaylist']['playlists'].set([
{'name': 'my_playlist.m3u',
'query': self.item.title},
{'name': 'all.m3u',
'query': ''}
])
config['smartplaylist']['playlist_dir'].set(py3_path(self.temp_dir))
self.load_plugins('smartplaylist')
def tearDown(self):
self.unload_plugins()
self.teardown_beets()
def test_splupdate(self):
with self.assertRaises(UserError):
self.run_with_output('splupdate', 'tagada')
self.run_with_output('splupdate', 'my_playlist')
m3u_path = path.join(self.temp_dir, b'my_playlist.m3u')
self.assertExists(m3u_path)
with open(syspath(m3u_path), 'rb') as f:
self.assertEqual(f.read(), self.item.path + b"\n")
remove(syspath(m3u_path))
self.run_with_output('splupdate', 'my_playlist.m3u')
with open(syspath(m3u_path), 'rb') as f:
self.assertEqual(f.read(), self.item.path + b"\n")
remove(syspath(m3u_path))
self.run_with_output('splupdate')
for name in (b'my_playlist.m3u', b'all.m3u'):
with open(path.join(self.temp_dir, name), 'rb') as f:
self.assertEqual(f.read(), self.item.path + b"\n")
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
import streamlit as st
import pandas as pd
import torch
import pytorch_model_summary as pms
from utils import CompTwo
from st_utils import load_model_v2, load_model_v3, load_model_v4, load_model_v5
st.set_page_config(layout="wide")
@st.cache()
def get_bias(model, class_ref):
bias = model.anime_bias.weight.squeeze()
idxs = bias.argsort()[:10]
bot = [class_ref['title'][i] for i in idxs]
idxs = bias.argsort(descending=True)[:10]
top = [class_ref['title'][i] for i in idxs]
return top, bot
if __name__ == '__main__':
nav = st.sidebar.radio("Page Selection", ['Landing', 'Comparison'])
model, class_ref = load_model_v5()
username_mapping = {x: i for i, x in enumerate(class_ref['username'].items)}
anime_mapping = {x: i for i, x in enumerate(class_ref['title'].items)}
if nav == 'Landing':
st.title("MyAnimeList PMF")
st.write("This is a quick-and-dirty Anime Recommender built off MyAnimeList's API and data.")
st.write("The recommender is implemented using Probabilistic Matrix Factorization.")
top, bot = get_bias(model, class_ref)
st.header("Model Details")
summary = pms.summary(model, (torch.rand([64, 2])*1000).int(), show_input=True, max_depth=None, show_parent_layers=True)
st.text(summary)
n_users, n_animes = len(class_ref['username']), len(class_ref['title'])
st.write(F"This model was trained on the data of {n_users} random users")
st.header("Some Results")
st.write("Below are the top and bottom 10 anime by bias")
st.write("This is essentially what the model has learned to be the best and worst regardless of watcher")
cols = st.columns(2)
cols[0].subheader("Top 10")
cols[0].table(top)
cols[1].subheader("Bottom 10")
cols[1].table(bot)
elif nav == 'Comparison':
user_form = st.form("comp")
cols = user_form.columns(2)
u1 = cols[0].selectbox("User 1", options=[""]+list(username_mapping.keys()))
u2 = cols[1].selectbox("User 2", options=[""]+list(username_mapping.keys()))
submitted = user_form.form_submit_button("Submit")
if submitted:
c2 = CompTwo(u1, u2, model, username_mapping, anime_mapping)
c2.gen_combined()
user_cols = user_form.columns(2)
user_cols[0].subheader(u1)
user_cols[0].write(F"Recommendations for {u1} from the shows that {u2} has watched")
recs = c2.show_missing_preds(0).sort_values(ascending=False)
recs.name = 'Value'
user_cols[0].table(recs)
user_cols[1].subheader(u2)
user_cols[1].write(F"Recommendations for {u2} from the shows that {u1} has watched")
recs = c2.show_missing_preds(1).sort_values(ascending=False)
recs.name = 'Value'
user_cols[1].table(recs)
|
import re
def validate_name(string):
USER_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
if USER_RE.match(string):
return True
return False
def validate_password(string):
USER_RE = re.compile(r"^.{3,20}$")
if USER_RE.match(string):
return True
return False
def validate_email(string):
    USER_RE = re.compile(r"^[\S]+@[\S]+\.[\S]+$")
    if USER_RE.match(string):
        return True
    return False
#Validation tests
print(validate_name("barny smith"))    # False: spaces are not allowed
print(validate_name("barnysmith"))     # True
print(validate_password("&"))          # False: shorter than 3 characters
print(validate_email("scrub@zoned"))   # False: no dot in the domain part
if validate_name("barny") and validate_password("nope"):
    print("yes")
|
# coding: utf-8
"""Base classes for content scraping / visiting."""
from __future__ import print_function, unicode_literals
from parsimonious.nodes import NodeVisitor
from .data import Data
from .issues import ISSUES
class Recorder(object):
"""Records information in HTML or parsed HTML."""
def initialize_tracker(self, offset=0, data=None):
"""Setup common variables.
offset - The character position that this fragment starts at.
issues - The collected issues found during extraction.
"""
self.offset = offset
self.data = data or Data()
self.issues = []
def add_issue(self, issue_slug, processed, **issue_args):
"""Add an issue for a given processed node."""
assert issue_slug in ISSUES
self.issues.append(
(issue_slug, processed.start, processed.end, issue_args))
def add_raw_issue(self, issue):
"""Add an issue in tuple (slug, start, end, args) format."""
issue_slug, start, end, issue_args = issue
assert issue_slug in ISSUES
assert isinstance(start, int)
assert isinstance(end, int)
assert end >= start
assert hasattr(issue_args, 'keys')
self.issues.append(issue)
class Visitor(Recorder, NodeVisitor):
"""Base class for node vistors.
The Parsimonious NodeVisitor works from leaf nodes up to trunk nodes.
"""
def __init__(self, offset=0, data=None):
super(Visitor, self).__init__()
self.initialize_tracker(offset=offset, data=data)
class Extractor(Recorder):
"""Extracts information from parsed HTML.
The Extractor works from trunk nodes to leaf nodes.
"""
extractor_name = 'Extractor'
def initialize_extractor(self, elements=None, debug=False, **kwargs):
"""Setup extractor plus common variables."""
self.initialize_tracker(**kwargs)
self.elements = elements or []
self.debug = debug
def extract(self):
"""Extract data from a parsed list of elements.
This is used when you need sibling processing of a tree, rather than
subtree processing during parsing.
"""
self.setup_extract()
state = 'begin'
for element in self.elements:
state = self.walk(state, element)
return self.extracted_data()
def setup_extract(self):
"""Setup the extraction state variables."""
raise NotImplementedError(
'.setup_extract method is not implemented') # pragma: no cover
def walk(self, state, element):
"""
Walk a parsed element.
Keyword Attributes:
state - A string specifying the current state
element - The element being walked
Return is the new state
"""
old_state = state
state, recurse = self.entering_element(state, element)
if state != old_state:
self.report_transition('entering', element, old_state, state)
if recurse and hasattr(element, 'children'):
for child in element.children:
state = self.walk(state, child)
old_state = state
state = self.leaving_element(state, element)
if state != old_state:
self.report_transition('leaving', element, old_state, state)
return state
def report_transition(self, phase, element, old_state, new_state):
"""Report transitions, for debugging."""
if self.debug: # pragma: no cover
print('In {}, state changed from "{}" to "{}" when {} {}'.format(
self.extractor_name, old_state, new_state, phase, element))
def entering_element(self, state, element):
"""
Extract data (state or information) from entering an element.
Return is a tuple of:
- The next state
- True if the element's children should be walked for extraction.
"""
raise NotImplementedError(
'.entering_element method is not implemented') # pragma: no cover
def leaving_element(self, state, element):
"""
Extract data (state or information) from leaving an element.
Return is the next state.
"""
raise NotImplementedError(
'.leaving_element method is not implemented') # pragma: no cover
def extracted_data(self):
"""
Finalize the extracted data.
Return is a dictionary of data items.
"""
raise NotImplementedError(
'.extracted_data method is not implemented') # pragma: no cover
def is_tag(self, element, tag):
"""Return True if element matches the tag."""
return getattr(element, 'tag', None) == tag
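# A minimal sketch of the walk() protocol above (illustrative only; it assumes
# elements expose ``tag`` and ``children`` like the parsed HTML nodes this
# module consumes): entering_element picks the next state and whether to
# recurse, and leaving_element fires on the way back up.
class _TagCounter(Extractor):
    """Illustrative extractor that counts <p> tags in an element list."""
    extractor_name = 'TagCounter'
    def setup_extract(self):
        self.count = 0
    def entering_element(self, state, element):
        if self.is_tag(element, 'p'):
            self.count += 1
        return state, True  # keep the state, recurse into children
    def leaving_element(self, state, element):
        return state
    def extracted_data(self):
        return {'p_count': self.count}
# usage: ex = _TagCounter(); ex.initialize_extractor(elements=parsed_elements)
# ex.extract()  # -> {'p_count': ...}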
|
from ..extensions import marshmallow
from .answer import AnswerSchema
from marshmallow import fields
class UserAnswerSchema(marshmallow.Schema):
id = fields.Str()
    answers = fields.Nested(AnswerSchema, many=True, exclude=[u'updated', u'user'])
|
"""
Base class of all configurators, must be extended.
"""
class UConfiguratorBase(object):
    def getParams(self):
        """
        Returns a dictionary of the known variables; must be overridden.
        """
        pass
    def run(self, options, config, section, params):
        """
        Needs to be overridden to provide an actual implementation.
        Parameters: <parsed command line options> <parsed configuration>
        <configuration section of this module> <loaded parameters>
        """
        pass
def getTemplate(self, section):
"""
Returns a template of configurator's configuration with all the
available parameters.
"""
ret = "[" + section + "]\n\n"
sortedKeys = sorted(self.getParams().keys())
for param in sortedKeys:
value = self.getParams().get(param)
            if value is None:
value = ''
splitVal = value.split("##", 1)
if len(splitVal) > 1:
value = splitVal[0]
ret += "# "
if value == "__REQUIRED":
ret += "(REQUIRED) "
ret += splitVal[1] + "\n"
if value == '__NONE' or value == "__REQUIRED":
param = "#"+param
value = ''
ret += param + "=" + value + "\n\n"
return ret
#Useful methods for (nearly) all configurators
def prepareCredAndTrustDict(self, prefix, params):
secProps = {
prefix+'.credential.path' : params[prefix+'.credential.path'],
prefix+'.credential.password' : params[prefix+'.credential.password'],
prefix+'.credential.keyPath' : params[prefix+'.credential.keyPath'],
prefix+'.truststore.type' : params[prefix+'.truststore.type'],
prefix+'.truststore.allowProxy' : params[prefix+'.truststore.allowProxy'],
}
tsType = params[prefix+'.truststore.type']
if tsType == 'keystore':
secProps[prefix+'.truststore.keystorePath'] = params[prefix+'.truststore.location']
secProps[prefix+'.truststore.keystorePassword'] = params[prefix+'.truststore.keystorePassword']
secProps[prefix+'.truststore.crlLocations.1'] = params[prefix+'.truststore.crlLocation']
elif tsType == 'openssl':
secProps[prefix+'.truststore.opensslPath'] = params[prefix+'.truststore.location']
elif tsType == 'directory':
secProps[prefix+'.truststore.directoryLocations.1'] = params[prefix+'.truststore.location']
secProps[prefix+'.truststore.crlLocations.1'] = params[prefix+'.truststore.crlLocation']
return secProps
def prepareStartupDict(self, prefix, params):
return {
'JAVA' : params[prefix+'.javaCommand'],
'MEM' : '"-Xmx' + params[prefix+'.memory'] + '"',
'JMX_PORT' : '"' + params[prefix+'.jmxPort'] + '"'
}
def addCredAndTrustParams(self, prefix, params):
params[prefix+'.credential.path'] = "__REQUIRED##Path to the file with the credential"
params[prefix+'.credential.password'] = "__NONE##Password of the credential (or leave undefined if not encrypted)"
params[prefix+'.credential.keyPath'] = "__REQUIRED##Path to the pem file with the private key, used if the credential was given as a PEM certificate"
params[prefix+'.truststore.type'] = "__REQUIRED##Type of truststore: keystore, openssl or directory"
params[prefix+'.truststore.location'] = "__REQUIRED##Location of the truststore dependent on its type: path to the keystore file, path to openssl directory or wildcard expression with CAs locations"
params[prefix+'.truststore.crlLocation'] = "##Location of CRL files given as wildcard expression. Not used for openssl truststore."
params[prefix+'.truststore.keystorePassword'] = "##Password of the keystore if it is used as a truststore"
params[prefix+'.truststore.allowProxy'] = 'ALLOW##Whether to accept proxy certificates (ALLOW or DENY)'
return params
def addStartupParams(self, prefix, params, mem):
params[prefix+'.jmxPort'] = '##JMX command port or leave empty to disable JMX'
params[prefix+'.memory'] = mem+'m##Maximum Java memory to be used for the process'
params[prefix+'.javaCommand'] = 'java##Path to java command'
return params
# the rest is used internally, no need to bother
def loadParams(self, options, config, section):
paramsDict = self._loadParamsNoCheck(options, config, section)
self._stripValuesExternalQuotationMarks(paramsDict)
self._checkParams(paramsDict, section)
return paramsDict
def _stripValuesExternalQuotationMarks(self, paramsDict):
for paramKey in paramsDict:
paramVal = paramsDict[paramKey].strip()
if len(paramVal) > 2 and paramVal.startswith('"') and paramVal.endswith('"'):
paramVal = paramVal[1:-1]
paramsDict[paramKey] = paramVal
def _loadParamsNoCheck(self, options, config, section):
paramsList = config.items(section)
paramsDict = dict(paramsList)
return paramsDict
def _checkParams(self, paramsDict, section):
from ConfigParser import NoOptionError
for param in self.getParams().keys():
value = self.getParams().get(param)
if value.startswith("__REQUIRED") and not param in paramsDict.keys():
raise NoOptionError(param, section)
return paramsDict
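# Illustrative only: a toy configurator showing the "default##description"
# value convention that getTemplate() renders ("__REQUIRED##..." marks a
# mandatory parameter, "__NONE##..." a commented-out optional one).
class _DemoConfigurator(UConfiguratorBase):
    def getParams(self):
        return {
            'demo.host': 'localhost##Host to bind to',
            'demo.credential.path': '__REQUIRED##Path to the credential file',
            'demo.jmxPort': '__NONE##JMX port (leave unset to disable JMX)',
        }
# print(_DemoConfigurator().getTemplate('demo'))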
|
"""
La idea es crear un script que tenga una funcion que me de Rc'\D' y R0'/D'
dandole como entrada beta, para toda i posible. Luego, dar un intervalo para
Rc/Ro y para R0/D y de la salida que me dio ver cuales valores de i dan un R0
y Rc que caigan en el intervalo, y guardar esos valores de i en un archivo, y
graficar despues
"""
#import libprop
import numpy as np
import argparse
import matplotlib.pyplot as plt
from scipy.optimize import bisect, leastsq
from equation6 import Shell
import json
from scipy.interpolate import interp1d
"""
* First step: Set interval for R0 and Rc
* Second step: Create the bowshock NA
* Third step: Measure Ro and Rc NA
* 2nd A step: Read the file with de R0-Rc curves
Repeat second A -and third step- for all possible inclinations (End of first test)
* Fourth step: Check if R0 and Rc fits into the interval (Possible results: Yes or No)
* Fifth step: if forurth is true, graph the corresponding b & i, if false, don't do anything
* Sixth step: Repeat from second to fifth step for many i's and b's
"""
"""
set interval:
For LV3, for example:
R0/D = 0.336 +- 1e-3
Rc/R0 = 2.0342262 +- 0.139
And so on for the rest of the proplyds
"""
class Proplyd(object):
def __init__(self, name, beta=(0.01, 0.001), inc=(30.0, 15.0), color="r"):
self.name = name
self.beta, self.dbeta = beta
self.inc, self.dinc = inc
self.color = color
shelldata = json.load(open("rc-r0.json"))
proplyds = [
# inclination from Henney et al 2002
Proplyd("LV2", beta=(0.126, 0.01), inc=(40.0, 10.0), color="r"),
Proplyd("LV3", beta=(0.061, 0.015), inc=(45.0, 15.0), color="g"),
Proplyd("LV4", beta=(0.126, 0.01), inc=(40.0, 10.0), color="b"),
Proplyd("LV5", beta=(0.126, 0.01), inc=(40.0, 10.0), color="y"),
Proplyd("177-341", beta=(0.126, 0.01), inc=(40.0, 10.0), color="c"),
Proplyd("167-328", beta=(0.126, 0.01), inc=(40.0, 10.0), color="m"),
]
#input of observational measurements of R0/D
proplyd = ["LV2","LV3","LV4","LV5","177-341","167-328"]
color = ['r','g','b','y','c','m']
obs_beta = [0.126,0.061,0.040,0.073,0.135, None]
del_beta = [0.010,0.015,0.007,0.014,0.021,None]
obs_inc = [60,45,45,45,60, None]
mirror_inc = [30,45,45,45,30,None] #in the GAH 2002 data the reported inclination is the complementary angle of
#the inclinations in my model
del_inc = [7,15,15,15,7, None]
# Will's original changes - now superseded
# obs_beta = [0.126,0.061,0.040,0.073,0.135, None]
# obs_inc = [40,45,45,45,10, None]
# del_inc = [10,15,15,15,5, None]
R0m = np.array([0.2385,0.336,0.188,0.2125,0.132,0.096])
#input of observational inputs for Rc and errorbars
Rcm = np.array([1.468,2.034,1.987,1.501,1.405,1.297])
Rcd = np.array([0.194,0.139,0.072,0.146,0.118,0.269])
ylow = Rcm-Rcd
yhigh = Rcm+Rcd
R0_epsilon = 0.1 # Assume 10% error in radii
for j,p in enumerate(proplyd):
    print(p)
label = p
# Plot the beta-inc derived from HA98 parameters
if obs_beta[j] is not None:
plt.errorbar(obs_inc[j], obs_beta[j], xerr=del_inc[j],yerr = del_beta[j], fmt=color[j]+'o', label="")
plt.errorbar(mirror_inc[j], obs_beta[j], xerr=del_inc[j],yerr = del_beta[j], fmt=color[j]+'D', label="")
#Also plot for the complementary inclinations
for beta, beta_data in shelldata.items():
beta = float(beta)
r0 = np.array(beta_data["R0'"])
rc = np.array(beta_data["Rc"])/r0
inc = np.array(beta_data['inc'])
# Select all points consistent with error bars
m1 = abs(rc - Rcm[j]) <= Rcd[j] # Rc'/R0' axis
m2 = abs(r0 - R0m[j]) <= R0_epsilon*R0m[j] # R0'/D' axis
inc_good = inc[m1 & m2] # Points must satisfy both conditions
ngood = len(inc_good)
beta_good = np.ones((ngood,))*beta
if ngood > 0:
plt.plot(np.degrees(inc_good), beta_good, color[j]+'.', label=label, alpha=1.0)
label = ""
# Also plot points that only agree with R0'/D'
inc_good = inc[m2]
ngood = len(inc_good)
beta_good = np.ones((ngood,))*beta
if ngood > 0:
plt.plot(np.degrees(inc_good), beta_good, color[j]+'.', label="", alpha=0.1)
plt.yscale('log')
plt.grid()
plt.xlim(0,90)
plt.ylim(0.001 - 1e-4, 0.16 + 1e-4)
plt.xlabel("i(deg)")
plt.ylabel("beta")
plt.legend(loc="best")
plt.title("i vs beta plausible for proplyds")
plt.savefig("i-beta-will.pdf")
|
# Copyright (C) 2014-2016 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
# Originally contributed by Check Point Software Technologies, Ltd.
import ConfigParser
class Config:
def __init__(self, cfg):
"""@param cfg: configuration file."""
config = ConfigParser.ConfigParser(allow_no_value=True)
config.read(cfg)
for section in config.sections():
for name, raw_value in config.items(section):
try:
value = config.getboolean(section, name)
except ValueError:
try:
value = config.getint(section, name)
except ValueError:
value = config.get(section, name)
setattr(self, name, value)
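# A minimal sketch of the boolean -> int -> string coercion cascade above
# (illustrative only; writes a throwaway ini file):
if __name__ == "__main__":
    with open("demo.conf", "w") as f:
        f.write("[demo]\nenabled = yes\ntimeout = 30\nname = cuckoo\n")
    c = Config("demo.conf")
    assert c.enabled is True
    assert c.timeout == 30
    assert c.name == "cuckoo"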
|
import logging.config
from django.utils.log import DEFAULT_LOGGING
LOGGING_CONFIG = None
DEFAULT_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'console': {
'format': '%(asctime)s %(name)-12s %(funcName)s %(levelname)-8s %(message)s',
},
'django.server': DEFAULT_LOGGING['formatters']['django.server'],
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'console',
},
'django.server': DEFAULT_LOGGING['handlers']['django.server'],
},
'loggers': {
'django.server': DEFAULT_LOGGING['loggers']['django.server'],
# application logger
'metrogas': {
'level': 'DEBUG',
'handlers': ['console']
},
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console'],
},
# root logger
'': {
'level': 'ERROR',
'handlers': ['console'],
},
},
}
logging.config.dictConfig(DEFAULT_LOGGING)
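# Illustrative usage of the configuration above: loggers under the 'metrogas'
# namespace emit from DEBUG up, while django.db.backends and the root logger
# only surface ERROR and above.
# logging.getLogger('metrogas.billing').debug('visible on the console')
# logging.getLogger('somewhere.else').info('suppressed (root level is ERROR)')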
|
# Stream limited number of tweets, filter them (language,location, etc) #
# Passes filtered tweets to sentiment.py #
import setup
import csv
import json
import tweepy
from tweepy import StreamListener
class Streamer(StreamListener):
def __init__(self):
super().__init__()
self.counter = 0
self.limit = 10
self.statuses = []
def on_status(self, status):
        # skip retweets (flagged or manual "RT @") and non-English tweets
        if status.retweeted or "RT @" in status.text or status.lang != "en":
            return
if len(self.statuses) < self.limit:
self.statuses.append(status)
print(len(self.statuses))
if len(self.statuses) == self.limit:
with open("/Users/Ekene/Desktop/Yang_Tweets.csv", "w") as file:
writer = csv.writer(file)
for status in self.statuses:
writer.writerow([status.user.screen_name, status.text, status.created_at, status.user.location,
status.id_str])
print(self.statuses)
print("*** Limit of " + str(self.limit) + " met ***")
return False
        if len(self.statuses) > self.limit:
            # safety net; normally unreachable because we return False at the limit
            return False
streaming = tweepy.Stream(auth=setup.api.auth, listener=Streamer())
streaming.filter(track=["Trump"])  # blocks until the listener disconnects
|
# name: Breann Nielsen
# date: 12/4/2020
# description: text-based adventure game
# Global imports
import random
import sys
import time
def print1by1(text, delay=0.0001):
for c in text:
sys.stdout.write(c)
sys.stdout.flush()
time.sleep(delay)
    print()
def displayIntro():
time.sleep(1)
print1by1("It has been many years since the vault was sealed...")
time.sleep(2)
print1by1(" Since the bombs fell.")
time.sleep(2)
print1by1(" You wake up in a disoriented state, seemingly trapped within a windowed coffin.")
time.sleep(2)
print()
print1by1("How will you escape?")
time.sleep(2)
print()
def start_room():
start_room_options = ["1","2","3"]
action = ""
while action not in start_room_options:
print("You have three options. What would you like to do?: 1) inspect window, 2) inspect door latch, 3) inspect overhead wires.")
action = str(input("Enter option number: "))
print("You have selected " + action)
if action == start_room_options[0]:
incorrect01()
elif action == start_room_options[1]:
incorrect02()
elif action == start_room_options[2]:
correct01()
def incorrect01():
print("It's a thick, solid glass. I don't think I can break that.")
time.sleep(2)
def incorrect02():
print("It's sealed tight. Won't budge.")
time.sleep(2)
def correct01():
    # the reply is captured but not yet used; the follow-up scene is stubbed out below
    answer = input("These wires might do something. Should I pull on them? Y/N ")
    time.sleep(2)
# print("You inspect the door latch.")
# time.sleep(2)
# print("It's sealed tight. Won't budge.")
# correctAction = "inspect overhead wires"
# if chosenAction == str(correctAction):
# print("These wires might do something. Should I pull on them?")
# Main program
displayIntro()
start_room()
|
# If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.
#
# Find the sum of all the multiples of 3 or 5 below 1000.
def findmultiplessum(lower, upper):
    sumval = 0
    for i in range(lower, upper):
        if i % 3 == 0 or i % 5 == 0:
            sumval += i
    return sumval
lower = 1
upper = 1000
sumval=findmultiplessum(lower,upper)
print(sumval)
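# Cross-check (illustrative): by inclusion-exclusion the same sum equals
# S(3) + S(5) - S(15), where S(k) is the sum of the multiples of k below the limit.
def sum_divisible_by(k, limit):
    m = (limit - 1) // k
    return k * m * (m + 1) // 2
assert sumval == sum_divisible_by(3, 1000) + sum_divisible_by(5, 1000) - sum_divisible_by(15, 1000)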
|
# -*- mode: python -*-
block_cipher = None
# windows cmd:
# pyinstaller --clean -y musclex_win32.spec 2>&1 | findstr "..*" | findstr /v "api-ms-win"
a = Analysis(['musclex\\main.py'],
pathex=['.'],
binaries=[],
datas=[('musclex\\tests\\testImages', 'testImages'),('musclex\\tests\\testResults', 'testResults'),
('musclex\\tests\\test_images', 'test_images'),('musclex\\tests\\test_logs', 'test_logs')],
hiddenimports=['PyMca5'],
hookspath=['hooks'],
runtime_hooks=[],
excludes=['tcl', 'zmq', 'IPython'],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher)
# analysis for launcher
la = Analysis(['musclex\\launcher.py'],
pathex=['.'],
binaries=[],
datas=[],
hiddenimports=['PyMca5'],
hookspath=['hooks'],
runtime_hooks=[],
excludes=['tcl', 'zmq', 'IPython'],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher)
MERGE((a, 'main', 'musclex'),
(la, 'launcher', 'musclex-launcher'))
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
exclude_binaries=True,
name='musclex-main',
debug=False,
strip=False,
upx=True,
console=True )
lpyz = PYZ(la.pure, la.zipped_data,
cipher=block_cipher)
lexe = EXE(lpyz,
la.scripts,
exclude_binaries=True,
name='musclex-launcher',
debug=False,
strip=False,
upx=True,
console=False )
coll = COLLECT(exe, lexe,
a.binaries, la.binaries,
a.zipfiles, la.zipfiles,
a.datas, la.datas,
strip=False,
upx=True,
name='musclex')
|
# -*- coding: utf-8 -*-
"""
##############################################################################
The calculation of 3D RDF descriptors. You can get 180 molecular
descriptors. You can freely use and distribute it. If you have
any problems, please feel free to contact us!
Authors: Dongsheng Cao and Yizeng Liang.
Date: 2012.11.13
Email: oriental-cds@163.com
##############################################################################
"""
import math
import numpy
from .AtomProperty import GetRelativeAtomicProperty
from .GeoOpt import _ReadCoordinates
Version = 1.0
#########################################################################
###set the parameters in RDF equation
_beta = 100
#########################################################################
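# For each probe radius R the descriptors below evaluate
#     g(R) = sum_{i<j} w_i * w_j * exp(-_beta * (R - r_ij)**2)
# where r_ij is the interatomic distance and w is the weighting scheme
# (unity, charge, mass, polarizability, electronegativity or VDW volume).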
def _GetR(n=30):
"""
#################################################################
Obtain the parameter R in RDF equation.
#################################################################
"""
R = []
for i in range(2, n + 2):
R.append(float(i * 0.5))
return R
def GetAtomDistance(x, y):
"""
#################################################################
    Obtain the Euclidean distance based on the
coordinates of two atoms
#################################################################
"""
temp = [math.pow(x[0] - y[0], 2), math.pow(x[1] - y[1], 2), math.pow(x[2] - y[2], 2)]
res = math.sqrt(sum(temp))
return res
def GetGementricalDistanceMatrix(CoordinateList):
"""
#################################################################
Obtain the distance matrix of a molecule based
on coordinate list
#################################################################
"""
NAtom = len(CoordinateList)
    DistanceMatrix = numpy.zeros((NAtom, NAtom))  # scipy.zeros was removed; numpy provides it
for i in range(NAtom - 1):
for j in range(i + 1, NAtom):
DistanceMatrix[i, j] = GetAtomDistance(CoordinateList[i], CoordinateList[j])
DistanceMatrix[j, i] = DistanceMatrix[i, j]
return DistanceMatrix
def CalculateUnweightRDF(ChargeCoordinates):
"""
#################################################################
The calculation of unweighted radial distribution
function (RDF) descriptors.
#################################################################
"""
R = _GetR(n=30)
temp = []
# ChargeCoordinates=_ReadCoordinates('temp.arc')
for i in ChargeCoordinates:
# if i[0]!='H':
temp.append([float(i[1]), float(i[2]), float(i[3])])
DM = GetGementricalDistanceMatrix(temp)
nAT = len(temp)
RDFresult = {}
for kkk, Ri in enumerate(R):
res = 0.0
for j in range(nAT - 1):
for k in range(j + 1, nAT):
res = res + math.exp(-_beta * math.pow(Ri - DM[j, k], 2))
RDFresult['RDF' + 'U' + str(kkk + 1)] = round(res, 3)
return RDFresult
def CalculateChargeRDF(ChargeCoordinates):
"""
#################################################################
The calculation of radial distribution function
(RDF) descriptors based on atomic charge.
#################################################################
"""
R = _GetR(n=30)
temp = []
Charge = []
# ChargeCoordinates=_ReadCoordinates('temp.arc')
for i in ChargeCoordinates:
# if i[0]!='H':
temp.append([float(i[1]), float(i[2]), float(i[3])])
Charge.append(float(i[4]))
DM = GetGementricalDistanceMatrix(temp)
nAT = len(temp)
RDFresult = {}
for kkk, Ri in enumerate(R):
res = 0.0
for j in range(nAT - 1):
for k in range(j + 1, nAT):
res = res + Charge[j] * Charge[k] * math.exp(-_beta * math.pow(Ri - DM[j, k], 2))
RDFresult['RDF' + 'C' + str(kkk + 1)] = round(res, 3)
return RDFresult
def CalculateMassRDF(mol, ChargeCoordinates):
"""
#################################################################
The calculation of radial distribution function (RDF)
descriptors based on atomic mass.
#################################################################
"""
mol.addh()
mass = [i.atomicmass for i in mol.atoms]
R = _GetR(n=30)
temp = []
# ChargeCoordinates=_ReadCoordinates('temp.arc')
for i in ChargeCoordinates:
# if i[0]!='H':
temp.append([float(i[1]), float(i[2]), float(i[3])])
DM = GetGementricalDistanceMatrix(temp)
nAT = len(temp)
RDFresult = {}
for kkk, Ri in enumerate(R):
res = 0.0
for j in range(nAT - 1):
for k in range(j + 1, nAT):
res = res + mass[j] * mass[k] * math.exp(-_beta * math.pow(Ri - DM[j, k], 2))
        RDFresult['RDF' + 'M' + str(kkk + 1)] = round(res / 144, 3)  # 144 = 12**2, presumably the carbon-mass normalization
return RDFresult
def CalculatePolarizabilityRDF(ChargeCoordinates):
"""
#################################################################
The calculation of radial distribution function
(RDF) descriptors based on atomic polarizability.
#################################################################
"""
R = _GetR(n=30)
temp = []
polarizability = []
# ChargeCoordinates=_ReadCoordinates('temp.arc')
for i in ChargeCoordinates:
# if i[0]!='H':
temp.append([float(i[1]), float(i[2]), float(i[3])])
polarizability.append(GetRelativeAtomicProperty(i[0], 'alapha'))
DM = GetGementricalDistanceMatrix(temp)
nAT = len(temp)
RDFresult = {}
for kkk, Ri in enumerate(R):
res = 0.0
for j in range(nAT - 1):
for k in range(j + 1, nAT):
res = res + polarizability[j] * polarizability[k] * math.exp(-_beta * math.pow(Ri - DM[j, k], 2))
RDFresult['RDF' + 'P' + str(kkk + 1)] = round(res, 3)
return RDFresult
def CalculateSandersonElectronegativityRDF(ChargeCoordinates):
"""
#################################################################
The calculation of radial distribution function
(RDF) descriptors based on atomic electronegativity.
#################################################################
"""
R = _GetR(n=30)
temp = []
EN = []
# ChargeCoordinates=_ReadCoordinates('temp.arc')
for i in ChargeCoordinates:
# if i[0]!='H':
temp.append([float(i[1]), float(i[2]), float(i[3])])
EN.append(GetRelativeAtomicProperty(i[0], 'En'))
DM = GetGementricalDistanceMatrix(temp)
nAT = len(temp)
RDFresult = {}
for kkk, Ri in enumerate(R):
res = 0.0
for j in range(nAT - 1):
for k in range(j + 1, nAT):
res = res + EN[j] * EN[k] * math.exp(-_beta * math.pow(Ri - DM[j, k], 2))
RDFresult['RDF' + 'E' + str(kkk + 1)] = round(res, 3)
return RDFresult
def CalculateVDWVolumeRDF(ChargeCoordinates):
"""
#################################################################
The calculation of radial distribution function
(RDF) descriptors based on atomic van der Waals volume.
#################################################################
"""
R = _GetR(n=30)
temp = []
VDW = []
# ChargeCoordinates=_ReadCoordinates('temp.arc')
for i in ChargeCoordinates:
# if i[0]!='H':
temp.append([float(i[1]), float(i[2]), float(i[3])])
VDW.append(GetRelativeAtomicProperty(i[0], 'V'))
DM = GetGementricalDistanceMatrix(temp)
nAT = len(temp)
RDFresult = {}
for kkk, Ri in enumerate(R):
res = 0.0
for j in range(nAT - 1):
for k in range(j + 1, nAT):
res = res + VDW[j] * VDW[k] * math.exp(-_beta * math.pow(Ri - DM[j, k], 2))
RDFresult['RDF' + 'V' + str(kkk + 1)] = round(res, 3)
return RDFresult
def GetRDFUnweighed(mol):
"""
#################################################################
    Obtain all unweighted radial distribution function descriptors.
#################################################################
"""
filename = 'temp'
ChargeCoordinates = _ReadCoordinates(filename)
result = CalculateUnweightRDF(ChargeCoordinates)
return result
def GetRDFCharge(mol):
"""
#################################################################
    Obtain all radial distribution function descriptors based
    on the charge weighting scheme.
#################################################################
"""
filename = 'temp'
ChargeCoordinates = _ReadCoordinates(filename)
result = CalculateChargeRDF(ChargeCoordinates)
return result
def GetRDFMass(mol):
"""
#################################################################
    Obtain all radial distribution function descriptors based
    on the mass weighting scheme.
#################################################################
"""
filename = 'temp'
ChargeCoordinates = _ReadCoordinates(filename)
result = CalculateMassRDF(mol, ChargeCoordinates)
return result
def GetRDFPolarizability(mol):
"""
#################################################################
    Obtain all radial distribution function descriptors based
    on the polarizability weighting scheme.
#################################################################
"""
filename = 'temp'
ChargeCoordinates = _ReadCoordinates(filename)
result = CalculatePolarizabilityRDF(ChargeCoordinates)
return result
def GetRDFSandersonElectronegativity(mol):
"""
#################################################################
    Obtain all radial distribution function descriptors based
    on the Sanderson electronegativity weighting scheme.
#################################################################
"""
filename = 'temp'
ChargeCoordinates = _ReadCoordinates(filename)
result = CalculateSandersonElectronegativityRDF(ChargeCoordinates)
return result
def GetRDFVDWVolume(mol):
"""
#################################################################
    Obtain all radial distribution function descriptors based
    on the van der Waals volume weighting scheme.
#################################################################
"""
filename = 'temp'
ChargeCoordinates = _ReadCoordinates(filename)
result = CalculateVDWVolumeRDF(ChargeCoordinates)
return result
def GetRDF(mol):
"""
#################################################################
    Obtain all radial distribution function descriptors based
    on the different weighting schemes.
#################################################################
"""
result = {}
filename = 'temp'
ChargeCoordinates = _ReadCoordinates(filename)
result.update(CalculateUnweightRDF(ChargeCoordinates))
result.update(CalculateChargeRDF(ChargeCoordinates))
result.update(CalculateMassRDF(mol, ChargeCoordinates))
result.update(CalculatePolarizabilityRDF(ChargeCoordinates))
result.update(CalculateSandersonElectronegativityRDF(ChargeCoordinates))
result.update(CalculateVDWVolumeRDF(ChargeCoordinates))
return result
def _GetHTMLDoc():
"""
#################################################################
Write HTML documentation for this module.
#################################################################
"""
import pydoc
pydoc.writedoc('rdf')
############################################################################
|
questions = {
"strong": "Do ye like yer drinks strong?",
"salty": "Do ye like it with a salty tang?",
"bitter": "Are ye a lubber who likes it bitter?",
"sweet": "Would ye like a bit of sweetness with yer poison?",
"fruity": "Are ye one for a fruity finish?",
}
ingredients = {
"strong": ["glug of rum", "slug of whisky", "splash of gin"],
"salty": ["olive on a stick", "salt-dusted rim", "rasher of bacon"],
"bitter": ["shake of bitters", "splash of tonic", "twist of lemon peel"],
"sweet": ["sugar cube", "spoonful of honey", "spash of cola"],
"fruity": ["slice of orange", "dash of cassis", "cherry on top"],
}
#dictionary collecting the customer's yes/no answers
preferences = {}
def custom_order():
    for n in questions:
        # input() returns a string, so compare text rather than the integer 1
        response = input(questions[n] + " (1 = yes, 0 = no) ")
        preferences[n] = response.strip() in ("1", "y", "yes")
if __name__=='__main__':
custom_order()
def construct(preferences):
    drink_order = []
    for n in preferences:
        if preferences[n]:
            drink_order.append(ingredients[n])
    print(drink_order)
if __name__=='__main__':
construct(preferences)
|
import json
import logging
logger = logging.getLogger(__name__)
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.http import HttpResponse
from django.views.generic import (
ListView,
CreateView,
DeleteView,
DetailView,
UpdateView,
RedirectView)
from django.http import HttpResponseRedirect, Http404
from braces.views import LoginRequiredMixin, StaffuserRequiredMixin
from pure_pagination.mixins import PaginationMixin
from ..models import Category, Version
from ..forms import CategoryForm
class JSONResponseMixin(object):
"""A mixin that can be used to render a JSON response. """
def render_to_json_response(self, context, **response_kwargs):
"""
Returns a JSON response, transforming 'context' to make the payload.
"""
return HttpResponse(
self.convert_context_to_json(context),
content_type='application/json',
**response_kwargs)
    def convert_context_to_json(self, context):
        """Convert the context dictionary into a JSON object.
        json.dumps handles the quoting and escaping of category names.
        """
        return json.dumps(
            {str(category.id): category.name
             for category in context['categories']},
            indent=4)
class CategoryMixin(object):
"""Mixin class to provide standard settings for categories."""
model = Category # implies -> queryset = Entry.objects.all()
form_class = CategoryForm
class JSONCategoryListView(CategoryMixin, JSONResponseMixin, ListView):
"""View to get category list as a json object - needed by javascript."""
context_object_name = 'categories'
def dispatch(self, request, *args, **kwargs):
"""Ensure this view is only used via ajax.
:param request: Http request.
:param args: Positional args - passed to base class.
:param kwargs: Keyword args - passed to base class.
"""
if not request.is_ajax():
raise Http404("This is an ajax view, friend.")
return super(ListView, self).dispatch(request, *args, **kwargs)
def render_to_response(self, context, **response_kwargs):
"""Render this version as markdown.
:param context: Context data to use with template.
:type context: dict
:param response_kwargs: A dict of arguments to pass to the renderer.
:type response_kwargs: dict
:returns: A rendered template with mime type application/text.
:rtype: HttpResponse
"""
return self.render_to_json_response(context, **response_kwargs)
def get_queryset(self):
"""Get the queryset for this view.
:returns: A queryset which is filtered to only show approved versions.
:rtype: QuerySet
"""
version_id = self.kwargs['version']
version = get_object_or_404(Version, id=version_id)
qs = Category.objects.filter(project=version.project)
return qs
class CategoryCreateUpdateMixin(CategoryMixin, LoginRequiredMixin):
""""Mixin for updating categories."""
def get_context_data(self, **kwargs):
"""Get the context data which is passed to a template.
:param kwargs: Any arguments to pass to the superclass.
:type kwargs: dict
:returns: Context data which will be passed to the template.
:rtype: dict
"""
context = super(CategoryMixin, self).get_context_data(**kwargs)
return context
def form_invalid(self, form):
"""Behaviour for invalid forms.
:param form: Form which is being validated.
:type form: ModelForm
"""
return self.render_to_response(self.get_context_data(form=form))
class CategoryListView(CategoryMixin, PaginationMixin, ListView):
"""View for the list of categories."""
context_object_name = 'categories'
template_name = 'category/list.html'
paginate_by = 10
def get_context_data(self, **kwargs):
"""Get the context data which is passed to a template.
:param kwargs: Any arguments to pass to the superclass.
:type kwargs: dict
:returns: Context data which will be passed to the template.
:rtype: dict
"""
context = super(CategoryListView, self).get_context_data(**kwargs)
context['num_categories'] = context['categories'].count()
context['unapproved'] = False
return context
def get_queryset(self):
"""Get the queryset for this view.
:returns: Queryset which is filtered to only show approved categories.
:rtype: QuerySet
"""
qs = Category.objects.all()
return qs
class CategoryDetailView(CategoryMixin, DetailView):
"""Show the details for a category."""
context_object_name = 'category'
template_name = 'category/detail.html'
def get_context_data(self, **kwargs):
"""Get the context data which is passed to a template.
:param kwargs: Any arguments to pass to the superclass.
:type kwargs: dict
:returns: Context data which will be passed to the template.
:rtype: dict
"""
context = super(CategoryDetailView, self).get_context_data(**kwargs)
return context
def get_queryset(self):
"""Get the queryset for this view.
:returns: Queryset which is filtered to only show approved categories.
:rtype: QuerySet
"""
qs = Category.objects.all()
return qs
def get_object(self, queryset=None):
"""Get the object referenced by this view.
:param queryset: An option queryset from which the object should be
retrieved.
:type queryset: QuerySet
:returns: A Version instance.
:rtype: Version
"""
obj = super(CategoryDetailView, self).get_object(queryset)
obj.request_user = self.request.user
return obj
class CategoryDeleteView(LoginRequiredMixin, CategoryMixin, DeleteView):
"""A view for deleting categories."""
context_object_name = 'category'
template_name = 'category/delete.html'
def get_success_url(self):
"""Get the url for when the operation was successful.
:returns: A url.
:rtype: str
"""
return reverse('category-list')
def get_queryset(self):
"""Get the queryset for this view.
:returns: A queryset which is filtered to only show approved versions.
:rtype: QuerySet
"""
qs = Category.all_objects.all()
if self.request.user.is_staff:
return qs
else:
return get_object_or_404(qs, creator=self.request.user)
class CategoryCreateView(CategoryCreateUpdateMixin, CreateView):
context_object_name = 'category'
template_name = 'category/create.html'
def get_success_url(self):
return reverse('pending-category-list')
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.save()
return HttpResponseRedirect(self.get_success_url())
class CategoryUpdateView(CategoryCreateUpdateMixin, UpdateView):
context_object_name = 'category'
template_name = 'category/update.html'
def get_form_kwargs(self):
kwargs = super(CategoryUpdateView, self).get_form_kwargs()
return kwargs
def get_queryset(self):
qs = Category.objects
if self.request.user.is_staff:
return qs
else:
return qs.filter(creator=self.request.user)
def get_success_url(self):
return reverse('category-list')
class PendingCategoryListView(StaffuserRequiredMixin,
                              CategoryMixin,
                              PaginationMixin,
                              ListView):
"""List all unapproved categories"""
context_object_name = 'categories'
template_name = 'category/list.html'
paginate_by = 10
def get_context_data(self, **kwargs):
context = super(PendingCategoryListView, self).get_context_data(**kwargs)
context['num_categories'] = self.get_queryset().count()
context['unapproved'] = True
return context
def get_queryset(self):
qs = Category.unapproved_objects.all()
if self.request.user.is_staff:
return qs
else:
return qs.filter(creator=self.request.user)
class ApproveCategoryView(CategoryMixin, StaffuserRequiredMixin, RedirectView):
permanent = False
query_string = True
pattern_name = 'pending-category-list'
def get_redirect_url(self, pk):
category_qs = Category.unapproved_objects.all()
category = get_object_or_404(category_qs, pk=pk)
category.approved = True
category.save()
return reverse(self.pattern_name)
|
import julia
import timeit
from functools import partial
julia_large = partial(julia.julia_set,2048,1536)
print(timeit.timeit(julia_large,number=1))
|
from backbone import ResNet2015
from backbone import RegNet2020
from backbone import effnet
NET_LUT = {
'resnet': ResNet2015.ResNet,
'regnet': RegNet2020.RegNet,
'resnext': RegNet2020.AnyNet,
'effnet': effnet.EffNet,
}
def load_regnet_weight(model,pretrain_path,sub_name):
from collections import OrderedDict
import torch
checkpoints = torch.load(pretrain_path+WEIGHT_LUT[sub_name])
states_no_module = OrderedDict()
for k, v in checkpoints['model_state'].items():
if k != 'head.fc.weight' and k!= 'head.fc.bias':
name_no_module = k
states_no_module[name_no_module] = v
model.load_state_dict(states_no_module,strict=False)
LOAD_LUT = {
'resnet': ResNet2015.ResNet,
'regnet': load_regnet_weight,
'resnext': load_regnet_weight,
'effnet': load_regnet_weight,
}
WEIGHT_LUT = {
'RegNetY-8.0GF': 'regnet/RegNetY-8.0GF_dds_8gpu.pyth',
'RegNetX-4.0GF': 'regnet/RegNetX-4.0GF_dds_8gpu.pyth',
'RegNetY-32GF': 'regnet/RegNetY-32GF_dds_8gpu.pyth',
'ResNeXt-50': 'resnext/X-50-32x4d_dds_8gpu.pyth',
'EfficientNet-B2': 'effnet/EN-B2_dds_8gpu.pyth',
}
def get_network(net_name, logger=None, cfg=None):
    net_class = NET_LUT.get(net_name)
    if net_class is None:
        # dict.get returns None rather than raising, so check explicitly
        logger.error("network type error, {} does not exist".format(net_name))
        raise KeyError(net_name)
    net_instance = net_class(cfg=cfg, logger=logger)
if cfg.PRETRAIN is not None:
load_func = LOAD_LUT.get(net_name)
load_func(net_instance,cfg.PRETRAIN_PATH,cfg.PRETRAIN)
logger.info("load {} pretrain weight success".format(net_name))
return net_instance
if __name__ == "__main__":
import logging
from config import cfg, load_cfg
from ptflops import get_model_complexity_info
logger = load_cfg()
model = get_network('resnet', logger=logger, cfg=cfg.MODEL)
model = model.cuda()
flops, params = get_model_complexity_info(model, (3, 224, 224),
as_strings=True, print_per_layer_stat=True)
print('{:<30} {:<8}'.format('Computational complexity: ', flops))
print('{:<30} {:<8}'.format('Number of parameters: ', params))
|
'''
Copyright (c) 2020 Aria-K-Alethia@github.com
Description:
train and exp code
Licence:
MIT
THE USER OF THIS CODE AGREES TO ASSUME ALL LIABILITY FOR THE USE OF THIS CODE.
Any use of this code should display all the info above.
'''
from __future__ import print_function
import argparse
import torch
import torch.utils.data
from torch import nn, optim
from torch.nn import functional as F
from torchvision import datasets, transforms
from torchvision.utils import save_image
import torch.distributions as dist
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from dataset import get_mnist
from model import VAE, CVAE, StackedVAE, GMVAE
from utils import onehot_vector
from itertools import cycle
parser = argparse.ArgumentParser(description='VAE MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1024, metavar='S',
                    help='random seed (default: 1024)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--train', action='store_true', default=False)
parser.add_argument('--output', type=str, default='./model/model.pt')
parser.add_argument('--label', action='store_true', default=False)
parser.add_argument('--alpha', type=float, default=1)
parser.add_argument('--architecture', type=str)
parser.add_argument('--pretrained-vae', type=str, default='./model/vae.pt')
parser.add_argument('--labels-per-class', type=int)
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if args.cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
labelled, unlabelled, validation = get_mnist(location="./data", batch_size=args.batch_size, labels_per_class=args.labels_per_class)
prev_loss = float('inf')
X = 784
Y = 10
Z = 20
H = 400
C = [400, 128]
if args.architecture == 'vae':
model = VAE(X, Y, Z, H)
elif args.architecture == 'cvae':
model = CVAE(X, Y, Z, H, C)
elif args.architecture == 'stackedvae':
vae = VAE(X, Y, Z, H)
vae.load_state_dict(torch.load(args.pretrained_vae))
model = StackedVAE(X, Y, Z, H, C, vae)
elif args.architecture == 'gmvae':
model = GMVAE(X, Y, Z, H, C)
else:
raise ValueError('Model architecture {} is not defined'.format(args.architecture))
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr=1e-3)
def train(epoch):
model.train()
train_loss = 0
    print('Train start, labelled: {}, unlabelled: {}'.format(len(labelled), len(unlabelled)))
    if epoch == 1:
        # drain both loaders once on the first epoch (presumably to advance the samplers)
        for x, y in labelled:
            continue
        for x, y in unlabelled:
            continue
for batch_idx, ((x, y), (u, _)) in enumerate(zip(cycle(labelled), unlabelled)):
#for (x, y), (u, _) in zip(cycle(labelled), unlabelled):
x = x.to(device)
y = y.to(device)
u = u.to(device)
optimizer.zero_grad()
# labelled data
l_recon_batch, L, classification_loss, l_loss_state, l_state = model(x, y)
u_recon_batch, U, _, u_loss_state, u_state = model(u)
        if args.architecture == 'vae':
            loss = U
        else:
            # semi-supervised objective: labelled ELBO + unlabelled ELBO
            # plus the alpha-weighted classification loss
            loss = L + U + args.alpha * classification_loss
loss.backward()
train_loss += loss.item()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}, L_BCE: {:.6f}, L_KLD: {:.6f}, L_CLAS: {:.6f}, U_BCE: {:.6f}, U_KLD: {:.6f}'.format(
epoch, batch_idx * len(x), len(unlabelled.dataset),
100. * batch_idx / len(unlabelled),
loss.item() / len(x),
l_loss_state['reconstruction'].item() / len(x),
l_loss_state['kl'].item() / len(x),
l_loss_state['classification'].item() / len(x),
u_loss_state['reconstruction'].item() / len(x),
u_loss_state['kl'].item() / len(x)))
print('====> Epoch: {} Average loss: {:.4f}'.format(
epoch, train_loss / len(unlabelled.dataset)))
def lr_schedule():
old_lr = optimizer.param_groups[0]['lr']
factor = 0.4
lr = old_lr * factor
for pg in optimizer.param_groups:
pg['lr'] = lr
print('Learning rate change: {} -> {}'.format(old_lr, lr))
def test(epoch):
model.eval()
test_loss = 0
outdata = []
with torch.no_grad():
for i, (x, y) in enumerate(validation):
x = x.to(device)
y = y.to(device)
recon_batch, loss, classification_loss, loss_state, state = model(x, y)
test_loss += loss_state['reconstruction'].item()
if i == 0:
n = min(x.shape[0], 8)
comparison = torch.cat([x.view(x.shape[0], 1, 28, 28)[:n],
recon_batch.view(x.shape[0], 1, 28, 28)[:n]])
save_image(comparison.cpu(),
'results/{}_reconstruction_'.format(args.architecture) + str(epoch) + '.png', nrow=n)
global prev_loss
test_loss /= len(validation.dataset)
if test_loss > prev_loss:
lr_schedule()
prev_loss = test_loss
print('====> Test set loss: {:.4f}'.format(test_loss))
def random_sample(epoch):
with torch.no_grad():
if args.architecture != 'gmvae':
sample = torch.randn(64, 20).to(device)
y = onehot_vector(torch.randint(0, 10, (64,)), 10).to(device).type_as(sample)
else:
y = onehot_vector(torch.randint(0, 10, (64,)), 10).to(device).float()
loc = model.loc(y)
scale = model.sp(model.scale(y))
temp_dist = dist.Independent(dist.Normal(loc, scale), 1)
sample = temp_dist.rsample()
if args.label:
sample = torch.cat([sample, y], dim=1)
sample = model.decode(sample).cpu()
save_image(sample.view(64, 1, 28, 28),
'results/{}_sample_'.format(args.architecture) + str(epoch) + '.png')
def main_train():
for epoch in range(1, args.epochs + 1):
train(epoch)
test(epoch)
random_sample(epoch)
state_dict = model.state_dict()
torch.save(state_dict, args.output)
def analysis():
state_dict = torch.load(args.output)
model.load_state_dict(state_dict)
embedding, label = None, None
# latent variable visualization and unsupervised accuracy
plt.figure()
    tsne = TSNE(n_components=2, perplexity=50, init='pca')  # keyword args; newer sklearn rejects extra positionals
#tsne = PCA(n_components=2, whiten=True)
correct_count = 0
with torch.no_grad():
for i, (x, y) in enumerate(validation):
x = x.to(device)
y = y.to(device)
recon_batch, _, _, _, state= model(x, y)
mu = state['mean']
if embedding is None:
embedding = state['mean']
label = y
else:
embedding = torch.cat([embedding, mu], 0)
label = torch.cat([label, y], 0)
if args.architecture == 'stackedvae':
feat = model.vae.sample(x)
logits = model.classify(feat)
else:
logits = model.classify(x)
            correct_count += (torch.argmax(logits, dim=-1).squeeze() == y).sum().item()
accuracy = correct_count / len(validation.dataset)
print('Unsupervised accuracy: {:.2f}%'.format(accuracy * 100))
embedding2 = embedding.cpu().numpy()
label = label.cpu().numpy()
label = label[:10000]
embedding2 = embedding2[:10000]
#pca.fit(embedding)
#out = pca.transform(embedding)
#print(pca.explained_variance_ratio_)
out = tsne.fit_transform(embedding2)
out = (out - out.min(0)) / (out.max(0) - out.min(0))
for i in range(10):
d = out[label==i]
plt.scatter(d[:, 0], d[:, 1], label=str(i))
plt.legend(loc='upper right')
f = plt.gcf()
f.savefig('./output/{}_{}_latent_variable.png'.format(args.architecture, args.labels_per_class))
plt.clf()
'''
with torch.no_grad():
sample = torch.diag(torch.ones(20)).to(device)
if args.label:
y = onehot_vector(torch.arange(10).repeat(2), 10).to(device).type_as(sample)
sample = torch.cat([sample, y], dim=1)
sample = model.decode(sample).cpu()
save_image(sample.view(20, 1, 28, 28), './output/sample.png')
buf = []
for i in range(10):
mu = embedding[label==i]
mu = torch.mean(mu, 0)
buf.append(mu)
sample = torch.stack(buf)
with torch.no_grad():
if args.label:
y = onehot_vector(torch.arange(10), 10).to(device).type_as(sample)
sample = torch.cat([sample, y], dim=1)
sample = model.decode(sample).cpu()
save_image(sample.view(10, 1, 28, 28), './output/mean.png')
'''
buf = [-3, -1.5, 0, 1.5, 3]
if args.architecture != 'gmvae':
base_sample = torch.zeros(Z).to(device)
sample = []
with torch.no_grad():
for i in range(Z * len(buf)):
temp = base_sample.clone()
temp[i//len(buf)] = buf[i%len(buf)]
sample.append(temp)
sample = torch.stack(sample)
if args.label:
y = onehot_vector(torch.cat([torch.ones(Z * len(buf) // Y) * i for i in range(Y)]), Y).to(device).type_as(sample)
sample = torch.cat([sample, y], dim=1)
sample = model.decode(sample).cpu()
else:
y = onehot_vector(torch.cat([torch.ones(Z * len(buf) // Y) * i for i in range(Y)]), Y).to(device).float()
mean = model.loc(y)
scale = model.scale(y)
for i in range(y.shape[0]):
dim = i // len(buf)
index = i % len(buf)
mean[i, dim] = mean[i, dim] + buf[index] * scale[i, dim]
sample = torch.cat([mean, y], dim=1)
sample= model.decode(sample).cpu()
save_image(sample.view(Z * len(buf), 1, 28, 28), './output/{}_traverse.png'.format(args.architecture), nrow=2 * len(buf))
if __name__ == "__main__":
if args.train:
main_train()
else:
analysis()
|
import unittest
from katas.kyu_8.did_she_say_hello import validate_hello
class ValidateHelloTestCase(unittest.TestCase):
def test_true(self):
self.assertTrue(validate_hello('hello'))
def test_true_2(self):
self.assertTrue(validate_hello('ciao bella!'))
def test_true_3(self):
self.assertTrue(validate_hello('salut'))
def test_true_4(self):
self.assertTrue(validate_hello('hallo, salut'))
def test_true_5(self):
self.assertTrue(validate_hello('hombre! Hola!'))
def test_true_6(self):
self.assertTrue(validate_hello('Hallo, wie geht\'s dir?'))
def test_true_7(self):
self.assertTrue(validate_hello('AHOJ!'))
def test_true_8(self):
self.assertTrue(validate_hello('czesc'))
def test_true_9(self):
self.assertTrue(validate_hello('Ahoj'))
def test_false(self):
self.assertFalse(validate_hello('meh'))
|
import pytest
from ethereum.tools.tester import TransactionFailed
from plasma_core.constants import NULL_ADDRESS, NULL_ADDRESS_HEX, MIN_EXIT_PERIOD, NULL_SIGNATURE
from plasma_core.transaction import Transaction
from plasma_core.utils.transactions import decode_utxo_id
def test_challenge_standard_exit_valid_spend_should_succeed(testlang):
owner, amount = testlang.accounts[0], 100
deposit_id = testlang.deposit(owner, amount)
spend_id = testlang.spend_utxo([deposit_id], [owner.key], outputs=[(owner.address, NULL_ADDRESS, amount)])
testlang.start_standard_exit(spend_id, owner.key)
doublespend_id = testlang.spend_utxo([spend_id], [owner.key], outputs=[(owner.address, NULL_ADDRESS, amount)])
testlang.challenge_standard_exit(spend_id, doublespend_id)
assert testlang.get_standard_exit(spend_id) == [NULL_ADDRESS_HEX, NULL_ADDRESS_HEX, 0]
def test_challenge_standard_exit_if_successful_awards_the_bond(testlang):
owner, amount = testlang.accounts[0], 100
deposit_id = testlang.deposit(owner, amount)
spend_id = testlang.spend_utxo([deposit_id], [owner.key], outputs=[(owner.address, NULL_ADDRESS, amount)])
testlang.start_standard_exit(spend_id, owner.key)
doublespend_id = testlang.spend_utxo([spend_id], [owner.key], outputs=[(owner.address, NULL_ADDRESS, amount)])
pre_balance = testlang.get_balance(owner)
testlang.challenge_standard_exit(spend_id, doublespend_id)
post_balance = testlang.get_balance(owner)
assert post_balance > pre_balance
def test_challenge_standard_exit_mature_valid_spend_should_succeed(testlang):
owner, amount = testlang.accounts[0], 100
deposit_id = testlang.deposit(owner, amount)
spend_id = testlang.spend_utxo([deposit_id], [owner.key], outputs=[(owner.address, NULL_ADDRESS, amount)])
testlang.start_standard_exit(spend_id, owner.key)
doublespend_id = testlang.spend_utxo([spend_id], [owner.key], outputs=[(owner.address, NULL_ADDRESS, amount)])
testlang.forward_timestamp(2 * MIN_EXIT_PERIOD + 1)
testlang.challenge_standard_exit(spend_id, doublespend_id)
assert testlang.get_standard_exit(spend_id) == [NULL_ADDRESS_HEX, NULL_ADDRESS_HEX, 0]
def test_challenge_standard_exit_invalid_spend_should_fail(testlang):
owner_1, owner_2, amount = testlang.accounts[0], testlang.accounts[1], 100
deposit_id = testlang.deposit(owner_1, amount)
testlang.start_standard_exit(deposit_id, owner_1.key)
spend_id = testlang.spend_utxo([deposit_id], [owner_2.key], force_invalid=True)
with pytest.raises(TransactionFailed):
testlang.challenge_standard_exit(deposit_id, spend_id)
def test_challenge_standard_exit_unrelated_spend_should_fail(testlang):
owner, amount = testlang.accounts[0], 100
deposit_id_1 = testlang.deposit(owner, amount)
testlang.start_standard_exit(deposit_id_1, owner.key)
deposit_id_2 = testlang.deposit(owner, amount)
spend_id = testlang.spend_utxo([deposit_id_2], [owner.key])
with pytest.raises(TransactionFailed):
testlang.challenge_standard_exit(deposit_id_1, spend_id)
def test_challenge_standard_exit_uninitialized_memory_and_zero_sig_should_fail(testlang):
bond = testlang.root_chain.standardExitBond()
owner, amount = testlang.accounts[0], 100 * bond
deposit_id = testlang.deposit(owner, amount)
spend_id = testlang.spend_utxo([deposit_id], [owner.key])
tx = testlang.child_chain.get_transaction(spend_id)
with pytest.raises(TransactionFailed):
testlang.root_chain.challengeStandardExit(0, tx.encoded, 3, NULL_SIGNATURE)
def test_challenge_standard_exit_not_started_should_fail(testlang):
owner, amount = testlang.accounts[0], 100
deposit_id = testlang.deposit(owner, amount)
spend_id = testlang.spend_utxo([deposit_id], [owner.key])
with pytest.raises(TransactionFailed):
testlang.challenge_standard_exit(deposit_id, spend_id)
def test_challenge_standard_exit_wrong_oindex_should_fail(testlang):
    from plasma_core.utils.transactions import encode_utxo_id  # decode_utxo_id and Transaction are already imported at module level
alice, bob, alice_money, bob_money = testlang.accounts[0], testlang.accounts[1], 10, 90
deposit_id = testlang.deposit(alice, alice_money + bob_money)
deposit_blknum, _, _ = decode_utxo_id(deposit_id)
spend_tx = Transaction(inputs=[decode_utxo_id(deposit_id)], outputs=[(alice.address, NULL_ADDRESS, alice_money), (bob.address, NULL_ADDRESS, bob_money)])
spend_tx.sign(0, alice.key)
blknum = testlang.submit_block([spend_tx])
alice_utxo = encode_utxo_id(blknum, 0, 0)
bob_utxo = encode_utxo_id(blknum, 0, 1)
testlang.start_standard_exit(alice_utxo, alice.key)
testlang.start_standard_exit(bob_utxo, bob.key)
bob_spend_id = testlang.spend_utxo([bob_utxo], [bob.key], outputs=[(bob.address, NULL_ADDRESS, bob_money)])
alice_spend_id = testlang.spend_utxo([alice_utxo], [alice.key], outputs=[(alice.address, NULL_ADDRESS, alice_money)])
with pytest.raises(TransactionFailed):
testlang.challenge_standard_exit(alice_utxo, bob_spend_id)
with pytest.raises(TransactionFailed):
testlang.challenge_standard_exit(bob_utxo, alice_spend_id)
testlang.challenge_standard_exit(alice_utxo, alice_spend_id)
def test_challenge_standard_exit_with_in_flight_exit_tx_should_succeed(ethtester, testlang):
# exit cross-spend test, cases 3 and 4
owner, amount = testlang.accounts[0], 100
deposit_id = testlang.deposit(owner, amount)
spend_id = testlang.spend_utxo([deposit_id], [owner.key], outputs=[(owner.address, NULL_ADDRESS, amount)])
ife_tx = Transaction(inputs=[decode_utxo_id(spend_id)], outputs=[(owner.address, NULL_ADDRESS, amount)])
ife_tx.sign(0, owner.key)
(encoded_spend, encoded_inputs, proofs, signatures) = testlang.get_in_flight_exit_info(None, spend_tx=ife_tx)
bond = testlang.root_chain.inFlightExitBond()
testlang.root_chain.startInFlightExit(encoded_spend, encoded_inputs, proofs, signatures, value=bond, sender=owner.key)
testlang.start_standard_exit(spend_id, owner.key)
assert testlang.get_standard_exit(spend_id).amount == 100
exit_id = testlang.get_standard_exit_id(spend_id)
# FIXME a proper way of getting encoded body of IFE tx is to get it out of generated events
testlang.root_chain.challengeStandardExit(exit_id, ife_tx.encoded, 0, ife_tx.signatures[0])
assert testlang.get_standard_exit(spend_id) == [NULL_ADDRESS_HEX, NULL_ADDRESS_HEX, 0, 0]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-02-07 22:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('medecin', '0002_patient_affectation'),
]
operations = [
migrations.AlterField(
model_name='patient',
name='affectation',
field=models.BooleanField(default=False),
),
]
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
tdefault.py
=============================================
Loads test events from Opticks and Geant4 created by OKG4Test
and compares their bounce histories.
"""
import os, sys, logging, argparse, numpy as np
log = logging.getLogger(__name__)
from opticks.ana.base import opticks_main
from opticks.ana.nbase import vnorm
from opticks.ana.evt import Evt
from opticks.ana.cf import CF
if __name__ == '__main__':
np.set_printoptions(precision=4, linewidth=200)
args = opticks_main(doc=__doc__, tag="1", src="torch", det="default", c2max=2.0, tagoffset=0, dbgseqhis=0)
log.info("tag %s src %s det %s c2max %s " % (args.utag,args.src,args.det, args.c2max))
cf = CF(tag=args.tag, src=args.src, det=args.det)
|
def isBinary(n):
num = str(n)
res = True
for pos, bit in enumerate(num):
if bit != '0' and bit != '1':
res = False
return res
def toDecimal(num):
pot = len(num) - 1
soma = 0
for pos in range(0, len(num)):
soma += (int(num[pos]) * (2 ** pot))
pot -= 1
res = soma
return res
def getBinaryNumber(msg=''):
    while True:
        number = input(msg)
        if number.isnumeric():
            if isBinary(number):
                return number
            else:
                print('Invalid number!')
        else:
            print('Enter a number!')
def question(msg=''):
    while True:
        resp = str(input(msg)).lower()
        if resp == 'y':
            return True
        elif resp == 'n':
            return False
        else:
            print('Try again!')
def conversor():
    print('---' * 10)
    while True:
        num = getBinaryNumber('Enter a base-2 value: ')
        resp = toDecimal(num)
        print(f'Decimal: {int(resp)}')
        resp = question('Do you want to quit? [Y/N] ')
        print('---' * 10)
        if resp:
            break
        else:
            continue
def organiza(lista):
    # insertion sort into a new list (note: duplicate values are silently dropped)
    cont = 0
nova_lista = []
for valor in lista:
cont += 1
if cont == 1:
nova_lista.append(valor)
else:
if valor < min(nova_lista):
nova_lista.insert(0, valor)
elif valor > max(nova_lista):
nova_lista.append(valor)
else:
for pos in range(0, len(nova_lista) + 1):
if valor not in nova_lista:
if valor < nova_lista[pos]:
nova_lista.insert(pos, valor)
return nova_lista
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# raiz_quadrada_recursiva.py
#
# Copyright 2015 Cristian <cristian@cristian>
#
"""
4. Calcular a raiz quadrada de um número n com tolerância máxima t. (pesquise a definição de raiz
quadrada)
"""
def raiz(n, t):
if abs(n**2 - t) <= 0.0001:
return n
else:
a0 = n / 2
t = n / a0 # 4 / 2 = 2
an = (a0 + t) / 2 # (1 + 2) / 2 = 1.5 ]
#~ print(raiz(n / a2, t))
#~ print(raiz(n / a2, a2))
#~ input()
return raiz(n / an, an)
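# e.g. raiz(2, 1) starts from the guess 1 and converges to ~1.41421
# within the 1e-4 tolerance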
#
#
# Method reference:
"""
https://books.google.com.br/books?id=I-5hAAAAcAAJ&pg=PR12&dq=easy+square+roots+children&hl=en&sa=X&ei=XgFjVcX8PMLKsAWo1ICQDA&redir_esc=y#v=onepage&q&f=false
"""
def raiz_francois(n, t):
res = t + (n - t**2) / (2*t)
rp = res**2
if abs(rp - n) <= 0.000001 or t == res:
return res
else:
t = res
return raiz_francois(n, t)
#
#
def main():
print(raiz_francois(2, 1))
return 0
if __name__ == '__main__':
main()
|
# Generated by Django 3.2.5 on 2021-07-27 22:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Customer',
fields=[
                ('username', models.CharField(db_index=True, max_length=50, primary_key=True, serialize=False, verbose_name='customer login')),
],
options={
                'verbose_name': 'Customer',
                'verbose_name_plural': 'Customers',
},
),
migrations.CreateModel(
name='Gem',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20, unique=True, verbose_name='item name')),
],
options={
                'verbose_name': 'Gem',
                'verbose_name_plural': 'Gems',
},
),
migrations.CreateModel(
name='Deal',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('total', models.PositiveIntegerField(verbose_name='deal total')),
                ('quantity', models.PositiveSmallIntegerField(verbose_name='item quantity, pcs')),
                ('date', models.DateTimeField(verbose_name='date and time the deal was registered')),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='deals', to='API.customer')),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='deals', to='API.gem')),
],
options={
                'verbose_name': 'Deal',
                'verbose_name_plural': 'Deals',
},
),
]
|
"""Implementation of the server-side open-tracing interceptor."""
import sys
import logging
import re
import grpcext
import opentracing
def _add_peer_tags(peer_str, tags):
ipv4_re = r"ipv4:(?P<address>.+):(?P<port>\d+)"
match = re.match(ipv4_re, peer_str)
if match:
tags['peer.ipv4'] = match.group('address')
tags['peer.port'] = match.group('port')
return
ipv6_re = r"ipv6:\[(?P<address>.+)\]:(?P<port>\d+)"
match = re.match(ipv6_re, peer_str)
if match:
tags['peer.ipv6'] = match.group('address')
tags['peer.port'] = match.group('port')
return
logging.warning('unrecognized peer: %s', peer_str)
def _start_server_span(tracer, servicer_context, method):
span_context = None
error = None
metadata = servicer_context.invocation_metadata()
try:
if metadata:
span_context = tracer.extract(opentracing.Format.HTTP_HEADERS,
dict(metadata))
except (opentracing.UnsupportedFormatException,
opentracing.InvalidCarrierException,
opentracing.SpanContextCorruptedException) as e:
logging.exception('tracer.extract() failed')
error = e
tags = {'component': 'grpc', 'span.kind': 'server'}
_add_peer_tags(servicer_context.peer(), tags)
span = tracer.start_span(
operation_name=method, child_of=span_context, tags=tags)
if error is not None:
span.log_kv({'event': 'error', 'error.object': error})
return span
class OpenTracingServerInterceptor(grpcext.UnaryServerInterceptor,
grpcext.StreamServerInterceptor):
def __init__(self, tracer, log_payloads):
self._tracer = tracer
self._log_payloads = log_payloads
def intercept_unary(self, request, servicer_context, server_info, handler):
with _start_server_span(self._tracer, servicer_context,
server_info.full_method) as span:
response = None
if self._log_payloads:
span.log_kv({'request': request})
try:
response = handler(request)
            except Exception as e:  # bind the exception instance, not just its class
span.set_tag('error', True)
span.log_kv({'event': 'error', 'error.object': e})
raise
if self._log_payloads:
span.log_kv({'response': response})
return response
# For RPCs that stream responses, the result can be a generator. To record
# the span across the generated responses and detect any errors, we wrap the
# result in a new generator that yields the response values.
def _intercept_server_stream(self, servicer_context, server_info, handler):
with _start_server_span(self._tracer, servicer_context,
server_info.full_method) as span:
try:
result = handler()
for response in result:
yield response
            except Exception as e:  # bind the exception instance, not just its class
span.set_tag('error', True)
span.log_kv({'event': 'error', 'error.object': e})
raise
def intercept_stream(self, servicer_context, server_info, handler):
if server_info.is_server_stream:
return self._intercept_server_stream(servicer_context, server_info,
handler)
with _start_server_span(self._tracer, servicer_context,
server_info.full_method) as span:
try:
return handler()
            except Exception as e:  # bind the exception instance, not just its class
span.set_tag('error', True)
span.log_kv({'event': 'error', 'error.object': e})
raise
|
import requests
import json
from .api_results import *
class BaseFacade(object):
baseURL = ""
@classmethod
def make_get_call(cls, url, result_type):
response = requests.get(cls.baseURL + url)
# print(response.json())
json_str = json.dumps(response.json(), default=lambda o: o.__dict__, sort_keys=True, indent=4)
        # print(json_str)
# result = ApiResult(**json.loads(json_str))
result = ApiResult.from_json(json.loads(json_str), result_type)
return result
@classmethod
def make_post_call(cls, url, data, result_type):
response = requests.post(cls.baseURL + url, json=data)
json_str = json.dumps(response.json(), default=lambda o: o.__dict__, sort_keys=True, indent=4)
# print(json_str)
result = ApiResult.from_json(json.loads(json_str), result_type)
return result
|
from ..FeatureExtractor import ContextFeatureExtractor
class closest_in_light(ContextFeatureExtractor):
"""distance_in_arcmin_to_nearest_galaxy"""
active = True
extname = 'closest_in_light' #extractor's name
    light_cutoff = 4.0 ## don't report anything farther away than this
verbose = False
def extract(self):
n = self.fetch_extr('interng')
try:
tmp = n["closest_in_light"]
except:
return None # 20081010 dstarr adds try/except in case NED mysql cache server is down
if tmp is None or tmp > self.light_cutoff:
rez = None
else:
rez = tmp
if self.verbose:
print n
return rez
|
import ConfigParser
import io
# default config as string
def_config = """
[seed]
iseed = 1234
wseed = 4321
pseed = 4321
[netsyn]
NMAMREE = 0.1
NMAMREI = 0.1
mGLURR = 7.5
GB2R = 7.5
rdmsec = 1
nmfracca = 0.13
[chan]
ihginc = 2.0
iark2fctr = 1.0
iark4 = 0.008
erevh = -30.0
h_lambda = 325.0
h_gbar = 0.0025
fs_h_gbar = 0.00002
lts_h_gbar = 0.15
cagk_gbar = 0.0001
ikc_gkbar = 0.003
nax_gbar = 0.081
kdr_gbar = 0.021
kap_gbar = 0.3
kdmc_gbar = 0.00085
km_gmax = 0.1
cabar = 0.005
lts_cabar = 1.0
[cada]
taur = 5
[run]
indir = data
outdir = data
tstop = 2000.0
dt = 0.1
saveout = 1
simstr = 15dec29_B
statestr = 15apr20_net_S3
dorun = 1
doquit = 0
dodraw = 0
verbose = 0
recdt = 10.0
recvdt = 1.0
binsz = 5
saveconns = 0
[rxd]
CB_frate=5.5
CB_brate=0.0026
CB_init=0.2
gip3 = 120400.0
gserca = 4.0
gleak = 3.0
cacytinit = 100e-6
caerinit = 1.25
caexinit = 0.0
spaceum = 0.0
nsubseg = 0
subsegum = 0.0
v1ryr = 100.0
[net]
scale=1.0
IIGain = 0.1
IEGain = 0.15
EIGainFS = 0.15
EIGainLTS = 0.15
EEGain = 0.25
[stim]
EXGain = 1.0
noise = 1
ip3_stim = 0.0
ip3_stimT = 10000.0
sgrhzNMI = 600.0
sgrhzNME = 300.0
sgrhzEE = 800.0
sgrhzEI = 1600.0
sgrhzIE = 150.0
sgrhzII = 150.0
sgrhzMGLURE = 0.0
sgrhzGB2 = 0.0
"""
# write config file starting with defaults and new entries
# specified in section (sec), option (opt), and value (val)
# saves to output filepath fn
def writeconf (fn,sec,opt,val):
conf = ConfigParser.ConfigParser()
conf.optionxform = str
conf.readfp(io.BytesIO(def_config)) # start with defaults
# then change entries by user-specs
for i in xrange(len(sec)): conf.set(sec[i],opt[i],val[i])
# write config file
with open(fn, 'wb') as cfile: conf.write(cfile)
# read config file
def readconf (fn="physiol.cfg"):
config = ConfigParser.ConfigParser()
config.optionxform = str
config.read(fn)
def conffloat (base,var,defa): # defa is default value
val = defa
try: val=config.getfloat(base,var)
except: pass
return val
def confint (base,var,defa):
val = defa
try: val=config.getint(base,var)
except: pass
return val
def confstr (base,var,defa):
val = defa
try: val = config.get(base,var)
except: pass
return val
d = {}
d['iseed'] = confint("seed","iseed",1234)
d['wseed'] = confint("seed","wseed",4321)
d['pseed'] = confint("seed","pseed",4321)
d['NMAMREE'] = conffloat("netsyn","NMAMREE",0.1)
d['NMAMREI'] = conffloat("netsyn","NMAMREI",0.1)
d['mGLURR'] = conffloat("netsyn","mGLURR",7.5)
d['GB2R'] = conffloat("netsyn","GB2R",7.5)
d['nmfracca'] = conffloat("netsyn","nmfracca", 0.13)
d['rdmsec'] = confint("netsyn","rdmsec", 1)
d['erevh'] = conffloat("chan","erevh",-30.0)
d['h_lambda'] = conffloat("chan","h_lambda",325.0)
d['h_gbar'] = conffloat("chan","h_gbar",0.0025)
d['fs_h_gbar'] = conffloat("chan","fs_h_gbar",0.00002)
d['lts_h_gbar'] = conffloat("chan","lts_h_gbar",0.15)
d['cagk_gbar'] = conffloat("chan","cagk_gbar",0.0001)
d['ikc_gkbar'] = conffloat("chan","ikc_gkbar",0.003)
d['nax_gbar'] = conffloat("chan","nax_gbar",0.081)
d['kdr_gbar'] = conffloat("chan","kdr_gbar",0.021)
d['kap_gbar'] = conffloat("chan","kap_gbar",0.3)
d['kdmc_gbar'] = conffloat("chan","kdmc_gbar",0.00085)
d['km_gmax'] = conffloat("chan","km_gmax",0.1)
d['ihginc'] = conffloat("chan","ihginc", 2.0)
d['iark2fctr'] = conffloat("chan", "iark2fctr",1.0)
d['iark4'] = conffloat("chan", "iark4",0.008)
d['cabar'] = conffloat("chan","cabar",0.005)
d['lts_cabar'] = conffloat("chan","lts_cabar",1.0)
d['taurcada'] = conffloat("cada", "taur", 5.0)
d['outdir'] = confstr("run","outdir", "data")
d['indir'] = confstr("run","indir", "data")
d['tstop'] = conffloat("run","tstop", 2000.0)
d['dt'] = conffloat("run","dt",0.1)
d['saveout'] = conffloat("run","saveout",1)
d['simstr'] = confstr("run","simstr","15dec29_B")
d['statestr'] = confstr("run","statestr","15apr20_net_S3")
d['dorun'] = confint("run","dorun",1)
d['recdt'] = conffloat("run","recdt",10.0)
d['recvdt'] = conffloat("run","recvdt",1.0)
d['binsz'] = conffloat("run","binsz",5)
for k in ['saveconns','doquit','verbose','dodraw']: d[k] = confint("run",k,0)
d['CB_frate'] = conffloat("rxd","CB_frate", 5.5)
d['CB_brate'] = conffloat("rxd","CB_brate", 0.0026)
d['CB_init'] = conffloat("rxd","CB_init", 0.2)
d['gip3'] = conffloat("rxd","gip3",120400.0)
d['gserca'] = conffloat("rxd","gserca",4.0)
d['gleak'] = conffloat("rxd","gleak",3.0)
d['caerinit'] = conffloat("rxd","caerinit",1.25)
d['cacytinit'] = conffloat("rxd","cacytinit",100e-6)
d['caexinit'] = conffloat("rxd","caexinit",0.0)
d['spaceum'] = conffloat("rxd","spaceum",0.0)
d['nsubseg'] = confint("rxd","nsubseg",0)
d['subsegum'] = conffloat("rxd","subsegum",0.0)
d['v1ryr'] = conffloat("rxd","v1ryr",100.0)
d['scale'] = conffloat("net","scale",1.0)
d['IIGain'] = conffloat("net","IIGain",0.1)
d['IEGain'] = conffloat("net","IEGain",0.15)
d['EIGainFS'] = conffloat("net","EIGainFS",0.15)
d['EIGainLTS'] = conffloat("net","EIGainLTS",0.15)
d['EEGain'] = conffloat("net","EEGain",0.25)
d['EXGain'] = conffloat("stim","EXGain",1.0)
d['noise'] = conffloat("stim","noise",1.0)
d['ip3_stim'] = conffloat("stim","ip3_stim",0.0)
d['ip3_stimT'] = conffloat("stim","ip3_stimT",10000.0)
d['sgrhzNME'] = conffloat("stim","sgrhzNME",300.0)
d['sgrhzNMI'] = conffloat("stim","sgrhzNMI",600.0)
d['sgrhzEE'] = conffloat("stim","sgrhzEE",800.0)
d['sgrhzIE'] = conffloat("stim","sgrhzIE",150.0)
d['sgrhzEI'] = conffloat("stim","sgrhzEI",1600.0)
d['sgrhzII'] = conffloat("stim","sgrhzII",150.0)
d['sgrhzMGLURE'] = conffloat("stim","sgrhzMGLURE",0.0)
d['sgrhzGB2'] = conffloat("stim","sgrhzGB2",0.0)
return d
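# Example usage (a minimal sketch; the section/option/value triples below are
# illustrative, not taken from an actual run):
#   writeconf('physiol.cfg', ['run', 'net'], ['tstop', 'scale'], ['1000.0', '0.5'])
#   params = readconf('physiol.cfg')
#   print(params['tstop'], params['scale'])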
|
"""Read and combine zone logs into single dataframe."""
import pandas as pd
import numpy as np
import os
class file_operations(object):
"""
    Read in zone character confidence and content files and join them into a
    single Pandas dataframe. Perform simple math to derive confidence data about
    pages and the zones within them. Output a .csv file of low-confidence zones for inspection.
    Attributes:
        aggregate_zone_df: combined data from input files as a single dataframe
            with quality scores and zone content.
page_confidence_df: dataframe containing page confidence scores.
trimmed_low_confidence_zones: dataframe containing zones with low confidence
under a variable threshold.
"""
def __init__(self, file_dict):
self.file_dict = file_dict
self.aggregate_zone_df = self.read_combine()
self.page_confidence_df = self.define_page_confidence()
self.trimmed_low_confidence_zones = self.show_low_confidence_zones()
def read_combine(self):
"""loop through files, read file in, and combine file contents into single list object."""
def define_index(page, zone_index_list, i):
"""define dataframe row_index for each zone."""
# check if page (i.e. Industrial19200003-0001) is in zone_index_list
if page in zone_index_list:
i += 1
# slice page and build dataframe index using i as the zone number (i.e. 1920-0003-0001-3)
row_index = page[10:14] + '-' + page[14:] + '-' + str(i)
else:
# add page to zone_index_list
zone_index_list.append(page)
# slice page and build dataframe index using 1 as the zone number (i.e. 1920-0003-0001-1)
row_index = page[10:14] + '-' + page[14:] + '-' + str(1)
# reset i value.
i = 1
return (row_index, i)
# predefine columns titles for aggregate_confidence_df.
columns_confidence = ['filename', 'zone_index', 'count_zones', 'left', 'top', 'right',
'bottom', 'total_characters', 'char_conf_yes', 'char_conf_no', 'conf_ratio']
# declare empty dataframes to be filled (and later joined).
aggregate_confidence_dict = {}
aggregate_content_dict = {}
# loop through files in earlier-defined file path dictionary and extract data.
for key, file_list in self.file_dict.items():
# ID paths in dict. value as temp. objects.
file_zone_confidence = file_list[0]
file_zone_text = file_list[1]
# open file containing zone confidence information
with open(file_zone_confidence, 'r') as file_in_confidence:
# set beginning values for i and zone_index_list.
i = 1
zone_index_list = []
# loop through lines in file.
for line in file_in_confidence:
# split line string into comma-sep. items and define page.
zone = line.rstrip().split(',')
page = zone[0]
# trigger define_index function and pass returned index forward.
index_data = define_index(page, zone_index_list, i)
row_index = index_data[0]
i = index_data[1]
# perform basic math on confidence scores to summarise zone quality.
total_characters = sum([int(value) for value in zone[6:]])
char_conf_yes = sum([int(value) for value in zone[6:9]])
char_conf_no = sum([int(value) for value in zone[9:]])
conf_ratio = round(char_conf_yes / (total_characters + .001), 3)
# redefine zone using newly-computed values. Update aggregate_confidence_dict with zone.
zone = [zone[0]] + [i] + zone[1:6] + [total_characters] + [char_conf_yes] + [char_conf_no] + [conf_ratio]
aggregate_confidence_dict.update({row_index:zone})
# convert aggregate_confidence_dict to dataframe.
aggregate_confidence_df = pd.DataFrame.from_dict(aggregate_confidence_dict, orient='index')
aggregate_confidence_df.columns = columns_confidence
# # show dataframe.
# display(aggregate_confidence_df)
with open(file_zone_text, 'r', encoding = 'ISO-8859-1') as file_in_text:
# set beginning values for i and zone_index_list.
i = 1
zone_index_list = []
# loop through lines in file.
for line in file_in_text:
# split line string into comma-sep. items and define page.
zone = line.rstrip().split(',')
page = zone[0][12:]
# trigger define_index function and pass returned index forward.
index_data = define_index(page, zone_index_list, i)
row_index = index_data[0]
i = index_data[1]
# redefine zone and convert zone list to dataframe row.
zone = zone[-1]
aggregate_content_dict.update({row_index:zone})
# convert aggregate_content_dict to dataframe.
aggregate_content_df = pd.DataFrame.from_dict(aggregate_content_dict, orient='index')
aggregate_content_df.columns = ['zone_content']
# # show dataframe.
# display(aggregate_content_df)
# join aggregate_confidence_df & aggregate_content_df into single dataframe.
zone_data_df = aggregate_confidence_df.join(aggregate_content_df)
# # show dataframe.
# display(zone_data_df)
return zone_data_df
def define_page_confidence(self):
"""Create dataframe for page confidence as an aggregate of zones on-page."""
page_confidence_df = self.aggregate_zone_df.groupby(['filename']).agg({'count_zones':'count',
'total_characters':'sum', 'char_conf_yes':'sum', 'char_conf_no':'sum'})
page_confidence_df['page_confidence'] = round(page_confidence_df['char_conf_yes'] / (page_confidence_df['total_characters'] + .001), 3)
return page_confidence_df
def show_low_confidence_zones(self):
"""Filter aggregate_zone_df to display zones with low confidence."""
# set confidence boundary (for the time at .90).
low_confidence_zones = self.aggregate_zone_df[self.aggregate_zone_df.conf_ratio < .90]
trimmed_low_confidence_zones = low_confidence_zones[['filename', 'count_zones', 'zone_index', 'conf_ratio', 'zone_content']]
# trim page_confidence_df to only show the page confidence value.
page_confidence_df_trimmed = self.page_confidence_df[['page_confidence']]
# combine dataframes to add page confidence column to trimmed_low_confidence_zones.
trimmed_low_confidence_zones = trimmed_low_confidence_zones.join(page_confidence_df_trimmed, on='filename', how='left')
trimmed_low_confidence_zones = trimmed_low_confidence_zones[['filename', 'page_confidence', 'count_zones', 'zone_index', 'conf_ratio', 'zone_content']]
# # show dataframe.
# display(trimmed_low_confidence_zones)
# export dataframe as .csv file.
save_name = 'low_confidence_zones.csv'
out_path = os.getcwd()[:-8] + save_name
trimmed_low_confidence_zones.to_csv(out_path, index=False)
return trimmed_low_confidence_zones
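# A minimal usage sketch (the paths are illustrative assumptions): file_dict
# maps a batch key to a [confidence_file, content_file] pair of paths.
#   ops = file_operations({'batch1': ['zone_confidence.csv', 'zone_text.csv']})
#   print(ops.page_confidence_df.head())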
|
# Generated by Django 3.2.5 on 2021-07-24 06:02
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Dashboard',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('subject', models.CharField(max_length=64, null=True)),
('grade', models.FloatField(default=1)),
('year', models.IntegerField(default=1)),
('group', models.CharField(default='A', max_length=1)),
('student_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
from select_factor import get_factors
from mylib import get_data
from mylib import get_data_fromDB  # fetch data from the database
from mylib import train_test_split
from label_generator import generate_label
from mltool import method
from sklearn.metrics import classification_report
import numpy as np
def train(high,low,dopen,close,vol,ratio,set_bool,methodss):
datasets=get_factors(high,low,close,vol,set_bool)
#train_data,test_data=train_test_split(datasets)
label=generate_label(close,sign=1)
methods=["DecisionTreeClassifier","MLPClassifier","KNeighborsClassifier","AdaBoostClassifier","RandomForestClassifier","GradientBoostingClassifier"]
for i in range(len(methods)):
if methodss==methods[i]:
num=i
#train_label,test_label=train_test_split(label)
pred=[]
true_value=[]
start=int(len(datasets)*ratio)
for i in range(start,len(datasets)-1):
#print(datasets)
#print(label)
method[num].fit(datasets[:i,:],label[:i])
pred.append(method[num].predict(np.array(datasets[i]).reshape(1,-1)))
true_value.append(label[i])
#print(classification_report(np.array(true_value),pred))
return classification_report(np.array(true_value),pred)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/7/23 8:04 PM
# @Author : ZhangHao
# @File : config.py
# @Desc  : general configuration
import os
import feature_generator.base_generator
import feature_generator.card_qualification_data
# root directory of the raw training data
origin_data = "data/origin_data"
# class that processes the raw data and generates features
data_feature_generator = feature_generator.card_qualification_data.CardDataFeatureGenerator
# word_seg_type
# word segmentation options: "jieba" or the in-house segmenter "word_seg"
word_seg_func = "word_seg"
# minimum feature length
feature_min_length = 2
# stopwords
stopwords_path = "data/dict/stopwords.txt"
# append the generated features to every record
# features are generated over the whole set first, then split into training and validation sets
origin_data_feature_path = "data/origin_data_feature.txt"
# train/test split ratio
test_ratio = 0.1
# training and test sets
# train.txt holds the full training records, including label, features, raw data, etc.
# train_feature.txt is a slimmed-down train.txt with only the label and feature columns, used for training
# the test files follow the same scheme
train_data_path = "data/train.txt"
train_data_feature_path = "data/train_feature.txt"
test_data_path = "data/test.txt"
test_data_feature_path = "data/test_feature.txt"
# test_data_feature_path = "data/origin_data_feature"
# separator between dataset columns
col_sep = "\t"
# whether to regenerate features
re_gen_feature = True
# whether to re-split the train/test sets
re_split_train_test = True
# whether to run in debug mode
is_debug = True
# options: "logistic_regression"
model = "logistic_regression"
# options: "tf_word"
feature_type = "tf_word"
# feature frequency threshold; features rarer than this are dropped
feature_min_df = 2
# token_pattern used by the vectorizer when reading features
token_pattern = r'(?u)[^ ]+'
# options: "l1", "l2"
penalty = "l1"
# logistic regression parameters
# regularization coefficients; may be an int, a float or a list
Cs = [0.1,0.2,0.5,1,2,5,10,20]
# per-class weights
class_weight = {
0: 1,
1: 1
}
# number of cross-validation folds used to pick the coefficient C
k_fold = 5
# number of features reported as evidence with each prediction
evidence_num = 10
min_hit_feature = 3
# confidence threshold
confidence = 0.65
# output file paths
output_dir = "output"
vectorizer_path = output_dir + "/vectorizer_" + feature_type + "_" + model + ".pkl"
model_path = output_dir + "/model_" + feature_type + "_" + model + ".pkl"
feature_weight_path = output_dir + "/feature_weight_" + feature_type + "_" + model + ".txt"
label_vocab_path = output_dir + "/label_" + feature_type + "_" + model + ".txt"
# prediction output path
predict_dir = output_dir + "/pred_" + feature_type + "_" + model + ".txt"
if not os.path.exists(output_dir):
os.mkdir(output_dir)
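# A minimal sketch of how these settings might feed sklearn (an assumption;
# the actual training code lives elsewhere in this project):
#   from sklearn.linear_model import LogisticRegressionCV
#   clf = LogisticRegressionCV(Cs=Cs, cv=k_fold, penalty=penalty,
#                              class_weight=class_weight, solver='liblinear')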
|
#!/usr/bin/python
##############
## Make Plots
##############
########################
# Libraries and Includes
########################
# Python System Modules
import os,sys,glob
import logging
import array
# Python Paths
# Python Math Modules
from math import sqrt,fabs,sin,pow
# ROOT Modules
from ROOT import TFile,TTree,TChain,TBranch,TH1F,TH2F,THStack
from ROOT import TCanvas,TPad
from ROOT import TLorentzVector
from ROOT import gROOT, gDirectory,gStyle
from ROOT import TPaveText, TLegend, TLatex
import ROOT
###############################
# User defined drawing function
###############################
if __name__ == "__main__":
############################
# Plot for Unfolding Results
############################
VAR = ["leadingJetPt", "secondJetPt", "met", "met_soft", "oppositeJet", "mTZZ", "zPt", "zEta", "M2Lep", "dLepR", "dMetZPhi", "mjj", "detajj", "jetSumPt", "METOHT" ]
TITLE = { "leadingJetPt" : "Pt_leadingJet[GeV]",
"secondJetPt" : "Pt_SubleadingJet[GeV]",
"oppositeJet" : "#eta_{j1} * #eta_{j2}",
"met" : "Missing Et[GeV]",
"met_soft" : "Missing Et_{soft} [GeV]",
"mTZZ" : "mT_{ZZ}[GeV]",
"zPt" : "Pt_{Z}[GeV]",
"zEta" : "#eta_{Z}",
"M2Lep" : "M_{ll}[GeV]",
"dLepR" : "#DeltaR_{ll}",
"dMetZPhi" : "#Delta#phi_{MET,Z}",
"mjj" : "M_{jj}[GeV]",
"detajj" : "#Delta#eta_{jj}",
"jetSumPt" : "#SigmaPt_{jet}",
"METOHT" : "MET/H_{T}",
"score" : "score",
"score_cut" : "score",
}
# open file
sig_file = TFile("sig_hist.root")
Diboson_file = TFile("Diboson_hist.root")
Triboson_file = TFile("Triboson_hist.root")
Top_file = TFile("Top_hist.root")
W_jets_file = TFile("W+jets_hist.root")
Z_jets_file = TFile("Z+jets_hist.root")
data_file = TFile("data_hist.root")
for var in VAR:
hs = THStack(var, "")
# load basic histograms
sig_hist = sig_file.Get(var)
Diboson_hist = Diboson_file.Get(var)
Triboson_hist = Triboson_file.Get(var)
Top_hist = Top_file.Get(var)
W_jets_hist = W_jets_file.Get(var)
Z_jets_hist = Z_jets_file.Get(var)
data_hist = data_file.Get(var)
# renormalization
        scale = 36.5/40
sig_hist.Scale( scale )
Diboson_hist.Scale( scale )
Triboson_hist.Scale( scale )
Top_hist.Scale( scale )
W_jets_hist.Scale( scale )
Z_jets_hist.Scale( scale )
MC_hist = sig_hist.Clone()
MC_hist.Add(Diboson_hist)
MC_hist.Add(Triboson_hist)
MC_hist.Add(Top_hist)
MC_hist.Add(W_jets_hist)
MC_hist.Add(Z_jets_hist)
ratio_hist = data_hist.Clone(var+"_ratio")
ratio_hist.Divide(MC_hist)
# hs.Add(sig_hist)
hs.Add(W_jets_hist)
hs.Add(Triboson_hist)
hs.Add(Top_hist)
hs.Add(Diboson_hist)
hs.Add(Z_jets_hist)
sig_hist.SetFillColor(2)
sig_hist.SetMarkerColor(2)
sig_hist.SetMarkerSize(1)
sig_hist.SetMarkerStyle(ROOT.kFullCircle)
sig_hist.SetLineColor(2)
sig_hist.SetLineWidth(2)
Diboson_hist.SetFillColor(3)
Diboson_hist.SetMarkerColor(3)
Diboson_hist.SetMarkerSize(1)
Diboson_hist.SetMarkerStyle(0)
Diboson_hist.SetLineColor(3)
Diboson_hist.SetLineWidth(0)
Triboson_hist.SetFillColor(4)
Triboson_hist.SetMarkerColor(4)
Triboson_hist.SetMarkerSize(1)
Triboson_hist.SetMarkerStyle(0)
Triboson_hist.SetLineColor(4)
Triboson_hist.SetLineWidth(0)
Top_hist.SetFillColor(6)
Top_hist.SetMarkerColor(6)
Top_hist.SetMarkerSize(1)
Top_hist.SetMarkerStyle(0)
Top_hist.SetLineColor(6)
Top_hist.SetLineWidth(0)
W_jets_hist.SetFillColor(7)
W_jets_hist.SetMarkerColor(7)
W_jets_hist.SetMarkerSize(1)
W_jets_hist.SetMarkerStyle(0)
W_jets_hist.SetLineColor(7)
W_jets_hist.SetLineWidth(0)
Z_jets_hist.SetFillColor(5)
Z_jets_hist.SetMarkerColor(5)
Z_jets_hist.SetMarkerSize(1)
Z_jets_hist.SetMarkerStyle(0)
Z_jets_hist.SetLineColor(5)
Z_jets_hist.SetLineWidth(0)
data_hist.SetFillColor(1)
data_hist.SetMarkerColor(1)
data_hist.SetMarkerSize(1)
data_hist.SetMarkerStyle(ROOT.kFullSquare)
data_hist.SetLineColor(1)
data_hist.SetLineWidth(2)
leg = TLegend(0.77, 0.7, 0.97, 0.99)
leg.AddEntry(data_hist, "Data", "lep")
leg.AddEntry(Diboson_hist, "Diboson")
leg.AddEntry(Triboson_hist, "Triboson")
leg.AddEntry(Z_jets_hist, "Z+jets")
leg.AddEntry(Top_hist, "Top")
leg.AddEntry(W_jets_hist, "W+jets")
leg.AddEntry(sig_hist, "signal", "lep")
tex1 = TLatex(0.54,0.75, "#int Ldt = 36.5 fb^{-1}")
tex1.SetNDC()
tex1.SetTextFont(42)
tex1.SetLineWidth(2)
tex2 = TLatex(0.54,0.85, "#sqrt{s} = 13 TeV")
tex2.SetNDC()
tex2.SetTextFont(42)
tex2.SetLineWidth(2)
tex3 = TLatex(0.54,0.93, "ATLAS Internal")
tex3.SetNDC()
tex3.SetTextFont(42)
tex3.SetLineWidth(2)
cc = TCanvas(var, var, 800, 750)
# cc.SetLogy()
pad1 = TPad("p1", "p1", 0, 0.25, 1, 1, 0, 0)
pad1.SetMargin(0.15, 0.03, 0, 0.01)
pad2 = TPad("p2", "p2", 0, 0, 1, 0.25, 0, 0)
pad2.SetMargin(0.15, 0.03, 0.3, 0.01)
pad2.SetGrid()
pad1.Draw()
pad2.Draw()
pad1.cd()
pad1.SetLogy()
hs.SetMinimum(0.1)
hs.Draw("HIST")
# hs.GetXaxis().SetTitle(TITLE[var])
hs.GetYaxis().SetTitle("Events")
hs.GetYaxis().SetTitleOffset(0.8)
hs.GetYaxis().SetTitleSize(0.05)
data_hist.Draw("same""LPE")
sig_hist.Draw("same""LPE")
leg.Draw()
tex1.Draw()
tex2.Draw()
tex3.Draw()
ROOT.gStyle.SetOptStat(0)
ROOT.gStyle.SetOptTitle(0)
pad2.cd()
ratio_hist.SetMarkerColor(4)
ratio_hist.SetMarkerSize(1)
ratio_hist.SetMarkerStyle(ROOT.kFullCircle)
ratio_hist.Draw()
ratio_hist.GetXaxis().SetTitle(TITLE[var])
ratio_hist.GetXaxis().SetTitleOffset(0.9)
ratio_hist.GetXaxis().SetTitleSize(0.15)
ratio_hist.GetXaxis().SetLabelSize(0.1)
ratio_hist.GetYaxis().SetTitle("Data/MC")
ratio_hist.GetYaxis().SetTitleOffset(0.3)
ratio_hist.GetYaxis().SetTitleSize(0.15)
ratio_hist.GetYaxis().SetLabelSize(0.1)
ratio_hist.SetMinimum(0)
ratio_hist.SetMaximum(2)
ratio_hist.GetYaxis().SetNdivisions(5+100*5)
cc.Update()
cc.SaveAs("{0}.png".format(var))
sig_file.Close()
Diboson_file.Close()
Triboson_file.Close()
Top_file.Close()
W_jets_file.Close()
Z_jets_file.Close()
data_file.Close()
|
def alex_mistakes(number_of_katas, time_limit):
    req_time = 5  # the first set of pushups takes 5 minutes
    sets = 0
    # each kata takes 6 minutes (10 kata per hour)
    remaining = time_limit - number_of_katas * 6
    while remaining >= req_time:
        remaining -= req_time
        req_time *= 2  # each subsequent set takes twice as long
        sets += 1
    return sets
'''
Alex is transitioning from website design to coding and wants to sharpen his
skills with CodeWars. He can do ten kata in an hour, but when he makes a mistake,
he must do pushups. These pushups really tire poor Alex out, so every time he does
them they take twice as long. His first set of redemption pushups takes 5 minutes.
Create a function, alexMistakes, that takes two arguments: the number of kata he
needs to complete, and the time in minutes he has to complete them.
Your function should return how many mistakes Alex can afford to make.
'''
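# Worked example: alex_mistakes(10, 120) == 3
# The 10 kata take 60 minutes, leaving 60; three pushup sets cost
# 5 + 10 + 20 = 35 minutes, and a fourth set (40 minutes) would not fit.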
|
"""
Created by Alex Wang on 2018-04
LBP常用使用方法
(1)首先将检测窗口划分为16×16的小区域(cell);
(2)对于每个cell中的一个像素,将相邻的8个像素的灰度值与其进行比较,若周围像素值大于中心像素值,则该像素点的位置被标记为1,否则为0。这样,3*3邻域内的8个点经比较可产生8位二进制数,即得到该窗口中心像素点的LBP值;
(3)然后计算每个cell的直方图,即每个数字(假定是十进制数LBP值)出现的频率;然后对该直方图进行归一化处理。
(4)最后将得到的每个cell的统计直方图进行连接成为一个特征向量,也就是整幅图的LBP纹理特征向量;
(5)然后便可利用SVM或者其他机器学习算法进行分类了。
skimage.feature.local_binary_pattern(image, P, R, method='default')
P : int
Number of circularly symmetric neighbour set points (quantization of the angular space).
R : float
Radius of circle (spatial resolution of the operator).
method : {‘default’, ‘ror’, ‘uniform’, ‘var’}
default: gray scale, not rotation invariant
ror: gray scale and rotation invariant
uniform: uniform patters
nri_uniform: non rotation-invariant uniform patterns
"""
import cv2
from skimage.feature import local_binary_pattern
radius = 3
n_points = radius * 8
google_img = cv2.imread('google.jpg')
google_gray = cv2.cvtColor(google_img, cv2.COLOR_BGR2GRAY)
lbp_ror = local_binary_pattern(google_gray, n_points, radius, 'ror')
lbp_uniform = local_binary_pattern(google_gray, n_points, radius, 'uniform')
cv2.imshow('org_image', google_img)
# local_binary_pattern returns float arrays of label codes; scale them into
# [0, 1] so cv2.imshow renders them visibly instead of clipping to white
cv2.imshow('lbp_ror', lbp_ror / lbp_ror.max())
cv2.imshow('lbp_uniform', lbp_uniform / lbp_uniform.max())
cv2.waitKey(0)
cv2.destroyAllWindows()
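# A minimal sketch of steps (3)-(4) from the note above: build a normalised
# LBP histogram per cell and concatenate the cells into one feature vector.
# The cell size and bin count below are illustrative assumptions, not values
# taken from the original script (26 bins fits 'uniform' codes for P=24).
import numpy as np
def lbp_feature_vector(lbp_image, cell_size=16, n_bins=26):
    h, w = lbp_image.shape
    features = []
    for y in range(0, h - cell_size + 1, cell_size):
        for x in range(0, w - cell_size + 1, cell_size):
            cell = lbp_image[y:y + cell_size, x:x + cell_size]
            hist, _ = np.histogram(cell, bins=n_bins, range=(0, n_bins))
            features.append(hist / (hist.sum() + 1e-7))  # normalise per cell
    return np.concatenate(features)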
|
from flask import Flask, request, send_file
from google_api import Vision
from pubnub.pnconfiguration import PNConfiguration
from pubnub.pubnub import PubNub
import os
from PIL import Image, ImageDraw
def publish_callback(result, status):
print(result, status)
# Handle PNPublishResult and PNStatus
app = Flask(__name__)
def clearLine(line, title):
line = line.replace(title, "")
line = line.replace(" ", "")
line = line.replace(":", "")
line = line.replace("\n", "")
line = line.replace("{", "")
line = line.replace("}", "")
return line
def parseLandmarks(path):
    # walk the Vision API text dump as a small state machine:
    # 1 = saw "landmarks", 2 = read the type, 3 = expect x, 4 = expect y
    f = open(path, 'r+')
nextLandmark = ""
foundLandmark = 0
foundLandmarkX = ""
foundLandmarkY = ""
landmarks = {}
for line in f:
if "landmarks" in line:
foundLandmark = 1
elif foundLandmark == 1:
nextLandmark = clearLine(line, "type")
foundLandmark = 2
elif foundLandmark == 2:
foundLandmark = 3
elif foundLandmark == 3:
foundLandmarkX = clearLine(line, "x")
foundLandmark = 4
elif foundLandmark == 4:
foundLandmarkY = clearLine(line, "y")
landmarks[nextLandmark] = (float(foundLandmarkX), float(foundLandmarkY))
foundLandmark = 0
return landmarks
@app.route('/', methods=['POST'])
def process_image():
file = request.files['pic']
filename = file.filename
path = os.getcwd() + '/temp/{0}'.format(filename)
file.save(path)
value = Vision(path).run_process()
mood = value[0]
score = value[1]
pnconfig = PNConfiguration()
pnconfig.subscribe_key = "sub-c-9cbba3d6-a053-11e7-96f6-d664df0bd9f6"
pnconfig.publish_key = "pub-c-b8445efe-d010-4c8a-85d7-59dd061bea13"
pnconfig.ssl = False
pubnub = PubNub(pnconfig)
data = {}
data["filename"] = filename
if mood == 'joyful':
data["box-id"] = 1
data["mood"] = mood
elif mood == 'angry':
data["box-id"] = 2
data["mood"] = mood
elif mood == 'blurred':
data["box-id"] = 3
data["mood"] = mood
elif mood == 'sorrow':
data["box-id"] = 4
data["mood"] = mood
elif mood == 'outstanding in a hat':
data["box-id"] = 5
data["mood"] = mood
elif mood == "under_exposed":
data["box-id"] = 6
data["mood"] = mood
    pubnub.publish().channel("parcelbox").message(data).sync()
    # Flask view functions must return a response; acknowledge the upload
    return 'OK'
@app.route('/image/<filename>')
def get_image(filename):
path = os.getcwd() + '/temp/{0}'.format(filename)
googleResultPath = os.getcwd() + '/test.txt'
landmarks = parseLandmarks(googleResultPath)
print(landmarks)
image = Image.open(path)
draw = ImageDraw.Draw(image)
for key in landmarks.keys():
(x, y, r) = (landmarks[key][0], landmarks[key][1], 5)
draw.ellipse((x - r, y - r, x + r, y + r), fill=(255, 0, 0, 255))
newFile = os.getcwd() + '/temp/out-{0}'.format(filename)
image.save(newFile)
return send_file(newFile, mimetype='image/jpg')
|
power_mod = {0: 1.0, 1: 1.25, 2: 1.4, 3: 1.5, 4: 1.6, 5: 1.7, 6: 1.8}
def calc_attack(attack_stat: int, gear: int, sync_grid_additions: int):
atk = attack_stat + gear + sync_grid_additions
return atk
def cal_defence(
base_move_dmg: int,
effective_damage: int,
modifier_stage_up: int,
attack_stat: int,
gear: int,
sync_grid_additions: int,
) -> None:
roll = 1.0
crit_mod = 1.5
attack = calc_attack(attack_stat, gear, sync_grid_additions)
defence_low = ((base_move_dmg * attack) + 1) / (
effective_damage / (power_mod[modifier_stage_up] * crit_mod) / roll
)
defence_high = ((base_move_dmg * attack) + 1) / (
effective_damage / (power_mod[modifier_stage_up] * crit_mod) / (roll * 1.5)
)
print(f"This is the possible defence stat: {int(defence_low)}-{int(defence_high)}")
def TE_damage(
move_name,
base_power_of_move,
modifier_stage_up,
attack_stat,
gear,
sg_adds,
defence,
crit_mod=1.0,
):
damage = (
base_power_of_move * (calc_attack(attack_stat, gear, sg_adds) / defence) + 1
) * (power_mod[modifier_stage_up] * crit_mod)
print(
f"{move_name.capitalize()}, with a base power of {base_power_of_move} will do {int(damage*0.9)}-{int(damage*1.0)} damage against a Pokemon with {defence} defence points. If {move_name.capitalize()} was super-effective, it would {int(damage*0.9*2)}-{int(damage*1.0*2)} damage."
)
cal_defence(
base_move_dmg=186,
effective_damage=10797,
modifier_stage_up=6,
attack_stat=380,
gear=40,
sync_grid_additions=10,
)
TE_damage(
"Blast Burn",
base_power_of_move=187,
modifier_stage_up=6,
attack_stat=380,
gear=40,
sg_adds=10,
defence=20,
crit_mod=1.5,
)
|
import datetime
from flask import request, jsonify
from init import create_app
from models import Player, Country, Club, db
from views import player_json, club_json, country_json
app = create_app()
@app.route('/players', methods=['GET'])
def get_players():
players = Player.query.all()
all_players = []
for player in players:
all_players.append(player_json(player))
return jsonify(all_players), 200
@app.route('/players', methods=['POST'])
def create_player():
data = request.get_json()
required = [
'first_name', 'last_name', 'date_of_birth',
'nationality_id', 'current_club_id', 'preferred_position',
]
errors = []
for field in required:
if not data.get(field):
            errors.append(f'Field {field} is required')
if errors:
return jsonify({'errors': errors}), 400
player = Player(
first_name=data['first_name'],
last_name=data['last_name'],
date_of_birth=data['date_of_birth'],
nationality_id=data['nationality_id'],
current_club_id=data['current_club_id'],
preferred_position=data['preferred_position'],
)
db.session.add(player)
db.session.commit()
return '', 201
@app.route('/player/<id>', methods=['GET'])
def get_by_id(id):
player = Player.query.get_or_404(id)
return jsonify(player_json(player)), 200
@app.route('/player/<id>', methods=['PUT'])
def edit_player(id):
data = request.get_json()
player = Player.query.get_or_404(id)
    # note: no trailing commas here, or each field would be stored as a tuple
    if data.get('first_name'):
        player.first_name = data['first_name']
    if data.get('last_name'):
        player.last_name = data['last_name']
    if data.get('date_of_birth'):
        player.date_of_birth = data['date_of_birth']
    if data.get('nationality_id'):
        player.nationality_id = data['nationality_id']
    if data.get('current_club_id'):
        player.current_club_id = data['current_club_id']
    if data.get('preferred_position'):
        player.preferred_position = data['preferred_position']
player.last_modified = datetime.datetime.utcnow()
db.session.add(player)
db.session.commit()
return jsonify({'message': 'updated'}), 200
@app.route('/player/<id>', methods=['DELETE'])
def delete_player(id):
Player.query.filter_by(id=id).delete()
db.session.commit()
return jsonify({'message': 'deleted'}), 204
@app.route('/countries', methods=['GET'])
def get_countries():
countries = Country.query.all()
all_countries = []
for country in countries:
all_countries.append(country_json(country))
return jsonify(all_countries), 200
@app.route('/clubs', methods=['GET'])
def get_clubs():
clubs = Club.query.all()
all_clubs = []
for club in clubs:
all_clubs.append(club_json(club))
return jsonify(all_clubs), 200
|
import sys
import os
# Make sure we're using the right python. Assume we're running out of the venv.
INTERP = os.path.join(os.getcwd(), 'bin', 'python')
if sys.executable != INTERP:
os.execl(INTERP, INTERP, *sys.argv)
sys.path.append(os.getcwd())
import configparser
import importlib
config = configparser.ConfigParser()
config.read("config.ini")
bots = {}
for bot in config.sections():
if "disabled" in config[bot] and config[bot]["webhook"] == "1":
print("Bot {0} disabled".format(bot))
continue
if "webhook" not in config[bot] or config[bot]["webhook"] != "1":
print("Bot {0} not using webhook".format(bot))
continue
if "repo_name" not in config[bot]:
raise RuntimeError("Cannot find repo for bot {0}".format(bot))
bot_path = os.path.join(os.getcwd(), config[bot]["repo_name"])
if not os.path.isdir(bot_path):
raise RuntimeError("Cannot find path {0} for bot {1}".format(bot_path,
bot))
sys.path.append(bot_path)
# Assume the bot module is the same as the config file
if "module_name" not in config[bot]:
raise RuntimeError("Cannot find module for bot {0}".format(bot))
module = config[bot]["module_name"]
importlib.import_module(module)
bots[config[bot]["token"]] = getattr(sys.modules[module],
"create_webhook_bot")(config[bot])
if len(bots.keys()) == 0:
raise RuntimeError("Not running any bots!")
from flask import Flask, request
import telegram
application = Flask(__name__)
@application.route('/')
def hello():
return ""
@application.route('/telegram/<token>', methods=['POST'])
def webhook(token):
update = telegram.update.Update.de_json(request.get_json(force=True))
if token not in bots.keys():
return 'OK'
bots[token].update_queue.put(update)
return 'OK'
if __name__ == "__main__":
application.run()
|
import argparse
import os.path
import logging
import sys
import django
from django.core.management import load_command_class, find_commands, \
BaseCommand, CommandError
from django.apps import apps
logger = logging.getLogger("backathon.main")
def setup():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "backathon.settings")
django.setup()
def main():
"""Main entry point for the command line interface
    The workflow of this function follows a similar pattern to Django's
    ManagementUtility.execute() in that an initial "fake" parser is used to
    parse a couple of preliminary arguments, but we always force the very first
argument to be the subcommand, never an option.
It also only exports commands from this package.
"""
argv = sys.argv
try:
subcommand = argv[1]
except IndexError:
subcommand = "help"
parser = argparse.ArgumentParser(
usage="%(prog)s subcommand --config CONFIG [options] [args]",
add_help=False,
)
parser.add_argument("--config")
options, args = parser.parse_known_args(argv)
# Set the path to the database from either the command line option or an
# env var. It must be set one way or the other.
if options.config:
os.environ['BACKATHON_CONFIG'] = options.config
if not "BACKATHON_CONFIG" in os.environ:
if subcommand == "help":
# Just going to display some help... set an in-memory database so
# we don't run into any errors if something tries to do database
# access
os.environ['BACKATHON_CONFIG'] = ":memory:"
else:
parser.error("You must use --config or set the environment variable "
"BACKATHON_CONFIG")
dbpath = os.environ['BACKATHON_CONFIG']
# Special exception, all commands except for 'init' require the database
# to exist.
if (subcommand not in ['init', 'help'] and not os.path.exists(dbpath)):
sys.stderr.write("Could not find config database: {}\n".format(dbpath))
sys.stderr.write("Check the path, or if this is a new config you must run 'init'\n")
sys.exit(1)
setup()
# Now that we've configured Django, we can import the rest of the modules
# and configure the real parser specific for the given subcommand
backathon_config = apps.app_configs['backathon']
commands = find_commands(
os.path.join(backathon_config.path, 'management')
)
if subcommand == "help":
usage = [
parser.usage % {'prog': parser.prog},
"",
"Available subcommands:"
]
for command in sorted(commands):
usage.append("\t" + command)
sys.stdout.write("\n".join(usage) + "\n")
sys.exit(1)
if subcommand not in commands:
sys.stderr.write("Unknown command: {!r}\tType '{} help' for usage.\n"
.format(subcommand, os.path.basename(argv[0])))
sys.exit(1)
command_class = load_command_class("backathon", subcommand)
assert isinstance(command_class, BaseCommand)
# Reconfigure the parser and re-parse the arguments
parser = argparse.ArgumentParser(
prog="{} {}".format(os.path.basename(argv[0]), subcommand),
description=command_class.help or None,
)
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument("-q", "--quiet", action="store_true")
parser.add_argument("--config", help="Path to the config database")
command_class.add_arguments(parser)
options = parser.parse_args(argv[2:])
# Set log level
if options.quiet:
level = logging.ERROR
elif options.verbose == 0:
level = logging.WARNING
elif options.verbose == 1:
level = logging.INFO
else:
level = logging.DEBUG
logging.getLogger("backathon").setLevel(level)
logger.info("Using config database {}".format(
dbpath
))
try:
command_class.handle(**vars(options))
except CommandError as e:
command_class.stderr.write(str(e))
sys.exit(1)
if __name__ == "__main__":
main()
|
from __future__ import unicode_literals
from django.shortcuts import render, HttpResponse, redirect
from django.core.urlresolvers import reverse
from .models import User, Message, Comment
from .forms import Register, Message_Form, Comment_Form
from django.contrib import messages
import bcrypt
# Rendering pages below.
def dashboard(request):
users = User.objects.all()
logged_user = User.objects.get(id=request.session["logged_user"])
return render(request, "user_dash/dashboard.html", {"users": users, "logged_user": logged_user})
def dashboard_admin(request):
users = User.objects.all()
logged_user = User.objects.get(id=request.session["logged_user"])
return render(request, "user_dash/dashboard_admin.html", {"users": users, "logged_user": logged_user})
def user_new(request):
registration_form = Register()
logged_user = User.objects.get(id=request.session["logged_user"])
return render(request, "user_dash/new.html", {"registration_form": registration_form, "logged_user": logged_user})
def profile(request, user_id):
    message_form = Message_Form()  # instantiate the forms, not the models
    comment_form = Comment_Form()
user = User.objects.get(id=user_id)
logged_user = User.objects.get(id=request.session["logged_user"])
message = Message.objects.filter(user_id=user_id).order_by("-created_at")
# comment = Comment.objects.filter(message_id=message.id)
return render(request, "user_dash/profile.html", {"message_form": message_form, "comment_form": comment_form, "user": user, "logged_user": logged_user, "message": message})
# def user_edits(request):
# return render(request, "login_reg/user_edit.html")
# def admin_edit(request, user_id):
# return render(request, "login_reg/user_edit.html")
# def user_delete(request):
# return render(request, "login_reg/delete.html")
# Functions used by processing routes.
# For registration.
def register_function(request):
register = Register(request.POST)
if request.method == "POST":
# If there are any validation errors.
errors = User.objects.registration_validate(request.POST)
if len(errors):
for tab, error in errors.iteritems():
messages.error(request, error)
func_return = "error"
return func_return
# If no errors, then it will add the user information to the database.
else:
if register.is_valid():
user = User.objects.create(
name = register.cleaned_data["name"].title(),
alias = register.cleaned_data["alias"],
email = register.cleaned_data["email"],
password = bcrypt.hashpw(register.cleaned_data["password"].encode(), bcrypt.gensalt()),
user_level = 0
)
if len(User.objects.all()) == 1:
user.user_level = 9
user.save()
curr_user = User.objects.get(email=user.email)
try:
request.session["logged_user"]
except KeyError:
request.session["logged_user"] = curr_user.id
func_return = "user"
return func_return
# Processing routes below.
# Create new user.
def create_new(request):
register = Register(request.POST)
new_user = register_function(request)
if new_user == "error":
logged_user = User.objects.get(id=request.session["logged_user"])
return render(request, "user_dash/new.html", {"registration_form": register, "logged_user": logged_user})
else:
return redirect(reverse("userdash:user_dash_admin"))
# Post a message.
def message(request, user_id):
Message.objects.create(
message_post = request.POST["message_post"],
poster_id = request.session["logged_user"],
user_id = user_id
)
return redirect(reverse("userdash:user_profile", kwargs={"user_id": user_id}))
# Delete a message.
def d_message(request, message_id):
message = Message.objects.get(id=message_id)
message.delete()
return redirect(reverse("userdash:user_profile", kwargs={"user_id": request.session["logged_user"]}))
# Post a comment.
def comment(request, message_id, user_id):
Comment.objects.create(
comment_post = request.POST["text_comment"],
user_id = user_id,
message_id = message_id
)
return redirect(reverse("userdash:user_profile", kwargs={"user_id": request.session["logged_user"]}))
# Delete a comment.
# def d_comment(request):
# return redirect(reverse("logreg:logreg_profile"))
|
#!/usr/bin/env python
"""
pyjld.phidgets.bonjour.bus
"""
__author__ = "Jean-Lou Dupont"
__email = "python (at) jldupont.com"
__fileid = "$Id: bus.py 69 2009-04-17 18:49:17Z jeanlou.dupont $"
__all__ = ['busSignals',]
import dbus, dbus.service
class busSignals(dbus.service.Object):
"""
Dbus signals
"""
def __init__(self):
dbus.service.Object.__init__(self, dbus.SessionBus(), bus_name="pyjld.phidgets")
@dbus.service.signal(dbus_interface='pyjld.phidget', signature='sss')
def PhidgetFound(self, name, serial, type):
pass
|
import paramiko
from termcolor import colored
import time
def sshConnection(user, password):
host = "172.16.0.10"
port = 22
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(host, port, user, password)
    return client  # hand the live connection back to the caller
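if __name__ == "__main__":
    # Hedged usage sketch: the host is hard-coded above; the credentials here
    # are placeholders, not real accounts.
    ssh = sshConnection("user", "password")
    stdin, stdout, stderr = ssh.exec_command("uname -a")
    print(stdout.read().decode())
    ssh.close()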
|
#!/usr/bin/env python
import requests
import re
import urllib.parse as urlparse
import subprocess
import colorama
from colorama import Fore, Back, Style
colorama.init(autoreset=True)
try:
unknown = input(Fore.WHITE + "\n Enter the Website (target url): ")
target_url = "https://" + unknown
target_links = []
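    # Recursive crawler: extract_links_from() pulls href values out of the raw
    # HTML with a regex, and crawl() resolves each one to an absolute URL and
    # recurses into unseen links under the target domain. Very deep sites can
    # exhaust Python's recursion limit.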
def extract_links_from(url):
response = requests.get(url)
return re.findall('(?:href=")(.*?)"', str(response.content))
def crawl(url):
href_links = extract_links_from(url)
for link in href_links:
link = urlparse.urljoin(url, link)
if "#" in link:
link = link.split("#")[0]
if target_url in link and link not in target_links:
target_links.append(link)
print (link)
crawl(link)
crawl(target_url)
except KeyboardInterrupt:
print(Fore.WHITE + Back.RED +"\n\n\tKeyboardInterruption Detected")
print(Fore.WHITE + Back.RED +"\n\n\t\tExiting")
exit()
|
import models
from Crypto.Cipher import AES
def uid_check(uid):
if models.User.select().where(models.User.username == uid).exists():
uid = models.User.get(models.User.username == uid).uid
elif models.User.select().where(models.User.uid == uid).exists():
pass
else:
raise models.DoesNotExist
return uid
def encrypt(key, iv, text):
# Encryption
encryption_suite = AES.new(key, AES.MODE_CFB, iv)
    text = encryption_suite.encrypt(text)  # encrypt the caller's text, not a hard-coded string
return text
def decrypt(key, iv, text):
# Decryption
decryption_suite = AES.new(key, AES.MODE_CFB, iv)
text = decryption_suite.decrypt(text)
return text
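if __name__ == '__main__':
    # Hedged round-trip sketch: AES-CFB wants a 16/24/32-byte key and a
    # 16-byte IV. The key, IV and message below are illustrative only.
    key = b'0123456789abcdef'
    iv = b'fedcba9876543210'
    ciphertext = encrypt(key, iv, b'attack at dawn')
    assert decrypt(key, iv, ciphertext) == b'attack at dawn'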
|
import pygame,sys
from pygame.locals import *
pygame.init()
new_surface = pygame.display.set_mode((600,600))
imgSurface = pygame.image.load("Knot_Class.jpg")  # filename must be a quoted string
new_surface.blit(imgSurface, (0, 0))
pygame.display.update()  # push the blit to the window
# minimal event loop so the window stays open until closed
while not any(event.type == QUIT for event in pygame.event.get()):
    pass
pygame.quit()
sys.exit()
|
import numpy
# works (TOIMII)
def isInTriangle(x, y, x1, y1, x2, y2, x3, y3):
    # NOTE: despite its name, this currently only tests whether (x, y) lies
    # inside a circle of radius 1.41; the triangle vertices are unused.
    #if 1.41*1.41 > (x*x+y*y):
    #time.sleep(10)
    return 1.41 * 1.41 > (x * x + y * y)
# to test (TESTAA)
def ray_hits_triangle(screenpoint, triangle):
    # Unfinished: the ray/triangle intersection system was never completed.
    # Only the screen point is assembled into a column vector so far.
    tri = triangle
    I = screenpoint
    m = numpy.array([[I[0]], [I[1]], [I[2]]])
    return m
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 7 20:19:15 2018
@author: user
Sums the values in a data file (資料加總)
"""
with open("read.txt","r",encoding="utf-8") as fd:
data=fd.read()
    d_sp = data.split(" ")
    d_li = list(map(float, d_sp))  # float() is safer than eval() on file data
    print(sum(d_li))
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import time
import os
import logging
DOWNLOAD_LIST_FNAME = 'download_list'
DOWNLOAD_TAG_FNAME = 'finish'
def generate_job(file_path):
"""
Arguments:
- `file_path`:
"""
    with open(DOWNLOAD_LIST_FNAME, 'w') as f:
        f.write(file_path)  # the with-block closes the file automatically
while True:
try:
if os.path.exists(DOWNLOAD_TAG_FNAME):
with open(DOWNLOAD_TAG_FNAME) as f:
cnt = f.read()
if cnt.strip() == file_path:
cmd = 'rm -rf %s' % file_path
logging.warning(cmd)
# FIXME
# os.system(cmd)
break
time.sleep(5)
except Exception as e:
logging.warning(str(e))
if __name__ == '__main__':
for i in ['2016%02d' % i for i in range(1, 13)]:
# os.system('hadoop fs -get %s .' %i)
generate_job(i)
|
from mongoengine import *
from threads import Thread
class Board(Document):
"""
A Board object represents a list of topics.
"""
name = StringField()
board_id = StringField()
description = StringField()
topics = ListField(ReferenceField(Thread, dbref=False))
|
import logging
from datetime import datetime, timedelta
import time
from canvas import Canvas
# #############################################################################
# Class: Clock
# Draws the date and/or time on a canvas, then pastes that canvas to the
# Matrix (matrixobject).
# positioninmatrix is the position of the clock canvas in the matrix canvas.
# clockdefinition defines what the clock should contain (Time, DoW,
# Month or Day (date)).
# clock1224 = 12 for a 12 hour clock, or 24 for a 24 hour clock.
# #############################################################################
class Clock:
def __init__(self, matrixobject, positioninmatrix, clockdefinition, clock1224):
logging.info('Creating new Clock instance')
self.__Matrix = matrixobject
self.__MatrixPosition = positioninmatrix
self.__ClockDefinition = clockdefinition
self.__Clock24h = clock1224
self.__ClockCanvas = Canvas(clockdefinition['Size'])
self.__ImageChanged = True
self.__CurrentHour = -1
self.__CurrentMinute = -1
self.__CurrentSecond = -1
self.__CurrentDay = -1
self.__CurrentMonth = -1
self.__CurrentDoW = -1
# -------------------------------------------------------------------------
# update_time_canvas
# Update the time every second. The time can be 12h or 24h
# -------------------------------------------------------------------------
def update_time_canvas(self):
while True:
delay = 1.0
if 'Time' in self.__ClockDefinition:
if self.__ClockDefinition['Time'] != {}:
self.draw_time()
delay = 1.0 - float(time.time() % 1)
time.sleep(delay)
# -------------------------------------------------------------------------
# update_date_canvas
# Updates the date every day
# -------------------------------------------------------------------------
def update_date_canvas(self):
while True:
todaysdate = datetime.today()
datedow = todaysdate.weekday()
datemonth = todaysdate.month
dateday = todaysdate.day
if 'DoW' in self.__ClockDefinition:
if self.__ClockDefinition['DoW'] != {}:
if self.__CurrentDoW != datedow:
self.__CurrentDoW = datedow
self.__ImageChanged = True
self.__ClockCanvas.draw_on_canvas(self.__ClockDefinition['DoW'], self.__CurrentDoW)
if 'Day' in self.__ClockDefinition:
if self.__ClockDefinition['Day'] != {}:
if self.__CurrentDay != dateday:
self.__CurrentDay = dateday
self.__ImageChanged = True
self.draw_day()
if 'Month' in self.__ClockDefinition:
if self.__ClockDefinition['Month'] != {}:
if self.__CurrentMonth != datemonth:
self.__CurrentMonth = datemonth
self.__ImageChanged = True
self.__ClockCanvas.draw_on_canvas(self.__ClockDefinition['Month'], self.__CurrentMonth)
if self.__ClockDefinition['AutoDraw']:
self.draw_on_matrix_canvas()
# Calculate the number of seconds until midnight and wait for then
secondstomidnight = (datetime.now().replace(hour=0, minute=0, second=0,
microsecond=0) + timedelta(days=1)) - datetime.now()
time.sleep(secondstomidnight.total_seconds())
# ----------------------------------------------------------------------------------
# draw_clock_digit
# Draw an individual clock digit at the pre-defined position (x, y) tuple
# ----------------------------------------------------------------------------------
def draw_clock_digit(self, position, digit):
self.__ClockCanvas.draw_on_canvas(
(position[0], position[1], self.__ClockDefinition['Time'][2], self.__ClockDefinition['Time'][3]),
digit)
# -------------------------------------------------------------------------
# add_image_width
# Increase x by a defined width
# -------------------------------------------------------------------------
def add_image_width(self, x, imagedef):
return x + imagedef[2] - imagedef[0] + 1
# ----------------------------------------------------------------------------------
# draw_day
# Update the day (date), consisting of a one or two digit number
# ----------------------------------------------------------------------------------
def draw_day(self):
x = self.__ClockDefinition['Day'][0]
y = self.__ClockDefinition['Day'][1]
image = self.__ClockDefinition['Day'][2]
imagedefinition = self.__ClockDefinition['Day'][3]
# 10's digit
if self.__CurrentDay >= 10.0:
tensdigit = str(abs(int(self.__CurrentDay)))[0]
self.__ClockCanvas.draw_on_canvas((x, y, image, imagedefinition), tensdigit)
x = self.add_image_width(x, imagedefinition[tensdigit])
else:
tensdigit = 0
self.__ClockCanvas.draw_on_canvas((x, y, image, imagedefinition), 'sp')
x = self.add_image_width(x, imagedefinition['sp'])
# Units digit
unitdigit = str(abs(int(self.__CurrentDay)) - (int(tensdigit) * 10))
self.__ClockCanvas.draw_on_canvas((x, y, image, imagedefinition), unitdigit)
# ----------------------------------------------------------------------------------
# draw_time
# Update the time display, hh:mm a/p
# ----------------------------------------------------------------------------------
def draw_time(self):
x = self.__ClockDefinition['Time'][0]
y = self.__ClockDefinition['Time'][1]
image = self.__ClockDefinition['Time'][2]
imagedefinition = self.__ClockDefinition['Time'][3]
self.__ImageChanged = False
# The font is assumed to be non-proportional for all but the : and am/pm
# Calculate the positions of the various items
xhourtens = x
xhourunits = self.add_image_width(xhourtens, imagedefinition[1])
xcolon = self.add_image_width(xhourunits, imagedefinition[1])
xminutetens = self.add_image_width(xcolon, imagedefinition[':'])
xminuteunits = self.add_image_width(xminutetens, imagedefinition[1])
xampm = self.add_image_width(xminuteunits, imagedefinition[1])
# Get the current hour
currenttime = time.localtime()
# Only update the hour if it has changed
if currenttime.tm_hour != self.__CurrentHour:
self.__ImageChanged = True
self.__CurrentHour = currenttime.tm_hour
            if self.__Clock24h == 12:
                # Convert to a 12 hour clock: 12:xx onwards is PM, and
                # hour 0 (midnight) displays as 12
                ampm = 1 if self.__CurrentHour >= 12 else 0
                hour = self.__CurrentHour % 12
                if hour == 0:
                    hour = 12
else:
# 24 hour
hour = self.__CurrentHour
ampm = -1
# Draw the hours - first digit
if hour >= 20:
firstdigit = 2
seconddigit = hour - 20
elif hour >= 10:
firstdigit = 1
seconddigit = hour - 10
else:
firstdigit = ' '
seconddigit = hour
# Draw the first digit
self.draw_clock_digit((xhourtens, y), firstdigit)
# Draw the second digit
self.draw_clock_digit((xhourunits, y), seconddigit)
# Draw AM/PM
if ampm == 0:
self.draw_clock_digit((xampm, y), 'am')
elif ampm == 1:
self.draw_clock_digit((xampm, y), 'pm')
# Draw the : flashing each second
if currenttime.tm_sec != self.__CurrentSecond:
self.__ImageChanged = True
self.__CurrentSecond = currenttime.tm_sec
            if self.__CurrentSecond % 2 == 0:
self.draw_clock_digit((xcolon, y), ':')
else:
self.draw_clock_digit((xcolon, y), ': ')
# Only update the minutes if they have changed
if currenttime.tm_min != self.__CurrentMinute:
self.__ImageChanged = True
self.__CurrentMinute = currenttime.tm_min
minute = self.__CurrentMinute
if self.__CurrentMinute < 10:
minute_firstdigit = 0
minute_seconddigit = self.__CurrentMinute
else:
minute_firstdigit = int(str(self.__CurrentMinute)[0])
minute_seconddigit = int(str(self.__CurrentMinute)[1])
self.draw_clock_digit((xminutetens, y), minute_firstdigit)
self.draw_clock_digit((xminuteunits, y), minute_seconddigit)
self.draw_on_matrix_canvas()
# ----------------------------------------------------------------------------------
# draw_on_matrix_canvas
    # If the canvas has changed, draw it on the matrix canvas
# ----------------------------------------------------------------------------------
def draw_on_matrix_canvas(self):
# Only draw on matrix canvas if AutoDraw is set to True, and the image has changed
if self.__ClockDefinition['AutoDraw']:
if self.__ImageChanged:
self.__Matrix.paste_to_matrix_canvas(self.__MatrixPosition, self.__ClockCanvas)
self.__ImageChanged = False
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
#Q: [b, n, d_k]
#K: [b, k, d_k]
#V: [b, k, d_v]
#Note: for 3-D tensors, a@b and a.bmm(b) compute the same batched matmul.
def scaled_dotproduct_attention(Q,K,V, apply_mask=False):
assert Q.size(-1)==K.size(-1) and K.size(-2)==V.size(-2)
batch_size, n, d_k = Q.size()
k = K.size(-2)
qk = Q@K.transpose(1,2) # [b, n, k]
qk /= np.sqrt(d_k) # [b, n, k]
if apply_mask:
assert n==k #Only during self attention
for i in range(n):
qk[:, i, i+1:] = -np.inf
weights = F.softmax(qk, dim=-1) # [b, n, k]
output = weights@V # [b, n, d_v]
return output
class MultiHeadAttention(nn.Module):
def __init__(self, d_model, d_k, d_v, h):
super(MultiHeadAttention, self).__init__()
assert d_model==d_k*h==d_v*h #Not necessarily true, but proposed in the original paper
self.d_model, self.d_k, self.d_v, self.h = d_model, d_k, d_v, h
self.Q_projections = nn.ModuleList([nn.Linear(d_model, d_k) for i in range(h)])
self.K_projections = nn.ModuleList([nn.Linear(d_model, d_k) for i in range(h)])
self.V_projections = nn.ModuleList([nn.Linear(d_model, d_v) for i in range(h)])
self.out_projection = nn.Linear(h*d_v, d_model)
def forward(self, Q, K, V, apply_mask=False):
Q_projected = [proj(Q) for proj in self.Q_projections]
K_projected = [proj(K) for proj in self.K_projections]
V_projected = [proj(V) for proj in self.V_projections]
attention_outputs = [scaled_dotproduct_attention(Q_projected[i], K_projected[i], V_projected[i], apply_mask) for i in range(self.h)]
concatenated_outputs = torch.cat(attention_outputs, dim=-1)
output = self.out_projection(concatenated_outputs)
return output
#Test the layers
if __name__ == "__main__":
batch_size = 2
Q, K, V = torch.rand(batch_size, 10,3), torch.rand(batch_size, 5,3), torch.rand(batch_size, 5,2)
result = scaled_dotproduct_attention(Q,K,V)
print(result.size())
print()
d_model, h = 128, 8
d_k = d_v = d_model//h
Q, K, V = torch.rand(batch_size, 10, d_model), torch.rand(batch_size, d_k, d_model), torch.rand(batch_size, d_v, d_model)
multihead_attention = MultiHeadAttention(d_model, d_k, d_v, h)
result = multihead_attention(Q,K,V)
print(result.size())
print()
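    # Hedged extra check (assumes self-attention, n == k): the causal mask is
    # only valid when queries and keys come from the same sequence.
    S = torch.rand(batch_size, 6, d_model)
    masked = multihead_attention(S, S, S, apply_mask=True)
    print(masked.size())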
|
from appconfig import systemd
def test_enable(app, testdir, mocker):
files = mocker.Mock(upload_template=mocker.Mock())
mocker.patch('appconfig.systemd.files', files)
mocker.patch('appconfig.systemd.sudo')
systemd.enable(app, testdir / 'systemd')
assert files.upload_template.call_count == 3
|
#!/usr/bin/python
import feedparser
import urllib2
import zlib
# open() cannot fetch URLs; download the gzipped feed with urllib2 instead
f = urllib2.urlopen('http://www.torrentday.com/torrents/rss?download;7;u=428237;tp=887f3b1d10049f24d6fddf65d2139b22')
decompressed_data=zlib.decompress(f.read(), 16+zlib.MAX_WBITS)
print decompressed_data
#feed = feedparser.parse( decompressed_data )
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to parse Windows Jump List files:
* .automaticDestinations-ms
* .customDestinations-ms
"""
import argparse
import logging
import sys
import pyolecf
from dtformats import file_system
from dtformats import jump_list
from dtformats import output_writers
try:
from dtformats import dfvfs_helpers
except ImportError:
dfvfs_helpers = None
def Main():
"""The main program function.
Returns:
bool: True if successful or False if not.
"""
argument_parser = argparse.ArgumentParser(description=(
'Extracts information from Windows Jump List files.'))
argument_parser.add_argument(
'-d', '--debug', dest='debug', action='store_true', default=False,
help='enable debug output.')
if dfvfs_helpers:
dfvfs_helpers.AddDFVFSCLIArguments(argument_parser)
argument_parser.add_argument(
'source', nargs='?', action='store', metavar='PATH',
default=None, help='path of the Windows Jump List file.')
options = argument_parser.parse_args()
logging.basicConfig(
level=logging.INFO, format='[%(levelname)s] %(message)s')
if dfvfs_helpers and getattr(options, 'image', None):
file_system_helper = dfvfs_helpers.ParseDFVFSCLIArguments(options)
if not file_system_helper:
print('No supported file system found in storage media image.')
print('')
return False
else:
if not options.source:
print('Source file missing.')
print('')
argument_parser.print_help()
print('')
return False
file_system_helper = file_system.NativeFileSystemHelper()
output_writer = output_writers.StdoutWriter()
try:
output_writer.Open()
except IOError as exception:
print(f'Unable to open output writer with error: {exception!s}')
print('')
return False
file_object = file_system_helper.OpenFileByPath(options.source)
if not file_object:
print('Unable to open source file.')
print('')
return False
try:
is_olecf = pyolecf.check_file_signature_file_object(file_object)
finally:
file_object.close()
if is_olecf:
jump_list_file = jump_list.AutomaticDestinationsFile(
debug=options.debug, file_system_helper=file_system_helper,
output_writer=output_writer)
else:
jump_list_file = jump_list.CustomDestinationsFile(
debug=options.debug, file_system_helper=file_system_helper,
output_writer=output_writer)
jump_list_file.Open(options.source)
jump_list_entries = list(jump_list_file.GetJumpListEntries())
print('Windows Jump List information:')
number_of_entries = len(jump_list_entries)
print(f'Number of entries:\t\t{number_of_entries:d}')
print('')
for jump_list_entry in jump_list_entries:
print(f'Entry: {jump_list_entry.identifier:s}')
print_header = True
for shell_item in jump_list_entry.GetShellItems():
if print_header:
print('\tShell items:')
print_header = False
print(f'\t\t0x{shell_item.class_type:02x}')
print_header = True
for format_identifier, property_record in jump_list_entry.GetProperties():
if print_header:
print('\tProperties:')
print_header = False
print(f'\t\t{{{format_identifier:s}}}/{property_record.entry_type:d}')
print('')
jump_list_file.Close()
output_writer.Close()
return True
if __name__ == '__main__':
if not Main():
sys.exit(1)
else:
sys.exit(0)
|
for num in range(1500, 20001):
    if num % 7 == 0 or num % 5 == 0:
        print(num)
|
# Framework using SQLite.
import sqlite3
active = 'A'
inactive = 'I'
status = 'Status'
connection = sqlite3.connect('Ram.db')
generalTable = connection.execute('select * from general_configurations')
menuConfigurations = []
def getGeneralConfigurationValue(configurationkey):
return generalConfigurations[configurationkey]
def getRecord(Id):
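    # NOTE: the query below is assembled by string concatenation; parameterized
    # queries (connection.execute(sql, params)) would be safer against injection.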
data = connection.execute('select * from ' + dataTableName + ' where ' + columnNames[0] + ' = ' + Id + ' and ' + columnNames[-1] + ' = "' + active + '"')
record = data.fetchall()
if(len(record) != 0):
for line in record:
printRecord(line)
else:
print(getGeneralConfigurationValue('IdNotFound'))
printNewLine()
showMenu()
def printNewLine():
print()
def printRecord(record):
for (fieldName, fieldValue) in zip(fieldConfigurations, record):
if(fieldName != status):
print(fieldName + ': ' + str(fieldValue))
def create():
printNewLine()
print(getGeneralConfigurationValue('Create'))
query = 'insert into ' + dataTableName + ' values ("'
for field in fieldConfigurations:
if(field != status):
userInput = input(field + ": ")
query = query + userInput + '", "'
else:
query = query + active + '")'
connection.execute(query)
connection.commit()
print(getGeneralConfigurationValue('Created'))
printNewLine()
def view():
data = connection.execute('select * from ' + dataTableName + ' where ' + columnNames[-1] + ' = "' + active + '"')
printNewLine()
print(getGeneralConfigurationValue('View'))
for record in data:
printRecord(record)
printNewLine()
def update():
printNewLine()
print(getGeneralConfigurationValue('Update'))
userId = input('Enter ' + fieldConfigurations[0] + ': ')
printNewLine()
getRecord(userId)
printNewLine()
for counter in range(1, len(fieldConfigurations)):
if (fieldConfigurations[counter] != status):
print(str(counter) + ') ' + fieldConfigurations[counter])
userChoice = int(input(getGeneralConfigurationValue('Choice') + ': '))
updateValue = input(fieldConfigurations[userChoice] + ': ')
connection.execute('update ' + dataTableName + ' set ' + columnNames[userChoice] + ' = ' + updateValue + ' where ' + columnNames[0] + ' = ' + userId)
connection.commit()
print(getGeneralConfigurationValue('Updated'))
printNewLine()
def delete():
printNewLine()
print(getGeneralConfigurationValue('Delete'))
userId = input('Enter ' + fieldConfigurations[0] + ': ')
printNewLine()
getRecord(userId)
printNewLine()
confirmation = input(getGeneralConfigurationValue('Confirmation') + ': ')
if(confirmation == 'y'):
connection.execute('update ' + dataTableName + ' set ' + columnNames[-1] + ' = "' + inactive + '" where ' + columnNames[0] + ' = ' + userId)
connection.commit()
print(getGeneralConfigurationValue('Deleted'))
printNewLine()
def search():
printNewLine()
print(getGeneralConfigurationValue('Search'))
userId = input('Enter ' + fieldConfigurations[0] + ': ')
printNewLine()
getRecord(userId)
printNewLine()
def exitProgram():
exit()
def showMenu():
    while True:
        for line in menuConfigurations:
            print(line)
        userChoice = int(input(getGeneralConfigurationValue('Choice') + ': '))
        if(userChoice > 0 and userChoice < 7):
            [create, view, update, delete, search, exitProgram][userChoice - 1]()
        else:
            print('Invalid choice, please enter a number between 1 and 6')
# Load menu text and configuration at module level before the menu is first
# shown; showMenu() and the CRUD functions rely on these globals.
with open('menu.cfg') as fpFile:
    for lineWithNewLine in fpFile:
        lineWithoutNewLine = lineWithNewLine.replace('\n', '')
        menuConfigurations.append(lineWithoutNewLine)
generalConfigurations = {}
for line in generalTable:
    generalConfigurations[line[0]] = line[1]
dataTableName = generalConfigurations['DataTableName']
pragmaQuery = 'pragma table_info("' + dataTableName + '")'
fieldDetails = connection.execute(pragmaQuery)
fieldConfigurations = []
columnNames = []
for line in fieldDetails:
    columnNames.append(line[1])
    text = line[1].replace('_', ' ')
    fieldConfigurations.append(text)
showMenu()
|
from nameko.rpc import rpc
from model_provider import Agent
class ChitChat(object):
name = 'chitchat'
a = Agent()
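    # Class-level attribute: a single shared Agent (model) instance serves
    # every RPC call handled by this service.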
@rpc
def predict(self, phrase=None, session=0):
with self.a['graph'].as_default():
answer = self.a['agent'].send(msg=phrase, agent_id=session)
print("Session {}: {}".format(session, answer))
return answer
@rpc
def init_session(self, session=0):
print("Session {} initialized".format(session))
self.a['agent'].deploy_agent(agent_id=session, reset=True)
|
# coding=utf-8
from var_dump import var_dump as vd
import const
from pprint import pprint as pp
import reader # CSV Files
import os.path
class Converter:
    '''
    Convert specific files into "SELECT INTO ..." format
    '''
def __init__(self, file_name, file_reader, file_writer):
self.file_name = file_name
self.file_reader = file_reader
self.file_writer = file_writer
    def load_file(self):
        self.lines = self.file_reader.read(self.file_name)  # keep lines for save_file()
        return self.lines
def save_file(self):
self.file_writer.write(self.file_name, self.lines)
class Selector:
'''
Class for selecting engines
'''
    def __init__(self, file_reader, file_name):  # was misspelled as __int__ and never ran
self.file_reader = file_reader
self.file_name = file_name
self.output_type = const.output_type
self.input_type = const.input_type
    def select(self):
        first_line = self.file_reader.read(self.file_name)[0]  # reader is an object, not a callable
        if first_line == self.output_type:
            return const.export_header_line
        if first_line == self.input_type:
            return const.import_header_line
class FileReader:
'''
Reading files
'''
def __init__(self, file_checker):
self.file_checker = file_checker
def read(self, file_name):
self.file_checker.check(file_name)
with open(file_name, 'r') as f:
return list(f.readlines())
class FileWriter:
    '''
    Write result file
    '''
    def write(self, file_name, lines):  # renamed from save() to match Converter.save_file()
        with open(file_name, 'w') as f:
            f.writelines(lines)  # writelines takes only the lines iterable
class FileChecker:
'''
Checking existing target file
'''
def check(self, file_name):
if not os.path.exists(file_name):
raise IOError("File not found.")
def main():
file = 'import_small.csv'
file_checker = FileChecker()
file_writer = FileWriter()
file_reader = FileReader(file_checker)
converter = Converter(file, file_reader, file_writer)
vd(converter.load_file())
# pprint(converter.load_file())
if __name__ == '__main__':
main()
|
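# Naive Sudoku solver by constraint propagation: every empty cell keeps a list
# of candidate digits, and repeated box/row/column passes remove candidates
# until each cell is down to exactly one value.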
boardList = [[[4], [2], [], [], [], [3], [8], [], []],
[[], [], [3], [4], [], [], [2], [7], []],
[[], [8], [], [], [2], [5], [9], [3], [4]],
[[5], [], [1], [], [4], [], [], [], []],
[[], [], [], [5], [], [7], [], [], []],
[[], [], [], [], [6], [], [1], [], [3]],
[[3], [1], [2], [8], [7], [], [], [4], []],
[[], [5], [9], [], [], [4], [7], [], []],
[[], [], [4], [9], [], [], [], [6], [1]]]
validAnswer = False
while validAnswer == False:
for rowIndex, row in enumerate(boardList):
for columnIndex, column in enumerate(row):
if len(boardList[rowIndex][columnIndex]) == 0:
#Add all possible values to array
for x in range(9):
boardList[rowIndex][columnIndex].append(x + 1)
if rowIndex > 2 and rowIndex < 6:
tempRowSearch = 3
elif rowIndex > 5:
tempRowSearch = 6
else:
tempRowSearch = 0
if columnIndex > 2 and columnIndex < 6:
tempColumnSearch = 3
            elif columnIndex > 5:  # was rowIndex, a copy-paste bug
tempColumnSearch = 6
else:
tempColumnSearch = 0
saveTemp = tempColumnSearch
saveRowTemp = tempRowSearch
# Box Search
for tempRowSearch in range(tempRowSearch,tempRowSearch + 3):
tempColumnSearch = saveTemp
for tempColumnSearch in range(tempColumnSearch,tempColumnSearch + 3):
if len(boardList[tempRowSearch][tempColumnSearch]) != 1:
continue
elif boardList[tempRowSearch][tempColumnSearch][0] in boardList[rowIndex][columnIndex]:
boardList[rowIndex][columnIndex].remove(boardList[tempRowSearch][tempColumnSearch][0])
#Column Search
for tempColumnSearch in range(9):
if len(boardList[rowIndex][columnIndex]) == 1:
break
if len(boardList[rowIndex][tempColumnSearch]) != 1:
continue
elif boardList[rowIndex][tempColumnSearch][0] in boardList[rowIndex][columnIndex]:
boardList[rowIndex][columnIndex].remove(boardList[rowIndex][tempColumnSearch][0])
#Row Search
for tempRowSearch in range(9):
if len(boardList[rowIndex][columnIndex]) == 1:
break
if(len(boardList[tempRowSearch][columnIndex]) != 1):
continue
elif boardList[tempRowSearch][columnIndex][0] in boardList[rowIndex][columnIndex]:
boardList[rowIndex][columnIndex].remove(boardList[tempRowSearch][columnIndex][0])
#Box Search for unique values
tempRowSearch = saveRowTemp
for tempRowSearch in range(tempRowSearch,tempRowSearch + 3):
tempColumnSearch = saveTemp
for tempColumnSearch in range(tempColumnSearch,tempColumnSearch + 3):
                    if len(boardList[tempRowSearch][tempColumnSearch]) > 1 and not (tempRowSearch == rowIndex and tempColumnSearch == columnIndex):
boardList[rowIndex][columnIndex] = list(set(boardList[rowIndex][columnIndex]) - set(boardList[tempRowSearch][tempColumnSearch]))
possiblesCount = 0
for row in boardList:
for column in row:
if len(column) != 1:
possiblesCount += 1
if possiblesCount == 0:
validAnswer = True
print(boardList)
|
import random
# noinspection PyUnresolvedReferences
from six.moves import range
def partition_string(s, segments):
"""
Partition a string into a number of segments. If the given number of
segments does not divide evenly into the string's length, extra characters
are added to the leading segments in order to allow for the requested number
of segments.
This is useful when partitioning the checksum of a file to determine where
it should be placed in a directory tree system.
>>> partition_string("foobar", 2)
['foo', 'bar']
>>> partition_string("foobar", 4)
['fo', 'ob', 'a', 'r']
>>> partition_string("foobar", 6)
['f', 'o', 'o', 'b', 'a', 'r']
If the string is not evenly divisible by the requested number of segments,
then the length of trailing segments will be shorter than leading segments.
>>> partition_string('d7ca25c5-b886-4a1b-87fe-5945313d350b', 11)
['d7ca', '25c5', '-b88', '6-4', 'a1b', '-87', 'fe-', '594', '531', '3d3',
'50b']
>>> partition_string('abcde', 2)
['abc', 'de']
>>> partition_string('abcde', 4)
['ab', 'c', 'd', 'e']
If the number of segments is greater than the number of characters in the
input string, an assertion error is raised.
>>> partition_string('a', 2)
Traceback (most recent call last):
...
AssertionError: Cannot split given string into more segments than there are
characters in the string!
:raises AssertionError: Segmentation value greater than the length of the
string.
:param s: String to partition.
:type s: str
:param segments: Number of segments to split the string into
:type segments: int
    :return: A list of N segments. If the given number of segments does not
        divide evenly into the string's length, extra characters are added to
        the leading segments as described above.
    :rtype: list[str]
"""
assert segments <= len(s), \
"Cannot split given string into more segments than there are " \
"characters in the string!"
seg_len = len(s) // segments
extra_iters = len(s) % segments
r = []
i = 0
for k in range(segments):
j = i + seg_len + (k < extra_iters)
r.append(s[i:j])
i = j
return r
DEFAULT_CHAR_SET = \
'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
def random_characters(n, char_set=DEFAULT_CHAR_SET):
"""
Return ``n`` random characters from the given ``char_set``.
    If ``n`` is a floating point value, it is cast to an integer (floor).
The default ``char_set`` includes a-z, A-Z and 0-9.
:param n: Number of random characters to return.
:type n: int
:param char_set: Sequence of characters to pull from when constructing
random sequence.
:type char_set: str | unicode
:return: New string of random characters of length ``n`` from the given
``char_set``.
:rtype: str | unicode
:raises ValueError: If ``char_set`` given is empty, or ``n`` is negative.
"""
n = int(n)
    if n < 0:
        raise ValueError("n must be a non-negative integer.")
L = len(char_set)
if L == 0:
raise ValueError("Empty char_set given.")
return ''.join(char_set[random.randint(0, L - 1)] for _ in range(n))
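if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module):
    print(partition_string("foobar", 4))  # -> ['fo', 'ob', 'a', 'r']
    print(random_characters(8))           # e.g. 'aZ3kQ9xP'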
|
# -*- coding: utf-8 -*-
import re
import sys
import os
import pickle
import numpy as np
from collections import Counter
from os import listdir
from os.path import isfile, join
##################################################################################################################
def hasNumbers(inputString):
return any(char.isdigit() for char in inputString)
def InfoExtraction(f, codeset=set(), dllset=set()):
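    # Walks an IDA-style .asm listing and collects: a Counter of instruction
    # mnemonics from code sections, a histogram of .data variable sizes
    # (1-32 bytes), and the DLLs imported via .idata. The mutable default
    # arguments are deliberate accumulators shared across calls.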
cnt = Counter()
dlllist = []
idataVarSizeArray = np.zeros(32)
Context = f.readlines()
for i,line in enumerate(Context):
        if len(line) < 18: # too short to hold anything useful
continue
line = line.replace(' ','\t',1) # Compatibility format
if '.text:' == line[0:6] or 'CODE:' == line[0:5] or 'seg' == line[0:3]:
if ' ptr ' in line and '=' in line:
if '=' != line.split('\t')[-1].strip()[0]:
txt = '1'+line.split('\t')[-1].strip()
else:
txt = '1'+line.split('\t')[-2].strip() + ' ' + line.split('\t')[-1].strip() # Only keep the assembly part and not everything on the left
else:
txt = line.split('\t')[-1].strip() # Only keep the assembly part and not everything on the left
uselessflag = 0
if re.search('^;', txt): # Most likely a comment
uselessflag = 1
if 'db' in txt: # Painful to watch
uselessflag = 1
if 'dd' in txt: # Painful to watch
uselessflag = 1
if 'align' in txt: # Painful to watch
uselessflag = 1
if 'proc near' in txt or 'endp' in txt:
uselessflag = 1
if 'sub_' in txt or 'loc_' in txt:
uselessflag = 1
if '(' in txt or ')' in txt:
uselessflag = 1
element = txt.split(' ')
# Trying to only keep basic instructions like: mov, lea, jmp, etc..
if uselessflag==0 and len(element[0]) < 6 and not hasNumbers(element[0]) and not re.search('[ 0123456789/?%":\|\.\*_\(\)\[\]]', element[0]) and element[0].strip() != '':
if element[0] == 'dw' or element[0] == 'TO' or element[0] == 'extrn' or element[0] == 'PRESS':
continue
else:
codeset.add(element[0])
cnt[element[0]] += 1
if '.data:' == line[0:6]:
if '; DATA XREF:' in line:
idataNameMat = re.search('([_\w]+)[ \t]+(db|dw|dd)[ \t]+\w+',line)
if idataNameMat:
#print(idataNameMat.groups()[0])
#print(idataNameMat.group())
if re.search('([_\w]+)[ \t]+db[ \t]+\w+',line):
idataVarSize = 1
elif re.search('([_\w]+)[ \t]+dw[ \t]+\w+',line):
idataVarSize = 2
elif re.search('([_\w]+)[ \t]+dd[ \t]+\w+', line):
idataVarSize = 4
for k in range(1,33):
                        # stop if we would read past the end of the file
if i+k+1 > len(Context):
break
                        # various skip / stop conditions
if 'db' not in Context[i+k] and 'dw' not in Context[i+k] and 'dd' not in Context[i+k]:
                            # useless line, skip it
continue
if '; DATA XREF:' in Context[i+k]:
break
if 'align' in Context[i+k] or 'ends' in Context[i+k]:
break
if k==32:
idataVarSize = k
break
if re.search('([_\w]+)[ \t]+db[ \t]+\w+', Context[i+k]):
idataVarSize = idataVarSize + 1
elif re.search('([_\w]+)[ \t]+dw[ \t]+\w+', Context[i+k]):
idataVarSize = idataVarSize + 2
elif re.search('([_\w]+)[ \t]+dd[ \t]+\w+', Context[i+k]):
idataVarSize = idataVarSize + 4
if idataVarSize > 32:
idataVarSize = 32
break
#print(idataVarSize)
idataVarSizeArray[idataVarSize-1] = idataVarSizeArray[idataVarSize-1] + 1
if '.idata:' == line[0:7]:
importMat = re.search('Imports from' + '(.+)', line)
if importMat:
dllset.add(importMat.groups()[0].strip())
dlllist.append(importMat.groups()[0].strip())
return cnt, idataVarSizeArray , dlllist
##################################################################################################################
benginPath = "Benign_ML_set/train/"
malwarePath = "Malware_ML_Set/train/"
list_bengin_files = [f for f in listdir(benginPath) if isfile(join(benginPath, f))]
list_malware_files = [f for f in listdir(malwarePath) if isfile(join(malwarePath, f))]
set_bengin_files = set([os.path.splitext(el)[0] for el in list_bengin_files])
set_malware_files = set([os.path.splitext(el)[0] for el in list_malware_files])
benginId_list = list(set_bengin_files)
malwareId_list = list(set_malware_files)
nb_sample = len(benginId_list) + len(malwareId_list)
X = [0] * nb_sample
Y = np.array([0]*len(benginId_list) + [1]*len(malwareId_list))
LineC = [0] * nb_sample
dllcollect = [0] * nb_sample
idataVSize = np.zeros((nb_sample, 32))
s_word = set()
dllset = set()
for i, benginId in enumerate(benginId_list):
f = open(benginPath + benginId + '.asm', 'r', encoding='utf-8')
print('benign:', benginId, 'num = ', i, '/', len(benginId_list))
X[i] , idataVSize[i] , dllcollect[i] = InfoExtraction(f, s_word ,dllset)
if not i % 10:
sys.stdout.write("\r%d / %d \n" % (i, len(s_word)))
sys.stdout.flush()
f.close()
for i, malwareId in enumerate(malwareId_list):
f = open(malwarePath + malwareId + '.asm', 'r', encoding='utf-8')
print('malware:', malwareId, 'num = ', i, '/', len(malwareId_list))
X[i+len(benginId_list)] , idataVSize[i+len(benginId_list)] , dllcollect[i+len(benginId_list)] = InfoExtraction(f, s_word ,dllset)
if not i % 10:
sys.stdout.write("\r%d / %d \n" % (i, len(s_word)))
sys.stdout.flush()
f.close()
sys.stdout.write("\r%d" % i)
sys.stdout.flush()
outfile1 = open('BMsave/set_word_asm', 'w+b')  # forward slashes avoid invalid "\s" escapes
pickle.dump(s_word, outfile1)
outfile1.close()
outfile1 = open('BMsave/X_data_asm', 'w+b')
pickle.dump(X, outfile1)
outfile1.close()
outfile1 = open('BMsave/Y_data_asm', 'w+b')
pickle.dump(Y, outfile1)
outfile1.close()
outfile1 = open('BMsave/idataVSize', 'w+b')
pickle.dump(idataVSize, outfile1)
outfile1.close()
outfile1 = open('BMsave/dllset', 'w+b')
pickle.dump(dllset, outfile1)
outfile1.close()
outfile1 = open('BMsave/dllcollect', 'w+b')
pickle.dump(dllcollect, outfile1)
outfile1.close()
#print(s_word)
#print(X)
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
# @File:User_CF.py
# @Author: Michael.liu
# @Date:2020/6/19 13:37
# @Desc: this code is ....
import argparse
from pyspark.sql import SparkSession
from pyspark import SparkConf, SparkContext
import pyspark.sql.types as T
def UserCF():
print("...begin...")
if __name__ == '__main__':
print("....begin....")
spark = SparkSession \
.builder \
.appName("User_CF") \
.getOrCreate()
lines = spark.read.text("").rdd
|
# -*- coding: utf8 -*-
#!env python
import subprocess
import os
import codecs
import re
import datetime
import time
now = str(int(time.mktime(datetime.datetime.now().timetuple())))
# minify the HTML files
targets = ['index', 'top', 'cat', 'search','subject']
for target in targets:
file = os.path.abspath(target+'-dev.html')
    outFile = os.path.abspath(target + '.html')
fp = codecs.open(file, 'r', 'UTF-8')
    outFp = codecs.open(outFile, 'w+', 'UTF-8')
content = fp.read()
content = re.sub(r'href="src/css/(.*)\.css"','href="src/css/\g<1>.min.css?t=' + now + '"',content)
content = re.sub(r'src="(.*)\.js"','src="\g<1>.min.js?t=' + now + '"',content)
    # strip whitespace from the HTML (disabled)
#content = re.sub(r'\s*\r?\n\s*', '', content)
#content = re.sub(r'<!--.*?-->', '', content)
    # rewrite navigation links to point at the minified pages
content = re.sub(r'"(.*?)-dev\.html"','"\g<1>.html"',content)
outFp.write(content)
fp.close()
outFp.close()
|
from selenium.webdriver.common.by import By
class HomePage:
def __init__(self, driver):
self.driver = driver
Search = (By.XPATH, "//*[@type='search']")
AddToCart = (By.XPATH, "//button[text()='ADD TO CART']")
Cart_Icon = (By.XPATH, "//*[@class='cart-icon']/img")
Proceed_ToCheckOut = (By.XPATH, "//button[text()='PROCEED TO CHECKOUT']")
def SearchBar(self):
return self.driver.find_element(*HomePage.Search)
def AddToCartItem(self):
return self.driver.find_elements(*HomePage.AddToCart)
def CartIcon(self):
return self.driver.find_element(*HomePage.Cart_Icon)
def ProceedToCheckOut(self):
return self.driver.find_element(*HomePage.Proceed_ToCheckOut)
|
from kivy.lang import Builder
from kivy.properties import ObjectProperty
from kivymd.app import MDApp
from kivymd.uix.boxlayout import MDBoxLayout
KV = '''
<ContentNavigationDrawer>:
ScrollView:
MDList:
OneLineListItem:
text: "Список учеников"
on_press:
root.nav_drawer.set_state("close")
root.screen_manager.current = "scr 1"
OneLineListItem:
text: "Добавить ученика"
on_press:
root.nav_drawer.set_state("close")
root.screen_manager.current = "scr 2"
OneLineListItem:
text: "Кто ест, а кто нет"
on_press:
root.nav_drawer.set_state("close")
root.screen_manager.current = "scr 3"
MDScreen:
MDToolbar:
id: toolbar
pos_hint: {"top": 1}
elevation: 10
title: "Организация питания"
left_action_items: [["menu", lambda x: nav_drawer.set_state("open")]]
MDNavigationLayout:
x: toolbar.height
ScreenManager:
id: screen_manager
MDScreen:
name: "scr 1"
MDLabel:
text: "Тут список учеников"
halign: "center"
MDScreen:
name: "scr 2"
MDLabel:
text: "Тут функция добавления ученика"
halign: "center"
MDTextField:
hint_text: "Имя"
pos_hint: {"center_y": .45}
MDTextField:
hint_text: "Фамилия"
pos_hint: {"center_y": .4}
MDTextField:
hint_text: "Счет"
pos_hint: {"center_y": .35}
MDFlatButton:
id: button_create
text: "Добавить"
pos_hint: {"center_x": .5, "center_y": .30}
MDScreen:
name: "scr 3"
MDLabel:
text: "Тут таблица кто ест, а кто нет"
halign: "center"
MDNavigationDrawer:
id: nav_drawer
ContentNavigationDrawer:
screen_manager: screen_manager
nav_drawer: nav_drawer
'''
class ContentNavigationDrawer(MDBoxLayout):
screen_manager = ObjectProperty()
nav_drawer = ObjectProperty()
class TestNavigationDrawer(MDApp):
    def anything(self):
        # The widget tree returned by build() is available as self.root;
        # on_press() dispatches the event rather than testing for a press,
        # so bind a callback to react to clicks.
        btn = self.root.ids.button_create
        btn.bind(on_press=lambda *args: print("anything"))
def build(self):
return Builder.load_string(KV)
TestNavigationDrawer().run()
|
import _plotly_utils.basevalidators
class BoxsValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
def __init__(
self, plotly_name='box', parent_name='layout.template.data', **kwargs
):
super(BoxsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop('data_class_str', 'Box'),
data_docs=kwargs.pop('data_docs', """
"""),
**kwargs
)
|
# import time
# import os
# import automationhat
# import Adafruit_DHT
# from balena import Balena
# import json
# import paho.mqtt.client as mqtt
#
# class PlantSaver:
#
# def __init__(self):
#
# self.client = mqtt.Client("1")
#
# # Variables
# self.dht_sensor = Adafruit_DHT.DHT22
# self.dht_pin = int(self.set_variable("dht_pin", 11))
# self.max_value = float(self.set_variable("max_value", 2.77))
# self.min_value = float(self.set_variable("min_value", 1.46))
# self.target_soil_moisture = int(self.set_variable("target_soil_moisture", 60))
# self.target_soil_threshold = int(self.set_variable("target_soil_threshold", 15))
# self.pump_delay = int(self.set_variable("pump_delay", 15))
#
# # Initial status
# self.status = 'Starting'
# self.status_code = 0
# self.moisture_level = None
# self.pumping = False
# self.temperature = 0
# self.humidity = 0
#
# # set up an instance of the SDK - used for updating device tags
# self.balena = Balena()
# self.balena.auth.login_with_token(os.environ['BALENA_API_KEY'])
#
# # Checks if there is an environment variable set, otherwise save the default value
# def set_variable(self, name, default_value):
# if name in os.environ:
# self.value = os.environ.get(name)
# else:
# self.value = default_value
# return self.value
#
# def read_moisture(self):
# self.moisture_level= 100-(automationhat.analog.one.read()-self.min_value)/((self.max_value-self.min_value)/100)
#
# def read_temperature_humidity(self):
# self.humidity, self.temperature = Adafruit_DHT.read_retry(self.dht_sensor, self.dht_pin)
#
# def update_sensors(self):
# self.read_moisture()
# self.read_temperature_humidity()
# self.read_float_switch()
#
# # Take a reading from the float switch and update the vars
# def read_float_switch(self):
# self.water_left = not bool(automationhat.input.one.read())
#
# # Update the device tags with the moisture level and the status on balenaCloud
# # This means that you'll be able to see the status of the plant from the dashboard
# def update_device_tags(self):
# self.balena.models.tag.device.set(os.environ['BALENA_DEVICE_UUID'], 'Status', str(self.status))
# moisture_string = str(round(self.moisture_level,2))+'%'
# self.balena.models.tag.device.set(os.environ['BALENA_DEVICE_UUID'], 'Moisture', moisture_string)
#
# # Store the current instance measurements within InfluxDB
# def write_measurements(self):
#
# self.client.connect("localhost")
#
# measurements = [
# {
# 'measurement': 'plant-data',
# 'fields': {
# 'moisture': float(self.moisture_level),
# 'pumping': int(self.pumping),
# 'water_left': int(self.water_left),
# 'status': int(self.status_code),
# 'temperature': float(self.temperature),
# 'humidity': float(self.humidity)
# }
# }
# ]
# msgInfo = self.client.publish("sensors", json.dumps(measurements))
# if False == msgInfo.is_published():
# msgInfo.wait_for_publish()
# self.client.disconnect()
#
# # Generate a status string so we have something to show in the logs
# # We also generate a status code which is used in the front end UI
# def update_status(self):
# if self.moisture_level < self.target_soil_moisture-self.target_soil_threshold:
# status = 'Too dry'
# self.status_code = 1
# elif self.moisture_level > self.target_soil_moisture+self.target_soil_threshold:
# status = 'Too wet'
# self.status_code = 2
# else:
# status = 'OK'
# self.status_code = 3
#
# if self.pumping:
# status = status + ', pump on'
# else:
# status = status + ', pump off'
#
# if not self.water_left:
# status = status + ', water low'
# else:
# status = status + ', water normal'
#
# self.status = status
#
# # Pump water
# def pump_water(self, action):
# if action == True:
# automationhat.relay.one.on()
# self.pumping = True
# else:
# automationhat.relay.one.off()
# self.pumping = False
#
# # Refresh the relevant things - designed to be run once every 10 seconds
# def tick(self):
# self.update_sensors()
# self.update_status()
# self.write_measurements()
|
# encoding=utf8
# \xc3\x93 -> O ; \xc2\xa0 -> "" ; \xc3\x91 -> Ñ
# from procesos.bancolombia_castigada import ejecutar_query
from google.cloud import bigquery
def makeTrans(listaHeader):
standarizedHeaders = []
my_query = ''' SELECT *
FROM `contento-bi.MetLife.base_campos_matriculados_bd_iniciales`
'''
client = bigquery.Client()
query_job = client.query(my_query)
    diccionario = query_job.result()  # run the query on BigQuery
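    # Assumes the lookup table has exactly two columns, so each row unpacks
    # into an (original_header, standardized_header) pair for dict().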
rows = dict(diccionario)
# print(rows)
for i in listaHeader:
for n, m in rows.items():
if i == n :
standarizedHeaders.append(m)
else :
continue
return standarizedHeaders
|
"""empty message
Revision ID: b5a45440b11d
Revises: 44ac09b089b6
Create Date: 2020-03-24 11:52:57.802979
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'b5a45440b11d'
down_revision = '44ac09b089b6'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('lable__data',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('source_id', sa.Integer(), nullable=True),
sa.Column('leval', sa.Integer(), nullable=True),
sa.Column('cost_perform', sa.Integer(), nullable=True),
sa.Column('appearance', sa.Integer(), nullable=True),
sa.Column('applicability', sa.Integer(), nullable=True),
sa.Column('laber', sa.String(length=16), nullable=True),
sa.Column('is_labled', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_lable__data_is_labled'), 'lable__data', ['is_labled'], unique=False)
op.create_index(op.f('ix_lable__data_laber'), 'lable__data', ['laber'], unique=False)
op.create_index(op.f('ix_lable__data_source_id'), 'lable__data', ['source_id'], unique=False)
op.drop_index('ix_lable_evalution_is_labled', table_name='lable_evalution')
op.drop_index('ix_lable_evalution_laber', table_name='lable_evalution')
op.drop_index('ix_lable_evalution_source_id', table_name='lable_evalution')
op.drop_table('lable_evalution')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('lable_evalution',
sa.Column('id', mysql.INTEGER(display_width=11), autoincrement=True, nullable=False),
sa.Column('source_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('leval', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('cost_perform', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('appearance', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('applicability', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('laber', mysql.VARCHAR(length=16), nullable=True),
sa.Column('is_labled', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id'),
mysql_default_charset='utf8mb4',
mysql_engine='InnoDB'
)
op.create_index('ix_lable_evalution_source_id', 'lable_evalution', ['source_id'], unique=False)
op.create_index('ix_lable_evalution_laber', 'lable_evalution', ['laber'], unique=False)
op.create_index('ix_lable_evalution_is_labled', 'lable_evalution', ['is_labled'], unique=False)
op.drop_index(op.f('ix_lable__data_source_id'), table_name='lable__data')
op.drop_index(op.f('ix_lable__data_laber'), table_name='lable__data')
op.drop_index(op.f('ix_lable__data_is_labled'), table_name='lable__data')
op.drop_table('lable__data')
# ### end Alembic commands ###
|
# Generated by Django 2.2.5 on 2019-10-03 19:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0003_user_is_staff'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='is_staff',
),
migrations.AddField(
model_name='user',
name='admin',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='user',
name='staff',
field=models.BooleanField(default=False),
),
]
|
# Astrocrash 1
# Get asteroids moving on the screen
import random, math
from livewires import games, color
games.init(screen_width = 640, screen_height = 480, fps = 50)
class Wrapper(games.Sprite):
"""A sprite that wraps around the screen"""
def update(self):
"""Wrap sprite around screen."""
if self.top > games.screen.height:
self.bottom = 0
if self.bottom < 0:
self.top = games.screen.height
if self.left > games.screen.width:
self.right = 0
if self.right < 0:
self.left = games.screen.width
def die(self):
"""destroy self"""
self.destroy()
class Collider(Wrapper):
"""A Wrapper that can collide with another object."""
def update(self):
"""Check for overlapping sprites"""
super(Collider, self).update()
if self.overlapping_sprites:
for sprite in self.overlapping_sprites:
sprite.die()
self.die()
def die(self):
"""Destroy self and leave explosion behind. """
new_explosion = Explosion(x = self.x, y = self.y)
games.screen.add(new_explosion)
self.destroy()
class Explosion(games.Animation):
"""Explosion animation. """
sound = games.load_sound("explosion.wav")
images = ["explosion1.bmp",
"explosion2.bmp",
"explosion3.bmp",
"explosion4.bmp",
"explosion5.bmp",
"explosion6.bmp",
"explosion7.bmp",
"explosion8.bmp",
"explosion9.bmp"]
def __init__(self, x, y):
super(Explosion, self).__init__(images = Explosion.images,
x = x, y = y,
repeat_interval = 4, n_repeats = 1,
is_collideable = False)
Explosion.sound.play()
class Asteroid(Wrapper):
""" An asteroid which floats across the screen."""
SPAWN = 2
SMALL = 1
MEDIUM = 2
LARGE = 3
images = {SMALL : games.load_image("asteroid_small.bmp"),
MEDIUM : games.load_image("asteroid_med.bmp"),
LARGE : games.load_image("asteroid_big.bmp") }
SPEED = 2
POINTS = 30
total = 0
def __init__(self, game, x, y, size):
"""Initialize asteroid sprite. """
super(Asteroid, self).__init__(
image = Asteroid.images[size],
x = x, y = y,
dx = random.choice([1,-1]) * Asteroid.SPEED * random.random()/size,
dy = random.choice([1,-1]) * Asteroid.SPEED * random.random()/size)
self.size = size
Asteroid.total += 1
self.game = game
def die(self):
"""Destroy asteroid,then spawn debris"""
Asteroid.total -= 1
self.game.score.value += int(Asteroid.POINTS / self.size)
self.game.score.right = games.screen.width - 10
if self.size != Asteroid.SMALL:
for i in range(Asteroid.SPAWN):
new_asteroid = Asteroid(game = self.game,
x = self.x,
y = self.y,
size = self.size -1)
games.screen.add(new_asteroid)
super(Asteroid, self).die()
# if all asteroids are gone, advance to next level
if Asteroid.total == 0:
self.game.advance()
class Ship(Collider):
""" A moving ship."""
image = games.load_image("ship.bmp")
ROTATION_STEP = 3
VELOCITY_STEP = 0.03
MISSILE_DELAY = 25
VELOCITY_MAX = 3
sound = games.load_sound("thrust.wav")
ammo = 10
def __init__(self, game, x, y):
"""initialize a ship"""
super(Ship, self).__init__(image = Ship.image, x = x, y = y)
self.missile_wait = 0
self.game = game
def update(self):
"""Move ship based on keys pressed."""
super(Ship, self).update()
# if waiting until the ship can fire next, decrease wait
if self.missile_wait > 0:
self.missile_wait -= 1
if games.keyboard.is_pressed(games.K_UP):
Ship.sound.play()
angle = self.angle * math.pi / 180 # convert ship's angle to radians
self.dx += Ship.VELOCITY_STEP * math.sin(angle)
self.dy += Ship.VELOCITY_STEP * -math.cos(angle)
if games.keyboard.is_pressed(games.K_SPACE) and self.missile_wait == 0:
if self.ammo == 0:
Missile.none.play()
else:
new_missile = Missile(self.x, self.y, self.angle)
games.screen.add(new_missile)
self.missile_wait = Ship.MISSILE_DELAY
if games.keyboard.is_pressed(games.K_TAB):
            # append any pressed letter key to the current guess
            for letter in "abcdefghijklmnopqrstuvwxyz":
                if games.keyboard.is_pressed(getattr(games, "K_" + letter)):
                    Vocab.current_guess += letter
if games.keyboard.is_pressed(games.K_a) and games.keyboard.is_pressed(games.K_LALT):
Vocab.current_guess += "á"
if games.keyboard.is_pressed(games.K_e) and games.keyboard.is_pressed(games.K_LALT):
Vocab.current_guess += "é"
if games.keyboard.is_pressed(games.K_i) and games.keyboard.is_pressed(games.K_LALT):
Vocab.current_guess += "í"
if games.keyboard.is_pressed(games.K_o) and games.keyboard.is_pressed(games.K_LALT):
Vocab.current_guess += "ó"
if games.keyboard.is_pressed(games.K_u) and games.keyboard.is_pressed(games.K_LALT):
Vocab.current_guess += "ú"
if games.keyboard.is_pressed(games.K_n) and games.keyboard.is_pressed(games.K_LALT):
Vocab.current_guess += "ñ"
            if games.keyboard.is_pressed(games.K_BACKSPACE):
                Vocab.current_guess = Vocab.current_guess[:-1]  # drop the last character
if games.keyboard.is_pressed(games.K_RETURN):
                Vocab.check(Vocab)  # pass the class explicitly, matching the call below
if games.keyboard.is_pressed(games.K_SPACE) and games.keyboard.is_pressed(games.K_LALT):
Vocab.current_guess += " "
if games.keyboard.is_pressed(games.K_RIGHT):
self.angle += Ship.ROTATION_STEP
if games.keyboard.is_pressed(games.K_LEFT):
self.angle -= Ship.ROTATION_STEP
if games.keyboard.is_pressed(games.K_1):
self.angle = 0
if games.keyboard.is_pressed(games.K_2):
self.angle = 90
if games.keyboard.is_pressed(games.K_3):
self.angle = 180
if games.keyboard.is_pressed(games.K_4):
self.angle = 270
#cap velocity in each direction
self.dx = min(max(self.dx, -Ship.VELOCITY_MAX), Ship.VELOCITY_MAX)
self.dy = min(max(self.dy, -Ship.VELOCITY_MAX), Ship.VELOCITY_MAX)
        # check the current guess against the target word
Vocab.check(Vocab)
def die(self):
"""Destroy ship and end the game."""
self.game.end()
super(Ship, self).die()
class Missile(Collider):
""" A missile launched by the player's ship."""
image = games.load_image("missile.bmp")
sound = games.load_sound("missile.wav")
none = games.load_sound("no_missiles.wav")
BUFFER = 40
VELOCITY_FACTOR = 7
LIFETIME = 40
def __init__(self, ship_x, ship_y, ship_angle):
"""Initialize the missile sprite."""
Ship.ammo -= 1
Missile.sound.play()
# convert to radians
angle = ship_angle *math.pi / 180
# calculate missile's starting position
buffer_x = Missile.BUFFER * math.sin(angle)
buffer_y = Missile.BUFFER * -math.cos(angle)
x = ship_x + buffer_x
y = ship_y + buffer_y
# calculate missile's velocity components
dx = Missile.VELOCITY_FACTOR * math.sin(angle)
dy = Missile.VELOCITY_FACTOR * -math.cos(angle)
# create the missile
super(Missile, self).__init__(image = Missile.image,
x = x, y =y,
dx = dx, dy = dy)
self.lifetime = Missile.LIFETIME
def update(self):
super(Missile, self).update()
"""Move the missile"""
# if lifetime is up, destroy the missile
self.lifetime -= 1
if self.lifetime == 0:
self.destroy()
class Vocab(object):
"""Object that deals with vocab files"""
current_guess = ""
lines = list()
lines_len = 0
word_choice = ""
phrase = "Decode " + word_choice + " for Missiles: " + current_guess
language = 0 # set to even for first language = question
# set to odd for first language = answer
def __init__(self):
        try:
            vocab_file = open("vocab.txt", "r")
        except IOError:
            print("No file called 'vocab.txt'. Please add 'vocab.txt'")
            raise  # without the file there is nothing to study
        Vocab.lines = vocab_file.readlines()
        for line in Vocab.lines:
            print(line)
        Vocab.lines_len = len(Vocab.lines)
        self.new_word()
def new_word(self):
word_line = (random.randrange(0, Vocab.lines_len))/2 + Vocab.language
word_line = int(word_line)
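        # Note: this appears intended to pick alternating question/answer
        # lines (language 0 = even lines, 1 = odd), but as written it only
        # draws from roughly the first half of the file. A likely intended form:
        #   word_line = random.randrange(0, Vocab.lines_len // 2) * 2 + Vocab.language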
        # strip the trailing newline left by readlines(), or the guess could never match
        self.word_choice = self.lines[word_line].strip()
        self.phrase = "Decode " + self.word_choice + " for Missiles: " + self.current_guess
self.screen_guess = games.Text(value = self.phrase,
size = 30,
color = color.black,
x = 30,
y = games.screen.height-10,
is_collideable = False)
self.screen_guess.left = 5
games.screen.add(self.screen_guess)
def check(self):
if self.current_guess == self.word_choice:
Ship.ammo += 10
self.new_word(Vocab)
else:
self.phrase = "Decode " + self.word_choice + " for Missiles: " + self.current_guess
class Game(object):
"""The game itself. """
def __init__(self):
"""Initialize Game object. """
# set level
self.level = 0
#load sound for level advance
self.sound = games.load_sound("level.wav")
#create score
self.score = games.Text(value = 0,
size = 30,
color = color.white,
top = 5,
right = games.screen.width -10,
is_collideable = False)
games.screen.add(self.score)
#create the player's ship
self.ship = Ship(game = self,
x = games.screen.width/2,
y = games.screen.height/2)
games.screen.add(self.ship)
def play(self):
"""Play the game."""
#begin theme music
games.music.load("theme.mid")
games.music.play(-1)
#load and set background
nebula_image = games.load_image("nebula.jpg")
games.screen.background = nebula_image
#advance to level 1
self.advance()
# start play
games.screen.mainloop()
def advance(self):
"""Advance to the next game level"""
self.level += 1
#create buffer zone around ship forbidding asteroid spawn points
BUFFER = 150
# create asteroids
for i in range(self.level):
# calculate an x and y at least BUFFER distance from the ship
# choose minimum distance along x-axis and y-axis
x_min = random.randrange(BUFFER)
y_min = BUFFER - x_min
# choose a distance along x-axis and y-axis based on minimum distance
x_distance = random.randrange(x_min, games.screen.width - x_min)
y_distance = random.randrange(y_min, games.screen.height - y_min)
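            # x_distance + y_distance >= x_min + y_min = BUFFER, so before the
            # wrap below the spawn point is at least BUFFER away from the ship
            # in Manhattan distance (the wrap can bring it closer).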
# calculate location based on distance
x = self.ship.x + x_distance
y = self.ship.y + y_distance
# wrap around the screen if necessary
x %= games.screen.width
y %= games.screen.height
# create the asteroid
new_asteroid = Asteroid(game = self,
x = x, y = y,
size = Asteroid.LARGE)
games.screen.add(new_asteroid)
# display level number
level_message = games.Message(value = "Level " + str(self.level),
size = 40,
color = color.yellow,
x = games.screen.width/2,
y = games.screen.height/2,
lifetime = 3 * games.screen.fps,
is_collideable = False)
games.screen.add(level_message)
        # play new-level sound (except at the first level)
if self.level > 1:
self.sound.play()
def end(self):
"""End the game"""
# show game 'Game Over' for 5 seconds
end_message = games.Message(value = "Game Over",
size = 90,
color = color.red,
x = games.screen.width/2,
y = games.screen.height/2,
lifetime = 5*games.screen.fps,
after_death = games.screen.quit,
is_collideable = False)
games.screen.add(end_message)
def main():
study = Vocab()
astrocrash = Game()
astrocrash.play()
#kick it off!
main()
|
from UnitTesting.page_objects.base_page_object import base_page_object
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
import random
class sign_up(base_page_object):
def __init__(self, webd_wrap):
base_page_object.__init__(self, webd_wrap)
def get_page(self):
self._webd_wrap.open_page('/signup')
return self
def confirm_page(self):
self._webd_wrap.wait.until(EC.text_to_be_present_in_element((By.CLASS_NAME, 'header-1'), 'Fill in your'))
_actual_url = self._webd_wrap._driver.current_url
_url = self._webd_wrap._baseURL + '/signup'
if not _actual_url.startswith(_url):
raise AssertionError("Not on Sign Up page.")
########################################################################
########################################################################
def submit_new_member_info(self):
self.confirm_page()
_email = 'jay+' + str( random.randint(0, 100000000) ) + '@zolabooks.com'
self.register_email(_email)
self.register_password()
self.register_name()
self.register_birthday()
self.register_submit()
return _email
def register_email(self, email):
self._webd_wrap._driver.find_element_by_name("email").send_keys(email)
self._webd_wrap._driver.find_element_by_name("confirm_email").send_keys(email)
def register_password(self):
self._webd_wrap._driver.find_element_by_id("password").send_keys("password")
self._webd_wrap._driver.find_element_by_id("confirm_password").send_keys("password")
def register_name(self):
self._webd_wrap._driver.find_element_by_id("first_name").send_keys("Lin")
self._webd_wrap._driver.find_element_by_id("last_name").send_keys("Robinson")
def register_birthday(self):
self._webd_wrap.wait.until(EC.presence_of_element_located((By.ID, "bday_m")))
self._webd_wrap._driver.find_element_by_id("bday_m").send_keys("10")
self._webd_wrap._driver.find_element_by_id("bday_d").send_keys("26")
self._webd_wrap._driver.find_element_by_id("bday_y").send_keys("1990")
def register_submit(self):
self._webd_wrap._driver.find_element_by_name('submit').click()
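# Typical usage (assuming webd_wrap exposes _driver, wait, _baseURL and
# open_page as used above):
#   page = sign_up(webd_wrap).get_page()
#   email = page.submit_new_member_info()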
|
x=int(input("x= "))
y=int(input("y= "))
if ((x>0)and(y>0)or(x<0)and(y>0)or(x<0)and(y<0)or(x>0)and(y<0)):
print(3)
elif((x>0)and(y==0)or(x<0)and(y==0)):
print(1)
elif((x==0)and(y>0)or(x==0)and(y<0)):
print(2)
else:
print(0)
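# Worked examples for the classification above:
#   x=2,  y=3  -> 3 (strictly inside a quadrant)
#   x=-5, y=0  -> 1 (on the x-axis)
#   x=0,  y=7  -> 2 (on the y-axis)
#   x=0,  y=0  -> 0 (the origin)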
|
import sys, os, argparse
from Midi import MidiEvents
from Sender import Sender
MILLI_SEC = 0.001
MICRO_SEC = 0.000001
# default loop interval [sec]
DEFAULT_INTERVAL = 2.0 * MILLI_SEC
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', help = 'MIDI file path (*.mid)', required = True, type = str)
parser.add_argument('-i', '--interval', help = 'Interval time [sec]', default = DEFAULT_INTERVAL, type = float)
return parser.parse_args()
def main():
args = get_args()
path = args.file
interval = args.interval
if not os.path.exists(path):
print('{} does not exist.'.format(path))
sys.exit(1)
    # read MIDI events from the MIDI file
events = MidiEvents(path)
    # send the MIDI events
sender = Sender()
sender.send_events(events, interval)
    # show the sent MIDI events and their send times
sender.show_send_events_and_times()
if __name__ == '__main__':
main()
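# Example invocation (the script and MIDI file names are hypothetical):
#   python this_script.py --file song.mid --interval 0.001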
|
from tkinter import ttk ,messagebox
import os , json , tkinter as tk
def insert_into_list_box():
    course_name = string_course.get()
    if course_name and course_name not in listbox.get(0, 'end'):
        listbox.insert(tk.END, course_name)
    elif course_name:
        messagebox.showinfo("Adding Course", "{} already exists :)".format(course_name))
    else:
        messagebox.showinfo("Adding Course", "Please type a course name first :)")
    e_course.delete(0, 'end')
def delete_from_list_box():
selection = listbox.curselection()
if not selection : messagebox.showinfo("Deleting Course", "You have to select a course first :( ")
else : listbox.delete(selection)
def load_required_info():
required_info_path = os.path.join(os.path.dirname(__file__), 'required_info.json')
if os.path.isfile(required_info_path) and os.access(required_info_path, os.R_OK):
with open(required_info_path) as f: return json.loads(f.read())
else:
return {'linkedin_email':'','linkedin_password':'','courses_links':[""]}
def save_data():
required_info_path = os.path.join(os.path.dirname(__file__), 'required_info.json')
email = string_email.get()
password = string_pass.get()
courses = list(lb1_values.get())
data = {'linkedin_email':email,'linkedin_password':password,'courses_links':courses}
with open(required_info_path , 'w') as f:
f.write(json.dumps(data))
messagebox.showinfo("File Saving", "File required_info.json saved Succesfully\nYou can Run the downloader now ^_^")
master.quit()
cached_info = load_required_info()
master = tk.Tk()
tk.Label(master,text="Email").grid(row=0)
tk.Label(master,text="Password").grid(row=1)
tk.Label(master,text="Courses").grid(row=3)
string_email = tk.StringVar(value=cached_info['linkedin_email'])
e_email = tk.Entry(master,width=40,textvariable=string_email)
string_pass = tk.StringVar(value=cached_info['linkedin_password'])
e_pass = tk.Entry(master,width=40,textvariable=string_pass)
string_course = tk.StringVar()
e_course = tk.Entry(master,width=40,textvariable=string_course)
lb1_values = tk.Variable()
listbox = tk.Listbox(master,width=40,listvariable=lb1_values)
for course in cached_info['courses_links']:
course = course.split('/learning/')[1] if '/learning/' in course else course
if course : listbox.insert(tk.END, course)
e_email.grid(row=0, column=1)
e_pass.grid(row=1, column=1)
e_course.grid(row=2, column=1)
listbox.grid(row=3, column=1)
# tk.Button(master, text='Quit', command=master.quit).grid(row=8, column=0, sticky=tk.W,pady=4)
tk.Button(master, text='Add a course',command=insert_into_list_box).grid(row=2, column=0, sticky=tk.W, pady=4)
tk.Button(master, text='Remove selected',command=delete_from_list_box).grid(row=3, column=0, sticky=tk.W, pady=4)
tk.Button(master, text='Save All the data To required_info.json',command=save_data).grid(row=4, column=1, sticky=tk.W, pady=4)
master.resizable(False, False)
window_height = 280
window_width = 350
screen_width = master.winfo_screenwidth()
screen_height = master.winfo_screenheight()
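# center the window: offset its top-left corner by half the screen size minus half the window size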
x_coordinate = int((screen_width / 2) - (window_width / 2))
y_coordinate = int((screen_height / 2) - (window_height / 2))
master.geometry("{}x{}+{}+{}".format(window_width, window_height, x_coordinate, y_coordinate))
master.title('Saving Required info')
master.mainloop()
|
import argparse,collections,copy,datetime,os,pandas,shutil,sys,time
import Wrangler
# Based on NetworkWrangler\scripts\build_network.py
#
# Builds 3 futures networks. Use with net_spec_horizon.py
#
import build_network_mtc
###############################################################################
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=build_network_mtc.USAGE, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("--configword", help="optional word for network specification script")
parser.add_argument("--analysis", choices=["Round1","Round2","PPA","PPA_NoSLR"], help="Specify which set of analysis are relevant for these networks.", default="Round1")
parser.add_argument("--continue_on_warning", help="Don't prompt the user to continue if there are warnings; just warn and continue", action="store_true")
parser.add_argument("--skip_precheck_requirements", help="Don't precheck network requirements, stale projects, non-HEAD projects, etc", action="store_true")
parser.add_argument("--create_project_diffs", help="Pass this to create proejct diffs information for each project. NOTE: THIS WILL BE SLOW", action="store_true")
parser.add_argument("net_spec", metavar="network_specification.py", help="Script which defines required variables indicating how to build the network")
parser.add_argument("future", choices=["CleanAndGreen", "RisingTides", "BackToTheFuture"], help="Specify which Future Scenario for which to create networks")
args = parser.parse_args()
NOW = time.strftime("%Y%b%d.%H%M%S")
BUILD_MODE = None # regular
PIVOT_DIR = build_network_mtc.PIVOT_DIR
NETWORK_PROJECTS = build_network_mtc.NETWORK_PROJECTS
TRANSIT_CAPACITY_DIR = os.path.join(PIVOT_DIR, "trn")
TRN_NET_NAME = "Transit_Lines"
HWY_NET_NAME = "freeflow.net"
OUT_DIR = "network_{}" # YEAR
    TAG = 'HEAD' # 'PPA' tag isn't propagated yet
if args.analysis == "Round1":
PROJECT = "FU1"
elif args.analysis == "Round2":
PROJECT = "FU2"
elif args.analysis in ["PPA","PPA_NoSLR"]:
PROJECT = args.analysis
# Read the configuration
NETWORK_CONFIG = args.net_spec
SCENARIO = args.future
LOG_FILENAME = "build%snetwork_%s_%s_%s.info.LOG" % ("TEST" if BUILD_MODE=="test" else "", PROJECT, SCENARIO, NOW)
Wrangler.setupLogging(LOG_FILENAME, LOG_FILENAME.replace("info", "debug"))
exec(open(NETWORK_CONFIG).read())
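    # The spec script is expected to (re)define names used below, e.g. PROJECT,
    # TAG, NETWORK_PROJECTS, NET_MODES, NET_VARIANT, HWY_SUBDIR and TRN_SUBDIR.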
# Verify mandatory fields are set
    if PROJECT is None:
        print("PROJECT not set in %s" % NETWORK_CONFIG)
        sys.exit(2)
    if TAG is None:
        print("TAG not set in %s" % NETWORK_CONFIG)
        sys.exit(2)
    if NETWORK_PROJECTS is None:
        print("NETWORK_PROJECTS not set in %s" % NETWORK_CONFIG)
        sys.exit(2)
if TRANSIT_CAPACITY_DIR:
Wrangler.TransitNetwork.capacity = Wrangler.TransitCapacity(directory=TRANSIT_CAPACITY_DIR)
# Create a scratch directory to check out project repos into
SCRATCH_SUBDIR = "scratch"
TEMP_SUBDIR = "Wrangler_tmp_" + NOW
if not os.path.exists(SCRATCH_SUBDIR): os.mkdir(SCRATCH_SUBDIR)
os.chdir(SCRATCH_SUBDIR)
os.environ["CHAMP_node_names"] = os.path.join(PIVOT_DIR,"Node Description.xls")
networks = {
'hwy' :Wrangler.HighwayNetwork(modelType=Wrangler.Network.MODEL_TYPE_TM1, modelVersion=1.0,
basenetworkpath=os.path.join(PIVOT_DIR,"hwy"),
networkBaseDir=build_network_mtc.NETWORK_BASE_DIR,
networkProjectSubdir=build_network_mtc.NETWORK_PROJECT_SUBDIR,
networkSeedSubdir=build_network_mtc.NETWORK_SEED_SUBDIR,
networkPlanSubdir=build_network_mtc.NETWORK_PLAN_SUBDIR,
isTiered=True if PIVOT_DIR else False,
tag=TAG,
tempdir=TEMP_SUBDIR,
networkName="hwy",
tierNetworkName=HWY_NET_NAME),
'trn':Wrangler.TransitNetwork( modelType=Wrangler.Network.MODEL_TYPE_TM1, modelVersion=1.0,
basenetworkpath=os.path.join(PIVOT_DIR,"trn"),
networkBaseDir=build_network_mtc.NETWORK_BASE_DIR,
networkProjectSubdir=build_network_mtc.NETWORK_PROJECT_SUBDIR,
networkSeedSubdir=build_network_mtc.NETWORK_SEED_SUBDIR,
networkPlanSubdir=build_network_mtc.NETWORK_PLAN_SUBDIR,
isTiered=True if PIVOT_DIR else False,
networkName=TRN_NET_NAME)
}
# For projects applied in a pivot network (because they won't show up in the current project list)
    if build_network_mtc.APPLIED_PROJECTS is not None:
for proj in build_network_mtc.APPLIED_PROJECTS:
networks['hwy'].appliedProjects[proj]=TAG
# Wrangler.WranglerLogger.debug("NETWORK_PROJECTS=%s NET_MODES=%s" % (str(NETWORK_PROJECTS), str(NET_MODES)))
if args.skip_precheck_requirements:
Wrangler.WranglerLogger.info("skip_precheck_requirements passed so skipping preCheckRequirementsForAllProjects()")
else:
build_network_mtc.preCheckRequirementsForAllProjects(NETWORK_PROJECTS, TEMP_SUBDIR, networks, args.continue_on_warning)
# create the subdir for SET_CAPCLASS with set_capclass.job as apply.s
SET_CAPCLASS = "set_capclass"
SET_CAPCLASS_DIR = os.path.join(TEMP_SUBDIR, SET_CAPCLASS)
os.makedirs(SET_CAPCLASS_DIR)
source_file = os.path.join(os.path.dirname(build_network_mtc.THIS_FILE), "set_capclass.job")
shutil.copyfile( source_file, os.path.join(SET_CAPCLASS_DIR, "apply.s"))
networks_without_earthquake = {}
# Network Loop #2: Now that everything has been checked, build the networks.
for YEAR in NETWORK_PROJECTS.keys():
projects_for_year = NETWORK_PROJECTS[YEAR]
appliedcount = 0
for netmode in build_network_mtc.NET_MODES:
Wrangler.WranglerLogger.info("Building {} {} networks".format(YEAR, netmode))
# restore version without earthquake
if netmode in networks_without_earthquake:
Wrangler.WranglerLogger.info("Restoring version without earthquake")
networks[netmode] = networks_without_earthquake[netmode]
appliedcount += 1 # increment to trigger writing this out
del networks_without_earthquake[netmode]
if netmode == "hwy":
shutil.move(os.path.join("FREEFLOW_WITHOUT_EARTHQUAKE.BLD"),
os.path.join("FREEFLOW.BLD"))
for project in projects_for_year[netmode]:
(project_name, projType, tag, kwargs) = build_network_mtc.getProjectAttributes(project)
if tag == None: tag = TAG
Wrangler.WranglerLogger.info("Applying project [{}] of type [{}] with tag [{}] and kwargs[{}]".format(project_name, projType, tag, kwargs))
if projType=='plan':
continue
# save a copy of this network instance for comparison
if args.create_project_diffs:
network_without_project = copy.deepcopy(networks[netmode])
applied_SHA1 = None
cloned_SHA1 = networks[netmode].cloneProject(networkdir=project_name, tag=tag,
projtype=projType, tempdir=TEMP_SUBDIR, **kwargs)
(parentdir, networkdir, gitdir, projectsubdir) = networks[netmode].getClonedProjectArgs(project_name, None, projType, TEMP_SUBDIR)
if ((project_name == "Earthquake") and ((PROJECT == "FU1" and args.future in ["CleanAndGreen","BackToTheFuture"]) or (PROJECT == "FU2"))):
# Then this "project" is only temporary, so save aside a deepcopy of the network PRIOR
# to the apply to restore after we write it
networks_without_earthquake[netmode] = copy.deepcopy(networks[netmode])
if netmode == "hwy":
shutil.copyfile(os.path.join("FREEFLOW.BLD"),
os.path.join("FREEFLOW_WITHOUT_EARTHQUAKE.BLD"))
applied_SHA1 = networks[netmode].applyProject(parentdir, networkdir, gitdir, projectsubdir, **kwargs)
appliedcount += 1
# Create difference report for this project
# TODO: roadway not supported yet
if args.create_project_diffs and netmode!="hwy":
                    # difference information to be stored in network_dir netmode_projectname
# e.g. BlueprintNetworks\net_2050_Blueprint\trn_BP_Transbay_Crossing
project_diff_folder = os.path.join("..", "BlueprintNetworks",
"net_{}_{}".format(YEAR, NET_VARIANT),
"{}_{}".format(build_network_mtc.HWY_SUBDIR if netmode == "hwy" else build_network_mtc.TRN_SUBDIR, project_name))
hwypath=os.path.join("..", "BlueprintNetworks", "net_{}_{}".format(YEAR, NET_VARIANT), build_network_mtc.HWY_SUBDIR)
# the project may get applied multiple times -- e.g., for different phases
suffix_num = 1
project_diff_folder_with_suffix = project_diff_folder
while os.path.exists(project_diff_folder_with_suffix):
suffix_num += 1
project_diff_folder_with_suffix = "{}_{}".format(project_diff_folder, suffix_num)
Wrangler.WranglerLogger.debug("Creating project_diff_folder: {}".format(project_diff_folder_with_suffix))
# new!
networks[netmode].reportDiff(network_without_project, project_diff_folder_with_suffix, project_name,
roadwayNetworkFile=os.path.join(os.path.abspath(hwypath), HWY_NET_NAME))
# if hwy project has set_capclass override, copy it to set_capclass/apply.s
set_capclass_override = os.path.join(TEMP_SUBDIR, project_name, "set_capclass.job")
if os.path.exists(set_capclass_override):
dest_file = os.path.join(SET_CAPCLASS_DIR, "apply.s")
shutil.copyfile(set_capclass_override, dest_file)
Wrangler.WranglerLogger.info("Copied override {} to {}".format(set_capclass_override, dest_file))
# apply set_capclass before writing any hwy network
if netmode == "hwy" and appliedcount > 0:
applied_SHA1 = networks[netmode].applyProject(parentdir=TEMP_SUBDIR, networkdir=SET_CAPCLASS,
gitdir=os.path.join(TEMP_SUBDIR, SET_CAPCLASS))
if appliedcount == 0:
Wrangler.WranglerLogger.info("No applied projects for this year -- skipping output")
continue
# Initialize output subdirectories up a level (not in scratch)
hwypath=os.path.join("..", SCENARIO, OUT_DIR.format(YEAR),HWY_SUBDIR)
if not os.path.exists(hwypath): os.makedirs(hwypath)
trnpath = os.path.join("..", SCENARIO, OUT_DIR.format(YEAR),TRN_SUBDIR)
if not os.path.exists(trnpath): os.makedirs(trnpath)
networks['hwy'].write(path=hwypath,name=HWY_NET_NAME,suppressQuery=True,
suppressValidation=True) # MTC doesn't have turn penalties
networks['trn'].write(path=trnpath,
name="transitLines",
writeEmptyFiles = False,
suppressQuery = True if BUILD_MODE=="test" else False,
suppressValidation = False,
cubeNetFileForValidation = os.path.join(os.path.abspath(hwypath), HWY_NET_NAME))
# Write the transit capacity configuration
Wrangler.TransitNetwork.capacity.writeTransitVehicleToCapacity(directory = trnpath)
Wrangler.TransitNetwork.capacity.writeTransitLineToVehicle(directory = trnpath)
Wrangler.TransitNetwork.capacity.writeTransitPrefixToVehicle(directory = trnpath)
Wrangler.WranglerLogger.debug("Successfully completed running %s" % os.path.abspath(__file__))
|
# -*- coding: utf-8 -*-
# @Author: zjx
# @Date : 2018/3/20
class ValidationError(ValueError):
pass
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 6 19:36:00 2018
@author: PPAGACZ
"""
import pytest
from FitARMAFilter import *
from unittest import TestCase
import pandas as pd
from statsmodels.tsa.arima_model import ARIMA
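# Note: statsmodels.tsa.arima_model.ARIMA was deprecated in statsmodels 0.12
# and removed in 0.13; these tests assume an older statsmodels release.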
class Test_FitARMAFilter(TestCase):
def test_get_item_org_diff_wald(self):
series = pd.read_csv('AirPassengers.csv')
columns = list(series.columns.values)
data = Data(series[columns[1]])
data.differencing = series[columns[1]]
data.waldDecomposition = series[columns[1]]
expected = (ORGINAL, WALD, DIFF)
actual = FitARMAFilter.getItems(data)
assert expected == actual
def test_get_item_org_diff(self):
series = pd.read_csv('AirPassengers.csv')
columns = list(series.columns.values)
data = Data(series[columns[1]])
data.differencing = series[columns[1]]
expected = (ORGINAL, DIFF)
actual = FitARMAFilter.getItems(data)
assert expected == actual
def test_get_item_org_wald(self):
series = pd.read_csv('AirPassengers.csv')
columns = list(series.columns.values)
data = Data(series[columns[1]])
data.waldDecomposition = series[columns[1]]
expected = (ORGINAL, WALD)
actual = FitARMAFilter.getItems(data)
assert expected == actual
def test_get_item_org(self):
series = pd.read_csv('AirPassengers.csv')
columns = list(series.columns.values)
data = Data(series[columns[1]])
expected = (ORGINAL)
actual = FitARMAFilter.getItems(data)
assert expected == actual
def test_get_title(self):
expected = ARMA_FITTED
actual = FitARMAFilter.get_title()
assert expected == actual
def test_calculate_arma_p_q(self):
series = pd.read_csv('AirPassengers.csv')
series.index = pd.DatetimeIndex(freq = 'M', start = 0, periods=series.shape[0])
columns = list(series.columns.values)
data = series[columns[1]]
model_expected = ARIMA(data, order=(4,0,4))
model_actual = FitARMAFilter.calculate_arma_p_q(data, 4,4)
expected = model_expected.fit(disp=0).bic
actual = model_actual.fit(disp=0).bic
assert expected == actual
def test_calculate_arma_p_q_wrong(self):
series = pd.read_csv('AirPassengers.csv')
series.index = pd.DatetimeIndex(freq = 'M', start = 0, periods=series.shape[0])
columns = list(series.columns.values)
data = series[columns[1]]
expected = ARIMA(data, order=(3,0,3))
actual = FitARMAFilter.calculate_arma_p_q(data, 4,4)
assert expected != actual
    def test_calculate_arma_fit(self):
series = pd.read_csv('AirPassengers.csv')
series.index = pd.DatetimeIndex(freq = 'M', start = 0, periods=series.shape[0])
columns = list(series.columns.values)
data = series[columns[1]]
model = ARIMA(data, order=(3,0,3))
expected = model.fit(disp=0)
actual = FitARMAFilter.calculate_arma_fit(model)
assert expected.bic == actual.bic
    def test_calculate_arma_fit_wrong(self):
series = pd.read_csv('AirPassengers.csv')
series.index = pd.DatetimeIndex(freq = 'M', start = 0, periods=series.shape[0])
columns = list(series.columns.values)
data = series[columns[1]]
model = ARIMA(data, order=(3,0,3))
expected = model.fit(disp=-1)
actual = FitARMAFilter.calculate_arma_fit(model)
assert expected != actual
|