text stringlengths 8 6.05M |
|---|
# -*- coding: utf-8 -*-
"""Batch-extract abstracts from article files.

Runs CLAbstractExtractor over every file under ``../articles`` and writes the
first sufficiently long candidate abstract (trimmed at its last full stop) to
``../output/<filename>.txt``.  Files that cannot be processed are appended to
``../output/log.txt``.
"""
import os
from extractor import *

extractor = CLAbstractExtractor()
parent_folder = "../articles"
output_folder = "../output"

with open(os.path.join(output_folder, "log.txt"), "w") as log:
    for _, _, filenames in os.walk(parent_folder):
        for filename in filenames:
            try:
                candidates = extractor.read_from(filename, parent_folder)
                for candidate in candidates:
                    # Only candidates long enough to plausibly be an abstract.
                    if len(candidate) < 200:
                        continue
                    last_stop = candidate.rfind(".")
                    if last_stop == -1:
                        # No sentence terminator: treat the file as unprocessable
                        # (caught below and logged), matching the original flow.
                        raise ValueError("No stops found")
                    with open(os.path.join(output_folder, filename + ".txt"),
                              "w") as result:
                        # Drop any trailing fragment after the last full stop.
                        result.write(candidate[:last_stop + 1])
                    break
            except Exception:
                # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
                # still propagate.  Best-effort behavior is preserved.
                log.write(filename + "\n")
                log.flush()
                print("Couldn't process " + filename)
|
from tkinter import *
import tkinter.messagebox
from random import randint
from users import *
from Vehicles import *
from rental_info import *
import tkinter
from tkcalendar import *
import pymongo
from pymongo import MongoClient
# NOTE(review): database credentials are hard-coded in the connection URI --
# they should come from an environment variable or config file, and this URI
# should be rotated since it has been committed.
cluster = MongoClient("mongodb+srv://amogh:amogh@shivi-usyoc.mongodb.net/test?retryWrites=true&w=majority")
# All three collections live in the same "tkinter_1" database; reuse one
# handle instead of opening it three times.  db_2/db_3 are kept as aliases
# for backward compatibility with any code that references them.
db = cluster["tkinter_1"]
db_2 = db
db_3 = db
collection = db["users"]            # user accounts (admins and customers)
collection_2 = db_2["Vehicles"]     # vehicle inventory
collection_3 = db_3["rental_info"]  # rental bookings
def login_page():
    """Admin login screen.

    Checks the entered credentials against the ``users`` collection
    (``user_type == 'admin'``) and opens the admin main menu on success.
    Uses the global ``start`` window and ``collection``.
    """
    frame_login = Frame(start)
    frame_login.pack(side=TOP)
    frame_back = Frame(start)
    frame_back.pack(side=BOTTOM, anchor=SW)
    Button(frame_back, text="Back", width=4, height=1, activebackground="grey",
           command=lambda: [frame_login.destroy(), frame_back.destroy(),
                            begin()]).pack(side=BOTTOM)

    def message_box():
        """Validate the credentials and either show an error or log in."""
        # NOTE(review): passwords are stored and compared in plain text --
        # they should be hashed (hashlib/bcrypt) before reaching the DB.
        query = {"username": username_login_entry.get(),
                 'password': password_login_entry.get(),
                 'user_type': 'admin'}
        # count_documents replaces Cursor.count(), removed in PyMongo 4.
        if collection.count_documents(query) == 0:
            tkinter.messagebox.showerror('incorrect details', 'incorrect username or password')
        else:
            tkinter.messagebox.showinfo('Login status', 'you have been logged in')
            frame_login.destroy()
            frame_back.destroy()
            main_menu()

    Label(frame_login, text="Please enter login details").pack(side=TOP)
    Label(frame_login, text="").pack()
    Label(frame_login, text="Username").pack()
    username_login_entry = Entry(frame_login)
    username_login_entry.pack()
    Label(frame_login, text="").pack()
    Label(frame_login, text="Password").pack()
    password_login_entry = Entry(frame_login, show='*')
    password_login_entry.pack()
    Label(frame_login, text="").pack()
    Button(frame_login, text="Submit", width=10, height=1, activebackground="grey",
           command=lambda: [message_box()]).pack(pady=5)
    Button(frame_login, text="Sign Up", width=10, height=1, activebackground="grey",
           command=lambda: [frame_login.destroy(), frame_back.destroy(),
                            sign_up()]).pack()
def login_page_customer():
    """Customer login screen.

    Checks the credentials against the ``users`` collection
    (``user_type == 'customer'``), remembers the customer's ID in the global
    ``current_user_ID`` and opens the customer main menu on success.
    """
    frame_login = Frame(start)
    frame_login.pack(side=TOP)
    frame_back = Frame(start)
    frame_back.pack(side=BOTTOM, anchor=SW)
    Button(frame_back, text="Back", width=4, height=1, activebackground="grey",
           command=lambda: [frame_login.destroy(), frame_back.destroy(),
                            begin()]).pack(side=BOTTOM)

    def message_box():
        """Validate credentials; on success remember the user ID and log in."""
        global current_user_ID
        query = {"username": username_login_entry.get(),
                 'password': password_login_entry.get(),
                 'user_type': 'customer'}
        user_doc = collection.find_one(query)
        # Bug fix: the original subscripted find_one(...)["ID"] *before*
        # checking the result, so a wrong username/password crashed with
        # "TypeError: 'NoneType' object is not subscriptable" instead of
        # showing the error dialog.
        if user_doc is None:
            tkinter.messagebox.showerror('incorrect details', 'incorrect username or password')
        else:
            current_user_ID = user_doc["ID"]
            tkinter.messagebox.showinfo('Login status', 'you have been logged in')
            frame_login.destroy()
            frame_back.destroy()
            main_menu_customer()

    Label(frame_login, text="Please enter login details").pack(side=TOP)
    Label(frame_login, text="").pack()
    Label(frame_login, text="Username").pack()
    username_login_entry = Entry(frame_login)
    username_login_entry.pack()
    Label(frame_login, text="").pack()
    Label(frame_login, text="Password").pack()
    password_login_entry = Entry(frame_login, show='*')
    password_login_entry.pack()
    Label(frame_login, text="").pack()
    Button(frame_login, text="Submit", width=10, height=1, activebackground="grey",
           command=lambda: [message_box()]).pack(pady=5)
    Button(frame_login, text="Sign Up", width=10, height=1, activebackground="grey",
           command=lambda: [frame_login.destroy(), frame_back.destroy(),
                            sign_up_customer()]).pack()
def sign_up():
    """Admin account creation form.

    Inserts a new ``Admin_users`` document with a random 6-digit ID.
    """
    frame_sign_up = Frame(start)
    frame_sign_up.pack(side=TOP)
    frame_back = Frame(start)
    frame_back.pack(side=BOTTOM, anchor=SW)
    Button(frame_back, text="Back", width=4, height=1, activebackground="grey",
           command=lambda: [frame_sign_up.destroy(), frame_back.destroy(),
                            login_page()]).pack(side=BOTTOM)

    def getvalues_2():
        """Validate the form and insert the new admin account."""
        username = username_signup_entry.get()
        password = password_signup_entry.get()
        # Robustness fix: the original happily created accounts with empty
        # username/password.
        if not username or not password:
            tkinter.messagebox.showerror('Sign up status', 'username and password must not be empty')
            return
        add_user = Admin_users(randint(100000, 999999), username, password)
        collection.insert_one(add_user.sign_up())
        tkinter.messagebox.showinfo('Sign up status', 'your account has been created')

    Label(frame_sign_up, text="create your new account.").pack(side=TOP)
    Label(frame_sign_up, text="").pack()
    Label(frame_sign_up, text="Username").pack()
    username_signup_entry = Entry(frame_sign_up)
    username_signup_entry.pack()
    Label(frame_sign_up, text="").pack()
    Label(frame_sign_up, text="Password").pack()
    password_signup_entry = Entry(frame_sign_up, show='*')
    password_signup_entry.pack()
    Label(frame_sign_up, text="").pack()
    Button(frame_sign_up, text="create account", width=13, height=1, activebackground="grey",
           command=lambda: [getvalues_2()]).pack()
def sign_up_customer():
    """Customer account creation form.

    Inserts a new ``Customer_users`` document (random 6-digit ID, plus name,
    address and phone number).
    """
    frame_sign_up = Frame(start)
    frame_sign_up.pack(side=TOP)
    frame_back = Frame(start)
    frame_back.pack(side=BOTTOM, anchor=SW)
    Button(frame_back, text="Back", width=4, height=1, activebackground="grey",
           command=lambda: [frame_sign_up.destroy(), frame_back.destroy(),
                            login_page_customer()]).pack(side=BOTTOM)

    def getvalues_2():
        """Validate the form and insert the new customer account."""
        username = username_signup_entry.get()
        password = password_signup_entry.get()
        # Robustness fix: reject empty credentials instead of silently
        # creating an unusable account.
        if not username or not password:
            tkinter.messagebox.showerror('Sign up status', 'username and password must not be empty')
            return
        add_user = Customer_users(randint(100000, 999999), username, password,
                                  name_signup_entry.get(), address_signup_entry.get(),
                                  phone_signup_entry.get())
        collection.insert_one(add_user.sign_up())
        tkinter.messagebox.showinfo('Sign up status', 'your account has been created')

    Label(frame_sign_up, text="create your new account.").pack(side=TOP)
    Label(frame_sign_up, text="").pack()
    Label(frame_sign_up, text="Username").pack()
    username_signup_entry = Entry(frame_sign_up)
    username_signup_entry.pack()
    Label(frame_sign_up, text="").pack()
    Label(frame_sign_up, text="Password").pack()
    password_signup_entry = Entry(frame_sign_up, show='*')
    password_signup_entry.pack()
    Label(frame_sign_up, text="").pack()
    Label(frame_sign_up, text="Name").pack()
    name_signup_entry = Entry(frame_sign_up)
    name_signup_entry.pack()
    Label(frame_sign_up, text="").pack()
    Label(frame_sign_up, text="Address").pack()
    address_signup_entry = Entry(frame_sign_up)
    address_signup_entry.pack()
    Label(frame_sign_up, text="").pack()
    Label(frame_sign_up, text="Phone Number").pack()
    phone_signup_entry = Entry(frame_sign_up)
    phone_signup_entry.pack()
    Label(frame_sign_up, text="").pack()
    Button(frame_sign_up, text="create account", width=13, height=1, activebackground="grey",
           command=lambda: [getvalues_2()]).pack()
def main_menu():
    """Admin main menu.

    Offers vehicle administration (view/add/edit/delete), a customer listing
    and a rental listing.  The sub-screens are nested closures sharing the
    global ``start`` window and the module-level Mongo collections.

    Fixes vs. original: ``Cursor.count()`` (removed in PyMongo 4) replaced by
    ``count_documents``; the 'Ivalid ID' message typo corrected; the
    sedan/SUV/coupe if-chains replaced by a single dispatch table.
    """
    frame_mainmenu = Frame(start)
    frame_mainmenu.pack(side=TOP)
    frame_back = Frame(start)
    frame_back.pack(side=BOTTOM, anchor=SW)
    Button(frame_back, text="Back", width=4, height=1, activebackground="grey",
           command=lambda: [frame_mainmenu.destroy(), frame_back.destroy(),
                            login_page()]).pack(side=BOTTOM)
    Label(frame_mainmenu, text="").pack()
    Label(frame_mainmenu, text="MAIN MENU", fg="purple").pack()
    Label(frame_mainmenu, text="").pack()

    # Maps the text typed in the "Type :" field to the Vehicle subclass.
    _vehicle_classes = {"sedan": Sedan, "SUV": SUV, "coupe": Coupe}

    def _labeled_entry(parent, caption):
        """Pack a spacer, a caption label and an Entry; return the Entry."""
        Label(parent, text="").pack()
        Label(parent, text=caption).pack()
        entry = Entry(parent)
        entry.pack()
        return entry

    def administer_vehicles():
        """Vehicle administration sub-menu."""
        frame_admin_vehicle = Frame(start)
        frame_admin_vehicle.pack(side=TOP)
        frame_back = Frame(start)
        frame_back.pack(side=BOTTOM, anchor=SW)
        Button(frame_back, text="Back", width=4, height=1, activebackground="grey",
               command=lambda: [frame_admin_vehicle.destroy(), frame_back.destroy(),
                                main_menu()]).pack(side=BOTTOM)
        Label(frame_admin_vehicle, text="").pack()
        Label(frame_admin_vehicle, text="ADMINISTER VEHICLES", fg="purple").pack()
        Label(frame_admin_vehicle, text="").pack()
        Button(frame_admin_vehicle, text="View all vehicles", width=15, activebackground="grey",
               command=lambda: [frame_admin_vehicle.destroy(), frame_back.destroy(),
                                view_vehicles()]).pack(pady=5)
        Button(frame_admin_vehicle, text="Add new vehicle", width=15, activebackground="grey",
               command=lambda: [frame_admin_vehicle.destroy(), frame_back.destroy(),
                                add_vehicles()]).pack(pady=5)
        Button(frame_admin_vehicle, text="Edit Vehicle", width=15, activebackground="grey",
               command=lambda: [frame_admin_vehicle.destroy(), frame_back.destroy(),
                                edit_vehicles_1()]).pack(pady=5)
        Button(frame_admin_vehicle, text="Delete Vehicle", width=15, activebackground="grey",
               command=lambda: [frame_admin_vehicle.destroy(), frame_back.destroy(),
                                delete_vehicles()]).pack(pady=5)

    def view_vehicles():
        """List every vehicle with brand / model / rental status."""
        frame_view_vehicle = Frame(start)
        frame_d = Frame(start)
        frame_d.pack(side=TOP)
        frame_view_vehicle.pack(side=TOP, anchor=NW)
        frame_back = Frame(start)
        frame_back.pack(side=BOTTOM, anchor=SW)
        Button(frame_back, text="Back", width=4, height=1, activebackground="grey",
               command=lambda: [frame_view_vehicle.destroy(), frame_d.destroy(),
                                frame_back.destroy(), administer_vehicles()]).pack(side=BOTTOM)
        Label(frame_d, text="").pack()
        Label(frame_d, text="LIST OF VEHICLES", fg="purple").pack()
        Label(frame_d, text="").pack()
        for i, d in enumerate(collection_2.find({}, {"_id": 0}), start=1):
            Message(frame_view_vehicle,
                    text=str(i) + ". \nBrand \t : " + d["brand"] + "\nModel \t : "
                         + d["model"] + "\nIs rented : " + str(d["is_rented"]),
                    width=170).pack(side=TOP, anchor=W)

    def add_vehicles():
        """Form for inserting a new vehicle document."""
        frame_add_vehicle = Frame(start)
        frame_add_vehicle.pack(side=TOP)
        frame_back = Frame(start)
        frame_back.pack(side=BOTTOM, anchor=SW)
        Button(frame_back, text="Back", width=4, height=1, activebackground="grey",
               command=lambda: [frame_add_vehicle.destroy(), frame_back.destroy(),
                                administer_vehicles()]).pack(side=BOTTOM)
        Label(frame_add_vehicle, text="").pack()
        Label(frame_add_vehicle, text="ENTER VEHICLE DETAILS", fg="purple").pack()
        Label(frame_add_vehicle, text="").pack()

        def getvalues_3():
            """Build the matching Vehicle subclass from the form and insert it."""
            cls = _vehicle_classes.get(vehicle_type.get())
            if cls is None:
                return  # unknown type: original code silently did nothing
            new_vehicle = cls(int(ID.get()), brand.get(), model.get(), int(year.get()),
                              color.get(), vehicle_type.get(), int(cost.get()))
            collection_2.insert_one(new_vehicle.create_vehicle())
            tkinter.messagebox.showinfo('Vehicle status', 'Vehicle has been added.')

        ID = _labeled_entry(frame_add_vehicle, "ID :")
        brand = _labeled_entry(frame_add_vehicle, "Brand :")
        model = _labeled_entry(frame_add_vehicle, "Model :")
        year = _labeled_entry(frame_add_vehicle, "Year :")
        color = _labeled_entry(frame_add_vehicle, "Colour :")
        vehicle_type = _labeled_entry(frame_add_vehicle, "Type :")
        cost = _labeled_entry(frame_add_vehicle, "Cost :")
        Label(frame_add_vehicle, text="").pack()
        Button(frame_add_vehicle, text="Add Vehicle", width=13, height=1, activebackground="grey",
               command=lambda: [getvalues_3()]).pack(pady=5)

    def edit_vehicles_1():
        """Ask for a vehicle ID, then open the edit form if it exists."""
        frame_edit_vehicle = Frame(start)
        frame_edit_vehicle.pack(side=TOP)
        frame_back = Frame(start)
        frame_back.pack(side=BOTTOM, anchor=SW)
        Button(frame_back, text="Back", width=4, height=1, activebackground="grey",
               command=lambda: [frame_edit_vehicle.destroy(), frame_back.destroy(),
                                administer_vehicles()]).pack(side=BOTTOM)

        def getvalues_4():
            """Look up the typed ID; open the edit form or show an error."""
            i = int(ID.get())

            def edit_vehicles_2():
                """Edit form for vehicle ``i``; updates the document in place."""
                frame_edit_vehicle_2 = Frame(start)
                frame_edit_vehicle_2.pack(side=TOP)
                frame_back = Frame(start)
                frame_back.pack(side=BOTTOM, anchor=SW)
                Button(frame_back, text="Back", width=4, height=1, activebackground="grey",
                       command=lambda: [frame_edit_vehicle_2.destroy(), frame_back.destroy(),
                                        edit_vehicles_1()]).pack(side=BOTTOM)

                def getvalues_5():
                    """Rebuild the vehicle from the form and update the document."""
                    cls = _vehicle_classes.get(vehicle_type.get())
                    if cls is None:
                        return  # unknown type: original code silently did nothing
                    updated = cls(i, brand.get(), model.get(), int(year.get()),
                                  color.get(), vehicle_type.get(), int(cost.get()))
                    collection_2.update_one({"ID": i}, {"$set": updated.create_vehicle()})
                    tkinter.messagebox.showinfo('Vehicle status', 'Vehicle has been updated.')

                brand = _labeled_entry(frame_edit_vehicle_2, "Brand :")
                model = _labeled_entry(frame_edit_vehicle_2, "Model :")
                year = _labeled_entry(frame_edit_vehicle_2, "Year :")
                color = _labeled_entry(frame_edit_vehicle_2, "Colour :")
                vehicle_type = _labeled_entry(frame_edit_vehicle_2, "Type :")
                cost = _labeled_entry(frame_edit_vehicle_2, "Cost :")
                Label(frame_edit_vehicle_2, text="").pack()
                Button(frame_edit_vehicle_2, text="Edit Vehicle", width=13, height=1,
                       activebackground="grey", command=lambda: [getvalues_5()]).pack(pady=5)

            # count_documents replaces Cursor.count(), removed in PyMongo 4.
            if collection_2.count_documents({"ID": i}) == 0:
                tkinter.messagebox.showerror('Invalid ID', 'Enter correct ID')
            else:
                frame_edit_vehicle.destroy()
                frame_back.destroy()
                edit_vehicles_2()

        Label(frame_edit_vehicle, text="").pack()
        Label(frame_edit_vehicle, text="ENTER VEHICLE ID", fg="purple").pack()
        Label(frame_edit_vehicle, text="").pack()
        ID = _labeled_entry(frame_edit_vehicle, "ID :")
        Label(frame_edit_vehicle, text="").pack()
        Button(frame_edit_vehicle, text="Edit Vehicle", width=13, height=1, activebackground="grey",
               command=lambda: [getvalues_4()]).pack(pady=5)

    def delete_vehicles():
        """Ask for a vehicle ID and delete the matching document."""
        frame_h = Frame(start)
        frame_h.pack(side=TOP)
        frame_back = Frame(start)
        frame_back.pack(side=BOTTOM, anchor=SW)
        Button(frame_back, text="Back", width=4, height=1, activebackground="grey",
               command=lambda: [frame_h.destroy(), frame_back.destroy(),
                                administer_vehicles()]).pack(side=BOTTOM)
        Label(frame_h, text="").pack()
        Label(frame_h, text="ENTER VEHICLE ID", fg="purple").pack()
        Label(frame_h, text="").pack()
        ID = _labeled_entry(frame_h, "ID :")

        def del_values():
            """Delete the vehicle with the typed ID, or show an error."""
            j = int(ID.get())
            # count_documents replaces Cursor.count(), removed in PyMongo 4.
            if collection_2.count_documents({"ID": j}) == 0:
                tkinter.messagebox.showerror('Invalid ID', 'Enter correct ID')
            else:
                collection_2.delete_one({"ID": j})
                tkinter.messagebox.showinfo('Vehicle status', 'Vehicle Deleted')

        Label(frame_h, text="").pack()
        Button(frame_h, text="Delete Vehicle", width=13, height=1, activebackground="grey",
               command=lambda: [del_values()]).pack(pady=5)

    def view_users():
        """List customer accounts and whether they have an active rental."""
        frame_v = Frame(start)
        frame_v.pack(side=TOP)
        frame_w = Frame(start)
        frame_w.pack(side=TOP, anchor=NW)
        frame_back = Frame(start)
        frame_back.pack(side=BOTTOM, anchor=SW)
        Button(frame_back, text="Back", width=4, height=1, activebackground="grey",
               command=lambda: [frame_v.destroy(), frame_w.destroy(), frame_back.destroy(),
                                main_menu()]).pack(side=BOTTOM)
        Label(frame_v, text="").pack()
        Label(frame_v, text="LIST OF USERS", fg="purple").pack()
        Label(frame_v, text="").pack()
        for i, d in enumerate(collection.find({"user_type": "customer"}), start=1):
            Message(frame_w,
                    text=str(i) + ". \n" + "Customer Name : " + d["name"] + "\n"
                         + "Vehicle rented \t: " + str(d["has_rented"]),
                    width=200).pack(side=TOP, anchor=W)

    def view_rentals():
        """List every vehicle with its rental status."""
        frame_v = Frame(start)
        frame_v.pack(side=TOP)
        frame_w = Frame(start)
        frame_w.pack(side=TOP, anchor=NW)
        frame_back = Frame(start)
        frame_back.pack(side=BOTTOM, anchor=SW)
        Button(frame_back, text="Back", width=4, height=1, activebackground="grey",
               command=lambda: [frame_v.destroy(), frame_w.destroy(), frame_back.destroy(),
                                main_menu()]).pack(side=BOTTOM)
        Label(frame_v, text="").pack()
        Label(frame_v, text="LIST OF RENTALS", fg="purple").pack()
        Label(frame_v, text="").pack()
        for i, d in enumerate(collection_2.find({}), start=1):
            Message(frame_w,
                    text=str(i) + ". \n" + "Vehicle brand : " + d["brand"] + "\n"
                         + "Vehicle model : " + d["model"] + "\n"
                         + "Vehicle rented : " + str(d["is_rented"]),
                    width=200).pack(side=TOP, anchor=W)

    Button(frame_mainmenu, text="Administer Vehicles", width=15, activebackground="grey",
           command=lambda: [frame_mainmenu.destroy(), frame_back.destroy(),
                            administer_vehicles()]).pack(pady=5)
    Button(frame_mainmenu, text="View Users", width=15, activebackground="grey",
           command=lambda: [frame_mainmenu.destroy(), frame_back.destroy(),
                            view_users()]).pack(pady=5)
    Button(frame_mainmenu, text="View Rentals", width=15, activebackground="grey",
           command=lambda: [frame_mainmenu.destroy(), frame_back.destroy(),
                            view_rentals()]).pack(pady=5)
def main_menu_customer():
    """Customer main menu: browse rentable vehicles or review own rentals.

    All sub-screens are nested closures sharing the global ``start`` window,
    the Mongo collections and the logged-in ``current_user_ID``.
    """

    def view_vehicles_2():
        """Choice screen: list every vehicle or filter by type."""
        frame_b = Frame(start)
        frame_b.pack(side=TOP)
        frame_back = Frame(start)
        frame_back.pack(side=BOTTOM, anchor=SW)
        Button(frame_back, text="Back", width=4, height=1, activebackground="grey",
               command=lambda: [frame_b.destroy(), frame_back.destroy(),
                                main_menu_customer()]).pack(side=BOTTOM)
        Label(frame_b, text="").pack()
        Label(frame_b, text="LIST OF VEHICLES", fg="purple").pack()
        Label(frame_b, text="").pack()
        Button(frame_b, text="View all vehicles", width=15, activebackground="grey",
               command=lambda: [frame_b.destroy(), frame_back.destroy(),
                                view_all_vehicles()]).pack(pady=5)
        Button(frame_b, text="View vehicles by type", width=15, activebackground="grey",
               command=lambda: [frame_b.destroy(), frame_back.destroy(),
                                view_vehicles_by_type()]).pack(pady=5)

    def view_all_vehicles():
        """Radio-button list of every un-rented vehicle, with booking flow."""
        frame_c = Frame(start)
        frame_c.pack(side=TOP)
        frame_back = Frame(start)
        frame_back.pack(side=BOTTOM, anchor=SW)
        Button(frame_back, text="Back", width=4, height=1, activebackground="grey",
               command=lambda: [frame_c.destroy(), frame_back.destroy(),
                                view_vehicles_2()]).pack(side=BOTTOM)
        Label(frame_c, text="").pack()
        Label(frame_c, text="LIST OF VEHICLES", fg="purple").pack()
        Label(frame_c, text="").pack()
        chosen_id = StringVar()
        for doc in collection_2.find({"is_rented": False}):
            Radiobutton(frame_c,
                        text=doc["brand"] + " - " + doc["model"] + "\nCost - Rs." + str(doc["cost"]),
                        value=doc["ID"], variable=chosen_id, padx=5,
                        justify=LEFT).pack(anchor=W)
            Message(frame_c, text=" ").pack()

        def get_values_vehicle():
            """Confirm the booking, then ask for a rental date."""
            if tkinter.messagebox.askyesno("Proceed", "Do you want to rent this vehicle ?"):
                frame_d = Frame(start)
                frame_d.pack(side=TOP)
                back_frame = Frame(start)
                back_frame.pack(side=BOTTOM, anchor=SW)
                Button(back_frame, text="Back", width=4, height=1, activebackground="grey",
                       command=lambda: [frame_d.destroy(), back_frame.destroy(),
                                        view_all_vehicles()]).pack(side=BOTTOM)

                def dateval():
                    """Mark the vehicle rented and record the rental document."""
                    vehicle_id = float(chosen_id.get())
                    collection_2.update_one({"ID": vehicle_id}, {"$set": {"is_rented": True}})
                    collection.update_one({"ID": current_user_ID}, {"$set": {"has_rented": True}})
                    doc = collection_2.find_one({"ID": vehicle_id})
                    booking = rental_info(vehicle_id, doc["brand"] + " " + doc["model"],
                                          current_user_ID, str(picker.get_date()),
                                          doc["cost"], True)
                    collection_3.insert_one(booking.add_rental_info())
                    tkinter.messagebox.showinfo("Booked", "The vehicle you selected has been rented to you.")
                    frame_d.destroy()
                    back_frame.destroy()
                    view_all_vehicles()

                Label(frame_d, text="Select Date").pack(padx=10, pady=10)
                picker = DateEntry(frame_d, width=15, bg="blue", fg="red", borderwidth=3)
                picker.pack()
                Button(frame_d, text="Click Here", command=lambda: [dateval()]).pack(pady=5)
            else:
                view_all_vehicles()

        Button(frame_c, text="Book", width=15, activebackground="grey",
               command=lambda: [frame_c.destroy(), frame_back.destroy(),
                                get_values_vehicle()]).pack(pady=5)

    def view_vehicles_by_type():
        """Dropdown of vehicle types; shows matching un-rented vehicles."""
        frame_c = Frame(start)
        frame_c.pack(side=TOP)
        frame_back = Frame(start)
        frame_back.pack(side=BOTTOM, anchor=SW)
        Button(frame_back, text="Back", width=4, height=1, activebackground="grey",
               command=lambda: [frame_c.destroy(), frame_back.destroy(),
                                view_vehicles_2()]).pack(side=BOTTOM)
        Label(frame_c, text="").pack()
        Label(frame_c, text="CHOOSE VEHICLE TYPE", fg="purple").pack()
        Label(frame_c, text="").pack()
        vehicle_type_list = ["sedan", "SUV", "coupe"]
        clicked = StringVar()
        clicked.set(vehicle_type_list[0])
        OptionMenu(frame_c, clicked, *vehicle_type_list).pack()
        Label(frame_c, text="").pack()

        def chosen_type():
            """Radio-button list of un-rented vehicles of the chosen type."""
            inner_frame = Frame(start)
            inner_frame.pack(side=TOP)
            inner_back = Frame(start)
            inner_back.pack(side=BOTTOM, anchor=SW)
            Button(inner_back, text="Back", width=4, height=1, activebackground="grey",
                   command=lambda: [inner_frame.destroy(), inner_back.destroy(),
                                    view_vehicles_2()]).pack(side=BOTTOM)
            picked = StringVar()
            for doc in collection_2.find({"is_rented": False, "type": str(clicked.get())}):
                Radiobutton(inner_frame,
                            text=doc["brand"] + " - " + doc["model"] + "\nCost - Rs." + str(doc["cost"]),
                            value=doc["ID"], variable=picked, padx=5,
                            justify=LEFT).pack(anchor=W)
                Message(inner_frame, text=" ").pack()

            def get_values_vehicle():
                """Confirm the booking, then ask for a rental date."""
                if tkinter.messagebox.askyesno("Proceed", "Do you want to rent this vehicle ?"):
                    frame_d = Frame(start)
                    frame_d.pack(side=TOP)
                    back_frame = Frame(start)
                    back_frame.pack(side=BOTTOM, anchor=SW)
                    # Mirrors the original flow: "Back" returns to the full
                    # vehicle list, not the filtered one.
                    Button(back_frame, text="Back", width=4, height=1, activebackground="grey",
                           command=lambda: [frame_d.destroy(), back_frame.destroy(),
                                            view_all_vehicles()]).pack(side=BOTTOM)

                    def dateval():
                        """Mark the vehicle rented and record the rental document."""
                        vehicle_id = float(picked.get())
                        collection_2.update_one({"ID": vehicle_id}, {"$set": {"is_rented": True}})
                        collection.update_one({"ID": current_user_ID}, {"$set": {"has_rented": True}})
                        doc = collection_2.find_one({"ID": vehicle_id})
                        booking = rental_info(vehicle_id, doc["brand"] + " " + doc["model"],
                                              current_user_ID, str(picker.get_date()),
                                              doc["cost"], True)
                        collection_3.insert_one(booking.add_rental_info())
                        tkinter.messagebox.showinfo("Booked", "The vehicle you selected has been rented to you.")
                        frame_d.destroy()
                        back_frame.destroy()
                        view_all_vehicles()

                    Label(frame_d, text="Select Date").pack(padx=10, pady=10)
                    picker = DateEntry(frame_d, width=15, bg="blue", fg="red", borderwidth=3)
                    picker.pack()
                    Button(frame_d, text="Click Here", command=lambda: [dateval()]).pack(pady=5)
                else:
                    chosen_type()

            Button(inner_frame, text="Book", width=15, activebackground="grey",
                   command=lambda: [inner_frame.destroy(), inner_back.destroy(),
                                    get_values_vehicle()]).pack(pady=5)

        Button(frame_c, text="Show vehicles",
               command=lambda: [frame_c.destroy(), frame_back.destroy(),
                                chosen_type()]).pack(pady=5)

    def view_my_rentals():
        """List every rental document belonging to the logged-in customer."""
        frame_b = Frame(start)
        frame_b.pack(side=TOP)
        frame_back = Frame(start)
        frame_back.pack(side=BOTTOM, anchor=SW)
        Button(frame_back, text="Back", width=4, height=1, activebackground="grey",
               command=lambda: [frame_b.destroy(), frame_back.destroy(),
                                main_menu_customer()]).pack(side=BOTTOM)
        Label(frame_b, text="").pack()
        Label(frame_b, text="MY RENTALS", fg="purple").pack()
        Label(frame_b, text="").pack()
        # Local name deliberately avoids shadowing the imported rental_info class.
        my_rentals = collection_3.find({"customer_id": current_user_ID})
        for idx, doc in enumerate(my_rentals, start=1):
            Message(frame_b,
                    text=str(idx) + ". \n" + "Vehicle Name : " + doc["vehicle_name"] + "\n"
                         + "Customer ID : " + str(doc["customer_id"]) + "\n"
                         + "Rental Date : " + doc["rental_date"] + "\n"
                         + "Is Active : " + str(doc["is_active"]),
                    width=200).pack(side=TOP, anchor=W)

    frame_a = Frame(start)
    frame_a.pack(side=TOP)
    frame_back = Frame(start)
    frame_back.pack(side=BOTTOM, anchor=SW)
    Button(frame_back, text="Back", width=4, height=1, activebackground="grey",
           command=lambda: [frame_a.destroy(), frame_back.destroy(),
                            login_page_customer()]).pack(side=BOTTOM)
    Label(frame_a, text="").pack()
    Label(frame_a, text="MAIN MENU", fg="purple").pack()
    Label(frame_a, text="").pack()
    Button(frame_a, text="View Vehicles", width=15, activebackground="grey",
           command=lambda: [frame_a.destroy(), frame_back.destroy(),
                            view_vehicles_2()]).pack(pady=5)
    Button(frame_a, text="View My Rentals", width=15, activebackground="grey",
           command=lambda: [frame_a.destroy(), frame_back.destroy(),
                            view_my_rentals()]).pack(pady=5)
# Application bootstrap: one root window shared by every screen.
start = Tk()
start.geometry("500x425")
frame_begin = Frame(start)
frame_begin.pack()


def begin():
    """Entry screen: choose Admin or Customer login."""
    frame_begin.destroy()
    frame_start = Frame(start)
    frame_start.pack()
    Label(frame_start, text="Select your user type :").pack()
    Label(frame_start, text="").pack()
    Button(frame_start, text="Admin", width=10, height=1, activebackground="grey",
           command=lambda: [frame_start.destroy(), login_page()]).pack(side=TOP, pady=5)
    Button(frame_start, text="Customer", width=10, height=1, activebackground="grey",
           command=lambda: [frame_start.destroy(), login_page_customer()]).pack(side=TOP, pady=5)


begin()
start.mainloop()
# -*- coding: utf-8 -*-
"""
This module encapsulates the logic of the model view controller.
Classes Provided:
GroupPoints(list)
a list of ('Coord', ['x', 'y']).
ClusterPoints(GroupPoints)
extends GroupPoints with 'medoid(self)' and 'overlaps(self, other)' methods.
"""
# Python Standard Library
from collections import namedtuple, defaultdict
from functools import partial
from math import ceil
from operator import itemgetter
import os.path as osp
from sys import stderr
# External Dependencies
import imghdr
import matplotlib.pyplot as plt
import numpy as np
import scipy.misc as misc
from scipy.spatial.distance import cdist as distancematrix
from sklearn.cluster import DBSCAN
# Internal Dependencies
from .utility import window, regen, ParameterizedDefaultDict
"""Lamplight Module
This module houses interesting operations on images for lamplight analysis
"""
def image_info(filename):
    """
    Takes in a filename
    returns a tuple (filetype, basename(filename), numpy image)
    """
    # NOTE(review): imghdr is deprecated (removed in Python 3.13) and
    # scipy.misc.imread was removed from SciPy -- consider imageio/Pillow.
    file_type = imghdr.what(filename)
    base_name, *_ = osp.basename(filename).rsplit('.', 1)
    pixels = misc.imread(filename)
    return file_type, base_name, pixels
def empty_canvas(image):
    """Return a white canvas: a copy of ``image`` with every cell set to 255."""
    canvas = np.array(image, copy=True)
    canvas.fill(255)
    return canvas
def save_images(dst, name, img_type='png', **kwargs):
    """
    input:
      dst is a directory destination
      name is the expected name of the image to be saved
      img_type is the saving filetype
      kwargs is dictionary of numpy image arrays
    output:
      saves images into dst, one per kwarg, and returns the written paths
    """
    def write_one(prefix, image):
        # Each file is named <prefix><name>.<img_type> inside dst.
        target = osp.join(dst, prefix + name + '.' + img_type)
        misc.imsave(target, image)
        return target

    return [write_one(prefix, image) for prefix, image in kwargs.items()]
def image_split(src_image):
    """
    Split an HxWxC image into C single-band copies (all other bands zeroed).

    # use by example with 'Extended Iterable Unpacking'
    r, g, b, *_ = image_split(src_image)
    """
    def isolate_band(indexed):
        """Return a zeroed copy of img keeping only colour band band_index."""
        band_index, img = indexed
        isolated = np.zeros(shape=img.shape)
        isolated[:, :, band_index] = img[:, :, band_index]
        return isolated

    bands = enumerate([src_image] * src_image.shape[2])
    return map(isolate_band, bands)
class _step_range_gen(regen):
    """Regenerable descending sequence: maxvalue, maxvalue-delta, ... (> 0).

    Used as the set of intensity thresholds for topograph_image.
    """
    def __init__(self, delta=10, maxvalue=255):
        thresholds = (maxvalue - offset for offset in range(0, maxvalue, delta))
        regen.__init__(self, thresholds)
def topograph_image(image, step):
    """
    Takes in NxMxC numpy matrix and a step size and a delta
    returns NxMxC numpy matrix with contours in each C cell
    """
    thresholds = _step_range_gen(step)  # ~ (255, 245, 235, ...) for step=10
    contoured = np.array(image, copy=True)

    def quantize(color):
        # Snap the value to the upper bound of its (lower, upper] bucket.
        for upper, lower in window(thresholds, 2):
            if lower < color <= upper:
                return upper
            if color > upper:
                break
        return 0

    if step == 1:
        return contoured
    return np.vectorize(quantize)(contoured)
def get_index_cond(image, cond=lambda x: x == 255):
    """
    splits image into dict[band, intensity] as (x, y) point pairs
    this is used to shrink and split the search space for clustering
    this function is much more useful if run on result of topograph_image
    additionally, This function works very poorly on lossy image formats
    """
    groups = ParameterizedDefaultDict(GroupPoints)
    for x, column in enumerate(image):
        for y, pixel in enumerate(column):
            for band, intensity in enumerate(pixel):
                if band == 3:
                    continue  # alpha band: transparency is not applicable
                if cond(intensity):
                    groups[band, intensity].append(Coord(x, y))
    return groups
def make_clusters_dict(points_dict, radius=20, minpoints=10):
    """
    Input:
        points_dict - dictionary of points indexed by d[band, intensity]
        radius - DBSCAN neighbourhood radius (eps)
        minpoints - minimal number of points to be called a cluster
    Output:
        dict[band, intensity][cluster...] = [Coord(x, y)...]
    """
    def make_clusters(points, band, intensity):
        """Cluster one (band, intensity) point group into ClusterPoints."""
        xy_arrays = np.array(points)
        # Fix: pass estimator parameters by keyword -- positional arguments to
        # scikit-learn estimators were deprecated and are rejected by newer
        # releases.  (Also dropped: unused core_sample_indices_ and a
        # pointless `del` of the argument.)
        dbs = DBSCAN(eps=radius, min_samples=minpoints).fit(xy_arrays)
        labels = dbs.labels_
        # Label -1 marks noise; exclude it from the cluster count.
        n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
        return [
            ClusterPoints(band, intensity,
                          (Coord(x, y) for x, y in xy_arrays[labels == i].tolist()))
            for i in range(n_clusters)
        ]

    retval = {}
    for band, intensity in points_dict:
        retval[band, intensity] = \
            make_clusters(points_dict[band, intensity], band, intensity)
    return retval
# Simple 2-D point; field order is (x, y) to match the (row, col) loops above.
Coord = namedtuple('Coord', ['x', 'y'])
class GroupPoints(list):
    """A list of Coord points tagged with the (band, intensity) they came from."""

    def __init__(self, band, intensity, *args, **kwargs):
        list.__init__(self, *args, **kwargs)
        self.__band = band
        self.__intensity = intensity

    def append(self, value):
        """Append ``value``, enforcing that only Coord instances are stored."""
        if not isinstance(value, Coord):
            raise TypeError("value not Coord")
        list.append(self, value)

    def isband(self, band):
        """Return True if this group belongs to colour band ``band``."""
        return band == self.__band

    def isintensity(self, intensity):
        """Return True if this group has intensity ``intensity``.

        Bug fix: the parameter was misspelled ``intenity`` while the body
        compared against ``intensity``, so every call raised NameError.
        """
        return intensity == self.__intensity
class ClusterPoints(GroupPoints):
    """GroupPoints produced by clustering; adds medoid and overlap measures."""

    def __init__(self, band, intensity, *args, **kwargs):
        GroupPoints.__init__(self, band, intensity, *args, **kwargs)

    @property
    def medoid(self):
        """
        given an iterable of (x, y) points, return the medoid
        """
        pts = np.array(self)
        pairwise = distancematrix(pts, pts, metric='sqeuclidean')
        # The medoid minimises the summed squared distance to every other
        # point; ties resolve to the first such point, as min() would.
        best = int(np.argmin(pairwise.sum(axis=1)))
        return self[best]

    def overlaps(self, other):
        """Fraction of the smaller cluster's points absent from the larger."""
        if not isinstance(other, type(self)):
            raise TypeError("other does not share type")
        smaller, larger = sorted([self, other], key=len)
        small_set, large_set = set(smaller), set(larger)
        return len(small_set.difference(large_set)) / len(small_set)
def simplify(overlapped_clusters):
    """
    Flatten a mapping like {0: cr, 1: cg, 2: cb} into a summary dict:
    per-key point counts, the overall bounding box of all points, and the
    medoid of the smallest non-empty cluster.
    """
    min_x, min_y = float('inf'), float('inf')
    max_x, max_y = float('-inf'), float('-inf')
    ret = {}
    for key, cluster in overlapped_clusters.items():
        ret[key] = len(cluster)
        for x, y in cluster:
            # BUG FIX: the original updated max_x/max_y by comparing and
            # assigning against min_x/min_y, so the maxima were wrong.
            min_x = min(min_x, x)
            min_y = min(min_y, y)
            max_x = max(max_x, x)
            max_y = max(max_y, y)
    # (An unused total point count `den` was computed before; removed.)
    # medoids only exist on non-empty lists of points
    ret['medoid'] = min(filter(bool, overlapped_clusters.values()), key=len).medoid
    ret['min_x'] = min_x
    ret['min_y'] = min_y
    ret['max_x'] = max_x
    ret['max_y'] = max_y
    return ret
def overlapping_clusters(cluster_dict):
    """
    INPUT :
        dictionary[band, intensity][cluster...] = [(x, y)...]
    OUTPUT:
        yields one dict per cluster of the first band, mapping each band to
        the cluster in that band that most overlaps it ([] when nothing
        overlaps within the threshold)
    """
    def mostOverlapping(src, dsts, threshold=0.2):
        """
        returns the largest cluster who is most overlapping with src in [dsts...]
        note that 'sorted' is a stable sorting algorithm, so score ties
        resolve to the larger cluster
        """
        # BUG FIX: the original sorted the candidates inside the map() call
        # but indexed the *unsorted* dsts with the winning position, so the
        # returned cluster often did not match the best score.
        ranked = sorted(dsts, key=len, reverse=True)
        scores = map(src.overlaps, ranked)
        key, remaining = min(enumerate(scores), key=itemgetter(1))
        return ranked[key] if remaining < threshold else []
    d = {}
    for band, intensity in cluster_dict:
        d[band] = cluster_dict[band, intensity]  # re-index clusters by band
    fband, *rest_bands = iter(d)
    for cluster in d[fband]:
        paired = {fband: cluster}
        for band in rest_bands:
            paired[band] = mostOverlapping(cluster, d[band])
        yield paired
def colorize_clusters(base_img, color, *clusters):
    """
    Return a copy of *base_img* (a numpy array) in which every (x, y)
    position listed in each cluster has its first three channels set to
    *color*. The input image is not modified.
    """
    painted = np.array(base_img, copy=True)
    for cluster in clusters:
        for x, y in cluster:
            painted[x, y][:3] = color
    return painted
import matplotlib.pyplot as plt
from math import sqrt, ceil
def pie(tumpdir, *lamps):
    """
    For each lamp, render a red/green/blue pie chart of its channel counts
    into *tumpdir* and yield (filename, size, min_x, min_y) placement data.

    NOTE: every chart is written to the same 'foo.png', so each yielded
    file must be consumed before the next iteration overwrites it.
    """
    colors = ['red', 'green', 'blue']
    for lamp in lamps:
        sizes = [getattr(lamp, x) for x in colors]
        patches, _ = plt.pie(sizes, colors=colors, startangle=90)
        plt.axis('equal')
        plt.tight_layout()
        # (A dead `itemgetter(list(), 0)` expression statement was removed
        # here; it created and immediately discarded an itemgetter.)
        filename = osp.join(tumpdir, 'foo.png')
        plt.savefig(filename, bbox_inches='tight', transparent=True)
        plt.close()
        # Scale the chart to the longer side of the lamp's bounding box.
        size = max(lamp.max_x - lamp.min_x, lamp.max_y - lamp.min_y)
        yield filename, size, lamp.min_x, lamp.min_y
from PIL import Image, ImageChops
def pie_canvas(tumpdir, shape, *lamps):
    """Paste one pie chart per lamp onto a single transparent RGBA canvas
    sized from *shape* (numpy-style (height, width, ...)); returns the
    output file path."""
    def trim(im):
        # Crop away the uniform border matching the top-left pixel.
        bg = Image.new(im.mode, im.size, im.getpixel((0,0)))
        diff = ImageChops.difference(im, bg)
        diff = ImageChops.add(diff, diff, 2.0, -100)
        bbox = diff.getbbox()
        return im.crop(bbox) if bbox else im
    bg_h, bg_w, *_ = shape
    background = Image.new('RGBA', (bg_w, bg_h), (255, 255, 255, 0))
    # NOTE(review): pie() yields (..., min_x, min_y) but they are unpacked
    # here as loc_y, loc_x — presumably converting numpy (row, col) indexing
    # into PIL (x, y) coordinates; confirm before changing.
    for filename, file_size, loc_y, loc_x in pie(tumpdir, *lamps):
        print(file_size, file_size, loc_x, loc_y)
        img = Image.open(filename, 'r')
        img = trim(img)
        # NOTE(review): Image.ANTIALIAS is deprecated in newer Pillow
        # releases (use Image.LANCZOS) — confirm the pinned Pillow version.
        img.thumbnail((file_size, file_size), Image.ANTIALIAS)
        img_w, img_h = file_size, file_size
        offset = loc_x, loc_y
        background.paste(img, offset, mask=img)
        img.close()
    #filename, file_size, loc_y, loc_x = 0, 0, 0, 0
    dst = osp.join(tumpdir, 'out.png')
    background.save(dst)
    return dst
|
# Mountaineer
from Globals import *
from Utilities import *
def setup():
    """Processing setup(): runs once at launch; builds the global game state."""
    fullScreen()
    # Keeps images from going blurry
    noSmooth()
    # Where I store everything accessed by draw()
    # (note: the name `globals` shadows the Python builtin, by design here)
    global globals
    globals = Globals(width, height)
    # Part of the init for the player
    globals.player.readSaveFile("saveData.txt")
def mousePressed():
    """Processing mouse handler: log the clicked pixel, then run pending actions."""
    println(get(mouseX, mouseY))
    # See Utilities tab, I put all the actions in there. When something needs to change, I make it an action
    doActions(globals)
def keyPressed():
    """Processing key handler; behaviour depends on the current game mode."""
    # I seperate them into modes first
    if globals.mode == "customize":
        if keyCode == 10: # Enter
            globals.player.attributes["name"] = globals.customize.namePlate.txt
            globals.customize.namePlate.state = "fixed"
        if keyCode == 8: # Delete
            globals.customize.namePlate.txt = globals.customize.namePlate.txt[:-1]
        elif globals.customize.namePlate.state == "edit" and keyCode >= 32 and keyCode <= 126: # Any character
            globals.customize.namePlate.txt = globals.customize.namePlate.txt + str(key)
    elif globals.mode == "play":
        # This is the lead-up to the Village level
        if globals.level == 0:
            if keyCode == 38:
                globals.action = "initVillage"
        if globals.level == 1: # The village itself
            if globals.popup == "": # Keeps the player from moving while the inventory is open
                if keyCode == 38 or keyCode == 37 or keyCode == 39 or keyCode == 40: # Up down left right that sort of thing
                    globals.player.move(keyCode)
                    if globals.pressTime <= 2:
                        globals.player.updateImage() # So the player shows whether it's moving or still
                if keyCode == 69: # E
                    globals.popup = "inventory"
            if globals.popup != "": # This is for plates where it activates when the player is on it. If they player just closed a menu, it won't open again
                if keyCode == 81: # Q closes any open popup
                    globals.popup = ""
    globals.pressTime += 1 # Just tracking, only use it occasionally
def keyReleased():
    """Processing key-release handler: stop movement and fire queued actions."""
    # Debug trace of the player position against the screen size.
    println(str(globals.player.attributes["x"]) + " " + str(width) + " " + str(globals.player.attributes["y"]) + " " + str(height))
    if keyCode == 38 or keyCode == 37 or keyCode == 39 or keyCode == 40:
        # Releasing an arrow key means the player stops walking.
        globals.player.attributes["state"] = "still"
    if keyCode != 81:
        doActions(globals) # See above
    # Housekeeping
    globals.player.updateImage()
    globals.previousKey = keyCode # Not sure if I've used this anywhere yet
    globals.pressTime = 0
def draw():
    """Processing draw(): renders the current mode, once per frame."""
    background(255, 255, 255) # So it doesn't show previous frames
    # Seperated by mode
    if globals.mode == "menu":
        globals.menu.run()
    if globals.mode == "customize":
        globals.customize.run(globals.modeTime)
    if globals.mode == "play":
        # Seperated by level
        if globals.level == 0:
            globals.village.leadUp(globals.modeTime)
        elif globals.level == 1:
            globals.village.run(globals.modeTime, globals.screen, globals.popup)
        # Checking whether player is onscreen (evaluate once, not four times)
        side = checkOutside(globals.player, 0, 0, width, height)
        if side == "l":
            globals.action = "charLeft"
        if side == "r":
            globals.action = "charRight"
        if side == "d":
            globals.action = "charDown"
        if side == "u":
            # BUG FIX: this was `==` (a no-op comparison), so leaving the
            # screen upward never triggered the "charUp" action.
            globals.action = "charUp"
        # Popups
        if globals.popup == "inventory":
            globals.player.inventoryPopup.run()
    # For tracking, very useful
    globals.modeTime += 1
|
# -*- coding: utf-8 -*-
import serial
from django.shortcuts import render
from django.http import HttpResponse, HttpRequest
from django.contrib.auth import login, logout
from django.shortcuts import redirect
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.template import RequestContext, Template, Context
from datetime import datetime, date
from django.utils import timezone
import time
import MySQLdb
from sms_controll.forms import KierowcaForm # AddKierowcaForm
from sms_controll.models import Kierowca, Zlecenie, Zlecenie2, FirmaFaktura
from django.views.generic.edit import CreateView, UpdateView
from django.views.generic import ListView, DeleteView
# Create your views here.
def index(request):
    """Application home page (strona glowna aplikacji)."""
    assert isinstance(request, HttpRequest)
    context = RequestContext(request, {
        'title': 'Home Page',
        'year': datetime.now().year,
    })
    return render(request, "sms_controll/index.html", context_instance=context)
def home(request):
    """Dashboard page shown after login."""
    assert isinstance(request, HttpRequest)
    context = RequestContext(request, {
        'title': 'Home Page',
        'year': datetime.now().year,
    })
    return render(request, "sms_controll/home.html", context_instance=context)
#moje
def utworz_zlecenie(request):
    """Placeholder view for creating an order; not implemented (order
    creation is handled by the UtworzZlecenie class-based view in this
    module)."""
    pass
def sms_list_zaladowany(request):
    """Renders the page listing incoming SMS with status 'zaladowany'."""
    # SECURITY NOTE: database credentials are hard-coded here (and in the
    # sibling views) — move them into settings or the environment.
    db = MySQLdb.connect(user='firefly_4', db='firefly_4', passwd='ZgX3.14.2xY', host='sql.firefly.nazwa.pl')
    try:
        cursor = db.cursor()
        cursor.execute("SELECT * FROM inbox WHERE textdecoded = 'zaladowany' or textdecoded = 'Zaladowany'")
        results = cursor.fetchall()
    finally:
        # FIX: close the connection even when the query raises.
        db.close()
    # Build (ordinal, date, phone number, status text) tuples for the
    # template. Assumes inbox columns: row[0]=receive datetime,
    # row[3]=sender number, row[8]=decoded text — TODO confirm schema.
    result_list = [
        (i + 1, row[0].strftime("%Y-%m-%d %H:%M:%S"), row[3], row[8])
        for i, row in enumerate(results)
    ]
    assert isinstance(request, HttpRequest)
    return render(request, 'sms_controll/sms_list_zaladowany.html',
                  context_instance=RequestContext(request, {
                      'title': 'SMS - przychodzacy',
                      'lista_sms': result_list,
                  }))
def sms_list_rozladowany(request):
    """Renders the page listing incoming SMS with status 'rozladowany'."""
    # SECURITY NOTE: hard-coded credentials; move to settings/env.
    db = MySQLdb.connect(user='firefly_4', db='firefly_4', passwd='ZgX3.14.2xY', host='sql.firefly.nazwa.pl')
    try:
        cursor = db.cursor()
        cursor.execute("SELECT * FROM inbox WHERE textdecoded = 'rozladowany' or textdecoded = 'Rozladowany'")
        results = cursor.fetchall()
    finally:
        # FIX: close the connection even when the query raises.
        db.close()
    # (ordinal, date, phone number, status text) tuples for the template;
    # assumes the same inbox column layout as sms_list_zaladowany.
    result_list = [
        (i + 1, row[0].strftime("%Y-%m-%d %H:%M:%S"), row[3], row[8])
        for i, row in enumerate(results)
    ]
    assert isinstance(request, HttpRequest)
    return render(request, 'sms_controll/sms_list_rozladowany.html',
                  context_instance=RequestContext(request, {
                      'title': 'SMS - przychodzacy',
                      'lista_sms': result_list,
                  }))
def sms_list_all(request):
    """Renders the page listing every incoming SMS."""
    # SECURITY NOTE: hard-coded credentials; move to settings/env.
    db = MySQLdb.connect(user='firefly_4', db='firefly_4', passwd='ZgX3.14.2xY', host='sql.firefly.nazwa.pl')
    try:
        cursor = db.cursor()
        cursor.execute("SELECT * FROM inbox")
        results = cursor.fetchall()
    finally:
        # FIX: close the connection even when the query raises.
        db.close()
    # (ordinal, date, phone number, status text) tuples for the template;
    # assumes the same inbox column layout as sms_list_zaladowany.
    result_list = [
        (i + 1, row[0].strftime("%Y-%m-%d %H:%M:%S"), row[3], row[8])
        for i, row in enumerate(results)
    ]
    assert isinstance(request, HttpRequest)
    return render(request, 'sms_controll/sms_list_all.html',
                  context_instance=RequestContext(request, {
                      'title': 'SMS - przychodzacy',
                      'lista_sms': result_list,
                  }))
#koniec moje
def kierowca_new(request):
    """Renders the driver form page.

    NOTE(review): this passes the KierowcaForm *class* rather than an
    instance; Django templates normally expect a form instance
    (KierowcaForm()) — confirm before changing.
    """
    form = KierowcaForm #AddKierowcaForm
    return render(request, 'sms_controll/kierowca.html', {'form': form})
def loguj(request):
    """Logowanie uzytkownika (user login view)."""
    from django.contrib.auth.forms import AuthenticationForm
    form = AuthenticationForm(request)
    if request.method == 'POST':
        form = AuthenticationForm(request, request.POST)
        if form.is_valid():
            login(request, form.get_user())
            messages.success(request, "Zostales zalogowany!")
            return redirect(reverse('sms_controll:home'))
    # BUG FIX: the template previously always received a fresh unbound
    # form, so validation errors from a failed POST were never shown.
    kontekst = {'form': form}
    return render(request, 'sms_controll/loguj.html', kontekst)
def wyloguj(request):
    """Wylogowanie uzytkownika: log the user out, flash a message and
    redirect to the index page."""
    logout(request)
    messages.info(request, "Zostales wylogowany!")
    return redirect(reverse('sms_controll:index'))
class UtworzKierowce(CreateView):
    """Create a new Kierowca (driver); the template also lists existing ones."""
    model = Kierowca
    fields = '__all__'
    context_object_name = 'kierowcy'
    template_name = 'sms_controll/kierowca_form.html'
    success_url = '/home/kierowcy'
    def get_initial(self):
        # NOTE(review): returns the parent's initial unchanged — presumably
        # a leftover extension point; confirm it can be removed.
        initial = super(UtworzKierowce, self).get_initial()
        return initial
    def get_context_data(self, **kwargs):
        # Expose all drivers so the template can render the list beside the form.
        context = super(UtworzKierowce, self).get_context_data(**kwargs)
        context['kierowcy'] = Kierowca.objects.all()
        return context
    def form_valid(self, form):
        kierowca = form.save(commit=False)
        kierowca.save()
        messages.success(self.request, "Dodano nowego kierowce!")
        return super(UtworzKierowce, self).form_valid(form)
class EdytujKierowce(UpdateView):
    """Edit an existing Kierowca (driver)."""
    model = Kierowca
    # BUG FIX: ('pk') is just the string 'pk'; a one-element tuple needs a
    # trailing comma. NOTE(review): readonly_fields is a ModelAdmin option,
    # not an UpdateView one — confirm it is used anywhere at all.
    readonly_fields = ('pk',)
    form_class = KierowcaForm
    context_object_name = 'kierowcy'
    template_name = 'sms_controll/kierowca_form.html'
    success_url = '/home/kierowcy'
    def get_context_data(self, **kwargs):
        context = super(EdytujKierowce, self).get_context_data(**kwargs)
        #context['kierowcy'] = Kierowca.objects.filter(kierowca=self.request.user)
        return context
    def get_object(self, queryset=None):
        # Look the driver up by the pk captured from the URL.
        kierowca = Kierowca.objects.get(id=self.kwargs['pk'])
        return kierowca
class KierowcyList(ListView):
    """Plain list of all drivers (default ListView template and queryset)."""
    model = Kierowca
    #success_url = '/home/kierowcy'
class WyslijSMS(UpdateView): #TextMessage:
    """Send an SMS to a single driver via SmsForm."""
    model = Kierowca
    from sms_controll.forms import SmsForm
    form_class = SmsForm
    template_name = 'sms_controll/sms_form.html'
    success_url = '/home/kierowcy'
    def get_context_data(self, **kwargs):
        # BUG FIX: super() was called with EdytujKierowce, which is not an
        # ancestor of this class and raises TypeError at runtime.
        context = super(WyslijSMS, self).get_context_data(**kwargs)
        return context
    def get_object(self, queryset=None):
        # Look the driver up by the pk captured from the URL.
        kierowca = Kierowca.objects.get(id=self.kwargs['pk'])
        return kierowca
"""
def __init__(self, recipient="", message="TextMessage.content not set."):
self.recipient = recipient
self.content = message
def setRecipient(self, number):
self.recipient = number
def setContent(self, message):
self.content = message
def connectPhone(self):
self.ser = serial.Serial('COM10', 460800, timeout=5, xonxoff = False, rtscts = False, bytesize = serial.EIGHTBITS, parity = serial.PARITY_NONE, stopbits = serial.STOPBITS_ONE)
time.sleep(1)
def sendMessage(self):
self.ser.write('ATZ\r')
time.sleep(1)
self.ser.write('AT+CMGF=1\r') #tryb nadawania 0 - PDU, 1 - textowy
time.sleep(1)
self.ser.write('''AT+CMGS="''' + self.recipient + '''"\r''')
time.sleep(1)
self.ser.write(self.content + "\r")
time.sleep(1)
self.ser.write(chr(26))
time.sleep(1)
def disconnectPhone(self):
self.ser.close()
"""
'''
sms = TextMessage("+48512457556","Co jest z tym modemem")
sms.connectPhone()
sms.sendMessage()
sms.disconnectPhone()
print "message sent successfully"
'''
class UtworzZlecenie(CreateView):
    """Create a new Zlecenie (order), pre-filled with today's date."""
    model = Zlecenie
    fields = ['data_zamowienia', 'firma_zamawiajaca']
    template_name = 'sms_controll/utworz_zlecenie_form.html'
    success_url = '/home/zlecenie'
    def get_initial(self):
        initial = super(UtworzZlecenie, self).get_initial()
        # Default the order date to today.
        initial['data_zamowienia'] = date.today()
        return initial
    def get_context_data(self, **kwargs):
        context = super(UtworzZlecenie, self).get_context_data(**kwargs)
        #context['zlecenia'] = Zlecenie.objects.all()
        return context
    def form_valid(self, form):
        # FIX: the local variable used to shadow the imported Zlecenie
        # model class inside this method; renamed to lowercase.
        zlecenie = form.save(commit=False)
        zlecenie.save()
        messages.success(self.request, "Dodano nowe Zlecenie!")
        return super(UtworzZlecenie, self).form_valid(form)
class PokazZlecenia(ListView):
    """List all orders.

    NOTE(review): 'fields' and 'success_url' are not ListView options —
    they look like leftovers from a CreateView; confirm they can go.
    """
    model = Zlecenie
    fields = '__all__'
    #context_object_name = 'kierowcy'
    template_name = 'sms_controll/zlecenia.html'
    success_url = '/home/zlecenie'
import unittest
from tree_data import FileSystemTree
from tree_data_helper_test import print_directory
# Test fixture location (Windows-style path) and the treemap canvas size.
DIR_PATH = "Testing\\"
WIDTH = 1000
HEIGHT = 600
class Test_File_System_Get_Leaf(unittest.TestCase):
    """Exercises FileSystemTree.get_selected_leaf against the known
    1000x600 treemap layout of the Testing\\Depth 2 fixture."""
    # Testing the corners of Testing\\Depth 2
    def test_rectangle_corners(self):
        filesys = FileSystemTree(DIR_PATH + "Depth 2")
        print_directory(filesys)
        print(filesys.generate_treemap((0, 0, WIDTH, HEIGHT)))
        index = ["Bank.xlsx", "Bird Courses.txt", "COG.docx", "sadsad.txt"]
        # FIX: 'corners' was assigned twice with the identical literal; the
        # redundant first assignment has been removed.
        corners = [(0, 0), (0, 510), (0, 600), (442, 0), (442, 510), (442, 584), (442, 600), (1000, 0), (1000, 584), (1000, 600)]
        # Each entry is either the single expected file index, or a list of
        # acceptable indices where several rectangles meet at that corner.
        answers = [0, [0, 1], 1, [0, 2], [0, 1, 2], [1, 2, 3], [1, 3], 2, [2, 3], 3]
        self.assertEqual(len(corners), len(answers))
        for i in range(0, len(corners)):
            leaf = filesys.get_selected_leaf(corners[i], (0, 0, WIDTH, HEIGHT))
            if type(answers[i]) is int:
                self.assertEqual(index[answers[i]], leaf._root)
            else:
                is_answer_correct = False
                for answer in answers[i]:
                    if index[answer] == leaf._root:
                        is_answer_correct = True
                        break
                self.assertTrue(is_answer_correct)
    # Testing the edges of Testing\\Depth 2
    def test_rectangle_edges(self):
        filesys = FileSystemTree(DIR_PATH + "Depth 2")
        print_directory(filesys)
        print(filesys.generate_treemap((0, 0, 1000, 600)))
        # First column
        for y in range(1, 599):
            leaf_root = filesys.get_selected_leaf((0, y), (0, 0, 1000, 600))._root
            if y < 510:
                self.assertEqual(leaf_root, "Bank.xlsx")
            elif y > 510:
                self.assertEqual(leaf_root, "Bird Courses.txt")
        # Second column (boundary points may fall in either neighbour)
        for y in range(1, 599):
            leaf_root = filesys.get_selected_leaf((442, y), (0, 0, WIDTH, HEIGHT))._root
            if y < 510:
                self.assertTrue((leaf_root == "COG.docx" or leaf_root == "Bank.xlsx"))
            elif 510 < y < 584:
                self.assertTrue((leaf_root == "COG.docx" or leaf_root == "Bird Courses.txt"))
            elif y > 584:
                self.assertTrue((leaf_root == "sadsad.txt" or leaf_root == "Bird Courses.txt"))
        # Third column
        for y in range(1, 599):
            leaf_root = filesys.get_selected_leaf((1000, y), (0, 0, WIDTH, HEIGHT))._root
            if y < 584:
                self.assertEqual(leaf_root, "COG.docx")
            elif y > 584:
                self.assertEqual(leaf_root, "sadsad.txt")
    # Testing the interior points of rectangles
    def test_interior(self):
        filesys = FileSystemTree(DIR_PATH + "Depth 2")
        # Expected rectangle (x, y, width, height) for every leaf file.
        rects = {"Bank.xlsx": (0, 0, 442, 510), "Bird Courses.txt": (0, 510, 442, 90), "COG.docx": (442, 0, 558, 584), "sadsad.txt": (442, 584, 558, 16)}
        for file in rects:
            rectangle = rects[file]
            for x in range(rectangle[0] + 1, rectangle[0] + rectangle[2]):
                for y in range(rectangle[1] + 1, rectangle[1] + rectangle[3]):
                    leaf_root = filesys.get_selected_leaf((x, y), (0, 0, 1000, 600))._root
                    self.assertEqual(leaf_root, file)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
from itertools import product
# Closed-form generators for the six polygonal number families
# (Project Euler 61). NOTE: under Python 2 '/' here is integer division,
# so all generated values are ints.
functions = dict(triangle=lambda n: n * (n+1) / 2,
                 square=lambda n: n**2,
                 pentagonal=lambda n: n * (3*n - 1) / 2,
                 hexagonal=lambda n: n * (2*n - 1),
                 heptagonal=lambda n: n * (5*n - 3) / 2,
                 octagonal=lambda n: n*(3*n-2))
def follow(n1, n2):
    """True when the last two digits of n1 equal the first two digits of n2."""
    tail = str(n1)[-2:]
    head = str(n2)[:2]
    return tail == head
# Index every 4-digit member of each family by the n that generated it.
lists = {name: dict() for name in functions}
for name, function in functions.iteritems():
    n, calc = 0, function(0)
    while calc < 10000:
        if calc >= 1000:
            lists[name][n] = calc
        n += 1
        calc = function(n)
print lists
# Brute-force search for a cycle of six 4-digit numbers, one per family,
# where each number's last two digits are the next one's first two and the
# last wraps around to the first.
# NOTE(review): the families are visited in one fixed order, but the
# puzzle's cyclic set may pair values with the families in any order —
# confirm this fixed ordering is sufficient before relying on the result.
for n1, calc1 in lists['triangle'].iteritems():
    print calc1
    for n2, calc2 in lists['square'].iteritems():
        if n2 != n1 and follow(calc1, calc2):
            print "\t", calc2
            for n3, calc3 in lists['pentagonal'].iteritems():
                if n3 not in [n1, n2] and follow(calc2, calc3):
                    print "\t\t", calc3
                    for n4, calc4 in lists['hexagonal'].iteritems():
                        if n4 not in [n1, n2, n3] and follow(calc3, calc4):
                            print "\t\t\t", calc4
                            for n5, calc5 in lists['heptagonal'].iteritems():
                                if n5 not in [n1, n2, n3, n4] and follow(calc4, calc5):
                                    print "\t\t\t\t", calc5
                                    for n6, calc6 in lists['octagonal'].iteritems():
                                        if n6 not in [n1, n2, n3, n4, n5] and follow(calc5, calc6) and follow(calc6, calc1):
                                            print "\t\t\t\t\t", calc6
                                            print "Done :", n1, calc1, n2, calc2, n3, calc3, n4, calc4, n5, calc5, n6, calc6, sum([calc1, calc2, calc3, calc4, calc5, calc6])
                                            break
import hmac
import time
import urlparse
from hashlib import sha1
from time import time
from .account import get_temp_url_key, set_temp_url_key
from .credentials import swift
class Object(object):
    """ A swift object.
    Can be initialized by specifying a full name (includes container),
    or container and name separately
    """
    def __init__(self, full_name=None, container_name=None, name=None,
                 bytes=None, last_modified=None, hash=None,
                 content_type=None):
        # NOTE: 'bytes' and 'hash' shadow builtins, but they are part of
        # the public keyword interface, so they are kept as-is.
        if full_name is not None:
            # "container/rest/of/name" -> split only on the first slash.
            (self.container_name, self.name) = full_name.split('/', 1)
        else:
            self.container_name = container_name
            self.name = name
        self.bytes = bytes                  # object size, as reported by swift
        self.last_modified = last_modified  # last-modified timestamp
        self.hash = hash                    # ETag/checksum
        self.content_type = content_type
        self._url = None # cached url
    def __repr__(self):
        return self.name
    @property
    def full_name(self):
        """ Object name with container name prepended.
        Note that this does not contain version name or account name
        >>> Object(container_name='foo', name='baz/quux.png').full_name
        'foo/baz/quux.png'
        """
        return "{}/{}".format(self.container_name, self.name)
    @property
    def path(self):
        """ Return the full path of the url.
        >>> obj = Object(...)
        >>> obj.path
        '/v1/AUTH_a922ead7-1df8-42c1-8a39-226aff4223a4/'
        """
        return urlparse.urlparse(self.url).path
    @property
    def url(self):
        """ Return the full url for this object """
        # We cache the url so we don't need to do a lookup every time
        if self._url is None:
            storage_url, _ = swift().get_auth()
            s = "{storage_url}/{full_name}"
            self._url = s.format(storage_url=storage_url,
                                 full_name=self.full_name)
        return self._url
    def generate_temp_url(self, expires=None,
                          method='GET'):
        """ Generate a temporary url for this object
        expires : int
            Timestamp in Unix time. The temporary url will be accessible
            until this time. If set to *None*, the expiration date will
            be 24 hours after this method is called.
        method : str
            Permitted http method. By default, only allows ``GET``.
        Returns the url.
        """
        if expires is None:
            expires = int(time()+60*60*24)
        # If the secret key exists, retrieve it. Otherwise, generate it
        try:
            key = get_temp_url_key()
        except KeyError:
            key = set_temp_url_key()
        # The signature covers method, expiry and object path, matching the
        # swift tempurl middleware's expected HMAC-SHA1 body.
        hmac_body = '%s\n%s\n%s' % (method, expires, self.path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        s = '{url}?temp_url_sig={sig}&temp_url_expires={expires}'
        return s.format(url=self.url, sig=sig, expires=expires)
|
def permuta(a,b,c):
temp = a
a = b
b = temp
print("A = " + str(a) + ", B = " + str(b) + ",C = " + str(c))
# Read three values from stdin (input() returns strings) and print several
# permutations. Note permuta never mutates its arguments, so x, y and z
# keep their original values across all four calls.
x = input()
y = input()
z = input()
permuta(x,y,z)
permuta(y,z,x)
permuta(x,y,z)
permuta(x,z,y)
|
import tensorflow as tf
import os
from gmc.conf import settings
from gmc.core.cache import store
class NN:
    """Fully connected feed-forward genre classifier built on TensorFlow 1.x.

    All hyper-parameters (layer sizes, learning rate, dropout, batch size)
    come from settings.NN; trained checkpoints go to BRAIN_DIR/nn.
    """
    def __init__(self, dataset, n_input=None):
        self.data = dataset
        self.layers = []
        self.weights = []
        self.bias = []
        self.results_dir = os.path.join(settings.BRAIN_DIR, "nn")
        if n_input is None:
            # Default input width: total length of the configured features.
            n_input = 0
            for f in settings.FEATURES:
                n_input += settings.FEATURES_LENGTH[f]
        self.n_input = n_input
        if not os.path.isdir(self.results_dir):
            os.mkdir(self.results_dir)
    def train(self, display_step=100, out=False, path='model.final'):
        """Train the network and checkpoint it under results_dir.

        Returns (train_acc, test_acc, train_cost, test_cost) histories,
        sampled every *display_step* steps.
        """
        n_classes = len(settings.GENRES)
        n_input = self.n_input
        x = self.x = tf.placeholder("float", [None, n_input], name='x')
        y = self.y = tf.placeholder("float", [None, n_classes], name='y')
        keep_prob = self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
        w = self.get_weights(n_input, n_classes)
        b = self.get_bias(n_classes)
        y_ = self.y_ = self.prepare_layers()
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.y_, labels=self.y))
        optimizer = tf.train.AdamOptimizer(learning_rate=settings.NN['LEARNING_RATE']).minimize(cost)
        init = tf.global_variables_initializer()
        saver = tf.train.Saver()
        self.correct_pred = correct_pred = tf.equal(tf.argmax(self.y_, 1), tf.argmax(self.y, 1))
        self.accuracy = accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        train_acc = []
        test_acc = []
        train_cost = []
        test_cost = []
        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)) as sess:
            sess.run(init)
            step = 1
            for i in range(settings.NN['TRAINING_CYCLES']):
                bx, by = self.data.train.next_batch(settings.NN['BATCH_SIZE'])
                sess.run(optimizer, feed_dict={x: bx, y: by, keep_prob: settings.NN['DROPOUT_PROB']})
                if step % display_step == 0:
                    # Batch accuracy/loss measured with dropout disabled.
                    acc = sess.run(accuracy, feed_dict={x: bx, y: by, keep_prob: 1.})
                    loss = sess.run(cost, feed_dict={x: bx, y: by, keep_prob: 1.})
                    train_acc.append(acc)
                    tac = sess.run(accuracy,
                        feed_dict={x: self.data.test.music, y: self.data.test.labels, keep_prob: 1.})
                    te_loss = sess.run(cost,
                        feed_dict={x: self.data.test.music, y: self.data.test.labels, keep_prob: 1.})
                    train_cost.append(loss)
                    test_cost.append(te_loss)
                    test_acc.append(tac)
                    save_path = saver.save(sess, os.path.join(self.results_dir, "model.ckpt"))
                    if out:
                        print("Iter " + str(step * settings.NN['BATCH_SIZE']) + ", Minibatch Loss= " + \
                            "{:.6f}".format(loss) + ", Training Accuracy= " + "{:.5f}".format(acc))
                        print("Testing Accuracy:", tac)
                        print("Model saved in file: %s" % save_path)
                step += 1
            save_path = saver.save(sess, os.path.join(self.results_dir, path))
            storage = store(self.results_dir)
            storage['save.path'] = save_path
            print("Model saved in file: %s" % save_path)
            print("Testing Accuracy:", sess.run(self.accuracy,
                feed_dict={x: self.data.test.music, y: self.data.test.labels, keep_prob: 1.}))
        return train_acc, test_acc, train_cost, test_cost
    def prepare_layers(self):
        """Wire up hidden ReLU layers, a dropout layer, and the output layer."""
        prev_layer = self.x
        w = self.weights
        b = self.bias
        for i in range(settings.NN['NUM_HIDDEN_LAYERS']):
            layer = tf.add(tf.matmul(prev_layer, w[i]), b[i])
            layer = tf.nn.relu(layer)
            self.layers.append(layer)
            prev_layer = self.layers[-1]
        drop_layer = tf.nn.dropout(self.layers[-1], self.keep_prob)
        self.layers.append(drop_layer)
        out_layer = tf.add(tf.matmul(self.layers[-1], w[-1]), b[-1], name='y_')
        self.layers.append(out_layer)
        return out_layer
    def get_weights(self, inp, out):
        """Create weight matrices for every hidden layer plus the output layer."""
        prev_layer_out = inp
        # BUG FIX: appending to settings.NN['HIDDEN_INPUTS'] directly mutated
        # the shared settings list, so every call grew it by one; copy first.
        num_weights = list(settings.NN['HIDDEN_INPUTS'])
        num_weights.append(out)
        for i in range(settings.NN['NUM_HIDDEN_LAYERS']+1):
            next_out = num_weights[i]
            if settings.NN['RANDOM']:
                w = tf.Variable(tf.random_normal([prev_layer_out, next_out]))
            else:
                w = tf.Variable(tf.truncated_normal([prev_layer_out, next_out], stddev=0.1))
            prev_layer_out = next_out
            self.weights.append(w)
        return self.weights
    def get_bias(self, out):
        """Create bias vectors for every hidden layer plus the output layer."""
        for i in range(settings.NN['NUM_HIDDEN_LAYERS']):
            b = tf.Variable(tf.random_normal([settings.NN['HIDDEN_INPUTS'][i]]))
            self.bias.append(b)
        b = tf.Variable(tf.random_normal([out]))
        self.bias.append(b)
        return self.bias
    def eval(self):
        """Print test-set accuracy.

        BUG FIX: the body referenced bare names x/y/keep_prob which are
        undefined in this scope; use the placeholders stored on self.
        NOTE(review): self.sess is never assigned in this class — a live
        session must be attached to the instance before calling eval().
        """
        print("Testing Accuracy:", self.sess.run(self.accuracy,
            feed_dict={self.x: self.data.test.music, self.y: self.data.test.labels, self.keep_prob: 1.}))
|
from djl_ui import *
from djl_templater import *
class PostResponder(object):
    """Base class: generates and posts a reply comment for each post via
    the Graph API object supplied by the caller."""
    def __init__(self, posts, template, graph):
        self.posts = posts          # iterable of post dicts to answer
        self.template = template    # reply template passed to the Templater
        self.templater = Templater()
        self.graph = graph          # Graph API client used to post comments
    def respond(self):
        # Generate and send a reply for every post.
        for post in self.posts:
            self.send_response_to_post(post, self.response_to_post(post))
    def response_to_post(self, post):
        # Hook for subclasses; the base class replies with an empty string.
        return ""
    def send_response_to_post(self, post, response):
        # Comment on the post, then echo what was sent to the console.
        self.graph.post(path=str(post["id"]) + "/comments", message=response)
        djl_print(response)
        print djl_hint_seperator("sent to: " + post["sender"]["name"], 1)
class GenericResponder(PostResponder):
    """Replies with the template filled in from the post's sender data."""
    def response_to_post(self, post):
        return self.templater.populate_template(self.template, post["sender"])
class SpecialResponder(PostResponder):
    """Interactive responder: shows each post and lets the operator type a
    custom thank-you note, falling back to the templated reply."""
    def response_to_post(self, post):
        sender = post["sender"]
        djl_print("Post from " + sender["name"] + ": ")
        djl_print("> " + post["msg"] + "")
        res = self.templater.populate_template(self.template, sender)
        new_thanks = djl_input("Thank you note for " + sender["name"] + ": (Enter nothing to post a generic response)\n")
        if len(new_thanks) > 0:
            res = new_thanks
        return res
|
import os
from path import Path
def delete_file(DIRECTORY, filename):
    """Delete every file named *filename* anywhere under DIRECTORY."""
    root = Path(DIRECTORY)
    for entry in root.walk():
        if entry.isfile() and entry.name == filename:
            entry.remove()
def delete_file_by_extension(DIRECTORY, extension="*.pyc"):
    """Delete every file under DIRECTORY matching the glob *extension*."""
    for match in Path(DIRECTORY).walkfiles(extension):
        match.remove()
def delete_file_by_size(d, bigger_tha_size=5 * 1024 * 1024):
    """Delete every file under *d* larger than the threshold (in bytes).

    Generalized: *d* may now be a Path or a plain directory string, making
    this consistent with the sibling helpers that accept strings.
    NOTE: the misspelled parameter name 'bigger_tha_size' is kept for
    backward compatibility with keyword callers.
    """
    root = Path(d)
    for entry in root.walk():
        if entry.isfile() and entry.size > bigger_tha_size:
            entry.remove()
def temp_file_removal(DIRECTORY, extras=None):
    """Delete common temporary/backup files under DIRECTORY.

    extras: optional iterable of extra glob patterns to remove as well.
    """
    # FIX: the previous mutable default argument ([]) is replaced with None.
    extensions = ['.โ','~$*.doc','.000','.001','.bak',
                  '.bk!','.chk','.fts','.gid','.log',
                  '.old','.prv','.tmp','.wbk']
    # NOTE(review): walkfiles() treats these as glob patterns, so a bare
    # '.tmp' only matches a file literally named '.tmp' — these entries
    # presumably should be '*.tmp' etc.; confirm intent before changing.
    # The first entry ('.โ') also looks like a mis-encoded character.
    if extras:
        extensions.extend(extras)
    for ext in extensions:
        delete_file_by_extension(DIRECTORY, ext)
import os
import struct
from transitions.extensions import GraphMachine
from Game.utils import send_push_message, send_reply_message
class GameMachine(GraphMachine):
    """Finite-state machine driving the LINE-bot 'egg drop' guessing game.

    The player has 3 eggs and 30 throws to find the highest safe floor X
    (0..1000), plus one chance to check the final answer.
    """
    def __init__(self, user_id, **machine_configs):
        # NOTE(review): this builds a *separate* GraphMachine with self as
        # its model instead of calling super().__init__ — presumably the
        # transitions library drives self through self.machine; confirm.
        self.machine = GraphMachine(model = self, **machine_configs)
        self.user_id = user_id
        self.ans = 0           # hidden answer for the current round
        self.remain_times = 0  # remaining throws
        self.remain_eggs = 0   # remaining unbroken eggs
    def help(self, event):
        # Transition guard: true when the message matches no known command.
        return not (self.get_info(event) or self.query(event) or self.answer(event))
    def get_info(self, event): # "#"
        param = event.message.text.split()
        return len(param) == 1 and param[0] == '#'
    def query(self, event): # "? xxx"
        # BUG FIX: for a bare "?" the original evaluated param[1:].isdigit()
        # on a *list* slice, raising AttributeError. A valid query is
        # exactly two tokens: "?" followed by a number.
        param = event.message.text.split()
        return len(param) == 2 and param[0] == '?' and param[1].isdigit()
    def answer(self, event): # "! xxx"
        # BUG FIX: same AttributeError as query() for a bare "!".
        param = event.message.text.split()
        return len(param) == 2 and param[0] == '!' and param[1].isdigit()
    def on_enter_reset(self, event):
        self.forward()
    def on_exit_reset(self):
        # Start a fresh round: announce the rules, then roll a new answer.
        send_push_message(
            user_id = self.user_id,
            text = '''Now you have 3 eggs, I want to know the
MAX number X such that the egg won't break
if I throw it from Xth floor of the building.
The range for X is 0 ~ 1000.
Note that if the egg is not broken,
it will be thrown again in the next trial.
Please help me to find the number X within at most 30 trials.
You only have 1 chance to check the answer.
Type "help" for more information.
(It's always possible to find X in such restriction,
you can try to figure out how to do that. ^^)'''
        )
        self.remain_times = 30
        self.remain_eggs = 3
        # Fresh answer drawn from OS entropy.
        self.ans = struct.unpack('>Q', os.urandom(8))[0] % 1001 # answer is 0 ~ 1000
    def on_enter_usage(self, event):
        send_reply_message(
            reply_token = event.reply_token,
            text = '''Usage:
# : Get the information of remain throwing times and eggs.
? N : Query the result of throwing egg from Nth floor.
! N : Check whether N is the answer or not and restart game.
None of above : Print this message.'''
        )
        self.forward()
    def on_enter_info(self, event):
        status = f'''Remain:
Eggs: {self.remain_eggs}
Times: {self.remain_times}'''
        send_reply_message(
            reply_token = event.reply_token,
            text = status
        )
        self.forward()
    def on_enter_reply(self, event):
        # Handle "? N": simulate one throw from floor N.
        if self.remain_eggs == 0:
            send_reply_message(
                reply_token = event.reply_token,
                text = 'No remaining egg for you to try.'
            )
        elif self.remain_times == 0:
            send_reply_message(
                reply_token = event.reply_token,
                text = 'You try too many times.'
            )
        else:
            param = event.message.text.split()
            # The egg breaks when thrown from above the answer floor.
            broken = int(param[1]) > self.ans
            if broken:
                self.remain_eggs -= 1
            self.remain_times -= 1
            send_reply_message(
                reply_token = event.reply_token,
                text = 'Broken' if broken else 'Safe'
            )
        self.forward()
    def on_enter_judge(self, event):
        # Handle "! N": check the final answer, then restart the game.
        param = event.message.text.split()
        send_reply_message(
            reply_token = event.reply_token,
            text = 'Correct!' if int(param[1]) == self.ans else 'Wrong answer :('
        )
        self.forward(event)
|
# coding: utf-8
import sys
import argparse
import lglass.generators.roa
import lglass.database.file
def build_argparser():
    """Build the command-line parser for the ROA table generator."""
    parser = argparse.ArgumentParser(description="Generator for ROA tables")
    parser.add_argument("--database", "-D", default=".", type=str,
            help="Path to database")
    parser.add_argument("--table", "-t", default="roa1", type=str,
            help="Name of ROA table")
    parser.add_argument("--flush", "-f", action="store_true", default=False,
            help="Flush table entries before insertion")
    parser.add_argument("-6", dest="protocol", action="store_const", const=6,
            help="Generate IPv6 ROA table")
    parser.add_argument("-4", dest="protocol", action="store_const", const=4,
            help="Generate IPv4 ROA table")
    return parser
def main(argv=sys.argv[1:]):
    """Print a ROA table built from route/route6 objects in a file database.

    NOTE: the default argv is captured once at import time; pass an explicit
    list to override it.
    """
    argparser = build_argparser()
    args = argparser.parse_args(argv)
    db = lglass.database.file.FileDatabase(args.database)
    # IPv4 is the default when neither -4 nor -6 was given
    if args.protocol == 4 or args.protocol is None:
        routes = (db.get(*spec) for spec in db.list() if spec[0] == "route")
    elif args.protocol == 6:
        routes = (db.get(*spec) for spec in db.list() if spec[0] == "route6")
    # emit the flush command before the table lines so consumers reset first
    if args.flush:
        print("flush roa table {table}".format(table=args.table))
    print("\n".join(lglass.generators.roa.roa_table(routes, args.table)))
if __name__ == "__main__":
    main()
|
import re
# input file: expects at least two sequences, one per line
finename = "sequencias.fasta"
# scoring scheme for the alignment
match = int (1)
mismatch = int(-1)
gaps = int (-2)
# read the .fasta file
with open(finename) as f:
    arquivo = f.readlines()
# join the file lines back into one string
sequencia = ''.join(arquivo)
# split into the individual strands (one per line)
fitaCodificadora = re.split('\n', sequencia)
# re.split('') yields a list of single characters with empty ends
s = re.split('', fitaCodificadora[0])
f = re.split('', fitaCodificadora[1])
s.pop()  # delete trailing empty element
s.pop(0)  # delete leading empty element
f.pop()  # delete trailing empty element
f.pop(0)  # delete leading empty element
# prepend the gap marker so the score matrix has a row/column 0
s.insert(0, '-')
f.insert(0, '-')
tam_coluna = len(f)
tam_linha = len(s)
lista_i = []
lista_j = []
score = 0
# helper to display the score matrix
def exibir_matriz(matriz):
    """Print each row of *matriz* on its own line."""
    for row_values in matriz:
        print(row_values)
# build an empty tam_linha x tam_coluna matrix
matriz = []
for i in range(tam_linha):
    linha = []
    for j in range(tam_coluna):
        linha.append('')
    matriz.append(linha)
# initialise the first column of the matrix
for i in range(tam_linha):
    if s[i] == '-' and f[0] == '-':
        score = score + 0
        matriz[i][0] = score
    elif s[i] == '-' and f[0] != '-' or s[i] != '-' and f[0] == '-':
        score = score + gaps
        matriz[i][0] = score
    elif s[i] != f[0]:
        score = score + mismatch
        matriz[i][0] = score
    else:  # equal characters
        # bug fix: was "socre = score + match", which silently skipped
        # accumulating the match reward (the typo left score unchanged)
        score = score + match
        matriz[i][0] = score
score = 0
# initialise the first row of the matrix
for j in range(tam_coluna):
    if s[0] == '-' and f[j] == '-':
        score = score + 0
        matriz[0][j] = score
    elif s[0] == '-' and f[j] != '-' or s[0] != '-' and f[j] == '-':
        score = score + gaps
        matriz[0][j] = score
    elif s[0] != f[j]:
        score = score + mismatch
        matriz[0][j] = score
    else:  # equal characters
        score = score + match  # bug fix: was "socre = score + match"
        matriz[0][j] = score
score = 0
v = 0  # vertical
h = 0  # horizontal
d = 0  # diagonal
# fill every remaining cell with the best of the three possible moves
for i in range(tam_linha):
    for j in range(tam_coluna):
        if(matriz[i][j] == ''):  # still empty, i.e. not row/column 0
            # vertical move: gap in f
            v = matriz[i-1][j] + (gaps)
            # horizontal move: gap in s
            h = (matriz[i][j-1] + (gaps))
            # diagonal move: match or mismatch of the two characters
            if(s[i] == f[j]):
                d = matriz[i-1][j-1] + (match)
            else:
                d = matriz[i-1][j-1] + (mismatch)
            # keep the highest of the three candidate scores
            maior = [v, h, d]
            score_maior = max(maior)
            matriz[i][j] = score_maior
exibir_matriz(matriz)
# global alignment: trace back from the bottom-right corner
alinhamento_s = []
elem = matriz[tam_linha-1][tam_coluna-1]
i = tam_linha-1
j = tam_coluna-1
aux_s = []
alinhamento_f = []
while(i > 0 and j > 0):
    elem = matriz[i][j]
    if ((matriz[i-1][j]) + (gaps)) == elem:  # came from above: gap in f
        alinhamento_s.append(s[i])
        alinhamento_f.append('-')
        i = i-1
    elif ((matriz[i][j-1]) + (gaps)) == elem:  # came from the left: gap in s
        alinhamento_s.append('-')
        alinhamento_f.append(f[j])
        j = j-1
    elif (((matriz[i-1][j-1]) + (match)) == elem) or (((matriz[i-1][j-1]) + (mismatch)) == elem):  # diagonal
        alinhamento_s.append(s[i])
        alinhamento_f.append(f[j])
        i = i-1
        j = j-1
print('\n')
# alignments were built back-to-front, so print them reversed
print(alinhamento_f[::-1])
print(alinhamento_s[::-1])
|
#!/usr/bin/env python
import os
import subprocess
# Pass the argument list directly (shell=False): the previous shell command
# string contained an unquoted '&', so the shell backgrounded the process and
# treated "sep=," as a separate word instead of part of the URL.
cmd = [
    "/home/dstarr/src/TCP/Software/ingest_tools/lcs_classif.py",
    "http://127.0.0.1:5123/get_lc_data/?filename=dotastro_215153.dat&sep=,",
]
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                     stderr=subprocess.PIPE, close_fds=True)
# communicate() waits for exit and avoids pipe-buffer deadlocks that
# os.waitpid + readlines could hit on large output
stdout_data, _ = p.communicate()
script_output = stdout_data.splitlines(True)
# print the first line of the classifier's output
print(script_output[0])
|
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.translation import ugettext_lazy as _
from .managers import CustomUserManager
class User(AbstractUser):
    """Custom user that authenticates by email address instead of username."""
    username = None  # remove the inherited username column entirely
    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
    email = models.EmailField(unique=True)
    USERNAME_FIELD = 'email'  # log in with the email address
    EMAIL_FIELD = 'email'
    REQUIRED_FIELDS = ['first_name', 'last_name']  # prompted by createsuperuser
    objects = CustomUserManager()  # manager aware of the email-keyed model
class Role(models.Model):
    """Named role referenced by Enrolled (links a user's function in a class)."""
    name = models.CharField(max_length=255)
class Enrolled(models.Model):
    """Through-model linking a User to a Class with a Role."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    # string reference because Class is declared below this model
    enrolled_class = models.ForeignKey("Class", on_delete=models.CASCADE)
    role = models.ForeignKey(Role, on_delete=models.CASCADE)
    tag = models.CharField(max_length=255, null=True)  # optional free-form label
class Class(models.Model):
    """A course; users join through the Enrolled through-model."""
    name = models.CharField(max_length=255)
    description = models.TextField()
    information_page = models.TextField()
    enrollees = models.ManyToManyField(User, through=Enrolled, related_name="classes_enrolled")
    # default=1 assumes a User with pk=1 exists - presumably a migration shim; confirm
    primary_instructor = models.ForeignKey(User, on_delete=models.CASCADE, related_name="primary_instructor_in_class", default = 1)
class Term(models.Model):
    """Academic term with a display name and a short code."""
    full_name = models.CharField(max_length=255)
    code = models.CharField(max_length=10)
class Tag(models.Model):
    """Label attached to posts via Post.tags."""
    name = models.CharField(max_length=255)
class Comment(models.Model):
    """User-authored comment; attached to posts/answers via their M2M fields."""
    created_date = models.DateTimeField(auto_now_add=True)  # set once on insert
    modified_date = models.DateTimeField(auto_now=True)  # refreshed on every save
    body = models.TextField()
    upvotes = models.ManyToManyField(User, related_name="comments_upvoted", blank=True)
    author = models.ForeignKey(User, on_delete=models.CASCADE, related_name="comments_authored")
class Post(models.Model):
    """A post inside a Class, with tags, votes, view tracking and comments."""
    created_date = models.DateTimeField(auto_now_add=True)
    modified_date = models.DateTimeField(auto_now=True)
    title = models.CharField(max_length=255)
    body = models.TextField()
    answerable = models.BooleanField()  # presumably gates whether Answers are allowed - confirm
    tags = models.ManyToManyField(Tag, blank=True)
    class_in = models.ForeignKey(Class, on_delete=models.CASCADE)
    views = models.ManyToManyField(User, blank=True, related_name="posts_visited")
    upvotes = models.ManyToManyField(User, related_name="posts_upvoted", blank=True)
    author = models.ForeignKey(User, on_delete=models.CASCADE, related_name="posts_authored")
    comments = models.ManyToManyField(Comment, blank=True)
class Answer(models.Model):
    """An answer attached to a Post."""
    created_date = models.DateTimeField(auto_now_add=True)
    modified_date = models.DateTimeField(auto_now=True)
    body = models.TextField()
    upvotes = models.ManyToManyField(User, related_name="answers_upvoted", blank=True)
    author = models.ForeignKey(User, on_delete=models.CASCADE, related_name="answers_authored")
    comments = models.ManyToManyField(Comment, blank=True)
    # default=6 assumes a Post with pk=6 exists - presumably a migration shim; confirm
    post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name="answers_post", default=6)
|
import numpy as np
import pandas as pd
# load the 2MASS and VVV catalogues; read_csv implies the .db files are
# CSV-formatted despite the extension
tmass= pd.read_csv("/users/alex/Data/tmassF.min.db")
print("imported 2mass")
vvv = pd.read_csv("/users/alex/Data/vvvEQUF.min.db")
print("imported vvv")
# echo both frames for a quick sanity check
print(tmass)
print(vvv)
|
"""Top-level package for fundamentals_of_data_science."""
__author__ = """Andrew Stewart"""
__email__ = 'andrew.c.stewart@gmail.com'
__version__ = '2.0.0'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# geraTabFreq.py
#
# Copyright 2015 Cristian <csanterio@gmail.com>
"""
6. Construa a tabela de frequencias de palavras das bรญblias catรณlica e protestante, novo e velho testamento.
Salve as tabelas em arquivos separados. Em seguida, use a planilha eletrรดnica para criar um grรกfico de pizza de cada tabela.
Compare os dois grรกficos. (obs: nรฃo incluir stopwords na tabela de frequencia. Incluir entidades nomeadas).
"""
"""
Ideia:
Criar uma base de dados (arquivo txt) com separadores
"""
import libplnbsi
# arqEntrada: nome do arquivo para leitura
# arqSaida: nome do arquivo de saida
def geraTabFreq(arqEntrada, arqSaida):
    """Build a token-frequency table from *arqEntrada* and write it to *arqSaida*.

    Output format: one "token,count" line per distinct token.

    arqEntrada: name of the input text file
    arqSaida: name of the output file
    """
    # patterns recognised by libplnbsi.extraiPadrao (named-entity templates)
    b = ["MpM", "N/N/N", "MM", "MMMM", "M", "MpMMpM", "MMpMM", "MMM", "m", 'V.TM', "TM", "V.TMM", 'TMM', "T.M"]
    # with-blocks guarantee the files are closed even if a helper raises
    # (the originals leaked handles on any exception before close())
    with open(arqEntrada, 'r') as dados:
        texto = dados.read()
    separadores = libplnbsi.selecionaSeparadores(texto)
    texto = libplnbsi.insereEspacos(texto)
    tokens, posit = libplnbsi.tokenizadorv2(texto, separadores)
    print(tokens)
    listaPalavras = libplnbsi.extraiPadrao(tokens, b)
    # count occurrences of each extracted token
    dicFrequencia = {}
    for elem in listaPalavras:
        dicFrequencia[elem] = dicFrequencia.get(elem, 0) + 1
    with open(arqSaida, 'w') as saida:
        for chave, valor in dicFrequencia.items():
            saida.write(str(chave) + "," + str(valor) + "\n")
def main():
    """Entry point: build the frequency table for the Brazilian constitution text."""
    geraTabFreq('constituicaoBr.txt', 'FrequenciaDePalavras.txt')
    return 0
if __name__ == '__main__':
    main()
|
import os
import sys
import argparse
import math
import shutil
import time
import logging
from io import open
import numpy as np
import torch
from torch import nn
from torch.nn import init
from torch.nn.parameter import Parameter
import torch.nn.functional as F
import torch.optim as optim
#python train_cifar_dvs_snn.py --dataset CIFAR-DVS --parts 28 --batch_size 32 --nhid 512 --lr 3e-3 --when 20 70 120 --epochs 50
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("device:",device)
from utils import get_xt
# from snn_models_LIF4_cnn import * #for civar10 only
from snn_models_LIF4_dvs_cnn import *
# from snn_models_LIF_dvs_cnnv1 import * # default
from snn_models_LIF_dvs_vgg_dvs import *
from snn_models_LIF_dvs_res import *
from snn_sota_LIF4_dvs_gesture_v2 import *
from snn_sota_LIF4_dvs_gesture_v3 import *
# from snn_models_LIF_dvs_res_less import *
from datasets import data_generator, adding_problem_generator
def get_stats_named_params( model ):
    """Return {name: (param, sm, lm, dm)}: the live parameter, a detached
    snapshot, and two zero buffers of the same shape (FedDyn-style state)."""
    stats = {}
    for name, param in model.named_parameters():
        snapshot = param.detach().clone()
        lagrange = 0.0 * param.detach().clone()
        debias_buf = 0.0 * param.detach().clone()
        stats[name] = (param, snapshot, lagrange, debias_buf)
    return stats
def post_optimizer_updates( named_params, args, epoch ):
    """In-place update of the (param, sm, lm, dm) state after an optimizer step.

    sm tracks an exponential moving average of the parameter; lm accumulates a
    FedDyn-style linear correction. With --debias, beta and rho decay as
    1/(1+epoch) and dm averages lm instead.
    """
    alpha = args.alpha
    beta = args.beta
    rho = args.rho
    for name in named_params:
        param, sm, lm, dm = named_params[name]
        if args.debias:
            beta = (1. / (1. + epoch))
            sm.data.mul_( (1.0-beta) )
            sm.data.add_( beta * param )
            rho = (1. / (1. + epoch))
            dm.data.mul_( (1.-rho) )
            dm.data.add_( rho * lm )
        else:
            # order matters: lm must be refreshed before it enters sm's update
            lm.data.add_( -alpha * (param - sm) )
            sm.data.mul_( (1.0-beta) )
            sm.data.add_( beta * param - (beta/alpha) * lm )
def get_regularizer_named_params( named_params, args, _lambda=1.0 ):
    """Return the scalar regularization term accumulated over all parameters.

    Always adds the linear term (rho-1)*<param, lm>; with --debias it adds
    (1-rho)*<param, dm>, otherwise a proximal term 0.5*alpha*||param - sm||^2.
    Requires args.device for the accumulator tensor.
    """
    alpha = args.alpha
    rho = args.rho
    regularization = torch.zeros( [], device=args.device )
    for name in named_params:
        param, sm, lm, dm = named_params[name]
        regularization += (rho-1.) * torch.sum( param * lm )
        if args.debias:
            regularization += (1.-rho) * torch.sum( param * dm )
        else:
            r_p = _lambda * 0.5 * alpha * torch.sum( torch.square(param - sm) )
            regularization += r_p
            # print(name,r_p)
    return regularization
def reset_named_params(named_params, args):
    """Copy the averaged weights sm back into the live parameters.

    No-op when --debias is active.
    """
    if args.debias:
        return
    for param, sm, _lm, _dm in named_params.values():
        param.data.copy_(sm.data)
def test(model, test_loader, logger):
    """Evaluate on the test set; returns (average NLL loss, accuracy %).

    Reads the module-level globals `args` and `device`. Reshapes inputs to
    (batch, T, 2, 128, 128) - assumes DVS-style data; TODO confirm for other
    datasets.
    """
    model.eval()
    test_loss = 0
    correct = 0
    # for data, target in test_loader:
    for i ,(data, target) in enumerate(test_loader):
        if args.cuda:
            data, target = data.to(device), target.to(device)
        T = data.shape[1]
        data = data.view(-1, T,2,128,128)#.permute(0,1,2,4,3)
        with torch.no_grad():
            model.eval()
            hidden = model.init_hidden(data.size(0))
            outputs, hidden, recon_loss = model(data, hidden)
            # score only the prediction from the last timestep
            output = outputs[-1]
            test_loss += F.nll_loss(output, target, reduction='sum').data.item()
            pred = output.data.max(1, keepdim=True)[1]
            correct += pred.eq(target.data.view_as(pred)).cpu().sum()
        torch.cuda.empty_cache()
    test_loss /= len(test_loader.dataset)
    logger.info('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
    sys.stdout.flush()
    return test_loss, 100. * correct / len(test_loader.dataset)
def train(epoch, args, train_loader, permute, n_classes, model, named_params, logger):
    """Run one training epoch with per-timestep updates, an 'oracle' auxiliary
    loss and a FedDyn-style regularizer.

    Relies on module-level globals: steps, estimate_class_distribution,
    estimatedDistribution, optimizer, device, seq_length, lr.
    """
    global steps
    global estimate_class_distribution
    batch_size = args.batch_size
    alpha = args.alpha
    beta = args.beta
    PARTS = args.parts
    train_loss = 0
    total_clf_loss = 0
    total_regularizaton_loss = 0
    total_oracle_loss = 0
    model.train()
    # T = data.shape[1]
    # PARTS = T
    #entropy = EntropyLoss()
    for batch_idx, (data, target) in enumerate(train_loader):
        T = data.shape[1]
        PARTS = T  # NOTE: overrides args.parts with the sequence length
        if args.cuda: data, target = data.to(device), target.to(device)
        # print(data.shape)
        data = data.view(-1, T,2,128,128 )#.permute(0,1,2,4,3)
        B = target.size()[0]
        step = model.network.step
        # xdata = data.clone()
        # inputs = data
        Delta = torch.zeros(B, dtype=data.dtype, device=data.device)
        _PARTS = PARTS
        # process the sequence one timestep at a time, detaching the hidden
        # state between steps (truncated backprop)
        for p in range(_PARTS):
            if p==0:
                h = model.init_hidden(data.size(0))
            else:
                h = tuple(v.detach() for v in h)
            # print([p.shape for p in h])
            # oracle target: uniform during warm-up (epoch < 10), estimated
            # class distribution afterwards, true one-hot labels at the end
            if p<PARTS-1:
                if epoch < 10:
                    if args.per_ex_stats:
                        oracle_prob = estimatedDistribution[batch_idx*batch_size:(batch_idx+1)*batch_size, p]
                    else:
                        oracle_prob = 0*estimate_class_distribution[target, p] + (1.0/n_classes)
                else:
                    oracle_prob = estimate_class_distribution[target, p]
            else:
                oracle_prob = F.one_hot(target,n_classes).float()
            o, h,hs = model.network.forward(data[:,p,:,:,:], h )
            # print(os[-1].shape,h[-1].shape,hs[-1][-1].shape)
            # print(h[-1],os[-1])
            # print(x.shape)
            # print(os.shape)
            prob_out = F.softmax(h[-1], dim=1)
            output = F.log_softmax(h[-1], dim=1)
            if p<PARTS-1:
                # refresh per-class distribution estimates from currently
                # misclassified examples (at most one example per class)
                with torch.no_grad():
                    filled_class = [0]*n_classes
                    n_filled = 0
                    for j in range(B):
                        if n_filled==n_classes: break
                        y = target[j].item()
                        if filled_class[y] == 0 and (torch.argmax(prob_out[j]) != target[j]):
                            filled_class[y] = 1
                            estimate_class_distribution[y, p] = prob_out[j].detach()
                            n_filled += 1
            optimizer.zero_grad()
            # clf_loss = (p+1)/(_PARTS)*F.nll_loss(output, target)
            clf_loss = (p+1)/(_PARTS)*F.cross_entropy(output, target)
            # clf_loss = F.cross_entropy(output, target) # cnn
            if output.shape!=oracle_prob.shape:
                print(output.shape,oracle_prob.shape)
            # clf_loss = F.nll_loss(output, target)
            # clf_loss = (p+1)/(_PARTS)*F.gaussian_null_loss(output, target)
            oracle_loss = (1 - (p+1)/(_PARTS)) * 1.0 *torch.mean( -oracle_prob * output )
            regularizer = get_regularizer_named_params( named_params, args, _lambda=1.0 ) #1.
            loss = clf_loss + oracle_loss + regularizer #+ model.network.fr*0.05
            # loss.backward(retain_graph=True)
            loss.backward()
            # print(model.network.layer1_x.weight.grad, model.network.tau_m_r1.grad)
            # print(os.shape)
            if args.clip > 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
            optimizer.step()
            post_optimizer_updates( named_params, args,epoch )
            train_loss += loss.item()
            total_clf_loss += clf_loss.item()
            total_regularizaton_loss += regularizer #.item()
            total_oracle_loss += oracle_loss.item()
        steps += seq_length
        if batch_idx > 0 and batch_idx % args.log_interval == 0:
            logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tlr: {:.6f}\tLoss: {:.6f}\tOracle: \
{:.6f}\tClf: {:.6f}\tReg: {:.6f}\tFr: {:.6f}\tSteps: {}'.format(
                epoch, batch_idx * batch_size, len(train_loader.dataset),
                100. * batch_idx / len(train_loader), lr, train_loss / args.log_interval,
                total_oracle_loss / args.log_interval,
                total_clf_loss / args.log_interval, total_regularizaton_loss / args.log_interval, model.network.fr,steps))
            # print(model.network.fr)
            train_loss = 0
            total_clf_loss = 0
            total_regularizaton_loss = 0
            total_oracle_loss = 0
        sys.stdout.flush()
        # print(model.network.layer1_x.weight.grad, model.network.tau_m_r1.grad)
        # print( model.network.tau_m_r1.grad)
# ---- command-line interface (see the usage example in the file header) ----
parser = argparse.ArgumentParser(description='Sequential Decision Making..')
parser.add_argument('--alpha', type=float, default=.1, help='Alpha')
parser.add_argument('--beta', type=float, default=0.5, help='Beta')
parser.add_argument('--rho', type=float, default=0.0, help='Rho')
parser.add_argument('--lmbda', type=float, default=2.0, help='Lambda')
parser.add_argument('--debias', action='store_true', help='FedDyn debias algorithm')
parser.add_argument('--K', type=int, default=1, help='Number of iterations for debias algorithm')
parser.add_argument('--model', type=str, default='LSTM', help='type of recurrent net (RNN_TANH, RNN_RELU, LSTM, GRU)')
parser.add_argument('--emsize', type=int, default=256, help='size of word embeddings')
parser.add_argument('--nlayers', type=int, default=1, #2,
                    help='number of layers')
parser.add_argument('--bptt', type=int, default=300, #35,
                    help='sequence length')
parser.add_argument('--tied', action='store_true',
                    help='tie the word embedding and softmax weights')
parser.add_argument('--n_experts', type=int, default=15,
                    help='PTB-Word n_experts')
parser.add_argument('--nhid', type=int, default=256,
                    help='number of hidden units per layer')
parser.add_argument('--nhidlast', type=int, default=620,
                    help='number of hidden units per layer')
parser.add_argument('--lr', type=float, default=5e-3,
                    help='initial learning rate (default: 4e-3)')
parser.add_argument('--clip', type=float, default=1., #0.5,
                    help='gradient clipping')
parser.add_argument('--epochs', type=int, default=200,
                    help='upper epoch limit (default: 200)')
parser.add_argument('--parts', type=int, default=10,
                    help='Parts to split the sequential input into (default: 10)')
parser.add_argument('--batch_size', type=int, default=128, metavar='N',
                    help='batch size')
parser.add_argument('--small_batch_size', type=int, default=-1, metavar='N',
                    help='batch size')
parser.add_argument('--max_seq_len_delta', type=int, default=40, metavar='N',
                    help='batch size')
parser.add_argument('--eval_batch_size', type=int, default=10, metavar='N',
                    help='batch size')
parser.add_argument('--resume', type=str, default='',
                    help='path of model to resume')
parser.add_argument('--dropout', type=float, default=0.05,
                    help='output locked dropout (0 = no dropout)')
parser.add_argument('--dropouti', type=float, default=0.2,
                    help='input locked dropout (0 = no dropout)')
parser.add_argument('--dropoutl', type=float, default=0.29,
                    help='input locked dropout (0 = no dropout)')
parser.add_argument('--wdrop', type=float, default=0.1,
                    help='dropout applied to weights (0 = no dropout)')
parser.add_argument('--dropouth', type=float, default=0.2,
                    help='dropout applied to hidden layers (0 = no dropout)')
parser.add_argument('--dropoute', type=float, default=0,
                    help='dropout to remove words from embedding layer (0 = no dropout)')
parser.add_argument('--wnorm', action='store_false',
                    help='use weight normalization (default: True)')
parser.add_argument('--temporalwdrop', action='store_false',
                    help='only drop the temporal weights (default: True)')
parser.add_argument('--wdecay', type=float, default=0.0,
                    help='weight decay')
parser.add_argument('--seed', type=int, default=1111,
                    help='random seed')
parser.add_argument('--nonmono', type=int, default=5,
                    help='random seed')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
                    help='report interval')
parser.add_argument('--optim', type=str, default='Adam',
                    help='optimizer to use')
parser.add_argument('--when', nargs='+', type=int, default=[10,20,40,80,120,150],#[30,70,120],#[10,20,50, 75, 90],
                    help='When to decay the learning rate')
parser.add_argument('--load', type=str, default='',
                    help='path to load the model')
parser.add_argument('--save', type=str, default='./models/',
                    help='path to load the model')
parser.add_argument('--per_ex_stats', action='store_true',
                    help='Use per example stats to compute the KL loss (default: False)')
parser.add_argument('--permute', action='store_true',
                    help='use permuted dataset (default: False)')
parser.add_argument('--dataset', type=str, default='CIFAR-10',
                    help='dataset to use')
parser.add_argument('--dataroot', type=str,
                    default='./data/',
                    help='root location of the dataset')
args = parser.parse_args()
# NOTE(review): cuda flag is forced on; tensors are still routed through
# `device`, which falls back to CPU when CUDA is unavailable - confirm intent
args.cuda = True
# the experiment name encodes the main hyper-parameters
exp_name = args.dataset + '-nhid-' + str(args.nhid) + '-parts-' + str(args.parts) + '-optim-' + args.optim
exp_name += '-B-' + str(args.batch_size) + '-E-' + str(args.epochs) + '-K-' + str(args.K)
exp_name += '-alpha-' + str(args.alpha) + '-beta-' + str(args.beta)
# exp_name +=
if args.permute:
    exp_name += '-perm-' + str(args.permute)
if args.per_ex_stats:
    exp_name += '-per-ex-stats-'
if args.debias:
    exp_name += '-debias-'
prefix = args.save + exp_name
# log to both ./logs/logfile-<exp_name>.log and stderr
logger = logging.getLogger('trainer')
file_log_handler = logging.FileHandler( './logs/logfile-' + exp_name + '.log')
logger.addHandler(file_log_handler)
stderr_log_handler = logging.StreamHandler()
logger.addHandler(stderr_log_handler)
# nice output format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_log_handler.setFormatter(formatter)
stderr_log_handler.setFormatter(formatter)
logger.setLevel( 'DEBUG' )
logger.info('Args: {}'.format(args))
logger.info('Exp_name = ' + exp_name)
logger.info('Prefix = ' + prefix)
torch.backends.cudnn.benchmark = True
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
args.device = device
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
torch.set_default_tensor_type('torch.FloatTensor')
if torch.cuda.is_available():
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    torch.cuda.manual_seed(args.seed)
steps = 0
if args.dataset in ['CIFAR-10', 'MNIST-10','CIFAR-DVS','DVS-Gesture','MNIST-DVS']:
    train_loader, test_loader, seq_length, input_channels, n_classes = data_generator(args.dataset,
                                                batch_size=args.batch_size,
                                                dataroot=args.dataroot,
                                                shuffle=(not args.per_ex_stats))
    permute = torch.Tensor(np.random.permutation(seq_length).astype(np.float64)).long() # Use only if args.permute is True
    estimate_class_distribution = torch.zeros(n_classes, seq_length, n_classes, dtype=torch.float)
    estimatedDistribution = None
    if args.per_ex_stats:
        estimatedDistribution = torch.zeros(len(train_loader)*args.batch_size, args.parts, n_classes, dtype=torch.float)
else:
    logger.info('Unknown dataset.. customize the routines to include the train/test loop.')
    exit(1)
optimizer = None
lr = args.lr
# DVS input: 2 polarity channels at 128x128 resolution
model = SeqModel(ninp=[2,128,128],
                 nhid=args.nhid,
                 nout=n_classes,
                 dropout=args.dropout,
                 dropouti=args.dropouti,
                 dropouth=args.dropouth,
                 wdrop=args.wdrop,
                 temporalwdrop=args.temporalwdrop,
                 wnorm=args.wnorm,
                 n_timesteps=seq_length,
                 parts=args.parts)
total_params = count_parameters(model)
if args.cuda:
    permute = permute.cuda()
# optionally resume from a checkpoint (restores model + optimizer state)
if len(args.load) > 0:
    logger.info("Loaded model\n")
    model_ckp = torch.load(args.load)
    model.load_state_dict(model_ckp['state_dict'])
    optimizer = getattr(optim, args.optim)(model.parameters(), lr=lr, weight_decay=args.wdecay)
    optimizer.load_state_dict(model_ckp['optimizer'])
    print('best acc of loaded model: ',model_ckp['best_acc1'])
print('Model: ',model)
if args.cuda:
    model.cuda()
if optimizer is None:
    optimizer = getattr(optim, args.optim)(model.parameters(), lr=lr, weight_decay=args.wdecay)
    if args.optim == 'SGD':
        # SGD additionally gets momentum, replacing the generic construction
        optimizer = getattr(optim, args.optim)(model.parameters(), lr=lr, momentum=0.9, weight_decay=args.wdecay)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,T_max=30)
logger.info('Optimizer = ' + str(optimizer) )
logger.info('Model total parameters: {}'.format(total_params))
all_test_losses = []
epochs = args.epochs #100
best_acc1 = 0.0
best_val_loss = None
first_update = False
named_params = get_stats_named_params( model )
# main epoch loop: train, reset weights to their running average, evaluate,
# and checkpoint the best accuracy so far
for epoch in range(1, epochs + 1):
    start = time.time()
    if args.dataset in ['CIFAR-10', 'MNIST-10','CIFAR-DVS','DVS-Gesture','MNIST-DVS']:
        if args.per_ex_stats and epoch%5 == 1 :
            first_update = update_prob_estimates( model, args, train_loader, permute, estimatedDistribution, estimate_class_distribution, first_update )
        train(epoch, args, train_loader, permute, n_classes, model, named_params, logger)
        #train_oracle(epoch)
        reset_named_params(named_params, args)
        test_loss, acc1 = test( model, test_loader, logger )
        logger.info('time taken = ' + str(time.time() - start) )
        scheduler.step()
        # if epoch in args.when:
        # Scheduled learning rate decay
        # lr /= 2.
        # for param_group in optimizer.param_groups:
        # param_group['lr'] = lr
        # linear lr decay
        # lr = lr*(1-1./epochs)+1e-6
        # for param_group in optimizer.param_groups:
        # param_group['lr'] = lr
        # if epoch>0 and epoch%20==0 and epoch<100:
        # # Scheduled learning rate decay
        # lr /= 5.
        # for param_group in optimizer.param_groups:
        # param_group['lr'] = lr
        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)
        save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            #'oracle_state_dict': oracle.state_dict(),
            'best_acc1': best_acc1,
            'optimizer' : optimizer.state_dict(),
            #'oracle_optimizer' : oracle_optim.state_dict(),
        }, is_best, prefix=prefix)
        all_test_losses.append(test_loss)
# NOTE(review): this final evaluation repeats the last in-loop test call
test_loss, acc1 = test( model, test_loader, logger )
|
from bs4 import BeautifulSoup as BS
from urllib.request import urlopen
import csv
# Scrape Sachin Tendulkar's Test-century table from Wikipedia into a CSV.
html = urlopen("https://en.wikipedia.org/wiki/List_of_international_cricket_centuries_by_Sachin_Tendulkar")
bsObj = BS(html, "html.parser")
table_of_interest = bsObj.findAll("table", {"class":"wikitable"})[1] # 1 for Test Centuries
rows = table_of_interest.findAll("tr")
# newline="" is required for csv.writer (prevents blank rows on Windows);
# the with-block replaces the old try/finally and the "w+" mode, guaranteeing
# the file is closed even if parsing raises mid-way
with open("testCentury.csv", "w", newline="") as csvFile:
    writer = csv.writer(csvFile)
    for row in rows:
        # both header (th) and data (td) cells become one CSV row
        rowdata = [col.get_text() for col in row.findAll(["td", "th"])]
        writer.writerow(rowdata)
        print("Done with current row...")
|
import turtle
canvas = turtle.Screen()
leo = turtle.Turtle()
# five 50-px segments turning 145 degrees each
# NOTE(review): a closed 5-point star needs 144-degree turns; with 145 the
# figure does not close exactly - confirm whether that is intentional
for i in range(5):
    leo.forward(50)
    leo.right(145)
# keep the window open until it is clicked
canvas.exitonclick()
"""Miscellaneous functions for Canto, Raga, Wilkin (1996) bow shocks
"""
import numpy as np
def th1_approx(th, beta):
    """Equation (26) of CRW"""
    # inner bracket: 1 + 0.8*beta*(1 - th/tan(th))
    bracket = 1.0 + 0.8 * beta * (1.0 - th / np.tan(th))
    return np.sqrt(7.5 * (np.sqrt(bracket) - 1.0))
def radius(th, th1):
    """Radius in terms of D from Eq (23) of CRW
    This applies generally, even for anisotropic cases
    """
    numerator = np.sin(th1)
    denominator = np.sin(th + th1)
    return numerator / denominator
|
# import env
Import('env')  # SCons: pull in the shared construction environment
# link against libpurple from the current directory, with glib headers
env.Append(LIBS=['purple'])
env.Append(LIBPATH='.')
env.Append(CPPPATH = ['/usr/include/glib-2.0/','/usr/lib/glib-2.0/include/','/usr/include/libpurple/'])
#env.ParseConfig( 'pkg-config --cflags --libs glib-2.0')
# compile every .cpp in this directory into object files
env.Object([Glob('*.cpp')])
|
from drivers.driverchrome import DriverChrome
from drivers.driverfirefox import DriverFirefox
from drivers.driverIE import DriverIE
class DriverFactory():
    """Maps a browser name to the matching driver wrapper instance."""
    @staticmethod
    def get_driver(browser):
        # like the original if-chain, unknown names fall through to None
        factories = {
            "chrome": DriverChrome,
            "firefox": DriverFirefox,
            "ie": DriverIE,
        }
        chosen = factories.get(browser)
        if chosen is not None:
            return chosen()
|
from tkinter import *
from tkinter import messagebox
import requests
from bs4 import BeautifulSoup
import re
import webbrowser
def Spider():
    """Fetch the danmaku (scrolling comments) for the Bilibili av-number typed
    into the first entry box and dump them into the text widget."""
    url = var1.get()
    # input must look like "av<digits>"
    pattern = re.compile(r'av[0-9]*')
    matchResult = pattern.search(url)
    if not matchResult:
        messagebox.showinfo("่ญฆๅ", "่พๅฅ้ๆณ!!!")
    html = requests.get("http://www.bilibili.com/video/"+url+'/')
    bsObj = BeautifulSoup(html.text)
    tagg = bsObj.findAll("", {"id":"bofqi"}) # 'bofqi' = the player container element (original note was unclear)
    for ACDC in tagg:
        #Peak = ACDC.get_text().split("cid=")[1].split("&")[0]
        pat = re.compile(r'cid=(.+?)&')
        a = pat.findall(ACDC.get_text())
        Peak = "".join(a) # the extracted cid identifies the comment XML feed
        ahtml = requests.get("http://comment.bilibili.com/" + Peak + ".xml")
        BsObj = BeautifulSoup(ahtml.text)
        Lisst = BsObj.findAll("d") # each <d> element is one danmaku comment
        #with open('PeakFrist.txt', 'w', encoding='utf-8') as f:
        #f.write(ahtml.text)
        #f.write(BsObj.get_text())
        words = "ๅผนๅนๅฑ" + str(len(Lisst)) + "ๆก"
        lbl2.config(text = words)
        #text2.insert('1.0',len(Lisst))
        text.insert('2.0',BsObj.get_text()+'\n')
def Ahu_notice():
    """Scrape the AHU academic-affairs notice board and list title + URL pairs
    in the text widget."""
    # the page is GBK-encoded, so decode explicitly
    html = requests.get("http://jwc.ahu.cn/main/notice.asp").content.decode('gbk')
    bsObj = BeautifulSoup(html)
    # links of the form show.asp?id=<number> are the individual notices
    tagg = bsObj.findAll('a', href=re.compile("^(show)\.asp\?id\=[0-9]*"))
    for link in tagg:
        if 'href' in link.attrs:
            text.insert('1.0', link.get_text()+'\n')
            text.insert('2.0', 'http://jwc.ahu.cn/main/'+link.attrs['href']+'\n\n')
def Search():
    """Open the URL from the second entry box in the default web browser."""
    urll = var2.get()
    # dead validation code kept from Spider(); not executed (string statement)
    '''
    pattern = re.compile(r'av[0-9]*')
    matchResult = pattern.search(url)
    if not matchResult:
        messagebox.showinfo("่ญฆๅ", "่พๅฅ้ๆณ!!!")
    '''
    webbrowser.open_new(urll)
# ---- main window layout (fixed 900x600, absolute placement) ----
window = Tk()
window.resizable(width=False, height=False)
window.title("webHydra")
window.geometry("900x600")
#img1 = PhotoImage(file='lu.ico')
#window.tk.call('wm', 'iconphoto',window._w, img1)
window.wm_iconbitmap('lu.ico')
#window.configure(background='#4c66a4')
lbl = Label(window, text="่ง้ข็ชๅท:")
lbl.place(x = 10, y = 10)
lbl2 = Label(window)  # updated by Spider() with the comment count
lbl2.place(x = 10,y = 40)
'''
filename = '2.png'
img = PhotoImage(file=filename)
lal3 = Label(window, image=img)
lal3.place(x = 500, y = 80)
'''
lbl4 = Label(window, text = "่พๅฅ็ฝๅ:")
lbl4.place(x = 500, y = 80)
# var1: Bilibili av-number; var2: arbitrary URL for Search()
var1=StringVar()
ent = Entry(window, textvariable=var1)
ent.place(x = 70, y = 10, width = 150, height = 25)
var1.set("")
var2=StringVar()
ent2 = Entry(window, textvariable=var2)
ent2.place(x = 560, y = 80, width = 220, height = 25)
var2.set("")
btn = Button(window, text='GO', command=Spider)
btn.place(x = 240, y = 10, height = 25, width = 50)
btn2 = Button(window, text='ๅฎๅคงๆๅกๅคๅฌๅ',command=Ahu_notice)
btn2.place(x = 500, y = 10, height = 25, width = 90)
btn3 = Button(window, text='ๆฅ็',command=Search)
btn3.place(x = 790, y = 80, height = 25, width = 50)
text = Text(window)  # shared output area used by all three commands
text.place(x = 10, y = 80, height = 500, width = 480)
#text2 = Text(window)
#text2.place(x = 70, y = 40, height = 20, width = 30)
#lbl.pack()
#ent.pack()
#btn.pack()
#text.pack()
window.mainloop()
"""
ใผใญใใๅญฆใถในใใคใญใณใฐใใฅใผใฉใซใใใใฏใผใฏ
- Spiking Neural Networks from Scratch
Copyright (c) 2020 HiroshiARAKI. All Rights Reserved.
"""
import numpy as np
import matplotlib.pyplot as plt
def lif(currents, time: int, dt: float = 1.0, rest=-65, th=-40, ref=3, tc_decay=100):
    """Simulate a simple LIF neuron.

    Returns (spikes, monitor): a 0/1 spike train and the membrane voltage
    recorded at every simulation step.
    """
    n_steps = int(time / dt)
    # initialize
    last_fire = 0        # time of the most recent spike
    vpeak = 20           # peak (maximum) membrane potential
    spikes = np.zeros(n_steps)
    v = rest             # start at the resting potential
    monitor = []         # records the membrane voltage per step
    # Core of LIF
    for t in range(n_steps):
        # the update is gated off while inside the refractory period
        dv = ((dt * t) > (last_fire + ref)) * (-v + rest + currents[t]) / tc_decay
        v = v + dt * dv
        # on a threshold crossing: record the spike time, jump to the peak
        last_fire = last_fire + (dt * t - last_fire) * (v >= th)
        v = v + (vpeak - v) * (v >= th)
        monitor.append(v)
        spikes[t] = (v >= th) * 1
        v = v + (rest - v) * (v >= th)  # reset to the resting potential
    return spikes, monitor
if __name__ == '__main__':
    duration = 500  # ms
    dt = 0.5  # time step
    time = int(duration / dt)  # NOTE: computed but unused below
    # Input data
    # a sum of sine/cosine waves stands in for a real input current
    input_data_1 = 10 * np.sin(0.1 * np.arange(0, duration, dt)) + 50
    input_data_2 = -10 * np.cos(0.05 * np.arange(0, duration, dt)) - 10
    input_data = input_data_1 + input_data_2
    spikes, voltage = lif(input_data, duration, dt)
    # Plot: input current on top, membrane voltage below
    plt.figure(figsize=(12, 6))
    plt.subplot(2, 1, 1)
    plt.ylabel('Input Current')
    plt.plot(np.arange(0, duration, dt), input_data)
    plt.subplot(2, 1, 2)
    plt.ylabel('Membrane Voltage')
    plt.xlabel('time [ms]')
    plt.plot(np.arange(0, duration, dt), voltage)
    plt.show()
"""
Simple python script that launches a worker (for rq).
"""
from misc.env_vars import *
from rq import Worker, Queue, Connection
if __name__ == '__main__':
    # attach to the shared Redis connection and process jobs on "default"
    with Connection(REDIS_CONN):
        worker = Worker(Queue('default'))
        worker.work(logging_level="INFO")  # blocks, consuming jobs forever
|
#######
####### Ensure that opencv-contrib is installed
#######
import cv2
import argparse
import sys
import math
import numpy as np
import time as t
import os
from yolo import yolo_on_one_frame
from stereo_to_3d import stereo_to_3d_wls
# root of the stereo dataset; left-images/ and right-images/ live underneath
master_path_to_dataset = "C://Users//joebo//Documents//00uni//year3//vision//TTBB-durham-02-10-17-sub10"
# crops the image bottom (removes the car bonnet from the frame)
def cropBottom(image):
    """Return the top 450 rows of *image* (drops the car bonnet region)."""
    bonnet_row = 450
    return image[:bonnet_row]
#modified draw pred to take class name and distance, modified label and text made smaller
def drawPred(image, class_name, distance, left, top, right, bottom, colour):
    """Draw a bounding box and a "<class> @ <distance> m" label on *image* in place."""
    # Draw a bounding box.
    cv2.rectangle(image, (left, top), (right, bottom), colour, 3)
    # construct labels: every non-person class is collapsed to "vehicle"
    if class_name != "person":
        label = '%s @ %.2f m' % ("vehicle", distance)
    else:
        label = '%s @ %.2f m' % ("person", distance)
    labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.45, 1)
    top = max(top, labelSize[1])
    # white filled rectangle as a background for the label text
    cv2.rectangle(image, (left, top - round(1.5*labelSize[1])),
        (left + round(1.5*labelSize[0]) - 20, top + baseLine), (255, 255, 255), cv2.FILLED)
    cv2.putText(image, label, (left, top), cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0,0,0), 1)
#runs drawPred for each box
def drawBoxes(frame, classes, boxes, distance_estimates):
    """Draw a labelled prediction box on frame for every detection."""
    for idx, (left, top, width, height) in enumerate(boxes):
        drawPred(frame, classes[idx], distance_estimates[idx],
                 left, top, left + width, top + height, (255, 178, 50))
#takes an image and runs CLAHE on the light channel of the image in LAB colour space
def imagePrepCLAHELAB(imgL):
    """CLAHE-equalise the lightness channel of a BGR image via LAB colour space."""
    equaliser = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    lab = cv2.cvtColor(imgL, cv2.COLOR_BGR2LAB)
    # Apply CLAHE to the L (lightness) plane only, leaving colour untouched.
    planes = cv2.split(lab)
    planes[0] = equaliser.apply(planes[0])
    lab = cv2.merge(planes)
    return cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
#takes a grayscale image and runs CLAHE on it
def imagePrepCLAHEGRAY(imgL):
    """Convert a BGR image to grayscale and apply CLAHE to it."""
    equaliser = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    gray = cv2.cvtColor(imgL, cv2.COLOR_BGR2GRAY)
    return equaliser.apply(gray)
#slice based on percentile
def betterSlice(a):
a = np.sort(a.flatten())
return a[int(len(a)*0.2):int(len(a)*0.5)]
#slice based on heuristic
def betterSlice2(a, name):
    """Heuristic slice of a 2-D depth patch, tuned per object class.

    For a person, keep the torso region (rows 50-80%, columns 30-70%);
    otherwise keep the upper rows band (20-50%).
    Fixes: removed a leftover debug print, and uses a.shape[1] instead of
    len(a[1]) so the column count does not depend on row 1 existing.
    """
    rows = len(a)
    if name == "person":
        cols = a.shape[1]
        return a[int(rows * 0.5):int(rows * 0.8), int(cols * 0.3):int(cols * 0.7)]
    return a[int(rows * 0.2):int(rows * 0.5)]
def test(image_path):
    """Run detection + distance estimation on one stereo pair from the dataset.

    image_path -- filename of the left image; the right image name is derived
                  by replacing "_L" with "_R".
    Returns (imgL, depth_map): the cropped left image and its depth map in
    metres.  Side effects: shows the annotated image and disparity windows,
    prints the nearest detected object distance, pauses on the "p" key.
    Fixes: removed dead locals (z/b/c — including a wasted betterSlice2 call),
    dead timing variables and commented-out debug code; narrowed the bare
    except to ValueError (min() of an empty detection list).
    """
    # Set up directories/filenames for the stereo image pair.
    full_path_directory_left = os.path.join(master_path_to_dataset, "left-images")
    full_path_directory_right = os.path.join(master_path_to_dataset, "right-images")
    full_path_filename_left = os.path.join(full_path_directory_left, image_path)
    full_path_filename_right = os.path.join(full_path_directory_right, image_path.replace("_L", "_R"))
    # Read the images in and crop the car bonnet from the bottom.
    imgL = cropBottom(cv2.imread(full_path_filename_left))
    imgR = cropBottom(cv2.imread(full_path_filename_right))
    # Crop the left-image copy to adjust for the black calibration bar.
    yolo_imgL = np.copy(imgL)[:, 128:]
    # Run the CLAHE preprocessing on both images before disparity calculation.
    imgL_CLAHE = imagePrepCLAHELAB(imgL)
    imgR_CLAHE = imagePrepCLAHELAB(imgR)
    # Get boxes of objects and their classes (filtered to relevant objects).
    classes, boxes = yolo_on_one_frame(cv2.medianBlur(yolo_imgL, 5))
    # Depth map: WLS-filtered disparity, then depth = f*B/disparity.
    disparity_map = stereo_to_3d_wls(imgL_CLAHE, imgR_CLAHE, max_disparity=128)[:, 128:]
    f = 399.9745178222656  # focal length in pixels (camera calibration)
    B = 0.2090607502       # stereo baseline in metres
    depth_map = np.nan_to_num((f * B) / disparity_map, nan=0, posinf=0, neginf=0)
    distance_estimates = []
    # Estimate each object's distance from its depth-map patch.
    for left, top, width, height in boxes:
        # Clamp left so the box doesn't run off the edge; otherwise slicing
        # returns nothing and the mean becomes NaN.
        left = max(0, left)
        vals = depth_map[top:top + height, left:left + width]
        distance_estimates.append(np.mean(betterSlice(vals)))
    # Draw the boxes and show the images.
    drawBoxes(yolo_imgL, classes, boxes, distance_estimates)
    cv2.imshow("Image", yolo_imgL)
    cv2.imshow("Disparity Adjusted", cv2.equalizeHist(disparity_map))
    try:
        min_ = str(min(distance_estimates))
        print("running on image: \n" + image_path + "\n" + image_path.replace("_L", "_R") + " : nearest detected scene object " + min_ + "m" )
    except ValueError:
        # No detections: min() of an empty list raises ValueError.
        print("running on image: \n" + image_path + "\n" + image_path.replace("_L", "_R"))
    # Pause/resume the display using the "p" key.
    key = cv2.waitKey(100)
    if key == ord("p"):
        while True:
            key2 = cv2.waitKey()
            if key2 == ord("p"):
                break
    return imgL, depth_map
def all_files():
    """Run test() on every left image in the dataset, timing each frame."""
    left_dir = os.path.join(master_path_to_dataset, "left-images")
    for _, _, filenames in os.walk(left_dir):
        for name in filenames:
            frame_start = t.time()
            image, depth = test(name)
            print("overall time for frame:", t.time() - frame_start)
            print("")
            print("")
all_files()
|
"""Application config"""
import os
PWD = os.path.abspath(os.curdir)
SECRET_KEY = "8dd09dcb561d308eca351346b8f5a37c6ff33dc39d41154e82b9c4ccc6fde33b691be96ef24692be"
DB_NAME = "truthiness.db"
NETWORK = b'\x6f'
SQLALCHEMY_DATABASE_URI = 'sqlite:///{}/{}'.format(PWD, DB_NAME)
|
import sys
import os
import django
from twilio.rest import Client
from twilio.twiml.messaging_response import MessagingResponse
# Make the project importable and configure Django before any ORM usage.
sys.path.append(os.getcwd())
os.environ["DJANGO_SETTINGS_MODULE"] = "emblazEX.settings"
django.setup()
# Twilio credentials are read from the environment (None if unset).
TWILIO_ACCOUNT_SID = os.environ.get("TWILIO_ACCOUNT_SID")
TWILIO_AUTH_TOKEN = os.environ.get("TWILIO_AUTH_TOKEN")
TWILIO_NUMBER = os.environ.get("TWILIO_NUMBER")
# Shared REST client used by the helper functions below.
client = Client(TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN)
def fetch_response():
    """Return every message sent from the configured Twilio number."""
    return client.messages.list(from_=TWILIO_NUMBER)
def post_message(username, number, body):
    """Send *body*, tagged with *username*, to *number* via Twilio.

    Always returns True (the Twilio call raises on failure).
    """
    tagged_body = f"{body} (from:{username})"
    client.messages.create(to=number, from_=TWILIO_NUMBER, body=tagged_body)
    return True
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# Load the housing data set (first row is the header); path is machine-specific.
df = pd.read_csv(r'C:\Users\HP\Desktop\Matter\python\machinelearning\House_Price.csv', header=0)
# Show all columns side by side instead of wrapping the frame repr.
pd.set_option('display.expand_frame_repr', False)
print(df.head())
print(df.shape)
print(df.describe())
# The triple-quoted block below is earlier exploratory plotting and
# outlier-capping work that was disabled by turning it into a bare string.
"""
print(sns.jointplot(x='n_hot_rooms',y='price',data=df))
plt.show()
print(sns.jointplot(x='rainfall',y='price',data=df))
plt.show()
print(df.head())
print(sns.countplot(x='airport',data=df))
plt.show()
print(sns.countplot(x='waterbody',data=df))
plt.show()
print(sns.countplot(x='bus_ter',data=df))
plt.show()
#Missing values in n_hos_beds
#Skewness or outliers in crime_rate
#Outliers in n_hot_rooms and rainfall
#Bus_ter has only one value
print(df.info())
print(np.percentile(df.n_hot_rooms,[99]))
print(np.percentile(df.n_hot_rooms,[99])[0])
uv=np.percentile(df.n_hot_rooms,[99])[0]
print(df[(df.n_hot_rooms>uv)])
print(df.n_hot_rooms[(df.n_hot_rooms> 3*uv)])
t=df.n_hot_rooms[(df.n_hot_rooms> 3*uv)]=3*uv
print(t)
print(df[(df.n_hot_rooms>uv)])
print(np.percentile(df.rainfall,[1])[0])
lv=np.percentile(df.rainfall,[1])[0]
da=df[(df.rainfall<lv)]
print(da)
df.rainfall[(df.rainfall< 0.3*lv)]=0.3*lv
dte=df[df.rainfall<lv]
print(dte)
print(sns.jointplot(x="crime_rate",y="price",data=df))
plt.show()
print(df.describe())
print(df.info())
df.n_hos_beds=df.n_hos_beds.fillna(df.n_hos_beds.mean())
print(df.info())
"""
# Log-transform crime_rate to reduce its skew, then re-plot against price.
print(sns.jointplot(x="crime_rate",y="price",data=df))
plt.show()
df.crime_rate=np.log(1+df.crime_rate)
print(sns.jointplot(x="crime_rate",y="price",data=df))
plt.show()
# Combine the four distance columns into one average and drop the originals.
df['avg_dist']=(df.dist1+df.dist2+df.dist3+df.dist4)/4
print(df.describe())
del df['dist1']
print(df.describe())
del df['dist2']
del df['dist3']
del df['dist4']
print(df.describe())
# bus_ter holds a single constant value, so it carries no information.
del df['bus_ter']
print(df.head())
#!/bin/python3
import sys
def quickSort(arr):
    """Sort *arr* in place with quicksort (first element as pivot) and return it.

    Fixes: the original performed only a single partition pass and never
    recursed, so the left/right sides stayed unsorted; it also raised
    IndexError on an empty list.  The in-place mutation via arr[:] and the
    returned list are preserved.
    """
    if len(arr) <= 1:
        return arr
    pivot = arr[0]
    left = [x for x in arr if x < pivot]
    equal = [x for x in arr if x == pivot]
    right = [x for x in arr if x > pivot]
    # Recurse on each side, then splice the sorted pieces back in place.
    arr[:] = quickSort(left) + equal + quickSort(right)
    return arr
if __name__ == "__main__":
n = int(input().strip())
arr = list(map(int, input().strip().split(' ')))
result = quickSort(arr)
print (" ".join(map(str, result)))
|
import re
import time
import tornado
from tornado import gen
from tornado import web
from tornado.ioloop import IOLoop
from providers.google_rss import GoogleRSS
from utils import config
class FeedHandler(tornado.web.RequestHandler):
    """Serve a Google+ activity stream as an RSS feed for a registered GID."""

    def data_received(self, chunk):
        # Streamed request bodies are not used by this handler.
        pass

    def __init__(self, application, request, **kwargs):
        # Borrow the shared logger and data layer from the application.
        self.logger = application.logger
        self.data = application.data
        super(FeedHandler, self).__init__(application, request, **kwargs)

    @tornado.web.asynchronous
    @gen.coroutine
    def get(self):
        """Render the RSS feed for ?gid=...&filter=..., refreshing the cache on a miss."""
        self.set_header('Content-Type', 'application/rss+xml;charset=utf-8')
        gid = self.get_argument('gid')
        if not (gid and self.data.is_valid_gid(gid)):
            raise tornado.web.HTTPError(403, reason='Unregistered ID {0}. Subscribe at https://magentariver.com'.format(gid))
        filter_arg = self.get_argument('filter', '')
        self.logger.info('Request: {0},{1}'.format(gid, filter_arg))
        # check cache for miss/hit
        if not self.data.cache.is_cache(gid, filter_arg):
            # cache miss
            self.logger.warning('Cache miss for [{0}]'.format(gid))
            stamp = time.time()
            # re-register gid if valid
            if not self.data.is_valid_gid(gid):
                # Cheap syntactic check first: reject IDs with punctuation or
                # path characters before doing any remote validation.
                if re.search('[!-\*:-@\\\/]', gid):
                    self.logger.warning('Warning: Invalid characters in user ID for RSS {0}'.format(gid))
                    raise tornado.web.HTTPError(400, reason='Invalid characters in user ID {0}'.format(gid))
                # Kick off asynchronous validation of the GID against Google...
                self.data.begin_validate_gid(gid)
                valid_gid = None
                # ...then poll for the result: up to 5 tries, 2 s apart (~10 s).
                for n in range(0, 5):
                    yield gen.Task(IOLoop.instance().add_timeout, time.time() + 2)
                    valid_gid = self.data.get_validate_gid(gid)
                    if valid_gid:
                        break
                if not valid_gid or 'None' == valid_gid:
                    self.logger.warning('Warning: invalid GID for RSS {0}'.format(gid))
                    raise tornado.web.HTTPError(400, reason='Invalid user ID {0}'.format(gid))
                # now can re-register the gid
                self.data.register_gid(gid)
            # Poll for a cache update newer than `stamp` (again up to ~10 s).
            self.logger.info('Waiting for update...')
            for n in range(0, 5):
                yield gen.Task(IOLoop.instance().add_timeout, time.time() + 2)
                if self.data.cache.is_poll_after(gid, stamp):
                    break
            if self.data.cache.is_poll_after(gid, stamp):
                self.logger.info('...update received')
            else:
                self.logger.warning('... Time out waiting for Google Plus API update')
        # read the cache anyway (whatever is there, fresh or stale)
        activities_doc = self.data.cache.get_activities(gid)
        # process activities if any
        if activities_doc:
            updated = self.data.cache.get_poll_stamp(gid)
            self.logger.info('Items received, update stamp: {0}'.format(time.strftime('%x %X %z', time.gmtime(updated))))
            self.process_activities(gid, filter_arg, activities_doc)
        else:
            self.logger.warning('Warning: no activities for {0}'.format(gid))
            raise tornado.web.HTTPError(400, reason='No data available for {0}'.format(gid))

    def process_activities(self, gid, option, activities_doc):
        """Convert the cached activities document into RSS items and render feed.xml."""
        items = GoogleRSS.gen_items(activities_doc, option, self.data.cache)
        self.render('feed.xml', version=config.version, gid=gid, pubDate=GoogleRSS.format_timestamp_rss(time.time()), items=items)
|
import subprocess
from datetime import datetime
import time
import random
def jtalk_normal(t):
    """Synthesize *t* with the neutral 'mei_normal' Open JTalk voice and play it.

    Fix: the text is now encoded to bytes before writing — a Popen pipe
    expects bytes, and every other jtalk_* variant already calls t.encode();
    passing a str here raised TypeError on Python 3.
    """
    open_jtalk=['open_jtalk']
    mech=['-x','/var/lib/mecab/dic/open-jtalk/naist-jdic']
    htsvoice=['-m','/usr/share/hts-voice/mei/mei_normal.htsvoice']
    speed=['-r','1.0']
    outwav=['-ow','open_jtalk.wav']
    cmd=open_jtalk+mech+htsvoice+speed+outwav
    c = subprocess.Popen(cmd,stdin=subprocess.PIPE)
    c.stdin.write(t.encode())
    c.stdin.close()
    c.wait()
    # Play the generated wav asynchronously.
    aplay = ['aplay','-q','open_jtalk.wav']
    wr = subprocess.Popen(aplay)
def jtalk_happy(t):
    """Synthesize *t* with the 'mei_happy' Open JTalk voice, then play it with aplay."""
    cmd = ['open_jtalk',
           '-x', '/var/lib/mecab/dic/open-jtalk/naist-jdic',
           '-m', '/usr/share/hts-voice/mei/mei_happy.htsvoice',
           '-r', '1.0',
           '-ow', 'open_jtalk.wav']
    synth = subprocess.Popen(cmd, stdin=subprocess.PIPE)
    synth.stdin.write(t.encode())
    synth.stdin.close()
    synth.wait()
    # Play the generated wav asynchronously.
    subprocess.Popen(['aplay', '-q', 'open_jtalk.wav'])
def jtalk_angry(t):
    """Synthesize *t* with the 'mei_angry' Open JTalk voice, then play it with aplay."""
    cmd = ['open_jtalk',
           '-x', '/var/lib/mecab/dic/open-jtalk/naist-jdic',
           '-m', '/usr/share/hts-voice/mei/mei_angry.htsvoice',
           '-r', '1.0',
           '-ow', 'open_jtalk.wav']
    synth = subprocess.Popen(cmd, stdin=subprocess.PIPE)
    synth.stdin.write(t.encode())
    synth.stdin.close()
    synth.wait()
    # Play the generated wav asynchronously.
    subprocess.Popen(['aplay', '-q', 'open_jtalk.wav'])
def jtalk_sad(t):
    """Synthesize *t* with the 'mei_sad' Open JTalk voice, then play it with aplay."""
    cmd = ['open_jtalk',
           '-x', '/var/lib/mecab/dic/open-jtalk/naist-jdic',
           '-m', '/usr/share/hts-voice/mei/mei_sad.htsvoice',
           '-r', '1.0',
           '-ow', 'open_jtalk.wav']
    synth = subprocess.Popen(cmd, stdin=subprocess.PIPE)
    synth.stdin.write(t.encode())
    synth.stdin.close()
    synth.wait()
    # Play the generated wav asynchronously.
    subprocess.Popen(['aplay', '-q', 'open_jtalk.wav'])
def jtalk_bashful(t):
    """Synthesize *t* with the 'mei_bashful' Open JTalk voice, then play it with aplay."""
    cmd = ['open_jtalk',
           '-x', '/var/lib/mecab/dic/open-jtalk/naist-jdic',
           '-m', '/usr/share/hts-voice/mei/mei_bashful.htsvoice',
           '-r', '1.0',
           '-ow', 'open_jtalk.wav']
    synth = subprocess.Popen(cmd, stdin=subprocess.PIPE)
    synth.stdin.write(t.encode())
    synth.stdin.close()
    synth.wait()
    # Play the generated wav asynchronously.
    subprocess.Popen(['aplay', '-q', 'open_jtalk.wav'])
|
# Read one line and report its parity; non-numeric input is rejected.
raw = input()
if raw.isnumeric():
    print("Even" if int(raw) % 2 == 0 else "Odd")
else:
    print("invalid")
|
#encoding=UTF8
'''
Redis connection helper.

Usage: import this module and call the methods on `r` (r.get, r.set, ...)
directly.
'''
import redis
# Connection to the lab Redis instance (default DB 0).
r=redis.Redis(host='192.168.184.128',port=6379,db=0)
|
# preprocessing for ml model
'''
1. clean the data (optional)
2. use tfiffvectorizer
'''
# preprocessing for dl model
'''
1. clean the data (optional)
2. use the tokenizer to convert to sequences
3. pad the sequences
'''
# Trying out preprocessing using the same pipeline as our project
#helper functions for lemmatizations
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
import re
import pickle as pk
from nltk import pos_tag
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
# The six toxicity labels both models predict, in output-column order.
list_classes = ["toxic", "severe toxic", "obscene", "threat", "insult", "identity hate"]
embed_size = 300 # how big is each word vector
max_features = 20000 # how many unique words to use (i.e num rows in embedding vector)
maxlen = 100 # max number of tokens kept per padded sequence
wnl = WordNetLemmatizer()
stop_dict = stopwords.words('english')
# Classic ML pipeline: pickled TF-IDF vectorizer + SGD classifier.
tokenizer = pk.load(open('saved_models/tfidf','rb'))
model = pk.load(open('saved_models/model','rb'))
#loading bi-lstm model: Keras tokenizer + bi-LSTM with GloVe embeddings
dnn_tokenizer = pk.load(open('saved_models/lstm_glove_tok','rb'))
dnn_model = tf.keras.models.load_model('saved_models/lstm_glove.h5')
# Trying out preprocessing using the same pipeline as our project
#helper functions for lemmatizations
def penn2morphy(penntag):
    """Convert a Penn Treebank POS tag to a WordNet POS character.

    Unknown tags default to noun ('n').
    Fix: replaced the bare try/except (which also hid unrelated errors)
    with an explicit dict.get default.
    """
    morphy_tag = {'NN': 'n', 'JJ': 'a',
                  'VB': 'v', 'RB': 'r'}
    return morphy_tag.get(penntag[:2], 'n')
def lemmatize_sent(text):
    """Lowercase, POS-tag and WordNet-lemmatize *text*.

    Returns the lemmas joined by single spaces with a trailing space,
    matching the original accumulation behaviour.
    Fixes: no longer shadows the builtin `str`, and builds the result with
    join instead of quadratic string concatenation.
    """
    wnl = WordNetLemmatizer()
    # Text input is string, returns lowercased strings.
    lemmas = [wnl.lemmatize(word.lower(), pos=penn2morphy(tag))
              for word, tag in pos_tag(word_tokenize(text))]
    return ''.join(lemma.lower() + ' ' for lemma in lemmas)
def clean_text(x):
    """Strip HTML, punctuation/digits, stopwords and short tokens, then lemmatize."""
    # remove html tags
    regex = re.compile('<.*?>')
    input = re.sub(regex, '', x)  # NOTE(review): `input` shadows the builtin
    #remove punctuations, numbers.
    input = re.sub('[!@#$%^&*()\n_:><?\-.{}|+-,;""``~`โ]|[0-9]|/|=|\[\]|\[\[\]\]',' ',input)
    input = re.sub('[โโ\']','',input)
    #remove stopwords and tokens with fewer than 3 distinct characters
    tmp_str = ''
    for i in word_tokenize(input):
        if i not in stop_dict and len(set(i)) > 2:
            tmp_str += i + ' '
    #lemmatize the text.
    return lemmatize_sent(tmp_str)
def ml_preprocess(text):
    """Classify *text* with the TF-IDF + SGDClassifier pipeline.

    Returns a dict with the class labels, per-class probabilities (as
    percentages), an overall toxicity flag, the original message, and the
    model name.
    """
    toxcity = True
    text_ = clean_text(text)
    text_ = tokenizer.transform([text_])
    classes = model.predict(text_)
    predict_probas = model.predict_proba(text_)
    # No class predicted positive at all -> message is not toxic.
    if sum(classes[0]) == 0:
        toxcity = False
    return {'classes':list_classes,
            'proba':(predict_probas * 100)[0],
            'toxic':toxcity,
            'message':text,
            'model':'SGDClassifier'
            }
def dl_preporcess(text):
    """Classify *text* with the bi-LSTM + GloVe model (0.5 decision threshold).

    Returns the same result-dict shape as ml_preprocess.
    """
    toxcity = True
    text_ = clean_text(text)
    # NOTE(review): the cleaned text_ is computed but the RAW `text` is what
    # gets tokenized below — confirm whether that is intentional.
    seq = dnn_tokenizer.texts_to_sequences([text])
    padded_text_ = pad_sequences(seq,maxlen =maxlen)
    preds = dnn_model.predict(padded_text_)
    # Not toxic when no class probability exceeds 0.5.
    if sum(preds[0] > 0.5) == 0:
        toxcity = False
    return {'classes':list_classes,
            'proba':(preds * 100)[0],
            'toxic':toxcity,
            'message':text,
            'model':'BILSTM+glove embeddings'
            }
def proprocess_input(text, mode='ml'):
    """Dispatch *text* to the ML ('ml') or deep-learning (any other mode) pipeline."""
    handler = ml_preprocess if mode == 'ml' else dl_preporcess
    return handler(text)
#in the method we are going to convert the numeric output of class labels.
def postprocess_output(mode = 'ml'):
    """Placeholder: intended to map numeric class output to labels (not implemented)."""
    if mode:
        pass
if __name__ == "__main__":
input_ = "im sorry I screwed around with someones talk page. It was very bad to do. I know how having the templates on their talk page helps you assert your dominance over them. I know I should bow down to the almighty administrators. But then again, I'm going to go play outside....with your mom. 76.122.79.82"
# input_ = 'COCKSUCKER BEFORE YOU PISS AROUND ON MY WORK'
print(proprocess_input(input_,mode = 'dnn')) |
import unittest
from src.insertion_sort import insertion_sort
class InsertionSortTest(unittest.TestCase):
    """Unit tests for src.insertion_sort.insertion_sort."""

    def test_correct_worc(self):
        # Fully reversed input.
        self.assertListEqual(insertion_sort([5, 4, 3, 2, 1]), [1, 2, 3, 4, 5])

    def test_second_correct_worc(self):
        # Shuffled input.
        self.assertListEqual(insertion_sort([3, 5, 1, 2, 4]), [1, 2, 3, 4, 5])

    def test_empty_list(self):
        self.assertListEqual(insertion_sort([]), [])


if __name__ == '__main__':
    unittest.main()
|
def disl_relax_script(pair_info, units, atom_style, masses, read_data, temp=0):
    """Build a LAMMPS input script for relaxing a dislocation system.

    pair_info  -- pair_style/pair_coeff lines for the potential.
    units      -- LAMMPS units setting (e.g. 'metal').
    atom_style -- LAMMPS atom_style setting.
    masses     -- per-type atomic masses; the first half of the types form
                  the mobile 'move' group, the rest are held fixed.
    read_data  -- path of the data file to read.
    temp       -- 0 for a pure minimization; otherwise an NVT equilibration
                  at *temp* K followed by a minimization.
    Returns the script as a single newline-joined string.
    Fix: `xrange` (Python 2 only) replaced with `range`, and `len(masses)/2`
    pinned to integer division (`//`) to keep the original Python 2 grouping
    behaviour on Python 3.
    """
    mass = ''
    group_move = ''
    for i in range(len(masses)):
        mass += 'mass %i %f\n'%(i+1,masses[i])
        if i < len(masses) // 2:
            group_move += ' '+str(i+1)
    newline = '\n'
    # Common header: boundary, styles, data file, masses, potential, groups,
    # per-atom energy output and dump format.
    script = newline.join(['boundary s s p',
                           'units ' + units,
                           'atom_style ' + atom_style,
                           'read_data ' + read_data,
                           '',
                           mass,
                           pair_info,
                           '',
                           'group move type' + group_move,
                           'group hold subtract all move',
                           '',
                           'compute peatom all pe/atom',
                           '',
                           'dump first all custom 100000 atom.* id type x y z c_peatom',
                           'dump_modify first format "%d %d %.13e %.13e %.13e %.13e"',
                           'thermo_style custom step pe',
                           ''])
    if temp == 0:
        # Athermal case: freeze the hold group and minimize.
        script = newline.join([script,
                               'fix nomove hold setforce 0.0 0.0 0.0',
                               'minimize 0 1e-5 10000 100000'])
    else:
        # Thermal case: seed velocities at 2*temp (equipartition), run NVT,
        # then minimize.
        script = newline.join([script,
                               'velocity move create %f 9467 mom yes rot yes dist gaussian'%(2 * temp),
                               'fix nomove hold setforce 0.0 0.0 0.0',
                               'timestep 0.001',
                               'thermo 10000',
                               'fix 1 all nvt temp %f %f 0.1'%(temp, temp),
                               '',
                               'run 10000',
                               'minimize 0 1e-5 10000 100000'])
    return script
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipeline to load bigquery datasets data into Inventory."""
import json
# Fake project IDs returned by get_projectids(); the duplicate is intentional.
GET_PROJECTIDS_RETURN = [
    'bq-test', 'bq-test'
]
EXPECTED_PROJECTIDS = GET_PROJECTIDS_RETURN
# One dataset list per project above.
DATASET_PROJECT_MAP = [
    [{'datasetId': 'test', 'projectId': 'bq-test'}],
    [{'datasetId': 'test', 'projectId': 'bq-test'}]
]
DATASET_PROJECT_MAP_EXPECTED = DATASET_PROJECT_MAP
GET_DATASETS_FOR_PROJECTIDS_RETURN = [
    {'datasetId': 'test', 'projectId': 'bq-test'}
]
# Raw dataset ACL entries as returned by the BigQuery API.
GET_DATASET_ACCESS_RETURN = [
    {'role': 'WRITER', 'specialGroup': 'projectWriters'},
    {'role': 'OWNER', 'specialGroup': 'projectOwners'},
    {'role': 'OWNER', 'userByEmail': 'user@domain.com'},
    {'role': 'READER', 'specialGroup': 'projectReaders'}
]
RETRIEVE_DATASET_ACCESS_RETURN = GET_DATASET_ACCESS_RETURN
GET_DATASETS_LIST_RETURN = [
    {'datasetId': 'test', 'projectId': 'bq-test'},
    {'datasetId': 'test', 'projectId': 'bq-test'}
]
# (project_id, dataset_id, access-list) tuples fed to the transform step.
DATASET_PROJECT_ACCESS_MAP = [
    ('bq-test',
     'test',
     [{'role': 'WRITER', 'specialGroup': 'projectWriters'},
      {'role': 'OWNER', 'specialGroup': 'projectOwners'},
      {'role': 'OWNER', 'userByEmail': 'user@domain.com'},
      {'role': 'READER', 'specialGroup': 'projectReaders'}]),
    ('bq-test',
     'test',
     [{'role': 'WRITER', 'specialGroup': 'projectWriters'},
      {'role': 'OWNER', 'specialGroup': 'projectOwners'},
      {'role': 'OWNER', 'userByEmail': 'user@domain.com'},
      {'role': 'READER', 'specialGroup': 'projectReaders'}])
]
# Flattened rows expected after transforming DATASET_PROJECT_ACCESS_MAP:
# one row per ACL entry, with unused access_* columns set to None and the
# original entry preserved as JSON in raw_access_map.
EXPECTED_TRANSFORM = [
    {'access_domain': None,
     'access_group_by_email': None,
     'access_special_group': 'projectWriters',
     'access_user_by_email': None,
     'access_view_dataset_id': None,
     'access_view_project_id': None,
     'access_view_table_id': None,
     'dataset_id': 'test',
     'project_id': 'bq-test',
     'raw_access_map': json.dumps({'role': 'WRITER',
                                   'specialGroup': 'projectWriters'}),
     'role': 'WRITER'},
    {'access_domain': None,
     'access_group_by_email': None,
     'access_special_group': 'projectOwners',
     'access_user_by_email': None,
     'access_view_dataset_id': None,
     'access_view_project_id': None,
     'access_view_table_id': None,
     'dataset_id': 'test',
     'project_id': 'bq-test',
     'raw_access_map': json.dumps({'role': 'OWNER',
                                   'specialGroup': 'projectOwners'}),
     'role': 'OWNER'},
    {'access_domain': None,
     'access_group_by_email': None,
     'access_special_group': None,
     'access_user_by_email': 'user@domain.com',
     'access_view_dataset_id': None,
     'access_view_project_id': None,
     'access_view_table_id': None,
     'dataset_id': 'test',
     'project_id': 'bq-test',
     'raw_access_map': json.dumps({'role': 'OWNER',
                                   'userByEmail': 'user@domain.com'}),
     'role': 'OWNER'},
    {'access_domain': None,
     'access_group_by_email': None,
     'access_special_group': 'projectReaders',
     'access_user_by_email': None,
     'access_view_dataset_id': None,
     'access_view_project_id': None,
     'access_view_table_id': None,
     'dataset_id': 'test',
     'project_id': 'bq-test',
     'raw_access_map': json.dumps({'role': 'READER',
                                   'specialGroup': 'projectReaders'}),
     'role': 'READER'},
    {'access_domain': None,
     'access_group_by_email': None,
     'access_special_group': 'projectWriters',
     'access_user_by_email': None,
     'access_view_dataset_id': None,
     'access_view_project_id': None,
     'access_view_table_id': None,
     'dataset_id': 'test',
     'project_id': 'bq-test',
     'raw_access_map': json.dumps({'role': 'WRITER',
                                   'specialGroup': 'projectWriters'}),
     'role': 'WRITER'},
    {'access_domain': None,
     'access_group_by_email': None,
     'access_special_group': 'projectOwners',
     'access_user_by_email': None,
     'access_view_dataset_id': None,
     'access_view_project_id': None,
     'access_view_table_id': None,
     'dataset_id': 'test',
     'project_id': 'bq-test',
     'raw_access_map': json.dumps({'role': 'OWNER',
                                   'specialGroup': 'projectOwners'}),
     'role': 'OWNER'},
    {'access_domain': None,
     'access_group_by_email': None,
     'access_special_group': None,
     'access_user_by_email': 'user@domain.com',
     'access_view_dataset_id': None,
     'access_view_project_id': None,
     'access_view_table_id': None,
     'dataset_id': 'test',
     'project_id': 'bq-test',
     'raw_access_map': json.dumps({'role': 'OWNER',
                                   'userByEmail': 'user@domain.com'}),
     'role': 'OWNER'},
    {'access_domain': None,
     'access_group_by_email': None,
     'access_special_group': 'projectReaders',
     'access_user_by_email': None,
     'access_view_dataset_id': None,
     'access_view_project_id': None,
     'access_view_table_id': None,
     'dataset_id': 'test',
     'project_id': 'bq-test',
     'raw_access_map': json.dumps({'role': 'READER',
                                   'specialGroup': 'projectReaders'}),
     'role': 'READER'}
]
DATASET_PROJECT_ACCESS_MAP_EXPECTED = DATASET_PROJECT_ACCESS_MAP
|
#!/usr/bin/env python
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import models
import unittest
from datetime import datetime
from google.appengine.api import memcache
from google.appengine.ext import testbed
class HelperTests(unittest.TestCase):
    """Tests for the numeric-ID-holder helper functions in models."""

    def setUp(self):
        # Fresh in-memory datastore stub for every test.
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()

    def tearDown(self):
        self.testbed.deactivate()

    def _assert_there_is_exactly_one_id_holder_and_matches(self, id):
        # Exactly one NumericIdHolder should exist and carry the given id.
        id_holders = models.NumericIdHolder.all().fetch(5)
        self.assertEqual(len(id_holders), 1)
        self.assertTrue(id_holders[0])
        self.assertEqual(id_holders[0].key().id(), id)

    def test_create_in_transaction_with_numeric_id_holder(self):
        def execute(id):
            return models.Branch(id=id, name='some branch', key_name='some-branch').put()
        self.assertEqual(len(models.Branch.all().fetch(5)), 0)
        self.assertEqual(len(models.NumericIdHolder.all().fetch(5)), 0)
        self.assertTrue(models.create_in_transaction_with_numeric_id_holder(execute))
        branches = models.Branch.all().fetch(5)
        self.assertEqual(len(branches), 1)
        self.assertEqual(branches[0].name, 'some branch')
        self.assertEqual(branches[0].key().name(), 'some-branch')
        self._assert_there_is_exactly_one_id_holder_and_matches(branches[0].id)

    def test_failing_in_create_in_transaction_with_numeric_id_holder(self):
        # execute returning None must roll back and leave no records behind.
        def execute(id):
            return None
        self.assertEqual(len(models.Branch.all().fetch(5)), 0)
        self.assertEqual(len(models.NumericIdHolder.all().fetch(5)), 0)
        self.assertFalse(models.create_in_transaction_with_numeric_id_holder(execute))
        self.assertEqual(len(models.Branch.all().fetch(5)), 0)
        self.assertEqual(len(models.NumericIdHolder.all().fetch(5)), 0)

    def test_raising_in_create_in_transaction_with_numeric_id_holder(self):
        # An exception inside execute must propagate and leave no records.
        def execute(id):
            raise TypeError
            return None
        self.assertEqual(len(models.Branch.all().fetch(5)), 0)
        self.assertEqual(len(models.NumericIdHolder.all().fetch(5)), 0)
        self.assertRaises(TypeError, models.create_in_transaction_with_numeric_id_holder, (execute))
        self.assertEqual(len(models.Branch.all().fetch(5)), 0)
        self.assertEqual(len(models.NumericIdHolder.all().fetch(5)), 0)

    def test_delete_model_with_numeric_id_holder(self):
        def execute(id):
            return models.Branch(id=id, name='some branch', key_name='some-branch').put()
        branch = models.Branch.get(models.create_in_transaction_with_numeric_id_holder(execute))
        self.assertEqual(len(models.NumericIdHolder.all().fetch(5)), 1)
        models.delete_model_with_numeric_id_holder(branch)
        self.assertEqual(len(models.Branch.all().fetch(5)), 0)
        self.assertEqual(len(models.NumericIdHolder.all().fetch(5)), 0)

    def test_model_from_numeric_id(self):
        def execute(id):
            return models.Branch(id=id, name='some branch', key_name='some-branch').put()
        branch = models.Branch.get(models.create_in_transaction_with_numeric_id_holder(execute))
        # Lookup succeeds for the held id, fails for a neighbouring id and
        # after deletion.
        self.assertEqual(models.model_from_numeric_id(branch.id, models.Branch).key(), branch.key())
        self.assertEqual(models.model_from_numeric_id(branch.id + 1, models.Branch), None)
        models.delete_model_with_numeric_id_holder(branch)
        self.assertEqual(models.model_from_numeric_id(branch.id, models.Branch), None)
class BuilderTests(unittest.TestCase):
    """Tests for models.Builder creation, password handling and auth."""

    def setUp(self):
        # Fresh in-memory datastore stub for every test.
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()

    def tearDown(self):
        self.testbed.deactivate()

    def test_create(self):
        builder_key = models.Builder.create('some builder', 'some password')
        self.assertTrue(builder_key)
        builder = models.Builder.get(builder_key)
        # The builder name doubles as the datastore key name.
        self.assertEqual(builder.key().name(), 'some builder')
        self.assertEqual(builder.name, 'some builder')
        self.assertEqual(builder.password, models.Builder._hashed_password('some password'))

    def test_update_password(self):
        builder = models.Builder.get(models.Builder.create('some builder', 'some password'))
        self.assertEqual(builder.password, models.Builder._hashed_password('some password'))
        builder.update_password('other password')
        self.assertEqual(builder.password, models.Builder._hashed_password('other password'))
        # Make sure it's saved
        builder = models.Builder.get(builder.key())
        self.assertEqual(builder.password, models.Builder._hashed_password('other password'))

    def test_hashed_password(self):
        # The hash must not reveal the plaintext and has a fixed 64-char length.
        self.assertNotEqual(models.Builder._hashed_password('some password'), 'some password')
        self.assertFalse('some password' in models.Builder._hashed_password('some password'))
        self.assertEqual(len(models.Builder._hashed_password('some password')), 64)

    def test_authenticate(self):
        builder = models.Builder.get(models.Builder.create('some builder', 'some password'))
        self.assertTrue(builder.authenticate('some password'))
        self.assertFalse(builder.authenticate('bad password'))
class ReportLog(unittest.TestCase):
    """Tests for models.ReportLog payload parsing and field accessors."""

    def setUp(self):
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()

    def tearDown(self):
        self.testbed.deactivate()

    def _create_log_with_payload(self, payload):
        """Build an unsaved ReportLog carrying *payload* as its JSON body."""
        return models.ReportLog(timestamp=datetime.now(), headers='some headers', payload=payload)

    def test_parsed_payload(self):
        # Invalid payload parses to False; the result is cached in _parsed.
        log = self._create_log_with_payload('')
        self.assertFalse('_parsed' in log.__dict__)
        self.assertEqual(log._parsed_payload(), False)
        self.assertEqual(log._parsed, False)
        log = self._create_log_with_payload('{"key": "value", "another key": 1}')
        self.assertEqual(log._parsed_payload(), {"key": "value", "another key": 1})
        self.assertEqual(log._parsed, {"key": "value", "another key": 1})

    def test_get_value(self):
        log = self._create_log_with_payload('{"string": "value", "integer": 1, "float": 1.1}')
        self.assertEqual(log.get_value('string'), 'value')
        self.assertEqual(log.get_value('integer'), 1)
        self.assertEqual(log.get_value('float'), 1.1)
        self.assertEqual(log.get_value('bad'), None)

    def test_results(self):
        log = self._create_log_with_payload('{"results": 123}')
        self.assertEqual(log.results(), 123)
        log = self._create_log_with_payload('{"key": "value"}')
        self.assertEqual(log.results(), None)

    def test_builder(self):
        log = self._create_log_with_payload('{"key": "value"}')
        self.assertEqual(log.builder(), None)
        builder_name = "Chromium Mac Release (Perf)"
        # Unknown builder name resolves to None until the Builder is created.
        log = self._create_log_with_payload('{"builder-name": "%s"}' % builder_name)
        self.assertEqual(log.builder(), None)
        builder_key = models.Builder.create(builder_name, 'some password')
        log = self._create_log_with_payload('{"builder-name": "%s"}' % builder_name)
        self.assertEqual(log.builder().key(), builder_key)

    # FIXME test_branch and test_platform

    def test_build_number(self):
        log = self._create_log_with_payload('{"build-number": 123}')
        self.assertEqual(log.build_number(), 123)
        log = self._create_log_with_payload('{"key": "value"}')
        self.assertEqual(log.build_number(), None)

    def test_webkit_revision(self):
        log = self._create_log_with_payload('{"key": "value"}')
        self.assertEqual(log.webkit_revision(), None)
        log = self._create_log_with_payload('{"webkit-revision": 123}')
        self.assertEqual(log.webkit_revision(), 123)

    def test_chromium_revision(self):
        # Fix: the original method lacked the 'test_' prefix so unittest never
        # ran it, and it asserted webkit_revision() against a chromium-revision
        # payload, which could never equal 123.
        # Assumes models.ReportLog.chromium_revision() mirrors
        # webkit_revision() — confirm against models.py.
        log = self._create_log_with_payload('{"chromium-revision": 123}')
        self.assertEqual(log.chromium_revision(), 123)
        log = self._create_log_with_payload('{"key": "value"}')
        self.assertEqual(log.chromium_revision(), None)
class PersistentCacheTests(unittest.TestCase):
    """Tests for models.PersistentCache (datastore-backed memcache wrapper)."""

    def setUp(self):
        # Both datastore and memcache stubs are needed here.
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()
        self.testbed.init_memcache_stub()

    def tearDown(self):
        self.testbed.deactivate()

    def _assert_persistent_cache(self, name, value):
        # The value must be visible in both the datastore and memcache layers.
        self.assertEqual(models.PersistentCache.get_by_key_name(name).value, value)
        self.assertEqual(memcache.get(name), value)

    def test_set(self):
        self.assertEqual(len(models.PersistentCache.all().fetch(5)), 0)
        models.PersistentCache.set_cache('some-cache', 'some data')
        self._assert_persistent_cache('some-cache', 'some data')
        # Setting again overwrites the existing entry in both layers.
        models.PersistentCache.set_cache('some-cache', 'some other data')
        self._assert_persistent_cache('some-cache', 'some other data')

    def test_get(self):
        self.assertEqual(memcache.get('some-cache'), None)
        self.assertEqual(models.PersistentCache.get_cache('some-cache'), None)
        models.PersistentCache.set_cache('some-cache', 'some data')
        self.assertEqual(memcache.get('some-cache'), 'some data')
        self.assertEqual(models.PersistentCache.get_cache('some-cache'), 'some data')
        # Even after a memcache eviction the datastore copy still answers.
        memcache.delete('some-cache')
        self.assertEqual(memcache.get('some-cache'), None)
        self.assertEqual(models.PersistentCache.get_cache('some-cache'), 'some data')
# Run the module's test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
# coding: utf-8
"""
Context manager
"""
# Demonstration: manual open/close vs. the with-statement context manager.
# NOTE(review): the file is opened in append ('a') mode, which is a write
# mode -- reading from it may fail or return nothing; confirm intent.
fh = open('test_file_path.txt', 'a')
# fh.write('test string')
# fh.writelines(['test string'])
print fh.readlines() # -> ['', '']
fh.close()
# The with-statement closes the file even if the body raises.
with open('test_file_path.txt', 'r') as fh:
    lines = fh.readlines()
print lines
class Hypervisor(object):
    """
    Hypervisor representation with a simple lock flag.

    Fix: power_action() previously did ``raise NotImplemented()``;
    NotImplemented is a constant, not callable and not raisable -- the
    correct exception type is NotImplementedError.
    """
    __locked = False
    def lock(self):
        """Mark the hypervisor as locked."""
        self.__locked = True
    def unlock(self):
        """Mark the hypervisor as unlocked."""
        self.__locked = False
    def is_locked(self):
        """Return True while the hypervisor is locked."""
        return self.__locked
    def power_action(self, action):
        """Abstract power operation; concrete hypervisors must override."""
        raise NotImplementedError()
# Manual lock/unlock pairing -- if the (commented-out) power action raised,
# unlock() would never run.  Compare with the context-manager version below.
hyp1 = Hypervisor()
hyp1.lock()
# hyp1.power_action(1)
hyp1.unlock()
# Same manual pattern applied to a batch of hypervisors.
hypervisors = [Hypervisor() for i in range(100)]
for h in hypervisors:
    h.lock()
    # h.power_action(1)
    h.unlock()
# ------------------------------------------------------------------------
# context manager
class LockNode(object):
    """
    Context manager that locks a hypervisor on entry and unlocks on exit.

    Note: __exit__ returns True, so any exception raised inside the managed
    block is suppressed after the hypervisor has been unlocked.
    """
    def __init__(self, hypervisor):
        super(LockNode, self).__init__()
        self.hypervisor = hypervisor
    def __enter__(self):
        result = self.hypervisor.lock()
        return result
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always release the lock, then swallow whatever was raised.
        self.hypervisor.unlock()
        suppress_exception = True
        return suppress_exception
# With LockNode the unlock happens even though power_action() raises; the
# exception is then swallowed because __exit__ returns True.
with LockNode(hyp1):
    hyp1.power_action(1)
for h in hypervisors:
    with LockNode(h):
        h.power_action(1)
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 23 16:15:06 2020
@author: 100293
"""
import cx_Oracle
import pandas as pd
# Pull a sample of the sales table.  NOTE: credentials are placeholders and
# must come from configuration in real use.
conn=cx_Oracle.connect("xxxxxxx","xxxxxxx","xxxxxxx")
querry="SELECT * FROM pharma_sales_master WHERE ROWNUM <= 1000"
df1=pd.read_sql_query(querry,conn)
# Quick exploration of the raw frame.
df1.shape
df1.describe()
df1.info()
null=df1.isnull().sum()
null_mean=df1.isnull().mean()
# Keep only columns with fewer than 70% missing values, then drop ids.
cln_df1=df1[df1.columns[df1.isnull().mean() < 0.7]]
cl_df1=cln_df1.drop(columns=['DISCOUNT_ID','PATIENT','USER_ID','SYSTEM_ID'], axis='columns')
corrr_matrix=cl_df1.corr()
##correlation
import seaborn as sns
import matplotlib.pyplot as plt  # fix: plt was used below but never imported (NameError)
##get correlation of each features in dataset
corrmat=cl_df1.corr()
top_corr_features=corrmat.index
plt.figure(figsize=(20,20))
#plot heatmap
g=sns.heatmap(cl_df1[top_corr_features].corr(),annot=True,cmap="RdYlGn")
data=cl_df1.drop(columns=['BILL_NO','TRA_DT','BILLING_TYPE','HOME_DELIVERY','BRANCH_ID','TRANSFER','WELFARE', 'CREDIT_CARD', 'FOUNDATION', 'BPL_MEMBER_ID', 'IGST', 'SGST','CGST', 'TAX_DISCOUNT', 'COUNTER_ID'], axis='columns')
data_profit=data['PROFIT']
data_profit=data['PROFIT'].describe()
data.columns
data.corr()
##correlation
import seaborn as sns
##get correlation of each features in dataset
corrmat=data.corr()
top_corr_features=corrmat.index
#plot heatmap
g=sns.heatmap(data[top_corr_features].corr(),annot=True,cmap="RdYlGn")
data.hist()
# Model PROFIT from a handful of numeric features.
macare_data=data[['ITEM_VAL','DISCOUNT','BILL_AMT','COST','CASH','DOCTOR_ID','PROFIT']]
feature_columns=['ITEM_VAL','DISCOUNT','BILL_AMT','COST','CASH','DOCTOR_ID']
predicted_class=['PROFIT']
x=macare_data[feature_columns].values
y=macare_data[predicted_class].values
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.30,random_state=10)
from sklearn.linear_model import LinearRegression
regressor=LinearRegression()
# NOTE(review): the model is fitted on the FULL data even though a
# train/test split was just made -- confirm whether x_train/y_train was
# intended before changing behaviour.
regressor.fit(x,y)
import pickle
pickle.dump(regressor,open('model.pkl','wb'))
|
from django.core.management.base import BaseCommand
from django.conf import settings
from architect.monitor.models import Monitor
class Command(BaseCommand):
    help = 'Synchronise Monitor objects'
    def handle(self, *args, **options):
        """Create or refresh a Monitor row for every configured engine."""
        for engine_name, engine in settings.MONITOR_ENGINES.items():
            existing = Monitor.objects.filter(name=engine_name)
            if existing.count() == 0:
                # First sighting: the 'engine' key names the kind and the
                # remaining keys become metadata.
                engine_kind = engine.pop('engine')
                monitor = Monitor(name=engine_name,
                                  engine=engine_kind,
                                  metadata=engine)
                monitor.save()
                message = 'Monitor "{}" resource created'.format(engine_name)
            else:
                monitor = Monitor.objects.get(name=engine_name)
                monitor.metadata = engine
                monitor.save()
                message = 'Monitor "{}" resource updated'.format(engine_name)
            self.stdout.write(self.style.SUCCESS(message))
|
from django.apps import AppConfig
class PhDAmissionPortalConfig(AppConfig):
    """Django app configuration for the phDAdmissionPortal application."""
    name = 'phDAdmissionPortal'
|
import os
import boto3
from .formatter import FormatReport
from .sender import SendReport, sender_sns, sender_stdout
def parse_args():
    """Return hard-coded CLI defaults (placeholder for real arg parsing)."""
    defaults = {'region': 'us-west-2'}
    return defaults
def list_to_dict(obj, key='Key', value='Value'):
    """Collapse a list of {Key, Value} mappings into one plain dict."""
    result = {}
    for entry in obj:
        result[entry[key]] = entry[value]
    return result
class AutoScaling:
    """Thin wrapper around the boto3 autoscaling client."""
    def __init__(self, region):
        self._client = boto3.client('autoscaling', region)
    def describe_auto_scaling_groups(self, **kwargs):
        """Yield every ASG in the region, with its tag list flattened."""
        for page in self._client.get_paginator(
            'describe_auto_scaling_groups'
        ).paginate(**kwargs):
            for asg in page['AutoScalingGroups']:
                # Convert the [{'Key': .., 'Value': ..}] tag list into a
                # dict so callers can do plain membership tests.
                asg['Tags'] = list_to_dict(asg['Tags'])
                yield asg
def has_stateless_ha_tag(asg):
    """True when the ASG carries a 'StatelessHa' tag (any value)."""
    tags = asg['Tags']
    # A stricter variant would check the tag value:
    # asg['Tags'].get('StatelessHa', 'no').lower() == 'yes'
    return 'StatelessHa' in tags
def not_enough_subnets(asg):
    """True when the ASG spans fewer than two subnets."""
    subnet_ids = asg['VPCZoneIdentifier'].split(',')
    return len(subnet_ids) < 2
def enough_subnets(asg):
    """True when the ASG spans at least two subnets."""
    subnet_ids = asg['VPCZoneIdentifier'].split(',')
    return len(subnet_ids) >= 2
def asg_name(asg):
    """Return the ASG's AutoScalingGroupName field."""
    name = asg['AutoScalingGroupName']
    return name
def main(event, context, sender=sender_stdout):
    """Report StatelessHa-tagged ASGs grouped by subnet sufficiency.

    Fix: the report labels were swapped -- groups FAILING the subnet check
    were listed under 'Sufficient Subnets' and vice versa.

    Args:
        event, context: Lambda invocation arguments (unused here).
        sender: callable used by SendReport to emit the formatted report.
    """
    autoscaling_groups = list(
        filter(
            has_stateless_ha_tag,
            AutoScaling(
                os.environ['AWS_DEFAULT_REGION']
            ).describe_auto_scaling_groups(),
        )
    )
    report = {
        'Sufficient Subnets': map(
            asg_name, filter(enough_subnets, autoscaling_groups)
        ),
        'Insufficient Subnets': map(
            asg_name, filter(not_enough_subnets, autoscaling_groups)
        ),
    }
    SendReport(sender).send(FormatReport(report).format())
if __name__ == '__main__':
    # Fix: main() requires event and context positional arguments; calling
    # main() bare raised TypeError.  Pass None placeholders when invoked
    # from the command line rather than as a Lambda handler.
    main(None, None)
|
# -*- coding: utf-8 -*-
from app.models.meta import metadata, Base
from sqlalchemy import Table, Column, Integer, Date, Text
from sqlalchemy.orm import mapper
from sqlalchemy.sql.expression import desc
import web
# Classic-mapping table definition backing the News class below.
news_table = Table("NEWS", metadata,
                   Column("id", Integer, primary_key=True, nullable=False),
                   Column("news", Text, nullable=False),
                   Column("news_dt", Date, nullable=False)
                   )
class News(Base):
    """ Website announcements """
    @classmethod
    def all(cls):
        """ Overrides the default all method to guarantee the order by """
        # im_func unwraps the bound method (Python 2 only) so Base.all can be
        # re-invoked against News with a custom ordering clause.
        return Base.all.im_func(News, order_by_clause=desc(News.news_dt)) #@UndefinedVariable
    def __repr__(self) :
        return "<News(%s,%s)>" % (self.news, self.news_dt)
# Bind the News class to its table (SQLAlchemy classic mapping).
mapper(News, news_table)
web.debug("[MODEL] Successfully mapped News class")
|
# with open('weather_data.csv') as data_file:
# data = data_file.readlines()
# print(data)
# import csv
#
#
# with open('weather_data.csv') as data_file:
# data = csv.reader(data_file)
# flag = False
# temperatures = []
# for row in data:
# if flag:
# temperatures.append(int(row[1]))
# flag = True
# print(temperatures)
import pandas
# data = pandas.read_csv('weather_data.csv')
# print(data['temp'])
# data_dict = data.to_dict()
# # print(data_dict)
#
# data_list = data['temp'].to_list()
# print(sum(data_list)/len(data_list))
# print(data['temp'].mean())
# print(data['temp'].max())
# Get Data in Columns
# print(data.condition)
# Get Data in Rows
# print(data[data.day == "Monday"])
# Row of data with the Highest Temperature
# print(data[data.temp == data.temp.max()])
# monday = data[data.day == 'Monday']
#
# print(int(monday.temp) * 9 / 5 + 32)
# Create Dataframe from Scratch
# data_dict = {
# 'students': ["Amy", "James", "Angela"],\
# 'scores': [74, 56, 65]
# }
# data = pandas.DataFrame(data_dict)
# print(data)
# Save the Data to CSV
# data.to_csv("new_data.csv")
import pandas
# Load the 2018 Central Park squirrel census and count by primary fur color.
squirrel_census_data = pandas.read_csv('2018_Central_Park_Squirrel_Census_-_Squirrel_Data.csv')
fur_color = squirrel_census_data['Primary Fur Color']
black_squirrels_count = len(squirrel_census_data[fur_color == 'Black'])
grey_squirrels_count = len(squirrel_census_data[fur_color == 'Gray'])
cinnamon_squirrels_count = len(squirrel_census_data[fur_color == 'Cinnamon'])
# Note: the output label is "Grey" while the source data spells it "Gray".
data_dict = {
    "Fur Color": ["Grey", "Cinnamon", "Black"],
    "Count": [grey_squirrels_count, cinnamon_squirrels_count, black_squirrels_count]
}
df = pandas.DataFrame(data_dict)
df.to_csv("squirrel_count.csv")
print(black_squirrels_count)
print(grey_squirrels_count)
print(cinnamon_squirrels_count)
|
import random
inputs = {"hallo" : "hello", "katze" : "cat"}
def recover():
    """Reload the vocabulary dict from the 'words' file, then show it."""
    with open("words", "r") as f:
        for line in f:
            german, english = line.split()
            inputs[german] = english
    print(inputs)
def safe():
    """Persist the vocabulary dict to the 'words' file.

    Fix: the file handle was opened without ever being closed; a context
    manager now guarantees the data is flushed and the handle released.
    """
    with open("words", "w") as fh:
        for german in inputs:
            fh.write(german + " " + inputs[german])
            fh.write("\n")
def newwords():
    """Interactively add German/English pairs until '#finish' is entered."""
    while True:
        german = input("German word: ")
        if german == "#finish":
            break
        english = input("English word: ")
        if english == "#finish":
            break
        inputs[german] = english
def query():
    """Quiz the user on random vocabulary until '#finish' is entered."""
    while True:
        german_word = random.choice(list(inputs))
        answer = input("English translation from " + german_word + ": ")
        if answer == "#finish":
            return
        correct = inputs[german_word]
        if answer == correct:
            print("Correct!")
        else:
            print("False! Right answer: " + correct)
def printall():
    """Print every vocabulary pair as 'german - english'."""
    for german, english in inputs.items():
        print("{0} - {1}".format(german, english))
# Load any previously saved vocabulary, then run the command loop.
recover()
while True:
    command = input("Command> ")
    if command == "newwords":
        newwords()
    elif command == "query":
        query()
    elif command == "output":
        printall()
    elif command == "#finish":
        # Persist the vocabulary before exiting.
        safe()
        break
|
import numpy as np
import torchvision.transforms as transforms
import torch
import cv2
from . import cifar
from . import cub200_2011
# Maps dataset-type strings to their data-loading classes.
LOADER_LUT = {
    'cifar' : cifar.CIFARData,
    'cub200_2011': cub200_2011.CUBData,
}
def _lookup_data_class(dataset_type, logger):
    """Resolve dataset_type via LOADER_LUT, logging and raising if unknown.

    Fix: the original wrapped ``LOADER_LUT.get`` in try/except, but
    ``dict.get`` never raises -- unknown types slipped through as None and
    crashed later with "'NoneType' object is not callable".
    """
    data_class = LOADER_LUT.get(dataset_type)
    if data_class is None:
        if logger is not None:
            logger.error("dataset type error, {} not exist".format(dataset_type))
        raise KeyError("unknown dataset type: {}".format(dataset_type))
    return data_class
def get_loader(dataset_type, data_path, loader_type, label_path=None, cfg=None, logger=None):
    """Build a torch DataLoader for 'train', 'eval', 'self_test' or 'test'.

    Args:
        dataset_type: key into LOADER_LUT ('cifar' or 'cub200_2011').
        data_path: dataset location, forwarded to the data class.
        loader_type: which pipeline to build.
        label_path: optional label file, forwarded to the data class.
        cfg: config object -- assumed to carry USE_AUG, CROP, ROATION,
            BATCHSIZE, NUM_WORKERS and RESIZE; confirm against config class.
        logger: optional logger for error reporting.

    Returns:
        A torch.utils.data.DataLoader.

    Raises:
        KeyError: when dataset_type is unknown.
        ValueError: when loader_type is unsupported (the original fell
            through to ``return data_loader`` here, raising
            UnboundLocalError).
    """
    if loader_type == 'train':
        if cfg.USE_AUG == True:
            train_aug = transforms.Compose([
                transforms.ToPILImage(),
                transforms.RandomHorizontalFlip(),
                # NOTE(review): 'ROATION' mirrors the config's spelling --
                # confirm before renaming to ROTATION.
                transforms.RandomRotation(cfg.ROATION),
                #transforms.RandomCrop(cfg.CROP, cfg.PAD),
                transforms.RandomResizedCrop(cfg.CROP),
                transforms.ToTensor(),
                transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
            ])
        else:
            train_aug = None
        _data_class = _lookup_data_class(dataset_type, logger)
        _data = _data_class(data_path, dtype='train', label_path=label_path, aug=train_aug, cfg=cfg.RESIZE)
        data_loader = torch.utils.data.DataLoader(_data,
                batch_size=cfg.BATCHSIZE, shuffle=True, num_workers=cfg.NUM_WORKERS,
                drop_last=False)
    elif loader_type == 'eval':
        if cfg.USE_AUG == True:
            val_aug = transforms.Compose([
                transforms.ToPILImage(),
                # Resize to CROP/0.875 then centre-crop (ImageNet-style eval).
                transforms.Resize(int(cfg.CROP/0.875)),
                transforms.CenterCrop(cfg.CROP),
                transforms.ToTensor(),
                transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
            ])
        else:
            val_aug = None
        _data_class = _lookup_data_class(dataset_type, logger)
        _data = _data_class(data_path, dtype='eval', label_path=label_path, aug=val_aug, cfg=cfg.RESIZE)
        data_loader = torch.utils.data.DataLoader(_data,
                batch_size=cfg.BATCHSIZE, shuffle=False, num_workers=cfg.NUM_WORKERS,
                drop_last=False)
    elif loader_type == 'self_test':
        augmentation = transforms.Compose([
            transforms.ToTensor(),
            #transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        ])
        _data_class = _lookup_data_class(dataset_type, logger)
        _data = _data_class(data_path, dtype='train', label_path=label_path, aug=augmentation, cfg=cfg)
        data_loader = torch.utils.data.DataLoader(_data,
                batch_size=2, shuffle=True, num_workers=0,
                drop_last=True)
    elif loader_type == 'test':
        # (also fixes the 'augmentaiton' local-variable typo)
        augmentation = transforms.Compose([
            transforms.ToTensor()
        ])
        _data_class = _lookup_data_class(dataset_type, logger)
        _data = _data_class(data_path, aug=augmentation, test_data=True)
        data_loader = torch.utils.data.DataLoader(_data,
                batch_size=1, shuffle=False, num_workers=0,
                drop_last=False)
    else:
        logger.error("error, only support train type dataloader")
        raise ValueError("unsupported loader_type: {}".format(loader_type))
    return data_loader
def inverse_preprocess(image):
    """Convert a CHW tensor back to an HWC uint8 image (values x 255).

    Three-channel images are additionally converted RGB -> BGR for OpenCV.
    """
    hwc = image.numpy().transpose((1, 2, 0)) * 255
    hwc = hwc.astype(np.uint8)
    if hwc.shape[2] == 3:
        hwc = cv2.cvtColor(hwc, cv2.COLOR_RGB2BGR)
    return hwc
def test_():
    """Visual smoke test: display augmented image pairs from the CUB loader.

    NOTE(review): depends on machine-specific dataset paths and on an
    interactive matplotlib backend; not suitable for automated runs.
    """
    import matplotlib.pyplot as plt
    import random
    # Fix seeds so the sampled pairs are reproducible between runs.
    random.seed(0)
    torch.manual_seed(0)
    train_loader = get_loader("cub200_2011",
                            ['/home/ikenaga/Public/CUB_200_2011/images.txt',
                            '/home/ikenaga/Public/CUB_200_2011/train_test_split.txt',
                            '/home/ikenaga/Public/CUB_200_2011/images/'],
                            'self_test',
                            '/home/ikenaga/Public/CUB_200_2011/image_class_labels.txt',
                            224)
    for i, (img_ori_tensor, label) in enumerate(train_loader):
        # img_ori_tensor_0 = img_ori_tensor[0].numpy().astype(np.uint8)
        # img_ori_tensor_1 = img_ori_tensor[1].numpy().astype(np.uint8)
        img_ori_tensor_0 = inverse_preprocess(img_ori_tensor[0])
        img_ori_tensor_1 = inverse_preprocess(img_ori_tensor[1])
        # Show the two samples of the batch side by side, titled by label.
        fig = plt.figure()
        a = fig.add_subplot(1,2,1)
        a.set_title('img_ori_tensor_0%d'%label[0].numpy())
        plt.imshow(img_ori_tensor_0)
        a = fig.add_subplot(1,2,2)
        a.set_title('img_ori_tensor_1%d'%label[1].numpy())
        plt.imshow(img_ori_tensor_1)
        plt.show()
if __name__ == "__main__":
test_() |
from sympy import pprint
from sympy import Symbol
from sympy import Eq
from sympy import simplify
from sympy.solvers import solve
# Symbols for the discretised E_x field-update derivation: field samples at
# the old/new time step, grid spacings, conductivities and constants.
ex_new = Symbol("E_x|t+dt; i,j,k")
ex_old = Symbol("E_x|t; i,j,k")
dt = Symbol("dt")
dx = Symbol("dx")
dy = Symbol("dy")
dz = Symbol("dz")
sigma_x = Symbol("o`x")
sigma_y = Symbol("o`y")
sigma_z = Symbol("o`z")
e0 = Symbol("e0")
c0 = Symbol("c0")
integral_e = Symbol("E_Sum")
integral_curl = Symbol("Curl_Sum")
exx = Symbol("exx")
# Half-time-step magnetic field samples used by the curl term.
hz = Symbol("H_z|t+dt/2; i,j,k")
hy = Symbol("H_y|t+dt/2; i,j,k")
hz_dy = Symbol("H_z|t+dt/2; i,j-1,k")
hy_dz = Symbol("H_y|t+dt/2; i,j,k-1")
# Update equation; solve symbolically for the new E_x sample.
equation = Eq((ex_new - ex_old) / dt + ((sigma_z + sigma_y) / e0) * ex_old + ((sigma_z * sigma_y * dt) / e0**2) * integral_e,
              (c0 / exx) * (((hz - hz_dy) / dy) - ((hy - hy_dz) / dz)) + ((c0 * sigma_x * dt) / (exx * e0)) * integral_curl)
pprint(equation)
solved = solve(equation, ex_new)
pprint(solved)
|
from uc.itm import UCWrappedFunctionality
from uc.utils import wait_for
import logging
log = logging.getLogger(__name__)
class Contract_Pay(UCWrappedFunctionality):
    """UC functionality modelling a two-party payment-channel contract.

    Fixes relative to the original:
      * the parent __init__ was called with the misspelled/undefined names
        ``buts`` and ``crupt`` (NameError); ``bits`` is now forwarded and a
        placeholder is passed for the unknown argument.
      * ``self.broadcsat`` typo in challenge() (AttributeError).
      * string comparison ``self.flag is "UnCoopClose"`` used identity;
        replaced with ``==``.
      * ``route_party_msg`` was defined without ``self`` although it is
        scheduled as a bound method and uses ``self`` throughout.
      * the challenge branch unpacked ``_sid`` but used ``_sig``.
    """
    def __init__(self, k, bits, sid, pid, channels, pump, poly, importargs):
        self.ssid = sid[0]
        self.P_s = sid[1]      # sender party id
        self.P_r = sid[2]      # receiver party id
        self.b_s = sid[3]      # sender's balance
        self.b_r = sid[4]      # receiver's balance
        self.delta = sid[5]    # network delay bound
        # NOTE(review): the original passed an undefined name ``crupt`` in
        # the third position; None is a placeholder -- confirm the parent's
        # expected argument before relying on corruption handling.
        UCWrappedFunctionality.__init__(self, k, bits, None, sid, pid, channels, poly, pump, importargs)
        self.leakbuffer = None
        self.flag = 'OffChain'
        self.nonce = 0
        self.T_settle = 2 * self.delta   # settlement window after UnCoopClose
        self.T_deadline = -1
        self.state = (self.b_s, self.b_r, self.nonce)
    def clock_round(self):
        """Query the wrapper for the current clock round."""
        self.write('f2w', ('clock-round',), 0)
        rnd = wait_for(self.channels['w2f']).msg[1]
        return rnd
    def check_sig(self, _sig, _state):
        """Signature verification is modelled as always valid."""
        return True
    def close(self, _sender, _state, _sig):
        """Process a close request carrying a signed channel state."""
        if self.flag == "OffChain" and self.check_sig(_sig, _state):
            _b_s, _b_r, _nonce = _state
            # Accept states that do not roll back the nonce, conserve the
            # total balance, and never reduce the receiver's balance.
            if _nonce >= self.nonce and _b_s + _b_r == self.b_s + self.b_r and _b_r >= self.b_r:
                self.nonce = _nonce
                self.state = _state
                if _sender is self.P_r:
                    self.flag = "Closed"
                    self.broadcast( ("Closed", self.state), 0 )
                else:
                    # Sender-initiated close opens a challenge window.
                    self.flag = "UnCoopClose"
                    self.T_deadline = self.clock_round() + self.T_settle
                    self.broadcast( ("UnCoopClose", self.state, self.T_deadline), 0)
        else:
            self.pump.write('')
    def challenge(self, _sender, _state, _sig):
        """Receiver disputes an uncooperative close with a newer state."""
        if _sender is self.P_r and self.flag == "UnCoopClose" and self.check_sig(_sig, _state):
            _b_s, _b_r, _nonce = _state
            if _nonce >= self.nonce:
                self.flag = "Closed"
                self.state = _state
                self.nonce = _nonce
                self.broadcast( ("Closed", self.state), 0 )
            else:
                self.pump.write('')
        else:
            self.pump.write('')
    def route_party_msg(self, sender, msg, imp):
        """Dispatch a delayed party message to close() or challenge()."""
        if msg[0] == 'close':
            _, _state, _sig = msg
            self.close(sender, _state, _sig)
        elif msg[0] == 'challenge':
            _, _state, _sig = msg
            self.challenge(sender, _state, _sig)
        else:
            self.pump.write('dump')
    def party_msg(self, d):
        """Entry point for party messages: schedule delivery after delta."""
        msg = d.msg
        imp = d.imp
        (_sid, _sender),msg = msg
        self.write( 'f2w',
            ('schedule',
             self.route_party_msg,
             (_sender, msg, imp),
             self.delta),
            0
        )
        assert wait_for(self.channels['w2f']).msg == ('OK',)
        self.leak(msg)
        self.pump.write('dump')
    def send_to(self, to, msg, imp):
        """Deliver msg to a single party over the f2p channel."""
        self.write('f2p', (to, msg), imp)
    def broadcast(self, msg, imp):
        """Schedule delivery of msg to both parties and leak it."""
        self.leak(msg)
        self.write( 'f2w',
            ('schedule',
             self.send_to,
             ((self.sid, self.P_s), msg, imp),
             1),
            0
        )
        assert wait_for(self.channels['w2f']).msg == ('OK',)
        self.write('f2w',
            ('schedule',
             self.send_to,
             ((self.sid, self.P_r), msg, imp),
             1),
            0
        )
        assert wait_for(self.channels['w2f']).msg == ('OK',)
        self.leak(('bcast', msg))
        self.pump.write('dump')
|
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
# Download the 20-newsgroups corpus (network access on first run).
train_20news = fetch_20newsgroups(subset='train', shuffle = True, random_state =21)
test_20news = fetch_20newsgroups(subset='test', shuffle = True, random_state =21 )
vectorizer = CountVectorizer()
# Fit the vocabulary on the training split only, then reuse it on test.
X_train = vectorizer.fit_transform(train_20news.data)
X_test = vectorizer.transform(test_20news.data)
#dataTrain = csr_matrix(1*(X_train>0))
# NOTE(review): .toarray() densifies a large sparse matrix -- heavy on RAM.
dataTrain = X_train.toarray()
targetTrain = train_20news.target
#dataTest = csr_matrix(1*(X_test>0))
dataTest = X_test.toarray()
targetTest = test_20news.target
def step1A(x, y):
    """Group the sample rows in *x* by their class labels in *y*.

    Fix: the parameters were ignored and the module globals
    dataTrain/targetTrain were read instead, so the function could not be
    reused on other data.  Behaviour is unchanged for the existing call
    site, which passes exactly those arrays.

    Returns:
        dict mapping class label -> list of sample rows.
    """
    separated = {}
    for sample, label in zip(x, y):
        if label not in separated:
            separated[label] = []
        separated[label].append(sample)
    return separated
def step1B(dataset):
    """Laplace-smoothed per-feature frequency across the rows of *dataset*."""
    summaries = []
    for attribute in zip(*dataset):
        summaries.append((np.sum(attribute) + 1.0) / (len(attribute) + 2.0))
    return summaries
def step1(x, y):
    """Per-class feature summaries: group by class, then summarise each group."""
    grouped = step1A(x, y)
    return {class_value: step1B(instances)
            for class_value, instances in grouped.items()}
def step2(dictClasses, classSize, totalSize):
    """Class prior probabilities as a (classSize, 1) float array."""
    sizes = np.ndarray((classSize, 1), dtype=float)
    for class_value, instances in dictClasses.items():
        sizes[class_value] = len(instances) / totalSize
    return sizes
def step3(dictClasses, testData, testtarget, length, totalClasses):
    """Score each test sample against each class (naive-Bayes products).

    NOTE(review): the body reads the globals ``dataDic``, ``sizes`` and
    ``targetTest`` instead of its parameters, and it indexes them with
    ``targetTest[j]`` even though ``j`` ranges over classes -- this looks
    like it should be ``dataDic[j]`` / ``sizes[j]``.  Confirm the intended
    algorithm before changing.
    """
    arr = np.ndarray((len(testData),len(dictClasses.keys())) , dtype = float)
    for i in range(length):
        for j in range(totalClasses):
            arr[i][j] = np.product(np.trim_zeros(np.asarray((dataDic[targetTest[j]])*sizes[targetTest[j]]) * testData[i]))
    return arr
def step4(testOutputClasses, lengthTest):
    """Pick the argmax class for each row of scores, as a (n, 1) array."""
    predictions = np.ndarray((lengthTest, 1), dtype=float)
    for row_index in range(lengthTest):
        predictions[row_index] = np.argmax(testOutputClasses[row_index])
    return predictions
# Train summaries and priors, score the test set, and report accuracy.
dataDic = step1(dataTrain, targetTrain)
lengthTrain = len(dataTrain)
lengthClasses = len(dataDic.keys())
sizes = step2(dataDic,lengthClasses ,lengthTrain)
lengthTest = len(dataTest)
arrFinal = step3(dataDic, dataTest, targetTest, lengthTest, lengthClasses)
predictions = step4(arrFinal, lengthTest)
print(accuracy_score(targetTest, predictions))
|
import torch
import torch.nn as nn
from torch.distributions import MultivariateNormal
import numpy as np
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Memory:
    """Rollout buffer for PPO: actions, states, log-probs and rewards."""
    def __init__(self):
        self.actions = []
        self.ee_pos = []
        self.logprobs = []
        self.rewards = []
    def clear_memory(self):
        # Clear in place so external references to these lists stay valid.
        for buffer in (self.actions, self.ee_pos, self.logprobs, self.rewards):
            del buffer[:]
class ActorCritic(nn.Module):
    """Gaussian policy with separate mean/log-std heads plus a value critic."""
    def __init__(self, args, state_dim, action_dim, action_std):
        super(ActorCritic, self).__init__()
        # Action-mean head; the final Tanh bounds means to [-1, 1].
        self.actor_mean = nn.Sequential(
            nn.Linear(state_dim, 64),
            nn.Tanh(),
            nn.Linear(64, 32),
            nn.Tanh(),
            nn.Linear(32, action_dim),
            nn.Tanh()
        )
        # State-dependent log standard deviation head (unbounded output).
        self.actor_logstd = nn.Sequential(
            nn.Linear(state_dim, 64),
            nn.Tanh(),
            nn.Linear(64, 32),
            nn.Tanh(),
            nn.Linear(32, action_dim),
        )
        # critic
        self.critic = nn.Sequential(
            nn.Linear(state_dim, 64),
            nn.Tanh(),
            nn.Linear(64, 32),
            nn.Tanh(),
            nn.Linear(32, 1)
        )
        # Fixed diagonal variance; not referenced by act()/evaluate() below.
        self.action_var = torch.full((action_dim,), action_std*action_std)
    def forward(self):
        raise NotImplementedError
    def act(self, state, memory):
        """Sample an action for *state* and record it in *memory*.

        NOTE(review): requires CUDA (`.cuda()` below), and the stored
        ``action_logprob`` is neither a proper density nor a log-density --
        it omits the 0.5 factor on the squared norm, the log, and the
        std-dependent terms, yet update() treats stored values as
        log-probs.  Confirm intended behaviour.
        """
        state = torch.unsqueeze(torch.from_numpy(state), 0).cuda().float()
        with torch.no_grad():
            action_mean = self.actor_mean(state).cpu()
            action_logstd = self.actor_logstd(state).cpu()
        d = action_mean.shape[-1]
        sample_size = action_mean.size()
        # Reparameterised sample: mean + std * standard-normal noise.
        sample_std_normal = torch.normal(torch.zeros(sample_size), torch.ones(sample_size))
        action = action_mean + torch.exp(action_logstd) * sample_std_normal
        action_logprob = (2 * np.pi)**(-d/2) * torch.exp(- torch.norm(sample_std_normal))
        memory.ee_pos.append(state)
        memory.actions.append(action)
        memory.logprobs.append(action_logprob)
        return action.detach()
    def compute_logprobs(self, action_mean, action_var, action):
        """Diagonal-Gaussian log-density of *action*, summed over dims."""
        k = action.size(-1)
        delta = action - action_mean
        result = -k/2. * torch.log(torch.Tensor([2 * np.pi]).cuda()) - \
            0.5 * torch.sum(torch.log(action_var), dim=-1) - \
            0.5 * torch.sum(delta**2 / action_var, dim=-1)
        return result
    def evaluate(self, state, action):
        """Return log-probs of *action* under the current policy and V(state)."""
        action_mean = torch.squeeze(self.actor_mean(state))
        action_logstd = torch.squeeze(self.actor_logstd(state))
        action_std = torch.exp(action_logstd)
        action_var = action_std * action_std
        action_logprobs = self.compute_logprobs(action_mean, action_var, action)
        state_value = self.critic(state)
        return action_logprobs, torch.squeeze(state_value)
class PPO:
    """Proximal Policy Optimization with clipped surrogate objective."""
    def __init__(self, args, state_dim, action_dim, action_std, lr, betas, gamma, K_epochs, eps_clip):
        self.lr = lr
        self.betas = betas
        self.gamma = gamma          # discount factor
        self.eps_clip = eps_clip    # PPO clipping range
        self.K_epochs = K_epochs    # optimisation epochs per update
        self.policy = ActorCritic(args, state_dim, action_dim, action_std).to(device)
        self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=lr, betas=betas)
        # Behaviour policy used for sampling; synced after each update.
        self.policy_old = ActorCritic(args, state_dim, action_dim, action_std).to(device)
        self.MseLoss = nn.MSELoss()
    def select_action(self, state, memory):
        """Sample an action from the old (behaviour) policy."""
        return self.policy_old.act(state, memory).cpu().data.numpy().flatten()
    def update(self, memory):
        """Run K epochs of clipped-PPO updates on the collected rollout."""
        # Monte Carlo estimate of rewards:
        rewards = []
        discounted_reward = 0
        for reward in reversed(memory.rewards):
            discounted_reward = reward + (self.gamma * discounted_reward)
            rewards.insert(0, discounted_reward)
        # Normalizing the rewards:
        rewards = torch.tensor(rewards)
        rewards = (rewards - rewards.mean()) / (rewards.std() + 1e-5)
        # convert list to tensor
        old_ee_pos = torch.squeeze(torch.stack(memory.ee_pos)).detach()
        old_actions = torch.squeeze(torch.stack(memory.actions)).detach()
        old_logprobs = torch.squeeze(torch.stack(memory.logprobs)).detach()
        # Optimize policy for K epochs:
        for _ in range(self.K_epochs):
            # shuffle data (same permutation applied to every tensor)
            randperm = np.random.permutation(len(old_actions))
            old_ee_pos = old_ee_pos[randperm]
            old_actions = old_actions[randperm]
            old_logprobs = old_logprobs[randperm]
            rewards = rewards[randperm]
            batch_start = 0
            batch_size = 32
            while batch_start < len(old_actions):
                # images_batch = old_images[batch_start: batch_start+batch_size].cuda()
                ee_pos_batch = old_ee_pos[batch_start: batch_start+batch_size].cuda()
                actions_batch = old_actions[batch_start: batch_start+batch_size].cuda()
                logprobs_batch = old_logprobs[batch_start: batch_start+batch_size].cuda()
                rewards_batch = rewards[batch_start: batch_start+batch_size].cuda()
                # Evaluating old actions and values :
                logprobs, state_values = self.policy.evaluate(ee_pos_batch, actions_batch)
                # Finding the ratio (pi_theta / pi_theta__old):
                ratios = torch.exp(logprobs - logprobs_batch.detach())
                # Finding Surrogate Loss:
                advantages = rewards_batch - state_values.detach()
                surr1 = ratios * advantages
                surr2 = torch.clamp(ratios, 1-self.eps_clip, 1+self.eps_clip) * advantages
                loss = -torch.min(surr1, surr2) + 0.5*self.MseLoss(state_values.float(), rewards_batch.float())
                # take gradient step
                self.optimizer.zero_grad()
                loss.mean().backward()
                self.optimizer.step()
                batch_start += batch_size
        # Copy new weights into old policy:
        self.policy_old.load_state_dict(self.policy.state_dict())
|
from betterapis.app import api, db
# Create the schema (idempotent) and serve the API locally on port 8080.
if __name__ == '__main__':
    db.create_all()
    api.run(port=8080, debug=True)
|
"""Top-level project Main function."""
from ColumnCropper import ColumnCropper
import ImageReader
import RunTimeData
import ColumnWindowFinder
import sys
sys.path.append('../../runtime_data/')
import RunTimeData
def main():
    """Crop columns from each matching input page and report run statistics."""
    starting_data = RunTimeData.starting_print_statement()
    start_time, time_elapsed = starting_data[0], starting_data[1]
    files = ImageReader.read_files('include', 'output')
    total_files = 0
    year_out = ''
    for page_index, file_path in enumerate(files):
        # The four-digit year is encoded at a fixed position of the name.
        year = file_path[-17:-13]
        if ColumnWindowFinder.run_filter(page_index, year):
            file_operate = ColumnCropper(file_path, page_index, start_time, time_elapsed)
            time_elapsed = file_operate.time_elapsed
            total_files += 1
    RunTimeData.concluding_print_statement(start_time, time_elapsed)
    print(total_files)
# Script entry point.
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python3
# LSST Data Management System
# Copyright 2014 LSST Corporation.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program. If not,
# see <http://www.lsstcorp.org/LegalNotices/>.
"""
Create k8s Persistent Volumes and Persistent Volume Claims
@author Benjamin Roziere, IN2P3
@author Fabrice Jammes, IN2P3
"""
# -------------------------------
# Imports of standard modules --
# -------------------------------
import argparse
import os.path
import sys
import yaml
def _build_yaml(data_path, pvc_name, hostname, instance, output_dir, template_dir):
    """Render the PV and PVC manifests for one Qserv volume claim.

    Args:
        data_path: host path backing the persistent volume.
        pvc_name: name of the PersistentVolumeClaim (PV is "pv-<name>").
        hostname: node the volume is pinned to via nodeAffinity.
        instance: Qserv instance label applied to both manifests.
        output_dir: directory receiving the generated yaml files.
        template_dir: directory containing the .tpl.yaml templates.
    """
    # yaml for persistent volume
    #
    tpl_fname = 'pv-qserv.tpl.yaml'
    yaml_tpl = os.path.join(template_dir, tpl_fname)
    with open(yaml_tpl, 'r') as f:
        # Consistency fix: the two call sites previously spelled the safe
        # loader two different ways; yaml.safe_load is equivalent to
        # yaml.load(..., Loader=yaml.SafeLoader) and is used everywhere now.
        yaml_data = yaml.safe_load(f)
    yaml_data['metadata']['name'] = "pv-{}".format(pvc_name)
    yaml_data['metadata']['labels']['pvc_name'] = pvc_name
    yaml_data['metadata']['labels']['instance'] = instance
    # Pin the volume to the given node.
    node_name = yaml_data['spec']['nodeAffinity']['required']['nodeSelectorTerms'][0]['matchExpressions'][0]['values']
    node_name[0] = hostname
    yaml_data['spec']['local']['path'] = data_path
    yaml_fname = "pv-{}.yaml".format(pvc_name)
    yaml_fname = os.path.join(output_dir, yaml_fname)
    with open( yaml_fname, "w") as f:
        f.write(yaml.dump(yaml_data, default_flow_style=False))
    # yaml for persistent volume claim
    #
    yaml_tpl = os.path.join(template_dir, 'pvc-qserv.tpl.yaml')
    with open(yaml_tpl, 'r') as f:
        yaml_data = yaml.safe_load(f)
    yaml_data['metadata']['name'] = "{}".format(pvc_name)
    yaml_data['metadata']['labels']['instance'] = instance
    yaml_data['spec']['selector']['matchLabels']['pvc_name'] = pvc_name
    yaml_fname = "pvc-{}.yaml".format(pvc_name)
    yaml_fname = os.path.join(output_dir, yaml_fname)
    with open( yaml_fname, "w") as f:
        f.write(yaml.dump(yaml_data, default_flow_style=False))
if __name__ == "__main__":
    try:
        # Templates default to the "manifests" directory next to this script.
        cdir = os.path.dirname(os.path.realpath(__file__))
        manifest_dir = os.path.join(cdir, "manifests")
        parser = argparse.ArgumentParser(description="Create k8s Persistent Volumes and Claims")
        parser.add_argument('-p', '--path', dest='data_path',
                            required=True, metavar='<hostPath>',
                            help='Path on the host')
        parser.add_argument('-n', '--pvcname', dest='pvc_name',
                            required=True, metavar='<persistentVolumeClaimName>',
                            help='Name of the PersistentVolumeClaim')
        parser.add_argument('-H', '--hostname', dest='hostname',
                            required=False, metavar='<hostname>',
                            help='Hostname of the node')
        parser.add_argument('-t', '--templateDir', dest='template_dir',
                            default=manifest_dir,
                            required=False, metavar='<templateDir>',
                            help='yaml template directory')
        parser.add_argument('-o', '--outputDir', dest='output_dir',
                            required=True, metavar='<outputDir>',
                            help='Output directory for generated yaml files')
        parser.add_argument('-i', '--instance', dest='instance',
                            required=True, metavar='<instance>',
                            help='Name of qserv instance')
        args = parser.parse_args()
        _build_yaml(args.data_path, args.pvc_name, args.hostname, args.instance, args.output_dir, args.template_dir)
    except Exception as e:
        # Top-level boundary: report the failure and exit non-zero.
        print(e)
        sys.exit(1)
|
# Singly linked-list node.
class Node:
    def __init__(self, v):
        """Store the value; new nodes start with no successor."""
        self.val = v
        self.next = None
# Convert a Python list to a linked list; returns the first real node.
def ls_node(ls):
    dummy = Node(None)   # sentinel head simplifies appending
    tail = dummy
    for value in ls:
        tail.next = Node(value)
        tail = tail.next
    return dummy.next
# Convert a linked list back into a Python list.
def node_ls(head):
    """Collect node values into a list.

    Fix: the original crashed with AttributeError when *head* was None
    (an empty linked list); it now returns [].
    """
    ls = []
    while head is not None:
        ls.append(head.val)
        head = head.next
    return ls
|
# Directory where uploaded images are stored (relative path).
UPLOAD_FOLDER = 'images/'
# Maximum accepted upload size, in megabytes.
MAX_FILE_SIZE_MB = 5
|
#!/usr/bin/env python
"""Test suite for aospy.timedate module."""
import datetime
import cftime
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from itertools import product
from aospy.data_loader import set_grid_attrs_as_coords
from aospy.internal_names import (
BOUNDS_STR,
RAW_START_DATE_STR,
RAW_END_DATE_STR,
SUBSET_START_DATE_STR,
SUBSET_END_DATE_STR,
TIME_BOUNDS_STR,
TIME_STR,
TIME_WEIGHTS_STR,
)
from aospy.automate import _merge_dicts
from aospy.utils.times import (
apply_time_offset,
average_time_bounds,
monthly_mean_ts,
monthly_mean_at_each_ind,
ensure_datetime,
datetime_or_default,
month_indices,
_month_conditional,
extract_months,
ensure_time_avg_has_cf_metadata,
_assert_has_data_for_time,
add_uniform_time_weights,
assert_matching_time_coord,
ensure_time_as_index,
sel_time,
yearly_average,
infer_year,
maybe_convert_to_index_date_type,
prep_time_data,
)
# Objects that the date helpers below must reject as datetimes.
_INVALID_DATE_OBJECTS = [1985, True, None]
def test_apply_time_offset():
    """apply_time_offset must agree with pandas' DateOffset arithmetic."""
    start = datetime.datetime(1900, 5, 10)
    years, months, days, hours = -2, 1, 7, 3
    offset_kwargs = dict(years=years, months=months, days=days, hours=hours)
    # Cover input arrays of length 0, 1, and >1.
    for periods in range(3):
        times = pd.date_range(start=start, freq='M', periods=periods)
        times = pd.to_datetime(times.values)  # Workaround for pandas bug
        actual = apply_time_offset(xr.DataArray(times), **offset_kwargs)
        desired = times + pd.DateOffset(**offset_kwargs)
        assert actual.identical(desired)
def test_monthly_mean_ts_single_month():
    """A series confined to one month collapses to that month's mean."""
    time_index = pd.date_range('2000-01-01', freq='6H', periods=4 * 31)
    series = xr.DataArray(np.random.random(time_index.shape),
                          dims=[TIME_STR], coords={TIME_STR: time_index})
    np.testing.assert_allclose(monthly_mean_ts(series), series.mean(TIME_STR))
def test_monthly_mean_ts_submonthly():
    """Daily data must reduce to the same values as a monthly resample."""
    time_index = pd.date_range('2000-01-01', freq='1D', periods=365 * 3)
    series = xr.DataArray(np.random.random(time_index.shape),
                          dims=[TIME_STR], coords={TIME_STR: time_index})
    expected = series.resample(**{TIME_STR: '1M'}).mean(TIME_STR)
    assert expected.identical(monthly_mean_ts(series))
def test_monthly_mean_ts_monthly():
    """Already-monthly data passes through unchanged."""
    time_index = pd.date_range('2000-01-01', freq='1M', periods=120)
    series = xr.DataArray(np.random.random(time_index.shape),
                          dims=[TIME_STR], coords={TIME_STR: time_index})
    assert series.identical(monthly_mean_ts(series))
@pytest.mark.filterwarnings('ignore:Mean of empty slice')
def test_monthly_mean_ts_na():
    """Months with no data are dropped rather than returned as NaN."""
    time_index = pd.to_datetime(['2000-06-01', '2001-06-01'])
    series = xr.DataArray(np.random.random(time_index.shape),
                          dims=[TIME_STR], coords={TIME_STR: time_index})
    # Resampling to monthly introduces all-NaN months between the samples.
    series = series.resample(**{TIME_STR: '1M'}).mean(TIME_STR)
    expected = series.dropna(TIME_STR)
    assert expected.identical(monthly_mean_ts(series))
def test_monthly_mean_at_each_ind():
    """Monthly-mean values are broadcast back onto sub-monthly timestamps.

    Each sub-monthly timestamp should receive the monthly-mean value for
    the month it falls in.
    """
    times_submonthly = pd.to_datetime(['2000-06-01', '2000-06-15',
                                       '2000-07-04', '2000-07-19'])
    times_means = pd.to_datetime(['2000-06-01', '2000-07-01'])
    len_other_dim = 2
    arr_submonthly = xr.DataArray(
        np.random.random((len(times_submonthly), len_other_dim)),
        dims=[TIME_STR, 'dim0'], coords={TIME_STR: times_submonthly}
    )
    arr_means = xr.DataArray(
        np.random.random((len(times_means), len_other_dim)),
        dims=arr_submonthly.dims, coords={TIME_STR: times_means}
    )
    actual = monthly_mean_at_each_ind(arr_means, arr_submonthly)
    # Two June timestamps repeat the June mean row; two July timestamps
    # repeat the July mean row.
    desired_values = np.stack([arr_means.values[0]] * len_other_dim +
                              [arr_means.values[1]] * len_other_dim,
                              axis=0)
    desired = xr.DataArray(desired_values, dims=arr_submonthly.dims,
                           coords=arr_submonthly.coords)
    assert actual.identical(desired)
@pytest.mark.parametrize('date', [np.datetime64('2000-01-01'),
                                  cftime.DatetimeNoLeap(1, 1, 1),
                                  datetime.datetime(1, 1, 1),
                                  '2000-01-01'])
def test_ensure_datetime_valid_input(date):
    """Supported date types are returned unchanged."""
    result = ensure_datetime(date)
    assert result == date
def test_ensure_datetime_invalid_input():
    """Every unsupported object raises TypeError.

    Bug fix: the original placed the loop *inside* a single
    ``pytest.raises`` block, so the context manager exited on the first
    raise and only the first object was ever exercised.  Each candidate
    now gets its own ``raises`` context.
    """
    for obj in _INVALID_DATE_OBJECTS:
        with pytest.raises(TypeError):
            ensure_datetime(obj)
def test_datetime_or_default():
    """None yields the fallback; a real date goes through ensure_datetime."""
    sample = np.datetime64('2000-01-01')
    assert datetime_or_default(None, 'dummy') == 'dummy'
    assert datetime_or_default(sample, 'dummy') == ensure_datetime(sample)
def test_month_indices():
    """month_indices maps season strings and ints to lists of month numbers.

    Bug fix: the original grouped three invalid inputs under one
    ``pytest.raises`` block, so only ``'dfm'`` was actually tested --
    the context manager exits as soon as the first ValueError is raised.
    Each invalid input now gets its own ``raises`` context.
    """
    np.testing.assert_array_equal(month_indices('ann'), range(1, 13))
    np.testing.assert_array_equal(month_indices('jja'),
                                  np.array([6, 7, 8]))
    for invalid in ('dfm', 'j', 'q'):
        with pytest.raises(ValueError):
            month_indices(invalid)
    assert month_indices('s') == [9]
    np.testing.assert_array_equal(month_indices('djf'),
                                  np.array([12, 1, 2]))
    assert month_indices(12) == [12]
def test_month_conditional():
    """_month_conditional flags timestamps whose month is in the given list."""
    # Month-end timestamps: [2000-01-31, 2000-02-29].
    test = pd.date_range('2000-01-01', '2000-03-01', freq='M')
    test = xr.DataArray(test, dims=[TIME_STR], coords=[test])
    result_jan = _month_conditional(test, [1])
    np.testing.assert_array_equal(result_jan, np.array([True, False]))
    result_jan_feb = _month_conditional(test, [1, 2])
    np.testing.assert_array_equal(result_jan_feb, np.array([True, True]))
    result_march = _month_conditional(test, [3])
    np.testing.assert_array_equal(result_march, np.array([False, False]))
    # Sub-daily timestamps straddling a year boundary:
    # [1999-12-31 18:00, 2000-01-01 00:00].
    test = pd.date_range('1999-12-31 18:00:00', '2000-01-01 00:00:00',
                         freq='6H')
    test = xr.DataArray(test, dims=[TIME_STR])
    result_jan = _month_conditional(test, [1])
    np.testing.assert_array_equal(result_jan,
                                  np.array([False, True]))
    result_jd = _month_conditional(test, [1, 12])
    np.testing.assert_array_equal(result_jd,
                                  np.array([True, True]))
    # Test month not in range
    result_march = _month_conditional(test, [3])
    np.testing.assert_array_equal(result_march,
                                  np.array([False, False]))
def test_extract_months():
    """Only the requested season's days survive, across multiple years."""
    time = xr.DataArray(pd.date_range(start='2001-02-18', end='2002-07-12',
                                      freq='1D'), dims=[TIME_STR])
    # 'mam' = March-April-May; expect the MAM span of each year, in order.
    spans = [('2001-03-01', '2001-05-31'), ('2002-03-01', '2002-05-31')]
    desired = xr.concat(
        [xr.DataArray(pd.date_range(start=first, end=last, freq='1D'),
                      dims=[TIME_STR]) for first, last in spans],
        dim=TIME_STR)
    actual = extract_months(time, 'mam')
    xr.testing.assert_identical(actual, desired)
def test_extract_months_single_month():
    """Selecting the only month present returns the input unchanged."""
    january = xr.DataArray(pd.date_range(start='1678-01-01', end='1678-01-31',
                                         freq='1M'), dims=[TIME_STR])
    actual = extract_months(january, 1)
    xr.testing.assert_identical(actual, january)
def test_ensure_time_avg_has_cf_metadata(ds_time_encoded_cf):
    """Units/calendar attrs propagate to bounds, weights, and raw dates."""
    ds = ds_time_encoded_cf
    time = ds[TIME_STR].values
    time_bounds = ds[TIME_BOUNDS_STR].values
    units_str = ds[TIME_STR].attrs['units']
    cal_str = ds[TIME_STR].attrs['calendar']
    # Precondition: the bounds variable starts without CF attributes.
    with pytest.raises(KeyError):
        ds[TIME_BOUNDS_STR].attrs['units']
    with pytest.raises(KeyError):
        ds[TIME_BOUNDS_STR].attrs['calendar']
    ds = ensure_time_avg_has_cf_metadata(ds)
    result = ds[TIME_BOUNDS_STR].attrs['units']
    assert result == units_str
    result = ds[TIME_BOUNDS_STR].attrs['calendar']
    assert result == cal_str
    # Time weights equal the width of each bounds interval, in days.
    avg_DT_data = np.diff(time_bounds, axis=1).squeeze()
    average_DT_expected = xr.DataArray(avg_DT_data,
                                       coords=[time],
                                       dims=[TIME_STR],
                                       name=TIME_WEIGHTS_STR)
    average_DT_expected[TIME_STR].attrs['units'] = units_str
    average_DT_expected.attrs['units'] = 'days'
    average_DT_expected[TIME_STR].attrs['calendar'] = cal_str
    assert ds[TIME_WEIGHTS_STR].identical(average_DT_expected)
    # Raw start/end dates span the full bounds range and carry CF attrs.
    assert ds[RAW_START_DATE_STR].values == [0]
    assert ds[RAW_START_DATE_STR].attrs['units'] == units_str
    assert ds[RAW_START_DATE_STR].attrs['calendar'] == cal_str
    assert ds[RAW_END_DATE_STR].values == [90]
    assert ds[RAW_END_DATE_STR].attrs['units'] == units_str
    assert ds[RAW_END_DATE_STR].attrs['calendar'] == cal_str
def test_add_uniform_time_weights():
    """Datasets without weights get unit ('days') weights per timestep."""
    time = np.array([15, 46, 74])
    data = np.zeros((3))
    ds = xr.DataArray(data,
                      coords=[time],
                      dims=[TIME_STR],
                      name='a').to_dataset()
    units_str = 'days since 2000-01-01 00:00:00'
    cal_str = 'noleap'
    ds[TIME_STR].attrs['units'] = units_str
    ds[TIME_STR].attrs['calendar'] = cal_str
    # Precondition: no time-weights variable exists yet.
    with pytest.raises(KeyError):
        ds[TIME_WEIGHTS_STR]
    ds = add_uniform_time_weights(ds)
    time_weights_expected = xr.DataArray(
        [1, 1, 1], coords=ds[TIME_STR].coords, name=TIME_WEIGHTS_STR)
    time_weights_expected.attrs['units'] = 'days'
    assert ds[TIME_WEIGHTS_STR].identical(time_weights_expected)
def test_assert_has_data_for_time():
    """Dates within the data's bounds pass; dates outside raise.

    Builds a CF-decoded dataset spanning 2000-01-01 through 2000-03-31
    (days 0-90 since 2000-01-01) and probes both edges of that range.
    """
    time_bounds = np.array([[0, 31], [31, 59], [59, 90]])
    nv = np.array([0, 1])
    time = np.array([15, 46, 74])
    data = np.zeros((3))
    var_name = 'a'
    ds = xr.DataArray(data,
                      coords=[time],
                      dims=[TIME_STR],
                      name=var_name).to_dataset()
    ds[TIME_BOUNDS_STR] = xr.DataArray(time_bounds,
                                       coords=[time, nv],
                                       dims=[TIME_STR, BOUNDS_STR],
                                       name=TIME_BOUNDS_STR)
    units_str = 'days since 2000-01-01 00:00:00'
    ds[TIME_STR].attrs['units'] = units_str
    ds = ensure_time_avg_has_cf_metadata(ds)
    ds = set_grid_attrs_as_coords(ds)
    ds = xr.decode_cf(ds)
    da = ds[var_name]
    # Exactly the covered range: must not raise.
    start_date = np.datetime64('2000-01-01')
    end_date = np.datetime64('2000-03-31')
    _assert_has_data_for_time(da, start_date, end_date)
    # One day beyond either edge: must raise.
    start_date_bad = np.datetime64('1999-12-31')
    end_date_bad = np.datetime64('2000-04-01')
    with pytest.raises(AssertionError):
        _assert_has_data_for_time(da, start_date_bad, end_date)
    with pytest.raises(AssertionError):
        _assert_has_data_for_time(da, start_date, end_date_bad)
    with pytest.raises(AssertionError):
        _assert_has_data_for_time(da, start_date_bad, end_date_bad)
# Map of CF calendar names to their corresponding cftime date types.
# Aliased calendars ('noleap'/'365_day', 'all_leap'/'366_day') share a type.
_CFTIME_DATE_TYPES = {
    'noleap': cftime.DatetimeNoLeap,
    '365_day': cftime.DatetimeNoLeap,
    '360_day': cftime.Datetime360Day,
    'julian': cftime.DatetimeJulian,
    'all_leap': cftime.DatetimeAllLeap,
    '366_day': cftime.DatetimeAllLeap,
    'gregorian': cftime.DatetimeGregorian,
    'proleptic_gregorian': cftime.DatetimeProlepticGregorian
}
@pytest.mark.filterwarnings('ignore:Unable to decode')
@pytest.mark.parametrize(['calendar', 'date_type'],
                         list(_CFTIME_DATE_TYPES.items()))
def test_assert_has_data_for_time_cftime_datetimes(calendar, date_type):
    """Same bounds check as above, for every cftime calendar/date type.

    Data covers days 0-6 since 0002-01-02 (i.e. 0002-01-02 to 0002-01-08);
    dates one day beyond either edge must raise.
    """
    time_bounds = np.array([[0, 2], [2, 4], [4, 6]])
    nv = np.array([0, 1])
    time = np.array([1, 3, 5])
    data = np.zeros((3))
    var_name = 'a'
    ds = xr.DataArray(data,
                      coords=[time],
                      dims=[TIME_STR],
                      name=var_name).to_dataset()
    ds[TIME_BOUNDS_STR] = xr.DataArray(time_bounds,
                                       coords=[time, nv],
                                       dims=[TIME_STR, BOUNDS_STR],
                                       name=TIME_BOUNDS_STR)
    units_str = 'days since 0002-01-02 00:00:00'
    ds[TIME_STR].attrs['units'] = units_str
    ds[TIME_STR].attrs['calendar'] = calendar
    ds = ensure_time_avg_has_cf_metadata(ds)
    ds = set_grid_attrs_as_coords(ds)
    ds = xr.decode_cf(ds)
    da = ds[var_name]
    start_date = date_type(2, 1, 2)
    end_date = date_type(2, 1, 8)
    _assert_has_data_for_time(da, start_date, end_date)
    start_date_bad = date_type(2, 1, 1)
    end_date_bad = date_type(2, 1, 9)
    with pytest.raises(AssertionError):
        _assert_has_data_for_time(da, start_date_bad, end_date)
    with pytest.raises(AssertionError):
        _assert_has_data_for_time(da, start_date, end_date_bad)
    with pytest.raises(AssertionError):
        _assert_has_data_for_time(da, start_date_bad, end_date_bad)
def test_assert_has_data_for_time_str_input():
    """String dates are accepted, but range validation is skipped for them."""
    time_bounds = np.array([[0, 31], [31, 59], [59, 90]])
    nv = np.array([0, 1])
    time = np.array([15, 46, 74])
    data = np.zeros((3))
    var_name = 'a'
    ds = xr.DataArray(data,
                      coords=[time],
                      dims=[TIME_STR],
                      name=var_name).to_dataset()
    ds[TIME_BOUNDS_STR] = xr.DataArray(time_bounds,
                                       coords=[time, nv],
                                       dims=[TIME_STR, BOUNDS_STR],
                                       name=TIME_BOUNDS_STR)
    units_str = 'days since 2000-01-01 00:00:00'
    ds[TIME_STR].attrs['units'] = units_str
    ds = ensure_time_avg_has_cf_metadata(ds)
    ds = set_grid_attrs_as_coords(ds)
    ds = xr.decode_cf(ds)
    da = ds[var_name]
    start_date = '2000-01-01'
    end_date = '2000-03-31'
    _assert_has_data_for_time(da, start_date, end_date)
    # Out-of-range strings do NOT raise, unlike datetime inputs above.
    start_date_bad = '1999-12-31'
    end_date_bad = '2000-04-01'
    # With strings these checks are disabled
    _assert_has_data_for_time(da, start_date_bad, end_date)
    _assert_has_data_for_time(da, start_date, end_date_bad)
    _assert_has_data_for_time(da, start_date_bad, end_date_bad)
def test_assert_matching_time_coord():
    """Identical time coords pass; a truncated coord raises ValueError."""
    idx = pd.date_range('2000-01-01', '2001-01-01', freq='M')
    first = xr.DataArray(idx, coords=[idx], dims=[TIME_STR])
    second = xr.DataArray(idx, coords=[idx], dims=[TIME_STR])
    assert_matching_time_coord(first, second)
    # Shrink one array's time coordinate so the two no longer match.
    second = second.sel(**{TIME_STR: slice('2000-03', '2000-05')})
    with pytest.raises(ValueError):
        assert_matching_time_coord(first, second)
def test_ensure_time_as_index_ds_no_times(ds_no_time):
    """A dataset lacking a time coordinate is rejected with ValueError."""
    with pytest.raises(ValueError):
        ensure_time_as_index(ds_no_time)
def test_ensure_time_as_index_no_change():
    # Already properly indexed, so shouldn't be modified.
    arr = xr.DataArray([-23, 42.4], coords=[[1, 2]], dims=[TIME_STR])
    arr[TIME_STR].attrs['units'] = 'days since 2000-01-01 00:00:00'
    arr[TIME_STR].attrs['calendar'] = 'standard'
    ds = arr.to_dataset(name='a')
    # Weights and bounds already share the time index.
    ds.coords[TIME_WEIGHTS_STR] = xr.DataArray(
        [1, 1], dims=[TIME_STR], coords={TIME_STR: arr[TIME_STR]}
    )
    ds.coords[TIME_BOUNDS_STR] = xr.DataArray(
        [[0.5, 1.5], [1.5, 2.5]], dims=[TIME_STR, BOUNDS_STR],
        coords={TIME_STR: arr[TIME_STR]}
    )
    # Round trip must be an exact no-op.
    xr.testing.assert_identical(ds, ensure_time_as_index(ds))
def test_ensure_time_as_index_with_change():
    # Time bounds array doesn't index time initially, which gets fixed.
    arr = xr.DataArray([-93], dims=[TIME_STR], coords={TIME_STR: [3]})
    arr[TIME_STR].attrs['units'] = 'days since 2000-01-01 00:00:00'
    arr[TIME_STR].attrs['calendar'] = 'standard'
    ds = arr.to_dataset(name='a')
    ds.coords[TIME_WEIGHTS_STR] = xr.DataArray(
        [1], dims=[TIME_STR], coords={TIME_STR: arr[TIME_STR]}
    )
    ds.coords[TIME_BOUNDS_STR] = xr.DataArray(
        [[3.5, 4.5]], dims=[TIME_STR, BOUNDS_STR],
        coords={TIME_STR: arr[TIME_STR]}
    )
    # isel with a scalar index demotes time from an indexing dimension.
    ds = ds.isel(**{TIME_STR: 0})
    actual = ensure_time_as_index(ds)
    # Expected: the original dataset with time restored as the index.
    expected = arr.to_dataset(name='a')
    expected.coords[TIME_WEIGHTS_STR] = xr.DataArray(
        [1], dims=[TIME_STR], coords={TIME_STR: arr[TIME_STR]}
    )
    expected.coords[TIME_BOUNDS_STR] = xr.DataArray(
        [[3.5, 4.5]], dims=[TIME_STR, BOUNDS_STR],
        coords={TIME_STR: arr[TIME_STR]}
    )
    xr.testing.assert_identical(actual, expected)
def test_sel_time():
    """sel_time records the requested subset start/end dates on the result."""
    time_bounds = np.array([[0, 31], [31, 59], [59, 90]])
    nv = np.array([0, 1])
    time = np.array([15, 46, 74])
    data = np.zeros((3))
    var_name = 'a'
    ds = xr.DataArray(data,
                      coords=[time],
                      dims=[TIME_STR],
                      name=var_name).to_dataset()
    ds[TIME_BOUNDS_STR] = xr.DataArray(time_bounds,
                                       coords=[time, nv],
                                       dims=[TIME_STR, BOUNDS_STR],
                                       name=TIME_BOUNDS_STR)
    units_str = 'days since 2000-01-01 00:00:00'
    ds[TIME_STR].attrs['units'] = units_str
    ds = ensure_time_avg_has_cf_metadata(ds)
    ds = set_grid_attrs_as_coords(ds)
    ds = xr.decode_cf(ds)
    da = ds[var_name]
    start_date = np.datetime64('2000-02-01')
    end_date = np.datetime64('2000-03-31')
    result = sel_time(da, start_date, end_date)
    # The subset's date labels must echo the requested bounds exactly.
    assert result[SUBSET_START_DATE_STR].values == start_date
    assert result[SUBSET_END_DATE_STR].values == end_date
def test_yearly_average_no_mask():
    """yearly_average computes the dt-weighted mean within each year."""
    times = pd.to_datetime(['2000-06-01', '2000-06-15',
                            '2001-07-04', '2001-10-01', '2001-12-31',
                            '2004-01-01'])
    arr = xr.DataArray(np.random.random((len(times),)),
                       dims=[TIME_STR], coords={TIME_STR: times})
    # Random weights with the same time coordinate as the data.
    dt = arr.copy(deep=True)
    dt.values = np.random.random((len(times),))
    actual = yearly_average(arr, dt)
    # Hand-computed weighted means for each year present in the data.
    yr2000 = (arr[0]*dt[0] + arr[1]*dt[1]) / (dt[0] + dt[1])
    yr2001 = ((arr[2]*dt[2] + arr[3]*dt[3] + arr[4]*dt[4]) /
              (dt[2] + dt[3] + dt[4]))
    yr2004 = arr[-1]
    yrs_coord = [2000, 2001, 2004]
    yr_avgs = np.array([yr2000, yr2001, yr2004])
    desired = xr.DataArray(yr_avgs, dims=['year'], coords={'year': yrs_coord})
    xr.testing.assert_allclose(actual, desired)
def test_yearly_average_masked_data():
    """Masked (NaN) points are excluded from the yearly weighted mean."""
    times = pd.to_datetime(['2000-06-01', '2000-06-15',
                            '2001-07-04', '2001-10-01', '2001-12-31',
                            '2004-01-01'])
    arr = xr.DataArray(np.random.random((len(times),)),
                       dims=[TIME_STR], coords={TIME_STR: times})
    # Mask out the first point by replacing the sentinel with NaN.
    arr[0] = -999
    arr = arr.where(arr != -999)
    dt = arr.copy(deep=True)
    dt.values = np.random.random((len(times),))
    actual = yearly_average(arr, dt)
    # 2000's mean reduces to its single unmasked point.
    yr2000 = arr[1]
    yr2001 = ((arr[2]*dt[2] + arr[3]*dt[3] + arr[4]*dt[4]) /
              (dt[2] + dt[3] + dt[4]))
    yr2004 = arr[-1]
    yrs_coord = [2000, 2001, 2004]
    yr_avgs = np.array([yr2000, yr2001, yr2004])
    desired = xr.DataArray(yr_avgs, dims=['year'], coords={'year': yrs_coord})
    xr.testing.assert_allclose(actual, desired)
def test_average_time_bounds(ds_time_encoded_cf):
    """The derived time coordinate is the midpoint of each bounds pair."""
    ds = ds_time_encoded_cf
    midpoints = ds[TIME_BOUNDS_STR].mean(dim=BOUNDS_STR).values
    expected = xr.DataArray(midpoints, dims=[TIME_STR],
                            coords={TIME_STR: midpoints}, name=TIME_STR)
    actual = average_time_bounds(ds)[TIME_STR]
    xr.testing.assert_identical(actual, expected)
# (input, expected year) pairs for infer_year: numpy, stdlib, and string
# dates, plus one date of every cftime type.
_INFER_YEAR_TESTS = [
    (np.datetime64('2000-01-01'), 2000),
    (datetime.datetime(2000, 1, 1), 2000),
    ('2000', 2000),
    ('2000-01', 2000),
    ('2000-01-01', 2000)
]
_INFER_YEAR_TESTS = _INFER_YEAR_TESTS + [
    (date_type(2000, 1, 1), 2000) for date_type in _CFTIME_DATE_TYPES.values()]
@pytest.mark.parametrize(
    ['date', 'expected'],
    _INFER_YEAR_TESTS)
def test_infer_year(date, expected):
    """infer_year extracts the calendar year from each supported date type."""
    result = infer_year(date)
    assert result == expected
@pytest.mark.parametrize('date', ['-0001', 'A001', '01'])
def test_infer_year_invalid(date):
    """Malformed year strings are rejected with ValueError."""
    with pytest.raises(ValueError):
        infer_year(date)
# --- Parametrization table for maybe_convert_to_index_date_type ---
# Case 1: a pandas DatetimeIndex with each cftime date: expect conversion
# to np.datetime64.
_DATETIME_INDEX = pd.date_range('2000-01-01', freq='M', periods=1)
_DATETIME_CONVERT_TESTS = {}
for date_label, date_type in _CFTIME_DATE_TYPES.items():
    key = 'DatetimeIndex-{}'.format(date_label)
    _DATETIME_CONVERT_TESTS[key] = (_DATETIME_INDEX, date_type(2000, 1, 1),
                                    np.datetime64('2000-01'))
# Case 2: DatetimeIndex with non-cftime dates: strings pass through
# unchanged; datetime-likes convert to np.datetime64.
_NON_CFTIME_DATES = {
    'datetime.datetime': datetime.datetime(2000, 1, 1),
    'np.datetime64': np.datetime64('2000-01-01'),
    'str': '2000'
}
for date_label, date in _NON_CFTIME_DATES.items():
    key = 'DatetimeIndex-{}'.format(date_label)
    if isinstance(date, str):
        _DATETIME_CONVERT_TESTS[key] = (_DATETIME_INDEX, date, date)
    else:
        _DATETIME_CONVERT_TESTS[key] = (_DATETIME_INDEX, date,
                                        np.datetime64('2000-01'))
# Case 3: every CFTimeIndex paired with every cftime date type: expect
# conversion to the index's own date type.
_CFTIME_INDEXES = {
    'CFTimeIndex[{}]'.format(key): xr.CFTimeIndex([value(1, 1, 1)]) for
    key, value in _CFTIME_DATE_TYPES.items()
}
_CFTIME_CONVERT_TESTS = {}
for ((index_label, index),
     (date_label, date_type)) in product(_CFTIME_INDEXES.items(),
                                         _CFTIME_DATE_TYPES.items()):
    key = '{}-{}'.format(index_label, date_label)
    _CFTIME_CONVERT_TESTS[key] = (index, date_type(1, 1, 1),
                                  index.date_type(1, 1, 1))
# Case 4: every CFTimeIndex paired with each non-cftime date.
_NON_CFTIME_DATES_0001 = {
    'datetime.datetime': datetime.datetime(1, 1, 1),
    'np.datetime64': np.datetime64('0001-01-01'),
    'str': '0001'
}
# BUG FIX: this loop previously unpacked the index label as ``idx_label``
# but built ``key`` from ``index_label`` -- the stale variable left over
# from the loop above.  Every key therefore reused the same index label,
# entries overwrote one another, and most index/date combinations were
# never actually parametrized.
for ((index_label, index),
     (date_label, date)) in product(_CFTIME_INDEXES.items(),
                                    _NON_CFTIME_DATES_0001.items()):
    key = '{}-{}'.format(index_label, date_label)
    if isinstance(date, str):
        _CFTIME_CONVERT_TESTS[key] = (index, date, date)
    else:
        _CFTIME_CONVERT_TESTS[key] = (index, date, index.date_type(1, 1, 1))
_CONVERT_DATE_TYPE_TESTS = _merge_dicts(_DATETIME_CONVERT_TESTS,
                                        _CFTIME_CONVERT_TESTS)
@pytest.mark.parametrize(['index', 'date', 'expected'],
                         list(_CONVERT_DATE_TYPE_TESTS.values()),
                         ids=list(_CONVERT_DATE_TYPE_TESTS.keys()))
def test_maybe_convert_to_index_date_type(index, date, expected):
    """Dates are coerced to the date type matching the given index."""
    assert maybe_convert_to_index_date_type(index, date) == expected
def test_prep_time_data_with_time_bounds(ds_with_time_bounds):
    """prep_time_data derives time weights when bounds are present."""
    assert (TIME_BOUNDS_STR in ds_with_time_bounds)
    assert (TIME_WEIGHTS_STR not in ds_with_time_bounds)
    prepped = prep_time_data(ds_with_time_bounds)
    assert (TIME_WEIGHTS_STR in prepped)
def test_prep_time_data_no_time_bounds(ds_inst, caplog):
    """Missing time bounds triggers the 'dt array not found' log warning."""
    assert (TIME_BOUNDS_STR not in ds_inst)
    prep_time_data(ds_inst)
    message = caplog.record_tuples[0][-1]
    assert message.startswith("dt array not found.")
|
# Generated by Django 3.0.4 on 2020-03-17 06:52
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the unused ``Fare`` field from the ``titanic`` model."""

    dependencies = [
        ('titanic', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='titanic',
            name='Fare',
        ),
    ]
|
# Calculator of angle between hour and minute needle
def clock_angle(hour, minute):
    """Return the smaller angle in degrees between the two clock hands.

    ``hour`` is on a 12-hour clock (1-12); ``minute`` is a minute value
    (values are taken modulo 60, matching the original script).
    """
    hour_angle = 360 / 12 * (hour % 12)
    minute_angle = 360 / 60 * (minute % 60)
    # The hour hand drifts proportionally as the minute hand advances.
    hour_angle += (minute % 60) / 60 * 360 / 12
    angle = (360 + hour_angle - minute_angle) % 360
    # Report the smaller of the two angles between the hands.
    if angle > 180:
        angle = 360 - angle
    return angle


def _read_int(prompt, low, high):
    """Prompt repeatedly until the user enters an integer in [low, high].

    Bug fix: the original loop condition (``1 >= H <= 12 and ...``) was
    logically wrong and, because the success branch always broke out of
    the loop, out-of-range and negative values were silently accepted
    (the original even noted "even negative numbers can exit the loop").
    The advertised bounds are now enforced.
    """
    while True:
        try:
            value = int(input(prompt))
        except ValueError:
            print("A corresponding number wasn't introduced !")
            continue
        if low <= value <= high:
            return value
        print("Value must be between %d and %d !" % (low, high))


def main():
    """Read an hour and minute from the user and print the hand angle."""
    print("This program indicates the angle between hour and minute needle!. "
          "This program works with 12 hour clock !")
    hour = _read_int("Please introduce the hour between 1 and 12:", 1, 12)
    minute = _read_int("Please introduce the minute between 1 and 60:", 1, 60)
    angle = clock_angle(hour, minute)
    r = "The angle is: %d" % angle + " degrees !"
    print(r)


if __name__ == "__main__":
    main()
from django import forms
import datetime
from django.contrib.admin import widgets
from django.contrib.admin.widgets import AdminTimeWidget,AdminDateWidget
#import html5.forms.widgets as html5_widgets
#from suit.widgets import SuitDateWidget, SuitTimeWidget, SuitSplitDateTimeWidget
from functools import partial
# Factory for a read-only date input widget (CSS class 'dateinput').
DateInput = partial(forms.DateInput, {'class': 'dateinput', 'readonly':'true'})
#time_widget = forms.widgets.TimeInput(attrs={'class': 'timepicker', 'readonly':'true'})
#time_widget.format = '%I:%M %p'
class loginForm(forms.Form):
    """Simple credential form with username and password text fields."""
    username = forms.CharField(max_length=50)
    password = forms.CharField(max_length=50)
    #Transaccion = forms.CharField(widget=forms.TextInput(attrs={'readonly':'readonly'}))
|
import datetime
from io import StringIO
import sys
import signal
import time
import os
import configparser
from configobj import ConfigObj
from subprocess import call
import logging
from watchdog.observers import Observer
from watchdog.events import LoggingEventHandler
from watchdog.events import FileSystemEventHandler
from threading import Thread
import sys, getopt
import subprocess
import ast
import re
from shutil import copyfile, copy
import shutil
from suitable import Api
from getpass import getpass
from suitable import Api
from getpass import getpass
from PyQt5 import Qt, QtWidgets, QtCore, QtGui, QtWebEngine, QtWebEngineWidgets, QtWebEngineWidgets
from PyQt5.QtCore import pyqtSlot
class Worker(QtCore.QRunnable):
    """Generic worker for executing a callable on a Qt thread pool.

    Captures an arbitrary function together with its positional and
    keyword arguments so that ``QThreadPool`` can invoke it later via
    :meth:`run`.

    :param fn: the callable to execute on the worker thread
    :param args: positional arguments forwarded to ``fn``
    :param kwargs: keyword arguments forwarded to ``fn``
    """

    def __init__(self, fn, *args, **kwargs):
        super(Worker, self).__init__()
        # Stored verbatim and replayed when the pool schedules run().
        self.fn = fn
        self.args = args
        self.kwargs = kwargs

    @pyqtSlot()
    def run(self):
        """Invoke the stored callable with its captured arguments."""
        self.fn(*self.args, **self.kwargs)
class Plot():
    """Minimal container identifying the quickview plot by name."""

    def __init__(self):
        # Display name used when referring to this plot.
        self.name = 'Quickview Plot'
class MainWindow(QtWidgets.QMainWindow):
    def __init__(self, *args, **kwargs):
        """Build the main radar-control window.

        Reads paths and defaults from ``gui_cfg.ini``, initialises all
        state flags and radar parameters, constructs the widget layout,
        kicks off the background state-check worker, and starts a 500 ms
        poll timer that re-runs ``check_state``.
        """
        super(MainWindow, self).__init__(*args,**kwargs)
        gui_ini = 'gui_cfg.ini'
        config = configparser.ConfigParser(comment_prefixes='/', allow_no_value=True)
        config.optionxform = lambda option: option # preserve case for letters
        config.read(gui_ini)
        self.stdout_result = StringIO()
        # File paths and directories pulled from the [Files]/[HeaderDirects]
        # sections of the config file.
        self.fav_ini = config.get('Files','target_favs')
        self.nextrad_ini = config.get('Files','nextrad_header')
        self.connections_ini = config.get('Files','connection')
        self.cnc_header_loc = config.get('HeaderDirects','cnc')
        self.experiment_log_file = config.get('Files','log_file')
        self.save_directory = config.get('Files','save_folder')
        self.node_header_dir = config.get('HeaderDirects','nodes')
        self.image_directory = config.get('HeaderDirects','save_imgs')
        self.pentek_header_dir = config.get('HeaderDirects','penteks')
        self.rhino_header_dir = config.get('HeaderDirects','rhinos')
        self.gpsdo_header_dir = config.get('HeaderDirects','gpsdos')
        self.local_copy_delay = config.get('Config','local_copy_delay')
        self.tcu_pulse_editor = config.get('Files','tcu_pulse_editor')
        self.gps0_ini = config.get('HeaderDirects','gps0') + '/gps_info.ini'
        self.gps1_ini = config.get('HeaderDirects','gps1') + '/gps_info.ini'
        self.gps2_ini = config.get('HeaderDirects','gps2') + '/gps_info.ini'
        # Default toggles from the [Config] section (stored as 0/1 ints).
        self.autosaving = int(config.get('Config','auto_save_default'))
        self.map_style = int(config.get('Config','map_style_default'))
        self.node_0_on = int(config.get('Config','node_0_default'))
        self.node_1_on = int(config.get('Config','node_1_default'))
        self.node_2_on = int(config.get('Config','node_2_default'))
        print('NeXtRAD Header: ' + self.nextrad_ini)
        #State Flags
        #self.autosaving = 0 #Autosaving feature off by default
        self.current_time = 0
        self.stop_timer = 0
        self.ansi_running = 0
        self.time_running = 0
        self.remaining_time = 0
        self.experiment_running = 0
        self.time_remaining = 0
        self.plot_is_active = 0
        self.save_scene_check = 0
        self.num_pri = 0
        self.pent_zero_include = 0
        self.rhino_zero_include = 0
        self.node_zero_include = 0
        self.pent_one_include = 0
        self.rhino_one_include = 0
        self.node_one_include = 0
        self.pent_two_include = 0
        self.rhino_two_include = 0
        self.node_two_include = 0
        self.cam0_include = 0
        self.cam1_include = 0
        self.cam2_include = 0
        self.abort_flag = 0
        self.node_0_is_on = 0
        self.node_1_is_on = 0
        self.node_2_is_on = 0
        #self.map_style = 0
        #Initialisers
        self.node_1_latlong = '0,0'
        self.node_2_latlong = '0,0'
        self.node_0_latlong = '0,0'
        self.target_latlong = '0,0'
        # NOTE(review): the next three lines all assign ``node_0_ht`` --
        # presumably ``node_1_ht`` and ``node_2_ht`` were intended.
        # Confirm before relying on those attributes elsewhere.
        self.node_0_ht = '0'
        self.node_0_ht = '0'
        self.node_0_ht = '0'
        self.target_ht = '0'
        self.target_name = ''
        #nodes
        # NOTE(review): likewise, these three lines all assign
        # ``node0_location_name`` -- node1/node2 names appear intended.
        self.node0_location_name = ''
        self.node0_location_name = ''
        self.node0_location_name = ''
        self.experiment_name = ''
        self.target_description = ''
        # NOTE(review): ``start_time`` is set to '' here and overwritten
        # with 0 below under "Radar Parameters" -- confirm intended type.
        self.start_time = ''
        self.experiment_start_string = ''
        self.create_host_dict()
        #Radar Parameters
        self.start_time = 0
        self.pri = []
        self.num_pri = 0
        self.bands = []
        self.pols = []
        self.pulse_width = []
        #Start the Threadpool
        self.threadpool = QtCore.QThreadPool()
        print("Multithreading with maximum %d threads" % self.threadpool.maxThreadCount())
        self.setup_layout()
        #self.update_quickview_ims()
        self.init_nextheader_values()
        # Run the first state check on a pool thread, then poll via timer.
        state_update = Worker(self.check_state)
        self.threadpool.start(state_update)
        self.init_map()
        self.target_details.setText(self.target_latlong)
        self.change_map_style()
        self.experiment_name_edit.setText(self.experiment_name)
        self.add_favourites()
        self.show()
        self.timer = QtCore.QTimer(self)
        # self.imagetimer = QtCore.QTimer(self)
        self.timer.timeout.connect(self.check_state)
        #self.imagetimer.timeout.connect(self.quickview_image_refresh)
        #self.imagetimer.timeout.connect(self.update_quickview_ims)
        #self.timer.timeout.connect(self.update_video_stream)
        self.timer.start(500)
        # self.imagetimer.start(1000)
def collate(self):
print('TODO')
    def setup_layout(self):
        """Construct and wire up every widget in the main window.

        Creates the labels, buttons, text inputs, checkboxes, embedded
        web views (map + camera streams), and the nested group-box /
        grid layout, then installs the result as the central widget.
        Widget creation order matters: signal connections reference
        widgets made earlier in this method.
        """
        layout = QtWidgets.QGridLayout()
        #GUI Text
        general_font = QtGui.QFont('Open Sans',16)
        self.lbl_experi_name = QtWidgets.QLabel('Experiment Description:')
        self.lbl_trgt_latlong = QtWidgets.QLabel('Target (Lat,Long):')
        self.lbl_scene = QtWidgets.QLabel('Scene Notes:')
        self.lbl_timer = QtWidgets.QLabel('0')
        timer_font = QtGui.QFont('Open Sans',20, QtGui.QFont.Bold)
        self.lbl_timer.setFont(timer_font)
        self.lbl_timer.setAlignment(Qt.Qt.AlignCenter)
        #self.lbl_timer.setFont(18)
        self.lbl_cnc = QtWidgets.QLabel('CNC')
        self.pent_zero = QtWidgets.QLabel('Pentek 0')
        self.pent_one = QtWidgets.QLabel('Pentek 1')
        self.pent_two = QtWidgets.QLabel('Pentek 2')
        self.rhino_zero = QtWidgets.QLabel('Rhino 0')
        self.rhino_one = QtWidgets.QLabel('Rhino 1')
        self.rhino_two = QtWidgets.QLabel('Rhino 2')
        self.node_zero = QtWidgets.QLabel('Node 0')
        self.node_one = QtWidgets.QLabel('Node 1')
        self.node_two = QtWidgets.QLabel('Node 2')
        # NOTE(review): lbl_cnc is created a second time here, shadowing
        # the instance created above.
        self.lbl_cnc = QtWidgets.QLabel('CNC')
        self.lbl_ntp = QtWidgets.QLabel('NTP')
        self.lbl_bullet_0_tx = QtWidgets.QLabel('Bullet 0 TX ')
        self.lbl_bullet_1_tx = QtWidgets.QLabel('Bullet 1 TX ')
        self.lbl_bullet_2_tx = QtWidgets.QLabel('Bullet 2 TX ')
        self.lbl_bullet_0_rx = QtWidgets.QLabel('Bullet 0 RX ')
        self.lbl_bullet_1_rx = QtWidgets.QLabel('Bullet 1 RX ')
        self.lbl_bullet_2_rx = QtWidgets.QLabel('Bullet 2 RX ')
        # - Pulse Parameters
        self.lbl_rt = QtWidgets.QLabel('Length: ' + str(self.time_remaining) + 's')
        self.lbl_fc = QtWidgets.QLabel('Freq: ' + str(self.bands))
        self.lbl_pw = QtWidgets.QLabel('PW: ' + str(self.pulse_width))
        self.lbl_pol = QtWidgets.QLabel('Pols: ' + str(self.pols))
        self.lbl_pri = QtWidgets.QLabel('PRI: ' + str(self.pri))
        #Pushbuttons
        self.map_style_button = QtWidgets.QPushButton('Map Style')
        self.run_button = QtWidgets.QPushButton('Run')
        self.abort_button = QtWidgets.QPushButton('Quickview')
        self.save_button = QtWidgets.QPushButton('Save ADC Data')
        self.map_style_button.clicked.connect(self.change_map_style)
        self.save_button.clicked.connect(self.save_adc_data)
        self.run_button.clicked.connect(self.run_experiment)
        self.abort_button.clicked.connect(self.run_quickview)
        self.target_accept = QtWidgets.QPushButton('Enter Details')
        self.description_accept = QtWidgets.QPushButton('Enter Details')
        self.scene_accept = QtWidgets.QPushButton('Enter Details')
        self.target_accept.clicked.connect(self.enter_target_details)
        self.description_accept.clicked.connect(self.enter_description)
        self.pulses_button = QtWidgets.QPushButton('Pulses')
        self.pulses_button.clicked.connect(self.run_pulse_editor)
        #Textboxes
        self.target_details = QtWidgets.QLineEdit(self)
        self.target_details.editingFinished.connect(self.enter_target_details)
        self.experiment_name_edit = QtWidgets.QLineEdit(self)
        self.experiment_name_edit.editingFinished.connect(self.enter_description)
        self.scene_edit = QtWidgets.QLineEdit(self)
        #self.azimuth_rotation = QtWidgets.QLineEdit(self)
        #self.scene_edit.editingFinished.connect(self.enter_scene)
        #DropDown
        self.fav_drop = QtWidgets.QComboBox(self)
        self.fav_drop.activated[str].connect(self.to_fav)
        #Checkboxes
        self.node_0_disable = QtWidgets.QCheckBox("Node 0",self)
        self.node_0_disable.stateChanged.connect(self.disable_node0)
        if self.node_0_on == 1:
            self.node_0_disable.setChecked(True)
        self.node_1_disable = QtWidgets.QCheckBox("Node 1",self)
        self.node_1_disable.stateChanged.connect(self.disable_node1)
        if self.node_1_on == 1:
            self.node_1_disable.setChecked(True)
        self.node_2_disable = QtWidgets.QCheckBox("Node 2",self)
        self.node_2_disable.stateChanged.connect(self.disable_node2)
        if self.node_2_on == 1:
            self.node_2_disable.setChecked(True)
        self.auto_save = QtWidgets.QCheckBox("Autosave",self)
        self.auto_save.stateChanged.connect(self.auto_saver)
        if self.autosaving == 1:
            self.auto_save.setChecked(True)
        #Connection Stats
        #Penteks
        # self.pent_1 = QLabel('Pentek 0')
        # self.pent_2 = QLabel('Pentek 0')
        #images
        #self.quickview_image_refresh()
        # Embedded web view showing the local leaflet/map page.
        self.view = QtWebEngineWidgets.QWebEngineView()
        self.view.load(QtCore.QUrl.fromLocalFile(os.path.abspath('map.html')))
        self.columna = QtWidgets.QGroupBox()
        #groupbox.setCheckable(True)
        #layout.addWidget(self.columna,0,0)
        self.abox = QtWidgets.QVBoxLayout()
        #self.columna.setLayout(self.abox)
        self.columnb = QtWidgets.QGroupBox()
        #groupbox.setCheckable(True)
        layout.addWidget(self.columnb,0,0)
        self.bbox = QtWidgets.QVBoxLayout()
        self.columnb.setLayout(self.bbox)
        self.mid = QtWidgets.QGroupBox('Target and Geometry')
        #groupbox.setCheckable(True)
        self.bbox.addWidget(self.mid)
        self.midbox = QtWidgets.QGridLayout()
        self.mid.setLayout(self.midbox)
        self.columnd = QtWidgets.QGroupBox()
        #groupbox.setCheckable(True)
        #layout.addWidget(self.columnd,0,0)
        self.dbox = QtWidgets.QVBoxLayout()
        self.columnd.setLayout(self.dbox)
        self.vids = QtWidgets.QGroupBox("Cameras")
        #groupbox.setCheckable(True)
        self.dbox.addWidget(self.vids)
        self.vidbox = QtWidgets.QGridLayout()
        #self.vidbox.addStretch(1)
        self.vids.setLayout(self.vidbox)
        self.columnc = QtWidgets.QGroupBox()
        #groupbox.setCheckable(True)
        layout.addWidget(self.columnc,0,2)
        self.cbox = QtWidgets.QVBoxLayout()
        self.columnc.setLayout(self.cbox)
        self.runs = QtWidgets.QGroupBox("Run Experiment")
        #groupbox.setCheckable(True)
        self.cbox.addWidget(self.runs)
        self.veepbox = QtWidgets.QVBoxLayout()
        self.veepbox.addStretch(1)
        self.runs.setLayout(self.veepbox)
        self.groupbox = QtWidgets.QGroupBox("Radar Control")
        #groupbox.setCheckable(True)
        self.cbox.addWidget(self.groupbox)
        self.vbox = QtWidgets.QVBoxLayout()
        self.groupbox.setLayout(self.vbox)
        self.pps = QtWidgets.QGroupBox("Pulse Parameters")
        #groupbox.setCheckable(True)
        self.cbox.addWidget(self.pps)
        self.veebox = QtWidgets.QVBoxLayout()
        self.pps.setLayout(self.veebox)
        self.veepbox.addWidget(self.lbl_timer)
        self.veepbox.addWidget(self.run_button)
        self.vbox.addWidget(self.abort_button)
        self.vbox.addWidget(self.pulses_button)
        self.vbox.addWidget(self.save_button)
        self.vbox.addWidget(self.auto_save)
        # Three camera web views, initially pointed at empty URLs.
        self.cam0 = QtWebEngineWidgets.QWebEngineView()
        self.cam0.setUrl(QtCore.QUrl(''))
        self.vidbox.addWidget(self.cam0,0,0)
        self.cam1 = QtWebEngineWidgets.QWebEngineView()
        self.cam1.setUrl(QtCore.QUrl(''))
        self.vidbox.addWidget(self.cam1,1,0)
        self.cam2 = QtWebEngineWidgets.QWebEngineView()
        self.cam2.setUrl(QtCore.QUrl(''))
        self.vidbox.addWidget(self.cam2,2,0)
        self.cam0.resize(100, 100)
        self.cam1.resize(100, 100)
        self.cam2.resize(100, 100)
        self.veebox.addWidget(self.lbl_rt)
        self.veebox.addWidget(self.lbl_fc)
        self.veebox.addWidget(self.lbl_pri)
        self.veebox.addWidget(self.lbl_pw)
        self.veebox.addWidget(self.lbl_pol)
        self.midbox.addWidget(self.lbl_experi_name,0,0)
        self.midbox.addWidget(self.experiment_name_edit,0,1)
        self.midbox.addWidget(self.description_accept,0,2)
        self.midbox.addWidget(self.view,1,0,10,3)
        self.midbox.addWidget(self.map_style_button,10,0)
        self.midbox.addWidget(self.fav_drop,1,2)
        self.midbox.addWidget(self.lbl_trgt_latlong,11,0)
        self.midbox.addWidget(self.target_details,11,1)
        self.midbox.addWidget(self.target_accept,11,2)
        self.midbox.addWidget(self.lbl_scene,12,0)
        self.midbox.addWidget(self.scene_edit,12,1,1,2)
        #layout.addWidget(self.scene_accept,18,3)
        self.connects = QtWidgets.QGroupBox('Network')
        #groupbox.setCheckable(True)
        self.cbox.addWidget(self.connects)
        self.conbox = QtWidgets.QGridLayout()
        self.connects.setLayout(self.conbox)
        self.conbox.addWidget(self.lbl_cnc,0,0)
        self.conbox.addWidget(self.lbl_ntp,0,1)
        self.conbox.addWidget(self.lbl_bullet_0_tx,1,0)
        self.conbox.addWidget(self.lbl_bullet_0_rx,2,0)
        self.conbox.addWidget(self.lbl_bullet_1_tx,1,1)
        self.conbox.addWidget(self.lbl_bullet_1_rx,2,1)
        self.conbox.addWidget(self.lbl_bullet_2_tx,1,2)
        self.conbox.addWidget(self.lbl_bullet_2_rx,2,2)
        self.conbox.addWidget(self.node_0_disable,3,0)
        self.conbox.addWidget(self.pent_zero,4,0)
        self.conbox.addWidget(self.node_zero,5,0)
        self.conbox.addWidget(self.rhino_zero,6,0)
        # layout.addWidget(self.n0ch0,7,4,1,1)
        # layout.addWidget(self.n0ch1,8,4,1,1)
        # layout.addWidget(self.n0ch2,9,4,1,1)
        self.conbox.addWidget(self.node_1_disable,3,1)
        self.conbox.addWidget(self.pent_one,4,1)
        self.conbox.addWidget(self.node_one,5,1)
        self.conbox.addWidget(self.rhino_one,6,1)
        # layout.addWidget(self.n1ch0,7,5,1,1)
        # layout.addWidget(self.n1ch1,8,5,1,1)
        # layout.addWidget(self.n1ch2,9,5,1,1)
        self.conbox.addWidget(self.node_2_disable,3,2)
        self.conbox.addWidget(self.pent_two,4,2)
        self.conbox.addWidget(self.node_two,5,2)
        self.conbox.addWidget(self.rhino_two,6,2)
        # layout.addWidget(self.n2ch0,7,6,1,1)
        # layout.addWidget(self.n2ch1,8,6,1,1)
        # layout.addWidget(self.n2ch2,9,6,1,1)
        # self.ims = QtWidgets.QGroupBox('Quickviews')
        # #groupbox.setCheckable(True)
        # #self.cbox.addWidget(self.ims)
        # self.imbox = QtWidgets.QGridLayout()
        # #self.ims.setLayout(self.imbox)
        # self.lbl_ims_node0 = QtWidgets.QLabel('Node0')
        # self.lbl_ims_node1 = QtWidgets.QLabel('Node1')
        # self.lbl_ims_node2 = QtWidgets.QLabel('Node2')
        # self.lbl_ims_ch0 = QtWidgets.QLabel('L')
        # self.lbl_ims_ch1 = QtWidgets.QLabel('XV')
        # self.lbl_ims_ch2 = QtWidgets.QLabel('XH')
        # self.lbl_ims_node0.setAlignment(Qt.Qt.AlignCenter)
        # self.lbl_ims_node1.setAlignment(Qt.Qt.AlignCenter)
        # self.lbl_ims_node2.setAlignment(Qt.Qt.AlignCenter)
        # self.lbl_ims_ch0.setAlignment(Qt.Qt.AlignCenter)
        # self.lbl_ims_ch1.setAlignment(Qt.Qt.AlignCenter)
        # self.lbl_ims_ch2.setAlignment(Qt.Qt.AlignCenter)
        self.abox.addStretch(1)
        self.cbox.addStretch(1)
        w = QtWidgets.QWidget()
        w.setLayout(layout)
        self.setCentralWidget(w)
def add_favourites(self):
config = configparser.ConfigParser(comment_prefixes='/', allow_no_value=True)
config.optionxform = lambda option: option # preserve case for letters
config.read(self.fav_ini)
for item in config.items('Targets'):
self.fav_drop.addItem(item[0])
def to_fav(self, text):
config = configparser.ConfigParser(comment_prefixes='/', allow_no_value=True)
config.optionxform = lambda option: option # preserve case for letters
config.read(self.fav_ini)
self.target_latlong = config['Targets'][text]
self.target_details.setText(self.target_latlong)
self.update_map('var trgt = ','var trgt = [' + self.target_latlong + '];\n')
def update_quickview_ims(self):
self.imbox.addWidget(self.lbl_ims_node0,0,0)
self.imbox.addWidget(self.lbl_ims_node1,0,1)
self.imbox.addWidget(self.lbl_ims_node2,0,2)
self.imbox.addWidget(self.lbl_ims_ch0,1,3)
self.imbox.addWidget(self.lbl_ims_ch1,2,3)
self.imbox.addWidget(self.lbl_ims_ch2,3,3)
self.imbox.addWidget(self.n0ch0,1,0)
self.imbox.addWidget(self.n0ch1,2,0)
self.imbox.addWidget(self.n0ch2,3,0)
self.imbox.addWidget(self.n1ch0,1,1)
self.imbox.addWidget(self.n1ch1,2,1)
self.imbox.addWidget(self.n1ch2,3,1)
self.imbox.addWidget(self.n2ch0,1,2)
self.imbox.addWidget(self.n2ch1,2,2)
self.imbox.addWidget(self.n2ch2,3,2)
def update_video_stream(self):
if self.host_dict['ipcam0']['is_con'] == 0:
self.vidbox.addWidget(self.n1ch2,0,0)
else:
self.cam0 = QtWebEngineWidgets.QWebEngineView()
self.cam0.setUrl(QtCore.QUrl('https://www.youtube.com/watch?v=avaSdC0QOUM'))
self.vidbox.addWidget(self.cam0,0,0)
def quickview_image_refresh(self):
self.n0ch0 = QtWidgets.QLabel(self)
pixmap = QtGui.QPixmap('./quickview_images/Range-Time-Intensity-ch0-n0.jpg')
pixmap = pixmap.scaled(70, 70, QtCore.Qt.KeepAspectRatio)
self.n0ch0.setPixmap(pixmap)
#self.resize(pixmap.width(), pixmap.height())
self.n0ch1 = QtWidgets.QLabel(self)
pixmap = QtGui.QPixmap('./quickview_images/Range-Time-Intensity-ch1-n0.jpg')
pixmap = pixmap.scaled(70, 70, QtCore.Qt.KeepAspectRatio)
self.n0ch1.setPixmap(pixmap)
#self.resize(pixmap.width(), pixmap.height())
self.n0ch2 = QtWidgets.QLabel(self)
pixmap = QtGui.QPixmap('./quickview_images/Range-Time-Intensity-ch2-n0.jpg')
pixmap = pixmap.scaled(70, 70, QtCore.Qt.KeepAspectRatio)
self.n0ch2.setPixmap(pixmap)
#self.resize(pixmap.width(), pixmap.height())
self.n1ch0 = QtWidgets.QLabel(self)
pixmap = QtGui.QPixmap('./quickview_images/Range-Time-Intensity-ch0-n1.jpg')
pixmap = pixmap.scaled(70, 70, QtCore.Qt.KeepAspectRatio)
self.n1ch0.setPixmap(pixmap)
#self.resize(pixmap.width(), pixmap.height())
self.n1ch1 = QtWidgets.QLabel(self)
pixmap = QtGui.QPixmap('./quickview_images/Range-Time-Intensity-ch1-n1.jpg')
pixmap = pixmap.scaled(70, 70, QtCore.Qt.KeepAspectRatio)
self.n1ch1.setPixmap(pixmap)
#self.resize(pixmap.width(), pixmap.height())
self.n1ch2 = QtWidgets.QLabel(self)
pixmap = QtGui.QPixmap('./quickview_images/Range-Time-Intensity-ch2-n1.jpg')
pixmap = pixmap.scaled(70, 70, QtCore.Qt.KeepAspectRatio)
self.n1ch2.setPixmap(pixmap)
#self.resize(pixmap.width(), pixmap.height())
self.n2ch0 = QtWidgets.QLabel(self)
pixmap = QtGui.QPixmap('./quickview_images/Range-Time-Intensity-ch0-n2.jpg')
pixmap = pixmap.scaled(70, 70, QtCore.Qt.KeepAspectRatio)
self.n2ch0.setPixmap(pixmap)
#self.resize(pixmap.width(), pixmap.height())
self.n2ch1 = QtWidgets.QLabel(self)
pixmap = QtGui.QPixmap('./quickview_images/Range-Time-Intensity-ch1-n2.jpg')
pixmap = pixmap.scaled(70, 70, QtCore.Qt.KeepAspectRatio)
self.n2ch1.setPixmap(pixmap)
#self.resize(pixmap.width(), pixmap.height())
self.n2ch2 = QtWidgets.QLabel(self)
pixmap = QtGui.QPixmap('./quickview_images/Range-Time-Intensity-ch2-n2.jpg')
pixmap = pixmap.scaled(70, 70, QtCore.Qt.KeepAspectRatio)
self.n2ch2.setPixmap(pixmap)
#self.resize(pixmap.width(), pixmap.height())
# #Layout Setup
def init_nextheader_values(self):
config = configparser.ConfigParser(comment_prefixes='/', allow_no_value=True)
config.optionxform = lambda option: option # preserve case for letters
config.read(self.nextrad_ini)
self.node_0_latlong = config.get('GeometrySettings','NODE0_LOCATION_LAT')+','+config.get('GeometrySettings','NODE0_LOCATION_LON')
self.node_1_latlong = config.get('GeometrySettings','NODE1_LOCATION_LAT')+','+config.get('GeometrySettings','NODE1_LOCATION_LON')
self.node_2_latlong = config.get('GeometrySettings','NODE2_LOCATION_LAT')+','+config.get('GeometrySettings','NODE2_LOCATION_LON')
self.node_0_ht = config.get('GeometrySettings','NODE0_LOCATION_HT')
self.node_1_ht = config.get('GeometrySettings','NODE1_LOCATION_HT')
self.node_2_ht = config.get('GeometrySettings','NODE2_LOCATION_HT')
self.target_latlong = config.get('TargetSettings','TGT_LOCATION_LAT')+','+config.get('TargetSettings','TGT_LOCATION_LON')
self.target_ht = config.get('TargetSettings','TGT_LOCATION_HT')
self.experiment_name = config.get('Notes','EXPERIMENT_NAME')
self.target_description = config.get('Notes','EXPERIMENT_NOTES')
self.num_pri = int(config.get('PulseParameters','NUM_PRIS'))
self.pulses = config.get('PulseParameters','PULSES')
self.pri = []
self.bands = []
self.pols = []
self.pulse_width = []
pulses = []
pulses_arr = self.pulses.split('|')
for pulse in range(len(pulses_arr)):
pulses_arr[pulse]=pulses_arr[pulse].replace('\"','')
#print(pulse)
pulses.append(pulses_arr[pulse].split(','))
#print(pulses[pulse])
self.pri.append(float(pulses[pulse][1]))
self.pols.append(int(pulses[pulse][2]))
self.bands.append(float(pulses[pulse][3].replace('\"','')))
self.pulse_width.append(float(pulses[pulse][0].replace('\"','')))
self.time_remaining = self.pri[0]*self.num_pri #In microseconds
self.lbl_fc.setText('Freq: ' + str(self.bands))
self.lbl_rt.setText('Length: ' + str(self.time_remaining/1000000) + 's')
self.lbl_pw.setText('PW: ' + str(self.pulse_width))
self.lbl_pol.setText('Pols: ' + str(self.pols))
self.lbl_pri.setText('PRI: ' + str(self.pri))
def init_map(self):
self.update_map('var n0 = ','var n0 = [' + self.node_0_latlong + '];\n')
self.update_map('var n1 = ','var n1 = [' + self.node_1_latlong + '];\n')
self.update_map('var n2 = ','var n2 = [' + self.node_2_latlong + '];\n')
def auto_saver(self):
if self.auto_save.isChecked():
self.autosaving = 1
print('Autosaving On')
else:
self.autosaving = 0
print('Autosaving Off')
def create_valid_hosts(self):
self.valid_hosts = {}
self.valid_nodes = {}
self.valid_penteks = {}
self.valid_rhinos = {}
self.valid_runnable_nodes = {}
for h,d in self.host_dict.items():
if d['is_con'] == 1:
self.valid_hosts.update({h:d})
for k,v in self.valid_hosts.items():
if (k == 'node0') and (self.node_zero_include == 1):
self.valid_nodes.update({k:v})
if (k == 'node1') and (self.node_one_include == 1):
self.valid_nodes.update({k:v})
if (k == 'node2') and (self.node_two_include == 1):
self.valid_nodes.update({k:v})
if (k == 'pentek0') and (self.pent_zero_include == 1):
self.valid_penteks.update({k:v})
if (k == 'pentek1') and (self.pent_one_include == 1):
self.valid_penteks.update({k:v})
if (k == 'pentek2') and (self.pent_two_include == 1):
self.valid_penteks.update({k:v})
if (k == 'rhino0') and (self.rhino_zero_include == 1):
self.valid_rhinos.update({k:v})
if (k == 'rhino1') and (self.rhino_one_include == 1):
self.valid_rhinos.update({k:v})
if (k == 'rhino2') and (self.rhino_two_include == 1):
self.valid_rhinos.update({k:v})
self.valid_runnable_nodes = {}
#print(self.valid_nodes, self.valid_penteks, self.valid_rhinos)
def disable_node0(self,state):
if self.node_0_disable.isChecked():
self.node_0_is_on = 1
self.pent_zero_include = 1
self.rhino_zero_include = 1
self.node_zero_include = 1
else:
self.node_0_is_on = 1
self.pent_zero_include = 0
self.rhino_zero_include = 0
self.node_zero_include = 0
def disable_node1(self,state):
if self.node_1_disable.isChecked():
self.node_1_is_on = 1
self.pent_one_include = 1
self.rhino_one_include = 1
self.node_one_include = 1
else:
self.node_1_is_on = 0
self.pent_one_include = 0
self.rhino_one_include = 0
self.node_one_include = 0
def disable_node2(self,state):
if self.node_2_disable.isChecked():
self.node_2_is_on = 1
self.pent_two_include = 1
self.rhino_two_include = 1
self.node_two_include = 1
else:
self.node_2_is_on = 0
self.pent_two_include = 0
self.rhino_two_include = 0
self.node_two_include = 0
def create_host_dict(self):
config = configparser.ConfigParser(comment_prefixes='/', allow_no_value=True)
config.optionxform = lambda option: option # preserve case for letters
config.read(self.connections_ini)
hosts = config.items('Hosts')
self.host_dict = {}
for item in hosts:
self.host_dict.update({item[0] : item[1]})
for k,v in self.host_dict.items():
host_dict = self.host_dict
host_dict[k] = v.split(',')
host_dict[k][0] = '{\'user\':' + host_dict[k][0]
host_dict[k][1] = ',\'ip\':' + host_dict[k][1]
host_dict[k][2] = ',\'pswd\':' + host_dict[k][2]
host_dict[k][3] = ',\'is_con\':' + host_dict[k][3]+ '}'
host_dict[k] = ''.join(host_dict[k])
host_dict[k] = ast.literal_eval(host_dict[k])
self.host_dict = host_dict
self.create_valid_hosts()
def check_state(self):
self.create_host_dict()
if self.time_running == 0 and self.experiment_running == 0:
self.lbl_timer.setText(' Not Running ')
self.lbl_timer.setStyleSheet('color: black')
self.run_button.setStyleSheet("background-color: green")
elif self.time_running == 1 and self.experiment_running == 0:
self.lbl_timer.setStyleSheet('color: red')
self.run_button.setStyleSheet("background-color: red")
else:
self.lbl_timer.setStyleSheet('color: green')
self.run_button.setStyleSheet("background-color: red")
try:
self.lbl_cnc.setText('CNC')
if self.host_dict['cnc']['is_con'] == 0:
self.lbl_cnc.setStyleSheet('color: red')
else:
self.lbl_cnc.setStyleSheet('color: green')
except:
self.lbl_cnc.setStyleSheet('color: gray')
self.lbl_cnc.setText('Missing config')
try:
self.lbl_ntp.setText('NTP')
if self.host_dict['ntp']['is_con'] == 0:
self.lbl_ntp.setStyleSheet('color: red')
else:
self.lbl_ntp.setStyleSheet('color: green')
except:
self.lbl_ntp.setStyleSheet('color: gray')
self.lbl_ntp.setText('Missing config')
try:
self.pent_zero.setText('Pentek 0')
if self.host_dict['pentek0']['is_con'] == 0 and self.pent_zero_include == 1:
self.pent_zero.setStyleSheet('color: red')
elif self.host_dict['pentek0']['is_con'] == 1 and self.pent_zero_include == 1:
self.pent_zero.setStyleSheet('color: green')
else:
self.pent_zero.setStyleSheet('color: gray')
except:
self.pent_zero.setStyleSheet('color: gray')
self.pent_zero.setText('Missing config')
try:
self.pent_one.setText('Pentek 1')
if self.host_dict['pentek1']['is_con'] == 0 and self.pent_one_include == 1:
self.pent_one.setStyleSheet('color: red')
elif self.host_dict['pentek1']['is_con'] == 1 and self.pent_one_include == 1:
self.pent_one.setStyleSheet('color: green')
else:
self.pent_one.setStyleSheet('color: gray')
except:
self.pent_one.setStyleSheet('color: gray')
self.pent_one.setText('Missing config')
try:
self.pent_two.setText('Pentek 2')
if self.host_dict['pentek2']['is_con'] == 0 and self.pent_two_include == 1:
self.pent_two.setStyleSheet('color: red')
elif self.host_dict['pentek2']['is_con'] == 1 and self.pent_two_include == 1:
self.pent_two.setStyleSheet('color: green')
else:
self.pent_two.setStyleSheet('color: gray')
except:
self.pent_two.setStyleSheet('color: gray')
self.pent_two.setText('Missing config')
try:
self.node_zero.setText('Node 0')
if self.host_dict['node0']['is_con'] == 0 and self.node_zero_include == 1:
self.node_zero.setStyleSheet('color: red')
elif self.host_dict['node0']['is_con'] == 1 and self.node_zero_include == 1:
self.node_zero.setStyleSheet('color: green')
else:
self.node_zero.setStyleSheet('color: gray')
except:
self.node_zero.setStyleSheet('color: gray')
self.node_zero.setText('Missing config')
try:
self.node_one.setText('Node 1')
if self.host_dict['node1']['is_con'] == 0 and self.node_one_include == 1:
self.node_one.setStyleSheet('color: red')
elif self.host_dict['node1']['is_con'] == 1 and self.node_one_include == 1:
self.node_one.setStyleSheet('color: green')
else:
self.node_one.setStyleSheet('color: gray')
except:
self.node_one.setStyleSheet('color: gray')
self.node_one.setText('Missing config')
try:
self.node_two.setText('Node 2')
if self.host_dict['node2']['is_con'] == 0 and self.node_two_include == 1:
self.node_two.setStyleSheet('color: red')
elif self.host_dict['node2']['is_con'] == 1 and self.node_two_include == 1:
self.node_two.setStyleSheet('color: green')
else:
self.node_two.setStyleSheet('color: gray')
except:
self.node_two.setStyleSheet('color: gray')
self.node_two.setText('Missing config')
try:
self.rhino_zero.setText('Rhino 0')
if self.host_dict['rhino0']['is_con'] == 0 and self.rhino_zero_include == 1:
self.rhino_zero.setStyleSheet('color: red')
elif self.host_dict['rhino0']['is_con'] == 1 and self.rhino_zero_include == 1:
self.rhino_zero.setStyleSheet('color: green')
else:
self.rhino_zero.setStyleSheet('color: gray')
except:
self.rhino_zero.setStyleSheet('color: gray')
self.rhino_zero.setText('Missing config')
try:
self.rhino_one.setText('Rhino 1')
if self.host_dict['rhino1']['is_con'] == 0 and self.rhino_one_include == 1:
self.rhino_one.setStyleSheet('color: red')
elif self.host_dict['rhino1']['is_con'] == 1 and self.rhino_one_include == 1:
self.rhino_one.setStyleSheet('color: green')
else:
self.rhino_one.setStyleSheet('color: gray')
except:
self.rhino_one.setStyleSheet('color: gray')
self.rhino_one.setText('Missing config')
try:
self.rhino_two.setText('Rhino 2')
if self.host_dict['rhino2']['is_con'] == 0 and self.rhino_two_include == 1:
self.rhino_two.setStyleSheet('color: red')
elif self.host_dict['rhino2']['is_con'] == 1 and self.rhino_two_include == 1:
self.rhino_two.setStyleSheet('color: green')
else:
self.rhino_two.setStyleSheet('color: gray')
except:
self.rhino_two.setStyleSheet('color: gray')
self.rhino_two.setText('Missing config')
try:
if self.host_dict['tx_bullet0']['is_con'] == 0:
self.lbl_bullet_0_tx.setStyleSheet('color: red')
else:
self.lbl_bullet_0_tx.setStyleSheet('color: green')
except:
self.lbl_bullet_0_tx.setStyleSheet('color: gray')
self.lbl_bullet_0_tx.setText('Missing config')
try:
if self.host_dict['rx_bullet0']['is_con'] == 0:
self.lbl_bullet_0_rx.setStyleSheet('color: red')
else:
self.lbl_bullet_0_rx.setStyleSheet('color: green')
except:
self.lbl_bullet_0_rx.setStyleSheet('color: gray')
self.lbl_bullet_0_rx.setText('Missing config')
try:
if self.host_dict['tx_bullet1']['is_con'] == 0:
self.lbl_bullet_1_tx.setStyleSheet('color: red')
else:
self.lbl_bullet_1_tx.setStyleSheet('color: green')
except:
self.lbl_bullet_1_tx.setStyleSheet('color: gray')
self.lbl_bullet_1_tx.setText('Missing config')
try:
if self.host_dict['rx_bullet1']['is_con'] == 0:
self.lbl_bullet_1_rx.setStyleSheet('color: red')
else:
self.lbl_bullet_1_rx.setStyleSheet('color: green')
except:
self.lbl_bullet_1_rx.setStyleSheet('color: gray')
self.lbl_bullet_1_rx.setText('Missing config')
try:
if self.host_dict['tx_bullet2']['is_con'] == 0:
self.lbl_bullet_2_tx.setStyleSheet('color: red')
else:
self.lbl_bullet_2_tx.setStyleSheet('color: green')
except:
self.lbl_bullet_2_tx.setStyleSheet('color: gray')
self.lbl_bullet_2_tx.setText('Missing config')
try:
if self.host_dict['rx_bullet2']['is_con'] == 0:
self.lbl_bullet_2_rx.setStyleSheet('color: red')
else:
self.lbl_bullet_2_rx.setStyleSheet('color: green')
except:
self.lbl_bullet_2_rx.setStyleSheet('color: gray')
self.lbl_bullet_2_rx.setText('Missing config')
def run_quickview(self):
#if self.plot_is_active == 1:
subprocess.call('python3 plottty.py &',shell=True)
def run_pulse_editor(self):
subprocess.call('cd ' + self.tcu_pulse_editor + ' && python3 creator.py -f ' + self.nextrad_ini + ' -o ' + self.nextrad_ini + ' &',shell=True)
def abort_experiment(self):
print('Aborting Experiment!')
self.abort_flag = 1
self.time_running = 0
self.time_remaining = 0
self.experiment_running = 0
self.ansi_running = 0
self.ansi_shell_command(self.valid_penteks,'cd ' + self.pentek_header_dir + ' && ./killscript.sh &' )
self.ansi_shell_command(self.valid_nodes,'export PYTHONPATH=/home/nextrad/Documents/tcu_stuff/harpoon/ && cd ' + self.node_header_dir + ' && nohup ./tcu_abort_script.sh' )
self.run_button.setText('Run')
def save_folder(self):
print('temp')
def enter_target_details(self):
if self.target_details.text() != '':
self.target_latlong = self.target_details.text()
self.init_map()
self.update_map('var trgt = ','var trgt = [' + self.target_latlong + '];\n')
print('Target Details Updated')
else:
print('Please Enter Valid Address as Lat, Long')
def change_map_style(self):
if self.map_style == 0:
self.update_map('id: \'mapbox.',' id: \'mapbox.outdoors\',\n')
self.map_style = 1
else:
self.update_map('id: \'mapbox.',' id: \'mapbox.satellite\',\n')
self.map_style = 0
def enter_scene(self):
print('Scene description added to excel file')
def enter_description(self):
ini_file = self.nextrad_ini
config = configparser.ConfigParser(comment_prefixes='/', allow_no_value=True)
config.optionxform = lambda option: option # preserve case for letters
config.read(ini_file)
if self.experiment_name_edit.text() != '':
config['Notes']['EXPERIMENT_NAME'] = self.experiment_name_edit.text()
print('Description Set')
else:
print('Nothing Entered, Using Default \'No Description\'')
config['Notes']['EXPERIMENT_NAME'] = 'No Description'
with open(self.nextrad_ini,'w') as configfile:
config.write(configfile)
def update_map(self,textin,replace):
self.update_gps_positions(self.valid_nodes)
copyfile('map.js', 'map_backup.js')
with open('map.js') as fin, open('map_temp.js', 'w') as fout:
for line in fin:
if textin in line:
line = replace
fout.write(line)
os.rename('map_temp.js', 'map.js')
self.view.load(QtCore.QUrl.fromLocalFile(os.path.abspath('map.html')))
def update_gps_positions(self, host_dict):
for k,v in host_dict.items():
if k == 'node0' and v['is_con'] == 1:
with open(self.gps0_ini) as f0:
file_content0 = '[dummy_section]\n' + f0.read()
config0 = configparser.RawConfigParser(comment_prefixes=';', allow_no_value=True)
config0.optionxform = lambda option: option # preserve case for letters
config0.read_string(file_content0)
x = (config0.get('dummy_section','LATITUDE'))
y = (config0.get('dummy_section','LONGITUDE'))
# x = int(config0.get('dummy_section','LATITUDE'))
# y = int(config0.get('dummy_section','LONGITUDE'))
# if x > 2**31 - 1 :
# x = (2**32 - x)*(-1)
# else:
# x = x
# if y > 2**31 - 1 :
# y = (2**32 - y)*(-1)
# else:
# y = y
# self.node_0_latlong = str(x*90/324e6) +',' + str(y*90/324e6)
self.node_0_latlong = x + ',' + y
self.node_0_ht = str(config0.get('dummy_section','ALTITUDE'))
print('GPS0 Data: ' + self.node_0_latlong + ', ' + self.node_0_ht )
if k == 'node1' and v['is_con'] == 1:
with open(self.gps1_ini) as f1:
file_content1 = '[dummy_section]\n' + f1.read()
config1 = configparser.RawConfigParser(comment_prefixes=';', allow_no_value=True)
config1.optionxform = lambda option: option # preserve case for letters
config1.read_string(file_content1)
x = (config1.get('dummy_section','LATITUDE'))
y = (config1.get('dummy_section','LONGITUDE'))
# x = int(config1.get('dummy_section','LATITUDE'))
# y = int(config1.get('dummy_section','LONGITUDE'))
# if x > 2**31 - 1 :
# x = (2**32 - x)*(-1)
# else:
# x = x
# if y > 2**31 - 1 :
# y = (2**32 - y)*(-1)
# else:
# y = y
# self.node_1_latlong = str(x*90/324e6) +',' + str(y*90/324e6)
self.node_1_latlong = x + ',' + y
self.node_1_ht = str(config1.get('dummy_section','ALTITUDE'))
print('GPS1 Data: ' + self.node_1_latlong + ', ' + self.node_1_ht )
if k == 'node2' and v['is_con'] == 1:
with open(self.gps2_ini) as f2:
file_content2 = '[dummy_section]\n' + f2.read()
config2 = configparser.RawConfigParser(comment_prefixes=';', allow_no_value=True)
config2.optionxform = lambda option: option # preserve case for letters
config2.read_string(file_content2)
x = (config2.get('dummy_section','LATITUDE'))
y = (config2.get('dummy_section','LONGITUDE'))
# x = int(config2.get('dummy_section','LATITUDE'))
# y = int(config2.get('dummy_section','LONGITUDE'))
# if x > 2**31 - 1 :
# x = (2**32 - x)*(-1)
# else:
# x = x
# if y > 2**31 - 1 :
# y = (2**32 - y)*(-1)
# else:
# y = y
# self.node_2_latlong = str(x*90/324e6) +',' + str(y*90/324e6)
self.node_2_latlong = x + ',' + y
self.node_2_ht = str(config2.get('dummy_section','ALTITUDE'))
print('GPS2 Data: ' + self.node_2_latlong + ', ' + self.node_2_ht )
def ansi_shell_command(self,host_dict,command):
for k,v in host_dict.items():
subprocess.call('ansible '+ k +' -m shell -a \"' + command + '\" &',shell=True)
def ansi_copy(self,host_dict,file,destination):
for k,v in host_dict.items():
subprocess.call('ansible '+ k +' -m copy -a \"src='+ str(file) +' dest=' + str(destination) + '\" &',shell=True)
def experiment_start(self):
#send = subprocess.call('ansible cnc -m copy -a \"src='+self.nextrad_ini+' dest=' + self.cnc_header_loc + 'NeXtRAD.ini\"',shell=True)
print(self.valid_nodes, self.valid_penteks, self.valid_rhinos)
if self.valid_nodes or self.valid_penteks or self.valid_rhinos != {}:
print('Starting Experiment')
self.time_running = 1
self.ansi_running = 1
ansi_worker = Worker(self.ansi_play)
#print(self.valid_hosts)
self.threadpool.start(ansi_worker)
countdown_worker = Worker(self.timer_thread)
self.threadpool.start(countdown_worker)
else:
self.run_button.setText('Run')
print('No valid host to send header to.')
def ansi_play(self):
if self.ansi_running == 1:
self.ansi_copy(self.valid_nodes,self.nextrad_ini,self.gpsdo_header_dir)
self.ansi_copy(self.valid_penteks,self.nextrad_ini,self.pentek_header_dir)
#self.ansi_copy(self.valid_hosts,'~/Documents/NeXtRAD.ini')
#self.ansi_copy(self.valid_rhinos,self.nextrad_ini,self.rhino_header_dir)
self.ansi_shell_command(self.valid_penteks,'cd ' + self.pentek_header_dir + ' && ./run-cobalt.sh')
self.ansi_running = 0
self.ansi_shell_command(self.valid_nodes,'cd ' + self.node_header_dir + ' && rm NeXtRAD.ini')
time.sleep(int(self.local_copy_delay))
#copy('NeXtRAD.ini', self.cnc_header_loc + '/NeXtRAD.ini')
self.ansi_copy(self.valid_nodes,self.nextrad_ini,self.node_header_dir)
def timer_thread(self):
self.countdown_timer()
self.remaining_timer()
def countdown_timer(self):
delta = int((self.start_time - datetime.datetime.now()).total_seconds())
print('Experiment Running!')
while delta>1:
if self.time_running == 1:
self.lbl_timer.setText(' Countdown: ' + str(delta) + ' ')
delta = int((self.start_time - datetime.datetime.now()).total_seconds())
time.sleep(1)
else:
print('Experiment Aborted!')
self.time_running = 0
self.experiment_running = 0
self.run_button.setText('Run')
return
self.time_running = 0
self.experiment_running = 1
def remaining_timer(self):
while self.time_remaining>1:
if self.experiment_running == 1:
#print(str(self.time_remaining))
self.lbl_timer.setText(' Running: ' + str(int(self.time_remaining/1000000)) + ' ')
self.time_remaining = self.time_remaining - 1000000
time.sleep(1)
self.save_scene_check = 1
else:
print('Experiment Aborted')
self.run_button.setText('Run')
self.time_running = 0
self.experiment_running = 0
self.time_remaining = 0
self.save_scene_check = 0
self.time_running = 0
self.experiment_running = 0
self.time_remaining = 0
self.ansi_shell_command(self.valid_penteks,'cd ' + self.pentek_header_dir + ' && ./killscript.sh &' )
self.run_button.setText('Run')
#if self.save_scene_check == 1:
#self.save_scene()
if self.autosaving == 1 and self.abort_flag == 0:
time.sleep(1)
self.save_adc_data()
self.abort_flag = 0
def save_adc_data(self):
print('Saving ADC Files to External Hardrive/s')
if self.pent_zero_include:
subprocess.call('ansible pentek0 -m shell -a \"' + 'cd /home/transceiversystem/Documents/nextlook/build && nextlook -n 0 -x 0 -b' + '\" &',shell=True)
if self.pent_one_include:
subprocess.call('ansible pentek1 -m shell -a \"' + 'cd /home/transceiversystem/Documents/nextlook/build && nextlook -n 1 -x 0 -b' + '\" &',shell=True)
if self.pent_two_include:
subprocess.call('ansible pentek2 -m shell -a \"' + 'cd /home/transceiversystem/Documents/nextlook/build && nextlook -n 2 -x 0 -b' + '\" &',shell=True)
self.save_scene()
def save_scene(self):
if self.scene_edit.text() == '':
self.scene_edit.setText('No Notes. Change in text file if necessary.')
if (self.start_time) == 0:
self.experiment_start_string = 'SeeHeader' + str(datetime.datetime.now())
experiment_log = self.experiment_start_string + ';' + \
str(self.bands) + ';' + \
str(self.pols) + ';' + \
str(self.pulse_width) + ';' + \
str(self.pri) + ';' + \
str(self.num_pri) + ';' + \
str(self.experiment_name) + ';' + \
str(self.scene_edit.text())
with open(self.experiment_log_file,'a') as csvfile:
if os.path.getsize(self.experiment_log_file):
print('Log File Appended')
self.save_scene_check = 0
self.scene_edit.setText('')
csvfile.write(experiment_log + '\n')
else:
csvfile.write('Timestamp;RF Frequency;Polarisations;Pulse Widths;PRI;Number of PRIs;Experiment;Notes\n')
print('Log File Appended')
self.save_scene_check = 0
self.scene_edit.setText('')
csvfile.write(experiment_log + '\n')
copy(self.experiment_log_file, self.save_directory)
directory = self.save_directory + '/' + self.experiment_start_string
image_directory = directory + '/quicklook_images'
os.mkdir(directory)
os.mkdir(image_directory)
self.copytree(self.image_directory, image_directory)
copy(self.nextrad_ini,directory)
print('Save Folder: ' + self.experiment_start_string)
def copytree(self,src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def run_experiment(self):
if self.time_running == 0 and self.experiment_running == 0:
self.setup_header()
self.run_button.setText('Abort')
self.experiment_start()
else:
self.abort_experiment()
print('Experiment Aborted!')
self.time_running = 0
self.run_button.setText('Run')
def setup_header(self):
inputfile=self.nextrad_ini
outputfile=self.nextrad_ini
nextrad_ini = inputfile
config = configparser.ConfigParser(comment_prefixes='/', allow_no_value=True)
config.optionxform = lambda option: option # preserve case for letters
config.read(nextrad_ini)
delay = config.get('Timing','STARTTIMESECS')
self.start_time = datetime.datetime.now() + datetime.timedelta(seconds=int(delay))
self.experiment_start_string = str(self.start_time.year)[2:] + '_' + \
str(self.start_time.month) + '_' + \
str(self.start_time.day) + '_' + \
str(self.start_time.hour) + '_' + \
str(self.start_time.minute) + '_' + \
str(self.start_time.second)
config['Timing']['YEAR'] = str(self.start_time.year)
config['Timing']['MONTH'] = str(self.start_time.month).zfill(2)
config['Timing']['DAY'] = str(self.start_time.day).zfill(2)
config['Timing']['HOUR'] = str(self.start_time.hour).zfill(2)
config['Timing']['MINUTE'] = str(self.start_time.minute).zfill(2)
config['Timing']['SECOND'] = str(self.start_time.second).zfill(2)
config['GeometrySettings']['NODE0_LOCATION_LAT'] = str(eval(self.node_0_latlong)[0])
config['GeometrySettings']['NODE0_LOCATION_LON'] = str(eval(self.node_0_latlong)[1])
config['GeometrySettings']['NODE0_LOCATION_HT'] = self.node_0_ht
config['GeometrySettings']['NODE1_LOCATION_LAT'] = str(eval(self.node_1_latlong)[0])
config['GeometrySettings']['NODE1_LOCATION_LON'] = str(eval(self.node_1_latlong)[1])
config['GeometrySettings']['NODE1_LOCATION_HT'] = self.node_1_ht
config['GeometrySettings']['NODE2_LOCATION_LAT'] = str(eval(self.node_2_latlong)[0])
config['GeometrySettings']['NODE2_LOCATION_LON'] = str(eval(self.node_2_latlong)[1])
config['GeometrySettings']['NODE2_LOCATION_HT'] = self.node_2_ht
config['TargetSettings']['TGT_LOCATION_LAT'] = str(eval(self.target_latlong)[0])
config['TargetSettings']['TGT_LOCATION_LON'] = str(eval(self.target_latlong)[1])
config['TargetSettings']['TGT_LOCATION_HT'] = self.target_ht
config['GeometrySettings']['NODE0_INCLUDE'] = str(self.node_0_is_on)
config['GeometrySettings']['NODE1_INCLUDE'] = str(self.node_1_is_on)
config['GeometrySettings']['NODE2_INCLUDE'] = str(self.node_2_is_on)
#print('Experiment Start Time: ', self.start_time)
with open(outputfile,'w') as configfile:
config.write(configfile)
# Target Information
# Pulse Parameters
print('Using Header: ', inputfile)
print('Start Delay set to ', delay, ' seconds')
self.init_nextheader_values()
# Application entry point: create the Qt application, build the main control
# window, set its icon and hand control to the Qt event loop.
app = QtWidgets.QApplication([])
window = MainWindow()
window.setWindowIcon(QtGui.QIcon('icon.png'))
app.exec_()
|
import cv2
import os
import time
import click
@click.command()
@click.option('--model-name', default='haarcascade_frontalface_default.xml',
	help='The name of the pre-trained model to load. Download more from https://github.com/opencv/opencv/tree/master/data/haarcascades')
@click.option('--camera-id', default=1,
	help='The id of the camera to use. You can discover the connected cameras by running: ls -ltrh /dev/video*.')
@click.option('--trt-optimize', default=False,
	help='Setting this to True, the downloaded TF model will be converted to TensorRT model.', is_flag=True)
def detector(model_name, camera_id, trt_optimize):
    """Grab frames from the GStreamer camera pipeline, draw a rectangle
    around every detected face plus an FPS counter, and display the result
    until ESC is pressed.

    Fixes over the original: typos in the --camera-id help text, idiomatic
    boolean checks, and the capture/windows are now released on exit or
    error instead of being leaked.
    """
    detector_model = './models/{}'.format(model_name)
    classifier = cv2.CascadeClassifier()
    if not classifier.load(detector_model):
        raise ValueError('Could not find {}'.format(detector_model))
    #video_capture = cv2.VideoCapture(camera_id)
    video_capture = cv2.VideoCapture("nvcamerasrc ! video/x-raw(memory:NVMM), \
                    width=(int)640, height=(int)480, \
                    format=(string)I420, framerate=(fraction)30/1 \
                    ! nvvidconv ! video/x-raw, format=(string)BGRx \
                    ! videoconvert ! video/x-raw, format=(string)BGR ! \
                    appsink")
    start_time = time.time()
    try:
        while True:
            # Capture frame-by-frame
            video_capture_result, frame = video_capture.read()
            if not video_capture_result:
                raise ValueError('Error reading the frame from camera {}'.format(camera_id))
            # face detection and other logic goes here
            faces = classifier.detectMultiScale(frame, 1.3, 5)
            for (x, y, w, h) in faces:
                # send each face in mqtt topic
                cv2.rectangle(frame, (x, y), (x+w, y+h), color=(0, 255, 0), thickness=2)
            cv2.putText(frame, "FPS:{:0.1f}".format(1.0 / (time.time() - start_time)),
                        (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
            start_time = time.time()
            cv2.imshow('Input', frame)
            if cv2.waitKey(1) == 27:  # ESC quits
                break
    finally:
        # Release the camera pipeline and close the preview window even if
        # frame grabbing fails part-way through.
        video_capture.release()
        cv2.destroyAllWindows()
# Run the CLI entry point only when executed as a script.
if __name__ == "__main__":
    detector()
|
#===============================================================================
# The application will not be able to exit out of the Task phase
# unless the criteria are met.
# Gating criteria are only evaluated during process.
# Expects GatingChannel's signal to be scaled so +1.0 is the top of the screen
# -e.g., isometric contraction: EMG input between 0 and 1, and 1=MVC
# -e.g., ERD threshold: channel input has mean 0 and variance=1
#===============================================================================
from math import ceil
from random import randint
import numpy as np
#import random
#import time
from AppTools.Shapes import Block
from AppTools.StateMonitors import addstatemonitor
class GatingApp(object):
    """BCI2000 Python-application extension that gates phase transitions.

    While enabled, the application cannot leave the Task phase until the
    signal on GatingChannel has continuously satisfied the target-range
    criteria for a (randomized) minimum duration.  Criteria are evaluated
    only in process(); the GatingChannel signal is expected to be scaled
    so that +1.0 is the top of the screen (see file header).  Python 2
    code (``raise Exc, msg`` syntax).
    """
    params = [
        #"Tab:SubSection DataType Name= Value DefaultValue LowRange HighRange // Comment (identifier)",
        #See further details http://bci2000.org/wiki/index.php/Technical_Reference:Parameter_Definition
        "PythonApp:Gating int GatingEnable= 0 0 0 1 // Enable: 0 no, 1 yes (boolean)",
        "PythonApp:Gating list GatingChannel= 1 1 % % % // Input channel on which the criteria are evaluated",
        "PythonApp:Gating float DurationMin= 2.6 2.6 0 % // Duration s which signal must continuously meet criteria before triggering",
        "PythonApp:Gating float DurationRand= 0.3 0.3 0 % // Randomization s around the duration",
        "PythonApp:Gating int GatingReset= 1 1 0 1 // Counter resets when exiting range: 0 no, 1 yes (boolean)",
        #"PythonApp:Gating int RangeEnter= 0 0 0 2 // Signal must enter range from: 0 either, 1 below, 2 above (enumeration)",
    ]
    states = [
        "GatingOK 1 0 0 0", #Boolean if all Gating paramaters are currently satisfied.
        "msecInRange 16 0 0 0", #Number of milliseconds in range, max 65536
    ]
    @classmethod
    def preflight(cls, app, sigprops):
        """Validate GatingChannel and resolve it to 0-based indices.

        Accepts channel names or 1-based integer indices; stores the
        resolved indices in app.gatechan.  Raises EndUserError for unknown
        names, illegal indices, or an empty channel list.
        """
        if int(app.params['GatingEnable'])==1:
            #Not yet supported
            #if app.params['RangeEnter'].val: raise EndUserError, "RangeEnter not yet supported"
            # Make sure GatingChannel is in the list of channels.
            chn = app.inchannels()
            pch = app.params['GatingChannel'].val
            use_process = len(pch) != 0
            if use_process:
                # Mixed/non-int entries are treated as channel names.
                if False in [isinstance(x, int) for x in pch]:
                    nf = filter(lambda x: not str(x) in chn, pch)
                    if len(nf): raise EndUserError, "GatingChannel %s not in module's list of input channel names" % str(nf)
                    app.gatechan = [chn.index(str(x)) for x in pch]
                else:
                    # Numeric entries must be whole numbers within 1..len(chn).
                    nf = [x for x in pch if x < 1 or x > len(chn) or x != round(x)]
                    if len(nf): raise EndUserError, "Illegal GatingChannel: %s" % str(nf)
                    app.gatechan = [x-1 for x in pch]
            else:
                raise EndUserError, "Must supply GatingChannel"
    @classmethod
    def initialize(cls, app, indim, outdim):
        """Attach optional state monitors and draw the first randomized duration."""
        if int(app.params['GatingEnable'])==1:
            if int(app.params['ShowSignalTime']):
                app.addstatemonitor('GatingOK')
                app.addstatemonitor('msecInRange')
            app.mindur = 1000*app.params['DurationMin'].val + randint(int(-1000*app.params['DurationRand'].val),int(1000*app.params['DurationRand'].val))#randomized EMG Gating duration
            app.wasInRange = False
    @classmethod
    def halt(cls,app):
        """No resources to release."""
        pass
    @classmethod
    def startrun(cls,app):
        """Reset the range_ok timestamp and the in-range counter at run start."""
        if int(app.params['GatingEnable'])==1:
            app.forget('range_ok')
            app.states['msecInRange']=0
    @classmethod
    def stoprun(cls,app):
        """Nothing to clean up at run end."""
        if int(app.params['GatingEnable'])==1: pass
    @classmethod
    def transition(cls,app,phase):
        """Re-randomize the minimum duration each intertrial; reset the timer on task entry."""
        if int(app.params['GatingEnable'])==1:
            if phase == 'intertrial':
                app.mindur = 1000*app.params['DurationMin'].val + randint(int(-1000*app.params['DurationRand'].val),int(1000*app.params['DurationRand'].val))#randomized EMG Gating duration
            elif phase == 'baseline':
                pass
            elif phase == 'gocue':
                pass
            elif phase == 'task':
                app.remember('range_ok')
                app.states['msecInRange'] = 0
            elif phase == 'response':
                pass
            elif phase == 'stopcue':
                pass
    @classmethod
    def process(cls,app,sig):
        """Evaluate the gating criteria per signal block; publish GatingOK/msecInRange."""
        if int(app.params['GatingEnable'])==1:
            # ===================================================================
            # Use ContinuousFeedbackExtension's determination about InRange
            # if that extension is enabled. Else, determine it ourselves from
            # the current signal and the target ranges.
            # Target ranges should be specified within -100 to +100
            # EMG signals are expected to range from 0 to 10 (10 = 100 % MVC)
            # ERD signals are expected to range from -10 to +10 (10 = 100% baseline)
            # Standard signals are expected to have mean 0 and unit variance, extremes -10 and +10
            # Thus, multiply our signal by 10 to transform signal range to target range.
            # ===================================================================
            if 'ContFeedbackEnable' in app.params and int(app.params['ContFeedbackEnable'])==1:
                inRange = app.states['InRange']
                doReset = app.changed('InRange', only=1)
            else:
                t = app.states['LastTargetClass'] #Keeps track of the previous trial's TargetClass for feedback purposes.
                x = sig[app.gatechan,:].mean(axis=1)#Extract the feedback channels.
                x = x.A.ravel()[t-1]/3#Transform x to a measure mostly ranging from -3.26 to +3.26 SDs->Necessary for 16-bit integer state
                #Save x to a state of uint16
                x = min(x, 3.26)
                x = max(x, -3.26)
                x = x * 3
                inRange = (x >= app.target_range[t-1][0]) and (x <= app.target_range[t-1][1])
                doReset = inRange and not app.wasInRange
            app.wasInRange = inRange
            if doReset or not inRange:
                app.remember('range_ok') #Resets range_ok unless we were already inrange.
                if int(app.params['GatingReset']): app.states['msecInRange'] = 0 #Resets the timer
            app.states['msecInRange'] = app.states['msecInRange'] + int(inRange)*int(app.block_dur)
            rangeok = app.states['msecInRange'] >= app.mindur
            enterok = True #TODO: Check entry direction condition.
            app.states['GatingOK'] = rangeok and enterok
    @classmethod
    def event(cls, app, phasename, event):
        """No event handling needed."""
        if int(app.params['GatingEnable'])==1: pass
# Read a radius from stdin and print the sphere volume V = (4/3) * pi * r^3.
PI = 3.14159  # fixed-precision constant required by the expected output
radius = int(input())
sphere_volume = (4 * PI * (radius ** 3)) / 3
print(f"VOLUME = {sphere_volume:.3f}")
|
# -*- coding: utf-8 -*-
from lxml import html
from dataPreprocess import dataPreprocess
from public_functions import *
__author__ = 'benywon'
class QASentdataPreprocess(dataPreprocess):
    """Pre-processor for the QASent answer-sentence-selection corpus.

    Parses the QASent XML splits (question / positive / negative elements)
    into token-id lists and hands them to the dataPreprocess base class
    for pickling.  Python 2 code (print statements).
    """
    def __init__(self,
                 use_clean=False,
                 **kwargs):
        # use_clean selects the manually edited training split.
        dataPreprocess.__init__(self, **kwargs)
        self.use_clean = use_clean
        self.path = self.path_base + 'QAsent/'
        # Encode the configuration in the pickle file name.
        append_str = '_batch' if self.batch_training else ''
        append_str += '_clean' if self.use_clean else ''
        self.data_pickle_path = self.path + 'QAsent' + append_str + '.pickle'
        if self.reload:
            self.__build_data_set__()
        else:
            self.load_data()
        self.calc_data_stat()
        self.dataset_name = 'QASent'
    def __build_data_set__(self):
        """Parse the train/dev/test XML files and populate TRAIN/DEV/TEST."""
        print 'start loading data from original file'
        if self.use_clean:
            trainfilepath = self.path + 'train-less-than-40.manual-edit.xml'
        else:
            trainfilepath = self.path + 'train2393.cleanup.xml'
        testfilepath = self.path + 'test-less-than-40.manual-edit.xml'
        devfilepath = self.path + 'dev-less-than-40.manual-edit.xml'
        def get_one_set(filepath, train=True):
            """Parse one split.

            train=True returns three parallel lists [questions, positives,
            negatives] (one triple per pos/neg pair); train=False returns
            per-question patches of [question, answer, label] arrays.
            """
            print 'process:' + filepath
            all_the_text = open(filepath).read()
            doc = html.fromstring(all_the_text)
            target = []
            data = []
            for ele in doc:
                one_question = {'right': [], 'wrong': []}
                for pos_neg in ele:
                    tag = pos_neg.tag
                    content = pos_neg.text
                    # The element text starts with a newline; the sentence
                    # proper is on the second line.
                    content = content.split('\n')[1]
                    content = clean_string(content)
                    if tag == 'question':
                        one_question['question'] = self.get_sentence_id_list(content)
                    elif tag == 'positive':
                        one_question['right'].append(self.get_sentence_id_list(content))
                    elif tag == 'negative':
                        one_question['wrong'].append(self.get_sentence_id_list(content))
                # Questions lacking either a positive or a negative answer
                # cannot form training pairs; drop them.
                if len(one_question['right']) == 0 or len(one_question['wrong']) == 0:
                    continue
                data.append(one_question)
            if train:
                q = []
                yes = []
                no = []
                # Cartesian product: one (question, positive, negative)
                # triple per pos/neg combination.
                for one_question in data:
                    question = one_question['question']
                    pos = one_question['right']
                    neg = one_question['wrong']
                    for po in pos:
                        for ne in neg:
                            q.append(question)
                            yes.append(po)
                            no.append(ne)
                # Truncate every sentence to Max_length tokens.
                target.append([x[0:self.Max_length] for x in q])
                target.append([x[0:self.Max_length] for x in yes])
                target.append([x[0:self.Max_length] for x in no])
                return target
            else:
                for one_question in data:
                    one_patch = []
                    question = one_question['question']
                    pos = one_question['right']
                    neg = one_question['wrong']
                    for po in pos:
                        one_patch.append([question, po, 1])
                    for ne in neg:
                        one_patch.append([question, ne, 0])
                    target.append(one_patch)
                # NOTE(review): `np` is not imported by name in this module's
                # visible header -- presumably re-exported by
                # `from public_functions import *`; confirm.
                return [[[np.asarray(x[0], dtype='int32'), np.asarray(x[1], dtype='int32'), x[2]] for x in t] for t
                        in target]
        self.TRAIN = get_one_set(trainfilepath, train=True)
        self.DEV = get_one_set(devfilepath, train=True)
        self.TEST = get_one_set(testfilepath, train=False)
        self.transfer_data()
        print 'load data done'
# Manual smoke test: rebuild the QASent pickle from the original XML files.
if __name__ == '__main__':
    c = QASentdataPreprocess(reload=True, batch_training=False, use_clean=False)
|
from django.contrib import admin
from riot.models import Places, Profile, RiotPronePlaces, NowRioting
# Register your models here.
# Expose the riot app's models in the Django admin with the default
# ModelAdmin options (registration order preserved).
for _model in (RiotPronePlaces, NowRioting, Profile, Places):
    admin.site.register(_model)
|
from opencv.cv import *
from opencv.highgui import *
import time
import sys
counter = 0
size = None
def initCamera():
global size
camera = cvCreateCameraCapture(1)
if not camera:
print "Could not open webcam!"
sys.ext(1)
cvSetCaptureProperty(camera, CV_CAP_PROP_FRAME_WIDTH, 320)
cvSetCaptureProperty(camera, CV_CAP_PROP_FRAME_HEIGHT, 240)
frame = cvQueryFrame(camera)
if frame is not None:
w = frame.width
h = frame.height
print "%d %d"%(w, h)
size = cvSize(w, h)
time.sleep(1)
return camera
def readFrom(filename):
global size
camera = cvCreateFileCapture(filename)
if not camera:
print "Could not open file!"
sys.ext(1)
cvSetCaptureProperty(camera, CV_CAP_PROP_FRAME_WIDTH, 320)
cvSetCaptureProperty(camera, CV_CAP_PROP_FRAME_HEIGHT, 240)
frame = cvQueryFrame(camera)
if frame is not None:
w = frame.width
h = frame.height
print "%d %d"%(w, h)
size = cvSize(w, h)
time.sleep(1)
return camera
def captureImage(camera):
global counter
frame = cvQueryFrame(camera)
cvSaveImage("images/test%d.jpg"%counter, frame)
counter+=1
if counter >= 1000: counter = 0
if not frame:
print "Couldn't grab frame."
sys.exit(1)
return frame
# --- GUI setup: one window for the raw frame, one for the HSV mask -------
cvNamedWindow("Original", CV_WINDOW_AUTOSIZE)
cvMoveWindow("Original", 100, 100)
cvNamedWindow("Modified", CV_WINDOW_AUTOSIZE)
cvMoveWindow("Modified", 300, 100)
#camera = initCamera()
camera = readFrom("test1.avi")
# Buffers sized from the module-level `size` set by readFrom()/initCamera().
hsvImage = cvCreateImage(size, IPL_DEPTH_8U, 3)
output = cvCreateImage(size, IPL_DEPTH_8U, 1)
output2 = cvCreateImage(size, IPL_DEPTH_8U, 1)
# Trackbars for live tuning of the HSV threshold range (callback is a no-op;
# positions are polled each frame below).
minH = cvCreateTrackbar("Min H", "Modified", 0, 255, lambda x: 1)
maxH = cvCreateTrackbar("Max H", "Modified", 43, 255, lambda x: 1)
minS = cvCreateTrackbar("Min S", "Modified", 49, 255, lambda x: 1)
maxS = cvCreateTrackbar("Max S", "Modified", 134, 255, lambda x: 1)
minV = cvCreateTrackbar("Min V", "Modified", 149, 255, lambda x: 1)
maxV = cvCreateTrackbar("Max V", "Modified", 255, 255, lambda x: 1)
#minH = cvCreateTrackbar("Min H", "Modified", 5, 255, lambda x: 1)
#maxH = cvCreateTrackbar("Max H", "Modified", 27, 255, lambda x: 1)
#minS = cvCreateTrackbar("Min S", "Modified", 135, 255, lambda x: 1)
#maxS = cvCreateTrackbar("Max S", "Modified", 255, 255, lambda x: 1)
#minV = cvCreateTrackbar("Min V", "Modified", 103, 255, lambda x: 1)
#maxV = cvCreateTrackbar("Max V", "Modified", 255, 255, lambda x: 1)
storage = cvCreateMemStorage(0)
# Main loop: threshold each frame in HSV space, then detect circles in the
# binary mask and draw them on both views.
while 1:
    #print time.asctime()
    image = captureImage(camera)
    cvCvtColor(image, hsvImage, CV_BGR2HSV)
    # Read the current threshold range from the trackbars.
    hsvMin = cvScalar(
        cvGetTrackbarPos("Min H", "Modified"),
        cvGetTrackbarPos("Min S", "Modified"),
        cvGetTrackbarPos("Min V", "Modified"), 0)
    hsvMax = cvScalar(
        cvGetTrackbarPos("Max H", "Modified"),
        cvGetTrackbarPos("Max S", "Modified"),
        cvGetTrackbarPos("Max V", "Modified"), 255)
    cvInRangeS(hsvImage, hsvMin, hsvMax, output)
    # NOTE(review): both cvSmooth and cvErode write into output2, but the
    # Hough transform below reads `output`, so the smoothed/eroded result
    # appears unused -- confirm whether output2 was meant to be passed.
    cvSmooth(output, output2, CV_GAUSSIAN, 3, 3)
    cvErode(output, output2)
    circles = cvHoughCircles(output, storage, CV_HOUGH_GRADIENT, 1,
        output.height / 2, 100, 20, 1, 900)
    for i in range(circles.total):
        circle = circles[i]
        #print circle[2]
        # Filled circle on the original frame, outline on the mask.
        cvCircle(image, cvPoint(
            cvRound(circle[0]), cvRound(circle[1])),
            cvRound(circle[2]), cvScalar(0, 255, 0), -1, 8, 0)
        cvCircle(output, cvPoint(
            cvRound(circle[0]), cvRound(circle[1])),
            cvRound(circle[2]), cvScalar(0, 255, 0), 1, 8, 0)
    cvShowImage("Original", image)
    cvShowImage("Modified", output)
    cvWaitKey(125)  # ~8 fps pacing; also pumps the HighGUI event loop
#hsv_frame = cvCreateImage(size, IPL_DEPTH_8U, 3)
#cvSmooth(image, hsv_frame, CV_GAUSSIAN, 3, 3)
#cvSaveImage("smoothed.jpg", hsv_frame)
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
from gclib.object import object
from game.utility.config import config
class almanac(object):
    """Per-player collection almanac (card / skill / equipment book).

    Tracks which card, skill and equipment ids the player has collected
    and which collection "combinations" have already been rewarded.
    Python 2 code (uses dict.has_key).
    """
    def __init__(self):
        """Constructor: start with empty collections and no bound user."""
        object.__init__(self)
        self.card = set()        # collected card ids
        self.equipment = set()   # collected equipment ids
        self.skill = set()       # collected skill ids
        self.combine = []        # combination ids already rewarded
        self.user = None         # owning user object, assigned externally
        return
    def init(self):
        """Initialization hook (nothing to do for the almanac)."""
        pass
    def install(self, roleid):
        """Install the almanac for a newly created role."""
        object.install(self, roleid)
    def load(self, roleid, data):
        """Load persisted state for ``roleid`` from the stored ``data`` dict."""
        object.load(self, roleid, data)
        self.roleid = roleid
        self.card = set(data['card'])
        self.equipment = set(data['equipment'])
        self.skill = set(data['skill'])
        self.combine = data['combine']
    def getData(self):
        """Serialize state into a plain dict for persistence (sets become lists)."""
        data = object.getData(self)
        data['card'] = list(self.card)
        data['equipment'] = list(self.equipment)
        data['skill'] = list(self.skill)
        data['combine'] = self.combine
        return data
    def getClientData(self):
        """Build the client-facing snapshot of the almanac."""
        data = {}
        data['almanac_card'] = list(self.card)
        data['almanac_skill'] = list(self.skill)
        data['almanac_equipment'] = list(self.equipment)
        data['almanac_combine'] = self.combine
        return data
    def addCard(self, cardid):
        """Record a newly collected card; no-op if already collected."""
        if cardid in self.card:
            return
        self.card.add(cardid)
        self.notifyCard(cardid)
        self.save()
        return
    def addSkill(self, skillid):
        """Record a newly collected skill; no-op if already collected."""
        if skillid in self.skill:
            return
        self.skill.add(skillid)
        self.notifySkill(skillid)
        self.save()
        return
    def addEquipment(self, equipmentid):
        """Record a newly collected piece of equipment; no-op if already collected."""
        if equipmentid in self.equipment:
            return
        self.equipment.add(equipmentid)
        self.notifyEquipment(equipmentid)
        self.save()
        return
    def award(self, cmbid):
        """Claim the reward for combination ``cmbid``.

        Returns an error-message dict when the combination was already
        claimed, does not exist, or is not yet complete; otherwise records
        the claim, opens the configured drop and returns the drop data.
        """
        combinaionConf = config.getConfig('almanac_combination')
        if cmbid in self.combine:
            return {'msg':'almanac_combine_already_get'}
        if not combinaionConf.has_key(cmbid):
            return {'msg':'almanac_combination_not_exist'}
        combinationInfo = combinaionConf[cmbid]
        isCombine = True
        # Complete only if every required card, skill and equipment id has
        # been collected.
        for cid in combinationInfo['combin_cardid']:
            if cid not in self.card:
                isCombine = False
        for sid in combinationInfo['combin_skillid']:
            if sid not in self.skill:
                isCombine = False
        for eid in combinationInfo['combin_equipmentid']:
            if eid not in self.equipment:
                isCombine = False
        if not isCombine:
            return {'msg':'almanac_not_combine'}
        data = {}
        self.combine.append(cmbid)
        if combinationInfo['dropid']:
            # NOTE(review): `drop` is not imported in this module's visible
            # header -- confirm it is provided elsewhere.
            drop.open(self.user, combinationInfo['dropid'], data)
        data['add_almanac_combine'] = cmbid
        self.save()
        self.user.save()
        return data
    def notifyCard(self, cardid):
        """Queue a client notification that a new card entry was unlocked."""
        usr = self.user
        if not usr.notify.has_key('almanac_notify'):
            usr.notify['almanac_notify'] = {}
        if not usr.notify['almanac_notify'].has_key('card'):
            usr.notify['almanac_notify']['card'] = []
        usr.notify['almanac_notify']['card'].append(cardid)
        usr.save()
    def notifySkill(self, skillid):
        """Queue a client notification that a new skill entry was unlocked."""
        usr = self.user
        if not usr.notify.has_key('almanac_notify'):
            usr.notify['almanac_notify'] = {}
        if not usr.notify['almanac_notify'].has_key('skill'):
            usr.notify['almanac_notify']['skill'] = []
        usr.notify['almanac_notify']['skill'].append(skillid)
        usr.save()
    def notifyEquipment(self, equipmentid):
        """Queue a client notification that a new equipment entry was unlocked."""
        usr = self.user
        if not usr.notify.has_key('almanac_notify'):
            usr.notify['almanac_notify'] = {}
        if not usr.notify['almanac_notify'].has_key('equipment'):
            usr.notify['almanac_notify']['equipment'] = []
        usr.notify['almanac_notify']['equipment'].append(equipmentid)
        usr.save()
|
from keras.datasets import cifar10
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.optimizers import SGD, Adam, RMSprop
import matplotlib.pyplot as plt
# CIFAR10: 60000 colour images of size 32x32 with 3 channels.
IMG_CHANNELS = 3
IMG_ROWS = 32
IMG_COLS = 32
# Training hyperparameters.
BATCH_SIZE = 128
NB_EPOCH = 20
NB_CLASSES = 10
VERBOSE = 1
VALIDATION_SPLIT = 0.2
OPTIM = RMSprop()
# Load the dataset.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# Convert integer labels to one-hot (categorical) vectors.
y_train = np_utils.to_categorical(y_train, NB_CLASSES)
y_test = np_utils.to_categorical(y_test, NB_CLASSES)
# Cast to float and normalize pixel values to [0, 1].
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# Define the network: one conv block followed by a dense classifier.
model = Sequential()
model.add(Conv2D(32, (3,3), padding='same',
                 input_shape=(IMG_ROWS, IMG_COLS, IMG_CHANNELS)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(NB_CLASSES))
model.add(Activation('softmax'))
model.summary()
# Train.
model.compile(loss= 'categorical_crossentropy', optimizer=OPTIM, metrics=['accuracy'])
history = model.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=NB_EPOCH,
                    validation_split=VALIDATION_SPLIT, verbose = VERBOSE)
print('Testing...')
score = model.evaluate(x_test, y_test, batch_size = BATCH_SIZE, verbose= VERBOSE)
print('\nTest Score', score[0])
print('Test Accuracy', score[1])
# Loss curves, train vs. validation.
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# Accuracy curves. NOTE(review): 'acc'/'val_acc' are the old Keras history
# keys; newer versions record 'accuracy'/'val_accuracy' -- confirm against
# the installed Keras version.
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model acc')
plt.ylabel('acc')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
from django.urls import path, include
from . import views
# Route table: every route string matches both its view-function name and
# its reverse() name, so the table can be generated from a single tuple.
urlpatterns = [
    path(_route, getattr(views, _route), name=_route)
    for _route in ('mapa', 'lugarocupado', 'propina', 'calificar')
]
|
import time
import numpy as np
from util import get_data, np_loader
if __name__ == "__main__":
    # Estimate per-channel mean/std of an image folder by streaming the
    # images one at a time (never loading the whole set into memory).
    root = "data"
    num = 1000000  # upper bound on how many images to sample
    path_list, label_list, num_to_cat = get_data(root)
    print("Total images :", len(path_list))
    n = min(len(path_list), num)  # go through the whole dataset if possible
    mean_acc = 0.0
    var_acc = 0.0
    start = time.time()
    lap = start
    for idx, img_path in enumerate(path_list[:n], start=1):
        # img has shape [W, H, C]; scale to [0, 1] before accumulating.
        img = np_loader(img_path) / 255.0
        mean_acc = mean_acc + np.mean(img, axis=(0, 1))
        # Variances can be summed/averaged; standard deviations cannot.
        var_acc = var_acc + np.var(img, axis=(0, 1))
        if idx % 100 == 0:
            now = time.time()
            print("{}/{} measured. Total time={:.2f}s. Images per second {:.2f}.".format(idx, n, now-start, 100/(now-lap)))
            lap = now
    print("set_mean = [{:4.3f}, {:4.3f}, {:4.3f}]".format(*(mean_acc/n)))
    print("set_std = [{:4.3f}, {:4.3f}, {:4.3f}]".format(*np.sqrt(var_acc/n)))
    print("var :", var_acc/n)
|
def nume_persoane(x):
    """Return the looked-up contact name coerced to a plain string."""
    as_text = str(x)
    return as_text
def nume_persoana(y):
    """Return the new contact's name coerced to a plain string."""
    return "%s" % (y,) if isinstance(y, str) else str(y)
# Interactive phone book (prompts and messages are in Romanian).
bucla = 1   # loop flag for the "add contact" loop below
bucla2 = 1  # NOTE(review): never read afterwards -- appears unused
print("Buna ziua")
# NOTE(review): the greeting reply is read but never used.
salut = input()
print("Bine ati venit in agenda telefonica calculatorului")
# Name -> phone number directory.
agenda_telefonica = {"Sebi" : 770421464, "Balau" : 748113188, "Bianca" : 768152514,
                     "Crisan" : 724248152}
print("Doriti sa accesati contactele noastre")
dorinta = input()
if dorinta == "da":
    print("Contactele noastre sunt: ", agenda_telefonica.keys())
    print("Pe cine doriti sa cautati in agenda?")
    nume = input()
    if nume_persoane(nume) in agenda_telefonica:
        print("Numarul sau este", agenda_telefonica[nume_persoane(nume)])
        # Keep offering to add new contacts until the user declines.
        while bucla == 1:
            print("Vreti sa adaugati contacte noi in agenda? ")
            dorinta2 = input()
            if dorinta2 == "da":
                print("Scrieti numele si numarul persoanei pe care vreti sa o adaugati: ")
                nume2 = input()
                numar = int(input())
                agenda_telefonica[nume_persoana(nume2)] = numar
                print("Ati adaugat cu succes pe", nume_persoana(nume2), "in contactele noastre")
                print("Agenda ta telefonica arata acum asa", agenda_telefonica.keys())
            else:
                print("ok")
                bucla = 0  # exit the add-contact loop
    else:
        print("Ne pare rau, dar ", nume_persoane(nume), "nu a fost gasita in contactele noastre")
        print("Incercati din nou")
elif dorinta == "nu":
    print("ok")
print("Va multumim ca ati folosit agenda calculatorului")
|
import unittest
import os
import sys
from scholarly import scholarly, ProxyGenerator
from scholarly.publication_parser import PublicationParser
import random
import json
from contextlib import contextmanager
class TestLuminati(unittest.TestCase):
    # Credentials must all be present in the environment for this to run.
    skipUnless = os.getenv("USERNAME") and os.getenv("PASSWORD") and os.getenv("PORT")

    @unittest.skipUnless(skipUnless, reason="No Luminati credentials found.")
    def test_luminati(self):
        """Verify that a Luminati (Bright Data) proxy can be configured."""
        pg = ProxyGenerator()
        configured = pg.Luminati(usr=os.getenv("USERNAME"),
                                 passwd=os.getenv("PASSWORD"),
                                 proxy_port=os.getenv("PORT"))
        self.assertTrue(configured)
        self.assertEqual(pg.proxy_mode, "LUMINATI")
class TestScraperAPI(unittest.TestCase):
    # An API key must be present in the environment for this to run.
    skipUnless = os.getenv('SCRAPER_API_KEY')

    @unittest.skipUnless(skipUnless, reason="No ScraperAPI key found")
    def test_scraperapi(self):
        """Verify that a ScraperAPI proxy can be configured."""
        pg = ProxyGenerator()
        configured = pg.ScraperAPI(os.getenv('SCRAPER_API_KEY'))
        self.assertTrue(configured)
        self.assertEqual(pg.proxy_mode, "SCRAPERAPI")
class TestTorInternal(unittest.TestCase):
    # Skip unless a tor binary is discoverable in one of the sys.path dirs.
    skipUnless = [_bin for path in sys.path if os.path.isdir(path) for _bin in os.listdir(path)
                  if _bin in ('tor', 'tor.exe')]

    @unittest.skipUnless(skipUnless, reason='Tor executable not found')
    def test_tor_launch_own_process(self):
        """Launch our own Tor process and issue a query through it."""
        pg = ProxyGenerator()
        # Pick the platform-appropriate executable name.
        if sys.platform.startswith("linux") or sys.platform.startswith("darwin"):
            tor_cmd = 'tor'
        elif sys.platform.startswith("win"):
            tor_cmd = 'tor.exe'
        else:
            tor_cmd = None
        sock_port = random.randrange(9000, 9500)
        control_port = random.randrange(9500, 9999)
        outcome = pg.Tor_Internal(tor_cmd, sock_port, control_port)
        self.assertTrue(outcome["proxy_works"])
        self.assertTrue(outcome["refresh_works"])
        self.assertEqual(outcome["tor_control_port"], control_port)
        self.assertEqual(outcome["tor_sock_port"], sock_port)
        # Check that we can issue a query as well
        scholarly.use_proxy(pg)
        hits = [a for a in scholarly.search_author('Ipeirotis')]
        self.assertGreaterEqual(len(hits), 1)
class TestScholarly(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """
        Set up the proxy method(s) for the whole test class.

        CONNECTION_METHOD in the environment selects the proxy backend
        (tor / tor_internal / luminati / freeproxy / scraperapi); when it
        is absent the tests run without any proxy at all.
        """
        scholarly.set_timeout(5)
        scholarly.set_retries(5)
        if "CONNECTION_METHOD" in scholarly.env:
            cls.connection_method = os.getenv("CONNECTION_METHOD")
        else:
            # No connection method configured: run proxy-less and skip the
            # proxy wiring below entirely.
            cls.connection_method = "none"
            scholarly.use_proxy(None)
            return
        # Use dual proxies for unit testing
        secondary_proxy_generator = ProxyGenerator()
        secondary_proxy_generator.FreeProxies()
        proxy_generator = ProxyGenerator()
        if cls.connection_method == "tor":
            tor_password = "scholarly_password"
            # Tor uses the 9050 port as the default socks port
            # on windows 9150 for socks and 9151 for control
            if sys.platform.startswith("linux") or sys.platform.startswith("darwin"):
                tor_sock_port = 9050
                tor_control_port = 9051
            elif sys.platform.startswith("win"):
                tor_sock_port = 9150
                tor_control_port = 9151
            else:
                tor_sock_port = None
                tor_control_port = None
            proxy_generator.Tor_External(tor_sock_port, tor_control_port,
                                         tor_password)
        elif cls.connection_method == "tor_internal":
            if sys.platform.startswith("linux") or sys.platform.startswith("darwin"):
                tor_cmd = 'tor'
            elif sys.platform.startswith("win"):
                tor_cmd = 'tor.exe'
            else:
                tor_cmd = None
            proxy_generator.Tor_Internal(tor_cmd = tor_cmd)
        elif cls.connection_method == "luminati":
            scholarly.set_retries(10)
            proxy_generator.Luminati(usr=os.getenv("USERNAME"),
                                     passwd=os.getenv("PASSWORD"),
                                     proxy_port=os.getenv("PORT"))
        elif cls.connection_method == "freeproxy":
            # Use different instances for primary and secondary
            proxy_generator = ProxyGenerator()
            proxy_generator.FreeProxies()
        elif cls.connection_method == "scraperapi":
            proxy_generator.ScraperAPI(os.getenv('SCRAPER_API_KEY'))
        else:
            # NOTE(review): this no-proxy fallback is immediately overridden
            # by the use_proxy call below -- confirm intent.
            scholarly.use_proxy(None)
        scholarly.use_proxy(proxy_generator, secondary_proxy_generator)
@staticmethod
@contextmanager
def suppress_stdout():
with open(os.devnull, "w") as devnull:
old_stdout = sys.stdout
sys.stdout = devnull
try:
yield
finally:
sys.stdout = old_stdout
def test_search_author_empty_author(self):
"""
Test that sholarly.search_author('') returns no authors
"""
authors = [a for a in scholarly.search_author('')]
self.assertIs(len(authors), 0)
@unittest.skipIf(os.getenv("CONNECTION_METHOD") in {None, "none", "freeproxy"}, reason="No robust proxy setup")
def test_search_keyword_empty_keyword(self):
"""
As of 2020-04-30, there are 6 individuals that match the name 'label'
"""
# TODO this seems like undesirable functionality for
# scholarly.search_keyword() with empty string. Surely, no authors
# should be returned. Consider modifying the method itself.
authors = [a for a in scholarly.search_keyword('')]
self.assertGreaterEqual(len(authors), 6)
@unittest.skipIf(os.getenv("CONNECTION_METHOD") in {None, "none", "freeproxy"}, reason="No robust proxy setup")
def test_search_pubs_empty_publication(self):
"""
Test that searching for an empty publication returns zero results
"""
pubs = [p for p in scholarly.search_pubs('')]
self.assertIs(len(pubs), 0)
@unittest.skipIf(os.getenv("CONNECTION_METHOD") in {None, "none", "freeproxy"}, reason="No robust proxy setup")
def test_search_pubs_citedby(self):
"""
Testing that when we retrieve the list of publications that cite
a publication, the number of citing publication is the same as
the number of papers that are returned. We use a publication
with a small number of citations, so that the test runs quickly.
The 'Machine-learned epidemiology' paper had 11 citations as of
June 1, 2020.
"""
query = 'Machine-learned epidemiology: real-time detection of foodborne illness at scale'
pubs = [p for p in scholarly.search_pubs(query)]
self.assertGreaterEqual(len(pubs), 1)
filled = scholarly.fill(pubs[0])
cites = [c for c in scholarly.citedby(filled)]
self.assertEqual(len(cites), filled['num_citations'])
@unittest.skipIf(os.getenv("CONNECTION_METHOD") in {None, "none", "freeproxy"}, reason="No robust proxy setup")
def test_search_pubs_citedby_id(self):
"""
Test querying for citations by paper ID.
The 'Machine-learned epidemiology' paper had 11 citations as of
June 1, 2020.
"""
# Machine-learned epidemiology: real-time detection of foodborne illness at scale
publication_id = 2244396665447968936
pubs = [p for p in scholarly.search_citedby(publication_id)]
self.assertGreaterEqual(len(pubs), 11)
@unittest.skipIf(os.getenv("CONNECTION_METHOD") in {None, "none", "freeproxy"}, reason="No robust proxy setup")
def test_bibtex(self):
"""
Test that we get the BiBTeX entry correctly
"""
expected_result = \
("""@inproceedings{ester1996density,
abstract = {Clustering algorithms are attractive for the task of class identification in spatial databases. """
"""However, the application to large spatial databases rises the following requirements for clustering algorithms: """
"""minimal requirements of domain knowledge to determine the input},
author = {Ester, Martin and Kriegel, Hans-Peter and Sander, J{\\"o}rg and Xu, Xiaowei and others},
booktitle = {kdd},
number = {34},
pages = {226--231},
pub_year = {1996},
title = {A density-based algorithm for discovering clusters in large spatial databases with noise.},
venue = {kdd},
volume = {96}
}
"""
)
pub = scholarly.search_single_pub("A density-based algorithm for discovering clusters in large "
"spatial databases with noise", filled=True)
result = scholarly.bibtex(pub)
self.assertEqual(result, expected_result.replace("\n ", "\n"))
@unittest.skipIf(os.getenv("CONNECTION_METHOD") in {None, "none", "freeproxy"}, reason="No robust proxy setup")
def test_search_keyword(self):
"""
Test that we can search based on specific keywords
When we search for the keyword "3d_shape" the author
Steven A. Cholewiak should be among those listed.
When we search for the keyword "Haptics", Oussama Khatib
should be listed first.
"""
# Example 1
authors = [a['name'] for a in scholarly.search_keyword('3d_shape')]
self.assertIsNot(len(authors), 0)
self.assertIn(u'Steven A. Cholewiak, PhD', authors)
# Example 2
expected_author = {'affiliation': 'Stanford University',
'citedby': 43856,
'email_domain': '@cs.stanford.edu',
'filled': [],
'interests': ['Robotics',
'Haptics',
'Human Motion Understanding'],
'name': 'Oussama Khatib',
'scholar_id': '4arkOLcAAAAJ',
'source': 'SEARCH_AUTHOR_SNIPPETS',
'url_picture': 'https://scholar.google.com/citations?view_op=medium_photo&user=4arkOLcAAAAJ'
}
search_query = scholarly.search_keyword('Haptics')
author = next(search_query)
for key in author:
if (key not in {"citedby", "container_type", "interests"}) and (key in expected_author):
self.assertEqual(author[key], expected_author[key])
self.assertGreaterEqual(author["citedby"], expected_author["citedby"])
self.assertEqual(set(author["interests"]), set(expected_author["interests"]))
def test_search_keywords(self):
query = scholarly.search_keywords(['crowdsourcing', 'privacy'])
author = next(query)
self.assertEqual(author['scholar_id'], '_cMw1IUAAAAJ')
self.assertEqual(author['name'], 'Arpita Ghosh')
self.assertEqual(author['affiliation'], 'Cornell University')
def test_search_author_single_author(self):
query = 'Steven A. Cholewiak'
authors = [a for a in scholarly.search_author(query)]
self.assertGreaterEqual(len(authors), 1)
author = scholarly.fill(authors[0])
self.assertEqual(author['name'], u'Steven A. Cholewiak, PhD')
self.assertEqual(author['scholar_id'], u'4bahYMkAAAAJ')
self.assertEqual(author['homepage'], "http://steven.cholewiak.com/")
self.assertEqual(author['organization'], 6518679690484165796)
self.assertGreaterEqual(author['public_access']['available'], 10)
self.assertEqual(author['public_access']['available'],
sum(pub.get('public_access', None) is True for pub in author['publications']))
self.assertEqual(author['public_access']['not_available'],
sum(pub.get('public_access', None) is False for pub in author['publications']))
pub = author['publications'][2]
self.assertEqual(pub['author_pub_id'], u'4bahYMkAAAAJ:LI9QrySNdTsC')
self.assertTrue('5738786554683183717' in pub['cites_id'])
# Trigger the pprint method, but suppress the output
with self.suppress_stdout():
scholarly.pprint(author)
scholarly.pprint(pub)
# Check for the complete list of coauthors
self.assertGreaterEqual(len(author['coauthors']), 20)
if len(author['coauthors']) > 20:
self.assertGreaterEqual(len(author['coauthors']), 36)
self.assertTrue('I23YUh8AAAAJ' in [_coauth['scholar_id'] for _coauth in author['coauthors']])
def test_search_author_multiple_authors(self):
"""
As of May 12, 2020 there are at least 24 'Cattanis's listed as authors
and Giordano Cattani is one of them
"""
authors = [a['name'] for a in scholarly.search_author('cattani')]
self.assertGreaterEqual(len(authors), 24)
self.assertIn(u'Giordano Cattani', authors)
def test_search_author_id(self):
"""
Test the search by author ID. Marie Skลodowska-Curie's ID is
EmD_lTEAAAAJ and these IDs are permenant
"""
author = scholarly.search_author_id('EmD_lTEAAAAJ')
self.assertEqual(author['name'], u'Marie Skลodowska-Curie')
self.assertEqual(author['affiliation'],
u'Institut du radium, University of Paris')
def test_search_author_id_filled(self):
"""
Test the search by author ID. Marie Skลodowska-Curie's ID is
EmD_lTEAAAAJ and these IDs are permenant.
As of July 2020, Marie Skลodowska-Curie has 1963 citations
on Google Scholar and 179 publications
"""
author = scholarly.search_author_id('EmD_lTEAAAAJ', filled=True)
self.assertEqual(author['name'], u'Marie Skลodowska-Curie')
self.assertEqual(author['affiliation'],
u'Institut du radium, University of Paris')
self.assertEqual(author['public_access']['available'], 0)
self.assertEqual(author['public_access']['not_available'], 0)
self.assertGreaterEqual(author['citedby'], 1963) # TODO: maybe change
self.assertGreaterEqual(len(author['publications']), 179)
pub = author['publications'][1]
self.assertEqual(pub["citedby_url"],
"https://scholar.google.com/scholar?oi=bibs&hl=en&cites=9976400141451962702")
@unittest.skipIf(os.getenv("CONNECTION_METHOD") in {None, "none", "freeproxy"}, reason="No robust proxy setup")
def test_search_pubs(self):
"""
As of May 12, 2020 there are at least 29 pubs that fit the search term:
["naive physics" stability "3d shape"].
Check that the paper "Visual perception of the physical stability of asymmetric three-dimensional objects"
is among them
"""
pub = scholarly.search_single_pub("naive physics stability 3d shape")
pubs = list(scholarly.search_pubs('"naive physics" stability "3d shape"'))
# Check that the first entry in pubs is the same as pub.
# Checking for quality holds for non-dict entries only.
for key in {'author_id', 'pub_url', 'num_citations'}:
self.assertEqual(pub[key], pubs[0][key])
for key in {'title', 'pub_year', 'venue'}:
self.assertEqual(pub['bib'][key], pubs[0]['bib'][key])
self.assertGreaterEqual(len(pubs), 27)
titles = [p['bib']['title'] for p in pubs]
self.assertIn('Visual perception of the physical stability of asymmetric three-dimensional objects', titles)
@unittest.skipIf(os.getenv("CONNECTION_METHOD") in {None, "none", "freeproxy"}, reason="No robust proxy setup")
def test_search_pubs_total_results(self):
"""
As of September 16, 2021 there are 32 pubs that fit the search term:
["naive physics" stability "3d shape"], and 17'000 results that fit
the search term ["WIEN2k Blaha"] and none for ["sdfsdf+24r+asdfasdf"].
Check that the total results for that search term equals 32.
"""
pubs = scholarly.search_pubs('"naive physics" stability "3d shape"')
self.assertGreaterEqual(pubs.total_results, 32)
pubs = scholarly.search_pubs('WIEN2k Blaha')
self.assertGreaterEqual(pubs.total_results, 10000)
pubs = scholarly.search_pubs('sdfsdf+24r+asdfasdf')
self.assertEqual(pubs.total_results, 0)
    @unittest.skipIf(os.getenv("CONNECTION_METHOD") in {None, "none", "freeproxy"}, reason="No robust proxy setup")
    def test_search_pubs_filling_publication_contents(self):
        '''
        This process checks the process of filling a publication that is derived
        from the search publication snippets.
        '''
        query = 'Creating correct blur and its effect on accommodation'
        results = scholarly.search_pubs(query)
        pubs = [p for p in results]
        self.assertGreaterEqual(len(pubs), 1)
        # fill() fetches the complete bibliographic record for the snippet.
        f = scholarly.fill(pubs[0])
        self.assertTrue(f['bib']['author'] == u'Cholewiak, Steven A and Love, Gordon D and Banks, Martin S')
        self.assertTrue(f['author_id'] == ['4bahYMkAAAAJ', '3xJXtlwAAAAJ', 'Smr99uEAAAAJ'])
        self.assertTrue(f['bib']['journal'] == u'Journal of Vision')
        self.assertTrue(f['bib']['number'] == '9')
        self.assertTrue(f['bib']['pages'] == u'1--1')
        self.assertTrue(f['bib']['publisher'] == u'The Association for Research in Vision and Ophthalmology')
        self.assertTrue(f['bib']['title'] == u'Creating correct blur and its effect on accommodation')
        self.assertTrue(f['pub_url'] == u'https://jov.arvojournals.org/article.aspx?articleid=2701817')
        self.assertTrue(f['bib']['volume'] == '18')
        self.assertTrue(f['bib']['pub_year'] == u'2018')
    def test_extract_author_id_list(self):
        '''
        This unit test tests the extraction of the author id field from the html to populate the `author_id` field
        in the Publication object.
        '''
        # Case 1: every author is hyperlinked to a profile.
        author_html_full = '<a href="/citations?user=4bahYMkAAAAJ&hl=en&oi=sra">SA Cholewiak</a>, <a href="/citations?user=3xJXtlwAAAAJ&hl=en&oi=sra">GD Love</a>, <a href="/citations?user=Smr99uEAAAAJ&hl=en&oi=sra">MS Banks</a> - Journal of vision, 2018 - jov.arvojournals.org'
        pub_parser = PublicationParser(None)
        author_id_list = pub_parser._get_author_id_list(author_html_full)
        self.assertTrue(author_id_list[0] == '4bahYMkAAAAJ')
        self.assertTrue(author_id_list[1] == '3xJXtlwAAAAJ')
        self.assertTrue(author_id_list[2] == 'Smr99uEAAAAJ')
        # Case 2: only some authors have profiles; the linked one is at index 3.
        author_html_partial = "A Bateman, J O'Connell, N Lorenzini, <a href=\"/citations?user=TEndP-sAAAAJ&hl=en&oi=sra\">T Gardner</a>โฆ - BMC psychiatry, 2016 - Springer"
        pub_parser = PublicationParser(None)
        author_id_list = pub_parser._get_author_id_list(author_html_partial)
        self.assertTrue(author_id_list[3] == 'TEndP-sAAAAJ')
    # NOTE(review): method name has a typo ("serialiazation"); renaming would change
    # the test id that unittest discovers, so it is left as-is.
    def test_serialiazation(self):
        """
        Test that we can serialize the Author and Publication types
        Note: JSON converts integer keys to strings, resulting in the years
        in `cites_per_year` dictionary as `str` type instead of `int`.
        To ensure consistency with the typing, use `object_hook` option
        when loading to convert the keys to integers.
        """
        # Test that a filled Author with unfilled Publication
        # is serializable.
        def cpy_decoder(di):
            """A utility function to convert the keys in `cites_per_year` to `int` type.
            This ensures consistency with `CitesPerYear` typing.
            """
            if "cites_per_year" in di:
                di["cites_per_year"] = {int(k): v for k,v in di["cites_per_year"].items()}
            return di

        author = scholarly.search_author_id('EmD_lTEAAAAJ', filled=True)
        serialized = json.dumps(author)
        author_loaded = json.loads(serialized, object_hook=cpy_decoder)
        self.assertEqual(author, author_loaded)
        # Test that a loaded publication is still fillable and serializable.
        pub = author_loaded['publications'][0]
        scholarly.fill(pub)
        serialized = json.dumps(pub)
        pub_loaded = json.loads(serialized, object_hook=cpy_decoder)
        self.assertEqual(pub, pub_loaded)
    def test_full_title(self):
        """
        Test if the full title of a long title-publication gets retrieved.
        The code under test gets executed if:
        publication['source'] == PublicationSource.AUTHOR_PUBLICATION_ENTRY
        so the long title-publication is taken from an author object.
        """
        author = scholarly.search_author_id('Xxjj6IsAAAAJ')
        author = scholarly.fill(author, sections=['publications'])
        # Locate the target publication by its author_pub_id.
        pub_index = -1
        for i in range(len(author['publications'])):
            if author['publications'][i]['author_pub_id'] == 'Xxjj6IsAAAAJ:u_35RYKgDlwC':
                pub_index = i
        self.assertGreaterEqual(i, 0)
        # elided title
        self.assertEqual(author['publications'][pub_index]['bib']['title'],
                         u'Evaluation of toxicity of Dichlorvos (Nuvan) to fresh water fish Anabas testudineus and possible modulation by crude aqueous extract of Andrographis paniculata: A preliminary โฆ')
        # full text
        pub = scholarly.fill(author['publications'][pub_index])
        self.assertEqual(pub['bib']['title'],
                         u'Evaluation of toxicity of Dichlorvos (Nuvan) to fresh water fish Anabas testudineus and possible modulation by crude aqueous extract of Andrographis paniculata: A preliminary investigation')
    def test_author_organization(self):
        """
        Test organization search and searching authors by organization id.
        """
        organization_id = 4836318610601440500  # Princeton University
        organizations = scholarly.search_org("Princeton University")
        self.assertEqual(len(organizations), 1)
        organization = organizations[0]
        self.assertEqual(organization['Organization'], "Princeton University")
        self.assertEqual(organization['id'], str(organization_id))
        # The first author returned for this organization is stable over time.
        search_query = scholarly.search_author_by_organization(organization_id)
        author = next(search_query)
        self.assertEqual(author['scholar_id'], "ImhakoAAAAAJ")
        self.assertEqual(author['name'], "Daniel Kahneman")
        self.assertEqual(author['email_domain'], "@princeton.edu")
        self.assertEqual(author['affiliation'], "Princeton University (Emeritus)")
        self.assertGreaterEqual(author['citedby'], 438891)
    def test_coauthors(self):
        """
        Test that we can fetch long (20+) and short list of coauthors
        """
        # Short list: fits on the profile page (<= 20 entries).
        author = scholarly.search_author_id('7Jl3PIoAAAAJ')
        scholarly.fill(author, sections=['basics', 'coauthors'])
        self.assertEqual(author['name'], "Victor Silva")
        self.assertLessEqual(len(author['coauthors']), 20)
        # If the above assertion fails, pick a different author profile
        self.assertGreaterEqual(len(author['coauthors']), 6)
        self.assertIn('Eleni Stroulia', [_coauth['name'] for _coauth in author['coauthors']])
        self.assertIn('TyM1dLwAAAAJ', [_coauth['scholar_id'] for _coauth in author['coauthors']])
        # Long list: requires the expanded coauthors view.
        author = scholarly.search_author_id('PA9La6oAAAAJ')
        scholarly.fill(author, sections=['basics', 'coauthors'])
        self.assertEqual(author['name'], "Panos Ipeirotis")
        self.assertGreaterEqual(len(author['coauthors']), 20)
        # Don't break the build if the long list cannot be fetch.
        # Chrome/Geckodriver are mentioned only as optional dependencies.
        if (len(author['coauthors']) > 20):
            self.assertIn('Eduardo Ruiz', [_coauth['name'] for _coauth in author['coauthors']])
            self.assertIn('hWq7jFQAAAAJ', [_coauth['scholar_id'] for _coauth in author['coauthors']])
    def test_public_access(self):
        """
        Test that we obtain public access information
        We check two cases: 1) when number of public access mandates exceeds
        100, thus requiring fetching information from a second page and 2) fill
        public access counts without fetching publications.
        """
        author = scholarly.search_author_id("7x48vOkAAAAJ")
        scholarly.fill(author, sections=['basics', 'public_access', 'publications'])
        self.assertGreaterEqual(author["public_access"]["available"], 110)
        # The aggregate counts must agree with the per-publication flags.
        self.assertEqual(author["public_access"]["available"],
                         sum(pub.get("public_access", None) is True for pub in author["publications"]))
        self.assertEqual(author["public_access"]["not_available"],
                         sum(pub.get("public_access", None) is False for pub in author["publications"]))
        # Case 2: counts only, no publications section requested.
        author = next(scholarly.search_author("Daniel Kahneman"))
        scholarly.fill(author, sections=["basics", "indices", "public_access"])
        self.assertEqual(author["scholar_id"], "ImhakoAAAAAJ")
        self.assertGreaterEqual(author["public_access"]["available"], 6)
    def test_related_articles_from_author(self):
        """
        Test that we obtain related articles to an article from an author
        """
        author = scholarly.search_author_id("ImhakoAAAAAJ")
        scholarly.fill(author, sections=['basics', 'publications'])
        pub = author['publications'][0]
        self.assertEqual(pub['bib']['title'], 'Prospect theory: An analysis of decision under risk')
        related_articles = scholarly.get_related_articles(pub)
        # Typically, the same publication is returned as the most related article
        same_article = next(related_articles)
        for key in {'pub_url', 'num_citations'}:
            self.assertEqual(pub[key], same_article[key])
        for key in {'title', 'pub_year'}:
            self.assertEqual(str(pub['bib'][key]), (same_article['bib'][key]))
        # These may change with time
        related_article = next(related_articles)
        self.assertEqual(related_article['bib']['title'], 'Choices, values, and frames')
        self.assertEqual(related_article['bib']['pub_year'], '2013')
        self.assertGreaterEqual(related_article['num_citations'], 16561)
        self.assertIn("A Tversky", related_article['bib']['author'])
    @unittest.skipIf(os.getenv("CONNECTION_METHOD") in {None, "none", "freeproxy"}, reason="No robust proxy setup")
    def test_related_articles_from_publication(self):
        """
        Test that we obtain related articles to an article from a search
        """
        pub = scholarly.search_single_pub("Planck 2018 results-VI. Cosmological parameters")
        related_articles = scholarly.get_related_articles(pub)
        # Typically, the same publication is returned as the most related article
        same_article = next(related_articles)
        for key in {'author_id', 'pub_url', 'num_citations'}:
            self.assertEqual(pub[key], same_article[key])
        for key in {'title', 'pub_year'}:
            self.assertEqual(pub['bib'][key], same_article['bib'][key])
        # These may change with time
        related_article = next(related_articles)
        self.assertEqual(related_article['bib']['title'], 'Large Magellanic Cloud Cepheid standards provide '
                         'a 1% foundation for the determination of the Hubble constant and stronger evidence '
                         'for physics beyond ฮCDM')
        self.assertEqual(related_article['bib']['pub_year'], '2019')
        self.assertGreaterEqual(related_article['num_citations'], 1388)
        self.assertIn("AG Riess", related_article['bib']['author'])
    def test_author_custom_url(self):
        """
        Test that we can use custom URLs for retrieving author data
        """
        # Label-based author search via a raw citations URL.
        query_url = "/citations?hl=en&view_op=search_authors&mauthors=label%3A3d_shape"
        authors = scholarly.search_author_custom_url(query_url)
        self.assertIn(u'Steven A. Cholewiak, PhD', [author['name'] for author in authors])
    @unittest.skipIf(os.getenv("CONNECTION_METHOD") in {None, "none", "freeproxy"}, reason="No robust proxy setup")
    def test_pubs_custom_url(self):
        """
        Test that we can use custom URLs for retrieving publication data
        """
        # Advanced-search URL with year bounds and an OR term.
        query_url = ('/scholar?as_q=&as_epq=&as_oq=SFDI+"modulated+imaging"&as_eq=&as_occt=any&as_sauthors=&'
                     'as_publication=&as_ylo=2005&as_yhi=2020&hl=en&as_sdt=0%2C31')
        pubs = scholarly.search_pubs_custom_url(query_url)
        pub = next(pubs)
        self.assertEqual(pub['bib']['title'], 'Quantitation and mapping of tissue optical properties using modulated imaging')
        self.assertEqual(set(pub['author_id']), {'V-ab9U4AAAAJ', '4k-k6SEAAAAJ', 'GLm-SaQAAAAJ'})
        self.assertEqual(pub['bib']['pub_year'], '2009')
        self.assertGreaterEqual(pub['num_citations'], 581)
# Standard unittest entry point when this file is run directly.
if __name__ == '__main__':
    unittest.main()
|
from django.contrib import admin
from orders.models import Order
from django.contrib.auth.admin import User
from .models import *
class SystemoptionsAdmin(admin.ModelAdmin):
    """Admin list configuration for Systemoptions (notification switches)."""
    # NOTE: the inner `class Meta: model = ...` previously here was dead code;
    # ModelAdmin does not read an inner Meta class -- the model is bound by
    # admin.site.register() below.
    list_display = ["id", "email_send", "get_email_pool", "phone_send", "get_phone_pool", "email_from"]
    list_editable = ["email_send", "phone_send", "email_from"]

admin.site.register(Systemoptions, SystemoptionsAdmin)
class EmailwebserviceAdmin(admin.ModelAdmin):
    """Admin list configuration for Emailwebservice (outgoing SMTP settings)."""
    # NOTE: the inner `class Meta: model = ...` previously here was dead code;
    # ModelAdmin ignores an inner Meta class.
    list_display = ["id", "email_use_tls", "email_host", "email_port", "email_host_user", "email_host_password", "default_from_email", "default_to_email"]
    list_editable = ["email_use_tls", "email_host", "email_port", "email_host_user", "email_host_password", "default_from_email", "default_to_email"]

admin.site.register(Emailwebservice, EmailwebserviceAdmin)
class EmailstaffAdmin(admin.ModelAdmin):
    """Admin list configuration for Emailstaff (manager notification emails)."""
    # NOTE: the inner `class Meta: model = ...` previously here was dead code;
    # ModelAdmin ignores an inner Meta class.
    list_display = ["id", "email_manager", "user_manager"]
    list_editable = ["email_manager", "user_manager"]

admin.site.register(Emailstaff, EmailstaffAdmin)
class PhonestaffAdmin(admin.ModelAdmin):
    """Admin list configuration for Phonestaff (manager notification phones)."""
    # NOTE: the inner `class Meta: model = ...` previously here was dead code;
    # ModelAdmin ignores an inner Meta class.
    list_display = ["id", "phone_manager", "user_manager"]
    list_editable = ["phone_manager", "user_manager"]

admin.site.register(Phonestaff, PhonestaffAdmin)
|
# Smoke-test placeholder: prints a fixed marker when the file is executed.
message = "hello tests"
print(message)
|
import sys
import compilador.helpers.file_parser
from compilador.helpers.file_parser import *
import compilador.vm.virtual_machine
from compilador.vm.virtual_machine import *
import game_engine.engine
from game_engine.engine import *
# EXECUTER CLASS
# Bridges the parser, the virtual machine and the game engine.
class Executer(object):
    """Compile a source file and optionally execute the result in the game engine."""

    def __init__(self, running_file):
        """Parse `running_file` and keep the artifacts required to run it."""
        self.running_file = running_file          # source file name
        self.data = parser_file(running_file)     # payload returned by the parser
        self.quads = self.data["q"]               # quadruples
        self.pretty_quads = self.data["str"]      # quadruples, printable form
        self.function_table = self.data["ft"]     # function table

    # Dump quadruples, either before or after address assignment.
    def __print_quads(self, pre_quads):
        if not pre_quads:
            print("Program Quads after assignations:")
            for key in self.quads:
                print("--{}----------------------------------".format(key))
                self.quads[key].print_quad()
            print("-------------------------------------")
        else:
            print("Program Quads before assignations:")
            print("-------------------------------------")
            print(self.pretty_quads)

    # Dump the instruction list produced by the VM.
    def __print_instructions(self, instructions):
        print("\nResulting Instructions:")
        print("-------------------------------------")
        Instruction.print_instructions(instructions)

    # Announce which file is being executed.
    def __print_running(self):
        print("Running: {}".format(self.running_file))
        print("-------------------------------------")

    # Start the engine on level one with its character set.
    def __load_level_one(self, instructions):
        Engine.start({"pepe": Character(0, 0, 50, 50, 50)}, instructions, "one")

    # Start the engine on level two with its character set.
    def __load_level_two(self, instructions):
        cast = {
            "dinoAdrian": Character(0, 400, 50, 50, 50),
            "rositaFresita": Character(0, 300, 50, 50, 50),
        }
        Engine.start(cast, instructions, "two")

    def run(self, **kwargs):
        """Run the VM over the quadruples; optionally print artifacts and launch a level."""
        if kwargs.get("print_pre_quads"):
            self.__print_quads(True)
        vm = VirtualMachine(3000, 1000, 6000, self.function_table)
        if kwargs.get("print_post_quads"):
            self.__print_quads(False)
        if kwargs.get("print_running"):
            self.__print_running()
        instructions = vm.run(self.quads)
        if kwargs.get("print_instructions"):
            self.__print_instructions(instructions)
        wants_game = kwargs.get("run_game") or kwargs.get("play_level")
        if kwargs.get("return_quads") and not wants_game:
            return self.quads
        if wants_game and len(instructions):
            requested_level = kwargs.get("play_level")
            if requested_level == 1:
                self.__load_level_one(instructions)
            elif requested_level == 2:
                self.__load_level_two(instructions)
        return instructions
|
import os
#from google.appengine.api import memcache
from google.appengine.api import users
#from google.appengine.ext import db
from google.appengine.ext.webapp import template
#import src.accounts as accounts
#from app.model.account import Account
from app.model.accounts import Accounts
# Tools
# -----
class Context():
    """Per-request view context: authentication state, account, template helpers."""
    # Class-level defaults; overridden per instance in __init__.
    isAdmin = False
    isAuthenticated = False
    isLocal = False
    account = None
    authenticateUrl = ''
    authenticateText = ''
    appName = 'My-Trips'

    def __init__(self, request):
        user = users.get_current_user()
        if user:
            # Authenticated on Google, but not yet in this system
            self.account = Accounts.loadByID(user.user_id())
            # self.name = self.account.name
        if self.account:  # Authenticated
            self.isAuthenticated = True
            self.isAdmin = users.is_current_user_admin()
            self.authenticateUrl = users.create_logout_url('/')
            self.authenticateText = 'Logout'
        else:  # Not authenticated
            self.isAuthenticated = False
            self.isAdmin = False
            self.authenticateUrl = users.create_login_url(request.uri)
            self.authenticateText = 'Login'
        # set the Debug option
        # BUG FIX: the original assigned a *local* variable `isLocal`, so the
        # attribute never changed; assign the instance attribute instead.
        if request.host_url.startswith("http://localhost"):
            self.isLocal = True

    def template(self, template):
        """Return the absolute path of `template` under the templates directory."""
        return os.path.join(os.path.dirname(__file__), '../..', 'templates', template)

    def authenticate(self, response):
        """Render the login page, or the invite page for Google users without an Account.

        Scenarios:
        - user is None and account is None -> real authenticate
        - user set but account is None -> Google-authenticated, no Account: invite code
        """
        user = users.get_current_user()
        if not user:
            response.out.write(template.render(self.template('core-login.html'), {'context': self}))
        else:
            response.out.write(template.render(self.template('core-invite.html'), {'context': self}))

    def noPermission(self, response):
        """Render the generic permission-denied error page."""
        return response.out.write(template.render(self.template('core-error.html'), {'context': self}))
|
from flask import *

app = Flask(__name__)


@app.route('/')
def index():
    """Serve the landing page."""
    return render_template("index.html")


@app.route('/aaaa', methods=["POST"])
def res_json():
    """Return a fixed JSON payload for POST /aaaa."""
    payload = {"XXXXXXXX": "YYYYYYYYYYYYYYYYYYY"}
    return jsonify(payload)


if __name__ == '__main__':
    # Bind to all interfaces so the dev server is reachable from other hosts.
    app.run(debug=True, host='0.0.0.0', port=5000)
|
import sys
from datetime import datetime
from bs4 import BeautifulSoup
import numpy
import pandas as pd
import requests
import backtrader as bt
import pprint
from dateutil import relativedelta
# https://stackoverflow.com/questions/21806496/pandas-seems-to-ignore-first-column-name-when-reading-tab-delimited-data-gives
def remove_bom(filename):
    """Open `filename` for reading with any UTF-8 byte-order mark stripped.

    BUG FIX: the original skipped exactly one character unconditionally, which
    corrupts the first column name when the file has no BOM (and is wrong for
    multi-byte BOM bytes under non-UTF-8 locales). The `utf-8-sig` codec
    strips a BOM only when present. The caller (pd.read_csv) closes the file.
    """
    return open(filename, encoding='utf-8-sig')
class SmaCross(bt.Strategy):
    """Golden/death-cross strategy: go long when the fast SMA crosses above the slow SMA,
    close the position when it crosses back below."""
    # list of parameters which are configurable for the strategy
    params = dict(
        pfast=50,  # period for the fast moving average
        pslow=200  # period for the slow moving average
    )
    #curdate = 0
    def __init__(self):
        sma1 = bt.ind.SMA(period=self.p.pfast)  # fast moving average
        sma2 = bt.ind.SMA(period=self.p.pslow)  # slow moving average
        # CrossOver is +1 when sma1 crosses above sma2, -1 when it crosses below.
        self.crossover = bt.ind.CrossOver(sma1, sma2)  # crossover signal
        # Date of the most recent buy (set in next()).
        self.curdate = 0
    def next(self):
        if not self.position:  # not in the market
            if self.crossover > 0:  # if fast crosses slow to the upside
                self.buy()  # enter long
                self.curdate = self.datetime.date(ago=0)
        # close out at 9 months. compare self.position.datetime - buy datetime
        elif self.crossover < 0:  # in the market & cross to the downside
            self.close()  # close long position
def get_day_rows_till_sell(start_index, stream):
    """Collect rows from `start_index` up to (excluding) the next row with a 'sell' signal.

    BUG FIX: the original fell off the end and implicitly returned None when no
    later 'sell' row existed, making callers crash when iterating the result;
    the accumulated rows are now returned on every path.
    """
    days_after_sell = []
    for index, row in stream[start_index:].iterrows():
        if numpy.isnan(row['sell']):
            days_after_sell.append(row)
        else:
            return days_after_sell
    return days_after_sell
def get_day_rows_till_buy(start_index, stream):
    """Collect rows from `start_index` up to (excluding) the next row with a 'buy' signal.

    BUG FIX: the original fell off the end and implicitly returned None when no
    later 'buy' row existed; the accumulated rows are now returned on every path.
    """
    days_after_sell = []
    for index, row in stream[start_index:].iterrows():
        if numpy.isnan(row['buy']):
            days_after_sell.append(row)
        else:
            return days_after_sell
    return days_after_sell
def get_day_rows_n_timespan(start_index, stream, date):
    """Collect rows from `start_index` while they fall within ~5 months of `date`.

    Stops at the first row outside the window or whose 'datetime' is not a
    string. BUG FIX: the original implicitly returned None when the frame was
    exhausted; the accumulated rows are now returned on every path.
    """
    days = []
    start_date = datetime.strptime(format_date(date), "%Y-%m-%d")
    for index, row in stream[start_index:].iterrows():
        if type(row['datetime']) == str:
            current_date = datetime.strptime(format_date(row['datetime']), "%Y-%m-%d")
            r = relativedelta.relativedelta(current_date, start_date)
            # months is zero-based: "within 5 months" means r.months < 5.
            # NOTE(review): r.months ignores whole years (a 1-year gap has
            # months == 0) -- confirm that input spans stay under a year.
            if r.months < 5:
                days.append(row)
            else:
                return days
        else:
            return days
    return days
def format_date(date):
    """Return the date portion (text before the first space) of a timestamp string."""
    day_part, _, _ = date.partition(" ")
    return day_part
def get_trends_n_timespan_buy(stream):
    """For each 'buy' row, check whether the ~5-month forward average close exceeds the buy price.

    Returns {'title': ..., 'values': [...]} where each value records the buy
    date/price, the forward average price and the boolean verdict.
    """
    trends = {}
    values = []
    for index, row in stream.iterrows():
        average_price = 0
        cumulative_price = 0
        if not numpy.isnan(row['buy']):
            buy_date = row['datetime']
            buy_price = row['adjclose']
            # ROBUSTNESS FIX: the window can be empty or None at the end of the
            # data; guard against iterating None and dividing by zero.
            days_after_buy = get_day_rows_n_timespan(index + 1, stream, buy_date) or []
            days = 0
            for day in days_after_buy:
                days += 1
                cumulative_price += day['adjclose']
            if days:
                average_price = cumulative_price / days
            valid_sell = average_price > buy_price
            values.append({'valid sell': valid_sell, 'sell date': buy_date, 'average price': average_price,
                           'close price at buy': buy_price})
    trends['title'] = stream.columns[1]
    trends['values'] = values
    return trends
def get_trends_n_timespan_sell(stream):
    """For each 'sell' row, check whether the ~5-month forward average close is below the sell price.

    Returns {'title': ..., 'values': [...]} where each value records the sell
    date/price, the forward average price and the boolean verdict.
    """
    trends = {}
    values = []
    for index, row in stream.iterrows():
        average_price = 0
        cumulative_price = 0
        if not numpy.isnan(row['sell']):
            sell_date = row['datetime']
            sell_price = row['adjclose']
            # ROBUSTNESS FIX: the window can be empty or None at the end of the
            # data; guard against iterating None and dividing by zero.
            days_after_buy = get_day_rows_n_timespan(index + 1, stream, sell_date) or []
            days = 0
            for day in days_after_buy:
                days += 1
                cumulative_price += day['adjclose']
            if days:
                average_price = cumulative_price / days
            valid_sell = average_price < sell_price
            values.append({'valid sell': valid_sell, 'sell date': sell_date, 'average price': average_price,
                           'close price at sell': sell_price})
    trends['title'] = stream.columns[1]
    trends['values'] = values
    return trends
# get trends of stock from each buy to a sell, Golden Cross
def get_trends_next_sell(stream):
    """For each 'buy' row, check whether the average close until the next 'sell'
    signal exceeds the close at buy time. Returns {'title', 'values'}."""
    trends = {}
    values = []
    for index, row in stream.iterrows():
        average_price = 0
        cumulative_price = 0
        if not numpy.isnan(row['buy']):
            # current_date = row['datetime']
            close_price = row['adjclose']
            days_after_buy = get_day_rows_till_sell(index + 1, stream)
            days = 0
            # Only average when rows were returned (can be None/empty at end of data).
            if days_after_buy:
                for day in days_after_buy:
                    days += 1
                    cumulative_price += day['adjclose']
                average_price = cumulative_price / days
            # NOTE(review): when no rows follow, average_price stays 0 and the
            # entry is still appended with valid_sell False -- confirm intended.
            valid_sell = average_price > close_price
            buy_date = row['datetime']
            values.append({'valid sell': valid_sell, 'date': buy_date, 'average price': average_price,
                           'close price': close_price})
            #values.append({'valid sell': valid_sell, 'sell date': sell_date, 'close price': close_price,
            #               'average price': average_price})
    trends['title'] = stream.columns[1]
    trends['values'] = values
    return trends
# get trends of stock from each sell to next buy, Golden Cross
def get_trends_next_buy(stream):
    """For each 'sell' row, check whether the average close until the next 'buy'
    signal is below the close at sell time. Returns {'title', 'values'}."""
    trends = {}
    values = []
    for index, row in stream.iterrows():
        average_price = 0
        cumulative_price = 0
        if not numpy.isnan(row['sell']):
            # current_date = row['datetime']
            close_price = row['adjclose']
            days_after_buy = get_day_rows_till_buy(index + 1, stream)
            days = 0
            # Only average when rows were returned (can be None/empty at end of data).
            if days_after_buy:
                for day in days_after_buy:
                    days += 1
                    cumulative_price += day['adjclose']
                average_price = cumulative_price / days
            # NOTE(review): when no rows follow, average_price stays 0 and the
            # entry is still appended -- confirm intended.
            valid_sell = average_price < close_price
            buy_date = row['datetime']
            values.append({'valid sell': valid_sell, 'date': buy_date, 'average price': average_price,
                           'close price': close_price})
            #values.append({'valid sell': valid_sell, 'sell date': sell_date, 'close price': close_price,
            #               'average price': average_price})
    trends['title'] = stream.columns[1]
    trends['values'] = values
    return trends
def validate_trends(trends):
    """Annotate `trends` in place with the fraction of entries flagged 'valid sell'.

    Sets trends['percent correct']. ROBUSTNESS FIX: an empty 'values' list now
    yields 0.0 instead of raising ZeroDivisionError.
    """
    entries = trends['values']
    if not entries:
        trends['percent correct'] = 0.0
        return
    total_true = 0.0
    for trend in entries:
        if trend['valid sell']:
            total_true += 1
    trends['percent correct'] = total_true / len(entries)
def get_overall_correct(trends):
    """Return the mean 'percent correct' across a list of trend dicts.

    BUG FIX: the original mixed two interpretations of `trends` -- it computed
    the count from trends['values'] but iterated trend dicts for the sum --
    which raised TypeError for any input. It now averages over the list of
    trend dicts and returns 0.0 for an empty list.
    """
    if not trends:
        return 0.0
    cumulative_percent = 0.0
    for trend in trends:
        cumulative_percent += trend['percent correct']
    return cumulative_percent / len(trends)
def get_average_days(trends):
    """Annotate `trends` in place with the mean day-gap from the first entry's date.

    BUG FIX: relativedelta(...).days holds only the day *component* (0-30) of
    the gap, silently dropping whole months/years; real date subtraction is
    used instead. Fewer than two entries yields 0.
    """
    entries = trends['values']
    if len(entries) < 2:
        trends['average days'] = 0
        return
    first_date = datetime.strptime(format_date(entries[0]['date']), "%Y-%m-%d")
    cumulative_days = 0
    for trend in entries[1:]:
        later_date = datetime.strptime(format_date(trend['date']), "%Y-%m-%d")
        cumulative_days += (later_date - first_date).days
    trends['average days'] = cumulative_days / len(entries)
def get_sp500():
    '''
    Goes to Wikipedia to get ticker symbols, and CIK codes for S&P 500 companies
    '''
    url = 'https://en.wikipedia.org/wiki/List_of_S%26P_500_companies'
    page_text = requests.get(url).text
    soup = BeautifulSoup(page_text, 'lxml')
    tickers = dict()
    # Skip the header row; the table holds one company per <tr>.
    for company_row in soup.find_all('tr')[1:506]:
        cells = company_row.find_all('td')
        # slice into the row to grab ticker (col 0) and CIK (col 7)
        tickers[cells[0].get_text()] = cells[7].get_text()
    return tickers
def run_stock(name):
    """Backtest the SMA-cross strategy on ticker `name` (2009-2019), analyze the
    buy/sell trends from the writer CSV, print both reports, plot, and return
    (golden_cross_trends, death_cross_trends)."""
    cerebro = bt.Cerebro()  # create a "Cerebro" engine instance
    # Create a data feed
    data = bt.feeds.YahooFinanceData(dataname=name,
                                     fromdate=datetime(2009, 1, 1),
                                     todate=datetime(2019, 12, 31))
    cerebro.adddata(data)  # Add the data feed
    cerebro.addstrategy(SmaCross)  # Add the trading strategy
    # The writer dumps the full run (incl. buy/sell columns) to CSV for analysis.
    cerebro.addwriter(bt.WriterFile, csv=True, out='test_file.csv')
    cerebro.run()  # run it all
    # header=1: the writer's first line is metadata; BOM handled by remove_bom.
    df = pd.read_csv(remove_bom('test_file.csv'), header=1)
    # get golden cross trends and print results
    #trends_golden_cross = get_trends_n_timespan_buy(df)
    trends_golden_cross = get_trends_next_sell(df)
    validate_trends(trends_golden_cross)
    #get_average_days(trends_golden_cross)
    pp = pprint.PrettyPrinter(indent=1)
    print('\nGolden Cross:')
    #pp.pprint(trends_golden_cross['average days'])
    pp.pprint(trends_golden_cross)
    #pp.pprint(trends_golden_cross['title'])
    #pp.pprint(trends_golden_cross['percent correct'])
    #pp.pprint(trends_golden_cross['average days'])
    # get death cross trends and print results
    print('\nDeath Cross')
    trends_death_cross = get_trends_n_timespan_sell(df)
    #trends_death_cross = get_trends_next_buy(df)
    validate_trends(trends_death_cross)
    #get_average_days(trends_death_cross)
    pp.pprint(trends_death_cross)
    #pp.pprint(trends_death_cross['title'])
    #pp.pprint(trends_death_cross['percent correct'])
    #pp.pprint(trends_death_cross['average days'])
    cerebro.plot()  # and plot it with a single command
    return trends_golden_cross, trends_death_cross
'''
sp500_stocks = get_sp500()
stock_count = 1
cumulative_percent_correct_golden_cross = 0.0
cumulative_percent_correct_death_cross = 0.0
cumulative_days_golden_cross = 0.0 #golden cross
cumulative_days_death_cross = 0.0 #death cross
for ticker, cik in sp500_stocks.items():
try:
if stock_count < 600:
trends = run_stock('{}'.format(ticker).rstrip())
cumulative_percent_correct_golden_cross += trends[0]['percent correct']
cumulative_percent_correct_death_cross += trends[1]['percent correct']
cumulative_days_golden_cross += trends[0]['average days']
cumulative_days_death_cross += trends[1]['average days']
stock_count += 1
print('Ticker ' + '{}'.format(ticker).rstrip() + ' done. ' + 'Count: ' + str(stock_count - 1))
else:
break
except:
print('Exception: ' + 'Ticker: ' + '{}'.format(ticker).rstrip() + '\n')
overall_golden_cross_percent_correct = cumulative_percent_correct_golden_cross / (stock_count - 1)
overall_percent_correct_death_cross = cumulative_percent_correct_death_cross / (stock_count - 1)
overall_days_golden_cross = cumulative_days_golden_cross / (stock_count - 1)
overall_days_death_cross = cumulative_days_death_cross / (stock_count - 1)
print('SP500 Percent Correct Golden Cross: ' + str(overall_golden_cross_percent_correct) + '\n')
print('SP500 Percent Correct Death Cross: ' + str(overall_percent_correct_death_cross) + '\n')
#print('SP500 Buy to Sell Average Days (Golden Cross): ' + str(overall_days_golden_cross) + '\n')
#print('SP500 Sell to Buy Average Days (Death Cross): ' + str(overall_days_death_cross) + '\n')
'''
# well performing
# Script entry point: run the full analysis for a single ticker.
run_stock('AAPL')
'''
#run_stock('GOOGL')
run_stock('MNST')
# poor performing
run_stock('AIG')
run_stock('XRX')
# mediocre
run_stock('RF')
run_stock('IPG')
run_stock('AMZN')
run_stock('AVP')
run_stock('WU')
run_stock('M')
'''
|
from django.contrib import admin
from .models import Parliament1
# Register your models here.
# Expose Parliament1 in the admin with the default ModelAdmin options.
admin.site.register(Parliament1)
|
# -*- coding: utf-8 -*-
import random
from dataPreprocess import dataPreprocess
from public_functions import *
__author__ = 'benywon'
# NOTE(review): this module is Python 2 (print statements, xrange); kept as-is.
class insuranceQAPreprocess(dataPreprocess):
    # Loads the insuranceQA corpus: builds TRAIN (question/positive/negative
    # triples via random negative sampling), TEST/DEV (pooled candidates with
    # 0/1 labels) and the vocabulary, or restores a cached pickle.
    def __init__(self,
                 neg_low=15,
                 neg_high=30,
                 Max_length=50,
                 **kwargs):
        # neg_low/neg_high: bounds for the number of negative samples per
        # positive answer; Max_length: truncation length for token sequences.
        dataPreprocess.__init__(self, **kwargs)
        self.Max_length = Max_length
        self.neg_high = neg_high
        self.neg_low = neg_low
        self.path = self.path_base + 'insuranceQA/'
        append_str = '_batch' if self.batch_training else ''
        self.data_pickle_path = self.path + 'insuranceQA' + append_str + '.pickle'
        # Either rebuild from the raw files or load the cached pickle.
        if self.reload:
            self.__build_data_set__()
        else:
            self.load_data()
        self.calc_data_stat()
        self.dataset_name = 'insuranceQA'

    def __build_data_set__(self):
        # Parse the raw corpus files into TRAIN/TEST/DEV and the vocabulary.
        print 'start loading data from original file'
        trainfilepath = self.path + 'question.train.token_idx.label'
        testfilepath1 = self.path + 'question.test1.label.token_idx.pool'
        testfilepath2 = self.path + 'question.test2.label.token_idx.pool'
        devfilepath = self.path + 'question.dev.label.token_idx.pool'
        answer_pool_path = self.path + 'answers.label.token_idx'
        vocabulary_path = self.path + 'vocabulary'

        def get_sentence_id(string):
            # Convert an 'idx_N idx_M ...' token string into a list of ints,
            # truncated to Max_length tokens.
            divs = string.split(' ')[0:self.Max_length]

            def get_word(word_str):
                return int(clean_str_remove(word_str.replace('idx_', '')))
            return map(get_word, divs)
        print 'start process answer pool....'
        # answer id -> token-id list for every candidate answer.
        answer_pool = {}
        with open(answer_pool_path, 'rb') as f:
            for line in f:
                divs = line.split('\t')
                id = divs[0]
                answer_sentence = get_sentence_id(divs[1])
                answer_pool[id] = answer_sentence
        pool_size = len(answer_pool)
        # Unused diagnostic: mean answer length.
        cc=np.mean([len(answer_pool[x]) for x in answer_pool])

        def get_test_or_dev_set(filepath):
            # Build one "patch" per question: the question paired with each
            # pooled answer, labelled 1 (right) or 0 (wrong).
            print 'process:' + filepath + '...'
            target = []
            with open(filepath, 'rb') as f:
                for line_question in f:
                    one_patch = []
                    divide = line_question.split('\t')
                    question = get_sentence_id(divide[1])
                    rights = divide[0].split(' ')
                    wrongs = divide[2].split(' ')
                    for right in rights:
                        yes = answer_pool[clean_str_remove(right.replace('idx_', ''))]
                        one_patch.append([self.transfun(question, 'int32'), self.transfun(yes, 'int32'), 1])
                    for wrong in wrongs:
                        no = answer_pool[clean_str_remove(wrong.replace('idx_', ''))]
                        one_patch.append([self.transfun(question, 'int32'), self.transfun(no, 'int32'), 0])
                    target.append(one_patch)
            return target
        self.TEST = get_test_or_dev_set(testfilepath1)
        self.DEV = get_test_or_dev_set(devfilepath)

        def get_neg_sample(id_in):
            # Draw a random number of negative answers, avoiding the positive id.
            # NOTE(review): randint bounds are inclusive, so sample_size can be
            # neg_high + 1 -- confirm intended.
            id_in = clean_str_remove(id_in.replace('idx_', ''))
            sample_size = random.randint(self.neg_low, self.neg_high + 1)
            neg_pool = []
            for index in xrange(sample_size):
                sample_index = str(random.randint(1, pool_size))
                while id_in == sample_index:
                    sample_index = str(random.randint(1, pool_size))
                neg_pool.append(answer_pool[sample_index])
            return neg_pool
        print 'process:' + trainfilepath + '...'
        # TRAIN is three parallel lists: questions, positive answers, negatives.
        with open(trainfilepath, 'rb') as f:
            q = []
            yes = []
            no = []
            for line in f:
                dive = line.split('\t')
                question = get_sentence_id(dive[0])
                positives = dive[1].split(' ')
                for positive in positives:
                    pos = answer_pool[clean_str_remove(positive)]
                    negs = get_neg_sample(positive)
                    for neg in negs:
                        q.append(question)
                        yes.append(pos)
                        no.append(neg)
            self.TRAIN.append(q)
            self.TRAIN.append(yes)
            self.TRAIN.append(no)
        print 'train set length:' + str(len(self.TRAIN[0]))
        print 'start load:' + vocabulary_path + '...'
        # vocabulary file: 'idx_N<TAB>word' per line.
        with open(vocabulary_path, 'rb') as f:
            for word_str in f:
                divdes = word_str.split('\t')
                word_id = int(clean_str_remove(divdes[0].replace('idx_', '')))
                self.word2id[divdes[1]] = word_id
        self.transfer_data(add_dev=False)
        print 'load data done'
|
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Inspired by Paul Holzer (Byte, Feb 1986). Most of this code come from the
# Scandroid by Charles O. Hartman.
import string
import re
import our_regex
import variables
def my_filter(word):
    """Return True for tokens worth keeping: non-empty and not a whitespace/junk marker."""
    unwanted = {'\n', 'S', ' ', '', 's', '\s'}
    return bool(word) and word not in unwanted
def less_picky_filter(word):
    """Return True for non-empty tokens other than the junk marker 'S'."""
    unwanted = {'S', ''}
    return bool(word) and word not in unwanted
def get_dimensions_of_poem(poem):
    """Return [n, m]: the number of non-blank lines and the max word count per line.

    ROBUSTNESS FIX: a poem with no non-blank lines now returns [0, 0] instead
    of raising ValueError from max() on an empty sequence.
    """
    words_per_line = [len(line.split()) for line in poem if len(line.split()) != 0]
    if not words_per_line:
        return [0, 0]
    return [len(words_per_line), max(words_per_line)]
def delimit(lst, delimiter):
    """Return a new list with `delimiter` interleaved between consecutive elements of `lst`."""
    interleaved = []
    for position, element in enumerate(lst):
        if position:
            interleaved.append(delimiter)
        interleaved.append(element)
    return interleaved
def trim_white_space(poem):
    """Stub: intended to strip surrounding whitespace from a poem.

    Currently a no-op that returns None.
    """
    return
def named_entities_to_weights(matrix, partition):
    """Replace spaCy entity labels in *matrix* with weights from *partition*.

    Entity label i maps to partition[i]; every other cell is left untouched.
    Mirrors pos_to_weights()/dep_to_weights().  The matrix is modified in
    place and also returned.

    Bug fixes vs. the original:
    - a missing comma fused 'PERCENT' and 'MONEY' into the single label
      'PERCENTMONEY', so neither label was ever matched;
    - the *partition* argument was ignored in favour of a fresh
      random_partition(17), making results non-deterministic and breaking
      symmetry with the sibling pos/dep weight functions.
    """
    A = matrix
    entities = ['PERSON', 'NORP', 'FACILITY', 'ORG', 'GPE', 'LOC', 'PRODUCT',
                'EVENT', 'WORK_OF_ART', 'LANGUAGE', 'DATE', 'TIME', 'PERCENT',
                'MONEY', 'QUANTITY', 'ORDINAL', 'CARDINAL']
    # entities with associated weights:
    ewaw = dict(zip(entities, partition))
    n = len(A)
    m = len(A[1])  # NOTE: assumes at least two rows, as the original did
    for ii in range(0, n):
        for jj in range(0, m):
            entity = A[ii][jj]
            if entity in entities:
                A[ii][jj] = ewaw[entity]
    return A
def pos_to_weights(matrix, partition):
    """Replace spaCy part-of-speech tags in *matrix* with float weights.

    Tag i of the fixed POS list maps to float(partition[i]); unrecognised
    cells are left untouched.  Mutates *matrix* in place and returns it.
    NOTE: len(matrix[1]) implies at least two rows are assumed.
    """
    pos_tags = ['CCONJ', 'CONJ', 'NUM', 'DET', 'ADP', 'PROPN', 'PRON', 'SYM',
                'PART', 'INTJ', 'VERB', 'NOUN', 'ADJ', 'ADV']
    tag_weight = dict(zip(pos_tags, partition))
    columns = len(matrix[1])
    for row in matrix:
        for col in range(columns):
            if row[col] in pos_tags:
                row[col] = float(tag_weight[row[col]])
    return matrix
def dep_to_weights(matrix, partition):
    """Replace spaCy dependency labels in *matrix* with float weights.

    Dependency label i maps to float(partition[i]); unrecognised cells are
    left untouched.  Mutates *matrix* in place and returns it.
    NOTE: len(matrix[1]) implies at least two rows are assumed.
    """
    deps = ['acl', 'acomp', 'advcl', 'advmod', 'agent', 'amod', 'appos',
            'attr', 'aux', 'auxpass', 'case', 'cc', 'ccomp', 'compound',
            'conj', 'csubj', 'csubjpass', 'dative', 'dep', 'det', 'dobj',
            'expl', 'intj', 'iobj', 'mark', 'meta', 'neg', 'nmod',
            'npadvmod', 'nsubj', 'nsubjpass', 'nummod', 'oprd', 'parataxis',
            'pcomp', 'pobj', 'poss', 'preconj', 'predet', 'prep', 'prt',
            'punct', 'quantmod', 'relcl', 'xcomp', 'ROOT']
    label_weight = dict(zip(deps, (float(p) for p in partition)))
    columns = len(matrix[1])
    for row in matrix:
        for col in range(columns):
            if row[col] in deps:
                row[col] = label_weight[row[col]]
    return matrix
def poem_to_matrix(poem, nlp):
    """Arrange the cleaned poem's words into an n x m grid.

    n is the number of non-empty lines of the *original* poem and m the
    word count of its widest line; unfilled cells keep the value 0.
    NOTE(review): words come from the cleaned poem but per-line counts come
    from the raw poem, so the two can disagree — the bare except below
    silently pads with 0 when the word supply runs out early.
    """
    cleaned_poem = clean_and_remove_junk(poem, nlp)
    line_break = ['\n']
    # drop lines that are a lone newline marker
    cleaned_poem_no_line_break = [line for line in cleaned_poem \
                                  if line not in line_break]
    words = [word for line in cleaned_poem_no_line_break for word in line.split()]
    # word counts per non-empty line of the ORIGINAL poem (fixes grid shape)
    words_per_line = [len(line.split()) for line in poem \
                      if len(line.split()) != 0]
    n = len(words_per_line)
    m = max(words_per_line)
    # pop() from the reversed list yields words in original order
    words_reversed = list(reversed(words))
    A = [[0 for x in range(m)] for y in range(n)]
    for ii in range(0,n):
        for jj in range(0,words_per_line[ii]):
            try:
                A[ii][jj] = words_reversed.pop()
            except:
                pass  # cleaned text had fewer words than the raw line count
    return A
def syllable_matrix(poem,nlp):
    """Build an n x m grid of per-word syllable counts for the poem.

    Each cell holds the number of syllables of the word at that position;
    unfilled cells keep 0.  NOTE(review): cleaned_poem_no_line_break is
    computed but never used — parse_line runs over the full cleaned_poem;
    also a mismatch between cleaned words and raw per-line counts makes
    syllables.pop() raise IndexError, unlike poem_to_matrix which pads.
    """
    cleaned_poem = clean_and_remove_junk(poem,nlp)
    line_break = ['\n']
    cleaned_poem_no_line_break = [line for line in cleaned_poem \
                                  if line not in line_break]
    # parse_line returns, per line, a list of per-word syllable lists
    parsed_and_cleaned_poem = [parse_line(line,nlp) \
                               for line in cleaned_poem]
    words = delimit([word for line in parsed_and_cleaned_poem \
                     for word in line],' ')
    # remove separator/junk syllables from every word
    words_no_dontwants = [list(filter(my_filter,word)) for word in words]
    cleaned_words = [word for word in words_no_dontwants if word != []]
    numbers_of_syllables = [len(word) for word in cleaned_words \
                            if len(word) != 0]
    # vector representing number of words per line:
    words_per_line = [len(line.split()) for line in poem \
                      if len(line.split()) != 0]
    n = len(words_per_line)
    m = max(words_per_line)
    # pop() from the reversed list yields counts in original order
    syllables = list(reversed(numbers_of_syllables))
    A = [[0 for x in range(m)] for y in range(n)]
    for ii in range(0,n):
        for jj in range(0,words_per_line[ii]):
            A[ii][jj] = syllables.pop()
    return A
def lexical_stress(poem,nlp):
    """Build an n x m matrix of per-word stress patterns for the poem.

    Each filled cell is a list with one value per syllable: 0.8 for a
    stressed (upper-case) syllable, 0.3 for an unstressed (lower-case) one.
    Returned as a numpy matrix.  NOTE(review): syllables that are neither
    fully upper nor fully lower case are skipped, so a cell's list can be
    shorter than the word's syllable count — confirm that is intended.
    """
    cleaned_poem = clean_and_remove_junk(poem,nlp)
    # per line: list of per-word syllable lists (stressed syllable upper-cased)
    parsed_and_cleaned_poem = [parse_line(line,nlp) \
                               for line in cleaned_poem]
    words = delimit([word for line in parsed_and_cleaned_poem \
                     for word in line],' ')
    words_no_dontwants = [list(filter(my_filter,word)) for word in words]
    cleaned_words = [word for word in words_no_dontwants if word != []]
    numbers_of_syllables = [len(word) for word in cleaned_words \
                            if len(word) != 0]
    # vector representing number of words per line:
    words_per_line = [len(line.split()) for line in poem \
                      if len(line.split()) != 0]
    dimensions = get_dimensions_of_poem(poem)
    n = dimensions[0]
    m = dimensions[1]
    syllables = [syllable for word in cleaned_words for syllable in word]
    # NOTE(review): function-local import; numpy is only needed for the
    # final np.matrix wrapper
    import numpy as np
    reversed_words = list(reversed(cleaned_words))
    A = [[0 for x in range(m)] for y in range(n)]
    for ii in range(n):
        for jj in range(words_per_line[ii]):
            # double reversal: pop() walks words, then syllables, in order
            word = list(reversed(reversed_words.pop()))
            binary = []
            for kk in range(len(word)):
                syllable = word.pop()
                if syllable.isupper():
                    binary.append(0.8)
                elif syllable.islower():
                    binary.append(0.3)
            A[ii][jj] = binary
    return np.matrix(A)
def filter_poem(poem, nlp):
    """Clean the poem and drop lines rejected by my_filter."""
    return [line for line in clean_and_remove_junk(poem, nlp) if my_filter(line)]
def tokenize_poem(poem, nlp):
    """Run the spaCy pipeline over every kept line of the poem."""
    parsed = []
    for kept_line in filter_poem(poem, nlp):
        parsed.append(nlp(kept_line))
    return parsed
def random_partition(n):
    """Return n random non-negative weights normalised to sum to 1."""
    import random
    raw = [random.uniform(0, 1) for _ in range(n)]
    total = sum(raw)
    return [value / total for value in raw]
def words_per_line(poem, nlp):
    """Return the word count of every non-empty cleaned, kept line."""
    counts = []
    for kept_line in filter_poem(poem, nlp):
        if kept_line.split():
            counts.append(len(kept_line.split()))
    return counts
def pos_matrix(poem, nlp):
    """Build an n x m grid of spaCy POS tags for the poem's words.

    Dimensions come from the raw poem (get_dimensions_of_poem) while the
    tags come from the cleaned, tokenized poem with SPACE tokens dropped;
    pos.pop() raises IndexError if the two counts disagree.  Unfilled
    cells keep 0.
    """
    tokenized_poem = tokenize_poem(poem, nlp)
    pos_tokens = [token.pos_ for line in tokenized_poem
                  for token in line if token.pos_!='SPACE']
    wpl = words_per_line(poem, nlp)
    dimensions = get_dimensions_of_poem(poem)
    n = dimensions[0]
    m = dimensions[1]
    A = [[0 for x in range(m)] for y in range(n)]
    # pop() from the reversed list yields tags in original order
    pos = list(reversed(pos_tokens))
    for ii in range(n):
        for jj in range(wpl[ii]):
            A[ii][jj] = pos.pop()
    return A
def iter_products(docs):
    """Yield every entity span labelled PRODUCT across all parsed docs."""
    for document in docs:
        yield from (span for span in document.ents if span.label_ == 'PRODUCT')
def word_is_in_entity(word):
    """True when the token carries a non-zero spaCy entity type id."""
    return not word.ent_type == 0
def count_parent_verb_by_person(docs):
    """Count, per PERSON entity, the lemmas of the verbs that govern it.

    Returns a nested mapping {person_text: {verb_lemma: count}}.

    Bug fix: the original called defaultdict(defaultdict(int)), which raises
    TypeError because the default factory must be callable and a defaultdict
    *instance* is not; a lambda now builds a fresh inner counter on demand.
    NOTE(review): VERB is expected to come from spacy.symbols — it is not
    imported anywhere in this file; confirm against the real import block.
    """
    from collections import defaultdict
    counts = defaultdict(lambda: defaultdict(int))
    for doc in docs:
        for ent in doc.ents:
            if ent.label_ == 'PERSON' and ent.root.head.pos == VERB:
                counts[ent.orth_][ent.root.head.lemma_] += 1
    return counts
def entity_matrix(poem, nlp):
    """Build an n x m grid of entity labels for the poem's words.

    B marks which tokens belong to an entity; each marked cell receives
    the next entity-span label, other cells are (re)set to 0.
    NOTE(review): labels are per entity *span* while B is per token, so a
    multi-token entity consumes several labels — the try/except pads with
    the untouched 0 when ent_reversed runs out.
    """
    tokenized_poem = tokenize_poem(poem, nlp)
    ent_tokens = [ent.label_ for line in tokenized_poem for ent in line.ents]
    wpl = words_per_line(poem, nlp)
    dimensions = get_dimensions_of_poem(poem)
    n = dimensions[0]
    m = dimensions[1]
    A = [[0 for x in range(m)] for y in range(n)]
    # True where the token carries a non-zero entity type
    B = [[word_is_in_entity(word) for word in line] for line in tokenized_poem]
    if len(ent_tokens) == 0:
        return A
    elif len(ent_tokens) > 0:
        # pop() from the reversed list yields labels in original order
        ent_reversed = list(reversed(ent_tokens))
        for ii in range(n):
            for jj in range(wpl[ii]):
                if B[ii][jj] == True:
                    try:
                        A[ii][jj] = ent_reversed.pop()
                    except:
                        pass  # more entity tokens than entity spans
                else:
                    A[ii][jj] = 0
        return A
def dependency_labels_to_root(token):
    '''Walk up the syntactic tree, collecting the arc labels.

    Stops at the root (the token that is its own head).  NOTE(review):
    collects token.dep (the integer id), while dependencies_matrix uses
    token.dep_ (the string form) — confirm which is intended.
    '''
    labels = []
    current = token
    while current.head is not current:
        labels.append(current.dep)
        current = current.head
    return labels
def dependencies_matrix(poem, nlp):
    """Build an n x m grid of spaCy dependency labels for the poem's words.

    Tokens with an empty dep_ string contribute 0 instead of a label;
    dep_reversed.pop() raises IndexError if token and word counts disagree.
    Unfilled cells keep 0.
    """
    tokenized_poem = tokenize_poem(poem, nlp)
    dep_tokens = []
    for line in tokenized_poem:
        for token in line:
            if token.dep_ != '':
                dep_tokens.append(token.dep_)
            else:
                dep_tokens.append(0)  # placeholder for unlabeled tokens
    wpl = words_per_line(poem, nlp)
    dimensions = get_dimensions_of_poem(poem)
    n = dimensions[0]
    m = dimensions[1]
    A = [[0 for x in range(m)] for y in range(n)]
    # pop() from the reversed list yields labels in original order
    dep_reversed = list(reversed(dep_tokens))
    for ii in range(n):
        for jj in range(wpl[ii]):
            A[ii][jj] = dep_reversed.pop()
    return A
def probability_matrix(poem, nlp):
    """Build an n x m grid of token probabilities for the poem's words.

    token.prob is a log-probability, so math.exp converts it back to a
    plain probability.  probs_in_reverse.pop() raises IndexError when token
    and word counts disagree.  Unfilled cells keep 0.
    """
    import math
    tokenized_poem = tokenize_poem(poem, nlp)
    probability_tokens = [token.prob for line in tokenized_poem for token in line]
    probs = [math.exp(prob) for prob in probability_tokens]
    wpl = words_per_line(poem, nlp)
    dimensions = get_dimensions_of_poem(poem)
    n = dimensions[0]
    m = dimensions[1]
    A = [[0 for x in range(m)] for y in range(n)]
    # pop() from the reversed list yields probabilities in original order
    probs_in_reverse = list(reversed(probs))
    for ii in range(n):
        for jj in range(wpl[ii]):
            A[ii][jj] = probs_in_reverse.pop()
    return A
### This function takes a poem and returns a matrix A with (Aij) equal to the
### real number representing the number of syllables of the word in this
### position divided by the number of words in the line.
def relative_syllable_matrix(poem,nlp):
    """Normalise syllable_matrix row-wise: each cell / its row's total.

    NOTE(review): len(syllable_matrix1[1]) assumes at least two rows, and a
    row whose total is 0 raises ZeroDivisionError — confirm inputs always
    have 2+ non-empty lines.
    """
    syllable_matrix1 = syllable_matrix(poem,nlp)
    row_sum = [sum(row) for row in syllable_matrix1]
    n = len(syllable_matrix1)
    m = len(syllable_matrix1[1])
    A = [[0 for x in range(m)] for y in range(n)]
    for ii in range(n):
        for jj in range(m):
            A[ii][jj] = syllable_matrix1[ii][jj]/row_sum[ii]
    return(A)
### This function simply loads our dictionary of exception words.
def load_dictionary():
    """Read scandictionary.txt (CSV) into {word: [syllable, ...]}."""
    import csv
    with open('scandictionary.txt') as csv_file:
        return {row[0]: row[1:] for row in csv.reader(csv_file, delimiter=',')}
def clean_and_remove_junk(poem, nlp):
    """Strip each line down to ASCII letters and single spaces.

    Every character that is not an ASCII letter is dropped; a space, or the
    first dash of a run of dashes, is replaced by a single space.  Each
    cleaned line is terminated with a newline.

    Fix: the original began with `tokens = [nlp(item) for item in poem]`,
    running the full spaCy pipeline on every line and discarding the
    result — pure wasted work, removed here (the *nlp* parameter is kept
    for interface compatibility with existing callers).

    NOTE(review): the `dash` flag is not reset between lines, so a dash at
    the end of one line suppresses a leading dash on the next — confirm
    this carry-over is intended.
    """
    alpha = string.ascii_lowercase + string.ascii_uppercase
    letters = list(alpha)
    # remove everything but letters and spaces and linebreaks:
    filtered = []
    dash = False
    for line in poem:
        l = ""
        w = ""
        for ch in line:
            if ch in letters:
                w += ch
            elif (ch == " ") or (ch == "-"):
                if (ch == "-") and (dash == False):
                    dash = True
                    l += w
                    l += " "
                    w = ""
                elif (ch == "-") and (dash == True):
                    pass  # stops the double dash insanity
                else:
                    dash = False
                    l += w
                    l += " "
                    w = ""
        l += w
        l += '\n'
        filtered.append(l)
    return filtered
### remove punctuation other than "'":
def strip_punctuation(word):
    """Split *word* into its kept characters and its stripped punctuation.

    Apostrophes are deliberately NOT treated as punctuation.  Returns
    {'word': kept, 'punctuation': removed}, each preserving original order.
    """
    punct = '!()-[]{}:;"\,<>.?@#$%^&*_~+='
    kept = ''.join(ch for ch in word if ch not in punct)
    removed = ''.join(ch for ch in word if ch in punct)
    return {'word': kept, 'punctuation': removed}
### remove letters and keep only numbers representing stress:
def strip_letters(line):
    """Concatenate every digit character found across the words of *line*."""
    return "".join(ch for word in line for ch in word if ch.isdigit())
# If word, or word less -s/-ed ending, is in dict, return its syls/stress.
# Whenever we accept something from the dictionary, we copy it, so that our
# manipulations for the sake of this line won't change the dictionary for
# others (including other instances of this line).
def dictionary_lookup(word, dictionary):
    """Return a copy of the syllable/stress list for *word*, or None.

    If the word itself is absent, tries stripping a plural 's' or a
    past-tense 'ed'/'d' and re-attaching the ending to the stem's last
    syllable (upper-cased when that syllable is stressed).  Words shorter
    than five characters never get suffix treatment.  Always returns a
    fresh copy so callers may mutate it without touching the dictionary.
    """
    if str(word) in dictionary:
        return dictionary[word][:]
    if len(word) < 5:
        return None  # e.g., 'bed' — too short for suffix stripping
    if word[-1:] == 's':
        stem = dictionary.get(word[:-1])
        if stem is None:
            return None
        syls = stem[:]
        syls[-1] += 'S' if syls[-1].isupper() else 's'
        return syls
    if word[-2:] == 'ed':
        stem = dictionary.get(word[:-2])
        if stem is not None:
            syls = stem[:]
            syls[-1] += 'ED' if syls[-1].isupper() else 'ed'
            return syls
        stem = dictionary.get(word[:-1])
        if stem is not None:
            syls = stem[:]
            syls[-1] += 'D' if syls[-1].isupper() else 'd'
            return syls
        return None
    return None
# out-of-class functions to handle encoding of special-combination characters
def encode(ch):
    """Map *ch* to a marker character by clearing its two high bits (& 0x3F)."""
    masked = ord(ch) & 0x3F
    return chr(masked)
def decode(ch):
    """Invert encode(): set bit 6 (| 0x40) to restore a letter-range char."""
    restored = ord(ch) | 0x40
    return chr(restored)
# encode [st] and i but not following vowel
def handleCiV(match):
    """Encode the consonant and the 'i' of a CiV match; keep the final vowel."""
    text = match.group()
    return encode(text[0]) + encode(text[1]) + text[2]
# adjusted for third-char test.
def handleCC(match):
    """Encode the first two consonants of a match; keep any third char as-is."""
    text = match.group()
    out = encode(text[0]) + encode(text[1])
    if len(text) > 2:
        out += text[2]
    return out
def handleVyV(match):
    """Encode the 'y' sandwiched between two vowels, keeping both vowels."""
    text = match.group()
    return text[0] + encode(text[1]) + text[2]
def preliminaries(word,nlp):
    """Normalise *word* before syllabification, recording state in `variables`.

    Resets the module-wide flags (isPast, isPlural, forceStress,
    numSuffixes, syllable_bounds), strips a trailing apostrophe / 's,
    removes a non-syllabic plural 's' or past 'ed' ending, marks suffix
    boundaries via find_suffix, and swaps a final liquid+e pair so it can
    be processed as a syllable.  Returns the trimmed word.
    NOTE(review): past_indicator/plurality_indicator locals are never used;
    the nlp parameter is also unused here.
    """
    past_indicator = False
    plurality_indicator = False
    variables.isPast = False
    variables.isPlural = False
    variables.forceStress = 0
    variables.numSuffixes= 0
    variables.syllable_bounds = []
    # apostrophe within the final two chars ("cat's", "boys'")
    apostrophe = word.find("\'", -2)
    if apostrophe != -1:
        if word[-1] != '\'' and word[-1] in 'se' and word[-2] in our_regex.SIBILANTS:
            variables.syllable_bounds.append(apostrophe)
        # cut off ' or 's until last stage
        word = word[:apostrophe]
    # cut final s/d from plurals/pasts if not syllabic
    # defaults used also for suffixes
    if re.search(r"[^s]s\b", word): variables.isPlural = True # terminal single s (DUMB!)
    if re.search(r"ed\b", word): variables.isPast = True # terminal 'ed'
    if variables.isPast or variables.isPlural: word = word[:-1]
    # final-syl test turns out to do better work *after* suffices cut off
    no_suffix = find_suffix(word)
    # if final syllable is l/r+e, reverse letters for processing as syllable
    if len(no_suffix) > 3 and our_regex.liquidterm.search(no_suffix):
        word = no_suffix[:-2] + no_suffix[-1] + no_suffix[-2]
    return word
### Identify any known suffixes, mark off as syllables and possible
### stresses. We identify them and list them backwards so as to "cut off"
### the last first. We consult a list of those that force stress on
### previous syllable.
def find_suffix(word):
    """Mark known suffixes as syllable boundaries in variables.syllable_bounds.

    Suffix matches are processed back-to-front so the last suffix is cut
    first; STRESSSUFFIX members force stress onto the preceding syllable,
    MULTISUFFIX members contribute an extra (second) syllable boundary.
    Returns the word unchanged when no terminal suffix is found.
    NOTE(review): on success this returns resultslist.pop()[0] — the text of
    the *first* matched suffix, not the trimmed word — yet preliminaries()
    treats the return value as the remaining word; confirm which is meant.
    """
    resultslist = []
    for f in our_regex.suffixes.finditer(word):
        resultslist.append((f.group(), f.start()))
    if not resultslist:
        return word
    # make sure *end* of word is in list! otherwise, 'DESP erate'
    if resultslist[-1][1] + len(resultslist[-1][0]) < len(word):
        return word
    resultslist.reverse()
    for res in resultslist:
        # if no vowel left before, false suffix ('singing')
        # n.b.: will choke on 'quest' etc! put in dictionary, I guess
        if not re.search('[aeiouy]', word[:res[1]]): break
        if res[0] == 'ing' and word[res[1]-1] == word[res[1]-2]:
            variables.syllable_bounds.append(res[1] - 1) # freq special case
        else: variables.syllable_bounds.append(res[1]) # sorted later
        word = word[:res[1]]
        variables.numSuffixes += 1
        if res[0] in our_regex.STRESSSUFFIX:
            variables.forceStress = 0 - len(variables.syllable_bounds)
        if res[0] in our_regex.MULTISUFFIX:
            # tricky bit! it *happens* that secondary division in all these
            # comes after its first character; NOT inevitable! also does not
            # allow for 3-syl: 'ically' (which are reliable!)
            variables.syllable_bounds.append(res[1]+1)
            variables.numSuffixes += 1
    return resultslist.pop()[0]
### Encode character-combinations so as to trick DivideCV. The combinations are
### contained in regexes compiled in the class's __init__. Encoding (*not* to
### be confused with Unicode functions!) is done by small functions outside of
### (and preceding) the class. The combinations in Paul Holzer's original code
### have been supplemented and tweaked in various ways. For example, the
### original test for [iy]V is poor; 'avionics' defeats it; so we leave that to
### a new disyllabic-vowel test. The messy encoding-and-sometimes-decoding of
### nonsyllabic final 'e' after a C seems the best that can be done, though I
### hope not.
def special_codes(special_codes_word):
    """Encode tricky character combinations so divide_cv treats them right.

    Handles a non-syllabic final 'e' after a consonant (encoded, then undone
    when no vowel would remain), then CiV combinations, doubled-consonant
    pairs and V-y-V combinations via the compiled regexes in our_regex.

    Bug fix: the sibilant check referenced a bare `SIBILANTS`, which is not
    defined in this module and raised NameError whenever a plural word
    ending in C+'e' reached it; it now uses our_regex.SIBILANTS, matching
    the identical reference in preliminaries().
    """
    if re.search(r"[^aeiouy]e\b", special_codes_word): # nonsyllabic final e after C
        if ((not variables.isPlural or special_codes_word[-2] not in our_regex.SIBILANTS) \
            and (not variables.isPast or special_codes_word[-2] not in 'dt')):
            special_codes_word = special_codes_word[:-1] + encode(special_codes_word[-1])
        if not re.search(r"[aeiouy]", special_codes_word): # any vowel left??
            special_codes_word = special_codes_word[:-1] + 'e' # undo the encoding
    special_codes_word = our_regex.CiVcomb.sub(handleCiV, special_codes_word)
    special_codes_word = our_regex.CCpair.sub(handleCC, special_codes_word)
    special_codes_word = our_regex.VyVcomb.sub(handleVyV, special_codes_word)
    return special_codes_word
### Divide the word among C and V groups to fill the variables.syllable_bounds list.
### Here, and here alone, we need to catch e-with-grave-accent to count it
### as not only a vowel but syllabic ('an aged man' vs. 'aged beef'). Other
### special characters might be useful to recognize, but won't make the
### same syllabic difference.
### I made some changes here to deal with None types:
### I am not sure what implications this has,
### All I know is that the script does not break here any more
def divide_cv(word):
    """Append consonant/vowel syllable divisions to variables.syllable_bounds.

    Disyllabic vowel groups get a boundary inside the group; consonant
    clusters strictly between the first and last vowel are divided by
    cluster length (1-2 consonants: before the last; 4+: before the
    penultimate; exactly 3: 1/2 or 2/1 split per splitLeftPairs).  Returns
    the word unchanged, or immediately when it has no vowel at all.
    """
    result = re.search(our_regex.unicodeVowels, word)
    if result != None:
        firstvowel = re.search(our_regex.unicodeVowels, word).start()
    else:
        # no vowel found: nothing to divide (guards against None .start())
        return word
    for v in re.finditer(our_regex.unicodeVowels, word):
        lastvowel = v.end() # replaced for each group, last sticks
        disyllabicvowels = our_regex.sylvowels.search(v.group())
        if disyllabicvowels:
            variables.syllable_bounds.append(v.start() + disyllabicvowels.start() + 1)
    for cc in re.finditer(our_regex.uniConsonants, word):
        if cc.start() < firstvowel or cc.end() >= lastvowel: continue
        numcons = len(cc.group())
        if numcons < 3: pos = cc.end() - 1 # before single C or betw. 2
        elif numcons > 3: pos = cc.end() - 2 # before penult C
        else: # 3 consonants, divide 1/2 or 2/1?
            cg = cc.group() # our CCC cluster
            if cg[-3] == cg[-2] or our_regex.splitLeftPairs.search(cg):
                pos = cc.end() - 2 # divide 1/2
            else: pos = cc.end() - 1 # divide 2/1
        # shift the bound left when it would fall between two encoded chars
        if not word[pos-1].isalpha() and not word[pos].isalpha():
            variables.syllable_bounds.append(pos-1)
        else: variables.syllable_bounds.append(pos)
    return word
def stress(origword):
    """Guess the 1-based index of the stressed syllable of *origword*.

    Uses the boundaries accumulated in variables.syllable_bounds plus the
    suffix/prefix bookkeeping (forceStress, numSuffixes) set by the earlier
    pipeline stages.  Monosyllables return 1; otherwise applies the
    Nessly-style heuristics commented below.
    """
    numsyls = len(variables.syllable_bounds) + 1
    if numsyls == 1:
        return 1
    variables.syllable_bounds.sort() # suffixes may have been marked first
    if variables.forceStress: # suffixes like 'tion', 'cious'
        return numsyls + variables.forceStress
    if numsyls - variables.numSuffixes == 1: # pretty reliable I think
        return 1
    isprefix = origword[:variables.syllable_bounds[0]] in our_regex.PREFIXES
    if numsyls - variables.numSuffixes == 2: # Nessly w/ suffix twist
        if isprefix:
            return 2
        else:
            return 1
    elif isprefix and (numsyls - variables.numSuffixes == 3):
        return 2
    else:
        # Nessley: 3+ syls, str penult if closed, else antepenult
        # syl n is origword[variables.syllable_bounds[n-1]:variables.syllable_bounds[n]-1]; so?
        if origword[variables.syllable_bounds[-1] - 1] not in 'aeiouy': # last char penult
            retstress = numsyls - 1 # if closed, stress penult
        else: retstress = numsyls - 2 # else, antepenult
        if variables.numSuffixes == numsyls:
            retstress -= 1
        return retstress
def calculate_syllables(word, nlp):
    """Split *word* into syllables, upper-casing the stressed one.

    Pipeline: preliminaries() normalises the word and resets
    variables.syllable_bounds; special_codes() encodes tricky letter
    combinations; divide_cv() adds C/V syllable boundaries; stress() picks
    the stressed syllable index.  Words under three characters are
    returned whole, upper-cased.

    Bug fix: the original computed wordpp = special_codes(wordp) and then
    passed the *unencoded* wordp to divide_cv, silently discarding the
    encoding stage; divide_cv now receives wordpp so the stages chain
    sequentially as in the Scandroid pipeline this code derives from.
    """
    if len(word) < 3:
        return [word.upper()]  # 'ax' etc
    variables.syllable_bounds = []
    wordp = preliminaries(word, nlp)
    wordpp = special_codes(wordp)
    wordppp = divide_cv(wordpp)
    stressed = stress(wordppp)
    variables.syllable_bounds.insert(0, 0)  # ease the calc of syllable indices
    variables.syllable_bounds.append(len(word))  # within the word
    listOfSyls = []
    i = 0
    for s in variables.syllable_bounds:
        if not s:
            continue  # skip the artificial 0 bound (and any duplicate 0)
        i += 1
        if i != stressed:
            listOfSyls.append(word[variables.syllable_bounds[i-1]:s])
        else:
            # stressed syllable is upper-cased so later stages can spot it
            listOfSyls.append(word[variables.syllable_bounds[i-1]:s].upper())
    return listOfSyls
### Determine syls/stress in all words in line, store other data too. Divide
### the line into word tokens with spaCy. Look up each in dictionary, and there
### or by calculation in the Syllabizer
def parse_line(line,nlp):
    """Tokenize *line* with spaCy and return per-word syllable lists.

    For each token, the hand-maintained exception dictionary wins over the
    calculated syllabification; an empty calculated word stands in for a
    line break ('\\n').  Returns None for an empty line.
    NOTE(review): load_dictionary() re-reads the CSV on every call — hoist
    it if this ever becomes hot.  token.string is the spaCy 2 attribute
    (removed in spaCy 3) — confirm the pinned spaCy version.
    """
    words = []
    dictionary = load_dictionary()
    if len(line) < 1:
        return None
    elif len(line) >=1:
        word_tokens = [token.string for token in nlp(line)]
        word_lower = [x.lower().strip() for x in word_tokens]
        ### replace calculated words with dictionary words if they exist
        dictionary_words = [dictionary_lookup(word,dictionary) for word in word_lower]
        calculated_words = [calculate_syllables(word,nlp) for word in word_lower]
        for dict_word, calc_word in zip(dictionary_words,calculated_words):
            if calc_word[0] == '':
                words.append('\n')  # empty token marks a line break
            elif dict_word == None:
                words.append(calc_word)
            elif dict_word != None:
                words.append(dict_word)
        return words
|
# Imports
import numpy as np
import cv2
import dlib
from scipy.spatial import distance as dist
from scipy.spatial import ConvexHull
def eye_size(eye):
    """Return (width, center) for a 6-point eye landmark array.

    Width is the euclidean distance between the two eye corners (points
    0 and 3); the center is the integer-truncated mean of the convex-hull
    vertices of the landmark polygon.
    """
    corner_to_corner = dist.euclidean(eye[0], eye[3])
    hull = ConvexHull(eye)
    center = np.mean(eye[hull.vertices, :], axis=0).astype(int)
    return int(corner_to_corner), center
def place_eye(frame, eyeCenter, eyeSize):
    """Alpha-blend the eye overlay onto *frame* centered at *eyeCenter*.

    Reads the module globals imgEye / orig_mask / orig_mask_inv prepared at
    load time, scales the overlay to 1.5x the measured eye width, clips it
    at the frame edges, and writes the blended patch back into *frame* in
    place.  Returns None.
    """
    eyeSize = int(eyeSize*1.5)
    x1 = int(eyeCenter[0, 0] - (eyeSize/2))
    x2 = int(eyeCenter[0, 0] + (eyeSize/2))
    y1 = int(eyeCenter[0, 1] - (eyeSize/2))
    y2 = int(eyeCenter[0, 1] + (eyeSize/2))
    h, w = frame.shape[:2]
    # check for clipping
    if x1 < 0:
        x1 = 0
    if y1 < 0:
        y1 = 0
    if x2 > w:
        x2 = w
    if y2 > h:
        y2 = h
    # re-calculate the size to avoid clipping
    eyeOverlayWidth = x2 - x1
    eyeOverlayHeight = y2 - y1
    # calculate the masks for the overlay
    eyeOverlay = cv2.resize(imgEye, (eyeOverlayWidth, eyeOverlayHeight), interpolation=cv2.INTER_AREA)
    mask = cv2.resize(orig_mask, (eyeOverlayWidth, eyeOverlayHeight), interpolation=cv2.INTER_AREA)
    mask_inv = cv2.resize(orig_mask_inv, (eyeOverlayWidth, eyeOverlayHeight), interpolation=cv2.INTER_AREA)
    # take ROI for the overlay from background, equal to size of the overlay image
    roi = frame[y1:y2, x1:x2]
    # roi_bg contains the original image only where the overlay is not, in the region that is the size of the overlay.
    roi_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
    # roi_fg contains the image pixels of the overlay only where the overlay should be
    roi_fg = cv2.bitwise_and(eyeOverlay, eyeOverlay, mask=mask)
    # join the roi_bg and roi_fg
    dst = cv2.add(roi_bg, roi_fg)
    # place the joined image, saved to dst back over the original image
    frame[y1:y2, x1:x2] = dst
# Path to image and pre trained models.
image_path = "osama.jpg"
cascade_path = "haarcascade_frontalface_default.xml"
predictor_path= "shape_predictor_68_face_landmarks.dat"
# Create the haar cascade
faceCascade = cv2.CascadeClassifier(cascade_path)
# create the landmark predictor
predictor = dlib.shape_predictor(predictor_path)
# Read the image
image = cv2.imread(image_path)
# convert the image to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Detect faces in the image
faces = faceCascade.detectMultiScale(
    gray,
    scaleFactor=1.05,
    minNeighbors=5,
    minSize=(100, 100),
    flags=cv2.CASCADE_SCALE_IMAGE
)
print("Found {0} faces!".format(len(faces)))
# Draw a rectangle around the faces.
# NOTE(review): dlib_rect/landmarks are reassigned every iteration, so the
# code after this loop only sees the LAST detected face — confirm the demo
# image is expected to contain a single face.
for (x, y, w, h) in faces:
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
    # Converting the OpenCV rectangle coordinates to Dlib rectangle
    dlib_rect = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))
    detected_landmarks = predictor(image, dlib_rect).parts()
    landmarks = np.matrix([[p.x, p.y] for p in detected_landmarks])
# copying the image so we can see side-by-side
image_copy = image.copy()
# annotate all 68 landmark positions with their index numbers
for idx, point in enumerate(landmarks):
    pos = (point[0, 0], point[0, 1])
    # annotate the positions
    cv2.putText(image_copy, str(idx), pos,
                fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                fontScale=0.4,
                color=(0, 0, 255))
    # draw points on the landmark positions
    cv2.circle(image_copy, pos, 3, color=(0, 255, 255))
# cv2.imshow('Original image', image)
# cv2.imshow('Point Draw image', image_copy)
# If you look closely at the numbers (and check with several images), you'll notice that the feature points are always coming in the same order. That is,
# 1. Points 0 to 16 is the Jawline
# 2. Points 17 to 21 is the Right Eyebrow
# 3. Points 22 to 26 is the Left Eyebrow
# 4. Points 27 to 35 is the Nose
# 5. Points 36 to 41 is the Right Eye
# 6. Points 42 to 47 is the Left Eye
# 7. Points 48 to 60 is Outline of the Mouth
# 8. Points 61 to 67 is the Inner line of the Mouth
JAWLINE_POINTS = list(range(0, 17))
RIGHT_EYEBROW_POINTS = list(range(17, 22))
LEFT_EYEBROW_POINTS = list(range(22, 27))
NOSE_POINTS = list(range(27, 36))
RIGHT_EYE_POINTS = list(range(36, 42))
LEFT_EYE_POINTS = list(range(42, 48))
MOUTH_OUTLINE_POINTS = list(range(48, 61))
MOUTH_INNER_POINTS = list(range(61, 68))
# re-run the predictor on the last face rectangle and keep only the eye points
landmarks = np.matrix([[p.x, p.y]
                       for p in predictor(image, dlib_rect).parts()])
landmarks_display = landmarks[RIGHT_EYE_POINTS + LEFT_EYE_POINTS]
RADIUS = 2
COLOR = (0, 255, 255)
for idx, point in enumerate(landmarks_display):
    pos = (point[0, 0], point[0, 1])
    cv2.circle(image, pos, RADIUS, color=COLOR, thickness=-1)
# cv2.imshow('Only eyes', image)
# switch from the Haar cascade to dlib's own detector for the live-video loop
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
# ---------------------------------------------------------
# Load and pre-process the eye-overlay
# ---------------------------------------------------------
# Load the image to be used as our overlay (-1 keeps the alpha channel)
imgEye = cv2.imread('Eye.png', -1)
# Create the mask from the overlay image
orig_mask = imgEye[:, :, 3]
# Create the inverted mask for the overlay image
orig_mask_inv = cv2.bitwise_not(orig_mask)
# Convert the overlay image image to BGR
# and save the original image size
imgEye = imgEye[:, :, 0:3]
origEyeHeight, origEyeWidth = imgEye.shape[:2]
# Start capturing the WebCam.
# NOTE(review): video_capture.release() is never called before exit.
video_capture = cv2.VideoCapture(0)
while True:
    ret, frame = video_capture.read()
    if ret:
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rects = detector(gray, 0)
        for rect in rects:
            x = rect.left()
            y = rect.top()
            x1 = rect.right()
            y1 = rect.bottom()
            landmarks = np.matrix([[p.x, p.y] for p in predictor(frame, rect).parts()])
            left_eye = landmarks[LEFT_EYE_POINTS]
            right_eye = landmarks[RIGHT_EYE_POINTS]
            leftEyeSize, leftEyeCenter = eye_size(left_eye)
            rightEyeSize, rightEyeCenter = eye_size(right_eye)
            place_eye(frame, leftEyeCenter, leftEyeSize)
            place_eye(frame, rightEyeCenter, rightEyeSize)
        cv2.imshow("Faces with Overlay", frame)
    ch = 0xFF & cv2.waitKey(1)
    if ch == ord('q'):
        break
# cv2.waitKey(0)
cv2.destroyAllWindows()
|
#
# Assignment 6
#
# Student Name : Aausuman Deep
# Student Number : 119220605
#
# Assignment Creation Date : March 7, 2020
from nltk.corpus import gutenberg as g
from nltk.stem.porter import *
def analyze(book_name):
    """Print summary statistics for one Gutenberg corpus file.

    Reports character/word/sentence counts, the longest word, the longest
    sentence, the stemmed-vocabulary size, and the largest stem family
    (all lower-cased surface forms sharing one Porter stem).

    Improvement: the vocabulary is now a set instead of a list, turning
    the per-word membership test from O(len(vocab)) into O(1); only len()
    and membership are ever used on it, so output is unchanged.
    NOTE(review): an empty stem_families (book with no alphabetic stems)
    would make the max_stem_family initialisation raise IndexError.
    """
    # Extracting characters, words and sentences respectively below
    characters = g.raw(book_name)
    words = g.words(book_name)
    sentences = g.sents(book_name)
    max_length_word = words[0]
    max_length_sentence = sentences[0]
    max_length_sentence_word_count = len(max_length_sentence)
    vocabulary = set()
    stem_families = dict()
    stemmer = PorterStemmer()
    for word in words:
        # Checking for the longest word
        if len(word) > len(max_length_word):
            max_length_word = word
        stemmed_word = stemmer.stem(word)
        # Creating a vocabulary of stemmed words and a dictionary for stem families
        if stemmed_word not in vocabulary and stemmed_word.isalpha():
            vocabulary.add(stemmed_word)
            stem_families[stemmed_word] = list()
            stem_families[stemmed_word].append(word.lower())
        elif stemmed_word in vocabulary and word.lower() not in stem_families[stemmed_word]:
            stem_families[stemmed_word].append(word.lower())
    for sentence in sentences:
        # Checking for the longest sentence
        if len(sentence) > len(max_length_sentence):
            max_length_sentence = sentence
            max_length_sentence_word_count = len(max_length_sentence)
    # Converting that largest sentence from a list of words to a cumulative string sentence
    max_length_sentence_string = " "
    max_length_sentence_string = max_length_sentence_string.join(max_length_sentence)
    max_stem_family = list(list(stem_families.items())[0])
    for key, value in stem_families.items():
        # Checking for the largest stem family
        if len(value) > len(max_stem_family[1]):
            max_stem_family[0] = key
            max_stem_family[1] = list(value)
    # Printing the characteristics as requested
    print("Analysis of '%s'" % book_name)
    print("# chars =", len(characters))
    print("# words =", len(words))
    print("# sentences =", len(sentences))
    print("Longest word = '%s'" % max_length_word)
    print("Longest sentence = '%s' (%d words)" % (max_length_sentence_string, max_length_sentence_word_count))
    print("Vocab size =", len(vocabulary))
    print("Largest stem family '%s' : {" % max_stem_family[0], end=" ")
    for i in range(len(max_stem_family[1])):
        if i != 0:
            print(",", end=" ")
        print("'%s'" % max_stem_family[1][i], end=" ")
    print("}")
|
# Sum 1..100 overall, plus separate totals for the evens, the odds and the
# multiples of 7.  All accumulators stay module-level so the printed report
# below can read them.
i = 1
s = 0
even_s = 0
odd_s = 0
sevn_s = 0
while i <= 100:
    s += i
    if i % 2:
        odd_s += i
    else:
        even_s += i
    if i % 7 == 0:
        sevn_s += i
    i += 1
print("1๋ถํฐ 100๊น์ง์ ํฉ = ",s)
print("1๋ถํฐ 100๊น์ง์ ์ง์์ ํฉ = ",even_s)
print("1๋ถํฐ 100๊น์ง์ ํ์์ ํฉ = ",odd_s)
print("1๋ถํฐ 100๊น์ง์ 7์ ๋ฐฐ์์ ํฉ = ",sevn_s)
|
from django import forms
class StarterForm(forms.Form):
    """Upload form: an image plus the corner-detection method to apply.

    NOTE: Django renders fields in declaration order — keep the order below.
    """
    # (value, label) pairs; the empty value forces an explicit selection
    METHOD_OPTIONS = (("", "Please select one"), ("0", "Harris Corner Detection"))
    image = forms.ImageField(required=True)
    method = forms.ChoiceField(choices=METHOD_OPTIONS, required=True)
    custom_name = forms.CharField(max_length=50, required=False, label="Name (optional):")
|
#!/usr/bin/env python
import sys
from graph import Graph, Vertex
"""
Implementation of the Word Ladder Algorithm
using Breadth First Search on a Graph.
"""
def buildGraph(g):
    """Populate graph *g* with word-ladder edges read from words.txt.

    Words that differ by exactly one letter share a wildcard bucket
    (e.g. 'fool' and 'pool' both land in '_ool'); every distinct pair in a
    bucket gets an edge in both directions.  Returns *g*.
    """
    buckets = {}
    with open("words.txt") as word_file:
        for raw in word_file:
            word = raw.replace("\n", "")
            for position in range(len(word)):
                pattern = word[:position] + "_" + word[position + 1:]
                buckets.setdefault(pattern, []).append(word)
    for pattern in buckets:
        for first in buckets[pattern]:
            for second in buckets[pattern]:
                if first != second:
                    g.addEdge(first, second)
    return g
def BFS(g, start, goal):
    """Breadth-first traversal of *g* from *start*; returns the visited set.

    Distances and predecessors are recorded on the vertex objects as a
    side effect so traverse() can later walk back from *goal*.  The *goal*
    argument is unused (the whole reachable component is explored) but
    kept for interface compatibility.

    Improvement: the frontier is now a collections.deque.  The original
    used list.insert(0, ...) which is O(n) per enqueue, making the whole
    traversal quadratic in the worst case; append/popleft keeps the exact
    same FIFO visit order at O(1) per operation.
    """
    from collections import deque
    visited = set()
    queue = deque([start])
    visited.add(start)
    while queue:
        currentVert = queue.popleft()
        for nbr in g[currentVert].getConnections():
            if nbr not in visited:
                g[nbr].setDistance(g[currentVert].getDistance() + 1)
                g[nbr].setPred(g[currentVert])
                visited.add(nbr)
                queue.append(nbr)
    return visited
def traverse(g, start, goal):
    """Return the BFS path from *start* to *goal*, start first.

    Runs BFS for its side effects (distance/pred stored on each vertex),
    then walks predecessor links back from *goal* and returns the reversed
    vertex sequence; an empty iterator when either endpoint was unreached.
    NOTE(review): g[goal] is dereferenced before any membership check, so
    an unknown goal raises KeyError here (main() catches and reports it).
    """
    bfs = BFS(g, start, goal)
    path = []
    pred = g[goal].getPred()
    if start in bfs and goal in bfs:
        path.append(g[goal])
        while pred != None:
            path.append(pred)
            pred = pred.getPred()
    return reversed(path)
def main():
    """Interactive driver: build the word graph, then print a ladder path."""
    GRAPH = Graph()
    g = buildGraph(GRAPH)
    print("Breadth First Search Word Ladder Algorithm!")
    print("=" * 43 + "\n")
    try:
        a, b = input("Choose word1: "), input("Choose word2: ")
        # NOTE(review): b is rebound to the path iterator on the next line,
        # so the error message below prints that iterator — not the original
        # word — when the failure happens after this point.
        b = traverse(g, a, b)
        print("\nPATH:")
        for v in b:
            print(v.id)
    except Exception as err:
        print(
            "\nERROR: Cannot reach '{}' from start: '{}' based on our words.txt file.".format(
                b, a
            )
        )
if __name__ == "__main__":
    main()
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.scala.dependency_inference.symbol_mapper import AllScalaTargets
from pants.backend.scala.subsystems.scala import ScalaSubsystem
from pants.backend.scala.util_rules.versions import (
ScalaArtifactsForVersionRequest,
ScalaArtifactsForVersionResult,
)
from pants.engine.internals.selectors import Get
from pants.engine.rules import collect_rules, rule
from pants.engine.unions import UnionRule
from pants.jvm.goals.lockfile import (
ValidateJvmArtifactsForResolveRequest,
ValidateJvmArtifactsForResolveResult,
)
from pants.jvm.resolve.common import Coordinate
from pants.jvm.subsystems import JvmSubsystem
from pants.jvm.target_types import JvmResolveField
from pants.util.docutil import bin_name
# Maven coordinates of the Scala runtime library (Scala 2 vs Scala 3 artifact).
SCALA_LIBRARY_GROUP = "org.scala-lang"
SCALA_LIBRARY_ARTIFACT = "scala-library"
SCALA3_LIBRARY_ARTIFACT = "scala3-library_3"
class ConflictingScalaLibraryVersionInResolveError(ValueError):
    """Exception for when there is a conflicting Scala version in a resolve."""
    def __init__(
        self, resolve_name: str, required_version: str, conflicting_coordinate: Coordinate
    ) -> None:
        # The message names the exact offending `jvm_artifact` coordinate and
        # the command to regenerate the lockfile after removing it.
        super().__init__(
            f"The JVM resolve `{resolve_name}` contains a `jvm_artifact` for version {conflicting_coordinate.version} "
            f"of the Scala runtime. This conflicts with Scala version {required_version} which is the configured version "
            "of Scala for this resolve from the `[scala].version_for_resolve` option. "
            "Please remove the `jvm_artifact` target with JVM coordinate "
            f"{conflicting_coordinate.to_coord_str()}, then re-run "
            f"`{bin_name()} generate-lockfiles --resolve={resolve_name}`"
        )
class MissingScalaLibraryInResolveError(ValueError):
    """Raised when a resolve used by Scala targets lacks the Scala runtime.

    Bug fix: the original had stray trailing commas after several of the
    implicitly-concatenated f-string fragments, which turned the message
    into multiple positional arguments to ValueError instead of one string,
    so str(error) rendered as a tuple of fragments.  All fragments now
    concatenate into a single message.
    """

    def __init__(self, resolve_name: str, scala_library_coordinate: Coordinate) -> None:
        super().__init__(
            f"The JVM resolve `{resolve_name}` does not contain a requirement for the Scala runtime. "
            "Since at least one Scala target type in this repository consumes this resolve, the resolve "
            "must contain a `jvm_artifact` target for the Scala runtime.\n\n"
            "Please add the following `jvm_artifact` target somewhere in the repository and re-run "
            f"`{bin_name()} generate-lockfiles --resolve={resolve_name}`:\n"
            "jvm_artifact(\n"
            f'  name="{scala_library_coordinate.group}_{scala_library_coordinate.artifact}_{scala_library_coordinate.version}",\n'
            f'  group="{scala_library_coordinate.group}",\n'
            f'  artifact="{scala_library_coordinate.artifact}",\n'
            f'  version="{scala_library_coordinate.version}",\n'
            f'  resolve="{resolve_name}",\n'
            ")"
        )
class ValidateResolveHasScalaRuntimeRequest(ValidateJvmArtifactsForResolveRequest):
    """Union member requesting Scala-runtime validation of a resolve's artifacts."""
    pass
@rule
async def validate_scala_runtime_is_present_in_resolve(
    request: ValidateResolveHasScalaRuntimeRequest,
    scala_subsystem: ScalaSubsystem,
    scala_targets: AllScalaTargets,
    jvm: JvmSubsystem,
) -> ValidateJvmArtifactsForResolveResult:
    """Verify that a resolve consumed by Scala targets includes the runtime.

    Skips resolves no first-party Scala target uses.  Raises
    ConflictingScalaLibraryVersionInResolveError when an artifact pins a
    different Scala version, and MissingScalaLibraryInResolveError when no
    scala-library artifact is present at all.
    """
    # Only validate resolves that at least one first-party Scala target uses.
    first_party_target_uses_this_resolve = False
    for tgt in scala_targets:
        if tgt[JvmResolveField].normalized_value(jvm) == request.resolve_name:
            first_party_target_uses_this_resolve = True
            break
    if not first_party_target_uses_this_resolve:
        return ValidateJvmArtifactsForResolveResult()
    scala_version = scala_subsystem.version_for_resolve(request.resolve_name)
    scala_artifacts = await Get(
        ScalaArtifactsForVersionResult, ScalaArtifactsForVersionRequest(scala_version)
    )
    has_scala_library_artifact = False
    for artifact in request.artifacts:
        if (
            artifact.coordinate.group == SCALA_LIBRARY_GROUP
            and artifact.coordinate.artifact == scala_artifacts.library_coordinate.artifact
        ):
            if artifact.coordinate.version != scala_version:
                raise ConflictingScalaLibraryVersionInResolveError(
                    request.resolve_name, scala_version, artifact.coordinate
                )
            # This does not `break` so the loop can validate the entire set of requirements to ensure no conflicting
            # scala-library requirement.
            has_scala_library_artifact = True
    if not has_scala_library_artifact:
        raise MissingScalaLibraryInResolveError(
            request.resolve_name, scala_artifacts.library_coordinate
        )
    return ValidateJvmArtifactsForResolveResult()
def rules():
    """Expose this module's rules plus the union-member registration."""
    scala_runtime_union = UnionRule(
        ValidateJvmArtifactsForResolveRequest, ValidateResolveHasScalaRuntimeRequest
    )
    return (*collect_rules(), scala_runtime_union)
|
'''
Created on 2016.6.14
@author: huke
'''
def combine(num2,max2,max):
    """Recursively drill down picking `max2` items out of `num2` candidates,
    printing at the deepest level.

    NOTE(review): this function looks broken/unfinished — confirm intent:
      * the parameter `max` shadows the builtin `max`;
      * `j` is computed but never used;
      * loop variables `x` and `y` are never used;
      * it prints the value of `max` repeatedly rather than any combination
        drawn from the candidate numbers built in __main__ (`num`/`lottey`
        are never consulted here).
    """
    i = num2
    j = max-1
    for x in range(max2,i):
        if max2 >1:
            # Recurse with one fewer candidate and one fewer slot to fill.
            combine(i-1,max2-1,max);
        else:
            # Deepest level: emit `max - 1` lines (of the constant `max`).
            for y in range(0,max-1):
                print(max)
            print('\n')
if __name__ == '__main__':
    Max_ = 4
    Num = 29
    # Candidate numbers 1..Num and a zeroed selection buffer.
    num = [value + 1 for value in range(Num)]
    lottey = [0] * Num
    combine(Num, Max_, Max_)
|
import json
import re
import requests
from bs4 import BeautifulSoup as bs
from bs4.element import Comment
import os
import asyncio
import logging
# Write to a json file
def write_to_file(filename, data):
    """Serialize `data` as JSON to service_workers/data/<filename>.json (relative to CWD).

    Bug fix: the `filename` argument was previously ignored in favor of a
    hard-coded placeholder name, so every call clobbered the same file.
    """
    path = os.path.join(os.getcwd(), "service_workers", "data", f"{filename}.json")
    with open(path, 'w') as fp:
        json.dump(data, fp)
# Cleaning content of a string
def clean_white_space(sentence):
    """Collapse every run of whitespace in `sentence` into a single space."""
    # Raw string: '\s' in a plain literal is an invalid escape (SyntaxWarning
    # on Python 3.12+); behavior of the pattern itself is unchanged.
    return re.sub(r'\s+', ' ', sentence)
# Used for out link validation
def context_cleaner(body):
    """Collapse whitespace in `body` and drop empty tokens.

    Bug fix: the original filter `if not x == '' or not x == None` is a
    De Morgan error — the `or` makes it true for EVERY token, so the empty
    strings produced by leading/trailing whitespace were never removed.
    """
    # Equivalent to clean_white_space(body).split(' '): runs of whitespace
    # become single-space separators, then we keep only non-empty tokens.
    tokens = re.sub(r'\s+', ' ', body).split(' ')
    return ' '.join(token for token in tokens if token)
def is_valid_url(url):
    """Return True when `url` contains an "http://" or "https://" marker.

    Note: this is a substring test (matches anywhere in the string), matching
    the original contract — not a prefix check.
    """
    return any(scheme in url for scheme in ("http://", "https://"))
def tag_visible(element):
    """Return True when a bs4 text node should count as visible page text.

    Excludes text inside non-rendered containers and HTML comments.
    """
    hidden_parents = ('style', 'script', 'head', 'title', 'meta', '[document]')
    return element.parent.name not in hidden_parents and not isinstance(element, Comment)
# Scraping individual web page
def scrape_web(url, index, link):
    """Fetch `url` and return a dict of scraped fields, or None on any failure.

    Args:
        url: page URL to fetch.
        index: identifier stored under the "id" key of the result.
        link: iterable of anchor tags (bs4 elements) to harvest out-links from.

    Returns:
        dict with id/title/org_url/linked_urls/meta_context/body, or None if
        fetching or parsing raised.
    """
    try:
        page = requests.get(url)
        results = bs(page.content, 'html.parser')
        # Finding title of the page.
        # NOTE(review): results.find('title') is None for title-less pages; the
        # resulting AttributeError is caught below and the page is skipped.
        title = results.find('title').text
        # Meta tags carrying the canonical og:url property.
        meta = results.find_all('meta', property="og:url")
        # Bug fix: the original selected tags WITHOUT a 'content' attribute
        # (`not x.has_attr('content')`) and then read x['content'], raising
        # KeyError whenever such a tag matched (silently swallowed below).
        # We want the tags that DO carry 'content'.
        meta_context = [
            context_cleaner(x['content'])
            for x in meta
            if x is not None and x.has_attr('content')
        ]
        # Out links: absolute http(s) hrefs only.
        links = [
            x['href'].strip()
            for x in link
            if x is not None and x.has_attr('href') and is_valid_url(x['href'])
        ] if link else []
        # Body text: join all visible text nodes.
        texts = results.find_all(string=True)  # `string=` replaces deprecated `text=`
        visible_texts = filter(tag_visible, texts)
        body_text = u" ".join(t.strip() for t in visible_texts)
        return {
            "id": index,
            "title": title,
            "org_url": url,
            "linked_urls": links,
            "meta_context": meta_context,
            "body": context_cleaner(body_text)
        }
    except Exception as error:  # boundary: log and signal failure with None
        logging.error(f"Exception is :{error}")
        return None
# Scraping multiple web pages based on the given seed
async def scrape_seed(url,index,name,links,file_count,count):
    """Scrape `url` and persist the result into {name}_{file_count}.json.

    When `count` is 0 and `file_count` > 1, a new chunk file is started;
    otherwise the existing chunk is read, extended, and rewritten.
    NOTE(review): declared async but performs only blocking HTTP/file I/O and
    never awaits — presumably driven by an external scheduler; confirm.
    """
    if count == 0 and file_count > 1:
        # First record of a new chunk: write a one-element JSON array.
        # NOTE(review): mode 'a' appends — if the file already exists this
        # produces concatenated JSON documents, not one array; confirm intent.
        with open(os.getcwd() + f'/service_workers/data/{name}_{file_count}.json', 'a') as fp:
            json.dump([scrape_web(url, index, links)], fp)
    else:
        # Subsequent records: load the current array, append, rewrite in full.
        with open(os.getcwd() + f'/service_workers/data/{name}_{file_count}.json','r') as json_file:
            data = json.load(json_file)
        # NOTE(review): scrape_web returns None on failure, so None entries
        # may be appended here — confirm downstream consumers tolerate that.
        data.append(scrape_web(url, index, links))
        write_to_file(f'{name}_{file_count}', data)
|
def decompose_single_strand(single_strand):
    """Render the three DNA reading frames of `single_strand`.

    Frame k (1-based) keeps the first k-1 bases as a prefix, then groups the
    remainder into space-separated triplets (the last group may be shorter).
    Returns the three frames joined by newlines, e.g. "Frame 1: AGG TGA ...".
    """
    frames = []
    for offset in range(3):
        header = 'Frame {}: {} '.format(offset + 1, single_strand[:offset]).strip()
        codons = [
            single_strand[i:i + 3]
            for i in range(offset, len(single_strand), 3)
        ]
        frames.append(header + ''.join(' ' + codon for codon in codons))
    return '\n'.join(frames)
'''
In genetics a reading frame is a way to divide a sequence of nucleotides (DNA bases)
into a set of consecutive non-overlapping triplets (also called codon).
Each of this triplets is translated into an amino-acid during a translation
process to create proteins.
Input
In a single strand of DNA you find 3 Reading frames,
take for example the following input sequence:
AGGTGACACCGCAAGCCTTATATTAGC
Output
For the output we are going to take the combinations and show them in the following manner:
Frame 1: AGG TGA CAC CGC AAG CCT TAT ATT AGC
Frame 2: A GGT GAC ACC GCA AGC CTT ATA TTA GC
Frame 3: AG GTG ACA CCG CAA GCC TTA TAT TAG C
For frame 1 split all of them in groups of three starting by the first base (letter).
For frame 2 split all of them in groups of three starting by the second base (letter)
but having the first base (letter) at the beginning.
For frame 3 split all of them in groups of three starting by the third letter,
but having the first and second bases (letters) at the beginning in the same order.
'''
|
import sys
import time
import subprocess
import Jetson.GPIO as GPIO
from .controll_sys import LedBlink
class PowerListener(object):
    """Polls a GPIO input pin and forwards press/release edges to a handler."""

    def __init__(self, handler, pin_type, input_pin):
        super(PowerListener, self).__init__()
        self.__handler = handler
        self.__input_pin = input_pin
        # GPIO.setmode/setup are called for their side effects; the attributes
        # merely record that configuration happened (they hold None).
        self.__mode = GPIO.setmode(pin_type)
        self.__setting = GPIO.setup(input_pin, GPIO.IN)

    def __listen(self):
        """Poll the pin every 10 ms; LOW means pressed, HIGH means released."""
        try:
            pressed = False
            while True:
                if GPIO.input(self.__input_pin) == GPIO.LOW:
                    if not pressed:
                        # Falling edge: report press once per hold.
                        self.__handler.clicked(time.time())
                        pressed = True
                elif pressed:
                    # Rising edge: report release once per hold.
                    self.__handler.unclicked(time.time())
                    pressed = False
                time.sleep(0.01)
        finally:
            # Always release GPIO state, even on interrupt/exception.
            GPIO.cleanup()

    def run(self):
        """Block forever, dispatching edge events to the handler."""
        self.__listen()
class PowerHandler(object):
    """Turns button press/release timing into power actions.

    Two modes, selected by `click_type` at construction:
      * truthy -> hold-duration mode (`__three_secs`): suspend/poweroff based
        on how long the button was held.
      * falsy  -> double-click mode (`__double_click`): LED feedback only.
    """

    # NOTE(review): never read anywhere; looks like leftover state for a real
    # double-click detector — confirm before removing.
    __double_click_pre_time = 0.0

    def __init__(self, led_blink, click_type):
        super(PowerHandler, self).__init__()
        self.__led_blink = led_blink
        # Truthy -> hold-duration mode; falsy -> double-click mode.
        self.__click_type = click_type
        self.__is_clicked = False
        self.__clicked_time = 0.0

    def clicked(self, current_time):
        """Record that the button went down at `current_time` (seconds)."""
        self.__is_clicked = True
        self.__clicked_time = current_time

    def unclicked(self, current_time):
        """Handle button release: dispatch to the configured mode, then reset."""
        if self.__click_type:
            self.__three_secs(current_time)
        else:
            # Bug fix: __double_click requires current_time but was called with
            # no argument, raising TypeError on every release in this mode.
            self.__double_click(current_time)
        self.__is_clicked = False

    def __three_secs(self, current_time):
        """Hold-duration mode: <3s suspend, 3-4s poweroff, 4-5s ignore, else exit.

        (Removed a dead local assignment to `__double_click_pre_time` that
        shadowed the class attribute without effect.)
        """
        if self.__is_clicked:
            held = current_time - self.__clicked_time
            if held < 3.0:
                # sleep
                self.__led_blink.sample("sleep : ", held)
                # NOTE(review): bare 'suspend' relies on such a command being
                # on PATH — confirm ('systemctl suspend' is more common).
                subprocess.call('suspend')
                exit()
            elif held < 4.0:
                # shut down
                self.__led_blink.sample("shut down : ", held)
                subprocess.call('poweroff')
                exit()
            elif held < 5.0:
                pass
            else:
                exit()

    def __double_click(self, current_time):
        """Double-click mode: LED feedback only; no power commands are issued."""
        if self.__is_clicked:
            held = current_time - self.__clicked_time
            if held < 3.0:
                # sleep
                self.__led_blink.sample("sleep : ", held)
            elif held < 4.0:
                # shut down
                self.__led_blink.sample("shut down : ", held)
            # elif :
            #     pass
            else:
                exit()
|
operand1 = 95
operand2 = 64.5
# operations — converted from Python 2 `print` statements (a syntax error on
# Python 3) to the print() function; the values are unchanged because
# operand2 is a float, so `/` was already true division.
print(operand1 + operand2)
print(operand1 - operand2)
print(operand1 * operand2)
print(operand1 / operand2)
print(operand1 % operand2)
# This method is useful because you don't need to rewrite the numbers;
# you just change the variables and the result will automatically appear.
|
#!/usr/bin/env python
import random
import string
import os
import os.path
def gen_random(str_len):
    """Return a random string of `str_len` hexadecimal-digit characters."""
    chars = []
    for _ in range(str_len):
        chars.append(random.choice(string.hexdigits))
    return ''.join(chars)
def main():
    """Generate ./config/.env with API_SECRET_KEY and DB_ENCRYPTION_KEY exports.

    Values already present in the environment are reused; missing ones are
    filled with freshly generated random hex strings. Generation is skipped
    entirely when all required variables are set.
    """
    # Don't need to create config folder if env vars are already set.
    # NOTE(review): DATABASE_URI is checked here but never written to the env
    # file below — confirm whether it belongs in the output too.
    if os.getenv('API_SECRET_KEY', None) and os.getenv('DATABASE_URI', None) and os.getenv('DB_ENCRYPTION_KEY'):
        return
    API_SECRET_KEY = os.getenv('API_SECRET_KEY', None)
    ENCRYPTION_KEY = os.getenv('DB_ENCRYPTION_KEY', None)
    if not API_SECRET_KEY:
        SECRET_KEY = "export API_SECRET_KEY=\"{}\"".format(gen_random(48))
    else:
        SECRET_KEY = "export API_SECRET_KEY=\"{}\"".format(API_SECRET_KEY)
    if not ENCRYPTION_KEY:
        ENCRYPTION_KEY = "export DB_ENCRYPTION_KEY=\"{}\"".format(gen_random(16))
    else:
        ENCRYPTION_KEY = "export DB_ENCRYPTION_KEY=\"{}\"".format(ENCRYPTION_KEY)
    # exist_ok avoids the isdir-then-makedirs race of the original check.
    os.makedirs('./config', exist_ok=True)
    # Context manager guarantees the file is closed even if writing fails
    # (the original used a bare open/close pair).
    with open('./config/.env', "w") as f:
        f.writelines([SECRET_KEY, "\n", ENCRYPTION_KEY])
main()
|
from main.activity.desktop_v3.activity_login import *
from main.activity.desktop_v3.activity_logout import *
from main.activity.desktop_v3.activity_myshop_editor import *
from utils.lib.user_data import *
from utils.function.setup import *
import unittest
class TestEditMyshopInfo(unittest.TestCase):
    """End-to-end browser test: log in, edit myshop info in the editor, log out."""

    # Environment label forwarded to the page objects (e.g. "live").
    _site = "live"
    def setUp(self):
        # Fresh Firefox WebDriver and test account for each test method.
        print ('TEST "Myshop-Editor"')
        self.driver = tsetup("firefox")
        self.user = user1
    def test_1_edit_shop_info(self):
        """Edit slogan and description, set the shop to closed, save, log out."""
        print ("TEST #1 : Edit Shop Information")
        print ("============================")
        driver = self.driver
        email = self.user['email']
        pwd = self.user['pwd']
        # Page-object helpers for each activity in the flow.
        login = loginActivity()
        myshopEdit = myshopEditorActivity()
        logout = logoutActivity()
        login.do_login(driver, self.user, email, pwd, self._site)
        myshopEdit.setObject(driver)
        myshopEdit.goto_myshop_editor(self._site)
        myshopEdit.edit_slogan_shop()
        myshopEdit.edit_shop_description()
        myshopEdit.change_shop_status_to("close")
        # Fixed sleeps — presumably waiting for UI/AJAX to settle before and
        # after saving; TODO: replace with explicit WebDriver waits.
        time.sleep(3)
        myshopEdit.do_save_changes()
        time.sleep(4)
        logout.do_logout(driver, self._site)
    def tearDown(self):
        # User-facing Indonesian status message (runtime output, left as-is).
        print("Testing akan selesai dalam beberapa saat..")
        self.driver.close()
if __name__ == '__main__':
    # warnings='ignore' applies an "ignore" warnings filter for the run,
    # silencing warnings emitted while the browser tests execute.
    unittest.main(warnings='ignore')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.