text stringlengths 8 6.05M |
|---|
# OpenCV bindings
import cv2
# To performing path manipulations
import os
# Local Binary Pattern function
from skimage.feature import local_binary_pattern
# To calculate a normalized histogram
from scipy.stats import itemfreq
from sklearn.preprocessing import normalize
# Utility package -- use pip install cvutils to install
import cvutils
# To read class from file
import csv
import matplotlib.pyplot as plt
import numpy as np
###########################
# CODE FROM: http://hanzratech.in/2015/05/30/local-binary-patterns.html
###########################
# --- Training phase: compute a uniform-LBP histogram for every image in input/ ---
# List for storing the LBP Histograms, address of images and the corresponding label
train_images = cvutils.imlist("input/")
# List for storing the LBP Histograms, address of images and the corresponding label
X_test = []   # LBP histograms, one per training image
X_name = []   # image paths, parallel to X_test
# y_test = []
# For each image in the training set calculate the LBP histogram
# and update X_test, X_name and y_test
import scipy.misc  # NOTE(review): scipy.misc.imresize/imsave were removed in scipy >= 1.3
for radius in [3]:
    print radius  # Python 2 print statement
    for train_image in train_images:
        # Read the image
        im = cv2.imread(train_image)
        # Resize, then overwrite the file on disk with the resized copy
        im = scipy.misc.imresize(im, (1057, 756))
        scipy.misc.imsave(train_image, im)
        # Convert to grayscale as LBP works on grayscale image
        im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        # radius = 3
        # Number of points to be considered as neighbourers
        no_points = 8 * radius
        # Uniform LBP is used
        lbp = local_binary_pattern(im_gray, no_points, radius, method='uniform')
        # Calculate the histogram (itemfreq returns (value, count) rows)
        x = itemfreq(lbp.ravel())
        # Normalize the histogram so the counts sum to 1
        hist = x[:, 1]/sum(x[:, 1])
        # Append image path in X_name
        X_name.append(train_image)
        # Append histogram to X_test
        X_test.append(hist)
        # Append class label in y_test
        # y_test.append(train_dic[os.path.split(train_image)[1]])
# Display the training images in an nrows x ncols grid
nrows = 2
ncols = 4
fig, axes = plt.subplots(nrows,ncols)
for row in range(nrows):
    for col in range(ncols):
        axes[row][col].imshow(cv2.cvtColor(cv2.imread(X_name[row*ncols+col]), cv2.COLOR_BGR2RGB))
        axes[row][col].axis('off')
        axes[row][col].set_title("{}".format(os.path.split(X_name[row*ncols+col])[1]))
plt.show()
# --- Query phase: for each test image, rank the training images by
# chi-squared distance between LBP histograms and show the best matches. ---
# Store the path of testing images in test_images
test_images = cvutils.imlist("test/")
for test_image in test_images:
    # Read the image
    im = cv2.imread(test_image)
    im = scipy.misc.imresize(im, (1057, 756))
    scipy.misc.imsave(test_image, im)
    # Convert to grayscale as LBP works on grayscale image
    im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    # radius = 3
    # Number of points to be considered as neighbourers
    # NOTE(review): `radius` here is whatever the training loop left behind (3).
    no_points = 8 * radius
    # Uniform LBP is used
    lbp = local_binary_pattern(im_gray, no_points, radius, method='uniform')
    # Calculate the histogram
    x = itemfreq(lbp.ravel())
    # Normalize the histogram
    hist = x[:, 1]/sum(x[:, 1])
    # Display the query image
    cvutils.imshow("** Query Image -> {}**".format(test_image), im)
    plt.show()
    results = []
    # For each image in the training dataset
    # Calculate the chi-squared distance and the sort the values
    for index, x in enumerate(X_test):
        score = cv2.compareHist(np.array(x, dtype=np.float32), np.array(hist, dtype=np.float32), cv2.HISTCMP_CHISQR)
        results.append((X_name[index], round(score, 3)))
    # Lower chi-squared distance == better match, so sort ascending
    results = sorted(results, key=lambda score: score[1])
    # Display the results
    fig, axes = plt.subplots(nrows,ncols)
    fig.suptitle("** Scores for -> {}**".format(test_image))
    for row in range(nrows):
        for col in range(ncols):
            axes[row][col].imshow(cv2.cvtColor(cv2.imread(results[row*ncols+col][0]), cv2.COLOR_BGR2RGB))
            axes[row][col].axis('off')
            axes[row][col].set_title("Score {}".format(results[row*ncols+col][1]))
    plt.show()
|
'''
Koko 每小时最多吃一堆香蕉,如果吃不下的话留到下一小时再吃;
如果吃完了这一堆还有胃口,也只会等到下一小时才会吃下一堆。
在这个条件下,让我们确定 Koko 吃香蕉的最小速度(根/小时)
'''
# 第一版解法
def min_eating_speed_1(piles, hour):
    """Linear scan: return the smallest eating speed (bananas/hour) that
    lets Koko finish all piles within `hour` hours.

    Tries every candidate speed from 1 up to the largest pile; the largest
    pile size is always sufficient, so it is the fallback answer.
    """
    top = max(piles)
    for speed in range(1, top + 1):
        if can_finish(piles, speed, hour):
            return speed
    return top
# 第二版解法
def min_eating_speed_2(piles, hour):
    """Binary search on the answer: smallest speed in [1, max(piles)] such
    that all piles can be finished within `hour` hours.

    Searches the half-open interval [lo, hi); the loop invariant is that
    every speed >= hi works and every speed < lo fails.
    """
    lo, hi = 1, max(piles) + 1
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if can_finish(piles, mid, hour):
            hi = mid          # mid works: the answer is mid or smaller
        else:
            lo = mid + 1      # mid is too slow
    return lo
# 下面的函数都为辅助函数
def can_finish(piles, speed, hour):
    """Return True when every pile can be eaten within `hour` hours at `speed`.

    Koko eats at most one pile per hour, so a pile of size p takes
    ceil(p / speed) hours.

    Bug fixes vs. the original:
    - the original returned from inside the loop after examining only the
      FIRST pile, so later piles were never counted;
    - the comparison was `<` where finishing exactly at the deadline
      (`<=`) should count as success.
    """
    total = 0
    for pile in piles:
        # ceil(pile / speed) using integer arithmetic only
        total += (pile + speed - 1) // speed
    return total <= hour
def time_of(pile, speed):
    """Hours needed to eat one `pile` at `speed` bananas/hour (ceil division).

    Bug fix: `pile / speed` is FLOAT division in Python 3, producing
    fractional hours; `//` keeps the result an integer hour count.
    """
    need_another_hour = 1 if pile % speed > 0 else 0
    return (pile // speed) + need_another_hour
# Smoke test: the linear-scan and binary-search versions must agree.
piles = [1,3,6,9]
hour = 120
res_1 = min_eating_speed_1(piles, hour)
res_2 = min_eating_speed_2(piles, hour)
print(res_1, res_2)
import pytest
import sort
def test_my_bs():
    """Bubble sort must agree with the built-in sorted()."""
    res = [2,1,2,8,5,0,6]
    # Bug fix: the module is imported as `sort` (see the import at the top
    # of the file); `my_sort` was an undefined name.
    assert sort.bubble_sort(res) == sorted(res)
def test_my_qs():
    """Quick sort must agree with the built-in sorted()."""
    res = [2,1,2,8,5,0,6]
    # Bug fix: the module is imported as `sort`; `my_sort` was undefined.
    assert sort.quick_sort(res) == sorted(res)
|
from enum import IntEnum
# Two-byte frame header prefixed to every command packet.
COMMAND_HEAD = b"\x6a\xa6"
# Standalone command-type bytes; 0x01 mirrors Command.OUTPUT_CTR below.
COMMAND_TYPE_OUTPUT_CTR = 0x01
COMMAND_TYPE_READ = 0xFE
class Command(IntEnum):
    """Command / register identifiers for the device protocol."""
    OUTPUT_CTR = 0x01
    DEVICE_TIME = 0x02
    TAP_TIME = 0x03
    DELAY_TIME = 0x04
    MODBUS_ADDRESS = 0x05
    OUTPUT_STATE = 0x06
    INPUT_STATE = 0x07
    CYCLE_TIME = 0x08
    AUTO_ENABLE = 0x0F
    # Sixteen automation slots occupy the contiguous range 0x10..0x1F.
    AUTO_1 = 0x10
    AUTO_2 = 0x11
    AUTO_3 = 0x12
    AUTO_4 = 0x13
    AUTO_5 = 0x14
    AUTO_6 = 0x15
    AUTO_7 = 0x16
    AUTO_8 = 0x17
    AUTO_9 = 0x18
    AUTO_10 = 0x19
    AUTO_11 = 0x1A
    AUTO_12 = 0x1B
    AUTO_13 = 0x1C
    AUTO_14 = 0x1D
    AUTO_15 = 0x1E
    AUTO_16 = 0x1F
    # Marker distinguishing device responses from requests.
    TYPE_RESPONSE = 0xFF
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import hashlib
import json
from collections import OrderedDict
from enum import Enum
import pytest
from pants.base.hash_utils import CoercingEncoder, hash_all, json_hash
from pants.util.ordered_set import OrderedSet
class TestHashUtils:
    """Tests for pants.base.hash_utils.hash_all."""
    def test_hash_all(self):
        # hash_all must equal a single sha1 over the concatenated inputs.
        expected_hash = hashlib.sha1()
        expected_hash.update(b"jakejones")
        assert expected_hash.hexdigest() == hash_all(["jake", "jones"])
class TestCoercingJsonEncodingTest:
    """Tests for CoercingEncoder: JSON encoding with key/collection coercion
    and deterministic ordering."""
    @staticmethod
    def _coercing_json_encode(o):
        # Helper: encode with the project's coercing encoder.
        return json.dumps(o, cls=CoercingEncoder)
    def test_normal_object_encoding(self):
        # Plain JSON-compatible values pass through; tuples/sets become lists.
        assert self._coercing_json_encode({}) == "{}"
        assert self._coercing_json_encode(()) == "[]"
        assert self._coercing_json_encode([]) == "[]"
        assert self._coercing_json_encode(set()) == "[]"
        assert self._coercing_json_encode([{}]) == "[{}]"
        assert self._coercing_json_encode([("a", 3)]) == '[["a", 3]]'
        assert self._coercing_json_encode({"a": 3}) == '{"a": 3}'
        assert self._coercing_json_encode([{"a": 3}]) == '[{"a": 3}]'
        assert self._coercing_json_encode({1}) == "[1]"
    def test_rejects_ordered_dict(self):
        # Explicitly-ordered mappings are rejected to keep hashing stable.
        with pytest.raises(TypeError, match=r"CoercingEncoder does not support OrderedDict inputs"):
            self._coercing_json_encode(OrderedDict([("a", 3)]))
    def test_non_string_dict_key_coercion(self):
        # Non-string keys are JSON-encoded into their string representation.
        assert self._coercing_json_encode({("a", "b"): "asdf"}) == r'{"[\"a\", \"b\"]": "asdf"}'
    def test_string_like_dict_key_coercion(self):
        # bytes keys are decoded to str keys.
        assert self._coercing_json_encode({"a": 3}) == '{"a": 3}'
        assert self._coercing_json_encode({b"a": 3}) == '{"a": 3}'
    def test_nested_dict_key_coercion(self):
        assert self._coercing_json_encode({(1,): {(2,): 3}}) == '{"[1]": {"[2]": 3}}'
    def test_collection_ordering(self):
        # Sets and dicts are emitted sorted; lists keep their given order.
        assert self._coercing_json_encode({2, 1, 3}) == "[1, 2, 3]"
        assert self._coercing_json_encode({"b": 4, "a": 3}) == '{"a": 3, "b": 4}'
        assert self._coercing_json_encode([("b", 4), ("a", 3)]) == '[["b", 4], ["a", 3]]'
        assert self._coercing_json_encode([{"b": 4, "a": 3}]) == '[{"b": 4, "a": 3}]'
    def test_enum(self) -> None:
        # Enum members encode as their values.
        class Test(Enum):
            dog = 0
            cat = 1
            pig = 2
        assert self._coercing_json_encode([Test.dog, Test.cat, Test.pig]) == "[0, 1, 2]"
class TestJsonHashing:
    """Tests for json_hash: stable sha1 digests of coerced-JSON inputs."""
    def test_known_checksums(self):
        """Check a laundry list of supported inputs to stable_json_sha1().
        This checks both that the method can successfully handle the type of input object, but also
        that the hash of specific objects remains stable.
        """
        assert json_hash({}) == "bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f"
        assert json_hash(()) == "97d170e1550eee4afc0af065b78cda302a97674c"
        assert json_hash([]) == "97d170e1550eee4afc0af065b78cda302a97674c"
        assert json_hash(set()) == "97d170e1550eee4afc0af065b78cda302a97674c"
        assert json_hash([{}]) == "4e9950a1f2305f56d358cad23f28203fb3aacbef"
        assert json_hash([("a", 3)]) == "d6abed2e53c1595fb3075ecbe020365a47af1f6f"
        assert json_hash({"a": 3}) == "9e0e6d8a99c72daf40337183358cbef91bba7311"
        assert json_hash([{"a": 3}]) == "8f4e36849a0b8fbe9c4a822c80fbee047c65458a"
        assert json_hash({1}) == "f629ae44b7b3dcfed444d363e626edf411ec69a8"
    def test_rejects_ordered_collections(self):
        # Ordered containers are rejected (ordering would leak into the hash).
        with pytest.raises(TypeError, match=r"CoercingEncoder does not support OrderedDict inputs"):
            json_hash(OrderedDict([("a", 3)]))
        with pytest.raises(TypeError, match=r"CoercingEncoder does not support OrderedSet inputs"):
            json_hash(OrderedSet([3]))
    def test_non_string_dict_key_checksum(self) -> None:
        assert json_hash({("a", "b"): "asdf"}) == "45deafcfa78a92522166c77b24f5faaf9f3f5c5a"
    def test_string_like_dict_key_checksum(self) -> None:
        # str and bytes keys must hash identically.
        assert json_hash({"a": 3}) == "9e0e6d8a99c72daf40337183358cbef91bba7311"
        assert json_hash({b"a": 3}) == "9e0e6d8a99c72daf40337183358cbef91bba7311"
    def test_nested_dict_checksum(self) -> None:
        assert json_hash({(1,): {(2,): 3}}) == "63124afed13c4a92eb908fe95c1792528abe3621"
    def test_checksum_ordering(self) -> None:
        # Unordered containers hash as if sorted; lists keep insertion order.
        assert json_hash({2, 1, 3}) == "a01eda32e4e0b1393274e91d1b3e9ecfc5eaba85"
        assert json_hash({"b": 4, "a": 3}) == "6348df9579e7a72f6ec3fb37751db73b2c97a135"
        assert json_hash([("b", 4), ("a", 3)]) == "8e72bb976e71ea81887eb94730655fe49c454d0c"
        assert json_hash([{"b": 4, "a": 3}]) == "4735d702f51fb8a98edb9f6f3eb3df1d6d38a77f"
|
from tkinter import *
from tkinter import messagebox
import sqlite3
from sqlite3 import Error
from tkinter import ttk
import pandas as pd
import random
import os
from PIL import ImageTk,Image
# Global pandas display settings + the single shared DB connection/cursor.
pd.set_option('display.max_columns', None)
pd.set_option('expand_frame_repr', False)
# Fix: raw string for the Windows path so backslash sequences are never
# interpreted as escapes (DeprecationWarning on Python 3.6+, error later).
# The byte content of the literal is unchanged.
conn = sqlite3.connect(r'B:\StudyMaterials\DBMSProject\ResDB.db')
c = conn.cursor()
print(c.execute("select * from CUSTOMER"))
def chk_conn(conn):
    """Return True if `conn` still behaves like an open DB connection.

    Probes the connection by asking it for a cursor; any failure (closed
    connection, wrong object type) is reported as False rather than raised.
    """
    try:
        conn.cursor()
        return True
    except Exception:
        # Unused exception variable removed; only success/failure matters here.
        return False
print(chk_conn(conn))
def enter():
    # Build screen2: a customer-details form; the "Enter Details" button
    # triggers entry(), which performs the INSERT.
    try:
        screen1.destroy()
    except:
        pass
    global screen2
    screen2=Tk()
    Label(screen2,text="Enter Details",bg ="grey", font=("Calibri",15),width =500,height=1).pack()
    # StringVars are module-global so the entry() callback can read them later.
    global CID,Cname,Address,DOB,EID,Ctoken
    CID = StringVar()
    Cname = StringVar()
    Address= StringVar()
    DOB = StringVar()
    EID = StringVar()
    Ctoken = StringVar()
    global w1,w2,w3,w4,w5,w6,w7,w8
    Label(screen2, text= "Please enter details").pack()
    Label(screen2, text= "").pack()
    Label(screen2, text= "CID").pack()
    # NOTE(review): Entry(...).pack() returns None, so w1..w7 end up None;
    # the widget references are not actually retained.
    w1=Entry(screen2, textvariable = CID).pack()
    Label(screen2, text= "Name").pack()
    w2=Entry(screen2, textvariable = Cname).pack()
    Label(screen2, text= "Address").pack()
    w4=Entry(screen2, textvariable = Address).pack()
    Label(screen2, text= "DOB").pack()
    w5=Entry(screen2, textvariable = DOB).pack()
    Label(screen2, text= "EID").pack()
    w6=Entry(screen2, textvariable = EID).pack()
    Label(screen2, text= "Ctoken").pack()
    w7=Entry(screen2, textvariable = Ctoken).pack()
    B1=Button(screen2,text="Enter Details",command=entry).pack(pady=5)
def entry():
    # Callback for screen2's "Enter Details" button: read the six customer
    # fields and insert a new Customer row, then return to the admin screen.
    w1=CID.get()
    w2=Cname.get()
    w4=Address.get()
    w5=DOB.get()
    w6=EID.get()
    w7=Ctoken.get()
    x=(w1,w2,w4,w5,w6,w7)
    l1=['P','N']
    res = random.choice(l1)
    # NOTE(review): `y` (and `res`) are built but never used — presumably
    # intended for a second insert; confirm before removing.
    y=(res,w1)
    c.execute("insert into Customer values(?,?,?,?,?,?)",x)
    # Echo the customer log table to stdout (presumably filled by a DB
    # trigger — TODO confirm against the schema).
    c.execute("select * from Cust_logs")
    for log in c.fetchall():
        print(log)
    conn.commit()
    screen2.destroy()
    login_admin()
def gBill():
    # Screen that asks for a single CID; "Generate" runs genBill() to show
    # that customer's bill total.
    try:
        screen1.destroy()
    except:
        pass
    global screen61
    screen61=Tk()
    Label(screen61,text="Enter Details",bg ="grey", font=("Calibri",15),width =500,height=1).pack()
    global CID
    global d121
    CID = StringVar()
    Label(screen61, text= "Please enter details").pack()
    Label(screen61, text= "").pack()
    Label(screen61, text= "CID").pack()
    d1=Entry(screen61, textvariable = CID).pack()
    button=Button(screen61,text="Generate",command=genBill).pack(pady=5)
def genBill():
    # Callback: sum BILL amounts for the CID typed on screen61 and display
    # the result in a new window, then return to the admin screen.
    d121=CID.get()
    # NOTE(review): `b` (per-customer totals) is fetched but never used.
    c.execute(" SELECT SUM(AMOUNT),BILL.CID from BILL,CUSTOMER where BILL.CID=CUSTOMER.CID GROUP BY CUSTOMER.CID ")
    b=c.fetchall()
    c.execute("select CID,SUM(AMOUNT) from BILL where CID=(?)",[d121])
    cx=pd.DataFrame(c.fetchall())
    screen21=Tk()
    text=Text(screen21)
    text.insert(END,str(cx.iloc[:,0:]))
    text.pack(fill=BOTH, expand=1)
    screen61.destroy()
    login_admin()
def getBill():
    # Show the total bill per customer (all customers) in a text window.
    c.execute(" SELECT BILL.CID,SUM(AMOUNT) from BILL,CUSTOMER where BILL.CID=CUSTOMER.CID GROUP BY CUSTOMER.CID ")
    b=pd.DataFrame(c.fetchall())
    b.columns=["Customer","Price"]
    messagebox.showinfo("Bill","Bill Calculated")
    screen20=Tk()
    text=Text(screen20)
    text.insert(END,str(b.iloc[:,0:]))
    text.pack(fill=BOTH, expand=1)
    screen1.destroy()
    login_admin()
def addorder():
    # Build screen6: an order-entry form; "Enter Details" triggers add(),
    # which inserts into ORD and BILL.
    try:
        screen1.destroy()
    except:
        pass
    global screen6
    screen6=Tk()
    Label(screen6,text="Enter Details",bg ="grey", font=("Calibri",15),width =500,height=1).pack()
    # Globals so the add() callback can read the typed values.
    global OID,Oname,Ono,Price,Portion,C_ID
    global d1,d2,d3,d4,d5,d6
    OID = StringVar()
    Oname = StringVar()
    Ono = StringVar()
    Price = StringVar()
    Portion= StringVar()
    C_ID=StringVar()
    Label(screen6, text= "Please enter details").pack()
    Label(screen6, text= "").pack()
    Label(screen6, text= "OID").pack()
    d1=Entry(screen6, textvariable = OID).pack()
    Label(screen6, text= "Oname").pack()
    d2=Entry(screen6, textvariable = Oname).pack()
    Label(screen6, text= "Ono").pack()
    d3=Entry(screen6, textvariable = Ono).pack()
    Label(screen6, text= "Price").pack()
    d4=Entry(screen6, textvariable = Price).pack()
    Label(screen6, text= "Portion").pack()
    d5=Entry(screen6, textvariable = Portion).pack()
    Label(screen6, text= "CID").pack()
    d6=Entry(screen6, textvariable = C_ID).pack()
    button=Button(screen6,text="Enter Details",command=add).pack(pady=5)
def add():
    # Callback: insert the order typed on screen6 into ORD and add its
    # price to the customer's BILL, then return to the admin screen.
    d1=OID.get()
    d2=Oname.get()
    d3=Ono.get()
    d4=Price.get()
    d5=Portion.get()
    d6=C_ID.get()
    x=(d1,d2,d3,d4,d5,d6)
    y=(d6,d4)   # (CID, AMOUNT) for the BILL insert
    c.execute("insert into ORD values(?,?,?,?,?,?)",x)
    c.execute("insert into BILL(CID,AMOUNT) values(?,?)",y)
    conn.commit()
    messagebox.showinfo("Order Added","Order Added")
    screen6.destroy()
    login_admin()
def Employee():
    # Dump the EMPLOYEE table into a text window.
    global screen11
    c.execute("select * from EMPLOYEE ")
    a=pd.DataFrame(c.fetchall())
    a.columns = ['EID','Ename','Phoneno.','EXP']
    screen11=Tk()
    screen11.geometry("750x300")
    text=Text(screen11)
    text.insert(END,str(a.iloc[:,0:]))
    text.pack(fill=BOTH, expand=1)
def menu():
    # Show the static menu image on a canvas with a Back button that
    # returns to the admin screen.
    try:
        screen1.destroy()
    except:
        pass
    global screen23
    screen23 =Tk()
    screen23.geometry("700x720")
    screen23.title("Restaurant Manager")
    my_img=ImageTk.PhotoImage(Image.open("menu.jpg"))
    my_canvas=Canvas(screen23,width=500,height=720)
    my_canvas.pack(fill="both",expand=True)
    my_canvas.create_image(10,10,image=my_img,anchor="nw")
    b3=Button(screen23,text="Back",command=login_admin)
    b3_window=my_canvas.create_window(600,670,anchor="nw",window=b3)
    screen23.mainloop()
def login_admin():
    # Admin dashboard: one button per back-office action.
    try:
        screen.destroy()
        screen23.destroy()
    except:
        pass
    global screen1
    screen1=Tk()
    screen1.geometry('800x300')
    Label(screen1,text="Welcome to the Restaurant Management System",bg ="grey", font=("Calibri",15),width =500,height=1).pack()
    B1=Button(screen1,text="Get Employee Details",width =30,height=1,command=Employee).pack(pady=5)
    B2=Button(screen1,text="Enter Details",width =30,height=1,command=enter).pack(pady=5)
    B3=Button(screen1,text="MENU",width =30,height=1,command=menu).pack(pady=5)
    B4=Button(screen1,text="Get Bill",width =30,height=1,command=getBill).pack(pady=5)
    B5=Button(screen1,text="Add Order",width =30,height=1,command=addorder).pack(pady=5)
    B6=Button(screen1,text="GENERATE BILL",width =30,height=1,command=gBill).pack(pady=5)
def login_verify():
    """Validate the login form against the per-user credential file.

    A user is accepted when the typed password appears in the file named
    after the username (the same net rule the original if/elif chain
    implemented, for admin and regular users alike).

    Bug fixes vs. the original:
    - `file1.close()` ran unconditionally at the end, raising NameError
      when the username was not found (file1 was never assigned);
    - the same file was opened up to three times and only one handle was
      ever closed; a single `with` block now handles open/close.
    """
    username1 = username_verify.get()
    password1 = password_verify.get()
    username_entry1.delete(0, END)
    password_entry1.delete(0, END)
    if username1 in os.listdir():
        with open(username1, "r") as credential_file:
            verify = credential_file.read().splitlines()
        if password1 in verify:
            try:
                messagebox.showinfo("SUCCESS", "login success")
                screen8.destroy()
            except:
                pass
            login_admin()
        else:
            messagebox.showinfo("ERROR", "Password has not been Recognized")
    else:
        messagebox.showinfo("ERROR", "User not found")
def login1():
    # Build the login form; login_verify() checks the typed credentials.
    try:
        screen.destroy()
    except:
        pass
    global screen8
    screen8=Tk()
    screen8.title("Login")
    screen8.geometry('500x250')
    global username_verify,password_verify,username_entry1,password_entry1
    username_verify=StringVar()
    password_verify=StringVar()
    Label(screen8,text ="Please Enter Details").pack()
    Label(screen8,text ="").pack()
    Label(screen8,text ="Username * ").pack()
    username_entry1=Entry(screen8,textvariable=username_verify)
    username_entry1.pack()
    Label(screen8,text ="Password * ").pack()
    password_entry1=Entry(screen8,textvariable=password_verify)
    password_entry1.pack()
    Label(screen8,text ="").pack()
    # NOTE(review): this Button has no parent widget, so it attaches to the
    # default root window rather than screen8 — confirm intended.
    Button(text="Login",width =30,height=1,command=login_verify).pack()
def register_user():
    # Persist credentials as a plain file named after the user:
    # line 1 = username, line 2 = password.
    # NOTE(review): plaintext password storage — acceptable for a demo only.
    username_info=username.get()
    password_info=password.get()
    file=open(username_info,"w")
    file.write(username_info+"\n")
    file.write(password_info)
    file.close()
    username_entry.delete(0,END)
    password_entry.delete(0,END)
    messagebox.showinfo("Registration Succesful","registration success")
    screen9.destroy()
    main_screen()
def register():
    # Build the registration form; register_user() writes the credentials.
    try:
        screen.destroy()
    except:
        pass
    global screen9
    screen9=Tk()
    screen9.title("Register")
    screen9.geometry('500x250')
    global username,password,username_info,password_info,username_entry,password_entry
    username=StringVar()
    password=StringVar()
    Label(screen9,text ="Please Enter Details").pack()
    Label(screen9,text ="").pack()
    Label(screen9,text ="Username * ").pack()
    username_entry=Entry(screen9,textvariable=username)
    username_entry.pack()
    Label(screen9,text ="Password * ").pack()
    password_entry=Entry(screen9,textvariable=password)
    password_entry.pack()
    Label(screen9,text ="").pack()
    # NOTE(review): Button has no parent; it attaches to the default root.
    Button(text="Register",width =30,height=1,command=register_user).pack()
def main_screen():
    # Entry window: Login / Register / Exit buttons placed on a canvas.
    global screen
    screen =Tk()
    screen.geometry("640x540")
    screen.title("Restaurant Manager")
    # my_img1=ImageTk.PhotoImage(Image.open("Cover.jpg"))
    my_canvas=Canvas(screen,width=640,height=540)
    my_canvas.pack(fill="both")
    # my_canvas.create_image(0,0,image=my_img1,anchor="nw")
    b1=Button(screen,text="Login",width =10,height=1,command= login1)
    b2=Button(screen,text="Register",width =10,height=1,command=register)
    b3=Button(screen,text="Exit Program",width =10,height=1,command=screen.quit)
    b1_window=my_canvas.create_window(30,10,anchor="nw",window=b1)
    b2_window=my_canvas.create_window(150,10,anchor="nw",window=b2)
    b3_window=my_canvas.create_window(530,480,anchor="nw",window=b3)
    screen.mainloop()
# Launch the application.
main_screen()
|
import sys
import json
import matplotlib.pyplot as plt
import numpy as np
import matplotlib
import jsonlines
import math
import numpy as np
# For each event in the jsonlines input (argv[1]), take the single electron
# (id 11) and single muon (id 13) and histogram the ratio of their
# z-momenta; the plot is saved to argv[2].
costhetas = []
for i,e in enumerate(jsonlines.Reader(open(sys.argv[1]))):
    els = [p for p in e['particles'] if p['id'] == 11]
    mus = [p for p in e['particles'] if p['id'] == 13]
    # Exactly one of each lepton species is expected per event.
    assert len(mus) == 1
    assert len(els) == 1
    mu = mus[0]
    el = els[0]
    el_px, el_py, el_pz = [el[x] for x in ['px','py','pz']]
    mu_px, mu_py, mu_pz = [mu[x] for x in ['px','py','pz']]
    # NOTE(review): named "costheta" but computed as a plain pz ratio, not a
    # cosine from the full 3-momenta — confirm this is the intended quantity.
    costheta = mu_pz/el_pz
    costhetas.append(costheta)
plt.hist(costhetas, bins = 100, histtype='stepfilled')
plt.savefig(sys.argv[2])
|
def a(n):
    """Factorial of n, computed recursively.

    Bug fix: the base case is now n <= 1, so a(0) returns 1 instead of
    recursing forever (the original only stopped at n == 1).
    """
    if n <= 1:
        return 1
    return n * a(n - 1)
def b(n):
    """Factorial of n, computed iteratively."""
    product = 1
    # Multiplying by 1 is a no-op, so the product starts at factor 2.
    for factor in range(2, n + 1):
        product *= factor
    return product
if __name__=='__main__':
    # Bug fix: print() calls so the demo runs on Python 3 as well as 2
    # (single-argument print(x) behaves identically on both).
    print(b(4))
    print(a(5))
# Reads integers until the user answers something other than 's', then
# reports the largest, the smallest, the mean and the full list.
media = mediaF= maior = cont = 0
menor = 99999999   # sentinel "smallest so far"
ans = 's'
listN = []
while ans == 's':  # alternatively: while ans in 'Ss'
    cont += 1
    n = int(input('Digite um valor: '))
    listN.append(n)
    ans = str(input('Deseja continuar? [S/N]: ')).lower()
    media += n   # running SUM; divided by cont after the loop
    if n > maior:
        maior = n
    if n < menor:
        menor = n
mediaF = media / cont
print('O maior número é: {}'.format(maior))
print('O menor número é: {}'.format(menor))
print('A média dos números é: {}'.format(mediaF))
print('Os números digitados foram: {}'.format(listN))
|
from rest_framework import generics
from rest_framework.generics import get_object_or_404
from .models import Livro
from .serializers import LivroSerializer
class LivrosAPIView(generics.ListCreateAPIView):
    """List all Livro records (GET) or create a new one (POST)."""
    queryset = Livro.objects.all()
    serializer_class = LivroSerializer
class LivroAPIView(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve (GET), update (PUT/PATCH) or delete (DELETE) one Livro."""
    queryset = Livro.objects.all()
    serializer_class = LivroSerializer
|
import VdManalyze as v
# Python 2 driver: export 2017 VdM calibration constants for one detector
# (all bunches, leading bunches, train bunches) to three CSV files.
detectors = ['PLT','HFET', 'HFOC']  # NOTE(review): unused; only `detector` below is exported
detector = 'PLT'
folder = '/brildata/vdmoutput/Automation/Analysed_Data/'
bcmfolder = '/brildata/vdmoutput/Automation/Analysed_Data/'
print detector, 'all'
df = v.load_all([detector], main_folder=bcmfolder, forcemodel=True)
df.to_csv(detector + '_const_2017.csv')
print detector, 'leading'
ldf = v.load_all([detector], main_folder=bcmfolder, forcemodel=True, leading = True)
ldf.to_csv(detector + '_const_leading_2017.csv')
print detector, 'train'
tdf = v.load_all([detector], main_folder=bcmfolder, forcemodel=True, train = True)
tdf.to_csv(detector + '_const_train_2017.csv')
# Earlier loop over every detector, kept for reference:
# for detector in detectors:
# print detector, 'all'
# df = v.load_all([detector], main_folder=folder)
# df.to_csv(detector + '_const_2017.csv')
# print detector, 'leading'
# ldf = v.load_all([detector], main_folder=folder, leading = True)
# ldf.to_csv(detector + '_const_leading_2017.csv')
# print detector, 'train'
# tdf = v.load_all([detector], main_folder=folder, train = True)
# tdf.to_csv(detector + '_const_train_2017.csv')
|
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Activation
from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D, Input, AveragePooling2D, GlobalAveragePooling2D, Concatenate
from keras.regularizers import l2
from keras.layers.normalization import BatchNormalization
import keras
import keras.backend as K
def TransitionBlock(x, nb_filters, dropout_rate=None, weight_decay=1e-4):
    """DenseNet transition: BN -> 1x1 conv -> 2x2 average pooling.

    Fixes vs. the original:
    - `weight_decay` had no default after `dropout_rate=None` (SyntaxError);
    - the Conv2D/pooling layers were constructed but never applied to `x`;
    - GlobalAveragePooling2D takes no pool_size/strides — the 2x2/stride-2
      downsampling intended here is AveragePooling2D (already imported).
    """
    x = BatchNormalization()(x)
    x = Conv2D(nb_filters, (1, 1), padding='same',
               kernel_regularizer=l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    x = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(x)
    return x
def ConvBlock(x, nb_filters, dropout_rate=None, weight_decay=1e-4):
    """DenseNet composite layer: BN -> ReLU -> 1x1 conv -> 3x3 conv.

    Fixes vs. the original:
    - `weight_decay` had no default after `dropout_rate=None` (SyntaxError);
    - the Conv2D layers were constructed but never applied to `x`;
    - the function never returned the output tensor.
    """
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(nb_filters, (1, 1), padding='same',
               kernel_regularizer=l2(weight_decay))(x)
    x = Conv2D(nb_filters, (3, 3), padding='same',
               kernel_regularizer=l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    return x
def DenseBlock(x, nb_layers, nb_filters, growth_rate, dropout_rate=None, weight_decay=1e-4):
    """Stack `nb_layers` ConvBlocks, growing the filter count each layer.

    Fixes vs. the original: `weight_decay` had no default after
    `dropout_rate=None`, and the call
    `ConvBlock(x, nb_filters, dropout_rate=None, weight_decay)` placed a
    positional argument after a keyword argument — both SyntaxErrors.

    NOTE(review): a true DenseNet block would Concatenate each ConvBlock
    output with its input (Concatenate is imported but unused); the
    original sequential wiring is preserved here.
    """
    for index in range(nb_layers):
        x = ConvBlock(x, nb_filters, dropout_rate=dropout_rate,
                      weight_decay=weight_decay)
        nb_filters += growth_rate
    return x, nb_filters
def DenseNet169(nb_classes, img_dim, depth, nb_dense_block,
                growth_rate, nb_filters, dropout_rate=None, weight_decay=1e-4):
    """Build a DenseNet-style classifier and return its output tensor.

    Fixes vs. the original:
    - `weight_decay` had no default after `dropout_rate=None` (SyntaxError);
    - `nb_layers` was an undefined name — it is now derived from `depth`;
    - the `DenseBlock(... dropout_rate=None, weight_decay)` calls mixed a
      positional arg after a keyword arg (SyntaxError);
    - GlobalAveragePooling2D takes no pool_size, and the final pooling and
      Dense layers were never applied to `x`.
    """
    model_input = Input(shape=img_dim)
    x = Conv2D(nb_filters, (3, 3), padding='same',
               kernel_regularizer=l2(weight_decay))(model_input)
    # ConvBlocks per dense block, derived from the requested total depth.
    # NOTE(review): assumes the classic (depth - 4) / num_blocks split —
    # confirm against the intended architecture.
    nb_layers = int((depth - 4) / nb_dense_block)
    # Add dense blocks, each followed by a down-sampling transition
    for block_id in range(nb_dense_block - 1):
        x, nb_filters = DenseBlock(x, nb_layers, nb_filters, growth_rate,
                                   dropout_rate=dropout_rate, weight_decay=weight_decay)
        x = TransitionBlock(x, nb_filters, dropout_rate=dropout_rate,
                            weight_decay=weight_decay)
    # adding the last dense block (no transition after it)
    x, nb_filters = DenseBlock(x, nb_layers, nb_filters, growth_rate,
                               dropout_rate=dropout_rate, weight_decay=weight_decay)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = GlobalAveragePooling2D()(x)
    x = Dense(nb_classes, activation='softmax')(x)
    # NOTE(review): returns the output tensor (as the original did); callers
    # must wrap it in keras.models.Model(model_input, x) themselves.
    return x
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-03-15 09:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the free-text `activities_duration` field to ExerciseAndReporting."""
    dependencies = [
        ('quicklook', '0007_exerciseandreporting_did_workout'),
    ]
    operations = [
        migrations.AddField(
            model_name='exerciseandreporting',
            name='activities_duration',
            field=models.TextField(blank=True),
        ),
    ]
|
# Generated by Django 3.0.5 on 2020-08-03 22:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the Document model and localize the Products model's
    verbose names / purchase_date label (Russian)."""
    dependencies = [
        ('mobileapp', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Document',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.CharField(blank=True, max_length=255)),
                ('document', models.FileField(upload_to='documents/')),
                ('uploaded_at', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.AlterModelOptions(
            name='products',
            options={'verbose_name': 'Продукт', 'verbose_name_plural': 'Продукты'},
        ),
        migrations.AlterField(
            model_name='products',
            name='purchase_date',
            field=models.DateTimeField(verbose_name='Дата покупки'),
        ),
    ]
|
def esaustivo(n):
'''Stampa tutte le permutazioni dei numeri da 0 a n - 1 con numeri pari
nelle posizioni pari.'''
def check(cifre):
for i in range(len(cifre)):
if not i % 2 and cifre[i] % 2:
return False
return True
def genera(cifre, utilizzate):
if len(cifre) == n: # nodo foglia
if check(cifre):
print(cifre)
else: # nodo interno
for i in range(n):
# Se la cifra è già presente nella lista delle cifre la scarta.
if i not in utilizzate:
utilizzate.add(i)
cifre.append(i)
genera(cifre, utilizzate)
cifre.pop()
utilizzate.remove(i)
if n >= 0:
cifre = []
utilizzate = set()
genera(cifre, utilizzate)
def stampaPermutazioniNumPari(n):
'''Stampa tutte le permutazioni dei numeri da 0 a n - 1 con numeri pari
nelle posizioni pari.'''
def genera(cifre, utilizzate):
if len(cifre) == n: # nodo foglia
print(cifre)
else: # nodo interno
for i in range(n):
# Se la cifra è già presente nella lista delle cifre la scarta.
# Inoltre se l'indice è pari anche la cifra deve esserlo.
if i not in utilizzate and len(cifre) % 2 == i % 2:
utilizzate.add(i)
cifre.append(i)
genera(cifre, utilizzate)
cifre.pop()
utilizzate.remove(i)
if n >= 0:
cifre = []
utilizzate = set()
genera(cifre, utilizzate)
if __name__ == "__main__":
    # Benchmark: brute force vs. pruned search on n = 10, wall-clock time.
    from time import time
    start_esaustivo = time()
    esaustivo(10)
    end_esaustivo = time()
    start_ottimo = time()
    stampaPermutazioniNumPari(10)
    end_ottimo = time()
    print()
    print("Esaustivo:", end_esaustivo - start_esaustivo)
    print("Ottimo:", end_ottimo - start_ottimo)
|
# Minimal Python 2 TCP server: accepts connections forever and sends a
# fixed welcome string to each client before closing the connection.
import socket
s = socket.socket()
host = socket.gethostname()   # bind to this machine's hostname
port = 12345
s.bind((host, port))
s.listen(5)                   # backlog of up to 5 pending connections
while True:
    c, addr = s.accept()
    print 'client address:', addr
    c.send('welcome to cainiao jiaocheng')
    c.close()
|
import json
import re
import sys
import traceback
import typing
from datetime import datetime
import yaml
from flask import Flask, request, abort
from matrix_client.client import MatrixClient
from matrix_client.errors import MatrixRequestError
application = Flask(__name__)
# Not going to care for specifics like the underscore.
# Generally match room alias or id [!#]anything:example.com with unicode support.
room_pattern = re.compile(r'^[!#]\w+:[\w\-.]+$')
"""
config.yml Example:
secret: "..."
matrix:
server: https://matrix.org
username: ...
password: "..."
"""
with open("config.yml", 'r') as ymlfile:
cfg = yaml.safe_load(ymlfile)
def check_token(header_field: str):
    """Abort the request with 401 unless the header named `header_field`
    carries the shared secret from config.yml."""
    token = request.headers.get(header_field)
    if token != cfg['secret']:
        abort(401)
def get_a_room():
    """Return the validated 'channel' query parameter (a Matrix room alias
    or id); abort with 400 if it is missing or malformed."""
    if 'channel' not in request.args:
        abort(400)
    room = request.args.get('channel')
    # sanitize input against the module-level room alias/id pattern
    if room_pattern.fullmatch(room) is None:
        abort(400)
    return room
def get_msg_type():
    """Return the requested Matrix message type ('m.text' or 'm.notice',
    defaulting to 'm.notice'); abort with 400 on any other value."""
    if 'msgtype' not in request.args:
        return "m.notice"
    msgtype = request.args.get('msgtype')
    if msgtype in ["m.text", "m.notice"]:
        return msgtype
    else:
        abort(400)
def iter_first_line(string: str):
    """Iterate over the lines of `string` with each line's trailing
    whitespace removed; leading whitespace of the whole string is stripped
    first, so `next(...)` yields the first non-blank content line."""
    return (line.rstrip() for line in string.lstrip().splitlines())
def shorten(string: str, max_len: int = 80, appendix: str = "..."):
    """Clip `string` to at most `max_len` characters; when clipping occurs
    the tail is replaced by `appendix` so the result is exactly `max_len`
    characters long."""
    if len(string) <= max_len:
        return string
    return string[:max_len - len(appendix)] + appendix
def matrix_error(error: MatrixRequestError):
    """Convert a Matrix API error into a Flask (body, status) response."""
    # see Flask.make_response, this will be interpreted as (body, status)
    return f"Error from Matrix: {error.content}", error.code
def process_gitlab_request():
    """Handle a GitLab webhook: on a "Push Hook" event, post a summary of
    the pushed commits to the configured Matrix room.  Return values are
    Flask (body, status) tuples."""
    check_token('X-Gitlab-Token')
    msgtype = get_msg_type()
    room = get_a_room()
    gitlab_event = request.headers.get("X-Gitlab-Event")
    if gitlab_event == "Push Hook":
        # Ignore pushes without commits (e.g. branch deletion events).
        if request.json["total_commits_count"] < 1:
            return "", 204
        # Fresh Matrix login + room join per request.
        try:
            client = MatrixClient(cfg["matrix"]["server"])
            client.login(username=cfg["matrix"]["username"], password=cfg["matrix"]["password"])
            room = client.join_room(room_id_or_alias=room)
        except MatrixRequestError as e:
            return matrix_error(e)
        def sort_commits_by_time(commits):
            # Oldest first, so the message reads chronologically.
            return sorted(commits, key=lambda commit: commit["timestamp"])
        def extract_commit_info(commit):
            # -> (shortened first line of the commit message, commit URL)
            msg = shorten(next(iter_first_line(commit["message"]), "$EMPTY_COMMIT_MESSAGE - impossibruh"))
            url = commit["url"]
            return msg, url
        username = request.json["user_name"]
        project_name = request.json["project"]["name"]
        if request.json["ref"].startswith("refs/heads/"):
            to_str = f" to branch {request.json['ref'][len('refs/heads/'):]} on project {project_name}"
        else:
            to_str = f" to {project_name}"
        commit_messages = list(map(extract_commit_info, sort_commits_by_time(request.json["commits"])))
        html_commits = "\n".join((f' <li><a href="{url}">{msg}</a></li>' for (msg, url) in commit_messages))
        text_commits = "\n".join((f"- [{msg}]({url})" for (msg, url) in commit_messages))
        try:
            room.send_html(f"<strong>{username} pushed {len(commit_messages)} commits{to_str}</strong><br>\n"
                           f"<ul>\n{html_commits}\n</ul>\n",
                           body=f"{username} pushed {len(commit_messages)} commits{to_str}\n{text_commits}\n",
                           msgtype=msgtype)
        except MatrixRequestError as e:
            return matrix_error(e)
    # see Flask.make_response, this is interpreted as (body, status)
    return "", 204
def process_jenkins_request():
    """Handle a Jenkins webhook: on a "Post Build Hook" event, post the
    build result and its SCM changes to the configured Matrix room.
    Return values are Flask (body, status) tuples."""
    check_token('X-Jenkins-Token')
    msgtype = get_msg_type()
    room = get_a_room()
    jenkins_event = request.headers.get("X-Jenkins-Event")
    if jenkins_event == "Post Build Hook":
        # Fresh Matrix login + room join per request.
        try:
            client = MatrixClient(cfg["matrix"]["server"])
            client.login(username=cfg["matrix"]["username"], password=cfg["matrix"]["password"])
            room = client.join_room(room_id_or_alias=room)
        except MatrixRequestError as e:
            return matrix_error(e)
        project_url = request.json["githubProjectUrl"]
        def extract_change_message(change):
            # -> (text, html) rendering of one SCM change entry.
            change_message = next(iter_first_line(change["message"]), "")
            if len(change_message) > 0:
                htimestamp = datetime.fromtimestamp(change['timestamp'] / 1000).strftime("%d. %b %y %H:%M")
                bare_commit_link = f"({shorten(change['commitId'], 7, appendix='')})"
                if project_url is not None and project_url:
                    commit_link = f"<a href=\"{project_url}commit/{change['commitId']}\">{bare_commit_link}</a>"
                else:
                    commit_link = bare_commit_link
                return (
                    f"- {shorten(change_message)} {bare_commit_link} by {change['author']} at {htimestamp}",
                    f" <li>{shorten(change_message)} {commit_link} by {change['author']} at {htimestamp}</li>",
                )
            else:
                # Change without a message: dump the raw JSON, shortened.
                dump = shorten(json.dumps(change), appendix="...}")
                return (
                    dump,
                    # NOTE(review): these replace() calls are no-ops as
                    # written; this was presumably HTML escaping
                    # ("&lt;"/"&gt;") mangled by the data dump — confirm
                    # against the original source before relying on it.
                    dump.replace("<", "<").replace(">", ">")
                )
        build_name = request.json["displayName"]
        project_name = request.json["project"]["fullDisplayName"]
        result_type = request.json["result"]["type"]
        result_color = request.json["result"]["color"]
        changes = request.json['changes']
        if len(changes) > 0:
            text_change_messages, html_change_messages = zip(*map(extract_change_message, changes))
        else:
            text_change_messages, html_change_messages = (), ()  # it's an owl!
        newline = '\n'
        try:
            room.send_html(f"<p><strong>Build {build_name} on project {project_name} complete: "
                           f"<font color=\"{result_color}\">{result_type}</font></strong>, "
                           f"{len(changes)} commits</p>\n"
                           "" + (f"<ul>\n{newline.join(html_change_messages)}\n</ul>\n" if len(html_change_messages) > 0 else ""),
                           body=f"**Build {build_name} on project {project_name} complete: {result_type}**, "
                                f"{len(changes)} commits\n"
                                "" + (f"{newline.join(text_change_messages)}\n" if len(text_change_messages) > 0 else ""),
                           msgtype=msgtype)
        except MatrixRequestError as e:
            return matrix_error(e)
    # see Flask.make_response, this is interpreted as (body, status)
    return "", 204
def process_prometheus_request():
    """Handle a Prometheus Alertmanager webhook POST and relay it to Matrix.

    Expects the shared secret in the ``secret`` query parameter.  Returns a
    Flask-style (body, status) tuple: 204 on success, 401 on a bad secret,
    500 when the JSON payload cannot be parsed.
    """
    secret = request.args.get('secret')
    if secret != cfg['secret']:
        abort(401)
    msgtype = get_msg_type()
    room = get_a_room()  # room id/alias string; rebound to a Room object after join below
    # written for version 4 of the alertmanager webhook JSON
    # https://prometheus.io/docs/alerting/configuration/#webhook_config

    def color_status_html(status: str, text: typing.Optional[str] = None) -> str:
        """Wrap *text* (defaults to *status* itself) in a status-colored <font> tag."""
        _status_colors = {"resolved": "34A91D", "firing": "EF2929"}
        if text is None:
            text = status
        if status in _status_colors:
            return f'<font color="#{_status_colors[status]}">{text}</font>'
        else:
            return text

    def extract_alert_message(alert: typing.Dict[str, typing.Any]) -> typing.Tuple[str, str]:
        """Takes the alert object and returns (text, html) as a string tuple."""
        alert_status = alert.get("status", "None")
        alert_labels = str(alert.get("labels", None))
        alert_annotations = str(alert.get("annotations", None))
        alert_start = alert.get("startsAt", None)
        alert_end = alert.get("endsAt", None)
        alert_daterange = []
        # NOTE(review): datetime.fromisoformat cannot parse a trailing "Z"
        # before Python 3.11 — confirm the timestamp format Alertmanager emits.
        if alert_start is not None:
            # strftime leaves a trailing space when %Z is empty; rstrip removes it.
            alert_start = datetime.fromisoformat(alert_start).strftime("%d. %b %y %H:%M %Z").rstrip()
            alert_daterange.append(f'Started at {alert_start}')
        if alert_end is not None:
            alert_end = datetime.fromisoformat(alert_end).strftime("%d. %b %y %H:%M %Z").rstrip()
            alert_daterange.append(f'Ended at {alert_end}')
        alert_daterange = "" if len(alert_daterange) == 0 else f'({", ".join(alert_daterange)})'
        alert_generator_url = alert.get("generatorURL", "None")
        return (
            f'[{alert_status}] Labels: {alert_labels}, Annotations: {alert_annotations} - {alert_daterange} | Generator: {alert_generator_url}',
            f'<strong>{color_status_html(alert_status)}</strong> Labels: {alert_labels}, Annotations: {alert_annotations} - {alert_daterange} | Generator: {alert_generator_url}',
        )

    def extract_prometheus_message() -> typing.Tuple[str, str]:
        """Dissects the request's JSON and returns (text, html) as a string tuple."""
        group_key = request.json.get("groupKey", "None")
        status = request.json.get("status", "None")
        receiver = request.json.get("receiver", "None")
        group_labels = str(request.json.get("groupLabels", None))
        common_labels = str(request.json.get("commonLabels", None))
        common_annotations = str(request.json.get("commonAnnotations", None))
        ext_url = request.json.get("externalURL", "None")
        alerts = request.json.get("alerts", [])  # type: typing.List[typing.Dict[str, typing.Any]]
        text_alerts, html_alerts = zip(*map(extract_alert_message, alerts))
        text_alerts = "\n" + "\n".join((f"- {msg}" for msg in text_alerts))
        html_alerts = "<br>\n<ul>\n" + "\n".join((f"  <li>{msg}</li>" for msg in html_alerts)) + "\n</ul>"
        return (
            f'*{status.title()} alert for group {group_key}*\n Receiver: {receiver}\n Labels: {group_labels} | {common_labels}\n Annotations: {common_annotations}\n External URL: {ext_url}\nAlerts:{text_alerts}',
            f'<strong>{color_status_html(status, f"{status.title()} alert for group {group_key}")}</strong><br>\n <em>Receiver:</em> {receiver}<br>\n <em>Labels:</em> {group_labels} | {common_labels}<br>\n <em>Annotations:</em> {common_annotations}<br>\n <em>External URL:</em> {ext_url}<br>\n<em>Alerts:</em>{html_alerts}',
        )

    try:
        # FIX: extract_prometheus_message returns (text, html); the old code
        # unpacked it as "html, body", sending the plain-text rendering as the
        # HTML payload and the HTML as the fallback body.
        body, html = extract_prometheus_message()
    except (LookupError, ValueError, TypeError):
        print("Error parsing JSON and forming message:", file=sys.stderr)
        traceback.print_exc()
        return "Error parsing JSON and forming message", 500
    try:
        client = MatrixClient(cfg["matrix"]["server"])
        client.login(username=cfg["matrix"]["username"], password=cfg["matrix"]["password"])
        room = client.join_room(room_id_or_alias=room)
        room.send_html(html=html, body=body, msgtype=msgtype)
    except MatrixRequestError as e:
        return matrix_error(e)
    # see Flask.make_response, this is interpreted as (body, status)
    return "", 204
@application.route('/matrix', methods=("POST",))
def notify():
    """Dispatch an incoming webhook POST to the matching service handler."""
    headers = request.headers
    if 'X-Gitlab-Token' in headers:
        return process_gitlab_request()
    if 'X-Jenkins-Token' in headers:
        return process_jenkins_request()
    if request.args.get('type') == "prometheus":
        return process_prometheus_request()
    return "Cannot determine the request's webhook cause", 400
|
from django.shortcuts import render
from rest_framework import viewsets, views, mixins, status, permissions, generics
from rest_framework.response import Response
from .models import LinkShop
from .serializers import LinkShopSerializer
from tracking.settings import DEFAULT_IDENTIFIER_SHOP
# Create your views here.
class LinkShopView(generics.ListCreateAPIView,
                   generics.UpdateAPIView,
                   generics.GenericAPIView):
    """Admin-only endpoint managing the shop link record.

    NOTE(review): list/create/update all assume the LinkShop table holds a
    single row (keyed by DEFAULT_IDENTIFIER_SHOP) — confirm that invariant.
    """
    serializer_class = LinkShopSerializer
    permission_classes = [permissions.IsAdminUser, ]

    def list(self, request):
        # Returns the stored link only when exactly one record exists;
        # zero records — and, as written, more than one — yield 404.
        query = LinkShop.objects.all()
        query_longitude = len(query)
        if query_longitude == 1:
            serializer = self.get_serializer(query[query_longitude - 1])
            return Response(serializer.data, status = status.HTTP_200_OK)
        return Response("No se ha podido hallar ningun registro en el server", status = status.HTTP_404_NOT_FOUND)

    def create(self, request):
        # If a record already exists, delegate to update() with the posted
        # fields as keyword arguments; otherwise create a new row.
        data = request.data
        print(data)
        link = LinkShop.objects.all()
        if link.exists():
            return self.update(request, **data)
        LinkShop.objects.create(**data)
        message = "Link de tienda actualizado exitosamente"
        return Response(message, status = status.HTTP_205_RESET_CONTENT)

    def update(self, request, **link):
        # Re-serializes the row identified by DEFAULT_IDENTIFIER_SHOP with the
        # incoming fields; raises (400) on validation failure.
        print(link)
        serializer = self.get_serializer_class()
        link_shop = LinkShop.objects.get(key = DEFAULT_IDENTIFIER_SHOP)
        object_to_update = serializer(link_shop, data = link)
        object_to_update.is_valid(raise_exception = True)
        self.perform_update(object_to_update)
        message = "Link actualizado"
        return Response(message, status = status.HTTP_200_OK)
|
from ckeditor.fields import RichTextField
from django.contrib.auth import get_user_model
from django.db import models
# Create your models here.
class Term(models.Model):
    """An academic term (school session/year) owned by a user."""
    user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, related_name='terms')
    school = models.CharField(max_length=20, blank=False)
    year = models.IntegerField(blank=False, null=True)
    session = models.CharField(max_length=40, blank=False)
    term_slug = models.SlugField(null=True)
    # FIX: was BigIntegerField(default=False) — a boolean default on an
    # integer column. The flag marks the user's current term, so BooleanField
    # is the correct type. Requires a schema migration.
    current = models.BooleanField(default=False)

    def __str__(self):
        return f'{self.session}'

    class Meta:
        # Newest terms first.
        ordering = ['-year']
class Course(models.Model):
    """A course taken in a term; owned by a user."""
    user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, null=True, related_name='courses')
    # NOTE(review): named "terms" but is a ForeignKey — one term per course.
    terms = models.ForeignKey(Term, on_delete=models.CASCADE, null=True, related_name='courses')
    course_code = models.CharField(max_length=40, unique=True, blank=False)
    title = models.CharField(max_length=40, blank=False)
    course_slug = models.SlugField(null=True)

    def __str__(self):
        return f'{self.course_code}'

    class Meta:
        ordering = ['-course_code']
class ClassNote(models.Model):
    """A rich-text note a user attaches to a course."""
    user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, related_name='notes')
    created_at = models.DateTimeField(auto_now_add=True)
    title = models.CharField(max_length=50, blank=False)
    body = RichTextField(config_name='ckeditor')
    note_slug = models.SlugField(null=True)
    course = models.ForeignKey(Course, on_delete=models.CASCADE, null=True, related_name='notes')

    def __str__(self):
        return f'{self.title}'

    def join_title(self):
        """Return the title lower-cased with its space separators removed."""
        return ''.join(self.title.lower().split(' '))

    class Meta:
        # Group notes by course, newest first within each course.
        ordering = ['course', '-created_at']
|
# Python program to print all prime numbers in a half-open range [start, end)
def primes_between(start, end):
    """Return the list of primes i with start <= i < end (trial division)."""
    primes = []
    for i in range(start, end):
        if i > 1:
            for j in range(2, i):
                if i % j == 0:
                    break
            else:
                # for/else: no divisor found, so i is prime.
                primes.append(i)
    return primes


if __name__ == "__main__":
    # FIX: input() returns str in Python 3; range() needs ints, so the
    # original "range(start, end)" raised TypeError.
    start = int(input("Enter the start number: "))
    end = int(input("Enter the end number: "))
    for p in primes_between(start, end):
        print(p)
from . import nx_agraph as nx_agraph, nx_pydot as nx_pydot
from .layout import *
from .nx_latex import *
from .nx_pylab import *
|
# -*- coding: cp936 -*-
# import pygame
import wx
import os
import win32com.client
# import keyevent
import server
import threading
import httpserver
# UI geometry and palette for the controller window.
window_size = (420,350)
button_size = (80,80)
filelist_size = (350,200)
frame_colour = wx.Colour(255,255,255)
text_colour = wx.Colour(40,139,213)
# Absolute paths of every ppt added via browse or drag-and-drop.
totalFileList = []
# COM automation handle; DispatchEx launches a dedicated PowerPoint
# process as a side effect of importing this module.
pptApplication = win32com.client.DispatchEx("PowerPoint.Application")
class MyFrame(wx.Frame):
    """Fixed-size top-level window for the PPT controller (Python 2 / wx)."""
    def __init__(self,image=None,parent=None,id=-1,pos=wx.DefaultPosition,title="PPTCtrl"):
        # Strip the resize border and maximize box so the frame cannot be resized.
        framestyle = wx.DEFAULT_FRAME_STYLE & ~ (wx.RESIZE_BORDER |
                                                 wx.RESIZE_BOX | wx.MAXIMIZE_BOX)
        wx.Frame.__init__(self,parent,id,title,pos,size=window_size,style=framestyle)
        self.Bind(wx.EVT_CLOSE,self.OnClose)
    def OnClose(self,event):
        # Destroy the frame on close (Python 2 print statement).
        print 'destroy'
        self.Destroy()
class MyApp(wx.App):
    """wx application: builds the UI and starts the socket + HTTP server threads."""
    def __init__(self,redirect=False):
        wx.App.__init__(self,redirect)
        self.pptlist = []
        self.netIP = None            # set by sockServer once the socket is bound
        self.ppt_chosen = "None"     # absolute path of the currently selected ppt
        self.sock = None
        # Background servers; started after wx.App.__init__ returns.
        self.socketHandle = threading.Thread(target=self.sockServer)
        self.socketHandle.start()
        self.webpyHandle = threading.Thread(target=self.webpyServer)
        self.webpyHandle.start()
    def OnExit(self):
        # Shut down both background servers when the app exits.
        self.sock.close()
        httpserver.close()
    def OnInit(self):
        """Build the window: browse/start buttons above the ppt list box."""
        self.Font = wx.Font(15,wx.MODERN,wx.NORMAL,wx.NORMAL)
        self.win = MyFrame()
        bkg = wx.Panel(self.win)
        bkg.SetBackgroundColour(frame_colour)
        # browser_image = wx.Image("browserbtn.jpg",wx.BITMAP_TYPE_JPEG).ConvertToBitmap()
        boxlist = []
        hbox1 = wx.BoxSizer()
        boxlist.append(hbox1)
        # browser_size = (browser_image.GetWidth, browser_image.GetHight)
        self.browser_btn = wx.Button(bkg,label="browser")
        self.browser_btn.Bind(wx.EVT_BUTTON,self.Browser)
        self.start_btn = wx.Button(bkg,label="start")
        self.start_btn.Bind(wx.EVT_BUTTON,self.StartButton)
        boxlist[-1].Add(self.browser_btn,flag=wx.EXPAND)
        boxlist[-1].Add(self.start_btn,flag=wx.EXPAND)
        self.textPPTChosen = wx.StaticText(bkg,-1,label="Drag ppts here")
        self.textPPTChosen.SetFont(self.Font)
        self.fileList = wx.ListBox(bkg,style=wx.TE_MULTILINE|wx.HSCROLL,size=filelist_size)
        self.fileList.SetForegroundColour(text_colour)
        self.fileList.SetFont(self.Font)
        self.fileList.Bind(wx.EVT_LISTBOX, self.OnChoose)
        self.fileList.Bind(wx.EVT_LISTBOX_DCLICK,self.DClick)
        vbox = wx.BoxSizer(wx.VERTICAL)
        boxlist.append(vbox)
        boxlist[-1].Add(self.textPPTChosen,flag=wx.EXPAND,border=5)
        boxlist[-1].Add(self.fileList,flag=wx.EXPAND,border=10)
        # Accept ppt files dragged onto the list box.
        dropfile = MyFileDropTarget(self.fileList)
        self.fileList.SetDropTarget(dropfile)
        totalbox = wx.BoxSizer(wx.VERTICAL)
        for box in boxlist:
            totalbox.Add(box,flag=wx.EXPAND|wx.ALL,border=5)
        bkg.SetSizer(totalbox)
        self.win.Show()
        return True
    def Browser(self,event):
        """Open a file dialog and add the chosen ppt to the list (newest on top)."""
        file_wlidcard = "*.ppt;*.pptx"
        fdlg = wx.FileDialog(self.win,'choose pptfile',os.getcwd(),wildcard=file_wlidcard,style=wx.OPEN)
        if fdlg.ShowModal() == wx.ID_OK:
            sourceFileDir = fdlg.GetDirectory()
            filename = fdlg.GetFilename()
            self.ppt_chosen = sourceFileDir+'\\\\'+filename
            self.textPPTChosen.SetLabel(filename)
            totalFileList.append(self.ppt_chosen)
            item = [filename]
            self.fileList.InsertItems(item,0)
            fdlg.Destroy()
        else:
            return
    def sockServer(self):
        """Run the control socket server; publishes its bound IP via self.netIP."""
        try:
            self.sock = server.Server()
            self.netIP = self.sock.getBindIp()
            # print 'sockserver self.netip',self.netIP
            self.sock.run()
        except Exception as e:
            print e
    def webpyServer(self):
        """Start the HTTP server once the socket server has a bound IP."""
        # NOTE(review): busy-wait spin until sockServer sets netIP — burns a
        # core; a threading.Event would be the usual fix. Also "== None"
        # should be "is None".
        while self.netIP == None:
            pass
        netIP = self.sock.getBindIp()
        httpserver.main(netIP)
        # print 'webpy server ip is ',netIP
        # os.system("python httpserver.py %s:2015"%netIP)
    def OnChoose(self,event):
        # List box shows newest first, totalFileList appends oldest first,
        # hence the reversed index arithmetic.
        self.textPPTChosen.SetLabel(event.GetString())
        self.ppt_chosen = totalFileList[len(totalFileList)-self.fileList.GetSelection()-1]
        # print "onChoose:%s"%event.GetString()
    def DClick(self,event):
        """Double-click: select the ppt and open it immediately."""
        self.textPPTChosen.SetLabel(event.GetString())
        # print "DClick:"+totalFileList[event.GetSelection()]
        self.ppt_chosen = totalFileList[len(totalFileList)-self.fileList.GetSelection()-1]
        filepath = self.ppt_chosen
        self.startppt(filepath)
    def startppt(self,filepath):
        """Open *filepath* in the shared PowerPoint COM instance."""
        try:
            if os.path.isfile(filepath):
                # NOTE(review): str.replace returns a new string — this result
                # is discarded, so the path is passed through unchanged.
                filepath.replace('\\','\\\\')
                pptApplication.Presentations.Open(filepath)
                pptApplication.Visible = True
                pptApplication.WindowState = 1
        except Exception as e:
            print e
    def StartButton(self,event):
        # Open the currently selected ppt, if its path still exists.
        filepath = self.ppt_chosen
        if not os.path.isfile(filepath):
            return False
        self.startppt(filepath)
        return True
class MyFileDropTarget(wx.FileDropTarget):
    """Accepts PowerPoint files dropped onto the file list box."""
    def __init__(self,window):
        wx.FileDropTarget.__init__(self)
        self.window = window  # the wx.ListBox receiving dropped file names
    def OnDropFiles(self,x,y,filenames):
        """Add each dropped ppt/pptx/pptm to the list box and totalFileList."""
        for file in filenames:
            ext = file.split('.')[-1]
            # Case-insensitive extension check; generalizes the original
            # explicit ppt/PPT/pptx/PPTX/pptm/PPTM comparisons so mixed case
            # like "Ppt" is accepted too.
            if ext.lower() in ('ppt', 'pptx', 'pptm'):
                item = [file.split('\\')[-1]]
                self.window.InsertItems(item,0)
                totalFileList.append(file)
def main():
    """Create the wx application and run its event loop until exit."""
    application = MyApp()
    application.MainLoop()


if __name__ == "__main__":
    main()
|
from collections import OrderedDict
from treadmill.infra.setup import base_provision
from treadmill.infra import configuration, constants, exceptions, connection
from treadmill.api import ipa
class Zookeeper(base_provision.BaseProvision):
    """Provision a Zookeeper cluster of IPA-enrolled hosts."""
    def setup(self, image, key, cidr_block, instance_type,
              ipa_admin_password, proid, subnet_name, count=3):
        """Create *count* Zookeeper instances, one super().setup call per host.

        Raises IPAServerNotFound / LDAPNotFound when the prerequisite hosts
        are missing.
        NOTE(review): ipa_admin_password is accepted but not used in this
        method — presumably consumed elsewhere by the base class; confirm.
        """
        # NOTE(review): roles are passed as [IPA, LDAP] but unpacked as
        # (ldap, ipa) — ordering contract lives in hostnames_for; confirm.
        ldap_hostname, ipa_server_hostname = self.hostnames_for(
            roles=[
                constants.ROLES['IPA'],
                constants.ROLES['LDAP'],
            ]
        )
        if not ipa_server_hostname:
            raise exceptions.IPAServerNotFound()
        if not ldap_hostname:
            raise exceptions.LDAPNotFound()
        _ipa = ipa.API()
        # Map of index -> hostname for the whole cluster; the zoo.cfg server
        # list is derived from it once, up front.
        _zk_hostnames = self._hostname_cluster(count=count)
        _cfg_data = self._construct_cfg_data(_zk_hostnames)
        for _idx in _zk_hostnames.keys():
            _zk_h = _zk_hostnames[_idx]
            # Enroll the host in IPA; the returned one-time password is
            # handed to the instance configuration for its own enrollment.
            _otp = _ipa.add_host(hostname=_zk_h)
            _ipa.service_add('zookeeper', _zk_h, {
                'domain': connection.Connection.context.domain,
                'hostname': _zk_h,
            })
            self.name = _zk_h
            self.configuration = configuration.Zookeeper(
                ldap_hostname=ldap_hostname,
                ipa_server_hostname=ipa_server_hostname,
                hostname=_zk_h,
                otp=_otp,
                idx=_idx,
                proid=proid,
                cfg_data=_cfg_data
            )
            # One instance per loop iteration, hence count=1 here.
            super().setup(
                image=image,
                count=1,
                cidr_block=cidr_block,
                key=key,
                instance_type=instance_type,
                subnet_name=subnet_name,
                sg_names=[constants.COMMON_SEC_GRP],
            )

    def _construct_cfg_data(self, hostnames):
        # Build the zoo.cfg ensemble block: one
        # "server.<idx>=<host>:2888:3888" line per host, sorted by index.
        return '\n'.join(
            ['server.' + _h[0] + '=' + _h[1] + ':2888:3888'
             for _h in OrderedDict(sorted(hostnames.items())).items()]
        )
|
import cross_deletions_with_dgv_results_Vfor_new
from collections import OrderedDict
from openpyxl import Workbook
from openpyxl.comments import Comment
from openpyxl.styles import Font, Fill, PatternFill, Alignment
from openpyxl.styles.borders import Border, Side
def make_reg_to_test(params, chrr, start, end):
    """Run the ACMG overlap check for one region and return its result."""
    query = {
        "ovl": "full",
        "version": params["version"],
        "perc": "70",
        "tt": params["tt"],
        "chrA": chrr[0],
        "brA": start + "-" + end,
        "dats": ["gnomad", "chaisson", "collins", "DGV", "1000Genomes", "ClinGenben", "ClinGenlben"],
    }
    return cross_deletions_with_dgv_results_Vfor_new.exect_for_ACMG(query)
def merge_two_dicts(x, y, w):
    """Merge three dicts into a new dict without mutating the inputs.

    On key collisions y overrides x and w overrides both.
    (Name kept for caller compatibility, although it merges three dicts.)
    """
    return {**x, **y, **w}
def correct_oes(oes1):
    """Return the set of keys whose first or second value entry contains 'b'."""
    return {key for key, value in oes1.items()
            if "b" in value[0] or "b" in value[1]}
def make_ACMG(params, chrr, start, end, ws2):
    """Derive the automatic ACMG CNV scores for one region and write the rubric.

    params : dict with the CNV type ("tt": "Deletion"/"Duplication"), gene
             dictionaries (dic1/dic2, dictext1/dictext2, TDel/TDel_tx) and
             the "oes" HI/TS annotation map.
    chrr   : sequence whose first element is the chromosome name.
    start, end : breakpoint coordinates as strings.
    ws2    : openpyxl worksheet that receives the filled rubric.
    """
    scores = OrderedDict()
    # --- collect the genes affected by the CNV ---
    to_count = set()
    oes = correct_oes(params["oes"])
    dic1 = params.get("dic1", {})
    dic2 = params.get("dic2", {})
    dictext1 = params.get("dictext1", {})
    dictext2 = params.get("dictext2", {})
    # Genes listed between the two "Breakpoint" markers are fully contained
    # in the CNV.
    br = False
    for el in dic1:
        if "Breakpoint" in el and not br:
            # FIX: was "br==True" — a no-op comparison, so the flag never
            # flipped and no gene between the markers was ever collected.
            br = True
        elif "Breakpoint" not in el and br:
            to_count.add(el)
        elif "Breakpoint" in el and br:
            break
    if len(dic2) > 0:
        for ee in dic2:
            if "Breakpoint" not in ee and br:
                to_count.add(ee)
            elif "Breakpoint" in ee and br:
                # FIX: was "Breakpoint" in el — tested the stale dic1 loop
                # variable instead of ee.
                break
    if "TDel" in params:
        for ele in params["TDel"]:
            to_count.add(ele)
    # 2A: a fully-contained HI/TS gene scores the maximum.
    for el in to_count:
        if el in oes:
            scores["1A"] = ["Genes affected by the CNV", 0]
            scores["2A"] = ["Complete overlap of an HI/TS gene", 1]
    # Breakpoint-interrupted genes also count towards the gene total.
    for eee in dictext2:
        to_count.add(eee)
    for ele in dictext1:
        to_count.add(ele)
    if "TDel_tx" in params:
        for tt in params["TDel_tx"]:
            to_count.add(tt)
    if len(to_count) == 0:
        scores["1B"] = ["No genes affected by the CNV", -0.6]
    else:
        scores["1A"] = ["Genes affected by the CNV", 0]
    # --- HI/TS genes hit by a breakpoint (only when 2A did not apply) ---
    print("oes", oes)
    if "2A" not in scores:
        print("dic", dictext1, dictext2)
        if "TDel_tx" in params:
            z = merge_two_dicts(dictext1, dictext2, params["TDel_tx"])
        else:
            z = merge_two_dicts(dictext1, dictext2, {})
        print("z", z)
        if len(z) > 0:
            for w, value in z.items():
                if w in oes:
                    print(w, value)
                    if params["tt"] == "Duplication":
                        if "2I-2L" not in scores:
                            scores["2I-2L"] = ["One or both breakpoints affect an HI/TS gene", "0 to 0.9"]
                    elif "5'" in value[0] and "2C1" not in scores:
                        # FIX: output text said "codding sequence".
                        scores["2C1"] = ["Affects the 5' UTR of the gene and coding sequence", 0.9]
                    elif "3'" in value[0] and "2D2-2D4" not in scores:
                        scores["2D2-2D4"] = ["Affects the 3' UTR of the gene and coding sequence", "0.3 to 0.9"]
                    else:
                        scores["2E"] = ["Both breakpoints are located in the same gene", "0 to 0.9"]
    # --- overlap with established benign CNV regions ---
    gg = make_reg_to_test(params, chrr, start, end)
    if True in gg and params["tt"] == "Deletion":
        scores["2F"] = ["Completely contained within an established benign CNV region", -1]
    elif True in gg and params["tt"] == "Duplication":
        scores["2C,2D or 2F"] = ["A benign duplication having the same gene content as the input", "-0.9 to -1"]
    # --- Section 3: gene count ---
    # FIX: ClinGen uses different cut-offs for losses (0-24 / 25-34 / 35+)
    # and gains (0-34 / 35-49 / 50+); the original applied the gain cut-offs
    # to both CNV types.
    n_genes = len(to_count)
    if params["tt"] == "Deletion":
        if n_genes <= 24:
            scores["3A"] = ["Number of affected genes: 0-24", 0]
        elif n_genes <= 34:
            scores["3B"] = ["Number of affected genes: 25-34", 0.45]
        else:
            scores["3C"] = ["Number of affected genes: >34", 0.9]
    else:
        if n_genes <= 34:
            scores["3A"] = ["Number of affected genes: 0-34", 0]
        elif n_genes <= 49:
            scores["3B"] = ["Number of affected genes: 35-49", 0.45]
        else:
            scores["3C"] = ["Number of affected genes: >49", 0.9]
    print(scores)
    if params["tt"] == "Deletion":
        write_ACMG_del(scores, ws2)
    else:
        write_ACMG_dup(scores, ws2)
def write_ACMG_del(scores, ws2):
    """Write the ClinGen copy-number-LOSS scoring rubric into worksheet *ws2*
    and fill column D ("Points Given") for each automatically derived score.
    """
    # Maps a score key to the sheet row(s) whose "Points Given" cell it fills.
    dell={"1A":["7"], "1B":["8"], "2A":["10"], "2C1":["13"], "2D2-2D4":["17","18","19"], "2E":["20"], "2F":["21"], "3A":["25"], "3B":["26"], "3C":["27"]}
    ws2.append(["CNV Interpretation Scoring Rubric: Copy Number LOSS"])
    ws2.append(["The parameters presented bellow are according to ", '=HYPERLINK("https://cnvcalc.clinicalgenome.org/cnvcalc/", "ClinGen CNV Pathogenicity Calculator")'])
    ws2.append(['Parameters with grey background were automatically filled. Parameters 2D-2, 2D-3 and 2D-4 must be reviewed manually.'])
    ws2.append([''])
    ws2.append(["", "Sujested Points", "Maximum Score", "Points Given"])
    # NOTE(review): row 7 (1A) shows "0","-0.6" like 1B — presumably 1A's
    # maximum should be 0; confirm against the calculator.
    ws2.append(["Section 1: Initial Assessment of Genomic Content"])
    ws2.append(["1A. Contains protein-coding or other known functionally important elements", "0", "-0.6"])
    ws2.append(["1B. Does NOT contain protein-coding or any known functionally important elements", "0", "-0.6"])
    ws2.append(["Section 2 : Overlap with Established/Predicted HI or Established Benign Genes/Genomic Regions"])
    ws2.append(["2A. Complete overlap of an established HI gene/genomic region", "1", "1"])
    ws2.append(["2B. Partial overlap of an established HI genomic region", "0", "0"])
    ws2.append(["2C. Partial overlap with the 5’ end of an established HI gene (3’ end of the gene not involved)..."])
    ws2.append(["2C-1. …and coding sequence is involved", "0.90 (Range : 0.45 to 1.00)", "1"])
    ws2.append(["2C-2. …and only the 5’ UTR is involved", "0 (Range : 0 to 0.45)", "0.45"])
    ws2.append(["2D. Partial overlap with the 3’ end of an established HI gene (5’ end of the gene not involved)…"])
    ws2.append(["2D-1. …and only the 3’ untranslated region is involved.", "0", "0"])
    ws2.append(["2D-2. …and only the last exon is involved. Other established pathogenic variants have been reported in this exon.", "0.90 (Range : 0.45 to 0.90)", "0.9"])
    ws2.append(["2D-3. …and only the last exon is involved. No other established pathogenic variants have been reported in this exon.", "0.30 (Range : 0 to 0.45)", "0.45"])
    ws2.append(["2D-4. …and it includes other exons in addition to the last exon. Nonsense-mediated decay is expected to occur.", "0.90 (Range : 0.45 to 1.00)", "1"])
    ws2.append(["2E. Both breakpoints are within the same gene (gene-level sequence variant)", "(Range : 0 to 0.90)", ""])
    ws2.append(["2F. Completely contained within an established benign CNV region"])
    ws2.append(["2G. Overlaps an established benign CNV, but includes additional genomic material", "0", "0"])
    ws2.append(["2H. Multiple HI predictors suggest that AT LEAST ONE gene in the interval is haploinsufficient (HI)", "0.15", "0.15"])
    ws2.append(["Section 3: Evaluation of Gene Number"])
    ws2.append(["3A. 0-24 Genes", "0", "0"])
    ws2.append(["3B 25-34 Genes", "0.45", "0.45"])
    ws2.append(["3C. 35 or more Genes", "0.9", "0.9"])
    ws2.append(["Section 4: Detailed Evaluation of Genomic Content Using Published Literature, Public Databases, and/or Internal Lab Data"])
    ws2.append(["Reported proband has either:"])
    ws2.append(["A complete deletion of or a LOF variant within gene encompassed by the observed copy number loss OR"])
    ws2.append(["an overlapping copy number loss similar in genomic content to the observed copy number loss AND…"])
    ws2.append(["4A. …the reported phenotype is highly specific and relatively unique to the gene or genomic region", "(Range : 0.15 to 0.45)", "0.9 (total)"])
    ws2.append(["4B. …the reported phenotype is consistent with the gene/genomic region, is highly specific, but not necessarily unique to the gene/genomic region", "(Range : 0.15 to 0.45)", "0.9 (total)"])
    ws2.append(["4C. …the reported phenotype is consistent with the gene/genomic region, but not highly specific and/or with high genetic heterogeneity", "(Range : 0 to 0.30)", "0.9 (total)"])
    ws2.append(["4D.…the reported phenotype is NOT consistent with what is expected for the gene/genomic region or not consistent in general", "0 (Range: 0 to -0.30)", "-0.3 (total)"])
    ws2.append(["4E. Reported proband has a highly specific phenotype consistent with the gene/genomic region, but the inheritance of the variant is unknown.", "0.1 (Range : 0 to 0.15)", "0.3 (total)"])
    ws2.append(["4F. 3-4 observed segregation", "0.15", "0.15"])
    ws2.append(["4G. 5-6 observed segregation", "0.3", "0.3"])
    ws2.append(["4H. 7 or more observed segregation", "0.45", "0.45"])
    ws2.append(["4I. Variant is NOT found in another individual in the proband’s family AFFECTED with a consistent, specific, well-defined phenotype (no known phenocopies)", "-0.45 (Range: 0 to -0.45)", "-0.45"])
    ws2.append(["4J. Variant IS found in another individual in the proband’s family UNAFFECTED with the specific, well-defined phenotype observed in the proband", "-0.3 (Range: 0 to -0.30)", "-0.3"])
    ws2.append(["4K. Variant IS found in another individual in the proband’s family UNAFFECTED with the non-specific phenotype observed in the proband", "-0.15-0.3(Range: 0 to -0.15)", "-0.3"])
    ws2.append(["4L. Statistically significant increase amongst observations in cases (with a consistent, specific, well-defined phenotype) compared to controls", "0.45 (range: 0 to 0.45)", "0.45"])
    ws2.append(["4M. Statistically significant increase amongst observations in cases (without a consistent, non-specific phenotype OR unknown phenotype) compared to controls", "0.30 (range: 0 to 0.30)", "0.45"])
    ws2.append(["4N. No statistically significant difference between observations in cases and controls", "-0.9 (Range :0 to -0.9)", "-0.9"])
    ws2.append(["4O. Overlap with common population variation", " -1.00 (Range :0 to -1.00)", "-1"])
    ws2.append(["Section 5: Evaluation of Inheritance Pattern/Family History for Patient Being Studied"])
    ws2.append(["5A. Use appropriate category from de novo scoring section in Section 4.", "", "0.45"])
    ws2.append(["5B. Patient with specific, well-defined phenotype and no family history. CNV is inherited from an apparently unaffected parent.", "-0.30 (Range : 0 to -0.45)", "-0.45"])
    ws2.append(["5C. Patient with non-specific phenotype and no family history. CNV is inherited from an apparently unaffected parent.", "-0.15 (Range : 0 to -0.30)", "-0.3"])
    ws2.append(["5D. CNV segregates with a consistent phenotype observed in the patient’s family. (use scoring from section 4)", "", "0.45"])
    ws2.append(["5E. Use appropriate category from non-segregation section in Section 4. (use scoring from section 4)", "", "-0.45"])
    ws2.append(["5F. Inheritance information is unavailable or uninformative", "0", "0"])
    ws2.append(["5G. Inheritance information is unavailable or uninformative. The patient phenotype is non-specific, but is consistent with what has been described in similar cases.", "0.10 (Range : 0 to 0.15)", "0.15"])
    ws2.append(["5H. Inheritance information is unavailable or uninformative. The patient phenotype is highly specific and consistent with what has been described in similar cases.", "0.30 (Range : 0 to 0.30)", "0.30"])
    ws2.append([""])
    # Excel SUM ignores the text cells in the range; only the D-column points count.
    ws2.append(["", "", "Total","=SUM(A7:D55)"])
    ws2.append(["","","Classification",'=IF(D57>=0.99,"Pathogenic",IF(AND(D57>=0.9,D57<=0.98),"Potentialy pathogenic",IF(AND(D57>=-0.89,D57<=0.89),"VUS",IF(AND(D57>=-0.98,D57<=-0.9),"Potentialy benign","Benign"))))'])
    # Fill in the automatically derived points and grey out those rows.
    ll=["A","B","C", "D"]
    for key,value in scores.items():
        for el in dell[key]:
            gg=ws2["D"+el]
            gg.value=value[-1]
            for ele in ll:
                ws2[ele+el].fill=PatternFill("solid", fgColor="DDDDDD")
    make_ACMG_table_format(ws2)
def make_ACMG_table_format(ws2):
    """Apply column widths, fonts and borders to a finished ACMG rubric sheet."""
    # Column widths tuned for the long parameter descriptions in column A.
    widths = {"A": 101.86, "B": 21, "C": 18.86, "D": 11.14}
    for col, width in widths.items():
        ws2.column_dimensions[col].width = width
    cols = ["A", "B", "C", "D"]
    # Title and intro rows get their own fonts.
    ws2["A1"].font = Font(size=18, name="Arial Narrow")
    ws2["A2"].font = Font(size=10, name="Arial Narrow")
    ws2["B2"].font = Font(color="0000CC", size=10, name="Arial Narrow")
    ws2["A3"].font = Font(size=10, name="Arial Narrow")
    ws2["B5"].font = Font(size=10, name="Arial Narrow", bold=True)
    ws2["C5"].font = Font(size=10, name="Arial Narrow", bold=True)
    ws2["D5"].font = Font(size=10, name="Arial Narrow", bold=True)
    # Body rows start at 6; style each according to its A-column content.
    i = 6
    while i <= ws2.max_row:
        bb = ws2["A" + str(i)].value
        if bb is not None:  # FIX: was "bb!=None" — use identity comparison
            if bb.startswith("Section"):
                # Section headings: green bold with a bottom rule across A-D.
                ws2["A" + str(i)].font = Font(name="Arial Narrow", size=10, bold=True, color="009900")
                for el in cols:
                    ws2[el + str(i)].border = Border(bottom=Side(style='thin'))
            elif ws2["C" + str(i)].value == "Total":
                # Style the Total row and the Classification row right below it.
                for row in (i, i + 1):
                    ws2["C" + str(row)].font = Font(size=10, name="Arial Narrow", bold=True)
                    ws2["D" + str(row)].font = Font(size=10, name="Arial Narrow", bold=True)
                    ws2["C" + str(row)].border = Border(bottom=Side(style='thin'))
                    ws2["D" + str(row)].border = Border(bottom=Side(style='thin'))
            elif bb != "":
                for el in cols:
                    ws2[el + str(i)].font = Font(size=10, name="Arial Narrow")
        i += 1
def write_ACMG_dup(scores, ws2):
    """Write the ClinGen copy-number-GAIN scoring rubric into worksheet *ws2*
    and fill column D ("Points Given") for each automatically derived score.
    """
    # Maps a score key to the sheet row(s) whose "Points Given" cell it fills.
    dell={"1A":["7"], "1B":["8"], "2A":["10"], "2I-2L":["18", "19","20","21"], "2C,2D or 2F":["12","13","15"], "3A":["23"], "3B":["24"], "3C":["25"]}
    ws2.append(["CNV Interpretation Scoring Rubric: Copy Number GAIN"])
    ws2.append(["The parameters presented bellow are according to ", '=HYPERLINK("https://cnvcalc.clinicalgenome.org/cnvcalc/", "ClinGen CNV Pathogenicity Calculator")'])
    ws2.append(['Parameters with grey background were automatically filled.'])#####
    ws2.append([''])
    ws2.append(["", "Sujested Points", "Maximum Score", "Points Given"])
    ws2.append(["Section 1: Initial Assessment of Genomic Content"])
    ws2.append(["1A. Contains protein-coding or other known functionally important elements", "0", "0"])
    ws2.append(["1B. Does NOT contain protein-coding or any known functionally important elements", "-0.6", "-0.6"])
    ws2.append(["Section 2: Overlap with Established Triplosensitive (TS), Haploinsufficient (HI), or Benign Genes or Genomic Regions"])
    ws2.append(["2A. Complete overlap; the TS gene or minimal critical region is fully contained within the observed copy number gain", "1", "1"])
    ws2.append(["2B. Partial overlap of an established TS genomic region", "0", "0"])
    ws2.append(["2C. Identical in gene content to the established benign copy number gain", "-1","-1"])
    ws2.append(["2D. Smaller than established benign copy number gain, breakpoint(s) does not interrupt protein-coding genes", "-1", "-1"])
    ws2.append(["2E. Smaller than established benign copy number gain, breakpoint(s) potentially interrupts protein-coding gene", "0", "0"])
    ws2.append(["2F. Larger than known benign copy number gain, does not include additional protein-coding genes", "-0.90 (Range: 0 to -1.00)", "-1"])
    ws2.append(["2G. Overlaps a benign copy number gain but includes additional genomic material", "0", "0"])
    ws2.append(["2H. HI gene fully contained within observed copy number gain", "0", "0"])
    ws2.append(["2I. Both breakpoints are within the same gene (gene-level sequence variant, possibly resulting in loss of function (LOF))", "(Range : 0 to 0.9)", "0.9"])
    ws2.append(["2J. One breakpoint is within an established HI gene, patient’s phenotype is either inconsistent with what is expected for LOF of that gene OR unknown", "0", "0"])
    ws2.append(["2K. One breakpoint is within an established HI gene, patient’s phenotype is highly specific and consistent with what is expected for LOF of that gene", "0.45", "0.45"])
    ws2.append(["2L. One or both breakpoints are within gene(s) of no established clinical significance", "0","0"])
    ws2.append(["Section 3: Evaluation of Gene Number"])
    # FIX: these three rows carried the copy-number-LOSS gene counts
    # (0-24 / 25-34 / 35+); the ClinGen GAIN rubric uses 0-34 / 35-49 / 50+,
    # which also matches the thresholds the scoring code applies.
    ws2.append(["3A. 0-34 Genes", "0", "0"])
    ws2.append(["3B 35-49 Genes", "0.45", "0.45"])
    ws2.append(["3C. 50 or more Genes", "0.9", "0.9"])
    ws2.append(["Section 4: Detailed Evaluation of Genomic Content Using Published Literature, Public Databases, and/or Internal Lab Data"])
    ws2.append(["Reported proband has either:"])
    ws2.append(["complete duplication of one or more genes within the observed copy number gain OR"])
    ws2.append(["an overlapping copy number gain similar in genomic content to the observed copy number gain AND…"])
    ws2.append(["4A. …the reported phenotype is highly specific and relatively unique to the gene or genomic region", "(Range : 0.15 to 0.45)", "0.9 (total)"])
    ws2.append(["4B. …the reported phenotype is consistent with the gene/genomic region, is highly specific, but not necessarily unique to the gene/genomic region", "(Range : 0.15 to 0.45)", "0.9 (total)"])
    ws2.append(["4C. …the reported phenotype is consistent with the gene/genomic region, but not highly specific and/or with high genetic heterogeneity", "(Range : 0 to 0.30)", "0.9 (total)"])
    ws2.append(["4D.…the reported phenotype is NOT consistent with what is expected for the gene/genomic region or not consistent in general", "0 (Range: 0 to -0.30)", "-0.3 (total)"])
    ws2.append(["4E. Reported proband has a highly specific phenotype consistent with the gene/genomic region, but the inheritance of the variant is unknown.", "0.1 (Range : 0 to 0.15)", "0.3 (total)"])
    ws2.append(["4F. 3-4 observed segregation", "0.15", "0.15"])
    ws2.append(["4G. 5-6 observed segregation", "0.3", "0.3"])
    ws2.append(["4H. 7 or more observed segregation", "0.45", "0.45"])
    ws2.append(["4I. Variant is NOT found in another individual in the proband’s family AFFECTED with a consistent, specific, well-defined phenotype (no known phenocopies)", "-0.45 (Range: 0 to -0.45)", "-0.45"])
    ws2.append(["4J. Variant IS found in another individual in the proband’s family UNAFFECTED with the specific, well-defined phenotype observed in the proband", "-0.3 (Range: 0 to -0.30)", "-0.3"])
    ws2.append(["4K. Variant IS found in another individual in the proband’s family UNAFFECTED with the non-specific phenotype observed in the proband", "-0.15-0.3(Range: 0 to -0.15)", "-0.3"])
    ws2.append(["4L. Statistically significant increase amongst observations in cases (with a consistent, specific, well-defined phenotype) compared to controls", "0.45 (range: 0 to 0.45)", "0.45"])
    ws2.append(["4M. Statistically significant increase amongst observations in cases (without a consistent, non-specific phenotype OR unknown phenotype) compared to controls", "0.30 (range: 0 to 0.30)", "0.45"])
    ws2.append(["4N. No statistically significant difference between observations in cases and controls", "-0.9 (Range :0 to -0.9)", "-0.9"])
    ws2.append(["4O. Overlap with common population variation", " -1.00 (Range :0 to -1.00)", "-1"])
    ws2.append(["Section 5: Evaluation of Inheritance Pattern/Family History for Patient Being Studied"])
    ws2.append(["5A. Use appropriate category from de novo scoring section in Section 4.", "", "0.45"])
    ws2.append(["5B. Patient with specific, well-defined phenotype and no family history. CNV is inherited from an apparently unaffected parent.", "-0.30 (Range : 0 to -0.45)", "-0.45"])
    ws2.append(["5C. Patient with non-specific phenotype and no family history. CNV is inherited from an apparently unaffected parent.", "-0.15 (Range : 0 to -0.30)", "-0.3"])
    ws2.append(["5D. CNV segregates with a consistent phenotype observed in the patient’s family. (use scoring from section 4)", "", "0.45"])
    ws2.append(["5E. Use appropriate category from non-segregation section in Section 4. (use scoring from section 4)", "", "-0.45"])
    ws2.append(["5F. Inheritance information is unavailable or uninformative", "0", "0"])
    ws2.append(["5G. Inheritance information is unavailable or uninformative. The patient phenotype is non-specific, but is consistent with what has been described in similar cases.", "0.10 (Range : 0 to 0.15)", "0.15"])
    ws2.append(["5H. Inheritance information is unavailable or uninformative. The patient phenotype is highly specific and consistent with what has been described in similar cases.", "0.30 (Range : 0 to 0.30)", "0.30"])
    ws2.append([""])
    # Excel SUM ignores the text cells in the range; only the D-column points count.
    ws2.append(["", "", "Total","=SUM(A7:D53)"])
    ws2.append(["","","Classification",'=IF(D55>=0.99,"Pathogenic",IF(AND(D55>=0.9,D55<=0.98),"Potentialy pathogenic",IF(AND(D55>=-0.89,D55<=0.89),"VUS",IF(AND(D55>=-0.98,D55<=-0.9),"Potentialy benign","Benign"))))'])
    # Fill in the automatically derived points and grey out those rows.
    ll=["A","B","C", "D"]
    for key,value in scores.items():
        for el in dell[key]:
            gg=ws2["D"+el]
            gg.value=value[-1]
            for ele in ll:
                ws2[ele+el].fill=PatternFill("solid", fgColor="DDDDDD")
    make_ACMG_table_format(ws2)
|
#!/usr/bin/env python3
"""Interactively change the MAC address of a network interface (Linux, ifconfig)."""
import re
import subprocess

# aa:bb:cc:dd:ee:ff — six colon-separated hex octets.
MAC_RE = re.compile(r"^([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}$")


def is_valid_mac(mac_address):
    """Return True when *mac_address* looks like 00:11:22:33:44:55."""
    return bool(MAC_RE.match(mac_address))


def change_mac(interface, mac_address):
    """Bring *interface* down, set its hardware address, bring it back up.

    Returns True only when all three ifconfig calls exited with status 0.
    Argument lists (shell=False) keep user input from being interpreted by a
    shell — the original commented-out shell=True variants were injectable.
    """
    rc = subprocess.call(["ifconfig", interface, "down"])
    rc |= subprocess.call(["ifconfig", interface, "hw", "ether", mac_address])
    rc |= subprocess.call(["ifconfig", interface, "up"])
    return rc == 0


def main():
    subprocess.call(["ip", "-br", "-c", "link", "show"])
    interface = input("Please select your interface (eg.eth0/wlan0) : ")
    mac_address = input("Please enter the mac_address.(eg.00:11:22:33:44:55) : ")
    # FIX: validate the MAC before touching the interface; the original ran
    # the commands with arbitrary input and printed success unconditionally.
    if not is_valid_mac(mac_address):
        print("[-] Invalid mac_address format, nothing changed.")
        return
    print("[+] Changing mac_address for :", interface + " to " + mac_address)
    if change_mac(interface, mac_address):
        print("[+] Your mac_address is successfully changed.[+]")
    else:
        print("[-] ifconfig reported an error; mac_address may be unchanged.")


if __name__ == "__main__":
    main()
|
"""
Created on Fri Sep 6 09:24:55 2019
@author: Fenrir

Demo of math-related built-ins and the math module.
Unless noted otherwise, the math functions return floats.
https://docs.python.org/ja/3/library/math.html#module-math
"""
import math

# int(x): convert a string to an integer.
x = "7"
print(10 + int(x))

# float(x): convert a string to a float.
x = "1.25"
print(6.52 + float(x))

# sqrt(x): square root of x.
print("x : ")
x = int(input())  # convert the input string to a number
print(x, "の平方根 = ", math.sqrt(x))

# fabs(x): absolute value of x.
print("x : ")
x = input()
print(x, "の絶対値 = ", math.fabs(int(x)))

# pow(x, y): x raised to the power y.
print("x : ")
x = float(input())
print("y : ")
y = float(input())
print(x, "の", y,"乗 = ", math.pow(x, y))

# floor(x): largest integer <= x.
print("x : ")
# Bug fix: input() returns a str and math.floor(str) raises TypeError;
# convert to float first.
x = float(input())
# Bug fix: the label wrongly said 平方根 (square root); this is floor.
print(x, "の切り捨て = ", math.floor(x))

# ceil(x): smallest integer >= x.
print("x : ")
x = float(input())  # same TypeError fix as floor above
print(x, "の切り上げ = ", math.ceil(x))

# pi: the constant π = 3.141592...
print("π =", math.pi)

# len(a): number of elements in the list.
a = [7, 5, 8, 10, 15, 3]
print(len(a))

# sum(a): total of the list's elements.
a = [7, 5, 8, 10, 15, 3]
print(sum(a))

# max(a): largest element of the list.
a = [7, 5, 8, 10, 15, 3]
print(max(a))

# min(a): smallest element of the list.
a = [7, 5, 8, 10, 15, 3]
print(min(a))
|
#!/usr/bin/python
import sys
# Represents how we define a question.
class Question:
    """Accumulates the users associated with one question id.

    Bug fix: the original declared `id`/`users` as class attributes; a
    class-level mutable list would be shared by every instance that did
    not immediately reassign it. Both are now plain instance attributes.
    """

    def __init__(self, id, author):
        # Question identifier and the list of related user ids.
        self.id = id
        self.users = [int(author)]

    def addUser(self, user):
        """Record another user for this question."""
        self.users.append(int(user))

    def printResult(self):
        """Emit `id \\t [users]` to stdout.

        Uses sys.stdout.write so the output is identical under Python 2
        and 3 (the original Py2 `print a, b, c` form separated the three
        items with single spaces).
        """
        sys.stdout.write("%s \t %s\n" % (self.id, self.users))
# Reducer loop: stdin delivers "questionId<TAB>user" lines, assumed grouped by
# question id (standard MapReduce sort phase); consecutive lines for the same
# id are merged into one Question before being printed.
question = None
for line in sys.stdin:
    mappedData = line.strip().split("\t")
    if len(mappedData) != 2:
        # Something has gone wrong. Skip this line.
        continue
    # Load line to variables
    questionId, user = mappedData
    if question == None:
        # Very first valid line: start the first group.
        question = Question(questionId, user)
    elif question.id == questionId:
        question.addUser(user)
    else:
        # New question id: flush the finished group, start the next one.
        question.printResult()
        question = Question(questionId, user)
# Print last question
if question != None:
    question.printResult()
|
# -*- coding: utf-8 -*-
# Read one integer from stdin and report whether it is even.
number = int(input())
if number % 2:
    print(number, "is not an even number.")
else:
    print(number, "is an even number.")
|
from Player import Player
class SmartPlayer(Player):
    """
    Player that makes decisions based on logic written down
    The main goal is to create an artificial intelligence that can
    play Hearts, but for now, we need a basis on how to play
    in order to create a ranking system with ELO
    """
    def take_turn(self, initial, first_card=None, hearts_broken=False):
        """Pick a card from the hand using the suit-weight/policy heuristics,
        remove it from the hand, and return it.

        Card encoding used throughout (inferred from the `// 13` / `% 13`
        arithmetic below): `card // 13` is the suit (0=spades, 1=hearts, ...)
        and `card % 13` is the rank; card 10 is the queen of spades and 39 the
        2 of clubs -- TODO confirm against the Player/Deck definitions.
        """
        # can't change function parameters because this is a overriding function
        # uncomment to just use the original function (only if do not want a smart player)
        # return super(SmartPlayer, self).take_turn(initial, first_card, hearts_broken)
        # get how many cards have been played before you
        # how many people are left to play after you is important
        play_count = len(self.tracker.current_center)
        # set the weights for each suit
        # default: spades=10, hearts=10, diamond=10, clubs=10
        suit_weights = self.get_weights(play_count)
        # create a dict of (card : point_value) pairs based on the policy
        points = dict()
        for c in self.get_valid_cards(initial, first_card, hearts_broken):
            points[c] = self.policy(c, suit_weights, play_count)
        # get the card with the max policy value in this situation
        # if multiple with the same value, chooses the first (should not matter)
        # only problem I see is that we prioritize lower cards over higher cards
        # meaning - 1, 2, 3, etc for each suit, and spade, hearts, diamonds, clubs for suits
        card_index = self.hand.int_deck_array.index(max(points.keys(), key=(lambda i: points[i])))
        # this is the card we should play according to our policy
        card = self.hand.deck_array[card_index]
        self.hand.remove_card(card_index)
        return card

    def add_tracker(self, tracker):
        """
        adds the tracker to the player object
        tracker is the same tracker inside the game object (if init is done correctly)
        """
        self.tracker = tracker

    def get_weights(self, play_count):
        """
        returns the weight of each suit based on game conditions
        """
        # weights keyed by suit index; all suits start at the default of 10
        weights = {0:10, 1:10, 2:10, 3:10}
        """
        if not the first turn, we do not have to play 2 of clubs
        Priority of suits after first turn is:
        if queen of spades has not been played:
            if we do not have queen of spades:
                if we have spades other than king or ace of spades:
                    play spades
                else:
                    clubs or diamonds
            else (we have queen of spades):
                if we have more than (13 // len(players)) spades in our hand:
                    we can play spades
                else:
                    play suit with lowest card count, unless hearts
        else (queen of spades has been played):
            spades, clubs, and diamonds are equal value
        (else clubs or diamonds, or if spades equal to clubs or diamonds):
            either get rid of one suit, or pass along turn responsability to other player
            if cards are all low, low risk of winning any turn:
                pass turn to someone else
            else:
                if all cards are high and high risk of winning:
                    if lowest count suit can be successfully gotten rid of:
                        play suit
                    else:
                        no safe option; keep playing
                else (have low and high cards):
                    play low to high
        """
        # we need to weigh the rest of the suits based on conditions
        # so far only have spades if first pick, and if not first pick weigh initial suit
        # if we are the first to play, need to choose the initial card
        if play_count == 0:
            # if player has queen of spades (determine weights of spades)
            if 10 in self.hand.int_deck_array:
                # if we reasonably have more spades than anyone else
                # if (num spades left // len(num players that do not have an off suit or none of the off suits is spades)) less than spades in hand
                if (self.tracker.suits[0] // len(list([p for p in range(self.tracker.player_count) if not p in self.tracker.off_suit.keys() \
                    or 0 not in self.tracker.off_suit[p]]))) < len(list([c for c in self.hand.int_deck_array if c // 13 == 0])):
                    # treat spades with more weight
                    """
                    if we play more spades, forces out higher spades?
                    forces others to play off-suit, draining their hand?
                    """
                    weights[0] *= 10
                # if we have only a few spades, need to conserve them and drain other suits
                else:
                    weights[0] /= 10
            # do not have queen of spades in our hand
            else:
                # if queen of spades has already been played, we do not need to
                # rush spades to force queen of spades. instead, focus on hearts
                if 10 in self.tracker.cards_played:
                    pass
                # if queen of spades has not been played, we can choose to rush spades if
                # we have more spades than ace or king, or we can play normally
                else:
                    # if we reasonably have more spades than anyone else
                    # if (num spades left // len(num players that do not have an off suit or none of the off suits is spades)) less than spades in hand
                    if (self.tracker.suits[0] // len(list([p for p in range(self.tracker.player_count) if not p in self.tracker.off_suit.keys() \
                        or 0 not in self.tracker.off_suit[p]]))) < len(list([c for c in self.hand.int_deck_array if c // 13 == 0])):
                        weights[0] *= 10
                    else:
                        # if playing spades puts us at risk, we should avoid
                        if 11 in self.hand.int_deck_array or 12 in self.hand.int_deck_array:
                            weights[0] /= 5
                        # if we can't force out queen ourselves, it is still a good option, but not a priority
                        else:
                            weights[0] *= 5
        # we have to follow the suit, so weigh the initial suit appropriately
        else:
            weights[self.tracker.current_center[0] // 13] = 100
        # our next objective is to get rid of an entire suit
        # even if we are not first, we need to weigh in case we don't have the initial suit
        for s in range(4):
            # weight of each suit inversely related to the amount of cards left of that suit in your hand
            weights[s] += (1.0 / ((1.0 + len(list([c for c in self.hand.int_deck_array if (c // 13) == s])))) ** 2)
        return weights

    def policy(self, card, suit_weights, play_count):
        """
        assigns a point value to the card based on the
        potential benefits of playing the card in this situation
        """
        # suit of the card to be evaluated
        suit = card // 13
        # how many points would be gained if we won
        points_weight = 0
        # weight card based on how high the card is (higher is better)
        card_weight = card % 13
        if play_count == 0:
            # if this is the first turn in the round, we obviously need to play 2 of clubs
            # so we treat 2 of clubs as 100 and all else as 1 so that we know it is the max value
            if self.tracker.turn_number == 1:
                # we should always have 2 of clubs if we get here
                if 39 not in self.hand.int_deck_array:
                    return -1
                # if card is 2 of clubs
                if card == 39:
                    return 100
                else:
                    return 1
            # if first card this turn, but not the first card this round (not restricted to 2 of clubs)
            else:
                risk_score = 0
                return suit_weights[suit] + risk_score + card_weight
        else:
            # if we are the last to play, we can potentially choose to take the turn or give it up
            if play_count == 3:
                # counts how many hearts are in the center
                points = list([c // 13 for c in self.tracker.current_center]).count(1)
                # if queen of spades in center
                if 10 in self.tracker.current_center:
                    points += 13
                initial_suit = self.tracker.current_center[0] // 13
                # does this card win the hand? if card is the same suit as initial and is higher than the other cards in center of initial suit
                wins_turn = (initial_suit == suit) and card > max(list([c for c in self.tracker.current_center if (c // 13) == initial_suit]))
                # if it wins, we want to make sure the points are small
                if wins_turn:
                    # weight card based on the inverse of points gained
                    points_weight = 1.0 / ((points + 1.0) ** 2)
            """
            we need to decide whether it is safe or not to beat the current hand state
            If we win, do we get points? If we lose, do we risk getter more points later?
            If first player:
                Will this suit result in hearts being played?
                Will this suit cause someone else to get the Queen of Spades?
                Is it safe for me to play a high card?
                Do I want to win this turn or should I pass the turn to someone else?
            """
            # based on this policy, the card has this value at being played
            # highest card that can be played of the best suit that won't give us too many points
            return suit_weights[suit] - points_weight + card_weight
|
class Drone(object):
    """A delivery drone: a position, an on-board inventory and a weight budget."""

    def __init__(self, x, y, storageCapacity, ID, weightList):
        super(Drone, self).__init__()
        self.x = x
        self.y = y
        self.storageCapacity = storageCapacity
        self.ID = ID
        # itemID -> number of units currently on board
        self.items = {}
        self.currentWeight = 0
        # itemID -> unit weight, shared across all drones
        self.weightList = weightList

    def addItem(self, nbItem, itemID):
        """Load nbItem units of itemID and update the carried weight."""
        self.items[itemID] = self.items.get(itemID, 0) + nbItem
        self.currentWeight += nbItem * self.weightList[itemID]

    def removeItem(self, nbItem, itemID):
        """Unload nbItem units of itemID and update the carried weight."""
        self.items[itemID] -= nbItem
        self.currentWeight -= nbItem * self.weightList[itemID]
        if self.items[itemID] < 0:
            # Same best-effort diagnostic as before: report, don't raise.
            print("error remove item in drone")
|
# -*- coding:utf-8 -*-
#FND.py
import smbus
import time
import threading
bus = smbus.SMBus(1)  # I2C bus 1 (the default user bus on a Raspberry Pi)
#FND configuration
addr_fnd = 0x20      # I2C address of the FND (7-segment display) port expander
config_port = 0x06   # expander register written once to configure the pins
out_port = 0x02      # expander register that latches the displayed segments
# Segment patterns: indices 0-9 are the digits, 10 (0x01) is the dot,
# 11 (0x02) is used for '-' below; index 12's glyph is unused here --
# TODO confirm patterns against the display's segment wiring.
data_fnd = (0xFC, 0x60, 0xDA, 0xF2, 0x66, 0xB6, 0x3E, 0xE0, 0xFE, 0xF6, 0x01,0x02,0x1A)
digit = (0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB)  # digit-select masks, one bit cleared per position
out_disp = 0
#Temp/Humi configuration
addr_temp = 0x40     # I2C address of the temperature/humidity sensor
cmd_temp = 0xf3      # trigger-temperature-measurement command
soft_reset = 0xfe    # sensor soft-reset command
ns_new = ""          # latest temperature reading rendered as a digit string
stop = 0             # set to 1 to ask the display loop to terminate
def temp_read():
    """Read the temperature sensor over I2C and return it as a 4-char digit string.

    Returns e.g. "2357" for 23.57°C (the decimal point is implied between the
    second and third characters; fnd_disp() draws it separately). Temperatures
    below 10°C are zero-padded.
    """
    temp = 0.0
    val = 0
    data = [0, 0]
    # Reset the sensor, then give it time to come back up.
    bus.write_byte(addr_temp, soft_reset)
    time.sleep(0.05)
    #temperature
    bus.write_byte(addr_temp, cmd_temp)
    time.sleep(0.260)  # wait for the conversion before reading the two bytes
    for i in range(0,2,1):
        data[i] = bus.read_byte(addr_temp)
    # MSB first, then LSB.
    val = data[0] << 8 | data[1]
    # Conversion formula; matches the HTU21/SHT21-family datasheet equation
    # -- TODO confirm the exact sensor model.
    temp = -46.85 + 175.72 / 65536 * val
    print ('temp : %.2f' %(temp))
    tp = str(temp)
    if (tp[1] != '.'):
        # Two integer digits (>= 10): take "XX" + first two decimals.
        return tp[0:2]+tp[3:5]
    else:
        # One integer digit (< 10): zero-pad to keep a fixed 4-char width.
        return '0'+tp[0]+tp[2:4]
def fnd_disp():
    """Multiplex the current temperature string onto the 7-segment display.

    Cycles through the digit positions fast enough (10 ms per digit) that all
    digits appear lit. Runs until the module-level `stop` flag becomes 1.
    NOTE(review): `stop` is only read here; see doFND() for how it is set.
    """
    ns_new = temp_read()
    while (ns_new == ""):
        time.sleep(0)
    # Configure all expander pins as outputs before driving segments.
    bus.write_word_data(addr_fnd, config_port, 0x0000)
    while (ns_new != ""):
        #ns_new = temp_read()
        while True:
            #ns_new = temp_read()
            for i in range(0,len(ns_new),1):
                if (ns_new[i] == '-'):
                    # Minus sign on the leftmost digit position.
                    # NOTE(review): execution then falls through to int(ns_new[i]),
                    # which would raise ValueError on '-' -- confirm the intended
                    # control flow for negative temperatures.
                    out_disp = data_fnd[11] << 8 | digit[0]
                    bus.write_word_data(addr_fnd, out_port, out_disp)
                # notation of dot(.)
                out_disp = data_fnd[10] << 8 | digit[1]
                bus.write_word_data(addr_fnd, out_port, out_disp)
                n = int(ns_new[i])
                out_disp = data_fnd[n] << 8 | digit[i]
                bus.write_word_data(addr_fnd, out_port, out_disp)
                time.sleep(0.01)
            #ns_new = temp_read()
            if (stop == 1):
                break
def doFND():
    """Poll the temperature sensor and drive the display until Ctrl+C.

    Bug fix: the original assigned ``stop = 1`` in the KeyboardInterrupt
    handler without a ``global stop`` declaration, so it only created a local
    variable and the ``if (stop == 1): break`` check inside fnd_disp() could
    never fire. Declaring the global makes the shutdown flag actually work.
    """
    global stop
    try:
        while True:
            ns_new = temp_read()
            if ns_new != "":
                fnd_disp()
    except KeyboardInterrupt:
        # Signal fnd_disp()'s display loop to exit.
        stop = 1
#if __name__ == '__main__':
# doFND()
|
"""
Split a compressed text file into multiple smaller (compressed) text files.
"""
import gzip
import bz2
import lzma
from itertools import islice
import click
from . import cli
# Map of compression-format name to the matching text-capable open().
COMP_OPEN = {"gzip": gzip.open, "bz2": bz2.open, "xz": lzma.open, "text": open}
COMP_OPTIONS = list(COMP_OPEN.keys()) + ["infer"]


def _infer_compression(path):
    """Return the COMP_OPEN key implied by *path*'s file extension."""
    for suffix, fmt in ((".gz", "gzip"), (".bz2", "bz2"), (".xz", "xz")):
        if path.endswith(suffix):
            return fmt
    return "text"


def do_csplit(
    input_file,
    output_file_format,
    lines_per_file=1000,
    encoding="utf-8",
    input_compression="infer",
    output_compression="infer",
):
    """
    Split a large compressed text file into multiple smaller (compressed) text files.

    {index} in output_file_format will be replaced by index of the output file.
    index of output files start from 0.

    Compression ("gzip", "bz2", "xz" or "text") is inferred from the file
    extension when the corresponding argument is left as "infer".
    """
    # Refactor: the two copy-pasted extension-sniffing chains are now a
    # single helper, so input and output inference cannot drift apart.
    if input_compression == "infer":
        input_compression = _infer_compression(input_file)
    if output_compression == "infer":
        output_compression = _infer_compression(output_file_format)
    in_open = COMP_OPEN[input_compression]
    out_open = COMP_OPEN[output_compression]
    with in_open(input_file, mode="rt", encoding=encoding) as fin:
        reader = iter(fin)
        index = 0
        while True:
            lines = list(islice(reader, lines_per_file))
            if not lines:
                return
            output_file = output_file_format.format(index=index)
            # mode "xt": fail loudly rather than overwrite an existing file.
            with out_open(output_file, mode="xt", encoding=encoding) as fout:
                fout.writelines(lines)
            index += 1
# Thin click CLI wrapper around do_csplit(); all real work happens there.
@cli.command()
@click.option(
    "-n",
    "--lines-per-file",
    default=1000,
    help="Maximum number of lines per output file",
)
@click.option(
    "-e", "--encoding", default="utf-8", help="Text encoding of input and output files"
)
@click.option(
    "-c",
    "--input-compression",
    default="infer",
    type=click.Choice(COMP_OPTIONS),
    help="Compression format of the input file",
)
@click.option(
    "-d",
    "--output-compression",
    default="infer",
    type=click.Choice(COMP_OPTIONS),
    help="Compression format of output files",
)
@click.argument("input-file")
@click.argument("output-file-format")
def csplit(
    lines_per_file,
    encoding,
    input_compression,
    output_compression,
    input_file,
    output_file_format,
):
    """
    Split a large compressed text file into multiple smaller (compressed) text files.

    {index} in output_file_format will be replaced by index of the output file.
    index of output files start from 0
    """
    do_csplit(
        input_file,
        output_file_format,
        lines_per_file,
        encoding,
        input_compression,
        output_compression,
    )
|
# with open(r"C:\Users\admin\OneDrive\デスクトップ\python1\07\example.txt", encoding="sjis") as file:
# print(file.readline().rstrip("\n"))
# print(file.readline().rstrip("\n"))
# print(file.readline().rstrip("\n"))
# Read three numbers (one per line) from numbers.txt and print their sum.
# NOTE(review): the absolute Windows path and the Shift-JIS encoding are
# machine-specific; this only runs on the author's machine as-is.
with open(r"C:\Users\admin\OneDrive\デスクトップ\python1\07\numbers.txt", encoding="sjis") as file:
    a = float(file.readline())
    b = float(file.readline())
    c = float(file.readline())
    print(a + b + c)
# line = "2015/4/1, 13.9, 8, 1"
# fields = line.split(",")
# print(fields)
# print(f"年月日:{fields[0]}")
# print(f"平均気温:{fields[1]}")
# print(f"品質情報:{fields[2]}")
# print(f"均質番号:{fields[3]}")
# data = input("年/月/日:")
# datas = data.split("/")
# print(f"{datas[0]}年{datas[1]}月{datas[2]}日")
# with open(r"C:\Users\admin\OneDrive\デスクトップ\python1\07\numbers.txt", encoding="sjis") as file:
# count = 0
# total = 0
# for line in file:
# total += float(line)
# print(total)
# count += 1
# print(count)
# average = total / count
# print(f"average:{average}")
# with open(r"C:\Users\admin\OneDrive\デスクトップ\python1\07\target.txt", mode="w") as file:
# print(file.name)
# file.write(str(100) + "\n")
# number = 100
# file.write(f"{number}")
#numbers = eval(input("リスト:"))
#with open(r"C:\Users\admin\OneDrive\デスクトップ\python1\07\target.txt", mode="w") as file:
# for number in numbers:
# file.write(f"{number}\n")
#
|
# Generated by Django 2.1.1 on 2018-09-08 05:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial schema: Item, Menu and Restaurant tables, with
    # Menu linking an Item to a Restaurant. Generated files should normally
    # not be edited by hand.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Item',
            fields=[
                ('name', models.CharField(max_length=20, primary_key=True, serialize=False)),
                ('description', models.TextField(max_length=200)),
                ('price', models.PositiveIntegerField()),
                ('category', models.CharField(choices=[('Veg', 'Veg'), ('Non-Veg', 'Non-Veg')], max_length=20)),
                ('sub_category', models.CharField(choices=[('Starters', 'Starters'), ('Main-Course', 'Main-Course'), ('Desserts', 'Desserts')], max_length=20)),
            ],
        ),
        migrations.CreateModel(
            name='Menu',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='menus', to='foodapp.Item')),
            ],
        ),
        migrations.CreateModel(
            name='Restaurant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('address', models.CharField(max_length=50)),
                ('rating', models.PositiveIntegerField(default=0)),
                ('delivery_time', models.CharField(max_length=50)),
            ],
        ),
        migrations.AddField(
            model_name='menu',
            name='restaurant',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='menus', to='foodapp.Restaurant'),
        ),
    ]
|
from braindecode.datasets.filterbank import generate_filterbank
import numpy as np
import pytest
def test_generate_filterbank():
    """Bands 2-16 Hz: 6 Hz-wide low bands up to 8 Hz, 10 Hz-wide bands above."""
    filterbands = generate_filterbank(min_freq=2, max_freq=16,
        last_low_freq=8, low_width=6, low_overlap=4,
        high_width=10, high_overlap=6, low_bound=0.2)
    # Expected [low, high] edges; the first band's lower edge is clipped
    # at low_bound=0.2.
    assert np.array_equal([[0.2,5],[1,7],[3,9],[5,11],[7,17],[11,21]],
        filterbands)
def test_generate_filterbank_low_bound_0():
    """Same configuration as above, but the first band may extend down to 0 Hz."""
    filterbands = generate_filterbank(min_freq=2, max_freq=16,
        last_low_freq=8, low_width=6, low_overlap=4,
        high_width=10, high_overlap=6, low_bound=0.)
    assert np.array_equal([[0.,5],[1,7],[3,9],[5,11],[7,17],[11,21]],
        filterbands)
def test_generate_filterbank_only_low_width_freqs():
    """With max_freq == last_low_freq only the narrow low-width bands appear."""
    filterbands = generate_filterbank(min_freq=2, max_freq=8,
        last_low_freq=8, low_width=6, low_overlap=4,
        high_width=10, high_overlap=6, low_bound=0.2)
    assert np.array_equal([[0.2,5],[1,7],[3,9],[5,11]],
        filterbands)
def test_generate_filterbank_failure():
    """Invalid band configurations must raise AssertionError with a helpful message.

    Bug fix: pytest's ``ExceptionInfo.value`` is the exception object itself
    and has no ``.message`` attribute on Python 3 (that was a deprecated
    Python 2 BaseException attribute), so the original assertions raised
    AttributeError instead of checking anything. Use ``str(excinfo.value)``.
    """
    with pytest.raises(AssertionError) as excinfo:
        generate_filterbank(min_freq=2, max_freq=22,
            last_low_freq=8, low_width=6, low_overlap=4,
            high_width=10, high_overlap=6, low_bound=0.2)
    assert ("max freq needs to be exactly the center "
        "of a filter band "
        "Close center: 20") == str(excinfo.value)
    with pytest.raises(AssertionError) as excinfo:
        generate_filterbank(min_freq=2, max_freq=20,
            last_low_freq=9, low_width=6, low_overlap=4,
            high_width=10, high_overlap=6, low_bound=0.2)
    assert ("last low freq "
        "needs to be exactly the center of a low_width filter band. "
        "Close center: 8") == str(excinfo.value)
|
# _*_ coding: utf-8 _*_
import ta
import os
import sys
import warnings
# Make the parent directory importable so sibling packages resolve.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
# Silence third-party deprecation noise for this analysis module.
warnings.filterwarnings("ignore")
import pandas as pd
class TechnicalAnalysis(object):
    """Base class for technical indicators computed over a price series."""

    def __init__(self, data, window_size):
        # Price series and rolling-window length shared by every indicator.
        self.data = data
        self.window_size = window_size

    def getDatas(self):
        """Subclasses override this to return their computed indicator."""
        return None
class BollingerBand(TechnicalAnalysis):
    """Bollinger Bands: rolling mean +/- sigma_size standard deviations."""

    def __init__(self, data, window_size=20, sigma_size=2):
        super().__init__(data, window_size)
        self.sigma_size = sigma_size
        # Upper/lower bands are populated by getDatas().
        self.hband = None
        self.lband = None

    def getDatas(self):
        """Return a frame with the raw values plus the upper and lower bands."""
        self.hband = ta.bollinger_hband(self.data, self.window_size, self.sigma_size)
        self.lband = ta.bollinger_lband(self.data, self.window_size, self.sigma_size)
        frame = self.data.to_frame(name='value')
        frame['hband'] = self.hband
        frame['lband'] = self.lband
        return frame
class RSI(TechnicalAnalysis):
    """Relative Strength Index over the configured window."""

    def __init__(self, data, window_size=14):
        super().__init__(data, window_size)
        # Filled in by getDatas().
        self.score = None

    def getDatas(self):
        """Compute, cache and return the RSI series."""
        self.score = ta.rsi(self.data, self.window_size)
        return self.score
class MACD(TechnicalAnalysis):
    """Moving Average Convergence/Divergence (short vs. long EMA difference)."""

    def __init__(self, data, swindow_size=12, lwindow_size=26):
        # The short window doubles as the base class's window_size.
        super().__init__(data, swindow_size)
        self.swindow_size = swindow_size
        self.lwindow_size = lwindow_size
        # Filled in by getDatas().
        self.score = None

    def getDatas(self):
        """Compute, cache and return the MACD series."""
        self.score = ta.macd(self.data, self.swindow_size, self.lwindow_size)
        return self.score
|
from . import fleet
def connect() -> None:
    """
    Connect to fleet signals to make tracking react to it

    Thin wrapper: delegates to fleet.connect() so callers can depend on
    this module instead of importing `fleet` directly.
    """
    fleet.connect()
|
from math import sqrt
def problem32():
    """Project Euler 32: sum of products whose multiplicand/multiplier/product
    concatenation is 1-9 pandigital.

    For every candidate product we try each divisor up to its square root;
    the digits of product, divisor and quotient together must be exactly
    "123456789". Improvement over the original: the pandigital check uses
    sorted()/str.join directly instead of the hand-rolled ordstr() helper.
    """
    products = set()
    for product in range(1, 10000):
        for d in range(2, int(sqrt(product)) + 1):
            if product % d == 0:
                digits = "%d%d%d" % (product, d, product // d)
                # Cheap length pre-filter before sorting the digits.
                if len(digits) == 9 and "".join(sorted(digits)) == "123456789":
                    products.add(product)
    # A set keeps each pandigital product counted once even if it has
    # several pandigital factorizations.
    return sum(products)
def ordstr(instr):
    """Return the characters of *instr* sorted ascending, as a new string.

    Replaces the original hand-rolled copy/sort/concatenate loop -- which
    also shadowed the builtin `list` and built the result with quadratic
    string concatenation -- with the idiomatic sorted() + str.join.
    """
    return "".join(sorted(instr))
|
from django.conf.urls import patterns, include, url
# URL routes for the news app.
# NOTE(review): `patterns()` with string view names was deprecated in
# Django 1.8 and removed in 1.10 -- this module only works on older Django.
urlpatterns = patterns('news.views',
    url(r'^$', 'news', name='news'),
    url(r'^(?P<post_id>\d+)/$', 'one_new', name='one_new'),
)
# Generated by Django 2.2.1 on 2019-06-21 16:33
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: widens the question and the four answer fields to
    # max_length=40000 and makes rep_tru_id nullable. Generated files should
    # normally not be edited by hand.

    dependencies = [
        ('questi', '0018_prof_class'),
    ]

    operations = [
        migrations.AlterField(
            model_name='questions',
            name='quest',
            field=models.CharField(max_length=40000, verbose_name='la question'),
        ),
        migrations.AlterField(
            model_name='questions',
            name='rep_1',
            field=models.CharField(max_length=40000, verbose_name='reponse 1'),
        ),
        migrations.AlterField(
            model_name='questions',
            name='rep_2',
            field=models.CharField(max_length=40000, verbose_name='reponse 2'),
        ),
        migrations.AlterField(
            model_name='questions',
            name='rep_3',
            field=models.CharField(max_length=40000, verbose_name='reponse 3'),
        ),
        migrations.AlterField(
            model_name='questions',
            name='rep_4',
            field=models.CharField(max_length=40000, verbose_name='reponse 4'),
        ),
        migrations.AlterField(
            model_name='questions',
            name='rep_tru_id',
            field=models.IntegerField(null=True, verbose_name='indiquer le numero de la reponse juste'),
        ),
    ]
|
# Read three integers and print the largest one.
print ('menentukan bilangan terbesar')
print ('menentukan 3 bilangan yang diinginkan')
a = int (input ('bilangan pertama ='))
b = int (input ('bilangan kedua ='))
c = int (input ('bilangan ketiga ='))
# Bug fix: the original used strict comparisons, so ties (e.g. a == b > c)
# fell through to the else branch and wrongly reported c; the else branch
# also printed c twice, unlike the other branches. max() handles all cases.
print ('bilangan terbesar =', max(a, b, c))
from data_structures import Shot
import numpy as np
from scipy.io import wavfile
import re
from sys import argv
import os, sys
import glob
import evaluate_method
import multiprocessing
import time
import random
import json
sys.path.insert(0, 'document_similarity/')
from document_similarity import DocSim
from gensim.models.keyedvectors import KeyedVectors
from genetic_algorithm import GA
from aubio import source
from aubio import pitch as pt
import pika
import time
from DAO.connection import Connection
import os
import multiprocessing
import json
import logging
import ast
import threading
import functools
from files_ms_client import upload, download
import nltk
# Tokenizer / POS-tagger models needed by the transcript processing below.
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
# Shared NLP resources, loaded once at module import time.
stopwords = None
googlenews_model_path = '/word2vec/GoogleNews-vectors-negative300.bin'
stopwords_path = "document_similarity/data/stopwords_en.txt"
docSim = None
with open(stopwords_path, 'r') as fh:
    stopwords = fh.read().split(",")
# limit=1000000 caps the loaded word2vec vocabulary to bound memory use.
model = KeyedVectors.load_word2vec_format(googlenews_model_path, binary=True, limit=1000000)
docSim = DocSim.DocSim(model, stopwords=stopwords)
#
class Summary:
    """Holds a video's audio-chunk shots used for topic segmentation."""

    def __init__(self, video_path):
        self.video_path = video_path
        self.video_file = None
        # Chunked audio lives under <video_path>/chunks/chunk*
        self.chunks_path = self.video_path + "chunks/"
        self.n_chunks = len(glob.glob(self.chunks_path+ "chunk*"))
        self.chunks = []
        self.video_length = 0

    '''Create an audio-chunk Shot object from the extracted features.'''
    def createShots(self, i, pause, ocr_on, time,end_time, docSim, prosodic_file):
        # Default pitch/volume to 0 when prosodic features are unavailable.
        pitch = 0
        volume = 0
        try:
            with open(prosodic_file) as f:
                data = json.load(f)
            # Prosodic JSON maps chunk index -> [pitch, volume].
            pitch = float(data[str(i)][0])
            volume = float(data[str(i)][1])
        except FileNotFoundError:
            print('Prosodic features not found')
        s = Shot(i, pitch, volume, pause, [], init_time=time, end_time=end_time)
        s.extractTranscriptAndConcepts(self.video_path, ocr_on, docSim=docSim)
        return s
# Verbose log-line layout for this worker's logging configuration.
LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
    '-35s %(lineno) -5d: %(message)s')
# Module-level logger, standard `logging` convention.
LOGGER = logging.getLogger(__name__)
def callback(channel, method, properties, body, args):
    """pika on-message callback: hand each delivery to a worker thread.

    `args` is the (connection, threads) pair bound via functools.partial in
    consume(); the thread is appended to `threads` so consume() can join
    them all on shutdown.
    """
    (connection, threads) = args
    delivery_tag = method.delivery_tag
    t = threading.Thread(target=do_work, args=(connection, channel, delivery_tag, body))
    t.start()
    threads.append(t)
def do_work(connection, channel, delivery_tag, body):
    """Process one 'segmentation' message on a worker thread.

    Downloads the referenced feature file, rebuilds Shot objects from the
    low-level audio features and ASR transcript, runs the genetic algorithm
    to find topic boundaries, uploads the result and records a 'done' job.
    Always ACKs the delivery at the end (success or failure) via the
    connection's thread-safe callback hook.
    """
    try:
        print(" [x] Received %r" % body, flush=True)
        args = json.loads(body)
        oid = args['oid']
        project_id = args['project_id']
        conn = Connection()
        # file = conn.get_file(oid)
        # file = conn.get_doc_mongo(file_oid=oid)
        file = download(args['file'], buffer=True)
        # Payload is a Python-literal dict serialized to bytes.
        result = ast.literal_eval(file.decode('utf-8'))
        #print(result.keys(), flush=True)
        chunks = []
        low_features_dict = ast.literal_eval(result['low_level_features'].decode('utf-8'))
        asr_dict = ast.literal_eval(result['asr'].decode('utf-8'))
        print(low_features_dict, flush=True)
        print(asr_dict, flush=True)
        # One Shot per chunk, combining prosodic features with its transcript.
        for k, v in low_features_dict.items():
            s = Shot(k, low_features_dict[k]['pitch'], low_features_dict[k]['volume'],
                low_features_dict[k]['pause'], [], init_time=low_features_dict[k]['init_time'], end_time=0)
            s.extractTranscriptAndConcepts(asr_dict[k], False, docSim=docSim)
            chunks.append(s)
        # print(result['low_level_features'], flush=True)
        # print(result['asr'], flush=True)
        # Drop shots without a usable document vector.
        chunks = [s for s in chunks if s.valid_vector]
        if len(chunks) < 2:
            # Nothing to segment: a single boundary at the start.
            boundaries = [0]
        else:
            '''calls the genetic algorithm'''
            ga = GA.GeneticAlgorithm(population_size=100, constructiveHeuristic_percent=0.3, mutation_rate=0.05,
                cross_over_rate=0.4, docSim=docSim, shots=chunks,
                n_chunks=len(chunks), generations=500, local_search_percent=0.3,
                video_length=100, stopwords=stopwords, ocr_on=False)
            boundaries = ga.run()
        #print(chunks, flush=True)
        print(boundaries, flush=True)
        topics = {}
        topics["topics"] = boundaries
        payload = bytes(str(topics), encoding='utf-8')
        uploaded = upload(payload, buffer=True, mime='text/json')
        file_oid = conn.insert_doc_mongo(payload)
        conn = Connection()
        conn.insert_jobs(type='segmentation', status='done', file=file_oid, project_id=project_id)
        #
        # #print(result, flush=True)
        # count = 0
        # dict_result = {}
        # previous_duration = 0
        # for key, value in result.items():
        #     result = main(value['bytes'])
        #     dict_result[count] = result
        #     count += 1
        #     #time.sleep(1)
        #
        # payload = bytes(str(dict_result), encoding='utf-8')
        # conn = Connection()
        #
        # # inserts the result of processing in database
        # file_oid = conn.insert_doc_mongo(payload)
        # conn.insert_jobs(type='asr', status='done', file=file_oid, project_id=project_id)
        #
        # message = {'type': 'aggregator', 'status': 'new', 'oid': file_oid, 'project_id': project_id}
        #
        # # post a message on topic_segmentation queue
        # connection_out = pika.BlockingConnection(pika.ConnectionParameters(host=os.environ['QUEUE_SERVER']))
        # channel2 = connection_out.channel()
        #
        # channel2.queue_declare(queue='aggregator', durable=True)
        # channel2.basic_publish(exchange='', routing_key='aggregator', body=json.dumps(message))
    except Exception as e:
        # NOTE(review): this broad handler labels every failure a
        # "Connection Error" and the message is still ACKed below, so a
        # failed message is dropped rather than re-queued -- confirm intended.
        # print(e, flush=True)
        print('Connection Error %s' % e, flush=True)
    print(" [x] Done", flush=True)
    # ACK must happen on the connection's own thread (pika constraint).
    cb = functools.partial(ack_message, channel, delivery_tag)
    connection.add_callback_threadsafe(cb)
def ack_message(channel, delivery_tag):
    """Acknowledge a delivery on the channel it arrived on.

    AMQP requires the ACK to travel over the very channel that delivered the
    message. If that channel has been closed in the meantime there is nothing
    safe to do here, so the delivery is simply left un-ACKed.
    """
    if not channel.is_open:
        # Channel already closed: cannot ACK; broker will redeliver.
        return
    channel.basic_ack(delivery_tag)
def consume():
    """Connect to RabbitMQ (retrying until it is reachable) and consume the
    'segmentation' queue, dispatching each message to a worker thread.

    Blocks until interrupted; on KeyboardInterrupt it stops consuming, joins
    all worker threads and closes the connection.
    """
    logging.info('[x] start consuming')
    success = False
    while not success:
        try:
            connection = pika.BlockingConnection(
                pika.ConnectionParameters(host=os.environ['QUEUE_SERVER'], heartbeat=5))
            channel = connection.channel()
            success = True
        except Exception:
            # Bug fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and made the retry loop
            # impossible to interrupt while the broker was down.
            time.sleep(30)
    channel.queue_declare(queue='segmentation', durable=True)
    print(' [*] Waiting for messages. To exit press CTRL+C')
    # One unacked message per worker at a time.
    channel.basic_qos(prefetch_count=1)
    threads = []
    on_message_callback = functools.partial(callback, args=(connection, threads))
    channel.basic_consume(queue='segmentation', on_message_callback=on_message_callback)
    try:
        channel.start_consuming()
    except KeyboardInterrupt:
        channel.stop_consuming()
    # Wait for all to complete
    for thread in threads:
        thread.join()
    connection.close()
# Start consuming immediately when this module is executed (blocking call).
consume()
'''
workers = int(os.environ['NUM_WORKERS'])
pool = multiprocessing.Pool(processes=workers)
for i in range(0, workers):
    pool.apply_async(consume)
# Stay alive
try:
    while True:
        continue
except KeyboardInterrupt:
    print(' [*] Exiting...')
    pool.terminate()
    pool.join()'''
# Compute the hypotenuse of a right triangle from its two legs.
import math

cateto1 = float(input("Entre com o 1o. cateto: "))
cateto2 = float(input("Entre com o 2o. cateto: "))
# Pythagorean theorem: h = sqrt(b^2 + c^2)
hipotenusa = math.sqrt(cateto1 ** 2 + cateto2 ** 2)
print ("A hipotenusa é: ",hipotenusa)
|
# NOTE(review): this script is Python 2 (print statements).
import pandas as pd

# Insider-trading dataset; the first 2/3 of rows are used for training and
# the last 1/3 for testing -- assumes rows are ordered meaningfully
# (presumably chronologically; TODO confirm).
url = "data_v3.csv"
insider = pd.read_csv(url, header=0)
print insider.shape
row_num = insider['side'].count()+1
train_num = int(row_num /3*2)
test_num = -1*int(row_num /3)
# Feature columns fed to the classifier.
col_list = ['side', 'return_t5', "return_t30", "vol_sh_out_pct","stake_pct_chg", "tran_value","mkt_cap", "prev_tran_num","hit_rate_5d", "hit_rate_30d", "hit_rate_90d"]

# Apply Min / Max Scaling
def scaler(col_name):
    # In-place min-max normalisation of one column to [0, 1].
    insider[col_name] = (insider[col_name]-insider[col_name].min())/ (insider[col_name].max()-insider[col_name].min())

scaler_list = [ 'side', 'return_t5', "return_t30", "vol_sh_out_pct","stake_pct_chg", "tran_value", "mkt_cap", "prev_tran_num","hit_rate_5d", "hit_rate_30d", "hit_rate_90d"]
for i in scaler_list:
    scaler(i)

# Train/test split by position: head for training, tail for testing.
X_train = insider[col_list][:train_num]
y_train_5d = insider.return_5d[:train_num]
y_train_30d = insider.return_30d[:train_num]
y_train_90d = insider.return_90d[:train_num]
X_test = insider[col_list][test_num:]
y_test_5d = insider.return_5d[test_num:]
y_test_30d = insider.return_30d[test_num:]
y_test_90d = insider.return_90d[test_num:]
# Splitting test data into two parts
X_test1 = X_test[:8620]
X_test2 = X_test[-8620:]
y_test_90d1 = y_test_90d[:8620]
y_test_90d2 = y_test_90d[-8620:]
## Import the Classifier.
from sklearn.neighbors import KNeighborsClassifier
## Instantiate the model with 37 neighbors.
knn = KNeighborsClassifier(n_neighbors=37)
## Fit the model on the training data, once per prediction horizon.
knn.fit(X_train, y_train_5d)
print 'KNN score of 5d:', round(knn.score(X_test, y_test_5d)*100,2)
knn.fit(X_train, y_train_30d)
print 'KNN score of 30d:', round(knn.score(X_test, y_test_30d)*100,2)
knn.fit(X_train, y_train_90d)
print 'KNN score of 90d:', round(knn.score(X_test, y_test_90d)*100, 2)
print 'KNN score of 90d (test1):', round(knn.score(X_test1, y_test_90d1)*100, 2)
print 'KNN score of 90d (test2):', round(knn.score(X_test2, y_test_90d2)*100, 2)
#import matplotlib.pyplot as plt
#import numpy as np
#
#def knn_train(n):
# knn2 = KNeighborsClassifier(n_neighbors=n)
# knn2.fit(X_train, y_train_90d)
# return knn2.score(X_test, y_test_90d)
#
#x = np.arange(1, 10, 1)
#y = []
#for i in x:
# y.append(knn_train(i))
#
#plt.plot(x, np.asarray(y))
#plt.show()
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.base.build_root import BuildRoot
from pants.bsp.spec.base import BuildTargetIdentifier
from pants.bsp.util_rules.targets import BSPResourcesRequest, BSPResourcesResult
from pants.core.target_types import ResourceSourceField
from pants.core.util_rules import stripped_source_files
from pants.core.util_rules.source_files import SourceFilesRequest
from pants.core.util_rules.stripped_source_files import StrippedSourceFiles
from pants.engine.addresses import Addresses
from pants.engine.fs import AddPrefix, Digest
from pants.engine.internals.selectors import Get
from pants.engine.rules import collect_rules
from pants.engine.target import CoarsenedTargets, SourcesField
from pants.util.strutil import path_safe
def _jvm_resources_directory(target_id: BuildTargetIdentifier) -> str:
    """Return the output directory (relative path) holding the resources of a BSP target."""
    # TODO: Currently, we have a single BuildTarget per group, and so we include the transitive
    # resource dependencies in one owning directory. As part of #15051 we'll likely need to find
    # "owning" BuildTargets for each resources target in order to avoid having all of them
    # emit the transitive resources.
    safe_name = path_safe(target_id.uri)
    return "jvm/resources/" + safe_name
async def _jvm_bsp_resources(
    request: BSPResourcesRequest,
    build_root: BuildRoot,
) -> BSPResourcesResult:
    """Generically handles a BSPResourcesRequest (subclass).

    Coarsens the request's field sets, collects their (codegen-enabled,
    source-root-stripped) resource files, and re-roots them under a
    per-target `jvm/resources/...` directory.

    This is a rule helper rather than a `@rule` for the same reason as `_jvm_bsp_compile`.
    """
    coarsened_targets = await Get(
        CoarsenedTargets, Addresses([fs.address for fs in request.field_sets])
    )
    # Gather resource sources for the whole transitive closure of the group.
    source_files = await Get(
        StrippedSourceFiles,
        SourceFilesRequest(
            [tgt.get(SourcesField) for tgt in coarsened_targets.closure()],
            for_sources_types=(ResourceSourceField,),
            enable_codegen=True,
        ),
    )
    rel_resources_dir = _jvm_resources_directory(request.bsp_target.bsp_target_id)
    # Prefix the stripped files with the per-target resources directory.
    output_digest = await Get(
        Digest,
        AddPrefix(source_files.snapshot.digest, rel_resources_dir),
    )
    return BSPResourcesResult(
        resources=(
            # NB: IntelliJ requires that directory URIs end in slashes.
            build_root.pathlib_path.joinpath(".pants.d/bsp", rel_resources_dir).as_uri()
            + "/",
        ),
        output_digest=output_digest,
    )
def rules():
    """Collect this module's rules together with the stripped-source-files rules."""
    collected = list(collect_rules())
    collected.extend(stripped_source_files.rules())
    return collected
|
#!/usr/bin/python3
"""
Rectangle module
"""
class Rectangle:
    """Rectangle class (empty placeholder; no attributes or behavior yet)."""
|
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.animation import FuncAnimation
import mpl_toolkits.mplot3d.axes3d as p3
from scipy import integrate
G=6.67408e-11 #m^3Kg^-1s^-1 #Big G (Newtonian gravitational constant)
"""
Below are the default values for scales of mass, distance, velocity, as well
as the time scale
"""
Mass_scale = 1e24 #Kg
Distance_scale = 1000000 #m (1e6)
Velocity_scale = 1000 #m/s (1e3)
Time_scale = 365.25*24*3600 #s #orbital period of earth
# Colours used for plotting; one per body, so at most 7 bodies can be drawn
# before indexing past the end of this list.
colours = ['r','g','b','y','pink', 'c', 'm'] #Colours used for plotting
def SetRelativeValues(mass_scale, dist_scale, vel_scale, time_scale):
    """
    Derive the two constants used by the ODE solver from the supplied
    scale units.

    Inputs:
    mass_scale (M) - scale unit for mass (kg) that all body masses are
    relative to
    dist_scale (D) - scale unit for distance (m) that all distances are
    relative to
    vel_scale (V)  - scale unit for velocity (m/s) that all velocities are
    relative to
    time_scale (T) - number of seconds represented by one unit of the time
    span handed to scipy.integrate.odeint

    Outputs:
    k1 - (G*T*M)/(D^2 * V), scales the gravitational acceleration terms
    k2 - (V*T)/D, scales the stored velocities into position derivatives
    """
    k1 = G * time_scale * mass_scale / (dist_scale ** 2 * vel_scale)
    k2 = vel_scale * time_scale / dist_scale
    return k1, k2
def SetDefaultReatives():
    """
    Derive the ODE-solver constants from the module-level default scales.

    Defaults: Mass_scale = 1e24 kg, Distance_scale = 1e6 m,
    Velocity_scale = 1e3 m/s, Time_scale = one Earth year in seconds.

    Outputs:
    k1 - (G*T*M)/(D^2 * V), scales the gravitational acceleration terms
    k2 - (V*T)/D, scales the stored velocities into position derivatives
    """
    # Same formulas as SetRelativeValues, evaluated at the module defaults.
    return SetRelativeValues(Mass_scale, Distance_scale, Velocity_scale, Time_scale)
class NBodySolver:
    """
    Solves and visualises an N-body gravitational system.

    Bodies are registered with AddBody/AddNewBody, trajectories are computed
    by SolveNBodyProblem (via scipy.integrate.odeint and CoupledNBodyODE),
    and the result can be drawn with PlotNBodySolution or animated with
    AnimateNBodySolution.

    NOTE(review): plotting/animation index the module-level `colours` list
    per body, so more than 7 bodies will raise IndexError.
    """
    def __init__(self):
        """
        Initialises the NBodySolver: no bodies, default scale constants,
        and solved set to False.
        """
        #Set up list of bodies
        self.bodies = []
        #Set constants as default, set solved to False
        self.k1, self.k2 = SetDefaultReatives()
        self.dist_scale = Distance_scale
        self.solved=False
    def AddBody(self, body):
        """
        Adds the supplied body to this solver.
        Inputs:
        body - the Body to add to this solver
        """
        self.bodies.append(body)
    def AddNewBody(self, name, mass, position, velocity):
        """
        Creates a new Body based on the given arguments, and then adds it to
        this solver.
        Inputs:
        name - The name of the body
        mass - The mass of the body relative to the Mass_scale
        position - The initial position of the body relative to Distance_scale
        velocity - The initial velocity of the body relative to Velocity_scale
        """
        self.bodies.append(Body(name, mass, position, velocity))
    def SetSolverRelativeValues(self, mass_scale, dist_scale, vel_scale, time_scale):
        """
        Calculates constants used by the differential equation solver based off
        the given values.
        Inputs:
        mass_scale (M)- The scale unit for mass (kg) that all object masses
        are relative to
        dist_scale (D)- The scale unit for distance (m) that all distances are
        relative to
        vel_scale (V)- The scale unit for velocity (m/s) that all velocities are
        relative to
        time_scale (T)- The time span (s) that a value of 1 in the time span
        used in scipy.integrate.odeint represents
        """
        self.k1,self.k2 = SetRelativeValues(mass_scale, dist_scale, vel_scale, time_scale)
        self.dist_scale = dist_scale
    def SolveNBodyProblem(self, time_span):
        """
        Prepares the bodies of this solver, runs the ODE solver, extracts
        the per-body position columns from the result, and stores them on
        the object as self.bodySol.
        Inputs:
        time_span - The time span that the simulation should be run over, a
        time span of 1 represents 1 Time_scale
        """
        self.time_span = time_span
        initial, masses = PrepairValues(self.bodies)
        self.N = len(self.bodies)
        # NOTE: masses is wrapped in a one-element list here and flattened
        # again inside CoupledNBodyODE.
        n_body_sol = integrate.odeint(CoupledNBodyODE, initial, time_span, args=(self.k1,self.k2,self.N, [masses]))
        #Create array of just the positions of the solution
        self.bodySol = []
        for i in range(self.N):
            self.bodySol.append(n_body_sol[:,(3*i):(3*(i+1))])
        self.solved=True
    #Return both the neat solution, as well as the full solution
    def PlotNBodySolution(self, ax=None, show=True, saveFile=None, legend=True):
        """
        Plots the solved trajectories on a 3d axis: a circle marker at each
        body's start, a line for its path, and a star marker at its end.
        Inputs:
        ax - optional existing 3d axis to draw on; a new figure is created
        when omitted
        show - if True, plt.show() is called after drawing
        saveFile - optional filename to save the figure to
        legend - if True, a legend of body names is added
        """
        if(self.solved==False):
            print("Solution must be found before plotting. Use NBodySolver.SolveNBodyProblem")
            return
        if(ax==None):
            fig=plt.figure(figsize=(5,5))
            ax=fig.add_subplot(111,projection="3d")
        #Iterate each body and extract full path for each body, then plot.
        for i in range(self.N):
            b = self.bodySol[i]
            ax.scatter(b[0,0], b[0,1], b[0,2], marker='o', color=colours[i])
            ax.plot(b[:,0],b[:,1],b[:,2],color=colours[i], label=self.bodies[i].name)
            ax.scatter(b[-1,0], b[-1,1], b[-1,2], marker='*', color=colours[i])
        # NOTE(review): the axis-label exponent is hard-coded to 1e6 rather
        # than using self.dist_scale -- confirm this is intended.
        dim = r"m$\times10^{"+str(np.log10(1000000))+"}$"
        print(dim)
        #Add details to plot, then show
        ax.set_xlabel("x - " + dim,fontsize=10)
        ax.set_ylabel("y - " + dim,fontsize=10)
        ax.set_zlabel("z - " + dim,fontsize=10)
        ax.set_title("Visualization of a 3 body system\n",fontsize=14)
        if(legend):
            ax.legend(loc="lower left",fontsize=10)
        if(show):
            plt.show()
        # NOTE(review): `fig` only exists when ax was not supplied; passing
        # both ax and saveFile raises NameError here.
        if(saveFile != None):
            fig.savefig(saveFile, dpi=fig.dpi)
    def AnimateNBodySolution(self, axis_size=None):
        """
        Animates the solved trajectories with matplotlib's FuncAnimation,
        growing each body's path line frame by frame.
        Inputs:
        axis_size - optional half-width for all three axes; defaults to the
        maximum coordinate found in the solution
        """
        if(self.solved==False):
            print("Solution must be found before animating. Use NBodySolver.SolveNBodyProblem")
            return
        data = []
        for i in range(self.N):
            data.append([self.bodySol[i][:,0],self.bodySol[i][:,1],self.bodySol[i][:,2]])
        #Turn to numpy array
        data = np.array(data)
        #Check if axis_size is defined. If not, define it
        if(axis_size==None):
            axis_size = np.max(self.bodySol)
        #Create 3d figure to plot on
        fig=plt.figure(figsize=(6,6))
        ax=fig.add_subplot(111,projection="3d")
        #Extract data into a set of 3 dimensional lines
        lines = [ax.plot(dat[0,0:1],dat[1,0:1],dat[2,0:1], label=self.bodies[i].name)[0] for i, dat in enumerate(data)]
        for i, line in enumerate(lines):
            line.set_color(colours[i])
            #line.label("test" + str(i))
        def update_lines(num, dataLines, lines):
            """
            Update function for the animation.
            Inputs:
            num - the current iteration of the animation
            dataLines - all of the 3d position solutions of all bodies
            lines - the lines to animate
            """
            #i=0
            for line, data in zip(lines, dataLines):
                #line.set_color(colours[i])
                line.set_data(data[0:2, :num])
                line.set_3d_properties(data[2, :num])
                #i+=1
            return lines
        ax.legend(loc="upper left",fontsize=14)
        #Set up axis of plot
        ax.set_xlim3d([-axis_size,axis_size])
        ax.set_xlabel('X')
        ax.set_ylim3d([-axis_size,axis_size])
        ax.set_ylabel('Y')
        ax.set_zlim3d([-axis_size,axis_size])
        ax.set_zlabel('Z')
        #Create animation, then show to user
        line_ani = FuncAnimation(fig, update_lines, len(self.time_span), fargs=(data, lines),
                                 interval=0.1, blit=True, repeat=False)
        plt.show()
class Body:
    """Simple value container describing one body in the N-body system."""

    def __init__(self, name, mass, startPos, startVel):
        """
        Store the body's properties.

        Inputs:
        name - the name of the body
        mass - mass relative to the Mass_scale supplied to the N body solver
        startPos - array like [x, y, z], position relative to the
        Distance_scale supplied to the N body solver
        startVel - array like [v_x, v_y, v_z], velocity relative to the
        Velocity_Scale supplied to the N body solver
        """
        self.name = name
        self.mass = mass
        self.startPos = startPos
        self.startVel = startVel
def CoupledNBodyODE(rv, t, k1, k2, N, masses):
    """
    Calculates the new velocity and position derivatives of each of the
    bodies at the given iteration (the right-hand side for odeint).
    Inputs:
    rv - array like, holds position and velocity of all bodies (positions
    first, 3 values per body, followed by velocities)
    t - supplied for function to work with scipy.integrate.odeint, unused
    k1 - constant calculated based on scale values, scales the acceleration
    (dv/dt) of each body
    k2 - constant calculated based on scale values, scales the stored
    velocities into position derivatives (dr/dt)
    N - the number of bodies
    masses - array like, mass of each of the bodies
    Outputs - flat 1d array of floats, position derivatives followed by
    velocity derivatives. Same layout as input 'rv'
    """
    #Prepair arrays to hold positions, velocities, and position deivatives
    all_r = []
    all_v = []
    all_drdt = []
    delta_to_v = 3*N
    cm = np.array([0,0,0])
    cm_v = np.array([0,0,0])
    #Turn masses array to flat numpy array for ease
    masses = np.array(masses).flatten()
    tMass = np.sum(masses)
    #Iterate the data set and fill arrays with required values
    for i in range(N):
        all_r.append(rv[3*i:3*(i+1)])
        all_drdt.append(rv[(3*i+delta_to_v):(3*(i+1)+delta_to_v)]*k2)
        # NOTE(review): `cm` is accumulated from all_drdt (the scaled
        # velocities), not from positions as the original comment claimed;
        # the quantity is therefore a mass-weighted mean velocity -- confirm
        # this is the intended centre-of-mass treatment.
        cm=np.add(cm,all_drdt[i]*masses[i])
    cm/=tMass
    #Convert to numpy arrays for efficiences and ease
    all_r = np.array(all_r)
    all_v = np.array(all_v)
    all_drdt = np.array(all_drdt)
    # Subtract the mass-weighted mean of all_drdt from each body's drdt
    # (a centre-of-mass-velocity correction; see NOTE above).
    for i in range(N):
        all_drdt[i] -= cm
    #Create matrix of distances between each body
    rs = np.zeros((N,N))
    for i in range(N):
        for j in range(N):
            #Any distance r_ij for j=i is 0
            if(i==j):
                continue
            #rs_ij=rs_ji, prevents double calculations
            if(rs[j,i] != 0):
                rs[i,j] = rs[j,i]
            else:
                #Calculate distance between bodies i and j
                rs[i,j] = np.linalg.norm(all_r[j]-all_r[i])
    #Initiate velocity derivative array
    all_dvdt=[]
    #Iterate each body
    for i in range(N):
        #Initiate the velocity derivative for body i as (0,0,0), to prepair for calcualtion
        body_i_pre_mult = np.zeros(3)
        for j in range(N):
            if(j!=i):
                #Add the acceleration contribution from body j to the body i
                body_i_pre_mult += masses[j]*(all_r[j]-all_r[i])/rs[i,j]**3
        #Add total calcualted velocity change to total array
        all_dvdt.append(k1*body_i_pre_mult)
    #Turn to numpy arrays, concatenate, and flatten, then return to odeint
    all_dvdt = np.array(all_dvdt)
    return np.concatenate((all_drdt, all_dvdt)).flatten()
def PrepairValues(bodies):
    """
    Flatten the initial conditions of a list of bodies for the ODE solver.

    Inputs:
    bodies - list of Body-like objects exposing mass, startPos and startVel

    Outputs:
    initial - flat 1d numpy array: all start positions followed by all
    start velocities
    masses - numpy array of the bodies' masses
    """
    # Gather each attribute across all bodies.
    masses = np.array([body.mass for body in bodies])
    positions = [body.startPos for body in bodies]
    velocities = [body.startVel for body in bodies]
    # Positions first, then velocities, flattened to one vector.
    initial = np.array(positions + velocities).flatten()
    return initial, masses
"""
Code Use
NBodyPlotter.py is designed such that the user can easily simulated any n body
system, and either plot or animate.
Example code commented out shows use of code
"""
# #Initialises the solver with default values
# solver = NBodySolver()
#
# #Add 4 bodies to the solver with iniial starting conditions
# solver.AddBody(Body("sun",1e6, [-145000,0,0], [0,-10,0]))
# solver.AddBody(Body("second_sun",1e6,[145000, 0, 0], [0,10,0]))
# solver.AddBody(Body("third_sun", 1e6, [0,145000,0], [-10,0,0]))
# solver.AddBody(Body("third_sun", 1e6, [0,-145000,0], [10,0,0]))
# #Define a time span of 5 solar years, with 12000 data points total
# time_span=np.linspace(0,5,12000)
#
# #Solver the problem over the time span, and save to "test.png"
# solver.SolveNBodyProblem(time_span)
# solver.PlotNBodySolution(saveFile="test.png")
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#####################################################################################
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Jan 16, 2020 #
# #
#####################################################################################
import os
import sys
import re
import string
import time
import numpy
import astropy.io.fits as pyfits
import Ska.engarchive.fetch as fetch
import Chandra.Time
import datetime
import random
#
#--- reading directory list
#
# Each line of dir_list is expected to be "<value> : <variable name>"; the
# loop assigns the value to the named module-level variable via exec.
# NOTE(review): exec on file contents is a code-injection risk if dir_list is
# writable by others -- confirm file permissions / trust model.
path = '/data/mta/Script/MTA_limit_trends/Scripts/house_keeping/dir_list'
with open(path, 'r') as f:
    data = [line.strip() for line in f.readlines()]
f.close()  # redundant: the with-statement already closed the file
for ent in data:
    atemp = re.split(':', ent)
    var = atemp[1].strip()
    line = atemp[0].strip()
    exec("%s = %s" %(var, line))
#
#--- append path to a private folder
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
#
#--- import several functions
#
import mta_common_functions as mcf #---- contains other functions commonly used in MTA scripts
import envelope_common_function as ecf
import fits_operation as mfo
#
#--- set a temporary file name (randomised to avoid collisions in /tmp)
#
rtail = int(time.time() * random.random())
zspace = '/tmp/zspace' + str(rtail)
# Output directory and per-month day counts (normal year / leap year).
data_dir = '/data/mta/Script/MTA_limit_trends/Scripts/Simdiag/Recover/Outdir/'
mday_list = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
mday_list2 = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
#-------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
def update_simdiag_data(date = ''):
    """
    collect sim diag msids
    input: date ---- the date in yyyymmdd format. if not given, yesterday's date is used
           NOTE(review): the `date` argument is currently unused by the body;
           the date range is hard-coded (1999-08 through 2020-01) -- confirm.
    output: fits file data related to grad and comp
    """
    #
    #--- read group names which need special treatment
    #
    #sfile = house_keeping + 'msid_list_simdiag'
    sfile = './msid_list_simdiag'
    data = mcf.read_data_file(sfile)
    cols = []
    g_dir = {}
    # Each line: "<msid> <group directory>".
    for ent in data:
        atemp = re.split('\s+', ent)
        cols.append(str(atemp[0]))
        g_dir[atemp[0]] = atemp[1]
    #
    #--- create msid <---> unit dictionary
    #
    [udict, ddict] = ecf.read_unit_list()
    #
    #--- read mta database
    #
    mta_db = ecf.read_mta_database()
    #
    #--- read mta msid <---> sql msid conversion list
    #
    mta_cross = ecf.read_cross_check_table()
    # Build the list of dates to process: 1999-08-01 through 2020-01-31.
    day_list = []
    for year in range(1999, 2021):
        cyear = str(year)
        for mon in range(1, 13):
            if year == 1999:
                if mon < 8:
                    continue
            if year == 2020:
                if mon > 1:
                    break
            cmon = str(mon)
            if mon < 10:
                cmon = '0' + cmon
            if mcf.is_leapyear(year):
                lday = mday_list2[mon-1]
            else:
                lday = mday_list[mon-1]
            for day in range(1, lday+1):
                cday = str(day)
                if day < 10:
                    cday = '0' + cday
                sday = cyear + '-' + cmon + '-' + cday
                day_list.append(sday)
    for sday in day_list:
        # NOTE(review): hard-coded stop date for this recovery run.
        if sday == '2020-01-17':
            break
        print("Date: " + sday)
        start = sday + 'T00:00:00'
        stop = sday + 'T23:59:59'
        # Build the arc5gl command script to retrieve one day of simdiag data.
        line = 'operation=retrieve\n'
        line = line + 'dataset = flight\n'
        line = line + 'detector = sim\n'
        line = line + 'level = 0\n'
        line = line + 'filetype = simdiag\n'
        line = line + 'tstart = ' + start + '\n'
        line = line + 'tstop = ' + stop + '\n'
        line = line + 'go\n'
        flist = mcf.run_arc5gl_process(line)
        if len(flist) < 1:
            print("\t\tNo data")
            continue
        #
        #--- combined them
        #
        flen = len(flist)
        if flen == 0:
            continue
        elif flen == 1:
            cmd = 'cp ' + flist[0] + ' ./ztemp.fits'
            os.system(cmd)
        else:
            mfo.appendFitsTable(flist[0], flist[1], 'ztemp.fits')
            if flen > 2:
                for k in range(2, flen):
                    mfo.appendFitsTable('ztemp.fits', flist[k], 'out.fits')
                    cmd = 'mv out.fits ztemp.fits'
                    os.system(cmd)
        #
        #--- remove indivisual fits files
        #
        cmd = 'chmod 777 *.fits.gz'
        os.system(cmd)
        for ent in flist:
            cmd = 'rm -rf ' + ent
            os.system(cmd)
        #
        #--- read out the data for the full day
        #
        [cols_xxx, tbdata] = ecf.read_fits_file('ztemp.fits')
        cmd = 'rm -f ztemp.fits out.fits'
        os.system(cmd)
        #
        #--- get time data in the list form
        #
        dtime = list(tbdata.field('time'))
        for k in range(0, len(cols)):
            #
            #--- select col name without ST_ (which is standard dev)
            #
            col = cols[k]
            #
            #---- extract data in a list form
            #     NOTE(review): this rebinds `data`, shadowing the msid list
            #     read at the top of the function (already consumed by then).
            #
            data = list(tbdata.field(col))
            #
            #--- change col name to msid
            #
            msid = col.lower()
            #
            #--- get limit data table for the msid
            #    NOTE(review): convert_unit_indicator is not defined in this
            #    file's visible portion; the bare except silently falls back
            #    to tchk = 0 -- confirm the helper exists at runtime.
            #
            try:
                tchk = convert_unit_indicator(udict[msid])
            except:
                tchk = 0
            glim = ecf.get_limit(msid, tchk, mta_db, mta_cross)
            #
            #--- update database
            #
            tstart = convert_time_format(start)
            tstop = convert_time_format(stop)
            update_database(msid, g_dir[msid], dtime, data, glim, pstart=tstart, pstop=tstop)
#-------------------------------------------------------------------------------------------
#-- update_database: update/create fits data files of msid --
#-------------------------------------------------------------------------------------------
def update_database(msid, group, dtime, data, glim, pstart=0, pstop=0, step=3600.0):
    """
    update/create fits data files of msid
    input:  msid    --- msid
            group   --- group (sub directory) name the msid belongs to
            dtime   --- list of time stamps of the data
            data    --- list of data values
            glim    --- list of limit tables for the msid
            pstart  --- starting time in seconds from 1998.1.1; defulat = 0 (find from the data)
            pstop   --- stopping time in seconds from 1998.1.1; defulat = 0 (find from the data)
            step    --- time interval of the short time data set:default 3600.0
    output: <msid>_data.fits, <msid>_short_data.fits, <msid>_week_data.fits
    """
    cols = ['time', msid, 'med', 'std', 'min', 'max', 'ylower', 'yupper',\
            'rlower', 'rupper', 'dcount', 'ylimlower', 'ylimupper', \
            'rlimlower', 'rlimupper', 'state']
    out_dir = data_dir + group + '/'
    #
    #--- make sure that the sub directory exists
    #
    if not os.path.isdir(out_dir):
        cmd = 'mkdir ' + out_dir
        os.system(cmd)
    # Long-term, one-year ("short"), and one-week output fits files.
    fits = out_dir + msid + '_data.fits'
    fits2 = out_dir + msid + '_short_data.fits'
    fits3 = out_dir + msid + '_week_data.fits'
    #
    #-- if the starting time and stopping time are given, use them.
    #-- otherwise find from the data for the starting time and today's date -1 for the stopping time
    #
    stday = time.strftime("%Y:%j:00:00:00", time.gmtime())
    stday = Chandra.Time.DateTime(stday).secs - 86400.0 #--- set the ending to the day before
    mago = stday - 31536000.0 #--- a year ago
#    mago2 = stday - 604800.0 #--- a week ago
    #
    #--- if the fits files already exist, append the data --------------------
    #
    if os.path.isfile(fits):
        #
        #--- extract data from archive one day at a time
        #
        [week_p, short_p, long_p] = process_day_data(msid, dtime, data, glim, step=3600.)
        #
        #--- add to the data to the long term fits file
        #
        ecf.update_fits_file(fits, cols, long_p)
        #
        #--- append to the short term fits file only when the data are within
        #--- the last year (mago); create it if the update fails or it is missing
        #
        if mago <= pstart:
            try:
                if os.path.isfile(fits2):
                    ecf.update_fits_file(fits2, cols, short_p)
                else:
                    ecf.create_fits_file(fits2, cols, short_p)
            except:
                ecf.create_fits_file(fits2, cols, short_p)
        #
        #--- same pattern for the week long data fits file
        #
        try:
            if os.path.isfile(fits3):
                ecf.update_fits_file(fits3, cols, week_p)
            else:
                ecf.create_fits_file(fits3, cols, week_p)
        except:
            ecf.create_fits_file(fits3, cols, week_p)
    #
    #--- if the fits files do not exist, create new ones ----------------------
    #
    else:
        #
        #--- one day step; a long term data
        #
        [week_p, short_p, long_p] = process_day_data(msid, dtime, data, glim, step=3600.)
        ecf.create_fits_file(fits, cols, long_p)
        #
        #--- short term data
        #
        if mago <= pstart:
            ecf.create_fits_file(fits2, cols, short_p)
        #
        #--- week long data
        #
        ecf.create_fits_file(fits3, cols, week_p)
#-------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
def cut_the_data(data, cut):
    """
    Trim every row of the data set at the first index where the time row
    (data[0]) reaches the cut value.
    input:  data --- list of equal-length lists; data[0] holds time stamps
            cut  --- cut time; entries before the first time >= cut are dropped
    output: list of trimmed rows
    NOTE: if no time stamp reaches the cut value, the position stays 0 and
    the rows are returned untrimmed (original behavior).
    """
    pos = next((k for k, tval in enumerate(data[0]) if tval >= cut), 0)
    return [row[pos:] for row in data]
#-------------------------------------------------------------------------------------------
#-- process_day_data: extract data from the archive and compute the stats ---
#-------------------------------------------------------------------------------------------
def process_day_data(msid, time, data, glim, step = 3600.0):
    """
    extract data from the archive and compute the stats
    input:  msid    --- msid of the data
            time    --- list of time stamps (seconds from 1998.1.1)
                        NOTE(review): this parameter shadows the module-level
                        `time` module inside this function.
            data    --- list of data values
            glim    --- a list of limit tables
            step    --- interval of the data. defalut: 3600 sec
    output: a list of three lists which contain:
            week_p:
                wtime   --- a list of time in sec from 1998.1.1
                wdata   --- a list of the mean of each interval
                wmed    --- a list of the median of each interval
                wstd    --- a list of the std of each interval
                wmin    --- a list of the min of each interval
                wmax    --- a list of the max of each interval
                wyl     --- a list of the rate of yellow lower violation
                wyu     --- a list of the rate of yellow upper violation
                wrl     --- a list of the rate of red lower violation
                wru     --- a list of the rate of red upper violation
                wcnt    --- a list of the total data counts
                wyl     --- a list of the lower yellow limits
                wyu     --- a list of the upper yellow limits
                wrl     --- a list of the lower red limits
                wru     --- a list of the upper red limits
                wstate
            short_p:
                btime   --- a list of time in sec from 1998.1.1
                bdata   --- a list of the mean of each interval
                bmed    --- a list of the median of each interval
                bstd    --- a list of the std of each interval
                bmin    --- a list of the min of each interval
                bmax    --- a list of the max of each interval
                byl     --- a list of the rate of yellow lower violation
                byu     --- a list of the rate of yellow upper violation
                brl     --- a list of the rate of red lower violation
                bru     --- a list of the rate of red upper violation
                bcnt    --- a list of the total data counts
                byl     --- a list of the lower yellow limits
                byu     --- a list of the upper yellow limits
                brl     --- a list of the lower red limits
                bru     --- a list of the upper red limits
                bstate
            long_p: --- all in one element list form
                ftime   --- a mid time of the entire extracted data period
                fdata   --- the mean of the entire extracted data
                fstd    --- the std of the entire extracted data
                fmin    --- the min of the entire extracted data
                fmax    --- the max of the entire extracted data
                ylow    --- the rate of yellow lower violation
                yupper  --- the rate of yellow upper violation
                rlow    --- the rate of red lower violation
                rupper  --- the rate of red upper violation
                tcnt    --- the total counts of the data
                ylow    --- the lower yellow limit
                yup     --- the upper yellow limit
                rlow    --- the lower red limit
                rup     --- the upper red limit
                state
    """
    #
    #--- week long data 5 min step
    #
    wtime = []
    wdata = []
    wmed = []
    wstd = []
    wmin = []
    wmax = []
    wyl = []
    wyu = []
    wrl = []
    wru = []
    wcnt = []
    wstate= []
    step2 = 300.0
    wstart= time[0] - 10.0 #---- set the starting time to 10 sec before the first entry
    #
    #--- one year long data 1 hr step
    #
    btime = []
    bdata = []
    bmed = []
    bstd = []
    bmin = []
    bmax = []
    byl = []
    byu = []
    brl = []
    bru = []
    bcnt = []
    bstate=[]
    # Per-interval limit snapshots (week / year), merged into the tables below.
    wsave = []
    vsave = []
    #
    #--- extract data from archive
    #
    xxx = 9999  # NOTE(review): leftover debug switch; unused now
    ###if xxx == 9999:
    # NOTE(review): a bare except wraps the whole computation; on ANY error
    # the stats silently become zeros. Consider narrowing the exception.
    try:
        data = numpy.array(data)
        dtime = numpy.array(time)
        # Drop NaN entries from both the values and their time stamps.
        mask = ~(numpy.isnan(data))
        data = data[mask]
        dtime = dtime[mask]
        #
        #--- get stat for the entire period
        #
        ftime = dtime.mean()
        fdata = data.mean()
        fmed = numpy.median(data)
        fstd = data.std()
        fmin = data.min()
        fmax = data.max()
        #
        #--- find the violation limits of that time
        #
        vlimits = find_violation_range(glim, ftime)
        #
        #--- get the violation rate of the entier period
        #
        [ylow, yupper, rlow, rupper, tcnt] = find_violation_rate(data, vlimits)
        long_p = [[ftime], [fdata], [fmed], [fstd], [fmin], [fmax]]
        long_p = long_p + [[ylow], [yupper], [rlow], [rupper], [tcnt]]
        long_p = long_p + [[vlimits[0]], [vlimits[1]], [vlimits[2]], [vlimits[3]]]
        long_p = long_p + [['none']]
        #
        #--- if asked, devide the data into a smaller period (step size)
        #
        if step != 0:
            spos = 0
            spos2 = 0
            chk = 1
            chk2 = 2
            send = dtime[spos] + step
            send2 = dtime[spos2] + step2
            for k in range(0, len(dtime)):
                # 5-minute (step2) binning for the week-long table.
                if dtime[k] > wstart:
                    if dtime[k] < send2:
                        chk2 = 0
                    else:
                        sdata = data[spos2:k]
                        avg = sdata.mean()
                        med = numpy.median(sdata)
                        sig = sdata.std()
                        amin = sdata.min()
                        amax = sdata.max()
                        stime = dtime[spos2 + int(0.5 * (k-spos2))]
                        vlimits = find_violation_range(glim, stime)
                        [yl, yu, rl, ru, tot] = find_violation_rate(sdata, vlimits)
                        wtime.append(stime)
                        wdata.append(avg)
                        wmed.append(med)
                        wstd.append(sig)
                        wmin.append(amin)
                        wmax.append(amax)
                        wyl.append(yl)
                        wyu.append(yu)
                        wrl.append(rl)
                        wru.append(ru)
                        wcnt.append(tot)
                        wsave.append(vlimits)
                        wstate.append('none')
                        spos2 = k
                        send2 = dtime[k] + step2
                        chk2 = 1
                else:
                    send2 = dtime[spos2] + step2
                # 1-hour (step) binning for the year-long table.
                if dtime[k] < send:
                    chk = 0
                else:
                    rdata = data[spos:k]
                    avg = rdata.mean()
                    med = numpy.median(rdata)
                    sig = rdata.std()
                    amin = rdata.min()
                    amax = rdata.max()
                    stime = dtime[spos + int(0.5 * (k-spos))]
                    vlimits = find_violation_range(glim, stime)
                    [yl, yu, rl, ru, tot] = find_violation_rate(rdata, vlimits)
                    btime.append(stime)
                    bdata.append(avg)
                    bmed.append(med)
                    bstd.append(sig)
                    bmin.append(amin)
                    bmax.append(amax)
                    byl.append(yl)
                    byu.append(yu)
                    brl.append(rl)
                    bru.append(ru)
                    bcnt.append(tot)
                    vsave.append(vlimits)
                    bstate.append('none')
                    spos = k
                    send = dtime[k] + step
                    chk = 1
            #
            #--- check whether there are any left over; if so add it to the data lists
            #    (uses the final loop index k from the for loop above)
            #
            if chk2 == 0:
                rdata = data[spos2:k]
                avg = rdata.mean()
                med = numpy.median(rdata)
                sig = rdata.std()
                amin = rdata.min()
                amax = rdata.max()
                stime = dtime[spos2 + int(0.5 * (k-spos2))]
                vlimits = find_violation_range(glim, stime)
                [yl, yu, rl, ru, tot] = find_violation_rate(rdata, vlimits)
                wtime.append(dtime[spos2 + int(0.5 * (k-spos2))])
                wdata.append(avg)
                wmed.append(med)
                wstd.append(sig)
                wmin.append(amin)
                wmax.append(amax)
                wyl.append(yl)
                wyu.append(yu)
                wrl.append(rl)
                wru.append(ru)
                wcnt.append(tot)
                wsave.append(vlimits)
                wstate.append('none')
            if chk == 0:
                rdata = data[spos:k]
                avg = rdata.mean()
                med = numpy.median(rdata)
                sig = rdata.std()
                amin = rdata.min()
                amax = rdata.max()
                stime = dtime[spos + int(0.5 * (k-spos))]
                vlimits = find_violation_range(glim, stime)
                [yl, yu, rl, ru, tot] = find_violation_rate(rdata, vlimits)
                btime.append(dtime[spos + int(0.5 * (k-spos))])
                bdata.append(avg)
                bmed.append(med)
                bstd.append(sig)
                bmin.append(amin)
                bmax.append(amax)
                byl.append(yl)
                byu.append(yu)
                brl.append(rl)
                bru.append(ru)
                bcnt.append(tot)
                vsave.append(vlimits)
                bstate.append('none')
    ###else:        #----REMOVE!!
    except:
        # Fallback on any failure: zeroed statistics and default limits.
        ftime = 0
        fdata = 0
        fmed = 0
        fstd = 0
        fmin = 0
        fmax = 0
        ylow = 0
        yupper= 0
        rlow = 0
        rupper= 0
        tcnt = 0
        vlimits = [-9.0e9, -9.0e9, 9.0e9, 9.0e9]
        # NOTE(review): here long_p is a flat list of scalars, unlike the
        # success path where each entry is a one-element list -- confirm the
        # downstream fits writers accept both shapes.
        long_p = [ftime, fdata, fmed, fstd, fmin, fmax, ylow, yupper, rlow, rupper, tcnt]
    week_p = [wtime, wdata, wmed, wstd, wmin, wmax, wyl, wyu, wrl, wru, wcnt]
    short_p = [btime, bdata, bmed, bstd, bmin, bmax, byl, byu, brl, bru, bcnt]
    #
    #--- adding limits to the table
    #
    vtemp = [[], [], [], []]
    for k in range(0, len(wsave)):
        for m in range(0, 4):
            vtemp[m].append(wsave[k][m])
    week_p = week_p + vtemp + [wstate]
    #
    vtemp = [[], [], [], []]
    for k in range(0, len(vsave)):
        for m in range(0, 4):
            vtemp[m].append(vsave[k][m])
    short_p = short_p + vtemp + [bstate]
    #long_p = [[ftime], [fdata], [fmed], [fstd], [fmin], [fmax]]
    #long_p = long_p + [[ylow], [yupper], [rlow], [rupper], [tcnt]]
    #long_p = long_p + [[vlimits[0]], [vlimits[1]], [vlimits[2]], [vlimits[3]]]
    return [week_p, short_p, long_p]
#-------------------------------------------------------------------------------------------
#-- remove_old_data: remove the data older the cut time --
#-------------------------------------------------------------------------------------------
def remove_old_data(fits, cols, cut):
    """
    remove the data older the cut time
    input:  fits    --- fits file name
            cols    --- a list of column names
            cut     --- cut time in seconds from 1998.1.1
    output: updated fits file (rewritten in place)
    """
    handle = pyfits.open(fits)
    data = handle[1].data
    handle.close()
    #
    #--- locate the first entry at or after the cut time (0 if none found)
    #
    pos = 0
    for k, tval in enumerate(list(data['time'])):
        if tval >= cut:
            pos = k
            break
    #
    #--- keep only the entries from the cut position onward, then rewrite
    #
    udata = [list(data[name][pos:]) for name in cols]
    mcf.rm_files(fits)
    ecf.create_fits_file(fits, cols, udata)
#-------------------------------------------------------------------------------------------
#-- find_the_last_entry_time: find the last logged time --
#-------------------------------------------------------------------------------------------
def find_the_last_entry_time(yesterday):
    """
    find the last entry date and then make a list of dates up to yesterday
    input:  yesterday   --- date of yesterday in the format of yyyymmdd
    output: otime       --- a list of date in the format of yyyymmdd
    NOTE(review): `testfits` is not defined in this file's visible portion;
    it is presumably created by the exec-based config loader at the top of
    the script -- confirm before running standalone.
    """
    #
    #--- find the last entry date from the "testfits" file
    #
    f = pyfits.open(testfits)
    data = f[1].data
    f.close()
    #
    #--- find the last time logged and changed to a python standard time insecods
    #    NOTE(review): 883630800.0 appears to be the offset from the mission
    #    epoch (1998.1.1) to the Unix epoch plus 5 hours -- confirm the
    #    intended timezone handling.
    #
    ltime = numpy.max(data['time']) + 883630800.0
    #
    #--- find the time of the start of the day
    #
    ct = time.strftime('%Y%m%d', time.gmtime(ltime))
    year = int(ct[0:4])
    mon = int(ct[4:6])
    day = int(ct[6:8])
    dt = datetime.datetime(year, mon, day)
    ltime = time.mktime(dt.timetuple())
    #
    #--- set starting day to the next day
    #
    ltime = ltime + 86400.0
    #
    #--- convert yesterday to seconds
    #
    yesterday = str(yesterday)
    year = int(yesterday[0:4])
    mon = int(yesterday[4:6])
    day = int(yesterday[6:8])
    dt = datetime.datetime(year, mon, day)
    stime = time.mktime(dt.timetuple())
    # Build the list of day-start times from the last entry up to yesterday.
    ctime = [ltime]
    while ltime < stime:
        ltime += 86400.0
        ctime.append(ltime)
    #
    #--- convert the list to yyyymmdd format
    #
    otime = []
    for ent in ctime:
        oday = time.strftime('%Y%m%d', time.gmtime(ent))
        otime.append(oday)
    return otime
#-------------------------------------------------------------------------------------------
#-- find_violation_range: set violation range --
#-------------------------------------------------------------------------------------------
def find_violation_range(glim, time):
    """
    set violation range
    input:  glim    --- a list of lists of violation set [start, stop, yl, yu, rl, ru]
            time    --- time of the violation check
    output: vlimit  --- a four element list of [yl, yu, rl, ru]
    """
#
#--- defaults effectively mean "no limits"; when several sets match,
#--- the last matching set wins (no early exit, same as before)
#
    vlimit = [-9.0e9, -9.0e9, 9.0e9, 9.0e9]
    for entry in glim:
        begin = float(entry[0])
        end = float(entry[1])
        if begin <= time < end:
            vlimit = [entry[2], entry[3], entry[4], entry[5]]
    return vlimit
#-------------------------------------------------------------------------------------------
#-- find_violation_rate: find rate of yellow, red violations in both lower and upper limits
#-------------------------------------------------------------------------------------------
def find_violation_rate(carray, limits):
    """
    find rate of yellow, red violations in both lower and upper limits
    input:  carray  --- numpy array of the data
            limits  --- a list of limit [yellow lower, yellow upper, red lower, red upper]
    output: [yl, yu, rl, ru, tot]:  rate of yellow lower
                                    rate of yellow upper
                                    rate of red lower
                                    rate of red upper
                                    total number of the data
    """
    total = len(carray)
    denom = float(total)
    y_low = find_num_of_elements(carray, limits[0], side=0)
    y_up = find_num_of_elements(carray, limits[1], side=1)
    r_low = find_num_of_elements(carray, limits[2], side=0)
    r_up = find_num_of_elements(carray, limits[3], side=1)
#
#--- red violations are beyond yellow too; exclude them from the yellow counts
#
    return [(y_low - r_low) / denom, (y_up - r_up) / denom,
            r_low / denom, r_up / denom, total]
#-------------------------------------------------------------------------------------------
#-- find_num_of_elements: find the numbers of elements above or lower than limit comparing to the total data #
#-------------------------------------------------------------------------------------------
def find_num_of_elements(carray, lim, side=0):
    """
    find the numbers of elements above or lower than limit comparing to the total data
    input:  carray  --- numpy array of the data
            lim     --- the limit value
            side    --- lower:0 or upper:1 limit
    output: cnt     --- the numbers of the values beyond the limit
    """
#
#--- assume that a huge limit value means that there is no limit
#
    if abs(lim) > 1e6:
        return 0
#
#--- the original nested two bare try/excepts; len() of the index array
#--- cannot fail, so a single guard around the comparison is enough
#
    try:
        if side == 0:
            hits = numpy.where(carray < lim)
        else:
            hits = numpy.where(carray > lim)
        return len(hits[0])
    except Exception:
        # non-comparable / non-array input: treat as "no violations"
        return 0
#-------------------------------------------------------------------------------------------
#-- convert_unit_indicator: convert the temperature unit to glim indicator --
#-------------------------------------------------------------------------------------------
def convert_unit_indicator(cunit):
    """
    convert the temperature unit to glim indicator
    input:  cunit   --- degc, degf, or psia (case insensitive)
    output: tchk    --- 1, 2, 3 for above. all others will return 0
    """
#
#--- dict lookup replaces the if/elif chain; the except covers
#--- non-string input (e.g. None) just as the original bare except did
#
    mapping = {'degc': 1, 'degf': 2, 'psia': 3}
    try:
        return mapping.get(cunit.lower(), 0)
    except Exception:
        return 0
#-------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
def convert_time_format(stime):
    """
    convert 'yyyy-mm-ddTHH:MM:SS' into Chandra time (seconds)
    input:  stime   --- time string in 'yyyy-mm-ddThh:mm:ss' form
    output: stday   --- time in seconds from 1998.1.1
    """
    date_part, time_part = re.split('T', stime)
    fields = re.split('-', date_part)
    year = fields[0]
    mon = int(float(fields[1]))
    yday = int(float(fields[2]))
#
#--- pick the month-length table for leap/non-leap years
#
    if mcf.is_leapyear(year):
        alist = mday_list2
    else:
        alist = mday_list
#
#--- day of month -> day of year
#
    if mon > 1:
        yday += sum(alist[:mon - 1])
#
#--- zero-pad the day of year to three digits (same as the old if chain)
#
    date = year + ':' + str(yday).zfill(3) + ':' + time_part
    return Chandra.Time.DateTime(date).secs
#-------------------------------------------------------------------------------------------
if __name__ == "__main__":
    if len(sys.argv) > 1:
        # BUG FIX: str.strip() returns a new string; the original called
        # date.strip() and discarded the result, so a padded argument
        # was passed through unstripped.
        date = sys.argv[1].strip()
        update_simdiag_data(date)
    else:
        update_simdiag_data()
|
# Print the numbers 1 through 10, one per line.
l=[1,2,3,4,5,6,7,8,9,10]
for i in l:
    print (i)
# NOTE(review): the loop variable survives the loop, so this prints the
# final value (10) a second time -- possibly unintended.
print (i)
|
#-*-coding:utf-8-*-
#__author__='maxiaohui'
# Currently this script can only capture logs automatically and analyze the results.
# Next step: once the basic Smoke features are automated, the whole flow can be fully automated.
from adb import loggerHandler,deviceLogger
from config import config
import time
def _run_log_capture(prompt, log_key, wait_seconds, pass_key):
    """Capture device logcat output for one manual test scenario and analyze it.

    prompt       -- instruction printed for the manual tester (Chinese UI text)
    log_key      -- logcat filter key from config
    wait_seconds -- how long to record before terminating the capture
    pass_key     -- pass marker handed to the log analyzer
    """
    print(prompt)
    pro = deviceLogger.getLogcat(config.deviceId, "desktop29", log_key)
    time.sleep(wait_seconds)
    pro[0].terminate()
    # pro[1] is the path of the captured log file
    print(" 测试log文件:%s" % pro[1])
    loggerHandler.analyzeLog(pro[1], pass_key)
def runFr1N():
    """1:N face-recognition scenario (65 second capture)."""
    _run_log_capture("请测试:在**模式下,连续刷脸", config.faceRecognizationKey, 65, config.oneByNPass)
def runFr11():
    """1:1 face-recognition scenario (65 second capture)."""
    _run_log_capture("请测试:在**模式下,连续刷脸", config.faceRecognizationKey, 65, config.oneByonePass)
def runImportLocal():
    """Local import scenario; default capture window is half an hour."""
    _run_log_capture("请测试:**1", config.importLocalKey, 1800, config.importLocal)
def runExportLocal():
    """Local export scenario; default capture window is half an hour.
    TODO: derive the duration from actual progress instead of a fixed wait."""
    _run_log_capture("请测试:**2", config.exportLocalKey, 1800, config.exportLocal)
def runSaasIssue():
    """SaaS issuing scenario; waits 5 hours (the original comment said 4 --
    the code sleeps 5*60*60). TODO: make the duration condition-driven."""
    _run_log_capture("请测试:**3", config.issueBySaasKey, 5 * 60 * 60, config.saasIssueSuccess)
if __name__ == "__main__":  # the scenario this script currently runs
    runSaasIssue()
    #runFr11()
|
import pymongo
from collections import Counter
# Count retweets and the most common hashtags in the test collection.
DB = 'controversy'
mongo_col = 'test'
col = pymongo.MongoClient()[DB][mongo_col]
# count_documents({}) replaces Cursor.count(), which was deprecated in
# PyMongo 3.7 and removed in PyMongo 4.
print('#retweets {}'.format(col.count_documents({})))
# flatten the per-retweet hashtag lists into one frequency table
hashtag_freq = Counter([h for r in col.find() for h in r['hashtags']])
print(hashtag_freq.most_common(10))
|
"""
import math
x = float(input("Enter x: "))
y = math.sqrt(x)
print("The square root of",x,"equals to",y)
"""
"""
#ZeroDivisonError
try:
print("1")
x = 1/0
print("2")
except:
print("Oh dear, something went wrong...")
print("3")
"""
"""
try:
x = int(input("Enter a number: "))
y = 1/x
print(y)
except ZeroDivisionError:
print("You can´t devide by zero, sorry.")
except ValueError:
print("You must enter an interger value.")
except:
print("Oh dear, something went wrong...")
print("THE END")
"""
#the order matters
try:
y = 1/0
except ZeroDivisionError:
print("Zero Division")
except ArithmeticError:
print("Arithmetic Error")
print("THE END") |
import numpy as np
def read_ucr(filename):
    """Load a UCR-format TSV file: column 0 is the integer class label,
    the remaining columns are the feature values."""
    raw = np.loadtxt(filename, delimiter="\t")
    labels = raw[:, 0].astype(int)
    features = raw[:, 1:]
    return features, labels
x_train, y_train = read_ucr("FordA_TRAIN.txt")
x_test, y_test = read_ucr("FordA_TEST.txt")
np.save("FordA", (x_train, y_train, x_test, y_test), allow_pickle=True)
|
from django.shortcuts import render,redirect
from . models import *
from django.contrib import messages
def index(request):
    """Landing page: anonymous users see the index, signed-in users are
    sent straight to the dashboard."""
    if 'id' not in request.session:
        return render(request, 'black_app/index.html')
    return redirect('/dashboard')
def register(request):
    """Handle the registration form.

    On a valid POST: create/return the user, remember its id in the
    session and redirect to the dashboard.  On an invalid POST: flash
    each validator error.  GET requests fall through to the final
    redirect back to the index page.
    """
    if request.method == 'POST':
        # validate form data
        print("///////////////////////////////")  # debug marker
        deervalid = User.objects.registerValidator(request.POST)
        # if valid:
        if deervalid['valid']:
            # store user in session
            user = deervalid['user']
            request.session['id'] = user.id
            # redirect to wall page
            return redirect('/dashboard')
        # else if invalid
        # create error messages
        # redirect to /
        else:
            for error in deervalid['errors']:
                messages.add_message(request, messages.ERROR, error)
    return redirect('/')
def login(request):
    """Handle the login form.

    Valid credentials store the user's id in the session and redirect to
    the dashboard; otherwise each validator error is flashed and the
    user returns to the index page (GET also falls through to '/').
    """
    if request.method == 'POST':
        validation_response = User.objects.loginValidator(request.POST)
        if validation_response['valid']:
            user = validation_response['user']
            request.session['id'] = user.id
            return redirect('/dashboard')
        else:
            for error in validation_response['errors']:
                messages.add_message(request, messages.ERROR, error)
    return redirect('/')
def createJob(request):
    """POST: create a Job owned by the signed-in user and go back to the
    dashboard.  GET: render the job-creation form."""
    if request.method == 'POST':
        user = User.objects.get(id=request.session['id'])
        # debug output of the submitted fields
        print(request.POST['title'])
        print(request.POST['description'])
        print(request.POST['location'])
        Job.objects.create(created_by=user, title=request.POST['title'], description=request.POST['description'], location=request.POST['location'])
        return redirect("/dashboard")
    return render(request, 'black_app/addJob.html')
def dashboard(request):
    """Dashboard page: every job, the signed-in user's claimed jobs, and
    the still-unclaimed (open) jobs."""
    user = User.objects.get(id=request.session['id'])
    all_jobs = Job.objects.all()
    my_jobs = all_jobs.filter(added_by=user)
    # NOTE (translated from Russian): without the open_jobs filter below, a
    # claimed job would still appear in the general table; filtering on
    # added_by makes "claiming" also remove the job from the open list
    # instead of acting like a mere favorite.
    open_jobs = []
    for j in all_jobs:
        if not j.added_by:
            open_jobs.append(j)
    context = {
        "all_jobs": all_jobs,
        "user": user,
        "my_jobs": my_jobs,
        "open_jobs": open_jobs
    }
    return render(request,'black_app/dashboard.html', context)
def addJob(request, id):
    """Claim job *id* for the signed-in user (the one-to-many 'added_by'
    relation) and return to the dashboard."""
    signed_user = User.objects.get(id=request.session['id'])
    chosen_job = Job.objects.get(id=id)
    chosen_job.added_by = signed_user
    chosen_job.save()
    return redirect('/dashboard')
def viewJob(request, num):
    """Show a single job, plus whether the signed-in user has already
    claimed ('took') it."""
    user = User.objects.get(id=request.session['id'])
    job = Job.objects.get(id=num)
    context={
        'job': job,
        'signed_user': user,
        # membership test against the user's claimed jobs
        'took': job in user.added_jobs.all(),
    }
    return render(request, 'black_app/view.html', context)
def editJob(request, id):
    """POST: apply the submitted edits to job *id* and return to the
    dashboard.  GET: render the edit form for that job."""
    if request.method == "POST":
        edit = Job.objects.get(id=id)
        edit.title = request.POST['title']
        edit.description = request.POST['description']
        edit.location= request.POST['location']
        edit.save()
        return redirect('/dashboard')
    else:
        job = Job.objects.get(id=id)
        context = {
            "job" : job
        }
        return render (request,'black_app/edit.html',context)
def backtoDashboard(request):
    # Convenience redirect back to the dashboard.
    # NOTE(review): this function is defined again near the bottom of this
    # module; the later definition shadows this one.
    return redirect('/dashboard')
def deleteFromList(request, id):
    """Permanently delete job *id* and return to the dashboard."""
    doomed = Job.objects.get(id=id)
    doomed.delete()
    return redirect('/dashboard')
def deleteFromMyList(request,id):
    """Delete a job from the user's list.

    NOTE (translated): as written, the delete button removes the job from
    the user's list AND from the database at the same time; to only detach
    it from the user, uncomment the three lines below and drop the delete.
    """
    # user = User.objects.get(id=request.session['id'])
    # job_mylist = Job.objects.get(id=id)
    # user.added_jobs.remove(job_mylist)
    job = Job.objects.get(id=id)
    job.delete()
    return redirect('/dashboard')
def logout(request):
    """Clear the entire session and return to the landing page."""
    request.session.clear()
    return redirect('/')
def backtoDashboard(request):
    # NOTE(review): duplicate of the identical definition earlier in this
    # module; this later one wins at import time.
    return redirect('/dashboard')
|
import unittest
from katas.kyu_6.regexp_basics_parsing_time import to_seconds
class ToSecondsTestCase(unittest.TestCase):
    """Tests for to_seconds(): parse a strict 'hh:mm:ss' string into total
    seconds, returning None for malformed input (wrong field widths,
    out-of-range minutes/seconds, or surrounding whitespace)."""
    def test_equals(self):
        self.assertEqual(to_seconds('00:00:00'), 0)
    def test_equals_2(self):
        self.assertEqual(to_seconds('01:02:03'), 3723)
    def test_equals_3(self):
        self.assertEqual(to_seconds('99:59:59'), 359999)
    def test_none_(self):
        self.assertIsNone(to_seconds('01:02:60'))
    def test_none_2(self):
        self.assertIsNone(to_seconds('01:60:03'))
    def test_none_3(self):
        self.assertIsNone(to_seconds('0:00:00'))
    def test_none_4(self):
        self.assertIsNone(to_seconds('00:0:00'))
    def test_none_5(self):
        self.assertIsNone(to_seconds('00:00:0'))
    def test_none_6(self):
        self.assertIsNone(to_seconds('00:00:00\n'))
    def test_none_7(self):
        self.assertIsNone(to_seconds('\n00:00:00'))
|
import os
import sys
sys.path.append(os.getcwd())
from base.get_driver import GetDriver
from page.page_biannianaolai import PageBaiNianAoLai
class TestLogin():
    """UI test: log in to the BaiNianAoLai app with a known account, then
    log back out through the settings screen (driver-based page objects)."""
    def setup(self):
        # fresh driver and page object before every test
        self.driver = GetDriver()
        self.login = PageBaiNianAoLai(self.driver)
    def teardown(self):
        self.driver.quit()
    def test_login(self, username="itheima", pwd="123456"):
        """Navigate Me -> existing account -> credentials -> login, then
        settings -> drag-and-drop -> exit -> confirm."""
        self.login.page_me()
        self.login.page_existing_account()
        self.login.page_username(username)
        self.login.page_pwd(pwd)
        self.login.page_click_login()
        self.login.page_click_setting()
        self.login.page_drag_and_drop()
        self.login.page_exit()
        self.login.page_yes()
|
# coding=utf-8
"""
题目:用两个栈实现一个队列
"""
class Queue(object):
    """FIFO queue built from two LIFO stacks.

    push() always appends to push_stack; pop() drains push_stack into
    pop_stack (reversing the order) only when pop_stack is empty, giving
    amortized O(1) operations.
    """
    def __init__(self):
        self.push_stack = list()  # incoming values, newest last
        self.pop_stack = list()   # outgoing values, oldest last
    def push(self, value):
        """Enqueue a value."""
        self.push_stack.append(value)
    def pop(self):
        """Dequeue and return the oldest value.

        Raises Exception when the queue is empty (the original raised a
        bare Exception with no message).
        """
        if self.pop_stack:
            return self.pop_stack.pop()
        if not self.push_stack:
            raise Exception("pop from an empty queue")
        while self.push_stack:
            self.pop_stack.append(self.push_stack.pop())
        return self.pop_stack.pop()
"""
用两个队列实现一个栈
"""
class Stack(object):
    """LIFO stack built from two FIFO queues (lists popped from the front).

    pop() shuttles all but the last element into the helper queue, pops
    the survivor, then swaps the two queues.
    """
    def __init__(self):
        self.queue = list()
        self.help_queue = list()
    def add(self, value):
        """Push a value."""
        self.queue.append(value)
    def pop(self):
        """Remove and return the most recently added value.

        Raises IndexError with a clear message when the stack is empty;
        the original fell into list.pop(0) on an empty list instead.
        """
        if not self.queue:
            raise IndexError("pop from an empty stack")
        while len(self.queue) != 1:
            self.help_queue.append(self.queue.pop(0))
        pop_value = self.queue.pop()
        self.queue, self.help_queue = self.help_queue, self.queue
        return pop_value
if __name__ == '__main__':
    # Demo the queue: values come out in insertion order.
    # Parenthesized print works as a statement in Python 2 and as the
    # print function in Python 3 (the original used py2-only `print x`).
    q = Queue()
    q.push(1)
    q.push(2)
    q.push(3)
    print(q.pop())
    # Demo the stack: the last value added comes out first.
    s = Stack()
    s.add(1)
    s.add(2)
    s.add(3)
    print(s.pop())
#!/usr/bin/env
# -*- coding: utf-8 -*-
__author__ = 'vmture'
import csv
import wx
from new_script import qihu_new, apple_new, baidu_new, tengxun_new, xiaomi_new
from com.common import CommonFunction
class ButtonFrame(wx.Frame):
    """Main window with one button per distribution channel (Python 2 + wx).

    Each button runs that channel's update check and shows the result in
    a message dialog.  The five channel handlers previously duplicated
    the same ~20 lines; they now share _show_channel_result().
    """
    def __init__(self):
        # message prefixes shown depending on whether the channel updated
        self.update = '渠道已更新\n\n'
        self.un_update = '渠道未更新\n\n'
        a = CommonFunction()
        a.create_folders()
        wx.Frame.__init__(self, None, -1, u'今日头条更新信息',
                          size=(500, 150))
        self.panel = wx.Panel(self, -1)
        # reference width/height used to lay the six buttons out in a grid
        button_x_pos = 500
        button_y_pos = 300
        self.button_xiaomi = wx.Button(self.panel, -1, u"小米渠道", pos=(50, 20))
        self.button_baidu = wx.Button(self.panel, -1, u'百度渠道', pos=(button_x_pos/3+50, 20))
        self.button_tengxun = wx.Button(self.panel, -1, u'腾讯渠道', pos=(button_x_pos/3*2+50, 20))
        self.button_360 = wx.Button(self.panel, -1, u'360渠道', pos=(50, button_y_pos/5+20))
        self.button_apple = wx.Button(self.panel, -1, u'苹果渠道', pos=(button_x_pos/3+50, button_y_pos/5+20))
        self.button_guanwang = wx.Button(self.panel, -1, u'官网渠道', pos=(button_x_pos/3*2+50, button_y_pos/5+20))
        self.Bind(wx.EVT_BUTTON, self.OnClick_xiaomi, self.button_xiaomi)
        self.Bind(wx.EVT_BUTTON, self.OnClick_baidu, self.button_baidu)
        self.Bind(wx.EVT_BUTTON, self.OnClick_tengxun, self.button_tengxun)
        self.Bind(wx.EVT_BUTTON, self.OnClick_360, self.button_360)
        self.Bind(wx.EVT_BUTTON, self.OnClick_apple, self.button_apple)
        self.Bind(wx.EVT_BUTTON, self.OnClick_guanwang, self.button_guanwang)
        self.Bind(wx.EVT_CLOSE, self.OnClose)
    def _show_channel_result(self, run_func, title):
        """Run one channel's update check and display the outcome.

        run_func -- the channel module's ``run()``; returns
                    (csv_path_or_error_text, update_flag)
        title    -- unicode dialog title

        The local name also stops shadowing the builtin ``str`` as the
        original handlers did.
        """
        file_path, update = run_func()
        if update == 1:
            msg = self.update
        else:
            msg = self.un_update
        if 'csv' in file_path:
            # first csv row is the header, last row is the newest record
            datas = list(csv.reader(open(file_path, 'r')))
            msg += '文件已创建,路径为:'+file_path+'\n'
            old_datas = datas[0]
            new_datas = datas[-1]
            for i in old_datas:
                msg += i+':\n'
                msg += new_datas[old_datas.index(i)]+'\n'
        else:
            # run() returned an error message instead of a csv path
            msg = file_path
        dlg = wx.MessageDialog(None, msg.decode(encoding='utf-8'), title, wx.YES_DEFAULT)
        if dlg.ShowModal() == wx.ID_YES:
            dlg.Destroy()
    def OnClick_xiaomi(self, event):
        """Xiaomi channel button."""
        self._show_channel_result(xiaomi_new.run, u'小米渠道')
    def OnClick_baidu(self, event):
        """Baidu channel button."""
        self._show_channel_result(baidu_new.run, u'百度渠道')
    def OnClick_tengxun(self, event):
        """Tencent channel button."""
        self._show_channel_result(tengxun_new.run, u'腾讯渠道')
    def OnClick_360(self, event):
        """360 channel button."""
        self._show_channel_result(qihu_new.run, u'360渠道')
    def OnClick_apple(self, event):
        """Apple channel button."""
        self._show_channel_result(apple_new.run, u'苹果渠道')
    def OnClick_guanwang(self, event):
        """Official-site channel: not implemented yet, just say so."""
        dlg = wx.MessageDialog(None, u'功能还未完成', u'官网渠道', wx.YES_DEFAULT)
        if dlg.ShowModal() == wx.ID_YES:
            dlg.Destroy()
    def OnClose(self, event):
        # tear the whole app down when the frame is closed
        wx.Exit()
        self.panel.Destroy()
if __name__ == '__main__':
    # Standard wxPython bootstrap: create the app, show the frame and
    # hand control to the GUI event loop.
    app = wx.App()
    frame = ButtonFrame()
    frame.Show()
    app.MainLoop()
|
import string, sys, math
def getStart(m):
    """Return the fixed (y, x) origin for wire tracing.

    Hard-coded offsets chosen so the puzzle input stays inside the grid
    (instead of the geometric centre of *m*); TODO: a sparse structure
    would remove the need for this tuning.
    """
    return 7000, 2000
def initField(width, height):
    """Create a height x width grid of zeroes as a list of row lists."""
    return [[0] * width for _ in range(height)]
def countSteps(m, line, key):
    """Walk wire *line* from the start point and count steps until a cell
    containing *key* is reached.

    m    -- 2D grid previously populated by addLine()
    line -- list of moves like 'R8', 'U5' (direction letter + distance)
    key  -- cell value that terminates the walk
    Returns the step count, or the empty list *results* if the key is
    never reached.  NOTE(review): that fallback has an inconsistent type
    (list vs int) -- callers that add the result assume an int.
    """
    y, x = getStart(m)
    results = []
    steps = 0
    for i in range(0, len(line)):
        cmd = line[i][0:1]      # direction letter: R/L/U/D
        num = int(line[i][1:])  # distance to travel
        for j in range (0, num):
            # test before moving, so each cell is checked exactly once
            if m[y][x] == key:
                return steps
            if cmd == 'R':
                x += 1
            elif cmd == 'L':
                x -= 1
            elif cmd == 'U':
                y += 1
            elif cmd == 'D':
                y -= 1
            steps += 1
    # dont forget the last point
    if m[y][x] == key:
        return steps
    return results
def findLowestSteps(m, line, line2, key):
    """For every cell where wire *line* crosses a *key* marker, compute the
    combined step count of both wires to that crossing.

    Each crossing is temporarily re-marked with 9 so countSteps() on the
    second wire stops exactly there, then restored.  Returns the list of
    combined step counts (unsorted).
    """
    y, x = getStart(m)
    results = []
    steps = 0
    for i in range(0, len(line)):
        cmd = line[i][0:1]      # direction letter: R/L/U/D
        num = int(line[i][1:])  # distance to travel
        for j in range (0, num):
            # test before moving, mirroring countSteps()
            if m[y][x] == key:
                print("-found %i at y=%i x=%i after s=%i" % (key, y, x, steps))
                m[y][x] = 9
                results.append(steps + countSteps(m, line2, 9))
                m[y][x] = key
            if cmd == 'R':
                x += 1
            elif cmd == 'L':
                x -= 1
            elif cmd == 'U':
                y += 1
            elif cmd == 'D':
                y -= 1
            steps += 1
    # dont forget the last point
    if m[y][x] == key:
        print("--found %i at y=%i x=%i s=%i" % (key, y, x, steps))
        m[y][x] = 9
        results.append(steps + countSteps(m, line2, 9))
        m[y][x] = key
    print("after %s steps=%i" % (line[i], steps))
    return results
def addLine(m, line, key):
    """Trace wire *line* onto grid *m*, adding *key* into each visited cell.

    Because keys are added, overlapping wires accumulate distinct sums
    (e.g. wires 2 and 3 make intersections equal 5).  Exits the whole
    process with diagnostics if the wire runs off the grid.
    """
    y, x = getStart(m)
    try:
        for i in range(0, len(line)):
            cmd = line[i][0:1]      # direction letter: R/L/U/D
            num = int(line[i][1:])  # distance to travel
            for j in range (0, num):
                m[y][x] = m[y][x] + key # just add the new key on top
                if cmd == 'R':
                    x += 1
                elif cmd == 'L':
                    x -= 1
                elif cmd == 'U':
                    y += 1
                elif cmd == 'D':
                    y -= 1
        # dont forget the last point
        m[y][x] = m[y][x] + key # just add the new key on top
    except IndexError:
        # report exactly where the wire left the field, then abort
        sys.stderr.write("Index out of range at i=%s %s - y=%i / x=%i -- j=%i end=%i left=%i\n" % (i, cmd, y, x, j, num, num - j))
        sys.exit(-1)
        pass  # unreachable after sys.exit
    return m
def printField(m):
    """Render the grid to stdout: 10 -> '.', other positives -> the digit,
    anything else -> blank, framed by two rows of '#'."""
    print('########################################')
    for row in m:
        rendered = []
        for col in range(0, len(m[0])):
            cell = row[col]
            if cell == 10:
                rendered.append('.')
            elif cell > 0:
                rendered.append(str(cell))
            else:
                rendered.append(' ')
        sys.stdout.write(''.join(rendered) + '\n')
    print('########################################')
def manDist(p1X, p1Y, p2X, p2Y):
    """Manhattan (taxicab) distance between the two points."""
    dx = abs(p1X - p2X)
    dy = abs(p1Y - p2Y)
    return dx + dy
def findIntersect(m):
    """Return the smallest Manhattan distance from the start point to any
    cell marked 5 (an intersection of wires 2 and 3).

    NOTE(review): the inner loop also uses len(m) as the x bound, so only
    square grids are fully scanned; calcDist() builds (size, size) grids,
    which keeps this correct here.
    """
    sY, sX = getStart(m)
    # initial upper bound larger than any distance within the grid
    minD = len(m) + len(m)
    for y in range(0, len(m)):
        for x in range(0, len(m)):
            if m[y][x] == 5:
                d = manDist(x, y, sX, sY)
                print("found x at %i|%i with d=%i" %(y, x, d))
                if minD > d:
                    minD = d
    return minD
def calcDist(line1, line2, size):
    """Parse both comma-separated wire paths, trace them onto a size x size
    grid, and return the lowest combined step count to an intersection."""
    m = initField(size, size)
    sY, sX = getStart(m)
    line1 = line1.split(",")
    line2 = line2.split(",")
    print(line1)
    print(line2)
    # wire keys 2 and 3: overlapping cells sum to 5 (= intersection)
    addLine(m, line1, 2)
    addLine(m, line2, 3)
    m[sY][sX] = 1 # ignore the starting point
    #printField(m)
    # NOTE(review): list.copy() is shallow (row lists are shared with m)
    # and m2 is never used afterwards.
    m2 = m.copy()
    res = findLowestSteps(m, line1, line2, 5)
    res.sort()
    print(res)
    return res[0]
    #return min(sum1, sum2)
if len(sys.argv) > 1 and sys.argv[1] == "test":
    # self-test mode: tests is a flat list of
    # (wire1, wire2, grid size, expected result) quadruples
    print("test mode:")
    tests = [
        "R8,U5,L5,D3", "U7,R6,D4,L4", 20, 6,
        "R75,D30,R83,U83,L12,D49,R71,U7,L72", "U62,R66,U55,R34,D71,R55,D58,R83", 500, 610
    ]
    i = 0
    while i < len(tests):
        res = calcDist(tests[i], tests[i+1], tests[i+2])
        print("test %i %i => %s" % (i, res, "OK" if res == tests[i+3] else "ERR!"))
        i += 4
else:
    # puzzle mode: read the two wire paths from <script>.input.txt
    # (the large fixed grid size matches the offsets in getStart)
    with open(__file__.replace('.py', '.input.txt'), 'r') as f:
        line1 = f.readline().strip()
        line2 = f.readline().strip()
        print(calcDist(line1, line2, 15000))
"""marquee logging formatter."""
import json
from logging import Formatter
class MarqueeFormatter(Formatter):
    """Logging formatter that emits each record as a JSON document."""
    def __init__(self, source=None, *args, **kwargs):
        """marquee logging formatter.

        Args:
            source (str): application from which you are logging.
                NOTE(review): stored but not included in the JSON output.
        """
        if source is None:
            source = __name__
        self.source = source
        super(MarqueeFormatter, self).__init__(*args, **kwargs)
    def format(self, record):
        """Overridden format method.

        Builds the JSON payload consumed by the CloudWatch Event pipeline.
        """
        payload = {
            'created': self.formatTime(record),
            'level': record.levelname,
            'level_number': record.levelno,
            'message': super(MarqueeFormatter, self).format(record),
        }
        return json.dumps(payload)
class MarqueeEventFormatter(Formatter):
    """Logging formatter that wraps each record in an event-style JSON doc."""
    def __init__(self, event_type=None, source=None, *args, **kwargs):
        """marquee event formatter.

        Args:
            event_type (str): value emitted in the 'event-type' field;
                defaults to the source.
            source (str): application from which you are logging.
                NOTE(review): only used as the event_type fallback; it is
                never emitted itself.
        """
        self.source = source if source is not None else __name__
        self.event_type = event_type if event_type else self.source
        super(MarqueeEventFormatter, self).__init__(*args, **kwargs)
    def format(self, record):
        """Overridden format method.

        This will create a json object containing data for the CloudWatch Event.
        """
        return json.dumps({
            'created': self.formatTime(record),
            'event-type': self.event_type,
            'event': super(MarqueeEventFormatter, self).format(record)
        })
|
# -*- coding: utf-8 -*-
# @Time : 2020/5/26
# @Author : J
# @File : 图像的几何变换.py
# @Software: PyCharm
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
# --- Scaling (resize) ---
# img = cv.imread("../image2.jpg")
# res = cv.resize(img,None,fx=2,fy=2,interpolation = cv.INTER_CUBIC)  # scale 2x via fx/fy
# # height,width = img.shape[:2]
# # res = cv.resize(img,(2*width,2*height),interpolation = cv.INTER_CUBIC)  # scale 2x via explicit size
# cv.imshow("image",img)
# cv.waitKey(0)
# cv.destroyAllWindows()
# --- Translation ---
# img = cv.imread("../image5.jpg")
# rows,cols,ch = img.shape
# M = np.float32([[1,0,100],[0,1,50]])  # shift by (100, 50)
# dst = cv.warpAffine(img,M,(cols,rows))  # apply the translation
# cv.imshow("image",dst)
# cv.waitKey(0)
# cv.destroyAllWindows()
# --- Rotation ---
# img = cv.imread("../image5.jpg")
# rows,cols,ch = img.shape
# M = cv.getRotationMatrix2D(((cols-1)/2.0,(rows-1)/2.0),90,1)
# dst = cv.warpAffine(img,M,(cols,rows))
# cv.imshow("image",dst)
# cv.waitKey(0)
# cv.destroyAllWindows()
# --- Affine transform (coordinate mapping) ---
# img = cv.imread("../image5.jpg")
# rows,cols,ch = img.shape
# pst1 = np.float32([[50,50],[200,50],[50,200]])
# pst2 = np.float32([[10,100],[200,50],[100,250]])
# M = cv.getAffineTransform(pst1,pst2)  # build the 2x3 matrix
# dst = cv.warpAffine(img,M,(cols,rows))
# # plt.subplot(121),plt.imshow(img),plt.title("Input")
# # plt.subplot(122),plt.imshow(dst),plt.title("Output")
# cv.imshow("image",dst)
# cv.waitKey(0)
# cv.destroyAllWindows()
# --- Perspective transform (e.g. scanning a test paper) ---
img = cv.imread("../image5.jpg")
rows,cols,ch = img.shape
# four source corner points and the square they should map onto
pts1 = np.float32([[56,65],[368,52],[28,387],[389,390]])
pts2 = np.float32([[0,0],[300,0],[0,300],[300,300]])
M = cv.getPerspectiveTransform(pts1,pts2)
dst = cv.warpPerspective(img,M,(300,300))
# plt.subplot(121),plt.imshow(img),plt.title("Input")
# plt.subplot(122),plt.imshow(dst),plt.title("Output")
# plt.show()
cv.imshow("image",dst)
cv.waitKey(0)
cv.destroyAllWindows()
|
from indicator import Indicator
import states
class Ichimoku(Indicator):
    """Ichimoku Kinko Hyo indicator.

    ``prices`` lists are assumed to be ordered most-recent-first
    (prices[0] == current close) -- TODO confirm against the data feed.

    Fixes vs. the original:
    * every method was missing the ``self`` parameter even though the
      bodies reference instance attributes (guaranteed NameError);
    * ``old_kijun_sen`` was computed with ``tenkan_sen_period``
      (copy/paste error) -- it now uses ``kijun_sen_period``;
    * a large block of commented-out alternative signal logic was removed.
    """
    def __init__(self, utils, config, logger, timeframe):
        Indicator.__init__(self, utils, config, logger, timeframe)
        # line periods, all taken from the strategy configuration
        self.senkou_span_b_period = self.cfg.SENKOU_SPAN_B_PERIOD
        self.displacement_period = self.cfg.DISPLACEMENT_PERIOD
        self.tenkan_sen_period = self.cfg.TENKAN_SAN_PERIOD
        self.kijun_sen_period = self.cfg.KIJUN_SEN_PERIOD
    def _midpoint(self, prices, start, period):
        """Midpoint of the high and low over prices[start:start+period]."""
        window = prices[start:start + period]
        return (max(window) + min(window)) / 2
    def calc_ichi(self, prices: list) -> list:
        """Compute the Ichimoku lines for the given price window.

        Returns [tenkan_sen, kijun_sen, current_senkou_span_a,
                 current_senkou_span_b, future_senkou_span_a,
                 future_senkou_span_b, chikou span / current close].
        """
        d = self.displacement_period
        # Tenkan Sen / Kijun Sen over the most recent window
        tenkan_sen = self._midpoint(prices, 0, self.tenkan_sen_period)
        kijun_sen = self._midpoint(prices, 0, self.kijun_sen_period)
        # Current cloud: the same lines displaced back in time
        old_tenkan_sen = self._midpoint(prices, d, self.tenkan_sen_period)
        # FIX: the original used tenkan_sen_period here
        old_kijun_sen = self._midpoint(prices, d, self.kijun_sen_period)
        current_senkou_span_a = (old_tenkan_sen + old_kijun_sen) / 2
        current_senkou_span_b = self._midpoint(prices, d, self.senkou_span_b_period)
        # Future cloud, projected from the current lines
        future_senkou_span_a = (tenkan_sen + kijun_sen) / 2
        future_senkou_span_b = self._midpoint(prices, 0, self.senkou_span_b_period)
        # Chikou Span equals the current closing price
        chikou_span_and_current_closing_price = prices[0]
        return [tenkan_sen, kijun_sen, current_senkou_span_a,
                current_senkou_span_b, future_senkou_span_a,
                future_senkou_span_b, chikou_span_and_current_closing_price]
    def signal(self, previous_ichimokus: list):
        """Derive [name, direction] signal pairs from a history of
        calc_ichi() rows (oldest first).

        NOTE(review): the index arithmetic below is preserved from the
        original, but it looks inconsistent with calc_ichi()'s 7-element
        rows (valid indices 0-6): ``[7]`` would raise IndexError, and both
        Kumo-breakout conditions compare against ``max`` of the same
        slice.  Confirm the intended row layout before trusting these.
        """
        data = previous_ichimokus
        current_close_index = len(data) - 1
        previous_close_index = len(data) - 2
        signals = []
        # Tenkan Sen / Kijun Sen Cross
        if data[previous_close_index][1] < data[previous_close_index][2] and data[current_close_index][1] > data[current_close_index][2]:
            signals.append(["Tenkan Sen / Kijun Sen Cross", "Bullish"])
        elif data[previous_close_index][1] > data[previous_close_index][2] and data[current_close_index][1] < data[current_close_index][2]:
            signals.append(["Tenkan Sen / Kijun Sen Cross", "Bearish"])
        # Kijun Sen Cross
        if data[previous_close_index][7] < data[previous_close_index][2] and data[current_close_index][7] > data[current_close_index][2]:
            signals.append(["Kijun Sen Cross", "Bullish"])
        elif data[previous_close_index][7] > data[previous_close_index][2] and data[current_close_index][7] < data[current_close_index][2]:
            signals.append(["Kijun Sen Cross", "Bearish"])
        # Kumo Breakout
        if data[previous_close_index][7] < max(data[previous_close_index][-6:-3]) and data[previous_close_index][7] > max(data[previous_close_index][-6:-3]):
            signals.append(["Kumo Breakout", "Bullish"])
        elif data[previous_close_index][7] > max(data[previous_close_index][-6:-3]) and data[previous_close_index][7] > min(data[previous_close_index][-6:-3]):
            signals.append(["Kumo Breakout", "Bearish"])
        # Bull or Bear Kumo Cloud
        if data[current_close_index][5] > data[current_close_index][6]:
            signals.append(["Kumo Cloud", "Bullish"])
        elif data[current_close_index][5] < data[current_close_index][6]:
            signals.append(["Kumo Cloud", "Bearish"])
        return signals
    async def acalc_ichi(self, symbol: str):
        """Fetch just enough history for the configured periods and
        compute the Ichimoku lines for *symbol*."""
        length = self.displacement_period + max(self.tenkan_sen_period,
                                                self.kijun_sen_period,
                                                self.senkou_span_b_period)
        prices = await self.utils.get_historical_data(symbol, length, self.timeframe)
        return self.calc_ichi(prices)
"""Manage core level cgroups.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import click
from treadmill import cgroups
from treadmill import cgutils
from treadmill import utils
_LOGGER = logging.getLogger(__name__)
def init():
    """Return top level command handler."""
    # Disable too many branches warning.
    #
    # pylint: disable=R0912
    @click.group(chain=True)
    def top():
        """Manage core cgroups."""
    @top.command(name='exec')
    @click.option('--into', multiple=True)
    @click.argument('subcommand', nargs=-1)
    def cgexec(into, subcommand):
        """execs command into given cgroup(s).

        Each --into value is "subsystem:path" or
        "subsystem:path/pseudofile=value"; the latter writes the value
        into the cgroup pseudofile, the former creates/joins the cgroup.
        """
        cgrps = [cgrp.split(':') for cgrp in into]
        for (subsystem, path) in cgrps:
            pathplus = path.split('=')
            if len(pathplus) == 2:
                # "group/pseudofile=value" form: set the value
                group = os.path.dirname(pathplus[0])
                pseudofile = os.path.basename(pathplus[0])
                value = pathplus[1]
                cgroups.set_value(subsystem, group, pseudofile, value)
            else:
                # plain cgroup path: create it if needed, then join it
                cgutils.create(subsystem, path)
                cgroups.join(subsystem, path)
        if subcommand:
            # replace the current process with the requested command
            execargs = list(subcommand)
            utils.sane_execvp(execargs[0], execargs)
    # drop the local name; the click decorator has already registered the
    # command on the group
    del cgexec
    return top
|
# flake8: noqa
from .bresenham import bresenham, bresenham_multiply
from .douglas_peucker import douglas_peucker
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 14 21:40:08 2020
@author: user
"""
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt

# 100 evenly spaced sample points spanning [-5, 5].
x = torch.linspace(-5, 5, 100)
print(x)
# Variable() has been a documented no-op wrapper since PyTorch 0.4 and is kept
# here only so the script's flow matches its original form.
x = Variable(x)
# -------------------------------------
x_np = x.data.numpy()
y_relu = F.relu(x).data.numpy()
# FIX: F.sigmoid and F.tanh are deprecated; the documented replacements are
# torch.sigmoid and torch.tanh (numerically identical, no DeprecationWarning).
y_sigmoid = torch.sigmoid(x).data.numpy()
y_tanh = torch.tanh(x).data.numpy()
y_softplus = F.softplus(x).data.numpy()
# --------------------------------------
# Plot each activation in its own panel of a 2x2 grid, with a fixed y-range
# chosen to frame that activation's output and a legend in the best corner.
plt.figure(1, figsize=(10, 10))
plt.subplot(221)
plt.plot(x_np, y_relu, '-b', label='relu')
plt.ylim((-1, 5))
plt.legend(loc='best')

plt.subplot(222)
plt.plot(x_np, y_sigmoid, '-g', label='sigmoid')
plt.ylim((-0.2, 1.2))
plt.legend(loc='best')

plt.subplot(223)
plt.plot(x_np, y_tanh, '-r', label='tanh')
plt.ylim((-1.2, 1.2))
plt.legend(loc='best')

plt.subplot(224)
plt.plot(x_np, y_softplus, '-c', label='softplus')
plt.ylim((-0.2, 6))
plt.legend(loc='best')

plt.show()
import torch
import torch.nn as nn
import numpy as np
from test01 import get_data
from test02 import Net
# Train a small Net to predict the next value of a series from a sliding
# window of the previous 9 values, read from an Excel sheet.
path = r"./data1.xls"

net = Net()
# net.load_state_dict(torch.load("./params"))
# NOTE(review): BCELoss expects inputs/targets in [0, 1]; the max-normalisation
# below only guarantees that when all raw values are non-negative — confirm.
loss_fn = nn.BCELoss()
optimizer = torch.optim.Adam(net.parameters())

datas = get_data.red_excel(path)
max_data = np.max(datas)
# Scale the entire series by its maximum so values fall in [0, 1].
train_data = np.array(datas)/max_data
print(train_data)

for epoch in range(10):
    # Sliding window: 9 consecutive values predict the 10th.
    for i in range(len(train_data)-9):
        x = train_data[i:i+9]
        y = train_data[i+9:i+10]
        xs = torch.reshape(torch.tensor(x,dtype=torch.float32),[-1,1,9])
        ys = torch.tensor(y,dtype=torch.float32)
        _y = net(xs)
        loss = loss_fn(_y,ys)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print(loss.item())
        # Undo the normalisation so prediction/label print in original units.
        out = int(_y*max_data)
        label = int(ys*max_data)
        print(out)
        print(label)
        print(i,"/",epoch)
# NOTE(review): indentation was lost in this chunk; the checkpoint write is
# placed after training here (the conventional reading) — confirm whether the
# original saved inside the loop instead.
torch.save(net.state_dict(),"./params.pth")
class Solution:
    def productExceptSelf(self, nums: List[int]) -> List[int]:
        """Return a list where entry i is the product of every element of
        *nums* except nums[i], without using division. O(n) time."""
        n = len(nums)
        # prefix[i] holds the product of nums[0..i-1].
        prefix = [1] * n
        for i in range(1, n):
            prefix[i] = prefix[i - 1] * nums[i - 1]
        # Sweep from the right, folding in the running suffix product.
        suffix = 1
        for i in range(n - 1, -1, -1):
            prefix[i] *= suffix
            suffix *= nums[i]
        return prefix
|
import os, sys, time, re
class g:
    """Shared, mutable script configuration, accessed via class attributes."""
    auto_set = True     # with no CLI args, replay the last logged run ("-l")
    start_byte = ""     # first octet of candidate IPs (-b1); "" = use default
    second_byte = ""    # second octet (-b2); "" = use default
    third_byte = ""     # third octet (-b3); "" = use default
    start = ""          # first value of the scanned last octet (-s)
    end = ""            # last value of the scanned last octet (-e)
    arg_list = ""       # raw option string, logged on a successful run
    # Log file lives next to this script, named "<script>-Log.txt".
    logfile = os.path.splitext(os.path.basename(__file__))[0] + "-Log.txt"
def set_dns(dns_primary, dns_secondary=""):
    """Point the Windows wireless adapter at the given DNS servers.

    Passing "auto" as the primary server reverts the adapter to DHCP-supplied
    DNS and skips the secondary entirely.
    """
    adapter = "\"Wireless Network Connection\""
    if dns_primary == "auto":
        os.system("netsh interface ipv4 set dns " + adapter + " source = dhcp")
        return
    primary_cmd = ("netsh interface ipv4 set dns " + adapter + " static "
                   + dns_primary + " primary validate=no")
    secondary_cmd = ("netsh interface ipv4 add dns " + adapter + " "
                     + dns_secondary + " index=2 validate=no")
    os.system(primary_cmd)
    os.system(secondary_cmd)
def tail( f, window=20 ):
    """Return the last *window* lines of file object *f* as one string.

    Reads the file backwards in 1024-byte blocks until enough newlines have
    been seen, then joins the collected blocks in file order and keeps the
    final *window* lines.  *f* must support seek/tell/read.
    """
    BUFSIZ = 1024
    f.seek(0, 2)
    remaining = f.tell()          # bytes not yet covered (renamed: was shadowing builtin `bytes`)
    lines_needed = window
    block = -1                    # which block from the end to read next
    data = []
    while lines_needed > 0 and remaining > 0:
        if (remaining - BUFSIZ > 0):
            # Seek back one whole BUFSIZ and read that block.
            f.seek(block*BUFSIZ, 2)
            data.append(f.read(BUFSIZ))
        else:
            # File (or what's left of it) is smaller than a block:
            # start from the beginning and read only the uncovered bytes.
            f.seek(0, 0)
            data.append(f.read(remaining))
        lines_needed -= data[-1].count('\n')
        remaining -= BUFSIZ
        block -= 1
    # FIX: blocks were collected back-to-front; the original joined them in
    # collection order, scrambling any result spanning more than one block.
    return '\n'.join(''.join(reversed(data)).splitlines()[-window:])
def get_last_run_options():
logfile = os.path.join(os.path.dirname(sys.argv[0]), g.logfile)
args = 0
if os.path.isfile(logfile):
f = file(logfile, 'r')
lines = tail(f, 1)
print "Last successful run: " + lines
args = re.search("\[(.*)\]", lines).group(1)
return args
def get_args(argv = sys.argv):
    """Apply command-line flags from *argv*, mutating the shared ``g`` config.

    Some flags act immediately (DNS switches, --reset, --exit call os.system
    and/or sys.exit); the octet/range flags just record values on ``g``.
    NOTE(review): the default ``argv=sys.argv`` is bound once at definition
    time — fine here since the module sets sys.argv before import completes,
    but worth confirming.
    """
    for (index, args) in enumerate(argv):
        if (args == "--reset" or args == "-r"):
            # Revert both DNS and IP address to DHCP, then quit.
            os.system("netsh interface ipv4 set dns \"Wireless Network Connection\" source=dhcp")
            os.system("netsh interface ipv4 set address \"Wireless Network Connection\" source=dhcp")
            sys.exit(0)
        elif (args == "--last-run" or args == "-l"):
            # Replay the options recorded on the last successful run by
            # re-parsing them recursively.
            arg_list = get_last_run_options()
            if arg_list:
                g.arg_list = arg_list
                args_f = g.arg_list.split()
                get_args(args_f)
            break
        elif (args == "--googledns" or args == "-gdns"):
            set_dns("8.8.8.8", "8.8.4.4")
        elif (args == "--autodns" or args == "-adns"):
            set_dns("auto")
        elif (args == "--exit" or args == "-q"):
            sys.exit(0)
        elif (args == "--start_iter" or args == "-s"):
            g.start = int(argv[index+1])
        elif (args == "--end_iter" or args == "-e"):
            g.end = int(argv[index+1])
        elif (args == "--start_byte" or args == "-b1"):
            g.start_byte = argv[index+1]
        elif (args == "--second_byte" or args == "-b2"):
            g.second_byte = argv[index+1]
        elif (args == "--third_byte" or args == "-b3"):
            g.third_byte = argv[index+1]
def main():
    """Scan candidate static IPs on the wireless adapter until one reaches
    the internet, then log the winning address and the options used."""
    print "\nCommand : " + str(sys.argv) + "\n"
    if (len(sys.argv) < 2):
        # No CLI arguments: optionally replay the last logged run.
        if (g.auto_set): get_args(["-l"])
        else : get_args()
    if (g.arg_list == ""):
        # Remember the raw options so a successful run can be replayed later.
        for arg in sys.argv[1:]:
            g.arg_list = g.arg_list + arg + " "
    # Fill in defaults for any octet not supplied on the command line.
    if (g.start_byte == ""):
        g.start_byte = 10
    if (g.second_byte == ""):
        g.second_byte = 89
    if (g.third_byte == ""):
        g.third_byte = 3
    subnet = "255.255.252.0"
    gateway = "10.89.0.1"
    # NOTE(review): dns_primary/dns_secondary are assigned but never used in
    # this function — presumably leftovers; confirm before removing.
    dns_primary = "8.8.8.8"
    dns_secondary = "8.8.4.4"
    ping_addr = "google.com"
    if (g.start == ""):
        g.start = 0
    if (g.end == ""):
        g.end = 255
    x = range(g.start,g.end)
    for last_byte in x:
        ip_addr = str(g.start_byte) + "." + str(g.second_byte) + "." + str(g.third_byte) + "." + str(last_byte)
        print "\nChecking " + ip_addr
        # Assign the candidate address, then ping twice (with pauses) to test
        # whether the address actually has connectivity.
        output = os.system("netsh interface ipv4 set address \"Wireless Network Connection\" static " + ip_addr + " " + subnet + " " + gateway + " & sleep 3 & ping " + ping_addr + " & sleep 3 & ping " + ping_addr)
        if (output == 0):
            print "Success : " + ip_addr
            # Append a timestamped entry; seek back ~2KB so only the log tail
            # is scanned for today's date header.
            f = file(os.path.dirname(sys.argv[0]) + "/" + g.logfile, 'a+')
            try:
                f.seek(-2000, 2)
            except:
                f.seek(0, 0)
            log_recent = f.readlines()
            if not re.search(time.strftime("%x"), str(log_recent)):
                f.write("\n\n" + time.strftime("%x") + ":\n")
            f.write("\n" + time.strftime("%X") + ": " + ip_addr + " [" + g.arg_list + "]")
            f.close()
            return
    print "Failed. Unable to determine any working IP address"

if __name__ == "__main__":  # script entry point
    main()
import torch
import math
import random
import os
import subprocess
import numpy
import gym
import matplotlib.pyplot as plt
import time
from heapq import *
from drivingenvs.vehicles.ackermann import AckermannSteeredVehicle
from drivingenvs.envs.base_driving_env import BaseDrivingEnv
from drivingenvs.envs.driving_env_with_vehicles import DrivingEnvWithVehicles
from drivingenvs.priors.lane_following import LaneFollowing
from drivingenvs.planning.input_space_sampler import InputSpaceSampler
from drivingenvs.planning.diayn_sampler import DIAYNSampler
from drivingenvs.planning.max_dispersion_sampler import MaxDispersionSampler
from drivingenvs.planning.hand_designed_sampler import HandDesignedSampler
from drivingenvs.planning.astar import *
# Benchmark the A* skill planner: run 1000 random lane-change episodes and
# report the success rate, average sample count and average planning time.
veh = AckermannSteeredVehicle((4, 2))
sampling_env = BaseDrivingEnv(veh, distance = 2000.0, n_lanes = 5, dt=0.2, max_steps = 100)
env = DrivingEnvWithVehicles(veh, distance = 2000.0, n_lanes = 5, dt=0.2, max_steps = 100)

policy = torch.load('../../scripts/base_env_skills_prior/_best/policy.cpt')

# Earlier sampler choices are kept for reference; only the last binding wins.
sampler = InputSpaceSampler(env)
sampler = DIAYNSampler(sampling_env, policy)
#sampler = pickle.load(open('dispersion_sampler.cpt', 'rb'))
#sampler = MaxDispersionSampler(sampling_env, 10, 100, 10000)
sampler = HandDesignedSampler(sampling_env, LaneFollowing(env, lookahead=10.0))
sampler.env = sampling_env

astar = AStar(env, sampler)
env.reset()
sampling_env.reset()
t_skill = 10
v_df = constant_vel_sim(env.vehicles, 1000, env.dt)

# Goal: reach the centre of a random lane with near-zero heading.
goal_lane = random.randint(0, 4)
goal_y = (env.lane_loc[goal_lane] + env.lane_loc[goal_lane+1])/2
goal_fn = lambda x:x[1] > goal_y-0.5 and x[1] < goal_y+0.5 and x[2].abs() < 0.05
heuristic_fn = lambda x:((x[1] - goal_y) ** 2 + (x[2]) ** 2) ** 0.5

print('Goal y = {}, Start y = {}'.format(goal_y, env.ego_state[1]))

# Aggregate statistics over all runs.
t_total = 0.0
solved = 0
avg_samples = 0
for i in range(1000):
    print('run {}'.format(i))
    env.reset()
    sampling_env.reset()
    v_df = constant_vel_sim(env.vehicles, 1000, env.dt)
    path, n_samples, t_run = astar.astar(env.ego_state, goal_fn, heuristic_fn, env, v_df, t_skill)
    avg_samples += n_samples
    if n_samples > 0:
        solved += 1
        # FIX: the original did ``t_avg += t_avg`` (doubling the previous
        # value) instead of accumulating this run's planning time.
        t_total += t_run

print('Success = {}, samples = {}, t = {}'.format(solved/1000, avg_samples/1000, t_total/1000))
|
# Changed all TimeField() with CharField() to resolve
# timezone issue in Postgres on server (have to resolve this)
from django.db import models
from django.contrib.auth.models import User
from django.core.validators import MinValueValidator ,MaxValueValidator
class UserQuickLook(models.Model):
    """Daily "quick look" snapshot for a user; the detail models below each
    hold a one-to-one reference back to a row of this table."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    created_at = models.DateField()                      # day the snapshot covers
    updated_at = models.DateTimeField(auto_now=True)     # touched on every save

    def __str__(self):
        dtime = str(self.created_at)
        return "{}-{}".format(self.user.username,dtime)

    class Meta:
        # At most one snapshot per user per day.
        unique_together = ("user", "created_at")
class Grades(models.Model,):
    """Letter grades and GPA-style scores computed for one daily snapshot.

    NOTE(review): the stray trailing comma in the base-class list is legal but
    almost certainly unintended.  The OneToOneField below has no ``on_delete``,
    which is only valid on Django < 2.0 — confirm the project's version.
    """
    GRADE_CHOICES = (
        ('A','A'),
        ('B','B'),
        ('C','C'),
        ('D','D'),
        ('F','F'),
        ('N/A','N/A')
    )
    user_ql = models.OneToOneField(UserQuickLook, related_name = "grades_ql")
    overall_health_grade = models.CharField(choices=GRADE_CHOICES, max_length=3,blank=True)
    overall_health_gpa = models.FloatField(blank=True,null=True)
    movement_non_exercise_steps_grade = models.CharField(choices=GRADE_CHOICES,
        max_length=3,blank=True)
    movement_non_exercise_steps_gpa = models.FloatField(blank=True,null=True)
    movement_consistency_grade = models.CharField(choices=GRADE_CHOICES, max_length=3, blank=True)
    avg_sleep_per_night_grade = models.CharField(choices=GRADE_CHOICES, max_length=3, blank=True)
    avg_sleep_per_night_gpa = models.FloatField(blank=True,null=True)
    exercise_consistency_grade = models.CharField(choices=GRADE_CHOICES, max_length=3, blank=True)
    exercise_consistency_score = models.FloatField(blank=True, null=True)
    overall_workout_grade = models.CharField(choices=GRADE_CHOICES, max_length=3,blank=True)
    overall_workout_gpa = models.FloatField(blank=True, null=True)
    workout_duration_grade = models.CharField(choices=GRADE_CHOICES, max_length=3,blank=True)
    workout_duration_gpa = models.FloatField(blank=True, null=True)
    workout_effortlvl_grade = models.CharField(choices=GRADE_CHOICES, max_length=3,blank=True)
    workout_effortlvl_gpa = models.FloatField(blank=True, null=True)
    avg_exercise_hr_grade = models.CharField(choices=GRADE_CHOICES, max_length=3,blank=True)
    avg_exercise_hr_gpa = models.FloatField(blank=True, null=True)
    prcnt_unprocessed_food_consumed_grade = models.CharField(choices=GRADE_CHOICES,
        max_length=3, blank=True)
    prcnt_unprocessed_food_consumed_gpa = models.FloatField(blank=True, null=True)
    alcoholic_drink_per_week_grade = models.CharField(choices=GRADE_CHOICES, max_length=3,blank=True)
    alcoholic_drink_per_week_gpa = models.FloatField(blank=True,null=True)
    # Penalties subtracted from the overall GPA for risky behaviours.
    sleep_aid_penalty = models.FloatField(blank=True, null=True)
    ctrl_subs_penalty = models.FloatField(blank=True, null=True)
    smoke_penalty = models.FloatField(blank=True, null=True)
class ExerciseAndReporting(models.Model):
    """Self-reported and device-reported exercise data for one daily snapshot.

    Several time-like fields are CharFields instead of TimeFields — see the
    module header: a Postgres timezone issue forced the switch (to revisit).
    """
    # Constants backing the choice tuples below.
    Low = 'low'
    Medium = 'medium'
    High = 'high'
    Yes = 'yes'
    No = 'no'
    Easy = 'easy'
    MEDIUM = 'medium'
    Hard = 'hard'
    TR = 'trademil run'
    OR = 'outdoor run'
    Bike = 'bike'
    Swim = 'swim'
    Elliptical = 'elliptical'
    CS = 'controlled substances'
    ID = 'illicit drugs'
    SA = 'sleep aids'
    STRESS_LEVEL_CHOICES = (
        ( Low,'low'),
        ( Medium,'medium'),
        ( High,'high'),
    )
    WORKOUT_TYPE = (
        (TR, 'Trademil Run'),
        (OR, 'Outdoor Run'),
        (Bike, 'Bike'),
        (Swim, 'Swim'),
        (Elliptical, 'Elliptical'),
    )
    EH_CHOICES = (
        (Easy, 'Easy'),
        (MEDIUM,'Medium'),
        (Hard, 'Hard'),
    )
    YN_CHOICES = (
        (Yes, 'Yes'),
        (No, 'No'),
    )
    DRUGS = (
        (CS,'Controlled Substances'),
        (ID, 'Illicit Drugs'),
        (SA, 'Sleep Aids')
    )
    # NOTE(review): OneToOneField without on_delete — Django < 2.0 only.
    user_ql = models.OneToOneField(UserQuickLook, related_name = "exercise_reporting_ql")
    did_workout = models.CharField(choices = YN_CHOICES,max_length=10, blank=True)
    workout_easy_hard = models.CharField(choices=EH_CHOICES, max_length=10, blank=True)
    workout_type = models.CharField(choices=WORKOUT_TYPE, max_length=20, blank=True)
    workout_time = models.CharField(max_length=10, blank=True)
    # workout_time = models.TimeField()
    workout_location = models.TextField(blank=True)
    workout_duration = models.CharField(max_length=10,blank=True)
    # workout_duration = models.TimeField()
    maximum_elevation_workout = models.IntegerField(blank=True,null=True)
    minutes_walked_before_workout = models.CharField(max_length=10,blank=True)
    # minutes_walked_before_workout = models.TimeField()
    distance_run = models.FloatField(blank=True,null=True)
    distance_bike = models.FloatField(blank=True,null=True)
    distance_swim = models.FloatField(blank=True,null=True)
    distance_other = models.FloatField(blank=True,null=True)
    pace = models.CharField(max_length=10,blank=True)
    # pace = models.TimeField()
    avg_heartrate = models.TextField(blank=True)
    activities_duration = models.TextField(blank=True)
    avg_exercise_heartrate = models.FloatField(blank=True,null=True)
    avg_non_strength_heartrate = models.FloatField(blank=True,null=True)
    total_exercise_activities = models.PositiveIntegerField(blank=True,null=True)
    total_strength_activities = models.PositiveIntegerField(blank=True,null=True)
    elevation_gain = models.IntegerField(blank=True,null=True)
    elevation_loss = models.IntegerField(blank=True,null=True)
    effort_level = models.PositiveIntegerField(blank=True,null=True)
    # Weather at workout time.
    dew_point = models.FloatField(blank=True,null=True)
    temperature = models.FloatField(blank=True,null=True)
    humidity = models.FloatField(blank=True,null=True)
    temperature_feels_like = models.FloatField(blank=True,null=True)
    wind = models.FloatField(blank=True,null=True)
    # Heart-rate-recovery (HRR) measurements after the workout.
    hrr_time_to_99 = models.CharField(max_length=10,blank=True)
    hrr_starting_point = models.IntegerField(blank=True,null=True)
    hrr_beats_lowered_first_minute = models.IntegerField(blank=True,null=True)
    resting_hr_last_night = models.IntegerField(blank=True,null=True)
    lowest_hr_during_hrr = models.IntegerField(blank=True, null=True)
    highest_hr_first_minute = models.IntegerField(blank=True, null=True)
    vo2_max = models.FloatField(blank=True,null=True)
    running_cadence = models.IntegerField(blank=True,null=True)
    nose_breath_prcnt_workout = models.FloatField(
        validators=[MinValueValidator(0),MaxValueValidator(100)],
        blank=True,null=True)
    water_consumed_workout = models.FloatField(blank=True,null=True)
    chia_seeds_consumed_workout = models.IntegerField(
        validators = [MinValueValidator(0),MaxValueValidator(20)],
        blank=True,null=True)
    fast_before_workout = models.CharField(choices= YN_CHOICES, max_length=3,blank=True)
    pain = models.CharField(choices=YN_CHOICES, max_length=3, blank=True)
    pain_area = models.TextField(blank=True)
    stress_level = models.CharField(choices=STRESS_LEVEL_CHOICES, max_length=6,
        blank=True)
    sick = models.CharField(choices=YN_CHOICES, max_length=3,blank=True)
    drug_consumed = models.CharField(choices=YN_CHOICES, max_length=3, blank=True)
    drug = models.TextField(blank=True)
    medication = models.TextField(blank=True)
    smoke_substance = models.CharField(choices=YN_CHOICES, max_length=3,blank=True)
    exercise_fifteen_more = models.CharField(choices=YN_CHOICES, max_length=3,blank=True)
    workout_elapsed_time = models.CharField(max_length=10,blank=True)
    # workout_elapsed_time = models.TimeField()
    timewatch_paused_workout = models.CharField(max_length=10,blank=True)
    # timewatch_paused_workout = models.TimeField()
    exercise_consistency = models.FloatField(validators=[MinValueValidator(0),MaxValueValidator(7)],
        blank=True,null=True)
    heartrate_variability_stress = models.IntegerField(blank=True,null=True)
    fitness_age = models.IntegerField(blank=True,null=True)
    workout_comment = models.TextField(blank=True)
class SwimStats(models.Model):
    """Swim-specific metrics for one daily snapshot."""
    user_ql = models.OneToOneField(UserQuickLook, related_name = "swim_stats_ql")
    pace_per_100_yard = models.FloatField(blank=True,null=True)
    total_strokes = models.IntegerField(blank=True,null=True)
class BikeStats(models.Model):
    """Cycling-specific metrics for one daily snapshot."""
    user_ql = models.OneToOneField(UserQuickLook, related_name = "bike_stats_ql")
    avg_speed = models.FloatField(blank=True,null=True)
    avg_power = models.FloatField(blank=True,null=True)
    avg_speed_per_mile = models.FloatField(blank=True,null=True)
    avg_cadence = models.FloatField(blank=True,null=True)
class Steps(models.Model):
    """Step counts and related movement data for one daily snapshot."""
    user_ql = models.OneToOneField(UserQuickLook, related_name = "steps_ql")
    non_exercise_steps = models.PositiveIntegerField(blank=True,null=True)
    exercise_steps = models.PositiveIntegerField(blank=True,null=True)
    total_steps = models.PositiveIntegerField(blank=True,null=True)
    floor_climed = models.PositiveIntegerField(blank=True,null=True)
    # Free-form/serialized payloads; schema not defined at the DB level.
    movement_consistency = models.TextField(blank=True)
    weight = models.TextField(blank=True)
class Sleep(models.Model):
    """Sleep tracking data for one daily snapshot.

    Durations/times are stored as CharFields (see module header about the
    Postgres timezone issue); the intended TimeField versions are kept in
    the commented-out lines below.
    """
    Yes = 'yes'
    No = 'no'
    YN_CHOICES = (
        (Yes, 'Yes'),
        (No, 'No'),
    )
    user_ql = models.OneToOneField(UserQuickLook, related_name = "sleep_ql")
    sleep_per_wearable = models.CharField(max_length=10,blank=True)
    # sleep_per_wearable = models.TimeField()
    sleep_per_user_input = models.CharField(blank=True,max_length=10)
    # sleep_per_user_input = models.TimeField(blank=True,null=True)
    sleep_aid = models.CharField(choices=YN_CHOICES, max_length=3,blank=True)
    # TODO : AM or PM should be taken care
    sleep_bed_time = models.CharField(max_length=20,blank=True)
    sleep_awake_time = models.CharField(max_length=20,blank=True)
    deep_sleep = models.CharField(max_length=10,blank=True)
    light_sleep = models.CharField(max_length=10,blank=True)
    awake_time = models.CharField(max_length=10,blank=True)
    sleep_comments = models.TextField(blank=True)
    rem_sleep = models.CharField(max_length=10,blank=True)
    restless_sleep = models.CharField(max_length=10,blank=True)
    # sleep_bed_time = models.TimeField()
    # sleep_awake_time = models.TimeField()
    # deep_sleep = models.TimeField()
    # light_sleep = models.TimeField()
    # awake_time = models.TimeField()
class Food(models.Model):
    """Nutrition reporting for one daily snapshot."""
    user_ql = models.OneToOneField(UserQuickLook, related_name = "food_ql")
    prcnt_non_processed_food = models.FloatField(validators=[
        MinValueValidator(0),MaxValueValidator(100)],
        blank=True,null=True)
    non_processed_food = models.TextField(blank=True)
    processed_food = models.TextField(blank=True)
    no_plants_consumed_ql = models.CharField(max_length=5,blank=True,null=True)
    list_of_pants_consumed_ql = models.TextField(blank=True,null=True)
    # choices are not provided, will be choice field in the future
    diet_type = models.TextField(blank=True)
class Alcohol(models.Model):
    """Alcohol consumption for one daily snapshot."""
    user_ql = models.OneToOneField(UserQuickLook, related_name = "alcohol_ql")
    alcohol_day = models.CharField(max_length = 4,blank=True)   # drinks that day
    alcohol_week = models.FloatField(blank=True,null=True)      # rolling weekly total
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Juaumpc
"""
from Token import Token
from SymbolTable import SymbolTable
from TableEntry import TableEntry
from SymbolTableTree import SymbolTableTree
from ASA import *
class Syntactic():
    """Recursive-descent parser for the "c-small" toy language.

    Consumes the token stream produced by the lexer, builds an abstract
    syntax tree rooted at ``self.no`` and fills a symbol table with declared
    identifiers.  Tokens are compared via ``getCodigoToken()``; runtime
    strings (error messages, file paths) are intentionally left in
    Portuguese, only comments were translated.
    """
    # Class-level placeholders; all replaced with real objects in __init__.
    token = ''
    arrayToken = []
    indexToken = ''
    no = ''
    symbolTableTree = ''
    tableEntry = ''
    actualTable = ''

    def __init__ (self, arrayToken):
        # Parser state: the token list, a cursor, the current token, the
        # symbol-table tree (one scope for now) and the AST root.
        self.arrayToken = arrayToken
        self.token = self.arrayToken[0]
        self.indexToken = 0
        self.actualTable = SymbolTable()
        self.symbolTableTree = SymbolTableTree(self.actualTable)
        self.no = AST('AST')

    def match(self,tok):
        # Consume the current token iff its code equals *tok*; otherwise
        # report it as invalid.  Advancing stops at the end of the stream.
        if(self.token.getCodigoToken() == tok):
            '''for k,v in self.actualTable.symbolTable.items():
                print(v.toString())'''
            self.indexToken = self.indexToken + 1
            if (self.indexToken < len(self.arrayToken)):
                self.token = self.arrayToken[self.indexToken]
        else:
            print('token invalido ' + self.token.getCodigoToken())

    def imprimeErro(self):
        # Error reporting/recovery stub: skip one token and continue, so
        # later errors can still be detected.
        i = self.indexToken - 1;
        #print('Tokens ' + str(Follow[sync_token.type]) + ' esperados na entrada.')
        # keep parsing to detect further errors
        self.indexToken = self.indexToken + 1
        self.token = self.arrayToken[self.indexToken]
        #sincroniza(sync_token)

    def program(self):
        """Entry point: parse ``int main() { ... }``, then dump the symbol
        table and the AST (as XML) to the output files."""
        #match first token for any code in c-small
        print(self.token.__str__())
        self.match('INT')
        self.match('MAIN')
        self.match('LBRACKET')
        self.match('RBRACKET')
        self.match('LBRACE')
        print(self.token.value)
        #start recursion and build ASA
        print('bla ' + self.no.nome)
        self.decl_comand(self.no)
        print('analise sintática realizada com sucesso')
        print('resultado')
        print(self.no.children)
        print_tree(self.no)
        a = open('../../tp2/output/saidateste.txt','w')
        for k,v in self.actualTable.symbolTable.items():
            a.write(v.toString() + '\r\n')
        ToXML.toXML(self.no)
        a.close()

    def decl_comand(self,no):
        # decl_comand -> declaration decl_comand | comand decl_comand | eps
        if(self.token.getCodigoToken() == 'INT' or self.token.getCodigoToken() == 'FLOAT'):
            self.declaration(no)
            if(self.token.getCodigoToken() == 'INT' or self.token.getCodigoToken() == 'FLOAT' or self.token.getCodigoToken() == 'LBRACE' or self.token.getCodigoToken() == 'ID' or self.token.getCodigoToken() == 'IF' or self.token.getCodigoToken() == 'WHILE' or self.token.getCodigoToken() == 'READ' or self.token.getCodigoToken() == 'PRINT' or self.token.getCodigoToken() == 'FOR'):
                self.decl_comand(no)
        elif(self.token.getCodigoToken() == 'LBRACE' or self.token.getCodigoToken() == 'ID' or self.token.getCodigoToken() == 'IF' or self.token.getCodigoToken() == 'WHILE' or self.token.getCodigoToken() == 'READ' or self.token.getCodigoToken() == 'PRINT' or self.token.getCodigoToken() == 'FOR'):
            no3 = self.comand()
            if(not(no3 is None)):
                no.children.append(no3)
            if(self.token.getCodigoToken() == 'INT' or self.token.getCodigoToken() == 'FLOAT' or self.token.getCodigoToken() == 'LBRACE' or self.token.getCodigoToken() == 'ID' or self.token.getCodigoToken() == 'IF' or self.token.getCodigoToken() == 'WHILE' or self.token.getCodigoToken() == 'READ' or self.token.getCodigoToken() == 'PRINT' or self.token.getCodigoToken() == 'FOR'):
                self.decl_comand(no)
        # print('O no attr aqui ')
        print(self.no.children)

    def types(self):
        # Record the declared type on the pending symbol-table entry.
        if(self.token.getCodigoToken() == 'INT'):
            self.match('INT')
            self.tableEntry.setTipo('int')
        elif(self.token.getCodigoToken() == 'FLOAT'):
            self.match('FLOAT')
            self.tableEntry.setTipo('float')

    def declaration(self, no):
        # declaration -> types ID declaration2
        if (self.token.getCodigoToken() == 'INT' or self.token.getCodigoToken() == 'FLOAT'):
            self.tableEntry = TableEntry(None, None, None, None)
            self.types()
            self.tableEntry.setLexema(self.token.getLexema())
            self.tableEntry.setNumLinha(self.token.getNumLinha())
            # beginning of AST construction (translated)
            no_id = ''
            if(self.token.getCodigoToken() == 'ID'):
                no_id= Id(self.token)
            self.match('ID')
            no_attr = None
            if(self.token.getCodigoToken() == 'ATTR'):
                no_attr = Assign(no_id, '=', None)
            self.declaration2(self.no, no_attr)

    def declaration2(self, no_pai, no):
        # Continuation of a declaration: ", id ...", ";" or "= expression".
        if (self.token.getCodigoToken() == 'COMMA'):
            # Commit the previous identifier and start a new entry that
            # inherits the declared type.
            self.match('COMMA')
            self.actualTable.symbolTable[self.tableEntry.getLexema()] = self.tableEntry
            lastType = self.tableEntry.getTipo()
            self.tableEntry = TableEntry(None, lastType, None, None)
            self.tableEntry.setLexema(self.token.getLexema())
            self.tableEntry.setNumLinha(self.token.getNumLinha())
            no2 = Id(self.token)
            self.match('ID')
            no_attr = None
            if(self.token.getCodigoToken() == 'ATTR'):
                no_attr = Assign(no2, '=', None)
            self.declaration2(no_pai, no_attr)
        elif(self.token.getCodigoToken() == 'PCOMMA'):
            # ";" — commit the last identifier and reset the pending entry.
            self.match('PCOMMA')
            self.actualTable.symbolTable[self.tableEntry.getLexema()] = self.tableEntry
            self.tableEntry = TableEntry(None, None, None, None)
        elif(self.token.getCodigoToken() == 'ATTR'):
            # "= expression" — attach the initialiser to the Assign node.
            self.match('ATTR')
            no2 = self.expression()
            no.children.append(no2)
            no.right = no2
            no_pai.children.append(no)
            self.declaration2(no_pai, no)

    def comand(self):
        # Dispatch on the current token to the matching statement parser;
        # returns the statement's AST node (or None on no match).
        if (self.token.getCodigoToken() == 'LBRACE'):
            no = self.block()
            return no
        elif(self.token.getCodigoToken() == 'ID'):
            no = self.attr()
            return no
        elif(self.token.getCodigoToken() == 'IF'):
            no = self.comand_if()
            return no
        elif(self.token.getCodigoToken() == 'WHILE'):
            no = self.comand_while()
            return no
        elif(self.token.getCodigoToken() == 'READ'):
            no = self.comand_read()
            return no
        elif(self.token.getCodigoToken() == 'PRINT'):
            no = self.comand_print()
            return no
        elif(self.token.getCodigoToken() == 'FOR'):
            no = self.comand_for()
            return no

    def block(self):
        # "{ decl_comand }" — a compound statement.
        self.match('LBRACE')
        no_block = Compound()
        self.decl_comand(no_block)
        self.match('RBRACE')
        return no_block

    def attr(self):
        # "id = expression ;" — an assignment statement.
        no1 = Id(self.token)
        no_attr = Assign(no1 , '=', None)
        self.match('ID')
        self.match('ATTR')
        no2 = self.expression()
        no_attr.children.append(no2)
        no_attr.right = no2
        self.match('PCOMMA')
        return no_attr

    def comand_if(self):
        # "if ( expression ) comand [else comand]"
        no_if = If(None,None,None)
        self.match('IF')
        self.match('LBRACKET')
        no_expr = self.expression()
        no_if.children.append(no_expr)
        no_if.exp = no_expr
        self.match('RBRACKET')
        no_comand = self.comand()
        no_if.children.append(no_comand)
        # NOTE(review): this compares the Token object itself to 'ELSE';
        # everywhere else the code uses getCodigoToken() — likely this branch
        # can never fire unless Token defines __eq__ against strings. Confirm.
        if(self.token == 'ELSE'):
            no_else = self.comand_else()
            no_if.children.append(no_else)
        return no_if

    def comand_else(self):
        self.match('ELSE')
        no_else = self.comand()
        return no_else

    def comand_while(self):
        # "while ( expression ) comand"
        no_while = While(None,None)
        self.match('WHILE')
        self.match('LBRACKET')
        no_expr = self.expression()
        no_while.children.append(no_expr)
        no_while.exp = no_expr
        self.match('RBRACKET')
        no_comand = self.comand()
        no_while.children.append(no_comand)
        no_while.commands = no_comand
        return no_while

    def comand_read(self):
        # "read id ;"
        no_read = Read(None)
        self.match('READ')
        no_id = Id(self.token)
        no_read.children.append(no_id)
        self.match('ID')
        self.match('PCOMMA')
        return no_read

    def comand_print(self):
        # "print ( expression ) ;"
        no_print = Print(None)
        self.match('PRINT')
        self.match('LBRACKET')
        no_expr = self.expression()
        no_print.children.append(no_expr)
        no_print.exp = no_expr
        self.match('RBRACKET')
        self.match('PCOMMA')
        return no_print

    # no for-statement yet =( (translated)
    def comand_for(self):
        # "for ( att ; expression ; att ) comand"
        no_for = For(None,None,None,None)
        self.match('FOR')
        self.match('LBRACKET')
        no_attr = self.att_for()
        no_for.children.append(no_attr)
        no_for.attr = no_attr
        self.match('PCOMMA')
        no_expr = self.expression()
        no_for.children.append(no_expr)
        no_for.exp = no_expr
        self.match('PCOMMA')
        no_attr2 = self.att_for()
        no_for.children.append(no_attr2)
        no_for.attr2 = no_attr2
        self.match('RBRACKET')
        no_comand = self.comand()
        if(not(no_comand is None)):
            no_for.children.append(no_comand)
            no_for.commands = no_comand
        return no_for

    def att_for(self):
        # Assignment inside a for-header (no trailing ';').
        no_id = Id(self.token)
        self.match('ID')
        no_attr_for = Assign(no_id,'=',None)
        self.match('ATTR')
        no_expr = self.expression()
        no_attr_for.children.append(no_expr)
        no_attr_for.right = no_expr
        return no_attr_for

    def expression(self):
        # expression -> conjunction ('OR' ...)?
        no = self.conjunction()
        if (self.token.getCodigoToken() == 'OR'):
            no_expr_opc = self.expressaoOpc()
            no_expr_opc.children.append(no)
            no_expr_opc.left = no
            return no_expr_opc
        return no

    def expressaoOpc(self):
        # Right side of an OR chain.
        no_expr_opc = LogicalOp('OR', None, None)
        self.match('OR')
        # NOTE(review): the result of conjunction() is discarded here, so the
        # OR node never receives its right operand — compare with
        # conjuction_opc() below, which does append. Likely a bug.
        self.conjunction()
        if(self.token.getCodigoToken() == 'OR'):
            no_expr_opc2 = self.expressaoOpc()
            # NOTE(review): ``children.left(...)`` would raise AttributeError
            # if this path executes — probably meant ``children.append``.
            no_expr_opc2.children.left(no_expr_opc)
            no_expr_opc2.left = no_expr_opc
            return no_expr_opc2
        return no_expr_opc

    def conjunction(self):
        # conjunction -> equal ('AND' ...)?
        no = self.equal()
        if(self.token.getCodigoToken() == 'AND'):
            no_conj = self.conjuction_opc()
            no_conj.children.append(no)
            no_conj.left = no
        # NOTE(review): when the AND branch fires, ``no_conj`` is built but
        # ``no`` is returned, discarding the AND node — likely should return
        # no_conj in that case.
        return no

    def conjuction_opc(self):
        no_conj = LogicalOp('AND', None, None)
        self.match('AND')
        no = self.equal()
        no_conj.children.append(no)
        no_conj.right = no
        # NOTE(review): Token compared directly to 'AND' (not via
        # getCodigoToken()) — see the note in comand_if().
        if(self.token == 'AND'):
            no_conj2 = self.conjuction_opc()
            no_conj2.children.left(no_conj)
            no_conj2.left = no_conj
            return no_conj2
        return no_conj

    def equal(self):
        # equal -> relation (('EQ'|'NE') ...)?
        no = self.relation()
        if (self.token.getCodigoToken() == 'EQ' or self.token.getCodigoToken() == 'NE'):
            no_equal_opc = self.equal_opc()
            no_equal_opc.children.append(no)
            return no_equal_opc
        return no

    def equal_opc(self):
        no_op_equal = self.op_equal()
        no = self.relation()
        no_op_equal.children.append(no)
        no_op_equal.right = no
        # NOTE(review): Token-vs-string comparison again, and the recursive
        # case appends ``no`` (the relation) instead of ``no_op_equal`` —
        # both look like latent bugs; confirm against the grammar.
        if (self.token == 'EQ' or self.token == 'NE'):
            no_equal_opc2 = self.equal_opc()
            no_equal_opc2.children.append(no)
            return no_equal_opc2
        return no_op_equal

    def op_equal(self):
        if(self.token.getCodigoToken() == 'EQ' ):
            self.match('EQ')
            return RelOp(None, '==', None)
        elif(self.token.getCodigoToken() == 'NE'):
            self.match('NE')
            return RelOp(None, '!=', None)

    def relation(self):
        # relation -> add (('LT'|'LE'|'GT'|'GE') ...)?
        no = self.add()
        if(self.token.getCodigoToken() == 'LT' or self.token.getCodigoToken() == 'LE' or self.token.getCodigoToken() == 'GT' or self.token.getCodigoToken() == 'GE'):
            no_relac_opc = self.relac_opc()
            no_relac_opc.children.append(no)
            no_relac_opc.left = no
            return no_relac_opc
        return no

    def relac_opc(self):
        no_op_rel = self.op_rel()
        no2 = self.add()
        no_op_rel.children.append(no2)
        no_op_rel.right = no2
        # NOTE(review): Token-vs-string comparison, and ``no_op_rel2.append``
        # is missing ``.children`` — would raise AttributeError if executed.
        if(self.token == 'LT' or self.token == 'LE' or self.token == 'GT' or self.token == 'GE'):
            no_op_rel2 = self.relac_opc()
            no_op_rel2.append(no_op_rel)
            no_op_rel2.left = no_op_rel
            return no_op_rel2
        return no_op_rel

    def op_rel(self):
        # Consume one relational operator token and return its RelOp node.
        if (self.token.getCodigoToken() == 'LT'):
            self.match('LT')
            return RelOp(None,'<',None)
        elif(self.token.getCodigoToken() == 'LE'):
            self.match('LE')
            return RelOp(None,'<=',None)
        elif(self.token.getCodigoToken() == 'GT'):
            self.match('GT')
            return RelOp(None, '>', None)
        elif (self.token.getCodigoToken() == 'GE'):
            self.match('GE')
            return RelOp(None, '>=', None)

    def add(self):
        # add -> term (('PLUS'|'MINUS') ...)?
        no = self.term()
        if (self.token.getCodigoToken() == 'PLUS' or self.token.getCodigoToken() == 'MINUS'):
            no_plus_minus = self.add_opc()
            no_plus_minus.children.append(no)
            no_plus_minus.left = no
            return no_plus_minus
        return no

    def add_opc(self):
        no_plus_minus = self.op_add()
        no2 = self.term()
        no_plus_minus.children.append(no2)
        no_plus_minus.right = no2
        if (self.token.getCodigoToken() == 'PLUS' or self.token.getCodigoToken() == 'MINUS'):
            no_plus_minus2 = self.add_opc()
            no_plus_minus2.children.append(no_plus_minus)
            # NOTE(review): self-assignment — almost certainly meant
            # ``no_plus_minus2.left = no_plus_minus``.
            no_plus_minus.left = no_plus_minus
            return no_plus_minus2
        return no_plus_minus

    def op_add(self):
        if(self.token.getCodigoToken() == 'PLUS'):
            no_add = ArithOp('+',None, None)
            self.match('PLUS')
            return no_add
        if(self.token.getCodigoToken() == 'MINUS'):
            no_minus = ArithOp('-',None, None)
            self.match('MINUS')
            return no_minus

    def term(self):
        # term -> fact (('MULT'|'DIV') ...)?
        no = self.fact()
        if(self.token.getCodigoToken() == 'MULT' or self.token.getCodigoToken() == 'DIV'):
            no_div_mult = self.term_opc()
            no_div_mult.children.append(no)
            no_div_mult.left = no
            return no_div_mult
        return no

    def term_opc(self):
        no_div_mult = self.op_mult()
        no2 = self.fact()
        no_div_mult.children.append(no2)
        no_div_mult.right = no2
        # NOTE(review): Token-vs-string comparison and the same
        # self-assignment pattern as add_opc() — confirm intent.
        if(self.token == 'MULT' or self.token == 'DIV'):
            no_div_mult2 = self.term_opc()
            no_div_mult2.children.append(no_div_mult)
            no_div_mult.left = no_div_mult
            return no_div_mult2
        return no_div_mult

    def op_mult(self):
        if(self.token.getCodigoToken() == 'MULT'):
            no_div_mult = ArithOp('*',None,None)
            self.match('MULT')
            return no_div_mult
        elif(self.token.getCodigoToken() == 'DIV'):
            no_div_mult = ArithOp('/',None,None)
            self.match('DIV')
            return no_div_mult

    def fact(self):
        # fact -> ID | INTEGER_CONST | FLOAT_CONST | '(' expression ')'
        if (self.token.getCodigoToken() == 'ID'):
            no = Id(self.token)
            self.match('ID')
            return no
        elif(self.token.getCodigoToken() == 'INTEGER_CONST'):
            no = Num(self.token)
            self.match('INTEGER_CONST')
            return no
        elif(self.token.getCodigoToken() == 'FLOAT_CONST'):
            no = Num(self.token)
            self.match('FLOAT_CONST')
            return no
        elif(self.token.getCodigoToken() == 'LBRACKET'):
            self.match('LBRACKET')
            no = self.expression()
            self.match('RBRACKET')
            return no
|
"""
This is a spider; it is a seperate class from the crawler because here
we can cleanly encapsulate all the real scrapign logic.
Nov, 28, 2016 - Pablo Caruana pablo dot caruana at gmail dot com
"""
import re
import logging
from bs4 import BeautifulSoup
from requests import exceptions
class Spider:
    """Fetch a single page and scrape it for .onion links.

    Results are exposed through instance attributes (success, status_code,
    body, title, links, html) rather than return values.
    """
    def __init__(
            self,
            link,
            user_agent,
            session
    ):
        self.link = link                 # URL to fetch
        self.user_agent = user_agent     # User-Agent header value
        self.session = session           # requests.Session used for fetching
        self.status_code = None          # set only when the link is dead
        self.success = None              # True/False once crawl() completes
        self.spider_err = False          # internal failure (not a dead link)
        self.body = ''                   # full response text
        self.title = ''                  # page <title>, 'N/A' when absent
        self.html = ''                   # first ~510 chars of the response
        self.links = []                  # scraped hrefs (absolute or relative)
        self.log = logging.getLogger()

    def crawl(self):
        """
        Request the link, scrapes for hrefs. Stores status code/MIME type/html
        in respective object variables.
        Returns: nothing
        """
        self.log.info('crawling link: {}'.format(self.link))
        try:
            resp = self.session.get(
                self.link,
                headers={'User-Agent': self.user_agent}
            )
        except ConnectionError as conn_err:
            # NOTE(review): this is the *builtin* ConnectionError, listed
            # before requests' exceptions.ConnectionError below; requests
            # wraps socket errors in its own type, so this branch likely
            # never fires — confirm intent (see TODO).
            # TODO What should we do here?
            self.log.exception('What?')
            self.spider_err = True
            return
        except exceptions.ConnectionError as conn_err:
            # Refused/unreachable: treat as a dead link, not our error.
            self.log.warning(
                'Request to {} denied, marking as dead.'
                .format(self.link)
            )
            self._dead_link()
            return
        except exceptions.Timeout as to_err:
            self.log.warning(
                'Request to {} timed out, marking as dead.'
                .format(self.link)
            )
            self._dead_link()
            return
        except exceptions.RequestException as req_err:
            # Any other requests-internal failure: flag but don't mark dead.
            self.log.exception(
                'Hit internal requests error, failed to spider {}'
                .format(self.link)
            )
            self.spider_err = True
            return
        self.log.info('successfully connected to {}'.format(self.link))
        self.body = resp.text
        # Keep a short preview of the raw HTML (509 chars — reason for the
        # exact figure is not apparent from this file).
        self.html = resp.text[0:509]
        soup = BeautifulSoup(self.body, 'html.parser')
        try:
            self.title = soup.title.string
        except AttributeError:
            # Page has no <title> element.
            self.title = 'N/A'
        self._find_links(soup)
        self.success = True
        self.log.info('Successfully spidered {}'.format(self.link))
        self.log.debug('Scraped data: {0}| {1}| {2}'.format(self.title, len(self.title), self.body[0:50]))
        self.log.debug('Found {} links.'.format(len(self.links)))

    def _dead_link(self, status_code=None):
        # Mark the link as unusable; status_code is optional context.
        self.status_code = status_code
        self.success = False

    def _find_links(self, soup):
        # Collect every anchor that actually carries an href.
        for elem in soup.find_all('a', href=True):
            link = elem['href']
            self._add_link(link)

    def _add_link(self, link):
        # Keep only absolute http(s) .onion URLs or site-relative paths;
        # anything else (mailto:, javascript:, fragments) is dropped.
        absolute_url_re = re.compile(
            '^(http|https):\/\/[A-Za-z0-9]*.onion\/[A-Za-z0-9\/\-._]*'
        )
        relative_url_re = re.compile(
            '^\/[A-Za-z0-9\/\-._]*'
        )
        abs_url = absolute_url_re.match(link)
        if abs_url:
            self.links.append(abs_url.group())
        else:
            rel_url = relative_url_re.match(link)
            if rel_url:
                self.links.append(rel_url.group())
|
from .core import spell, no_spells
from .spells import unpack_keys, unpack_attrs, args_with_source, dict_of, print_args, call_with_name, delegate_to_attr, \
maybe, select_from, magic_kwargs, assigned_names, switch, timeit
try:
from .version import __version__
except ImportError: # pragma: no cover
# version.py is auto-generated with the git tag when building
__version__ = "???"
|
"""
Start the socker websocket server
Usage:
socker [options]
socker -? | --help
socker --version
Options:
-i INTERFACE Listening interface [default: localhost]
-p PORT Listening port [default: 8765]
-v Enable verbose output
--auth-backend=PATH Auth backend path
[default: socker.auth:default_backend]
--redis-host=HOST Redis host [default: localhost]
--redis-port=PORT Redis port [default: 6379]
--redis-db=DB Redis database [default: 0]
--redis-password=PASSWORD Redis password
--logto FILE Log output to FILE instead of console
--version show version
-? --help Show this screen
"""
import asyncio
import logging
import signal
import pkg_resources
from docopt import docopt
from . import log
from .. import server
version = pkg_resources.require('socker')[0].version
logger = logging.getLogger(__name__)
class Interface(object):
    """Command line interface: parse options, wire signals, run the server."""

    def __init__(self):
        self.opts = docopt(__doc__, version='socker v{}'.format(version))
        self.setup_logging()
        self.register_signals()
        self.start()

    def setup_logging(self):
        """Configure log output from the --logto and -v options."""
        filename = self.opts['--logto']
        verbose = self.opts['-v']
        log.configure(filename, verbose)

    def register_signals(self):
        """Install handlers for HUP (reload), INT (abort) and TERM (stop)."""
        # 1; Reload
        signal.signal(signal.SIGHUP, lambda *args: self.reload())
        # 2; Interrupt, ctrl-c
        signal.signal(signal.SIGINT, lambda *args: self.abort())
        # 15; Stop
        signal.signal(signal.SIGTERM, lambda *args: self.stop())

    def start(self):
        """Translate CLI options into server.main() keyword arguments."""
        # '--redis-host' -> 'redis_host', etc.
        redis_opts = {k.replace('--', '').replace('-', '_'): v
                      for k, v in self.opts.items()
                      if '--redis-' in k}
        for key in ['redis_port', 'redis_db']:  # Integer arguments
            redis_opts[key] = int(redis_opts[key])
        server.main(
            interface=self.opts['-i'],
            port=int(self.opts['-p']),
            debug=self.opts['-v'],
            auth_backend=self.opts['--auth-backend'],
            **redis_opts)

    def reload(self):
        # logger.warn() is a deprecated alias of warning().
        logger.warning('--- SIGHUP ---')
        pass  # TODO: Implement

    def abort(self):
        logger.warning('--- SIGINT ---')
        self.safe_quit()

    def stop(self):
        logger.warning('--- SIGTERM ---')
        # Cold exit.
        self.quit()

    def safe_quit(self):
        # TODO: Implement safer way to exit
        logger.info('Closing event loop...')
        asyncio.get_event_loop().stop()

    @staticmethod
    def quit(exit_code=0):
        # asyncio.Task.all_tasks() was removed in Python 3.9; the
        # module-level asyncio.all_tasks() is the supported spelling.
        logger.debug('Pending tasks at exit: %s',
                     asyncio.all_tasks(asyncio.get_event_loop()))
        logger.info('Bye!')
        exit(exit_code)
|
# -*- coding: utf-8 -*-
# Вам дано описание пирамиды из кубиков в формате XML.
# Кубики могут быть трех цветов: красный (red), зеленый (green) и синий (blue).
# Для каждого кубика известны его цвет, и известны кубики, расположенные прямо под ним.
# Пример:
# <cube color="blue">
# <cube color="red">
# <cube color="green">
# </cube>
# </cube>
# <cube color="red">
# </cube>
# </cube>
# Введем понятие ценности для кубиков. Самый верхний кубик, соответствующий корню XML документа имеет ценность 1. Кубики, расположенные прямо под ним, имеют ценность 2. Кубики, расположенные прямо под нижележащими кубиками, имеют ценность 3. И т. д.
# Ценность цвета равна сумме ценностей всех кубиков этого цвета.
# Выведите через пробел три числа: ценности красного, зеленого и синего цветов.
# Sample Input:
# <cube color="blue"><cube color="red"><cube color="green"></cube></cube><cube color="red"></cube></cube>
# Sample Output:
# 4 3 1
from xml.etree import ElementTree

tree = ElementTree.fromstring(input())
result = {'red': 0, 'blue': 0, 'green': 0}


def rec_walk(element, counter):
    """Add `counter` points to this cube's colour, then do the same for
    every cube directly underneath at one point more (iterative
    depth-first traversal; totals are identical to the recursive walk)."""
    pending = [(element, counter)]
    while pending:
        cube, worth = pending.pop()
        result[cube.attrib["color"]] += worth
        pending.extend((child, worth + 1) for child in cube)


rec_walk(tree, 1)
print(result["red"], result["green"], result["blue"])
#!/usr/bin/env python
"""
https://stackoverflow.com/questions/22959698/distance-from-given-point-to-given-ellipse
"""
import os, sys, argparse, logging, textwrap
import numpy as np, math
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
specs_ = lambda s:filter(lambda s:s[0] != "#", filter(None,map(str.strip, textwrap.dedent(s).split("\n"))))
log = logging.getLogger(__name__)
sys.path.insert(0, os.path.expanduser("~")) # assumes $HOME/opticks
from opticks.analytic.gdml import GDML
from opticks.ana.shape import ellipse_points, circle_points
from opticks.ana.gplt import GPlot, add_line
from opticks.ana.gargs import GArgs
if __name__ == '__main__':
    args = GArgs.parse(__doc__)
    g = GDML.parse(args.gdmlpath(2))
    g.smry()
    # Volume indices: 1 = NNVT, 2 = Hamamatsu (presumably -- TODO confirm)
    nnvt = 1
    hama = 2
    lvx = args.lvname(nnvt)
    lv = g.find_one_volume(lvx)
    if lv is None:  # 'is None' rather than '== None' (PEP 8)
        log.fatal("failed to find lvx:[%s] " % (lvx))
    assert lv
    #s = lv.solid
    #s.sub_traverse()
    log.info( "lv %r" % lv )
    lvs = g.get_traversed_volumes( lv, maxdepth=args.maxdepth )
    plt.ion()
    # Combined figure: all volumes drawn together on one axis.
    fig, ax = GPlot.MakeFig(plt, lv, args, recurse=True) # all volumes together
    fig.show()
    path = args.figpath("CombinedFig")
    log.info("saving to %s " % path)
    fig.savefig(path)
    # Split figure: one subplot per traversed volume.
    #axs = GPlot.MultiFig(plt, lvs, args)
    fig, axs = GPlot.SubPlotsFig(plt, [lvs], args)
    fig.show()
    path = args.figpath("SplitFig")
    log.info("saving to %s " % path)
    fig.savefig(path)
    #scribble( axs[0,2] )
#scribble( axs[0,2] )
def scribble(ax):
    """Overlay construction geometry for the PMT neck/torus region on `ax`:
    guide lines through the torus circle centre, plus the ellipse points
    that fall inside the torus circle."""
    mm = 1.
    deg = 2. * np.pi / 360.

    # Torus / tube parameters (units follow the surrounding geometry, mm).
    m4_torus_r = 80.
    m4_torus_angle = 45. * deg
    m4_r_2 = 254. / 2.
    m4_r_1 = (m4_r_2 + m4_torus_r) - m4_torus_r * np.cos(m4_torus_angle)
    m4_h = m4_torus_r * np.sin(m4_torus_angle) + 5.0  # full height of the tube
    # m4_h/2 is the tube-to-torus-centerline offset: torus centerline level
    # with the bottom of the tube.

    neck_z = -210. * mm + m4_h / 2.
    torus_z = neck_z - m4_h / 2
    torus_x = m4_r_2 + m4_torus_r  # radial distance to center of torus circle

    add_line(ax, [-300, torus_z], [300, torus_z])
    add_line(ax, [torus_x, -300], [torus_x, 300])

    e = ellipse_points(xy=[0, -5.], ex=254., ez=190., n=1000000)
    #ax.scatter( e[:,0], e[:,1], marker="." )

    tc = np.array([torus_x, torus_z])
    tr = m4_torus_r
    t = circle_points(xy=tc, tr=tr, n=100)
    #ax.scatter( t[:,0], t[:,1], marker="." )

    # Points on the ellipse that are inside the torus circle.
    e_inside_t = np.sqrt(np.sum(np.square(e - tc), 1)) - tr < 0.
    ax.scatter(e[e_inside_t][:, 0], e[e_inside_t][:, 1], marker=".")
|
# 20/20
# SECTION 2 - FUNCTIONS (20PTS TOTAL)
from math import *

# PROBLEM 1 (Length of String - 3pts)
# Make a function which asks the user to enter a string, then prints the length of that string.
# You will need to use the input() function.
# Make a call to that function
user_text = input("Give me some string: ")
print(len(user_text))

# PROBLEM 2 (Pythagorean theorem - 4pts)
# The Pythagorean theorem states that of a right triangle, the square of the
# length of the diagonal side is equal to the sum of the squares of the lengths
# of the other two sides (or a^2 + b^2 = c^2).
# Write a program that asks the user for the lengths of the two sides that meet at a right angle.
# Then calculate the length of the third side, and display it in a nicely formatted way.
# You may ignore the fact that the user can enter negative or zero lengths for the sides.
try:
    side_a = float(input("Give me a leg: "))
    side_b = float(input("Give me a leg: "))
    hypotenuse = sqrt(side_a ** 2 + side_b ** 2)
    print("The third side is {:0.2f}".format(hypotenuse), "units")
except ValueError:
    print("Legs have to be numbers or I won't give you the hypotenuse.")
# PROBLEM 3 (Biggest, smallest, average - 4pts)
# Make a function to ask the user to enter three numbers.
# Then print the largest, the smallest, and their average, rounded to 2 decimals.
# Display the answers in a "nicely" formatted way.
# Make a call to your function.
def nicefunction(x, y, z):
    """Print the max, min and average of x, y and z, two decimals each.

    Bug fix: the original used (x, y, x) and (x + y + x) / 3, silently
    ignoring the third number z in all three results.
    """
    print("The max is {:0.2f}.".format(round(max(x, y, z), 2)))
    print("The min is {:0.2f}.".format(round(min(x, y, z), 2)))
    print("The avg is {:0.2f}.".format(round((x + y + z) / 3, 2)))
try:
    nicefunction(float(input("First number: ")), float(input("Second number: ")), float(input("Third number: ")))
except ValueError:
    print("Numbers are not words.")
#exceptions. Nice.

# PROBLEM 4 (e to the... - 3pts)
# Calculate the value of e (from the math library) to the power of -1, 0, 1, 2, and 3.
# display the results, with 5 decimals, in a nicely formatted manner.
for i in range(-1, 4):
    # Bug fix: the problem asks for 5 decimals; the original printed only 2.
    print("e^" + str(i) + " = {:0.5f}.".format(e ** i))

# PROBLEM 5 (Random int - 3pts)
# Generate a random integer between 1 and 10 (1 and 10 both included),
# but only use the random() function (randrange is not allowed here)
from random import random
# Bug fix: random() * 9 + 1 printed a float in [1, 10). int(random() * 10) + 1
# yields an integer uniformly distributed over 1..10 as the problem requires.
print(int(random() * 10) + 1)
# PROBLEM 6 (add me, multiply me - 3pts)
# Make a function which takes in two integers and RETURNS their sum AND their product.
def hi(x, y):
    """Return a tuple (x + y, x * y)."""
    return x + y, x * y


total, product = hi(1, 2)
print(total, product)
|
import redis
import tushare as ts

# Default port/db on localhost; keep raw bytes responses.
r = redis.StrictRedis('127.0.0.1', decode_responses=False)

# Download the stock-basics table (indexed by stock code) and preview it.
df = ts.get_stock_basics()
print(df.head())

code_list = list(df.index.values)
# Bug fix: the original passed the whole Python list as a single value,
# which redis-py rejects (lists are not a valid value type); unpacking
# pushes each stock code as its own list element.
r.lpush('code', *code_list)
##############################################################################
#
# Copyright (C) 2020-2030 Thorium Corp FP <help@thoriumcorp.website>
#
##############################################################################
from odoo import api, fields, models, _
from odoo.exceptions import MissingError
import requests
import json
import datetime
# __all__ = ['CreateLabTestOrderInit', 'CreateLabTestOrder', 'RequestTest',
# 'RequestPatientLabTestStart', 'RequestPatientLabTest']
# class CreateLabTestOrderInit(ModelView):
# 'Create Test Report Init'
# _name = 'thoriumcorp.lab.test.create.init'
class CreateLabTestOrder(models.TransientModel):
    # Wizard that turns the selected patient lab-test requests into lab
    # report records and marks the requests as 'ordered'.
    #
    # NOTE(review): StateView/StateTransition/Button/Pool/Transaction follow
    # Tryton wizard conventions but are never imported in this file, while
    # the class inherits Odoo's models.TransientModel -- this mix cannot run
    # as-is; confirm which framework is intended.
    _name = 'thoriumcorp.lab.test.create'
    _description = 'Create Lab Test Report'

    # Wizard start screen with Cancel / Create Test Order buttons.
    start = StateView('thoriumcorp.lab.test.create.init',
        'thoriumcorp_lab.view_lab_make_test', [
            Button('Cancel', 'end', 'thorium-cancel'),
            Button('Create Test Order', 'create_lab_test', 'thorium-ok', True),
            ])
    create_lab_test = StateTransition()

    def transition_create_lab_test(self):
        """Create one lab report per selected test request, then mark the
        requests as 'ordered' and close the wizard."""
        TestRequest = Pool().get('thoriumcorp.patient.lab.test')
        Lab = Pool().get('thoriumcorp.lab')
        tests_report_data = []
        # Operate on the records selected in the UI (active_ids).
        tests = TestRequest.browse(Transaction().context.get('active_ids'))
        for lab_test_order in tests:
            test_cases = []
            test_report_data = {}
            # Refuse to create the same order twice.
            if lab_test_order.state == 'ordered':
                self.raise_user_error(
                    "The Lab test order is already created")
            test_report_data['test'] = lab_test_order.name.id
            test_report_data['patient'] = lab_test_order.patient_id.id
            if lab_test_order.doctor_id:
                test_report_data['requestor'] = lab_test_order.doctor_id.id
            test_report_data['date_requested'] = lab_test_order.date
            test_report_data['request_order'] = lab_test_order.request
            # Copy each analyte/criterion definition onto the new report.
            for critearea in lab_test_order.name.critearea:
                test_cases.append(('create', [{
                    'name': critearea.name,
                    'sequence': critearea.sequence,
                    'lower_limit': critearea.lower_limit,
                    'upper_limit': critearea.upper_limit,
                    'normal_range': critearea.normal_range,
                    'units': critearea.units and critearea.units.id,
                }]))
            test_report_data['critearea'] = test_cases
            tests_report_data.append(test_report_data)
        Lab.create(tests_report_data)
        TestRequest.write(tests, {'state': 'ordered'})
        return 'end'
class RequestTest(ModelView):
    # Relation table linking a lab-test request wizard entry to a test type.
    # NOTE(review): ModelView is not imported anywhere in this file --
    # this follows Tryton conventions; confirm the intended framework.
    'Request - Test'
    _name = 'thoriumcorp.request-test'
    _table = 'thoriumcorp_request_test'

    # One row per (request, test) pair.
    request = fields.Many2One('thoriumcorp.patient.lab.test.request.start',
        'Request', required=True)
    test = fields.Many2One('thoriumcorp.lab.test_type', 'Test', required=True)
class RequestPatientLabTestStart(ModelView):
    'Request Patient Lab Test Start'
    _name = 'thoriumcorp.patient.lab.test.request.start'

    # Form fields of the "request lab test" wizard start screen.
    date = fields.DateTime('Date')
    patient = fields.Many2One('thoriumcorp.patient', 'Patient', required=True)
    doctor = fields.Many2One('thoriumcorp.medicalprofessional', 'Doctor',
        help="Doctor who Request the lab tests.")
    tests = fields.Many2Many('thoriumcorp.request-test', 'request', 'test',
        'Tests', required=True)
    urgent = fields.Boolean('Urgent')

    @staticmethod
    def default_date():
        """Default the request date to the current time.

        Bug fix: this module does ``import datetime`` (the module), so the
        original ``datetime.now()`` raised AttributeError at runtime; the
        class lives at ``datetime.datetime``.
        """
        return datetime.datetime.now()

    @staticmethod
    def default_patient():
        """When launched from a patient record, preselect that patient."""
        if Transaction().context.get('active_model') == 'thoriumcorp.patient':
            return Transaction().context.get('active_id')

    @staticmethod
    def default_doctor():
        """Default to the medical professional tied to the current user."""
        pool = Pool()
        HealthProf = pool.get('thoriumcorp.medicalprofessional')
        hp = HealthProf.get_thoriumcorp_professional()
        if not hp:
            RequestPatientLabTestStart.raise_user_error(
                "No medical professional associated to this user !")
        return hp
class RequestPatientLabTest(models.TransientModel):
    # Wizard that files one lab-test record per selected test type.
    # NOTE(review): StateView/StateTransition/Button/Pool are Tryton wizard
    # APIs not imported in this file, while the class inherits Odoo's
    # models.TransientModel -- confirm the intended framework.
    _name = 'thoriumcorp.patient.lab.test.request'
    _description = 'Request Patient Lab Test'

    start = StateView('thoriumcorp.patient.lab.test.request.start',
        'thoriumcorp_lab.patient_lab_test_request_start_view_form', [
            Button('Cancel', 'end', 'thorium-cancel'),
            Button('Request', 'request', 'thorium-ok', default=True),
            ])
    request = StateTransition()

    def transition_request(self):
        """Create one lab-test record per selected test; all records share a
        single request number drawn from the configured sequence."""
        PatientLabTest = Pool().get('thoriumcorp.patient.lab.test')
        Sequence = Pool().get('ir.sequence')
        Config = Pool().get('thoriumcorp.sequences')
        config = Config(1)
        # One request number covers the whole batch of tests.
        request_number = Sequence.get_id(config.lab_request_sequence.id)
        lab_tests = []
        for test in self.start.tests:
            lab_test = {}
            lab_test['request'] = request_number
            lab_test['name'] = test.id
            lab_test['patient_id'] = self.start.patient.id
            if self.start.doctor:
                lab_test['doctor_id'] = self.start.doctor.id
            lab_test['date'] = self.start.date
            lab_test['urgent'] = self.start.urgent
            lab_tests.append(lab_test)
        PatientLabTest.create(lab_tests)
        return 'end'
|
#!/usr/bin/env python
import pysam
import sys
from copy import copy
from vcftagprimersites import read_bed_file
def trim(s, start_pos, end):
    """Soft-clip the aligned segment `s` up to reference position `start_pos`.

    Consumes CIGAR operations from the start (end falsy) or the end (end
    truthy) of the module-global `cigar` list until the reference position
    reaches `start_pos`, then replaces the consumed span with a soft-clip
    (flag 4) of the same query length and writes the new CIGAR back to `s`.

    NOTE(review): this function reads and mutates the module-global `cigar`
    (re-assigned per read in the main loop below), not a parameter --
    confirm callers always reset it between reads.

    :param s: pysam AlignedSegment, modified in place
    :param start_pos: reference coordinate to trim to
    :param end: falsy to trim from the alignment start, truthy from the end
    """
    if not end:
        pos = s.pos
    else:
        pos = s.reference_end
    eaten = 0          # total query bases consumed (becomes the soft-clip length)
    while 1:
        ## chomp stuff off until we reach pos
        if end:
            flag, length = cigar.pop()
        else:
            flag, length = cigar.pop(0)
        #print >>sys.stderr, "Chomped a %s, %s" % (flag, length)
        if flag == 0:
            ## match
            #to_trim -= length
            eaten += length
            if not end:
                pos += length
            else:
                pos -= length
        if flag == 1:
            ## insertion to the ref
            #to_trim -= length
            eaten += length
        if flag == 2:
            ## deletion to the ref
            #eaten += length
            if not end:
                pos += length
            else:
                pos -= length
            pass
        if flag == 4:
            # already soft-clipped: consumes query bases only
            eaten += length
        # Only stop on a match op once start_pos has been reached/passed.
        if not end and pos >= start_pos and flag == 0:
            break
        if end and pos <= start_pos and flag == 0:
            break
    #print >>sys.stderr, "pos:%s %s" % (pos, start_pos)
    # We may have overshot start_pos inside the last match op; give the
    # overshoot back as a match and shorten the soft-clip accordingly.
    extra = abs(pos - start_pos)
    #print >> sys.stderr, "extra %s" % (extra)
    if extra:
        if flag == 0:
            #print >>sys.stderr, "Inserted a %s, %s" % (0, extra)
            if end:
                cigar.append((0, extra))
            else:
                cigar.insert(0, (0, extra))
            eaten -= extra
    if not end:
        # The alignment now starts where trimming stopped.
        s.pos = pos - extra
    #print >>sys.stderr, "New pos: %s" % (s.pos)
    # Replace everything consumed with a single soft-clip of that length.
    if end:
        cigar.append((4, eaten))
    else:
        cigar.insert(0, (4, eaten))
    oldcigarstring = s.cigarstring
    s.cigartuples = cigar
    #print >>sys.stderr, s.query_name, oldcigarstring[0:50], s.cigarstring[0:50]
bed = read_bed_file('all')
def find_primer(pos, direction, primers=None):
    """Return the primer site closest to `pos` for the given direction.

    :param pos: reference coordinate to search from
    :param direction: primer direction to consider ('F' or 'R')
    :param primers: optional iterable of primer dicts (each having at least
        'start' and 'direction' keys); defaults to the module-level `bed`
        records, preserving the original two-argument call signature.
    :return: tuple (abs_distance, signed_distance, primer_dict)
    """
    # Example record:
    # {'Amplicon_size': '1874', 'end': 7651, '#Region': 'region_4', 'start': 7633, 'Coords': '7633', "Sequence_(5-3')": 'GCTGGCCCGAAATATGGT', 'Primer_ID': '16_R'}
    from operator import itemgetter
    if primers is None:
        primers = bed
    candidates = [(abs(p['start'] - pos), p['start'] - pos, p)
                  for p in primers if p['direction'] == direction]
    return min(candidates, key=itemgetter(0))
# Read SAM/BAM from stdin, write (soft-clip) trimmed records as SAM to stdout.
infile = pysam.AlignmentFile("-", "rb")
outfile = pysam.AlignmentFile("-", "wh", template=infile)
for s in infile:
    # `cigar` is the module-global working list consumed/mutated by trim().
    cigar = copy(s.cigartuples)
    # Optional first CLI argument restricts processing to reads whose name
    # starts with that prefix (debugging aid).
    if len(sys.argv) > 1:
        if not s.query_name.startswith(sys.argv[1]):
            continue
    ## logic - if alignment start site is _before_ but within X bases of
    ## a primer site, trim it off
    if s.is_unmapped:
        continue
    # Nearest forward primer to the read start, nearest reverse to the end.
    p1 = find_primer(s.reference_start, 'F')
    p2 = find_primer(s.reference_end, 'R')
    # NOTE(review): Python 2 print statement -- this script cannot parse
    # under Python 3 as written.
    print >>sys.stderr, "%s\t%s\t%s_%s\t%s\t%s\t%s\t%s" % (s.reference_start, s.reference_end, p1[2]['Primer_ID'], p2[2]['Primer_ID'], p1[2]['Primer_ID'], abs(p1[1]), p2[2]['Primer_ID'], abs(p2[1]))
    #amp_len = p1['end'] - p2['end']
    ## if the alignment starts before the end of the primer, trim to that position
    #print >>sys.stderr, s.reference_start, p1[2]['end']
    #print >>sys.stderr, s.reference_end, p2[2]['start']
    """
    if p1[0] < 50:
        if s.reference_start < p1[2]['start'] - 1:
            # trim(s, p1[2]['end']-1, 0)
            trim(s, p1[2]['start']-1, 0)
        else:
            trim(s, s.reference_start + 20, 0)
    if p2[0] < 50:
        if s.reference_end > p2[2]['end'] - 1:
            # trim(s, p2[2]['start']-1, 1)
            trim(s, p2[2]['end']-1, 1)
        else:
            trim(s, s.reference_end - 20, 1)
    """
    # Unconditionally soft-clip 20 reference bases off each end.
    try:
        trim(s, s.reference_start + 20, 0)
        trim(s, s.reference_end - 20, 1)
        outfile.write(s)
    except Exception:
        pass
    # NOTE(review): reads that trim successfully are written TWICE (once in
    # the try block and once here), and reads whose trim raised are written
    # untrimmed -- confirm which single write is intended.
    outfile.write(s)
|
import random
class Quick_sort:
    """In-place quicksort using the leftmost element as pivot."""

    def sort(self, nums):
        """Sort `nums` in place (entry point).

        :type nums: List[int] the array to sort
        """
        self.quick_sort(nums, 0, len(nums) - 1)

    def quick_sort(self, nums, left, right):
        """Recursively sort the inclusive slice nums[left..right].

        :type nums: List[int] the array to sort
        """
        if left < right:
            index = self.partition(nums, left, right)
            self.quick_sort(nums, left, index - 1)
            self.quick_sort(nums, index + 1, right)

    def partition(self, nums, left, right):
        """Partition nums[left..right] around pivot nums[left] and return
        the pivot's final index: elements <= pivot end up on its left,
        elements >= pivot on its right."""
        pivot, i, j = nums[left], left, right
        while i < j:
            # Scan from the right for an element smaller than the pivot,
            # move it into the hole on the left...
            while i < j and nums[j] >= pivot:
                j -= 1
            nums[i] = nums[j]
            # ...then scan from the left for one larger, filling the hole
            # that just opened on the right.
            while i < j and nums[i] <= pivot:
                i += 1
            nums[j] = nums[i]
        # i == j: drop the pivot into its final slot.
        nums[i] = pivot
        return i
if __name__ == '__main__':
    # Tiny smoke test: sort a fixed sample in place and show the result.
    print("start")
    sample = [1, 7, 3, 5, 4, 0]
    sorter = Quick_sort()
    sorter.sort(sample)
    print(sample)
    print("end")
|
#!/usr/bin/env python3
# Test Client application.
#
# This program attempts to connect to all previously verified Flic buttons by this server.
# Once connected, it prints Down and Up when a button is pressed or released.
# It also monitors when new buttons are verified and connects to them as well. For example, run this program and at the same time the scan_wizard.py program.
import fliclib
import requests
# NOTE(review): `tipo` is assigned but never used in this file.
tipo=None
# Connect to the flicd daemon running on this machine.
client = fliclib.FlicClient("localhost")
def got_button(bd_addr):
    """Open a connection channel to the button at `bd_addr` and map its
    events to HTTP commands on the controller at 10.0.1.31."""
    cc = fliclib.ButtonConnectionChannel(bd_addr)
    # Single click -> ENTER; any other click/hold type -> CANCEL.
    cc.on_button_click_or_hold = lambda channel, click_type, was_queued, time_diff: \
        requests.get('http://10.0.1.31/command.htm?key=ENTER') if str(click_type) == 'ClickType.ButtonClick' else requests.get('http://10.0.1.31/command.htm?key=CANCEL')
    # Log connect/disconnect transitions (reason included on disconnect).
    cc.on_connection_status_changed = lambda channel, connection_status, disconnect_reason: \
        print(channel.bd_addr + " " + str(connection_status) + (" " + str(disconnect_reason) if connection_status == fliclib.ConnectionStatus.Disconnected else ""))
    client.add_connection_channel(cc)
def got_info(items):
    """Print the daemon's info blob, then hook up every button it has
    already verified."""
    print(items)
    for verified_addr in items["bd_addr_of_verified_buttons"]:
        got_button(verified_addr)
# Ask the daemon for its current state; the callback connects all buttons.
client.get_info(got_info)
# Buttons verified while this client runs get connected too.
client.on_new_verified_button = got_button
# Blocks forever, dispatching button events.
client.handle_events()
|
from django.db import models
class Post(models.Model):
    """An anonymous board post protected by a per-post password."""

    author = models.CharField(max_length=40)
    password = models.CharField(max_length=200)
    title = models.CharField(max_length=100)
    content = models.TextField(max_length=300)
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)      # refreshed on save

    class Meta:
        db_table = 'posts'

    def __str__(self):
        return f'{self.author} {self.title}'
|
#!/usr/bin/python3
"""Module Matrix-Mul"""
def matrix_mul(m_a, m_b):
    """function that multiplies 2 matrices

    Validates both operands (lists of lists of int/float, rectangular,
    compatible dimensions) and returns the matrix product.
    Error messages are kept exactly as the checker expects.
    """
    if type(m_a) != list:
        raise TypeError("m_a must be a list")
    if type(m_b) != list:
        raise TypeError("m_b must be a list")
    if len(m_a) and not all(type(i) == list for i in m_a):
        raise TypeError("m_a must be a list of lists")
    if len(m_b) and not all(type(i) == list for i in m_b):
        raise TypeError("m_b must be a list of lists")
    if m_a == [] or m_a == [[]]:
        raise ValueError("m_a can't be empty")
    if m_b == [] or m_b == [[]]:
        raise ValueError("m_b can't be empty")
    for row in m_a:
        if not all(type(j) in (int, float) for j in row):
            raise TypeError("m_a should contain only integers or floats")
    for row in m_b:
        if not all(type(j) in (int, float) for j in row):
            raise TypeError("m_b should contain only integers or floats")
    if not all(len(row) == len(m_a[0]) for row in m_a):
        raise TypeError("each row of m_a must be of the same size")
    if not all(len(row) == len(m_b[0]) for row in m_b):
        raise TypeError("each row of m_b must be of the same size")
    if len(m_a[0]) != len(m_b):
        raise ValueError("m_a and m_b can't be multiplied")

    # Transpose m_b so every product entry is a row-by-row dot product.
    # Bug fix: the original multiplication loop bounded its inner loop with
    # len(new[row]) -- indexing the transpose by the m_a row number -- which
    # raised IndexError whenever m_a had more rows than m_b had columns.
    transposed = [list(col) for col in zip(*m_b)]
    return [[sum(a * b for a, b in zip(row, col)) for col in transposed]
            for row in m_a]
|
"""Utilities specific to Go language ecosystem."""
|
import train
import args
import numpy as np
import os
def get_split_index(y, split_values):
    """Return the bucket index for `y` against ascending thresholds.

    Yields i - 1 for the first threshold exceeding y (hence -1 when y is
    below the first threshold) and len(split_values) - 1 when y is >=
    every threshold.
    """
    return next((i - 1 for i, threshold in enumerate(split_values) if y < threshold),
                len(split_values) - 1)
def split_data(data, split_values, splitted_data):
    """Distribute each record into the bucket selected by its record[1]
    value (see get_split_index); buckets are mutated in place."""
    for record in data:
        splitted_data[get_split_index(record[1], split_values)].append(record)
if __name__ == '__main__':
    # NOTE(review): rebinding `args` to the parsed namespace shadows the
    # imported `args` module from here on.
    args = args.get_args()
    np.random.seed(7122)
    data = train.preprocessing(
        args.train_filename,
        args.attributes_filename)
    # Bucket boundaries applied to data[i][1].
    split_values = [2, 14, 22, 30, 40, 60, 80, 100, 130]
    # NOTE(review): 9 thresholds but only 8 buckets -- get_split_index
    # returns -1 for values below 2 (which aliases the LAST bucket) and 8
    # for values >= 130 (which would raise IndexError); confirm the
    # expected value range.
    splitted_data = [list() for _ in range(8)]
    split_data(data, split_values, splitted_data)
    log_path = os.path.join('logs', args.prefix)
    model_path = os.path.join('models', args.prefix)
    train.check_dir(log_path)
    train.check_dir(model_path)
    # Train one model per bucket, each under its own prefix directory.
    for split_index, one_split_data in enumerate(splitted_data):
        train.train(data=np.array(one_split_data),
                    validation=args.validation,
                    prefix=os.path.join(args.prefix,\
                        'split_%d' % split_index),
                    total_epoches=args.epoches,
                    learning_rate=args.learning_rate,
                    save_intervals=args.save_intervals,
                    params_init_model=args.params_init_model)
|
if __name__ == "__main__":
# For direct call only
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
import pytest
import pylo
class TestEvent:
    """Tests for pylo.Event: handlers registered via item assignment are
    invoked when the event is called, can be removed by key, and fire on
    every trigger."""

    def setup_method(self):
        # Fresh Event and fresh trigger flags before every test method.
        self.event = pylo.Event()
        self.reset_triggered_handler()

    def reset_triggered_handler(self):
        """Reset whether the all the handlers have been triggered or not."""
        self.handler_1_triggered = False
        self.handler_2_triggered = False

    def handler1(self):
        """Event handler 1."""
        self.handler_1_triggered = True

    def handler2(self):
        """Event handler 2."""
        self.handler_2_triggered = True

    def test_handlers_executed(self):
        """Test if handlers are executed."""
        # remove all handlers, reset handlers
        self.event.clear()
        self.reset_triggered_handler()
        # add handlers
        self.event["handler_1"] = self.handler1
        self.event["handler_2"] = self.handler2
        # trigger event
        self.event()
        # check if both handlers were executed
        assert self.handler_1_triggered
        assert self.handler_2_triggered

    def test_not_triggering_after_remove(self):
        """Test if hanlders are not triggered anymore if they are added and
        then removed again."""
        # remove all handlers, reset handlers
        self.event.clear()
        self.reset_triggered_handler()
        # add handlers
        self.event["handler_1"] = self.handler1
        self.event["handler_2"] = self.handler2
        # remove handler again
        del self.event["handler_1"]
        # trigger event
        self.event()
        # check if handler 1 is executed but not handler 2
        assert not self.handler_1_triggered
        assert self.handler_2_triggered

    def test_triggered_multiple_times(self):
        """Test if hanlders are triggered multiple times."""
        # remove all handlers, reset handlers
        self.event.clear()
        self.reset_triggered_handler()
        # add handlers
        self.event["handler_1"] = self.handler1
        self.event["handler_2"] = self.handler2
        # trigger event
        self.event()
        # check if both handlers are executed
        assert self.handler_1_triggered
        assert self.handler_2_triggered
        # reset for testing again
        self.reset_triggered_handler()
        # trigger event again
        self.event()
        # check if both handlers are executed again
        assert self.handler_1_triggered
        assert self.handler_2_triggered
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class AntivirusQuarantine(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self):
        """
        AntivirusQuarantine - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        self.swagger_types = {
            'file': 'str',
            'last_istag': 'str',
            'last_scan': 'int',
            'quarantined': 'bool',
            'scan_result': 'str',
            'scan_status': 'str'
        }

        self.attribute_map = {
            'file': 'file',
            'last_istag': 'last_istag',
            'last_scan': 'last_scan',
            'quarantined': 'quarantined',
            'scan_result': 'scan_result',
            'scan_status': 'scan_status'
        }

        self._file = None
        self._last_istag = None
        self._last_scan = None
        self._quarantined = None
        self._scan_result = None
        self._scan_status = None

    @property
    def file(self):
        """
        Gets the file of this AntivirusQuarantine.
        Path of this file, starting with /ifs.

        :return: The file of this AntivirusQuarantine.
        :rtype: str
        """
        return self._file

    @file.setter
    def file(self, file):
        """
        Sets the file of this AntivirusQuarantine.
        Path of this file, starting with /ifs.

        :param file: The file of this AntivirusQuarantine.
        :type: str
        """
        self._file = file

    @property
    def last_istag(self):
        """
        Gets the last_istag of this AntivirusQuarantine.
        The ICAP Service Tag (ISTag) recorded for this file during the last scan, or null if no tag was recorded. For more information about ISTags, see https://tools.ietf.org/html/rfc3507.

        :return: The last_istag of this AntivirusQuarantine.
        :rtype: str
        """
        return self._last_istag

    @last_istag.setter
    def last_istag(self, last_istag):
        """
        Sets the last_istag of this AntivirusQuarantine.
        The ICAP Service Tag (ISTag) recorded for this file during the last scan, or null if no tag was recorded. For more information about ISTags, see https://tools.ietf.org/html/rfc3507.

        :param last_istag: The last_istag of this AntivirusQuarantine.
        :type: str
        """
        self._last_istag = last_istag

    @property
    def last_scan(self):
        """
        Gets the last_scan of this AntivirusQuarantine.
        The date and time this file was last scanned for viruses, as a UNIX timestamp. If null, the file has never been scanned.

        :return: The last_scan of this AntivirusQuarantine.
        :rtype: int
        """
        return self._last_scan

    @last_scan.setter
    def last_scan(self, last_scan):
        """
        Sets the last_scan of this AntivirusQuarantine.
        The date and time this file was last scanned for viruses, as a UNIX timestamp. If null, the file has never been scanned.

        :param last_scan: The last_scan of this AntivirusQuarantine.
        :type: int
        """
        self._last_scan = last_scan

    @property
    def quarantined(self):
        """
        Gets the quarantined of this AntivirusQuarantine.
        If true, this file is quarantined. If false, the file is not quarantined.

        :return: The quarantined of this AntivirusQuarantine.
        :rtype: bool
        """
        return self._quarantined

    @quarantined.setter
    def quarantined(self, quarantined):
        """
        Sets the quarantined of this AntivirusQuarantine.
        If true, this file is quarantined. If false, the file is not quarantined.

        :param quarantined: The quarantined of this AntivirusQuarantine.
        :type: bool
        """
        self._quarantined = quarantined

    @property
    def scan_result(self):
        """
        Gets the scan_result of this AntivirusQuarantine.
        The result of the last scan on this file. This string is usually one of: never_scanned, clean, quarantined, repaired, truncated, infected_no_action_taken, skipped_per_settings. However, a longer string starting with 'unknown_status' and describing the details can also appear in uncommon edge cases.

        :return: The scan_result of this AntivirusQuarantine.
        :rtype: str
        """
        return self._scan_result

    @scan_result.setter
    def scan_result(self, scan_result):
        """
        Sets the scan_result of this AntivirusQuarantine.
        The result of the last scan on this file. This string is usually one of: never_scanned, clean, quarantined, repaired, truncated, infected_no_action_taken, skipped_per_settings. However, a longer string starting with 'unknown_status' and describing the details can also appear in uncommon edge cases.

        :param scan_result: The scan_result of this AntivirusQuarantine.
        :type: str
        """
        self._scan_result = scan_result

    @property
    def scan_status(self):
        """
        Gets the scan_status of this AntivirusQuarantine.
        The scanning status of this file. If 'current', the file was scanned with the most up-to-date virus defintions. If 'not_current', it has either not been scanned, been modified since the last scan, or the virus definitions are not current.

        :return: The scan_status of this AntivirusQuarantine.
        :rtype: str
        """
        return self._scan_status

    @scan_status.setter
    def scan_status(self, scan_status):
        """
        Sets the scan_status of this AntivirusQuarantine.
        The scanning status of this file. If 'current', the file was scanned with the most up-to-date virus defintions. If 'not_current', it has either not been scanned, been modified since the last scan, or the virus definitions are not current.

        :param scan_status: The scan_status of this AntivirusQuarantine.
        :type: str
        """
        allowed_values = ["current", "not_current"]
        if scan_status not in allowed_values:
            raise ValueError(
                "Invalid value for `scan_status`, must be one of {0}"
                .format(allowed_values)
            )
        self._scan_status = scan_status

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # Fix: plain dict.items() replaces six.iteritems -- it works on both
        # Python 2 and 3 and drops the unnecessary compat-shim dependency.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Fix: comparing against a non-AntivirusQuarantine object used to
        # raise AttributeError (e.g. `obj == None` / `obj == 5` crashed);
        # unrelated types now simply compare unequal.
        if not isinstance(other, AntivirusQuarantine):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
import unittest
from katas.kyu_7.unflatten_a_list import unflatten
class UnflattenTestCase(unittest.TestCase):
    """Expected input/output pairs for katas.kyu_7.unflatten_a_list.unflatten."""

    def test_equal_1(self):
        self.assertEqual(unflatten([3, 5, 2, 1]), [[3, 5, 2], 1])

    def test_equal_2(self):
        self.assertEqual(unflatten([1, 4, 5, 2, 1, 2, 4, 5, 2, 6, 2, 3, 3]),
                         [1, [4, 5, 2, 1], 2, [4, 5, 2, 6], 2, [3, 3]])

    def test_equal_3(self):
        self.assertEqual(unflatten([1, 1, 1, 1]), [1, 1, 1, 1])

    def test_equal_4(self):
        # Single element stays flat.
        self.assertEqual(unflatten([1]), [1])

    def test_equal_5(self):
        self.assertEqual(unflatten([99, 1, 1, 1]), [[99, 1, 1, 1]])

    def test_equal_6(self):
        self.assertEqual(unflatten([3, 1, 1, 3, 1, 1]),
                         [[3, 1, 1], [3, 1, 1]])
|
# This is a simple Hello World Code
print("My First Hello World!", "Using Python")

palabra = "Una Palabra"
print("For a variable 'palabra': " + palabra)

# Demonstrate the common string slicing forms, one per line.
for label, fragment in [
    ("palabra[0]=", palabra[0]),
    ("palabra[1:]=", palabra[1:]),
    ("palabra[:1]=", palabra[:1]),
    ("palabra[-1:]=", palabra[-1:]),
    ("palabra[:-1]=", palabra[:-1]),
]:
    print(label + fragment)
|
# Python Program to find factorial of a given number
def fac(x):
    """
    Return x! (factorial) for a non-negative integer x.

    Raises:
        ValueError: if x is negative.
        BUG FIX: the original fell off the end and silently returned None
        for negative input.
    """
    if x < 0:
        raise ValueError("factorial is undefined for negative numbers")
    if x == 0:
        return 1
    return x * fac(x - 1)
|
"""
Tools to import subcatchment paramaters saved as a csv
"""
class Subcatchment(object):
    """One subcatchment record parsed from a CUHP-ordered CSV row."""

    def __init__(self, fields):
        """
        Load parameters from a raw list of CSV fields.

        :param fields: list of subcatch parameters in typical CUHP order
        """
        self.name = fields[0]
        self.area = float(fields[3])                 # area in square miles
        self.imperv = float(fields[7])               # imperviousness as a percentage: 50, 99 (not 0.5 or 0.99)
        self.depress_stor_perv = float(fields[8])    # pervious depression storage (inches)
        self.depress_stor_imperv = float(fields[9])  # impervious depression storage (inches)
        self.horton_init = float(fields[10])         # initial Horton's infiltration (in/hr)
        self.horton_decay = float(fields[11])        # Horton's decay coefficient (1/secs)
        self.horton_final = float(fields[12])        # final Horton's infiltration (in/hr)
        self.fields = fields                         # keep the raw row too

    @staticmethod
    def header():
        """Column names matching the order emitted by __str__."""
        return 'subcatch_id,area,imperv_percent,depress_stor_perv,depress_stor_imperv,hrtn_init, hrtn_decay, hrtn_final'

    def __str__(self):
        parts = (self.name, self.area, self.imperv, self.depress_stor_perv,
                 self.depress_stor_imperv, self.horton_init,
                 self.horton_decay, self.horton_final)
        return ','.join(str(p) for p in parts)
def import_params(param_filename):
    """
    Import subcatchment parameters from a CSV in typical CUHP order (no headings).

    :param param_filename: filename of the CSV file
    :return: list of Subcatchment objects, one per row
    """
    with open(param_filename, 'rt') as infile:
        return [Subcatchment(row.strip().split(',')) for row in infile]
def main():
    """Demo driver: parse the sample CSV and dump each subcatchment."""
    # NOTE: this module uses Python 2 print statements below.
    filename = 'csv/hlc_subcatch.csv'
    scs = import_params(filename)
    print len(scs)
    for item in scs:
        print item.name, item.__dict__
    print
if __name__ == '__main__':
    main()
|
fruit = {"one": "apple",
"two": "pear",
"three": "grape",
"four": "watermelon",
"five": "banana"
}
print(fruit)
print(fruit["two"]) # find a value by key ,key as the index
# and key-value
fruit["six"] = "peach"
print(fruit)
del fruit["six"]
print(fruit)
fruit.clear()
print(fruit)
del fruit
print(fruit) |
from storm.monitoring.sensor.api import units
class Measure(object):
    """A sensor measurement: a value tagged with a supported unit and a note."""

    def __init__(self, value, unit_type, description=''):
        self.value = value
        # Reject unit types that the units registry does not know about.
        if unit_type not in units.Units().get_units():
            raise units.UnitsError(
                'The specified unit type %s is not supported' % str(unit_type))
        self.unit = unit_type
        self.description = description

    def get_value(self):
        """Return the measured value."""
        return self.value

    def get_unit(self):
        """Return the unit label of the measurement."""
        return self.unit

    def get_description(self):
        """Return the free-text description of the measurement."""
        return self.description
|
from ryu.base import app_manager
from ryu.controller.handler import set_ev_cls
from ryu.controller.handler import MAIN_DISPATCHER, CONFIG_DISPATCHER
from ryu.controller import ofp_event
from ryu.lib.packet import packet, ether_types, ethernet, dhcp, ipv4, udp
# DHCP
# The ofproto directory holds two kinds of files: protocol data-structure
# definitions, and protocol parsing (packet-handling) functions.
"""
DHCP OPTION field:
Each Option consists of three parts -- Tag, Len and Data:
1. Tag (1 byte) identifies the purpose of the option, as defined by RFC 2132.
2. Len (1 byte) gives the length of the following Data (Tag=0 and Tag=255
   are special cases with no Len or Data).
3. Data carries the detailed content supplementing the Tag.
"""
class swich(app_manager.RyuApp):
    """
    Minimal DHCP-server switch application for Ryu.

    Intercepts DHCP DISCOVER/REQUEST packets at the controller and answers
    with OFFER/ACK packets built from the configured server identity.

    BUG FIX: the original subclassed the *module* ``app_manager`` (a
    TypeError at class creation); Ryu apps must derive from
    ``app_manager.RyuApp``. The misspelled class name ``swich`` is kept so
    external references keep working.
    """

    def __init__(self, *args, **kwargs):
        super(swich, self).__init__(*args, **kwargs)
        # BUG FIX: the original assignments ended with a trailing comma,
        # turning these attributes into 1-tuples ("",) instead of strings.
        self.DHCP_MAC = ""     # MAC address the DHCP server answers from -- TODO configure
        self.DHCP_server = ""  # IP address of the DHCP server -- TODO configure
        self.ip_addr = ""      # IP address offered to clients (was referenced but never set)

    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        """Install a table-miss flow that punts every packet to the controller."""
        datapath = ev.msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        match = parser.OFPMatch()
        actions = [parser.OFPActionOutput(port=ofproto.OFPP_CONTROLLER,
                                          max_len=ofproto.OFPCML_NO_BUFFER)]
        inst = [parser.OFPInstructionActions(
            type=ofproto.OFPIT_APPLY_ACTIONS,
            actions=actions,
        )]
        mod = parser.OFPFlowMod(datapath=datapath, priority=0,
                                match=match, instructions=inst)
        datapath.send_msg(mod)

    def get_dhcp_state(self, pkt_dhcp):
        """
        Return the DHCP message type name from option tag 53.

        Tag 53 (DHCP Message Type) values:
            1 DHCPDISCOVER, 2 DHCPOFFER, 3 DHCPREQUEST, 4 DHCPDECLINE,
            5 DHCPACK, 6 DHCPNAK, 7 DHCPRELEASE

        :param pkt_dhcp: parsed dhcp.dhcp protocol instance
        :return: 'DHCPDISCOVER', 'DHCPREQUEST', or None for any other type
        """
        dhcp_state = ord(
            [opt for opt in pkt_dhcp.options.option_list if opt.tag == 53][0].value)
        if dhcp_state == 1:
            return 'DHCPDISCOVER'
        if dhcp_state == 3:
            return 'DHCPREQUEST'
        # BUG FIX: the original left `state` unbound for other message types,
        # raising UnboundLocalError; report "no handled state" explicitly.
        return None

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def packet_in_handler(self, ev):
        """Dispatch packet-ins: hand DHCP packets to the DHCP handler."""
        msg = ev.msg
        datapath = msg.datapath
        in_port = msg.match['in_port']
        pkt = packet.Packet(msg.data)
        # BUG FIX: this variable was misleadingly named `pkt_ethernet`
        # although it extracts the DHCP protocol layer.
        pkt_dhcp = pkt.get_protocol(dhcp.dhcp)
        if pkt_dhcp:
            self.DHCP_handler(datapath, in_port, pkt)
        return

    def DHCP_handler(self, datapath, port, pkt):
        """
        Answer DHCPDISCOVER with an OFFER and DHCPREQUEST with an ACK.

        A starting DHCP client has no IP address yet, so its DISCOVER is
        broadcast (src 0.0.0.0, dst 255.255.255.255).
        """
        pkt_dhcp = pkt.get_protocol(dhcp.dhcp)
        dhcp_state = self.get_dhcp_state(pkt_dhcp)
        if dhcp_state == 'DHCPDISCOVER':
            self.send_flow_mod(datapath, port, self.assemble_offer(pkt))
        elif dhcp_state == 'DHCPREQUEST':
            # BUG FIX: the original called send_flow_mod() with no arguments
            # (TypeError) and never built the ACK reply.
            self.send_flow_mod(datapath, port, self.assemble_ack(pkt))

    def send_flow_mod(self, datapath, port, pkt):
        """Serialize *pkt* and emit it out of *port* via an OFPPacketOut."""
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        actions = [parser.OFPActionOutput(port=port)]
        pkt.serialize()  # build the raw bytes for the crafted packet
        # BUG FIX: the original constructed an OFPFlowMod with packet-out
        # fields (buffer_id / in_port / data), which is invalid; sending a
        # crafted packet requires an OFPPacketOut message. It also used
        # OFPCML_NO_BUFFER (a miss_send_len constant) as the buffer id.
        out = parser.OFPPacketOut(
            datapath=datapath,
            buffer_id=ofproto.OFP_NO_BUFFER,
            in_port=ofproto.OFPP_CONTROLLER,
            actions=actions,
            data=pkt.data,
        )
        datapath.send_msg(out)

    def assemble_offer(self, pkt):
        """Transform a DISCOVER packet into a DHCP OFFER reply packet."""
        disc_eth = pkt.get_protocol(ethernet.ethernet)
        disc_ipv4 = pkt.get_protocol(ipv4.ipv4)
        disc = pkt.get_protocol(dhcp.dhcp)
        # Drop request-side options: 55 (Parameter Request List),
        # 53 (DHCP Message Type) and 12 (Host Name).
        for tag in (55, 53, 12):
            disc.options.option_list.remove(
                next(opt for opt in disc.options.option_list if opt.tag == tag))
        # Insert server-side options; the values are empty placeholders --
        # TODO fill in real subnet mask (1), router (3), DNS (6),
        # host name (12), message type (53) and server identifier (54).
        for tag in (1, 3, 6, 12, 53, 54):
            disc.options.option_list.insert(0, dhcp.option(tag=tag, value=""))
        # Build the reply packet layer by layer.
        offer_pkt = packet.Packet()
        offer_pkt.add_protocol(ethernet.ethernet(
            ethertype=disc_eth.ethertype, dst=disc_eth.src, src=self.DHCP_MAC))
        offer_pkt.add_protocol(ipv4.ipv4(
            dst=disc_ipv4.dst, src=self.DHCP_server, proto=disc_ipv4.proto))
        offer_pkt.add_protocol(udp.udp(src_port=67, dst_port=68))
        offer_pkt.add_protocol(dhcp.dhcp(op=2,
                                         chaddr=disc_eth.src,
                                         siaddr=self.DHCP_server,
                                         boot_file=disc.boot_file,
                                         yiaddr=self.ip_addr,
                                         xid=disc.xid,
                                         options=disc.options))
        return offer_pkt

    def assemble_ack(self, pkt):
        """Transform a REQUEST packet into a DHCP ACK reply packet."""
        response_eth = pkt.get_protocol(ethernet.ethernet)
        response_ipv4 = pkt.get_protocol(ipv4.ipv4)
        response = pkt.get_protocol(dhcp.dhcp)
        response.options.option_list.remove(
            next(opt for opt in response.options.option_list if opt.tag == 53))
        # Tag 51 = IP Address Lease Time, tag 53 = DHCP Message Type;
        # values are empty placeholders -- TODO set lease time and ACK (5).
        response.options.option_list.insert(0, dhcp.option(tag=51, value=''))
        response.options.option_list.insert(0, dhcp.option(tag=53, value=""))
        ack_pkt = packet.Packet()
        # BUG FIX: consistently use the configured server identity instead of
        # the empty-string / literal-"dhcp_server" / "ip_addr" placeholders
        # and the undefined lowercase `self.dhcp_server` attribute.
        ack_pkt.add_protocol(ethernet.ethernet(
            ethertype=response_eth.ethertype, dst=response_eth.src,
            src=self.DHCP_MAC))
        ack_pkt.add_protocol(ipv4.ipv4(
            dst=response_ipv4.dst, src=self.DHCP_server,
            proto=response_ipv4.proto))
        ack_pkt.add_protocol(udp.udp(src_port=67, dst_port=68))
        ack_pkt.add_protocol(dhcp.dhcp(op=2, chaddr=response_eth.src,
                                       siaddr=self.DHCP_server,
                                       boot_file=response.boot_file,
                                       yiaddr=self.ip_addr,
                                       xid=response.xid,
                                       options=response.options))
        return ack_pkt
from __future__ import annotations
import typing as T
import re
import os
import logging
import math
from pathlib import Path
from datetime import datetime, timedelta
from . import find
from . import namelist
# Sentinel meaning "parameter absent from the namelist" (see parse_neutral).
NaN = math.nan
def datetime_range(start: datetime, stop: datetime, step: timedelta) -> list[datetime]:
    """
    Generate a range of datetime over a closed interval.

    Like pandas.date_range, the interval is closed: both the start AND the
    stop time are included.

    Parameters
    ----------
    start : datetime
        start time
    stop : datetime
        stop time
    step : timedelta
        time step

    Returns
    -------
    times : list of datetime
        times requested
    """
    n_steps = (stop - start) // step
    return [start + step * k for k in range(n_steps + 1)]
def read_nml(fn: Path) -> dict[str, T.Any]:
    """Parse a Fortran .nml config file into a parameter dict.

    We deliberately avoid the f90nml package for this simple parsing, to
    keep the Python prerequisites minimal.
    """
    fn = find.config(fn)
    params: dict[str, T.Any] = {"nml": fn}
    # Only parse the namelist groups that are actually present in the file.
    for group in {"base", "files", "flags", "setup", "neutral_perturb", "precip", "efield", "glow"}:
        if namelist_exists(fn, group):
            params.update(parse_namelist(fn, group))
    return params
def namelist_exists(fn: Path, nml: str) -> bool:
    """Determine whether namelist group *nml* exists in file *fn*.

    Only looks for the ``&name`` group header; does not check for proper
    formatting of the group body.
    """
    # BUG FIX: re.escape guards against regex metacharacters in the group
    # name, and the trailing \s* tolerates whitespace after "&name"
    # (the original pattern required the name to end the line exactly).
    pat = re.compile(r"^\s*&(" + re.escape(nml) + r")\s*$")
    with fn.open("rt") as f:
        for line in f:
            if pat.match(line) is not None:
                return True
    return False
def parse_namelist(file: Path, nml: str) -> dict[str, T.Any]:
    """
    Parse one Gemini-specific namelist group into a parameter dict.

    Absolute paths are deliberately not resolved here, since that would
    assume the config is used on the same machine.
    """
    r = namelist.read(file, nml)

    # groups with a dedicated parser function
    dispatch = {
        "base": parse_base,
        "flags": parse_flags,
        "files": parse_files,
        "setup": parse_setup,
        "neutral_perturb": parse_neutral,
    }

    if nml in dispatch:
        P = dispatch[nml](r)
    elif nml == "precip":
        P = {
            "dtprec": timedelta(seconds=float(r["dtprec"])),
            "precdir": r["prec_dir"],
        }
    elif nml == "efield":
        P = {
            "dtE0": timedelta(seconds=float(r["dtE0"])),
            "E0dir": r["E0_dir"],
        }
    elif nml == "glow":
        P = {
            "aurmap_dir": r.get("aurmap_dir", "aurmaps"),
            "dtglow": timedelta(seconds=float(r["dtglow"])),
            "dtglowout": float(r["dtglowout"]),
        }
    else:
        raise ValueError(f"Not sure how to parse NML namelist {nml}")

    return expand_simroot(P)
def parse_base(r: dict[str, T.Any]) -> dict[str, T.Any]:
    """Convert the &base group: durations, activity indices and time grid."""
    tdur = timedelta(seconds=float(r["tdur"]))
    dtout = timedelta(seconds=float(r["dtout"]))
    # "activ" holds f107a, f107 and Ap in that order
    f107a, f107, Ap = (float(v) for v in r["activ"][:3])
    P: dict[str, T.Any] = {
        "tdur": tdur,
        "dtout": dtout,
        "f107a": f107a,
        "f107": f107,
        "Ap": Ap,
        "tcfl": float(r["tcfl"]),
        "Teinf": float(r["Teinf"]),
    }
    year, month, day = (int(v) for v in r["ymd"][:3])
    t0 = datetime(year, month, day) + timedelta(seconds=float(r["UTsec0"]))
    # closed-interval list of output times
    P["time"] = datetime_range(t0, t0 + tdur, dtout)
    return P
def parse_flags(r: dict[str, T.Any]) -> dict[str, T.Any]:
    """Coerce every flag value in the &flags group to int."""
    return {key: int(value) for key, value in r.items()}
def parse_files(r: dict[str, T.Any]) -> dict[str, T.Any]:
    """Convert the &files group: input file paths, file format, precision."""
    P: dict[str, T.Any] = {k: r[k] for k in ("indat_file", "indat_grid", "indat_size")}

    # file_format defaults to the type of the input files.
    # BUG FIX: Path.suffix includes the leading dot (".dat"), so the default
    # could never match the ("raw", "dat") check below; strip the dot.
    P["file_format"] = r.get("file_format", Path(P["indat_size"]).suffix.lstrip("."))

    if "realbits" in r:
        P["realbits"] = int(r["realbits"])
    elif P["file_format"] in ("raw", "dat"):
        # legacy flat binary is double precision by default
        P["realbits"] = 64
    else:
        P["realbits"] = 32
    return P
def expand_simroot(P: dict[str, T.Any]) -> dict[str, T.Any]:
    """
    Replace the @GEMINI_SIMROOT@ placeholder in path parameters and convert
    them into expanded Path objects.

    Falls back to ~/gemini_sims when the GEMINI_SIMROOT environment variable
    is not set.
    """
    simroot_key = "@GEMINI_SIMROOT@"
    default_dir = "~/gemini_sims"
    path_keys = (
        "indat_file",
        "indat_grid",
        "indat_size",
        "eq_dir",
        "eq_archive",
        "E0dir",
        "precdir",
        "sourcedir",
        "aurmap_dir",
    )
    for k in path_keys:
        if k in P:
            if P[k].startswith(simroot_key):
                # strip the surrounding '@' to get the env-var name
                root = os.environ.get(simroot_key[1:-1])
                if not root:
                    root = str(Path(default_dir).expanduser())
                    # BUG FIX: the original implicit string concatenation was
                    # missing a separating space ("...SIMROOT.falling back...").
                    logging.warning(
                        f"{k} refers to undefined environment variable GEMINI_SIMROOT. "
                        f"Falling back to {root}"
                    )
                P[k] = P[k].replace(simroot_key, root, 1)
            P[k] = Path(P[k]).expanduser()
    return P
def parse_neutral(r: dict[str, T.Any]) -> dict[str, T.Any]:
    """Convert the &neutral_perturb group; absent numeric keys become NaN."""
    P: dict[str, T.Any] = {
        "interptype": int(r["interptype"]),
        "sourcedir": r["source_dir"],
    }
    for key in ("sourcemlat", "sourcemlon", "dtneu", "dxn", "drhon", "dzn"):
        P[key] = float(r[key]) if key in r else NaN
    return P
def parse_setup(r: dict[str, T.Any]) -> dict[str, T.Any]:
    """
    Normalize the &setup group.

    Values in *r* may be str, list of str, or float.
    """
    int_keys = {
        "lxp",
        "lyp",
        "lq",
        "lp",
        "lphi",
        "gridflag",
        "Efield_llon",
        "Efield_llat",
        "precip_llon",
        "precip_llat",
    }
    P: dict[str, T.Any] = {}
    for key, value in r.items():
        if key in int_keys:
            P[key] = int(value)
        elif key == "eqdir":
            # "eqdir" is obsolete; normalize to "eq_dir"
            P["eq_dir"] = value
        elif key == "setup_functions":
            # always store a list, even for a single function name
            P["setup_functions"] = [value] if isinstance(value, str) else value
        else:
            P[key] = value
    return P
def read_ini(fn: Path) -> dict[str, T.Any]:
    """Parse a legacy fixed-order .ini config file into a parameter dict.

    DEPRECATED: kept for old simulations; prefer read_nml().

    NOTE: the file format is strictly positional -- every readline() below
    must stay in exactly this order.
    """
    fn = find.config(fn)
    with fn.open("rt") as f:
        # line 1: start date "d,m,y" -- reversed into [y, m, d]
        date = list(map(int, f.readline().split()[0].split(",")))[::-1]
        # line 2: UT start seconds
        sec = float(f.readline().split()[0])
        t0 = datetime(date[0], date[1], date[2]) + timedelta(seconds=sec)
        # lines 3-4: simulation duration and output cadence (seconds)
        P: dict[str, T.Any] = {
            "tdur": timedelta(seconds=float(f.readline().split()[0])),
            "dtout": timedelta(seconds=float(f.readline().split()[0])),
        }
        P["time"] = datetime_range(t0, t0 + P["tdur"], P["dtout"])
        # line 5: solar/geomagnetic activity indices "f107a,f107,Ap"
        P["f107a"], P["f107"], P["Ap"] = map(float, f.readline().split()[0].split(","))
        P["tcfl"] = float(f.readline().split()[0])
        P["Teinf"] = float(f.readline().split()[0])
        # solver/output flags, one int per line
        P["potsolve"] = int(f.readline().split()[0])
        P["flagperiodic"] = int(f.readline().split()[0])
        P["flagoutput"] = int(f.readline().split()[0])
        P["flagcap"] = int(f.readline().split()[0])
        # final three lines: input file paths, possibly quoted
        for k in ("indat_size", "indat_grid", "indat_file"):
            P[k] = Path(f.readline().strip().replace("'", "").replace('"', "")).expanduser()
    return P
|
"""
Experiments for studying the learned mean embedding.
Two experiments:
* PCA with several different dynamics.
* Interpolating the latent space between two dynamics.
"""
import os
from datetime import datetime
from pathlib import Path
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from omegaconf import OmegaConf
import torch
from matplotlib.pyplot import rc as rc
from models import ConvNetModel, MLPModel
from datasets import RotNISTDataset, SineData
from utils import get_split
from torch.utils.data import DataLoader
# Global plot styling.
# BUG FIX: the original called sns.set_style('darkgrid') immediately before
# 'white'; the first call had no effect, so only 'white' is kept.
sns.set_style('white')
rc('font', family='serif')
rc('text', usetex=True)

# alpha for sampled prediction lines vs. the ground-truth line, and the
# accent color used for predictions
darkness = 0.15
truth_darkness = 1
color = '#9013FE'
def plot_sine_img(model, b, t, y, t_context, y_context, t_extra, save_folder, time_tag, num_lines_to_plot=25):
    """Plot sampled prediction means vs. ground truth for batch element *b*.

    Draws num_lines_to_plot prediction-mean curves (one per model call, so
    each reflects a fresh latent sample), the true curve, and the context
    points, then saves the figure as a PNG in *save_folder*.
    """
    fig = plt.figure()
    # for a batch element b
    for i in range(num_lines_to_plot):
        # plot a number of predictions (on t_extra); each call draws a new
        # latent sample, so the spread of lines visualizes uncertainty
        p_y_pred, _, _ = model(t_context, y_context, t_extra)
        mu = p_y_pred.loc
        # sort by time so the line is drawn left-to-right
        order = np.argsort(t_extra[b].flatten().cpu().numpy())
        t_, y_ = t_extra[b].cpu().numpy()[order], mu[b].cpu().numpy()[order]
        if i == 0:
            # label only the first line to avoid duplicate legend entries
            plt.plot(t_, y_, alpha=darkness, c=color, zorder=-num_lines_to_plot,
                     label=r'prediction means $\mu_y(z)$ for $z \sim q(z|C)$')
        else:
            plt.plot(t_, y_, alpha=darkness, c=color, zorder=-num_lines_to_plot)
    plt.xlim(-3.14, 3.14)
    plt.ylim(-1.1, 1.1)
    # and plot the context and the full datapoints
    plt.plot(t[b].cpu().numpy(), y[b].cpu().numpy(), c='k', linestyle='--', alpha=truth_darkness, zorder=1,
             label='ground truth')
    plt.scatter(t_context[b].cpu().numpy(), y_context[b].cpu().numpy(), c='green', alpha=truth_darkness, zorder=2,
                label='context')
    plt.xlabel('t')
    plt.legend()
    plt.tight_layout()
    plt.savefig(save_folder / f'sine_img_plot_{time_tag}_{b}.png', dpi=300)
    plt.show()
def plot_rotnist_img(b, mu, T, t_target_rounded, t_target_initial, y_target, t_context, y_context, save_folder,
                     time_tag):
    """Plot a 3-row MNIST strip for batch element *b* and save it as PNG.

    Row 1: ground-truth target frames (black square where no target exists
    at that time); row 2: context frames (black where no context); row 3:
    the model's predicted mean frames.
    """
    # Extract mean of distribution
    im = mu[b].numpy().reshape(T, 28, 28)
    fig = plt.figure(figsize=(T, 3))
    for i, t_i in enumerate(t_target_rounded[b]):
        # row 1: ground truth at t_i, if this time is among the targets
        plt.subplot(3, T, i + 1)
        if t_i in t_target_initial[b]:
            index = torch.where(t_target_initial[b] == t_i)[0].item()
            plt.imshow(y_target[b, index].view(28, 28), cmap='gray')
        else:
            plt.imshow(np.zeros((28, 28)), cmap='gray')
        plt.axis('off')
        # row 2: context frame at t_i, if one was given
        plt.subplot(3, T, i + T + 1)
        if t_i in t_context[b]:
            index = torch.where(t_context[b] == t_i)[0].item()
            plt.imshow(y_context[b, index].view(28, 28), cmap='gray')
        else:
            plt.imshow(np.zeros((28, 28)), cmap='gray')
        plt.axis('off')
        # row 3: model prediction at t_i
        plt.subplot(3, T, i + 2 * T + 1)
        plt.imshow(im[i], cmap='gray')
        plt.axis('off')
    plt.tight_layout()
    plt.savefig(save_folder / f'rotnist_test_{b}_{time_tag}.png', dpi=300)
    plt.show()
def plot_sine_figures(project_dir, time_tag, model_name):
    """Load a trained sine MLP model and plot predictions for several batch elements.

    Parameters
    ----------
    project_dir : Path
        repository root; runs/ and figures/ live beneath it
    time_tag : str
        timestamp used in the output file names
    model_name : str
        run folder name under runs/sine/
    """
    # make the save folder (idiomatic replacement for try/except FileExistsError)
    save_folder = project_dir / f'figures/{model_name}'
    os.makedirs(save_folder, exist_ok=True)

    # load the previously trained model onto the CPU
    device = torch.device('cpu')
    conf = OmegaConf.load(project_dir / f'runs/sine/{model_name}/conf.yaml')
    h_sizes = OmegaConf.to_container(conf.hidden_sizes)
    model = MLPModel(dim_y=1, dim_r=conf.dim_r, dim_z_prime=conf.dim_z_prime, dim_l=conf.dim_l,
                     hidden_sizes_encoder=h_sizes,
                     hidden_sizes_ode_net=h_sizes,
                     hidden_sizes_decoder=h_sizes, t0=-0.1, device=device)
    model_weights = torch.load(project_dir / f'runs/sine/{model_name}/seed_0/model.pth',
                               map_location=device)
    model.load_state_dict(model_weights)

    # create a sine dataset and take one batch
    dataset = SineData()
    dataloader = DataLoader(dataset, batch_size=conf.batch_size, drop_last=True)
    t, y = next(iter(dataloader))
    t_context, y_context, t_extra, y_extra, _, _ = get_split(t, y, test_context_size=conf.test_context_size)

    with torch.no_grad():
        # plot the first 5 batch elements
        # (the original repeated the identical call five times)
        for b in range(5):
            plot_sine_img(model, b, t, y, t_context, y_context, t_extra, save_folder, time_tag)
def plot_rotnist_figures(project_dir, time_tag, model_name):
    """Load a trained RotNIST ConvNet model and plot predicted image strips.

    Parameters
    ----------
    project_dir : Path
        repository root
    time_tag : str
        timestamp used in the output file names
    model_name : str
        run folder name under runs/rotnist/
    """
    # make the save folder (idiomatic replacement for try/except FileExistsError)
    save_folder = project_dir / f'figures/{model_name}'
    os.makedirs(save_folder, exist_ok=True)

    # load the previously trained checkpoint onto the CPU
    device = torch.device('cpu')
    epoch = 140
    conf = OmegaConf.load(project_dir / f'runs/rotnist/{model_name}/conf.yaml')
    h_sizes = OmegaConf.to_container(conf.hidden_sizes)
    model = ConvNetModel(dim_r=conf.dim_r, dim_z_prime=conf.dim_z_prime, dim_l=conf.dim_l,
                         hidden_sizes_ode_net=h_sizes, t0=-0.1, device=device)
    model_weights = torch.load(project_dir / f'runs/rotnist/{model_name}/seed_0/model_ep{epoch}.pth',
                               map_location=device)
    model.load_state_dict(model_weights)

    # load the RotNIST dataset, and get the last len_test samples as test set
    dataset_mnist = RotNISTDataset(data_dir=str(project_dir / 'data'))
    len_test = 10
    dataset_test = dataset_mnist[len(dataset_mnist) - len_test:]
    dataloader_test = DataLoader(dataset_test, batch_size=10, shuffle=False, drop_last=True)
    t_all, y_all = next(iter(dataloader_test))
    t_context, y_context, _, _, t_target_initial, y_target = get_split(t_all, y_all, context_range=(7, 8),
                                                                       extra_target_range=(9, 10))

    # Create a set of target points covering [t_min, t_max] plus extrapolation
    extrapolation = 5
    t_min, t_max = 0., 1.5  # for the rotnist dataset
    t_target = torch.linspace(t_min, t_max + extrapolation / 10, 16 + extrapolation)
    t_target = t_target.view(1, -1, 1).repeat(t_context.shape[0], 1, 1)
    # round times so membership tests against the original time grid succeed
    t_target_rounded = torch.round(t_target * 10 ** 3) / (10 ** 3)

    # get prediction on test samples, and plot the images for 2 examples
    p_y_pred, _, _ = model(t_context, y_context, t_target)
    mu = p_y_pred.loc.detach()
    _, T, _ = t_target.shape
    plot_rotnist_img(0, mu, T, t_target_rounded, t_target_initial, y_target, t_context, y_context, save_folder,
                     time_tag)
    plot_rotnist_img(9, mu, T, t_target_rounded, t_target_initial, y_target, t_context, y_context, save_folder,
                     time_tag)
def plot_sine_training_figures(project_dir, run_name):
    """Plot train/test MSE curves (mean with min/max band) for a sine run."""
    # make the save folder
    save_folder = project_dir / f'figures/{run_name}'
    os.makedirs(save_folder, exist_ok=True)

    def _read_aggregate(metric):
        # One aggregate CSV per metric; the first row is dropped, as in the
        # original hand-expanded reads.
        path = project_dir / f'runs/sine/{run_name}/aggregates/{metric}--{run_name}.csv'
        return pd.read_csv(path, delimiter=';').rename(columns={'Unnamed: 0': 'step'})[1:]

    df_mse_test = _read_aggregate('mse_test_epoch')
    df_mse_train = _read_aggregate('mse_train_epoch')
    # NOTE(review): the loss aggregate was read but never plotted in the
    # original; the read is kept so a missing file still fails loudly.
    df_loss = _read_aggregate('train_loss')

    plt.figure()
    plt.plot(df_mse_test['mean'], label='test MSE')
    plt.fill_between(df_mse_test.step, df_mse_test.amax, df_mse_test.amin, alpha=0.3)
    plt.plot(df_mse_train['mean'], label='train MSE')
    plt.fill_between(df_mse_train.step, df_mse_train.amax, df_mse_train.amin, alpha=0.3)
    plt.xlabel('epoch')
    plt.legend()
    plt.tight_layout()
    # (removed a pointless f-prefix on the constant file name)
    plt.savefig(save_folder / 'sine_training.png', dpi=300)
    plt.show()
if __name__ == '__main__':
    project_dir = Path(__file__).resolve().parents[1]
    # FIX: the strftime format had a pointless f-prefix (no placeholders)
    time_tag = datetime.now().strftime('%Y%m%d_%H%M%S')
    # plot_sine_figures(project_dir, time_tag, '10_full_T_20210222_114949')
    # plot_rotnist_figures(project_dir, time_tag, '10_full_T_20210222_174621')
    # 10_full_T_20210222_114949 also works for dim_l=10
    plot_sine_training_figures(project_dir, '2_full_T_20210222_115004')
|
import os
from click.testing import CliRunner
import pytest
import signal
import app.configfile as configfile
from app.commands import ls, add, move
def test_add(tmp_path):
    """`add` command: rejects a missing destination, then adds once and
    refuses a duplicate configuration name."""
    runner = CliRunner()
    # point the app at a config file inside the isolated filesystem
    configfile.CONFIG_FILE_PATH = './conf.yml'
    with runner.isolated_filesystem():
        # destination directory does not exist yet -> the app reports an
        # error (note: it still exits 0 and prints the message itself)
        result = runner.invoke(
            add, ['-d', './test/to', '-p', '.py', '-n', 'pyfiles'])
        assert result.exit_code == 0
        assert 'Error: Destination is not a valid directory\n' == result.output
        # '-n' given without a value -> click itself errors out
        result = runner.invoke(
            add, ['-d', './test/to', '-p', '.py', '-n'])
        assert result.exit_code != 0  # Click error: missing arg
        # '-n' option missing entirely -> click errors out
        result = runner.invoke(
            add, ['-d', './test/to', '-p', '.py'])
        assert result.exit_code != 0  # Click error: missing option
    with runner.isolated_filesystem():
        os.makedirs('./test/to')
        result = runner.invoke(
            add, ['-d', './test/to', '-p', '.py', '-n', 'pyfiles'])
        assert result.exit_code == 0
        # NOTE(review): this Config object is built but never used or
        # asserted against -- presumably leftover scaffolding; confirm.
        config = configfile.Config({
            'name': 'pyfiles',
            'dest_dir': os.path.abspath('./test/from'),
            'pattern': '.py',
            'patterns': None
        })
        assert 'Added: Name=pyfiles' in result.stdout
        # adding the same configuration name again must be rejected
        result = runner.invoke(
            add, ['-d', './test/to', '-p', '.py', '-n', 'pyfiles'])
        assert result.exit_code == 0
        assert result.stdout == 'Error: Configuration pyfiles already exists\n'
def test_ls():
    """`ls` command: empty message with no configs, then lists added ones."""
    runner = CliRunner()
    configfile.CONFIG_FILE_PATH = './conf.yml'
    with runner.isolated_filesystem():
        os.makedirs('./test/to')
        # no configurations exist yet
        result = runner.invoke(ls)
        assert result.exit_code == 0
        assert result.stdout == 'No configurations available\n'
        # add two configurations...
        result = runner.invoke(
            add, ['-d', './test/to', '-p', '.py', '-n', 'pyfiles'])
        assert result.exit_code == 0
        result = runner.invoke(
            add, ['-d', './test/to', '-p', '.png', '-n', 'images'])
        assert result.exit_code == 0
        # ...and both should appear in the listing
        result = runner.invoke(ls)
        assert result.exit_code == 0
        assert 'Name=pyfiles' in result.stdout
        assert 'Name=images' in result.stdout
def test_move():
    """`move` command: files matching a configured pattern are relocated."""
    runner = CliRunner()
    configfile.CONFIG_FILE_PATH = './conf.yml'
    with runner.isolated_filesystem():
        os.makedirs('./test/from')
        os.makedirs('./test/to')
        # create one .py file to be moved (its content is irrelevant)
        with open('./test/from/hello.py', 'w') as f:
            f.write("print('hello!'")
        result = runner.invoke(
            add, ['-d', './test/to', '-p', '.py', '-n', 'pyfiles'])
        assert result.exit_code == 0
        result = runner.invoke(
            move, ['-s', './test/from'])
        assert result.exit_code == 0
        # the command reports the move and the file actually changed folders
        assert 'hello.py' in result.stdout
        assert 'moved to' in result.stdout
        assert 'hello.py' in os.listdir('./test/to')
        assert 'hello.py' not in os.listdir('./test/from')
# TODO add test for move and watch (-w option)
|
#!/usr/bin/env python
# Tiny functional-style accessors.
# FIX (PEP 8 E731): don't assign lambdas to names; use def so the functions
# have proper names in tracebacks.
def fst(ab):
    """First element of a pair."""
    return ab[0]


def snd(ab):
    """Second element of a pair."""
    return ab[1]


def head(xs):
    """First element of a sequence."""
    return xs[0]


def tail(xs):
    """All elements after the first."""
    return xs[1:]
def is_inter(a, b):
    """Integer -> Integer -> Bool: is *a* equal to, or one less than, *b*?"""
    return b - a in (0, 1)
def list_sort(data):
    """:: set -> [Integer], where the result is sorted.

    FIX: dropped the redundant list() wrapper -- sorted() already returns a
    new list from any iterable.
    """
    return sorted(data)
def interm(ab, n):
    """:: (Integer, Integer) -> Integer -> (Integer, Integer)

    Extend interval *ab* with *n* when contiguous; otherwise start (n, n).
    """
    return (fst(ab), n) if is_inter(snd(ab), n) else (n, n)
def acc_reduce(xs, ab):
    """[Integer] -> (Integer, Integer) -> [(Integer, Integer)]

    Fold sorted values *xs* into closed intervals, starting from the open
    interval *ab*; an interval is emitted each time contiguity breaks.
    """
    if xs == []:
        return [ab]
    first, rest = head(xs), tail(xs)
    merged = interm(ab, first)
    if merged == (first, first):
        # contiguity broke: emit the finished interval, then continue
        return [ab] + acc_reduce(rest, merged)
    return acc_reduce(rest, merged)
def create_intervals(the_set):
    """Collapse a set of integers into a list of closed (lo, hi) intervals."""
    if len(the_set) == 0:
        return []
    ordered = list_sort(the_set)
    first = head(ordered)
    # acc_reduce's first emitted element is the seed interval; drop it
    return tail(acc_reduce(ordered, (first, first)))
def test_create_intervals():
    """Exercise the helpers and the end-to-end interval construction."""
    data = {1, 2, 3, 4, 5, 7, 8, 12}
    ls_data = [1, 2, 3, 4, 5, 7, 8, 12]
    data2 = {1, 2, 3, 6, 7, 8, 4, 5}

    assert is_inter(2, 3)
    assert is_inter(3, 3)
    assert not is_inter(3, 2)
    assert not is_inter(3, 5)

    assert interm((1, 1), 2) == (1, 2)
    assert interm((1, 2), 4) == (4, 4)

    assert list_sort(data) == ls_data
    assert create_intervals(data) == [(1, 5), (7, 8), (12, 12)]
    assert create_intervals(data2) == [(1, 8)]
    assert create_intervals({}) == []
    assert create_intervals({1}) == [(1, 1)]
    assert create_intervals({1, 1}) == [(1, 1)]
    assert create_intervals({1, 2, 3}) == [(1, 3)]
    assert create_intervals([]) == []


if __name__ == '__main__':
    test_create_intervals()
|
import os
import io
import boto3
import mimetypes
# DigitalOcean Spaces client (S3-compatible API).
# BUG FIX: os.environ is a mapping, not a callable -- os.environ("...")
# raised TypeError; use os.getenv for both credentials.
# BUG FIX: the Spaces endpoint domain is "digitaloceanspaces.com"
# (the original "digitaloceanspace.com" is misspelled).
s3_client = boto3.client(
    "s3",
    endpoint_url="https://ams3.digitaloceanspaces.com",
    aws_access_key_id=os.getenv("DO_SPACE_ACCESS"),
    aws_secret_access_key=os.getenv("DO_SPACE_SECRET"),
    region_name="ams3",
)
def upload_video(local_path, path, bucket_name="video-space", acl="public-read"):
    """
    Upload a local video file to the Spaces bucket at key *path*.

    :param local_path: path of the file on disk
    :param path: destination key inside the bucket
    :param bucket_name: target bucket name
    :param acl: canned ACL for the object.
        BUG FIX: the original default "public-" is not a valid canned ACL;
        "public-read" is the intended value.
    :raises ValueError: if the file's MIME type cannot be guessed
    """
    # BUG FIX: mimetypes.guess_type returns (None, None) rather than
    # raising, so the original try/bare-except never fired; test the result
    # explicitly. (ValueError is an Exception subclass, so existing callers
    # catching Exception still work.)
    mimetype = mimetypes.guess_type(local_path)[0]
    if mimetype is None:
        raise ValueError("Invalid file format")
    with open(local_path, "rb") as f:
        file_obj = io.BytesIO(f.read())
    # BUG FIX: the keyword is ExtraArgs (plural); ExtraArg raised TypeError.
    s3_client.upload_fileobj(
        file_obj,
        bucket_name,
        path,
        ExtraArgs={
            "ACL": acl,
            "ContentType": mimetype,
        },
    )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.